Example #1
/*
 * Check to see if this CPU supports long mode.
 */
static int
bi_checkcpu(void)
{
    char *cpu_vendor;
    int vendor[3];
    int eflags, regs[4];

    /* Check for presence of "cpuid". */
    eflags = read_eflags();
    write_eflags(eflags ^ PSL_ID);
    if (!((eflags ^ read_eflags()) & PSL_ID))
	return (0);

    /* Fetch the vendor string. */
    do_cpuid(0, regs);
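    /* The 12-byte vendor string is returned in EBX, EDX, ECX order. */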
    vendor[0] = regs[1];
    vendor[1] = regs[3];
    vendor[2] = regs[2];
    cpu_vendor = (char *)vendor;

    /* Check for vendors that support AMD features. */
    if (strncmp(cpu_vendor, "GenuineIntel", 12) != 0 &&
	strncmp(cpu_vendor, "AuthenticAMD", 12) != 0)
	return (0);

    /* Has to support AMD features. */
    do_cpuid(0x80000000, regs);
    if (!(regs[0] >= 0x80000001))
	return (0);

    /* Check for long mode. */
    do_cpuid(0x80000001, regs);
    return (regs[3] & AMDID_LM);
}
Example #2
/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
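	/* Let HLT put the CPU into low-power suspend mode. */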
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}
Example #3
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_CELERON_L2_LATENCY > 15
#error invalid CPU_CELERON_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_CELERON_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}
Example #4
void BX_CPU_C::real_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code, Bit16u error_code)
{
  // real mode interrupt
  Bit16u cs_selector, ip;

  if ((vector*4+3) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(real mode) vector > idtr.limit"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  push_16((Bit16u) read_eflags());

  cs_selector = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  push_16(cs_selector);
  ip = EIP;
  push_16(ip);

  access_read_linear(BX_CPU_THIS_PTR idtr.base + 4 * vector,     2, 0, BX_READ, &ip);
  EIP = (Bit32u) ip;
  access_read_linear(BX_CPU_THIS_PTR idtr.base + 4 * vector + 2, 2, 0, BX_READ, &cs_selector);
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_selector);

  /* INT affects the following flags: I,T */
  BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_TF();
#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR clear_AC();
#endif
  BX_CPU_THIS_PTR clear_RF();
}
Example #5
void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
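	/* Convert the page count in Maxmem to whole 4 MB units, rounding up. */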
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;
	else
Example #6
void
trap(struct Trapframe *tf)
{
	// The environment may have set DF and some versions
	// of GCC rely on DF being clear
	asm volatile("cld" ::: "cc");

	// Halt the CPU if some other CPU has called panic()
	extern char *panicstr;
	if (panicstr)
		asm volatile("hlt");

	// Re-acquire the big kernel lock if we were halted in
	// sched_yield()
	if (xchg(&thiscpu->cpu_status, CPU_STARTED) == CPU_HALTED)
		lock_kernel();
	// Check that interrupts are disabled.  If this assertion
	// fails, DO NOT be tempted to fix it by inserting a "cli" in
	// the interrupt path.
	assert(!(read_eflags() & FL_IF));

	if ((tf->tf_cs & 3) == 3) {
		// Trapped from user mode.
		// Acquire the big kernel lock before doing any
		// serious kernel work.
		// LAB 4: Your code here.

		assert(curenv);
		lock_kernel();

		// Garbage collect if the current environment is a zombie
		if (curenv->env_status == ENV_DYING) {
			env_free(curenv);
			curenv = NULL;
			sched_yield();
		}

		// Copy trap frame (which is currently on the stack)
		// into 'curenv->env_tf', so that running the environment
		// will restart at the trap point.
		curenv->env_tf = *tf;
		// The trapframe on the stack should be ignored from here on.
		tf = &curenv->env_tf;
	}

	// Record that tf is the last real trapframe so
	// print_trapframe can print some additional information.
	last_tf = tf;

	// Dispatch based on what type of trap occurred
	trap_dispatch(tf);

	// If we made it to this point, then no other environment was
	// scheduled, so we should return to the current environment
	// if doing so makes sense.
	if (curenv && curenv->env_status == ENV_RUNNING)
		env_run(curenv);
	else
		sched_yield();
}
Example #7
void BX_CPU_C::real_mode_int(Bit8u vector, bx_bool push_error, Bit16u error_code)
{
  if ((vector*4+3) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(real mode) vector > idtr.limit"));
    exception(BX_GP_EXCEPTION, 0);
  }

  push_16((Bit16u) read_eflags());
  push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_16(IP);

  Bit16u new_ip = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector);
  // CS.LIMIT can't change when in real/v8086 mode
  if (new_ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("interrupt(real mode): instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit16u cs_selector = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector + 2);
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_selector);
  EIP = new_ip;

  /* INT affects the following flags: I,T */
  BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_TF();
#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR clear_AC();
#endif
  BX_CPU_THIS_PTR clear_RF();
}
Example #8
void
trap(struct Trapframe *tf)
{
	asm volatile("cld" : : : "cc");
	assert(!(read_eflags() & FL_IF));

	// cprintf("Incoming TRAP frame at %p\n", tf);

	if ((tf->tf_cs & 3) == 3) {
		// Trapped from user mode.
		// Copy trap frame (which is currently on the stack)
		// into 'curenv->env_tf', so that running the environment
		// will restart at the trap point.
		assert(curenv);
		curenv->env_tf = *tf;
		// The trapframe on the stack should be ignored from here on.
		tf = &curenv->env_tf;
	}
	
	// Dispatch based on what type of trap occurred
	trap_dispatch(tf);

	// If we made it to this point, then no other environment was
	// scheduled, so we should return to the current environment
	// if doing so makes sense.
	if (curenv && curenv->env_status == ENV_RUNNABLE)
		env_run(curenv);
	else
		sched_yield();
}
Example #9
void
cpu_idle_mwait_cycle(void)
{
	struct cpu_info *ci = curcpu();

	if ((read_eflags() & PSL_I) == 0)
		panic("idle with interrupts blocked!");

	/* something already queued? */
	if (!cpu_is_idle(ci))
		return;

	/*
	 * About to idle; setting the MWAIT_IN_IDLE bit tells
	 * cpu_unidle() that it can't be a no-op and tells cpu_kick()
	 * that it doesn't need to use an IPI.  We also set the
	 * MWAIT_KEEP_IDLING bit: those routines clear it to stop
	 * the mwait.  Once they're set, we do a final check of the
	 * queue, in case another cpu called setrunqueue() and added
	 * something to the queue and called cpu_unidle() between
	 * the check in sched_idle() and here.
	 */
	atomic_setbits_int(&ci->ci_mwait, MWAIT_IDLING | MWAIT_ONLY);
	if (cpu_is_idle(ci)) {
		monitor(&ci->ci_mwait, 0, 0);
		if ((ci->ci_mwait & MWAIT_IDLING) == MWAIT_IDLING)
			mwait(0, 0);
	}

	/* done idling; let cpu_kick() know that an IPI is required */
	atomic_clearbits_int(&ci->ci_mwait, MWAIT_IDLING);
}
Example #10
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}
Example #11
void
trap(struct Trapframe *tf)
{
	// The environment may have set DF and some versions
	// of GCC rely on DF being clear
	asm volatile("cld" ::: "cc");

	// Check that interrupts are disabled.  If this assertion
	// fails, DO NOT be tempted to fix it by inserting a "cli" in
	// the interrupt path.
	assert(!(read_eflags() & FL_IF));

	if ((tf->tf_cs & 3) == 3) {
		// Trapped from user mode.
		// Copy trap frame (which is currently on the stack)
		// into 'curenv->env_tf', so that running the environment
		// will restart at the trap point.
		assert(curenv);
		curenv->env_tf = *tf;
		// The trapframe on the stack should be ignored from here on.
		tf = &curenv->env_tf;
	}
	
	// Dispatch based on what type of trap occurred
	trap_dispatch(tf);

	// If we made it to this point, then no other environment was
	// scheduled, so we should return to the current environment
	// if doing so makes sense.
	if (curenv && curenv->env_status == ENV_RUNNABLE)
		env_run(curenv);
	else
		sched_yield();
}
Example #12
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
Example #13
static int
acpi_timer_test(void)
{
    uint32_t	last, this;
    int		min, max, max2, n, delta;
    register_t	s;

    min = INT32_MAX;
    max = max2 = 0;

    /* Test the timer with interrupts disabled to get accurate results. */
#if defined(__i386__)
    s = read_eflags();
#elif defined(__x86_64__)
    s = read_rflags();
#else
#error "no read_eflags"
#endif
    cpu_disable_intr();
    AcpiGetTimer(&last);
    for (n = 0; n < 2000; n++) {
	AcpiGetTimer(&this);
	delta = acpi_TimerDelta(this, last);
	if (delta > max) {
	    max2 = max;
	    max = delta;
	} else if (delta > max2) {
	    max2 = delta;
	}
	if (delta < min)
	    min = delta;
	last = this;
    }
#if defined(__i386__)
    write_eflags(s);
#elif defined(__x86_64__)
    write_rflags(s);
#else
#error "no read_eflags"
#endif
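    /*
     * Judge the results: max2 holds the second-largest delta, so one
     * outlying reading does not fail the test by itself, and the spread
     * checks are skipped entirely when running under a hypervisor.
     */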

    delta = max2 - min;
    if ((max - min > 8 || delta > 3) && vmm_guest == VMM_GUEST_NONE)
	n = 0;
    else if (min < 0 || max == 0 || max2 == 0)
	n = 0;
    else
	n = 1;
    if (bootverbose) {
	kprintf("ACPI timer looks %s min = %d, max = %d, width = %d\n",
		n ? "GOOD" : "BAD ",
		min, max, max - min);
    }

    return (n);
}
Example #14
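// pushcli/popcli are matched: it takes two popcli calls to undo two pushcli
// calls.  The first pushcli records whether interrupts were enabled so the
// last matching popcli can restore that state.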
void
pushcli(void)
{
  int eflags;

  eflags = read_eflags();
  cli();
  if(cpus[cpu()].ncli++ == 0)
    cpus[cpu()].intena = eflags & FL_IF;
}
Example #15
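// Undo one level of pushcli; interrupts are re-enabled only when the
// outermost level is popped and they were enabled before the first pushcli.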
void
popcli(void)
{
  if(read_eflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpus[cpu()].ncli < 0)
    panic("popcli");
  if(cpus[cpu()].ncli == 0 && cpus[cpu()].intena)
    sti();
}
Example #16
int BX_CPU_C::v86_redirect_interrupt(Bit8u vector)
{
#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_VME())
  {
    bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
    if (BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled < 103) {
      BX_ERROR(("INT_Ib(): TR.limit < 103 in VME"));
      exception(BX_GP_EXCEPTION, 0);
    }

    Bit32u io_base = system_read_word(tr_base + 102), offset = io_base - 32 + (vector >> 3);
    if (offset > BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled) {
      BX_ERROR(("INT_Ib(): failed to fetch VME redirection bitmap"));
      exception(BX_GP_EXCEPTION, 0);
    }

    Bit8u vme_redirection_bitmap = system_read_byte(tr_base + offset);
    if (!(vme_redirection_bitmap & (1 << (vector & 7))))
    {
      // redirect interrupt through virtual-mode idt
      Bit16u temp_flags = (Bit16u) read_eflags();

      Bit16u temp_CS = system_read_word(vector*4 + 2);
      Bit16u temp_IP = system_read_word(vector*4);

      if (BX_CPU_THIS_PTR get_IOPL() < 3) {
        temp_flags |= EFlagsIOPLMask;
        if (BX_CPU_THIS_PTR get_VIF())
          temp_flags |=  EFlagsIFMask;
        else
          temp_flags &= ~EFlagsIFMask;
      }

      Bit16u old_IP = IP;
      Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

      push_16(temp_flags);
      // push return address onto new stack
      push_16(old_CS);
      push_16(old_IP);

      load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) temp_CS);
      EIP = temp_IP;

      BX_CPU_THIS_PTR clear_TF();
      BX_CPU_THIS_PTR clear_RF();
      if (BX_CPU_THIS_PTR get_IOPL() == 3)
        BX_CPU_THIS_PTR clear_IF();
      else
        BX_CPU_THIS_PTR clear_VIF();

      return 1;
    }
  }
Example #17
static ACPI_STATUS
enter_s4_with_bios(void)
{
	ACPI_OBJECT_LIST	ArgList;
	ACPI_OBJECT		Arg;
	u_long			ef;
	UINT32			ret;
	ACPI_STATUS		status;

	/* run the _PTS and _GTS methods */

	ACPI_MEMSET(&ArgList, 0, sizeof(ArgList));
	ArgList.Count = 1;
	ArgList.Pointer = &Arg;

	ACPI_MEMSET(&Arg, 0, sizeof(Arg));
	Arg.Type = ACPI_TYPE_INTEGER;
	Arg.Integer.Value = ACPI_STATE_S4;

	AcpiEvaluateObject(NULL, "\\_PTS", &ArgList, NULL);
	AcpiEvaluateObject(NULL, "\\_GTS", &ArgList, NULL);

	/* clear wake status */

	AcpiSetRegister(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_LOCK);

	ef = read_eflags();
	disable_intr();

	AcpiHwDisableNonWakeupGpes();

	/* flush caches */

	ACPI_FLUSH_CPU_CACHE();

	/*
	 * write the value to command port and wait until we enter sleep state
	 */
	do {
		AcpiOsStall(1000000);
		AcpiOsWritePort(AcpiGbl_FADT->SmiCmd,
				AcpiGbl_FADT->S4BiosReq, 8);
		status = AcpiGetRegister(ACPI_BITREG_WAKE_STATUS,
					&ret, ACPI_MTX_LOCK);
		if (ACPI_FAILURE(status))
			break;
	} while (!ret);

	AcpiHwEnableNonWakeupGpes();

	write_eflags(ef);

	return (AE_OK);
}
Example #18
Bit64s BX_CPU_C::param_save(bx_param_c *param, Bit64s val)
{
#else
  UNUSED(devptr);
#endif // !BX_USE_CPU_SMF
  const char *pname, *segname;
  bx_segment_reg_t *segment = NULL;

  pname = param->get_name();
  if (!strcmp(pname, "cpu_version")) {
    val = get_cpu_version_information();
  } else if (!strcmp(pname, "cpuid_std")) {
    val = get_std_cpuid_features();
  } else if (!strcmp(pname, "cpuid_ext")) {
    val = get_extended_cpuid_features();
  } else if (!strcmp(pname, "EFLAGS")) {
    val = BX_CPU_THIS_PTR read_eflags();
#if BX_SUPPORT_X86_64
  } else if (!strcmp(pname, "EFER")) {
    val = BX_CPU_THIS_PTR get_EFER();
#endif
  } else if (!strcmp(pname, "ar_byte") || !strcmp(pname, "selector")) {
    segname = param->get_parent()->get_name();
    if (!strcmp(segname, "CS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
    } else if (!strcmp(segname, "DS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
    } else if (!strcmp(segname, "SS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];
    } else if (!strcmp(segname, "ES")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
    } else if (!strcmp(segname, "FS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS];
    } else if (!strcmp(segname, "GS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS];
    } else if (!strcmp(segname, "LDTR")) {
      segment = &BX_CPU_THIS_PTR ldtr;
    } else if (!strcmp(segname, "TR")) {
      segment = &BX_CPU_THIS_PTR tr;
    }
    if (segment != NULL) {
      if (!strcmp(pname, "ar_byte")) {
        val = ar_byte(&(segment->cache));
      }
      else if (!strcmp(pname, "selector")) {
        val = segment->selector.value;
      }
    }
  }
  else {
    BX_PANIC(("Unknown param %s in param_save handler !", pname));
  }
  return val;
}
Example #19
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
void
init_i486_on_386(void)
{
	u_long	eflags;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
Example #20
void pm_init(void)
{
	descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
	ptr_16_32_t idtr;

	/*
	 * Update addresses in GDT and IDT to their virtual counterparts.
	 */
	idtr.limit = sizeof(idt);
	idtr.base = (uintptr_t) idt;
	gdtr_load(&gdtr);
	idtr_load(&idtr);
	
	/*
	 * Each CPU has its private GDT and TSS.
	 * All CPUs share one IDT.
	 */

	if (config.cpu_active == 1) {
		idt_init();
		/*
		 * NOTE: bootstrap CPU has statically allocated TSS, because
		 * the heap hasn't been initialized so far.
		 */
		tss_p = &tss0;
	} else {
		tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
		if (!tss_p)
			panic("Cannot allocate TSS.");
	}

	tss_initialize(tss_p);
	
	gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
	gdt_p[TSS_DES].special = 1;
	gdt_p[TSS_DES].granularity = 0;
	
	gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
	gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

	/*
	 * As of this moment, the current CPU has its own GDT pointing
	 * to its own TSS. We just need to load the TR register.
	 */
	tr_load(GDT_SELECTOR(TSS_DES));
	
	/* Disable I/O on nonprivileged levels and clear NT flag. */
	write_eflags(read_eflags() & ~(EFLAGS_IOPL | EFLAGS_NT));

	/* Disable alignment check */
	write_cr0(read_cr0() & ~CR0_AM);
}
Example #21
/*
 * Check to see if this CPU supports long mode.
 */
static int
bi_checkcpu(void)
{
#if 0
    char *cpu_vendor;
    int vendor[3];
    int eflags, regs[4];

    /* Check for presence of "cpuid". */
    eflags = read_eflags();
    write_eflags(eflags ^ PSL_ID);
    if (!((eflags ^ read_eflags()) & PSL_ID))
	return (0);

    /* Fetch the vendor string. */
    do_cpuid(0, regs);
    vendor[0] = regs[1];
    vendor[1] = regs[3];
    vendor[2] = regs[2];
    cpu_vendor = (char *)vendor;

    /* Check for vendors that support AMD features. */
    if (strncmp(cpu_vendor, INTEL_VENDOR_ID, 12) != 0 &&
	strncmp(cpu_vendor, AMD_VENDOR_ID, 12) != 0 &&
	strncmp(cpu_vendor, CENTAUR_VENDOR_ID, 12) != 0)
	return (0);

    /* Has to support AMD features. */
    do_cpuid(0x80000000, regs);
    if (!(regs[0] >= 0x80000001))
	return (0);

    /* Check for long mode. */
    do_cpuid(0x80000001, regs);
    return (regs[3] & AMDID_LM);
#else
	return (1);
#endif
}
Example #22
static int
acpi_timer_test(void)
{
    uint32_t	last, this;
    int		min, max, n, delta;
    register_t	s;

    min = 10000000;
    max = 0;

    /* Test the timer with interrupts disabled to get accurate results. */
#if defined(__i386__)
    s = read_eflags();
#elif defined(__x86_64__)
    s = read_rflags();
#else
#error "no read_eflags"
#endif
    cpu_disable_intr();
    last = acpi_timer_read();
    for (n = 0; n < 2000; n++) {
	this = acpi_timer_read();
	delta = acpi_TimerDelta(this, last);
	if (delta > max)
	    max = delta;
	else if (delta < min)
	    min = delta;
	last = this;
    }
#if defined(__i386__)
    write_eflags(s);
#elif defined(__x86_64__)
    write_rflags(s);
#else
#error "no read_eflags"
#endif

    if (max - min > 2)
	n = 0;
    else if (min < 0 || max == 0)
	n = 0;
    else
	n = 1;
    if (bootverbose) {
	kprintf("ACPI timer looks %s min = %d, max = %d, width = %d\n",
		n ? "GOOD" : "BAD ",
		min, max, max - min);
    }

    return (n);
}
Example #23
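/* Program the VGA DAC: port 0x3C8 selects the starting palette index and
 * each group of three writes to 0x3C9 supplies R, G, B for one entry; the
 * DAC takes 6 bits per channel, hence the division by 4. */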
void set_palette(int start, int end, unsigned char* rgb){
	int i, eflags;
	eflags = read_eflags();	// replaces the author's io_load_eflags()
	io_cli();
	outb(0x03c8, start);	// replaces the author's io_out8()
	for(i=start; i<=end; i++){
		outb(0x03c9,rgb[0]/4);
		outb(0x03c9,rgb[1]/4);
		outb(0x03c9,rgb[2]/4);
		rgb+=3;
	}
	write_eflags(eflags);	// replaces the author's io_store_eflags(eflags)
	return;
}
Example #24
// Enter scheduler.  Must already hold proc_table_lock
// and have changed curproc[cpu()]->state.
void
sched(void)
{
  if(read_eflags()&FL_IF)
    panic("sched interruptible");
  if(cp->state == RUNNING)
    panic("sched running");
  if(!holding(&proc_table_lock))
    panic("sched proc_table_lock");
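  // proc_table_lock should be the only lock held, so the pushcli depth must be 1.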
  if(cpus[cpu()].ncli != 1)
    panic("sched locks");

  swtch(&cp->context, &cpus[cpu()].context);
}
Example #25
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
Example #26
/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	cpu_disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}
Example #27
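/*
 * Send a TLB-shootdown IPI to every CPU in 'mask' (or to all other CPUs when
 * the mask is full) and spin until each target acknowledges by advancing
 * smp_tlb_wait.
 */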
static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int cpu, ncpu, othercpus;
	struct _call_data data;

	othercpus = mp_ncpus - 1;
	if (CPU_ISFULLSET(&mask)) {
		if (othercpus < 1)
			return;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;		
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (CPU_ISFULLSET(&mask)) {
		ncpu = othercpus;
		ipi_all_but_self(vector);
	} else {
		ncpu = 0;
		while ((cpu = cpusetobj_ffs(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
			    vector);
			ipi_send_cpu(cpu, vector);
			ncpu++;
		}
	}
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #28
int
cpu(void)
{
	// Cannot call cpu when interrupts are enabled:
	// result not guaranteed to last long enough to be used!
	// Would prefer to panic but even printing is chancy here:
	// everything, including cprintf, calls cpu, at least indirectly
	// through acquire and release.
	if(read_eflags()&FL_IF){
		static int n;
		if(n++ == 0)
			cprintf("cpu called from %x with interrupts enabled\n",
				((uint32_t*)read_ebp())[1]);
	}

	if(lapic)
		return lapic[ID]>>ID_SHIFT;
	return 0;
}
Example #29
/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}
Example #30
void trap(struct frame *tf)
{
	
	// The environment may have set DF and some versions
	// of GCC rely on DF being clear
	asm volatile("cld" ::: "cc");

	assert(!(read_eflags() & FL_IF));

	// Remember the current trap frame for this task.
	curtask->trapframe = tf;
	if ((tf->tf_cs & 3) == 3) {
		
	} else {
		//print_frame(tf);
		//panic("kernel double fault\n");
	}
	trap_dispatch(tf);

	schedule();
}