Code example #1
File: mp_desc.c  Project: MACasuba/MACasuba-Utils-git
void
cpu_desc_init(
	cpu_data_t	*cdp,
	boolean_t	is_boot_cpu)
{
	cpu_desc_table_t	*cdt = cdp->cpu_desc_tablep;
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	if (is_boot_cpu) {
	    /*
	     * Master CPU uses the tables built at boot time.
	     * Just set the index pointers to the high shared-mapping space.
	     * Note that the sysenter stack uses empty space above the ktss
	     * in the HIGH_FIXED_KTSS page. In this case we don't map the
	     * real master_sstk in low memory.
	     */
	    cdi->cdi_ktss = (struct i386_tss *)
				pmap_index_to_virt(HIGH_FIXED_KTSS);
	    cdi->cdi_sstk  = (vm_offset_t) (cdi->cdi_ktss + 1) +
				(vm_offset_t) &master_sstk.top -
				(vm_offset_t) &master_sstk;
#if	MACH_KDB
	    cdi->cdi_dbtss = (struct i386_tss *)
				pmap_index_to_virt(HIGH_FIXED_DBTSS);
#endif	/* MACH_KDB */
	    cdi->cdi_gdt = (struct fake_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_GDT);
	    cdi->cdi_idt = (struct fake_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_IDT);
	    cdi->cdi_ldt = (struct fake_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
	} else {

	    vm_offset_t	cpu_hi_desc;

	    cpu_hi_desc = pmap_cpu_high_shared_remap(cdp->cpu_number,
						     HIGH_CPU_DESC,
						     (vm_offset_t) cdt, 1);

	    /*
	     * Per-cpu GDT, IDT, LDT, KTSS descriptors are allocated in one
	     * block (cpu_desc_table) and double-mapped into high shared space
	     * in one page window.
	     * Also, a transient stack for the fast sysenter path, the top of
	     * which is set at context switch time to point to the PCB using
	     * the high address.
	     */
	    cdi->cdi_gdt  = (struct fake_descriptor *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, gdt[0]));
	    cdi->cdi_idt  = (struct fake_descriptor *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, idt[0]));
	    cdi->cdi_ktss = (struct i386_tss *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, ktss));
	    cdi->cdi_sstk = cpu_hi_desc +
				offsetof(cpu_desc_table_t, sstk.top);
				
	    /*
	     * LDT descriptors are mapped into a separate area.
	     */
	    cdi->cdi_ldt  = (struct fake_descriptor *)
				pmap_cpu_high_shared_remap(
				cdp->cpu_number,
				HIGH_CPU_LDT_BEGIN,
				(vm_offset_t) cdp->cpu_ldtp,
				HIGH_CPU_LDT_END - HIGH_CPU_LDT_BEGIN + 1);

	    /*
	     * Copy the tables
	     */
	    bcopy((char *)master_idt,
		  (char *)cdt->idt,
		  sizeof(master_idt));
	    bcopy((char *)master_gdt,
		  (char *)cdt->gdt,
		  sizeof(master_gdt));
	    bcopy((char *)master_ldt,
		  (char *)cdp->cpu_ldtp,
		  sizeof(master_ldt));
	    bzero((char *)&cdt->ktss,
		  sizeof(struct i386_tss));

#if	MACH_KDB
	    cdi->cdi_dbtss = (struct i386_tss *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, dbtss));
	    bcopy((char *)&master_dbtss,
		  (char *)&cdt->dbtss,
		  sizeof(struct i386_tss));
#endif	/* MACH_KDB */

	    /*
	     * Fix up the entries in the GDT to point to
	     * this LDT and this TSS.
	     */
	    cdt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
	    cdt->gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) cdi->cdi_ldt;
	    fix_desc(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

	    cdt->gdt[sel_idx(USER_LDT)] = ldt_desc_pattern;
	    cdt->gdt[sel_idx(USER_LDT)].offset = (vm_offset_t) cdi->cdi_ldt;
	    fix_desc(&cdt->gdt[sel_idx(USER_LDT)], 1);

	    cdt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
	    cdt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) cdi->cdi_ktss;
	    fix_desc(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

	    cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
	    cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
	    fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);

#if	MACH_KDB
	    cdt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
	    cdt->gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) cdi->cdi_dbtss;
	    fix_desc(&cdt->gdt[sel_idx(DEBUG_TSS)], 1);

	    cdt->dbtss.esp0 = (int)(db_task_stack_store +
		    (INTSTACK_SIZE * (cdp->cpu_number)) - sizeof (natural_t));
	    cdt->dbtss.esp = cdt->dbtss.esp0;
	    cdt->dbtss.eip = (int)&db_task_start;
#endif	/* MACH_KDB */

	    cdt->ktss.ss0 = KERNEL_DS;
	    cdt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */

	    cpu_userwindow_init(cdp->cpu_number);
	    cpu_physwindow_init(cdp->cpu_number);

	}

}
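
A note on the non-boot path above: every pointer published in the cpu_desc_index is derived the same way, as the base of the remapped high window plus the offsetof() of the corresponding member of cpu_desc_table_t. The toy program below is a minimal, self-contained sketch of that pattern using invented types (toy_desc_table is not the real XNU structure): one region holds the data, a second region aliases the same bytes, and pointers into the alias are computed from the original layout with offsetof().

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Toy stand-in for the descriptor table; layout only, not the XNU type. */
struct toy_desc_table {
	uint64_t gdt[8];
	uint64_t idt[8];
	uint32_t ktss[26];
	char     sstk[256];
};

int
main(void)
{
	static struct toy_desc_table low;		/* directly addressed copy */
	static unsigned char window[sizeof low];	/* pretend "high" alias   */

	/*
	 * In the kernel the alias comes from remapping the same physical
	 * page; here we simply copy so both views hold identical bytes.
	 */
	memcpy(window, &low, sizeof low);

	uintptr_t hi_base = (uintptr_t) window;

	/* Same derivation as cdi_gdt/cdi_idt/cdi_ktss above: base + offset. */
	uint64_t *hi_gdt  = (uint64_t *)(hi_base + offsetof(struct toy_desc_table, gdt[0]));
	uint64_t *hi_idt  = (uint64_t *)(hi_base + offsetof(struct toy_desc_table, idt[0]));
	uint32_t *hi_ktss = (uint32_t *)(hi_base + offsetof(struct toy_desc_table, ktss));

	printf("gdt offset  = %zu\n", offsetof(struct toy_desc_table, gdt[0]));
	printf("idt offset  = %zu\n", offsetof(struct toy_desc_table, idt[0]));
	printf("ktss offset = %zu\n", offsetof(struct toy_desc_table, ktss));
	printf("hi_gdt=%p hi_idt=%p hi_ktss=%p\n",
	       (void *) hi_gdt, (void *) hi_idt, (void *) hi_ktss);
	return 0;
}
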
Code example #2
File: genassym.c  Project: SbIm/xnu-env
int
main(
	int	argc,
	char	**argv)
{
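	/*
	 * Each DECLARE() below emits a named constant (typically a structure
	 * member offset or a flag value) so that the assembly sources can
	 * reference kernel data layouts symbolically instead of hard-coding
	 * numbers. The values are harvested from the build output of this
	 * file and turned into an include for the assembly code.
	 */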

	DECLARE("AST_URGENT",		AST_URGENT);
	DECLARE("AST_BSD",			AST_BSD);

	/* Simple Lock structure */
	DECLARE("SLOCK_ILK",	offsetof(usimple_lock_t, interlock));
#if	MACH_LDEBUG
	DECLARE("SLOCK_TYPE",	offsetof(usimple_lock_t, lock_type));
	DECLARE("SLOCK_PC",	offsetof(usimple_lock_t, debug.lock_pc));
	DECLARE("SLOCK_THREAD",	offsetof(usimple_lock_t, debug.lock_thread));
	DECLARE("SLOCK_DURATIONH",offsetof(usimple_lock_t, debug.duration[0]));
	DECLARE("SLOCK_DURATIONL",offsetof(usimple_lock_t, debug.duration[1]));
	DECLARE("USLOCK_TAG",	USLOCK_TAG);
#endif	/* MACH_LDEBUG */

	/* Mutex structure */
	DECLARE("MUTEX_OWNER", offsetof(lck_mtx_t *, lck_mtx_owner));
	DECLARE("MUTEX_PTR",   offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_STATE", offsetof(lck_mtx_t *, lck_mtx_state));
#ifdef __i386__
	DECLARE("MUTEX_TYPE",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.type));
	DECLARE("MUTEX_PC",		offsetof(lck_mtx_ext_t *, lck_mtx_deb.pc));
	DECLARE("MUTEX_THREAD",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.thread));
	DECLARE("MUTEX_ATTR",	offsetof(lck_mtx_ext_t *, lck_mtx_attr));
	DECLARE("MUTEX_ATTR_DEBUG", LCK_MTX_ATTR_DEBUG);
	DECLARE("MUTEX_ATTR_DEBUGb", LCK_MTX_ATTR_DEBUGb);
	DECLARE("MUTEX_ATTR_STAT", LCK_MTX_ATTR_STAT);
	DECLARE("MUTEX_ATTR_STATb", LCK_MTX_ATTR_STATb);
	DECLARE("MUTEX_TAG",	MUTEX_TAG);
#endif
	DECLARE("MUTEX_IND",	LCK_MTX_TAG_INDIRECT);
	DECLARE("MUTEX_EXT",	LCK_MTX_PTR_EXTENDED);
	DECLARE("MUTEX_ITAG",	offsetof(lck_mtx_t *, lck_mtx_tag));
	DECLARE("MUTEX_PTR",	offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_ASSERT_OWNED",	LCK_MTX_ASSERT_OWNED);
	DECLARE("MUTEX_ASSERT_NOTOWNED",LCK_MTX_ASSERT_NOTOWNED);
	DECLARE("GRP_MTX_STAT_UTIL",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt));
	DECLARE("GRP_MTX_STAT_MISS",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt));
	DECLARE("GRP_MTX_STAT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt));
	
	/* x86 only */
	DECLARE("MUTEX_DESTROYED", LCK_MTX_TAG_DESTROYED);

	/* Per-mutex statistic element */
	DECLARE("MTX_ACQ_TSC",	offsetof(lck_mtx_ext_t *, lck_mtx_stat));

	/* Mutex group statistics elements */
	DECLARE("MUTEX_GRP",	offsetof(lck_mtx_ext_t *, lck_mtx_grp));
	
	/*
	 * The use of this field is somewhat at variance with the alias.
	 */
	DECLARE("GRP_MTX_STAT_DIRECT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt));

	DECLARE("GRP_MTX_STAT_HELD_MAX",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max));
	/* Reader writer lock types */
	DECLARE("RW_SHARED",    LCK_RW_TYPE_SHARED);
	DECLARE("RW_EXCL",      LCK_RW_TYPE_EXCLUSIVE);

	DECLARE("TH_RECOVER",		offsetof(thread_t, recover));
	DECLARE("TH_CONTINUATION",	offsetof(thread_t, continuation));
	DECLARE("TH_KERNEL_STACK",	offsetof(thread_t, kernel_stack));

	DECLARE("TASK_MACH_EXC_PORT",
		offsetof(task_t, exc_actions[EXC_MACH_SYSCALL].port));
	DECLARE("TASK_SYSCALLS_MACH",	offsetof(struct task *, syscalls_mach));
	DECLARE("TASK_SYSCALLS_UNIX",	offsetof(struct task *, syscalls_unix));

	DECLARE("TASK_VTIMERS",			offsetof(struct task *, vtimers));

	/* These fields are being added on demand */
	DECLARE("ACT_MACH_EXC_PORT",
		offsetof(thread_t, exc_actions[EXC_MACH_SYSCALL].port));

	DECLARE("ACT_TASK",	offsetof(thread_t, task));
	DECLARE("ACT_AST",	offsetof(thread_t, ast));
	DECLARE("ACT_PCB",	offsetof(thread_t, machine.pcb));
	DECLARE("ACT_SPF",	offsetof(thread_t, machine.specFlags));
	DECLARE("ACT_MAP",	offsetof(thread_t, map));
	DECLARE("ACT_PCB_ISS", 	offsetof(thread_t, machine.xxx_pcb.iss));
	DECLARE("ACT_PCB_IDS", 	offsetof(thread_t, machine.xxx_pcb.ids));
#if NCOPY_WINDOWS > 0
	DECLARE("ACT_COPYIO_STATE", offsetof(thread_t, machine.copyio_state));
	DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN);
#endif

	DECLARE("MAP_PMAP",	offsetof(vm_map_t, pmap));

#define IEL_SIZE		(sizeof(struct i386_exception_link *))
	DECLARE("IEL_SIZE",	IEL_SIZE);
	DECLARE("IKS_SIZE",	sizeof(struct x86_kernel_state));

	/*
	 * KSS_* are offsets from the top of the kernel stack (cpu_kernel_stack)
	 */
#if defined(__i386__)
	DECLARE("KSS_EBX", IEL_SIZE + offsetof(struct x86_kernel_state *, k_ebx));
	DECLARE("KSS_ESP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_esp));
	DECLARE("KSS_EBP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_ebp));
	DECLARE("KSS_EDI", IEL_SIZE + offsetof(struct x86_kernel_state *, k_edi));
	DECLARE("KSS_ESI", IEL_SIZE + offsetof(struct x86_kernel_state *, k_esi));
	DECLARE("KSS_EIP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_eip));
#elif defined(__x86_64__)
	DECLARE("KSS_RBX", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rbx));
	DECLARE("KSS_RSP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rsp));
	DECLARE("KSS_RBP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rbp));
	DECLARE("KSS_R12", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r12));
	DECLARE("KSS_R13", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r13));
	DECLARE("KSS_R14", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r14));
	DECLARE("KSS_R15", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r15));
	DECLARE("KSS_RIP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rip));	
#else
#error Unsupported architecture
#endif
	
	DECLARE("PCB_FPS",	offsetof(pcb_t, ifps));
	DECLARE("PCB_ISS",	offsetof(pcb_t, iss));

	DECLARE("DS_DR0",	offsetof(struct x86_debug_state32 *, dr0));
	DECLARE("DS_DR1",	offsetof(struct x86_debug_state32 *, dr1));
	DECLARE("DS_DR2",	offsetof(struct x86_debug_state32 *, dr2));
	DECLARE("DS_DR3",	offsetof(struct x86_debug_state32 *, dr3));
	DECLARE("DS_DR4",	offsetof(struct x86_debug_state32 *, dr4));
	DECLARE("DS_DR5",	offsetof(struct x86_debug_state32 *, dr5));
	DECLARE("DS_DR6",	offsetof(struct x86_debug_state32 *, dr6));
	DECLARE("DS_DR7",	offsetof(struct x86_debug_state32 *, dr7));

	DECLARE("DS64_DR0",	offsetof(struct x86_debug_state64 *, dr0));
	DECLARE("DS64_DR1",	offsetof(struct x86_debug_state64 *, dr1));
	DECLARE("DS64_DR2",	offsetof(struct x86_debug_state64 *, dr2));
	DECLARE("DS64_DR3",	offsetof(struct x86_debug_state64 *, dr3));
	DECLARE("DS64_DR4",	offsetof(struct x86_debug_state64 *, dr4));
	DECLARE("DS64_DR5",	offsetof(struct x86_debug_state64 *, dr5));
	DECLARE("DS64_DR6",	offsetof(struct x86_debug_state64 *, dr6));
	DECLARE("DS64_DR7",	offsetof(struct x86_debug_state64 *, dr7));

	DECLARE("FP_VALID",	offsetof(struct x86_fpsave_state *,fp_valid));

	DECLARE("SS_FLAVOR",	offsetof(x86_saved_state_t *, flavor));
	DECLARE("SS_32",	x86_SAVED_STATE32);
	DECLARE("SS_64",	x86_SAVED_STATE64);

#define R_(x)  offsetof(x86_saved_state_t *, ss_32.x)
	DECLARE("R32_CS",	R_(cs));
	DECLARE("R32_SS",	R_(ss));
	DECLARE("R32_DS",	R_(ds));
	DECLARE("R32_ES",	R_(es));
	DECLARE("R32_FS",	R_(fs));
	DECLARE("R32_GS",	R_(gs));
	DECLARE("R32_UESP",	R_(uesp));
	DECLARE("R32_EBP",	R_(ebp));
	DECLARE("R32_EAX",	R_(eax));
	DECLARE("R32_EBX",	R_(ebx));
	DECLARE("R32_ECX",	R_(ecx));
	DECLARE("R32_EDX",	R_(edx));
	DECLARE("R32_ESI",	R_(esi));
	DECLARE("R32_EDI",	R_(edi));
	DECLARE("R32_TRAPNO",	R_(trapno));
	DECLARE("R32_ERR",	R_(err));
	DECLARE("R32_EFLAGS",	R_(efl));
	DECLARE("R32_EIP",	R_(eip));
	DECLARE("R32_CR2",	R_(cr2));
	DECLARE("ISS32_SIZE",	sizeof (x86_saved_state32_t));

#define R64_(x)  offsetof(x86_saved_state_t *, ss_64.x)
	DECLARE("R64_FS",	R64_(fs));
	DECLARE("R64_GS",	R64_(gs));
	DECLARE("R64_R8",	R64_(r8));
	DECLARE("R64_R9",	R64_(r9));
	DECLARE("R64_R10",	R64_(r10));
	DECLARE("R64_R11",	R64_(r11));
	DECLARE("R64_R12",	R64_(r12));
	DECLARE("R64_R13",	R64_(r13));
	DECLARE("R64_R14",	R64_(r14));
	DECLARE("R64_R15",	R64_(r15));
	DECLARE("R64_RBP",	R64_(rbp));
	DECLARE("R64_RAX",	R64_(rax));
	DECLARE("R64_RBX",	R64_(rbx));
	DECLARE("R64_RCX",	R64_(rcx));
	DECLARE("R64_RDX",	R64_(rdx));
	DECLARE("R64_RSI",	R64_(rsi));
	DECLARE("R64_RDI",	R64_(rdi));
	DECLARE("R64_V_ARG6",	R64_(v_arg6));
	DECLARE("R64_V_ARG7",	R64_(v_arg7));
	DECLARE("R64_V_ARG8",	R64_(v_arg8));
	DECLARE("R64_CS",	R64_(isf.cs));
	DECLARE("R64_SS",	R64_(isf.ss));
	DECLARE("R64_RSP",	R64_(isf.rsp));
	DECLARE("R64_TRAPNO",	R64_(isf.trapno));
	DECLARE("R64_TRAPFN",	R64_(isf.trapfn));
	DECLARE("R64_ERR",	R64_(isf.err));
	DECLARE("R64_RFLAGS",	R64_(isf.rflags));
	DECLARE("R64_RIP",	R64_(isf.rip));
	DECLARE("R64_CR2",	R64_(cr2));
	DECLARE("ISS64_OFFSET",	R64_(isf));
	DECLARE("ISS64_SIZE",	sizeof (x86_saved_state64_t));

#define ISF64_(x)  offsetof(x86_64_intr_stack_frame_t *, x)
	DECLARE("ISF64_TRAPNO",	ISF64_(trapno));
	DECLARE("ISF64_TRAPFN",	ISF64_(trapfn));
	DECLARE("ISF64_ERR",	ISF64_(err));
	DECLARE("ISF64_RIP",	ISF64_(rip));
	DECLARE("ISF64_CS",	ISF64_(cs));
	DECLARE("ISF64_RFLAGS",	ISF64_(rflags));
	DECLARE("ISF64_RSP",	ISF64_(rsp));
	DECLARE("ISF64_SS",	ISF64_(ss));
	DECLARE("ISF64_SIZE",	sizeof(x86_64_intr_stack_frame_t));

	DECLARE("ISC32_OFFSET",	offsetof(x86_saved_state_compat32_t *, isf64));
#define ISC32_(x)  offsetof(x86_saved_state_compat32_t *, isf64.x)
	DECLARE("ISC32_TRAPNO", ISC32_(trapno));
	DECLARE("ISC32_TRAPFN",	ISC32_(trapfn));
	DECLARE("ISC32_ERR",	ISC32_(err));
	DECLARE("ISC32_RIP",	ISC32_(rip));
	DECLARE("ISC32_CS",	ISC32_(cs));
	DECLARE("ISC32_RFLAGS",	ISC32_(rflags));
	DECLARE("ISC32_RSP",	ISC32_(rsp));
	DECLARE("ISC32_SS",	ISC32_(ss));

	DECLARE("NBPG",			I386_PGBYTES);
	DECLARE("PAGE_SIZE",            I386_PGBYTES);
	DECLARE("PAGE_MASK",            I386_PGBYTES-1);
	DECLARE("PAGE_SHIFT",           12);
	DECLARE("NKPT",                 NKPT);
#ifdef __i386__
	DECLARE("KPTDI",                KPTDI);
#endif
	DECLARE("VM_MIN_ADDRESS",	VM_MIN_ADDRESS);
	DECLARE("VM_MAX_ADDRESS",	VM_MAX_ADDRESS);
	DECLARE("KERNELBASE",		VM_MIN_KERNEL_ADDRESS);
	DECLARE("LINEAR_KERNELBASE",	LINEAR_KERNEL_ADDRESS);
	DECLARE("KERNEL_STACK_SIZE",	KERNEL_STACK_SIZE);
#ifdef __i386__
	DECLARE("KERNEL_UBER_BASE_HI32", KERNEL_UBER_BASE_HI32);
#endif

	DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS",  _COMM_PAGE32_BASE_ADDRESS);
	DECLARE("ASM_COMM_PAGE32_START_ADDRESS",  _COMM_PAGE32_START_ADDRESS);
	DECLARE("ASM_COMM_PAGE_SCHED_GEN",  _COMM_PAGE_SCHED_GEN);

	DECLARE("PDESHIFT",	PDESHIFT);
	DECLARE("PTEMASK",	PTEMASK);
	DECLARE("PTEINDX",      PTEINDX);
	DECLARE("INTEL_PTE_PFN",	INTEL_PTE_PFN);
	DECLARE("INTEL_PTE_VALID",	INTEL_PTE_VALID);
	DECLARE("INTEL_PTE_WRITE",	INTEL_PTE_WRITE);
	DECLARE("INTEL_PTE_PS",       INTEL_PTE_PS);
	DECLARE("INTEL_PTE_USER",        INTEL_PTE_USER);
	DECLARE("INTEL_PTE_INVALID",	INTEL_PTE_INVALID);
	DECLARE("NPGPTD", NPGPTD);
#if defined(__x86_64__)
	DECLARE("INITPT_SEG_BASE",INITPT_SEG_BASE);
	DECLARE("INITGDT_SEG_BASE",INITGDT_SEG_BASE);
	DECLARE("SLEEP_SEG_BASE",SLEEP_SEG_BASE);
	DECLARE("PROT_MODE_GDT_SIZE",PROT_MODE_GDT_SIZE);
	DECLARE("KERNEL_PML4_INDEX",KERNEL_PML4_INDEX);
#endif
	DECLARE("IDTSZ",	IDTSZ);
	DECLARE("GDTSZ",	GDTSZ);
	DECLARE("LDTSZ",	LDTSZ);

	DECLARE("KERNEL_DS",	KERNEL_DS);
	DECLARE("USER_CS",	USER_CS);
	DECLARE("USER_DS",	USER_DS);
	DECLARE("KERNEL32_CS",	KERNEL32_CS);
	DECLARE("KERNEL64_CS",  KERNEL64_CS);
	DECLARE("USER64_CS",	USER64_CS);
	DECLARE("KERNEL_TSS",	KERNEL_TSS);
	DECLARE("KERNEL_LDT",	KERNEL_LDT);
#ifdef __i386__
	DECLARE("DF_TSS",	DF_TSS);
	DECLARE("MC_TSS",	MC_TSS);
#if	MACH_KDB
	DECLARE("DEBUG_TSS",	DEBUG_TSS);
#endif	/* MACH_KDB */
	DECLARE("CPU_DATA_GS",	CPU_DATA_GS);
#endif /* __i386__ */
	DECLARE("SYSENTER_CS",	SYSENTER_CS);
	DECLARE("SYSENTER_TF_CS",SYSENTER_TF_CS);
	DECLARE("SYSENTER_DS",	SYSENTER_DS);
	DECLARE("SYSCALL_CS",	SYSCALL_CS);
#ifdef __i386__
	DECLARE("USER_WINDOW_SEL",	USER_WINDOW_SEL);
	DECLARE("PHYS_WINDOW_SEL",	PHYS_WINDOW_SEL);
#endif

        DECLARE("CPU_THIS",
		offsetof(cpu_data_t *, cpu_this));
        DECLARE("CPU_ACTIVE_THREAD",
		offsetof(cpu_data_t *, cpu_active_thread));
        DECLARE("CPU_ACTIVE_STACK",
		offsetof(cpu_data_t *, cpu_active_stack));
        DECLARE("CPU_KERNEL_STACK",
		offsetof(cpu_data_t *, cpu_kernel_stack));
        DECLARE("CPU_INT_STACK_TOP",
		offsetof(cpu_data_t *, cpu_int_stack_top));
#if	MACH_RT
        DECLARE("CPU_PREEMPTION_LEVEL",
		offsetof(cpu_data_t *, cpu_preemption_level));
#endif	/* MACH_RT */
        DECLARE("CPU_HIBERNATE",
		offsetof(cpu_data_t *, cpu_hibernate));
        DECLARE("CPU_INTERRUPT_LEVEL",
		offsetof(cpu_data_t *, cpu_interrupt_level));
        DECLARE("CPU_SIMPLE_LOCK_COUNT",
		offsetof(cpu_data_t *,cpu_simple_lock_count));
        DECLARE("CPU_NUMBER_GS",
		offsetof(cpu_data_t *,cpu_number));
        DECLARE("CPU_RUNNING",
		offsetof(cpu_data_t *,cpu_running));
        DECLARE("CPU_MCOUNT_OFF",
		offsetof(cpu_data_t *,cpu_mcount_off));
	DECLARE("CPU_PENDING_AST",
		offsetof(cpu_data_t *,cpu_pending_ast));
	DECLARE("CPU_DESC_TABLEP",
		offsetof(cpu_data_t *,cpu_desc_tablep));
	DECLARE("CPU_DESC_INDEX",
		offsetof(cpu_data_t *,cpu_desc_index));
	DECLARE("CDI_GDT",
		offsetof(cpu_desc_index_t *,cdi_gdt));
	DECLARE("CDI_IDT",
		offsetof(cpu_desc_index_t *,cdi_idt));
	DECLARE("CPU_PROCESSOR",
		offsetof(cpu_data_t *,cpu_processor));
        DECLARE("CPU_INT_STATE",
		offsetof(cpu_data_t *, cpu_int_state));
        DECLARE("CPU_INT_EVENT_TIME",
		offsetof(cpu_data_t *, cpu_int_event_time));

#ifdef __i386__
        DECLARE("CPU_HI_ISS",
		offsetof(cpu_data_t *, cpu_hi_iss));
#endif
        DECLARE("CPU_TASK_CR3",
		offsetof(cpu_data_t *, cpu_task_cr3));
        DECLARE("CPU_ACTIVE_CR3",
		offsetof(cpu_data_t *, cpu_active_cr3));
        DECLARE("CPU_KERNEL_CR3",
		offsetof(cpu_data_t *, cpu_kernel_cr3));
#ifdef __x86_64__
		DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t *, cpu_tlb_invalid));
#endif

	DECLARE("CPU_IS64BIT",
		offsetof(cpu_data_t *, cpu_is64bit));
	DECLARE("CPU_TASK_MAP",
		offsetof(cpu_data_t *, cpu_task_map));
	DECLARE("TASK_MAP_32BIT",		TASK_MAP_32BIT); 
	DECLARE("TASK_MAP_64BIT",		TASK_MAP_64BIT);
#ifdef __i386__
	DECLARE("TASK_MAP_64BIT_SHARED",	TASK_MAP_64BIT_SHARED); 
#endif
	DECLARE("CPU_UBER_USER_GS_BASE",
		offsetof(cpu_data_t *, cpu_uber.cu_user_gs_base));
	DECLARE("CPU_UBER_ISF",
		offsetof(cpu_data_t *, cpu_uber.cu_isf));
	DECLARE("CPU_UBER_TMP",
		offsetof(cpu_data_t *, cpu_uber.cu_tmp));
	DECLARE("CPU_UBER_ARG_STORE",
		offsetof(cpu_data_t *, cpu_uber_arg_store));
	DECLARE("CPU_UBER_ARG_STORE_VALID",
		offsetof(cpu_data_t *, cpu_uber_arg_store_valid));

	DECLARE("CPU_NANOTIME",
		offsetof(cpu_data_t *, cpu_nanotime));

	DECLARE("CPU_DR7",
		offsetof(cpu_data_t *, cpu_dr7));

	DECLARE("hwIntCnt", 	offsetof(cpu_data_t *,cpu_hwIntCnt));

	DECLARE("enaExpTrace",	enaExpTrace);
	DECLARE("enaExpTraceb",	enaExpTraceb);
	DECLARE("enaUsrFCall",	enaUsrFCall);
	DECLARE("enaUsrFCallb",	enaUsrFCallb);
	DECLARE("enaUsrPhyMp",	enaUsrPhyMp);
	DECLARE("enaUsrPhyMpb",	enaUsrPhyMpb);
	DECLARE("enaDiagSCs",	enaDiagSCs);
	DECLARE("enaDiagSCsb",	enaDiagSCsb);
	DECLARE("enaDiagEM",	enaDiagEM);
	DECLARE("enaDiagEMb",	enaDiagEMb);
	DECLARE("enaNotifyEM",	enaNotifyEM);
	DECLARE("enaNotifyEMb",	enaNotifyEMb);
	DECLARE("dgLock",		offsetof(struct diagWork *, dgLock));
	DECLARE("dgFlags",		offsetof(struct diagWork *, dgFlags));
	DECLARE("dgMisc1",		offsetof(struct diagWork *, dgMisc1));
	DECLARE("dgMisc2",		offsetof(struct diagWork *, dgMisc2));
	DECLARE("dgMisc3",		offsetof(struct diagWork *, dgMisc3));
	DECLARE("dgMisc4",		offsetof(struct diagWork *, dgMisc4));
	DECLARE("dgMisc5",		offsetof(struct diagWork *, dgMisc5));

	DECLARE("INTEL_PTE_KERNEL",	INTEL_PTE_VALID|INTEL_PTE_WRITE);
	DECLARE("PDESHIFT",     PDESHIFT);
	DECLARE("PDESIZE",     PDESIZE);
	DECLARE("PTESIZE",     PTESIZE);
#ifdef __i386__
	DECLARE("PTDPTDI",     PTDPTDI);
	DECLARE("APTDPTDI",     APTDPTDI);
	DECLARE("HIGH_MEM_BASE", HIGH_MEM_BASE);
	DECLARE("HIGH_IDT_BASE", pmap_index_to_virt(HIGH_FIXED_IDT));
#endif

	DECLARE("KERNELBASEPDE",
		(LINEAR_KERNEL_ADDRESS >> PDESHIFT) *
		sizeof(pt_entry_t));

	DECLARE("TSS_ESP0",	offsetof(struct i386_tss *, esp0));
	DECLARE("TSS_SS0",	offsetof(struct i386_tss *, ss0));
	DECLARE("TSS_LDT",	offsetof(struct i386_tss *, ldt));
	DECLARE("TSS_PDBR",	offsetof(struct i386_tss *, cr3));
	DECLARE("TSS_LINK",	offsetof(struct i386_tss *, back_link));

	DECLARE("K_TASK_GATE",	ACC_P|ACC_PL_K|ACC_TASK_GATE);
	DECLARE("K_TRAP_GATE",	ACC_P|ACC_PL_K|ACC_TRAP_GATE);
	DECLARE("U_TRAP_GATE",	ACC_P|ACC_PL_U|ACC_TRAP_GATE);
	DECLARE("K_INTR_GATE",	ACC_P|ACC_PL_K|ACC_INTR_GATE);
	DECLARE("U_INTR_GATE",  ACC_P|ACC_PL_U|ACC_INTR_GATE);
	DECLARE("K_TSS",	ACC_P|ACC_PL_K|ACC_TSS);

	/*
	 *	usimple_lock fields
	 */
	DECLARE("USL_INTERLOCK",	offsetof(usimple_lock_t, interlock));

	DECLARE("INTSTACK_SIZE",	INTSTACK_SIZE);
	DECLARE("KADDR", offsetof(struct boot_args *, kaddr));
	DECLARE("KSIZE", offsetof(struct boot_args *, ksize));
	DECLARE("MEMORYMAP", offsetof(struct boot_args *, MemoryMap));
	DECLARE("DEVICETREEP", offsetof(struct boot_args *, deviceTreeP));

	DECLARE("RNT_TSC_BASE",
		offsetof(rtc_nanotime_t *, tsc_base));
	DECLARE("RNT_NS_BASE",
		offsetof(rtc_nanotime_t *, ns_base));
	DECLARE("RNT_SCALE",
		offsetof(rtc_nanotime_t *, scale));
	DECLARE("RNT_SHIFT",
		offsetof(rtc_nanotime_t *, shift));
	DECLARE("RNT_GENERATION",
		offsetof(rtc_nanotime_t *, generation));

	/* values from kern/timer.h */
#ifdef __LP64__
	DECLARE("TIMER_ALL", offsetof(struct timer *, all_bits));
#else
	DECLARE("TIMER_LOW",	 	offsetof(struct timer *, low_bits));
	DECLARE("TIMER_HIGH",		offsetof(struct timer *, high_bits));
	DECLARE("TIMER_HIGHCHK",	offsetof(struct timer *, high_bits_check));	
#endif
#if !STAT_TIME
	DECLARE("TIMER_TSTAMP",
		offsetof(struct timer *, tstamp));

	DECLARE("THREAD_TIMER",
		offsetof(struct processor *, processor_data.thread_timer));
#endif
	DECLARE("KERNEL_TIMER",
		offsetof(struct processor *, processor_data.kernel_timer));
	DECLARE("SYSTEM_TIMER",
		offsetof(struct thread *, system_timer));
	DECLARE("USER_TIMER",
		offsetof(struct thread *, user_timer));
	DECLARE("SYSTEM_STATE",
			offsetof(struct processor *, processor_data.system_state));
	DECLARE("USER_STATE",
			offsetof(struct processor *, processor_data.user_state));
	DECLARE("IDLE_STATE",
			offsetof(struct processor *, processor_data.idle_state));
	DECLARE("CURRENT_STATE",
			offsetof(struct processor *, processor_data.current_state));

	DECLARE("OnProc", OnProc);


#if	CONFIG_DTRACE
	DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE", LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE);
	DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE);
	DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE);
	DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE", LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE);
	DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE", LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE);
#endif

	return (0);
}
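
As far as the mechanism goes, this program is typically not executed: the DECLARE macro routes each value through the compiler, and the compiled output is post-processed into an assym file that the assembly sources include. As a rough, self-contained illustration of the same idea, the hypothetical generator below (toy_cpu_data and this DECLARE are invented for the sketch, not the XNU definitions) prints assembler .set directives for a toy structure.

#include <stdio.h>
#include <stddef.h>

/* Toy structure standing in for a kernel type; not the real cpu_data_t. */
struct toy_cpu_data {
	void	*cpu_this;
	void	*cpu_active_thread;
	int	cpu_number;
	int	cpu_preemption_level;
};

/* Hypothetical DECLARE: print an assembler symbol definition per value. */
#define DECLARE(SYM, VAL) \
	printf(".set %s, %lu\n", (SYM), (unsigned long)(VAL))

int
main(void)
{
	DECLARE("CPU_THIS",             offsetof(struct toy_cpu_data, cpu_this));
	DECLARE("CPU_ACTIVE_THREAD",    offsetof(struct toy_cpu_data, cpu_active_thread));
	DECLARE("CPU_NUMBER_GS",        offsetof(struct toy_cpu_data, cpu_number));
	DECLARE("CPU_PREEMPTION_LEVEL", offsetof(struct toy_cpu_data, cpu_preemption_level));
	return 0;
}

Running the sketch prints one .set line per field, which is the kind of symbol table (CPU_ACTIVE_THREAD, CPU_PREEMPTION_LEVEL, and so on) that the DECLAREs in the real main() above generate for the kernel's assembly code.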