Example no. 1
static DWORD slider_get_pos (skin_item_t *item)
{
    DECLARE (si_nrmslider_t, slider, 0);

    slider = (si_nrmslider_t*) item->type_data;
    return slider->slider_info.cur_pos;
}
Example no. 2
File: gen.c Project: jiamacs/rhype
int
main(void)
{
	DECLARE(GDB_ZERO, 0);
#ifdef USE_GDB_STUB
	DECLARE(GDB_EAX, offsetof(struct cpu_state, eax));
	DECLARE(GDB_ECX, offsetof(struct cpu_state, ecx));
	DECLARE(GDB_EDX, offsetof(struct cpu_state, edx));
	DECLARE(GDB_EBX, offsetof(struct cpu_state, ebx));
	DECLARE(GDB_ESP, offsetof(struct cpu_state, esp));
	DECLARE(GDB_EBP, offsetof(struct cpu_state, ebp));
	DECLARE(GDB_ESI, offsetof(struct cpu_state, esi));
	DECLARE(GDB_EDI, offsetof(struct cpu_state, edi));
	DECLARE(GDB_EIP, offsetof(struct cpu_state, eip));
	DECLARE(GDB_EFLAGS, offsetof(struct cpu_state, eflags));
	DECLARE(GDB_CS, offsetof(struct cpu_state, cs));
	DECLARE(GDB_SS, offsetof(struct cpu_state, ss));
	DECLARE(GDB_DS, offsetof(struct cpu_state, ds));
	DECLARE(GDB_ES, offsetof(struct cpu_state, es));
	DECLARE(GDB_FS, offsetof(struct cpu_state, fs));
	DECLARE(GDB_GS, offsetof(struct cpu_state, gs));
#endif /* USE_GDB_STUB */

	return 0;
}
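The generator above only invokes DECLARE; the macro itself is defined elsewhere in gen.c and is not part of this excerpt. As a hedged sketch of how such offset generators commonly work (rhype's exact definition may differ), DECLARE can expand to inline assembly that forces the compiler to print each name/value pair into the assembly output, which a build script then extracts into a header usable from assembly code:

/* Hedged sketch of a typical offset-generator macro; the real DECLARE in
 * gen.c may be defined differently.  The "i" constraint makes the compiler
 * emit the constant literally, so the generated .s file contains lines such
 * as "#define GDB_EAX 0" (possibly with an architecture-specific immediate
 * prefix that the extraction script strips). */
#define DECLARE(sym, val) \
	__asm__ __volatile__("\n#define " #sym " %0" : : "i" ((long)(val)))

The XNU-derived examples later in this list pass the symbol name as a string literal rather than an identifier, so their DECLARE concatenates the string directly instead of stringizing the first argument.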
Example no. 3
File: ip.c Project: yeshog/yh
RESULT ip_process( ip_header_p iph, SHORT len )
{
    DECLARE( SHORT, data_len,    0 );
    DECLARE( BYTE,  j,           0 );
    DECLARE( WORD,  dst,         0 );
    if( len < sizeof( ip_header ) )
    {
        return IP_HEADER_LEN_TOO_SMALL;
    }
    dst = R_STRUCT_VAR_TYPE( WORD, iph->dst );
    if( dst != R_WORD( __IP, 0 ) )
    {
        return IP_HEADER_DST_NOT_ME;
    }
    data_len = get_ip_data_len( iph );
    if( data_len == 0 )
    {
        return IP_HEADER_DATA_LEN_ZERO;
    }
    if( data_len > MAX_IP_DATA_LEN )
    {
        return IP_HEADER_DATA_LEN_TOO_BIG;
    }
    if( data_len != len )
    {
        return IP_HEADER_AND_ACTUAL_LEN_MISMATCH;
    }

    j = get_ip_header_len( iph );
    if( j > data_len )
    {
        return IP_HEADER_LEN_BIGGER_THAN_PKT;
    }

    for ( j = 0; j < sizeof( supported_ip_protocols );
            j++ )
    {
        if( supported_ip_protocols[ j ]
                     == iph->protocol )
            break;
    }
    if( j == sizeof( supported_ip_protocols ) )
    {
        return IP_HEADER_PROTO_NOT_SUPPORTED;
    }
    return OK;
}
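ip_process() scans a supported_ip_protocols table that is declared elsewhere in ip.c and is not shown in this excerpt. A hypothetical sketch of such a table (the actual entries may differ; BYTE is assumed to be the project's unsigned 8-bit typedef):

/* Hypothetical protocol table of the kind ip_process() iterates over;
 * sizeof() gives the element count because each entry is a single BYTE. */
static const BYTE supported_ip_protocols[] = {
    1,      /* ICMP */
    6,      /* TCP  */
    17,     /* UDP  */
};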
Example no. 4
void arch_foo(void) {
	/* StackNvgprType */
	DECLARE(STACK_NVGPR_R4,		offsetof(StackNvgprType, r4));
	DECLARE(STACK_NVGPR_R5,		offsetof(StackNvgprType, r5));
	DECLARE(STACK_NVGPR_R6,		offsetof(StackNvgprType, r6));
	DECLARE(STACK_NVGPR_R7,		offsetof(StackNvgprType, r7));
	DECLARE(STACK_NVGPR_R8,		offsetof(StackNvgprType, r8));
	DECLARE(STACK_NVGPR_R10,	offsetof(StackNvgprType, r10));
	DECLARE(STACK_NVGPR_R11,	offsetof(StackNvgprType, r11));
	DECLARE(STACK_NVGPR_VA,	offsetof(StackNvgprType, va));

	/* StackCallAndContextType */
}
Example no. 5
static DWORD slider_set_pos (skin_item_t *item, DWORD pos)
{
    DWORD old_pos;
    DECLARE (si_nrmslider_t, slider, 0);

    slider = (si_nrmslider_t*) item->type_data;
    old_pos = slider->slider_info.cur_pos;   /* recorded but not otherwise used here */

    if ( pos < slider->slider_info.min_pos ) pos = slider->slider_info.min_pos;
    if ( pos > slider->slider_info.max_pos ) pos = slider->slider_info.max_pos;
    slider->slider_info.cur_pos = pos;
    return 1;
}
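The slider and ip.c examples use a different DECLARE from the offset generators: here it is a declare-a-local-with-an-initial-value helper. A minimal sketch of the ip.c-style form, given as an assumption since neither project's header appears in these excerpts; the slider examples evidently need a pointer variant, since slider is later cast to si_nrmslider_t* and dereferenced:

/* Hedged sketch only: a plain declare-and-initialize helper matching the
 * DECLARE(SHORT, data_len, 0) calls in ip.c.  The slider code would need a
 * variant that declares a pointer, e.g. expanding to "_type *_name = ...". */
#define DECLARE(_type, _name, _init)    _type _name = (_init)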
Example no. 6
File: gen.c Project: jiamacs/rhype
int
main(void)
{
	DECLARE(GDB_ZERO, 0);
#ifdef USE_GDB_STUB
	DECLARE(GDB_MSR, offsetof(struct cpu_state, msr));
	DECLARE(GDB_PC, offsetof(struct cpu_state, pc));
	DECLARE(GDB_CR, offsetof(struct cpu_state, cr));
	DECLARE(GDB_XER, offsetof(struct cpu_state, xer));
	DECLARE(GDB_CTR, offsetof(struct cpu_state, ctr));
	DECLARE(GDB_LR, offsetof(struct cpu_state, lr));
	DECLARE(GDB_DAR, offsetof(struct cpu_state, dar));
	DECLARE(GDB_DSISR, offsetof(struct cpu_state, dsisr));
	DECLARE(GDB_GPR0, offsetof(struct cpu_state, gpr[0]));
	DECLARE(GDB_HSRR0, offsetof(struct cpu_state, hsrr0));
	DECLARE(GDB_HSRR1, offsetof(struct cpu_state, hsrr1));
	DECLARE(GDB_HDEC, offsetof(struct cpu_state, hdec));
	DECLARE(GDB_CPU_STATE_SIZE, sizeof (struct cpu_state));
#endif

	return 0;
}
Example no. 7
void  asm_foo(void) {
#elif defined(__CWCC__)
#define DECLARE(_var,_offset) \
    __declspec(section ".apa") char _var[100+ (_offset)]
#pragma section ".apa" ".apa"
#elif (defined(__DCC__))
#define DECLARE(_sym,_val) \
	const int arc_dummy_ ## _sym = _val
#else
//#define DECLARE(_sym,_val) const char arc_dummy_ ## _sym = _val
#define DECLARE(_sym,_val) const int arc_dummy_ ## _sym = _val
#endif
	DECLARE(PCB_STACK_CURR_P,	offsetof(OsTaskVarType, stack));
	DECLARE(PCB_CONST_P,		offsetof(OsTaskVarType, constPtr));
////	DECLARE(PCB_ENTRY_P,		offsetof(OsTaskVarType, entry));
	DECLARE(SYS_CURR_PCB_P,		offsetof(Os_SysType, currTaskPtr));
	DECLARE(SYS_INT_NEST_CNT, offsetof(Os_SysType, intNestCnt));
	DECLARE(SYS_INT_STACK, offsetof(Os_SysType, intStack));
	DECLARE(SYS_SIZE, sizeof(Os_SysType));
#if defined(__GNUC__) || defined(__ghs__)
}
#include "swresize.h"
#include "swresize_desc.cpp"

typedef struct alg
{
    int  in;
    char *name;
} alg;
#define DECLARE(y) {SWS_##y,(char *)#y}

/**
	Convert mplayer-resize numbering <--> avidemux one

*/
alg algs[]={
				DECLARE(BILINEAR),
				DECLARE(BICUBIC),
				DECLARE(LANCZOS)
		};
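
/* A minimal lookup sketch, not part of the original swresize filter: map an
 * algorithm name back to its SWS_* id by scanning the algs[] table above.
 * algIdByName is a hypothetical helper and assumes <string.h> for strcmp. */
static int algIdByName(const char *name)
{
    for (unsigned int i = 0; i < sizeof(algs) / sizeof(algs[0]); i++)
        if (!strcmp(algs[i].name, name))
            return algs[i].in;
    return -1; /* unknown name */
}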


/**
    \class swScaleResizeFilter
*/
class swScaleResizeFilter : public  ADM_coreVideoFilter
{
protected:
            
				ADMColorScalerFull	*resizer;
				bool        reset(uint32_t nw, uint32_t old,uint32_t algo);
				bool        clean( void );
Example no. 9
int
main(
	int	argc,
	char	**argv)
{
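	/*
	 * genassym-style generator: each DECLARE() below records a symbolic name
	 * together with a structure offset or constant so the build can turn the
	 * compiler output into an assembler-visible header.  (The DECLARE macro
	 * itself is defined elsewhere in this file and is not part of this
	 * excerpt.)
	 */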

	DECLARE("T_PREFETCH_ABT",	T_PREFETCH_ABT);
	DECLARE("T_DATA_ABT",		T_DATA_ABT);

	DECLARE("AST_URGENT",		AST_URGENT);
	DECLARE("AST_PREEMPTION",	AST_PREEMPTION);

	DECLARE("TH_RECOVER",		offsetof(struct thread, recover));
	DECLARE("TH_CONTINUATION",	offsetof(struct thread, continuation));
	DECLARE("TH_KERNEL_STACK",	offsetof(struct thread, kernel_stack));
	DECLARE("TH_KSTACKPTR",		offsetof(struct thread, machine.kstackptr));
        DECLARE("TH_UTHREAD",		offsetof(struct thread, uthread));

	DECLARE("TASK_MACH_EXC_PORT",
		offsetof(struct task, exc_actions[EXC_MACH_SYSCALL].port));

	/* These fields are being added on demand */
	DECLARE("ACT_TASK",	offsetof(struct thread, task));
	DECLARE("ACT_PCBDATA",	offsetof(struct thread, machine.PcbData));
#if __ARM_VFP__
	DECLARE("ACT_UVFP",     offsetof(struct thread, machine.uVFPdata));
	DECLARE("ACT_KVFP",     offsetof(struct thread, machine.kVFPdata));
#endif
	DECLARE("TH_CTH_SELF",	offsetof(struct thread, machine.cthread_self));
	DECLARE("TH_CTH_DATA",	offsetof(struct thread, machine.cthread_data));
	DECLARE("ACT_PCBDATA_PC",	offsetof(struct thread, machine.PcbData.pc));
	DECLARE("ACT_PCBDATA_R0",	offsetof(struct thread, machine.PcbData.r[0]));
	DECLARE("ACT_PREEMPT_CNT",	offsetof(struct thread, machine.preemption_count));
	DECLARE("ACT_CPUDATAP",	offsetof(struct thread, machine.CpuDatap));
	DECLARE("ACT_MAP",	offsetof(struct thread, map));
#if __ARM_USER_PROTECT__
	DECLARE("ACT_UPTW_TTC", offsetof(struct thread, machine.uptw_ttc));
	DECLARE("ACT_UPTW_TTB", offsetof(struct thread, machine.uptw_ttb));
	DECLARE("ACT_KPTW_TTB", offsetof(struct thread, machine.kptw_ttb));
	DECLARE("ACT_ASID", offsetof(struct thread, machine.asid));
#endif
	DECLARE("ACT_DEBUGDATA",	offsetof(struct thread, machine.DebugData));
	DECLARE("TH_IOTIER_OVERRIDE",	offsetof(struct thread, iotier_override));
	DECLARE("TH_RWLOCK_CNT",	offsetof(struct thread, rwlock_count));	
	DECLARE("TH_SCHED_FLAGS",	offsetof(struct thread, sched_flags));
	DECLARE("TH_SFLAG_RW_PROMOTED",	TH_SFLAG_RW_PROMOTED);

	DECLARE("TH_MACH_SYSCALLS", offsetof(struct thread, syscalls_mach));
	DECLARE("TH_UNIX_SYSCALLS", offsetof(struct thread, syscalls_unix));
	DECLARE("TASK_BSD_INFO", offsetof(struct task, bsd_info));

	DECLARE("MACH_TRAP_TABLE_COUNT", MACH_TRAP_TABLE_COUNT);
	DECLARE("MACH_TRAP_TABLE_ENTRY_SIZE", sizeof(mach_trap_t));

	DECLARE("MAP_PMAP",	offsetof(struct _vm_map, pmap));

	DECLARE("SS_SIZE", 	sizeof(struct arm_saved_state));
	DECLARE("SS_LR", offsetof(struct arm_saved_state, lr));
	DECLARE("SS_CPSR", offsetof(struct arm_saved_state, cpsr));
	DECLARE("SS_PC", offsetof(struct arm_saved_state, pc));
	DECLARE("SS_R0", offsetof(struct arm_saved_state, r[0]));
	DECLARE("SS_R4", offsetof(struct arm_saved_state, r[4]));
	DECLARE("SS_R9", offsetof(struct arm_saved_state, r[9]));
	DECLARE("SS_R12", offsetof(struct arm_saved_state, r[12]));
	DECLARE("SS_SP", offsetof(struct arm_saved_state, sp));
	DECLARE("SS_STATUS", offsetof(struct arm_saved_state, fsr));
	DECLARE("SS_VADDR", offsetof(struct arm_saved_state, far));
	DECLARE("SS_EXC", offsetof(struct arm_saved_state, exception));

#if __ARM_VFP__
	DECLARE("VSS_SIZE", sizeof(struct arm_vfpsaved_state));
	DECLARE("VSS_FPSCR", offsetof(struct arm_vfpsaved_state, fpscr));
	DECLARE("VSS_FPEXC", offsetof(struct arm_vfpsaved_state, fpexc));

	DECLARE("EXC_CTX_SIZE", sizeof(struct arm_saved_state) +
                            sizeof(struct arm_vfpsaved_state) +
                            VFPSAVE_ALIGN);
	DECLARE("VSS_ALIGN", VFPSAVE_ALIGN);
#else
	DECLARE("EXC_CTX_SIZE", sizeof(struct arm_saved_state));
#endif


	DECLARE("PGBYTES", ARM_PGBYTES);
	DECLARE("PGSHIFT", ARM_PGSHIFT);
	DECLARE("PGMASK", ARM_PGMASK);

	DECLARE("VM_MIN_ADDRESS",	VM_MIN_ADDRESS);
	DECLARE("VM_MAX_ADDRESS",	VM_MAX_ADDRESS);
	DECLARE("KERNELBASE",		VM_MIN_KERNEL_ADDRESS);
	DECLARE("KERNEL_STACK_SIZE",	KERNEL_STACK_SIZE);

	DECLARE("KERN_INVALID_ADDRESS",	KERN_INVALID_ADDRESS);

	DECLARE("MAX_CPUS",	MAX_CPUS);

	DECLARE("cdeSize",
		sizeof(struct cpu_data_entry));

	DECLARE("cdSize",
		sizeof(struct cpu_data));

        DECLARE("CPU_ACTIVE_THREAD",
		offsetof(cpu_data_t, cpu_active_thread));
        DECLARE("CPU_ACTIVE_STACK",
		offsetof(cpu_data_t, cpu_active_stack));
        DECLARE("CPU_ISTACKPTR",
		offsetof(cpu_data_t, istackptr));
        DECLARE("CPU_INTSTACK_TOP",
		offsetof(cpu_data_t, intstack_top));
        DECLARE("CPU_FIQSTACKPTR",
		offsetof(cpu_data_t, fiqstackptr));
        DECLARE("CPU_FIQSTACK_TOP",
		offsetof(cpu_data_t, fiqstack_top));
        DECLARE("CPU_NUMBER_GS",
		offsetof(cpu_data_t,cpu_number));
        DECLARE("CPU_IDENT",
		offsetof(cpu_data_t,cpu_ident));
        DECLARE("CPU_RUNNING",
		offsetof(cpu_data_t,cpu_running));
        DECLARE("CPU_MCOUNT_OFF",
		offsetof(cpu_data_t,cpu_mcount_off));
	DECLARE("CPU_PENDING_AST",
		offsetof(cpu_data_t,cpu_pending_ast));
	DECLARE("CPU_PROCESSOR",
		offsetof(cpu_data_t,cpu_processor));
	DECLARE("CPU_CACHE_DISPATCH",
		offsetof(cpu_data_t,cpu_cache_dispatch));
        DECLARE("CPU_BASE_TIMEBASE_LOW",
		offsetof(cpu_data_t,cpu_base_timebase_low));
        DECLARE("CPU_BASE_TIMEBASE_HIGH",
		offsetof(cpu_data_t,cpu_base_timebase_high));
        DECLARE("CPU_TIMEBASE_LOW",
		offsetof(cpu_data_t,cpu_timebase_low));
        DECLARE("CPU_TIMEBASE_HIGH",
		offsetof(cpu_data_t,cpu_timebase_high));
	DECLARE("CPU_DECREMENTER",
		offsetof(cpu_data_t,cpu_decrementer));
	DECLARE("CPU_GET_DECREMENTER_FUNC",
		offsetof(cpu_data_t,cpu_get_decrementer_func));
	DECLARE("CPU_SET_DECREMENTER_FUNC",
		offsetof(cpu_data_t,cpu_set_decrementer_func));
	DECLARE("CPU_GET_FIQ_HANDLER",
		offsetof(cpu_data_t,cpu_get_fiq_handler));
	DECLARE("CPU_TBD_HARDWARE_ADDR",
		offsetof(cpu_data_t,cpu_tbd_hardware_addr));
	DECLARE("CPU_TBD_HARDWARE_VAL",
		offsetof(cpu_data_t,cpu_tbd_hardware_val));
	DECLARE("CPU_INT_STATE",
		offsetof(cpu_data_t,cpu_int_state));
	DECLARE("INTERRUPT_HANDLER",
		offsetof(cpu_data_t,interrupt_handler));
	DECLARE("INTERRUPT_TARGET",
		offsetof(cpu_data_t,interrupt_target));
	DECLARE("INTERRUPT_REFCON",
		offsetof(cpu_data_t,interrupt_refCon));
	DECLARE("INTERRUPT_NUB",
		offsetof(cpu_data_t,interrupt_nub));
	DECLARE("INTERRUPT_SOURCE",
		offsetof(cpu_data_t,interrupt_source));
	DECLARE("CPU_USER_DEBUG",
		offsetof(cpu_data_t, cpu_user_debug));
	DECLARE("CPU_STAT_IRQ",
		offsetof(cpu_data_t, cpu_stat.irq_ex_cnt));
	DECLARE("CPU_STAT_IRQ_WAKE",
		offsetof(cpu_data_t, cpu_stat.irq_ex_cnt_wake));
	DECLARE("CPU_RESET_HANDLER",
		offsetof(cpu_data_t, cpu_reset_handler));
	DECLARE("CPU_RESET_ASSIST",
		offsetof(cpu_data_t, cpu_reset_assist));
	DECLARE("RTCLOCK_DATAP",
		offsetof(cpu_data_t, rtclock_datap));
#ifdef	__arm__
	DECLARE("CPU_EXC_VECTORS",
		offsetof(cpu_data_t, cpu_exc_vectors));
#endif

	DECLARE("RTCLOCKDataSize",
		sizeof(rtclock_data_t));
	DECLARE("RTCLOCK_ADJ_ABSTIME_LOW",
		offsetof(rtclock_data_t, rtc_adj.abstime_val.low));
	DECLARE("RTCLOCK_ADJ_ABSTIME_HIGH",
		offsetof(rtclock_data_t, rtc_adj.abstime_val.high));
	DECLARE("RTCLOCK_BASE_ABSTIME_LOW",
		offsetof(rtclock_data_t, rtc_base.abstime_val.low));
	DECLARE("RTCLOCK_BASE_ABSTIME_HIGH",
		offsetof(rtclock_data_t, rtc_base.abstime_val.high));
	DECLARE("RTCLOCK_TB_FUNC",
		offsetof(rtclock_data_t, rtc_timebase_func));
	DECLARE("RTCLOCK_TB_ADDR",
		offsetof(rtclock_data_t, rtc_timebase_addr));
	DECLARE("RTCLOCK_TB_VAL",
		offsetof(rtclock_data_t, rtc_timebase_val));

	DECLARE("SIGPdec",	SIGPdec);

	DECLARE("rhdSize",
		sizeof(struct reset_handler_data));

	DECLARE("CPU_DATA_ENTRIES",	offsetof(struct reset_handler_data, cpu_data_entries));
	DECLARE("BOOT_ARGS",	offsetof(struct reset_handler_data, boot_args));
	DECLARE("ASSIST_RESET_HANDLER",	offsetof(struct reset_handler_data, assist_reset_handler));

	DECLARE("CPU_DATA_PADDR",	offsetof(struct cpu_data_entry, cpu_data_paddr));


	DECLARE("INTSTACK_SIZE",	INTSTACK_SIZE);

	/* values from kern/timer.h */
	DECLARE("TIMER_LOW",
		offsetof(struct timer, low_bits));
	DECLARE("TIMER_HIGH",
		offsetof(struct timer, high_bits));
	DECLARE("TIMER_HIGHCHK",
		offsetof(struct timer, high_bits_check));
	DECLARE("TIMER_TSTAMP",
		offsetof(struct timer, tstamp));
	DECLARE("THREAD_TIMER",
		offsetof(struct processor, processor_data.thread_timer));
	DECLARE("KERNEL_TIMER",
		offsetof(struct processor, processor_data.kernel_timer));
	DECLARE("SYSTEM_STATE",
		offsetof(struct processor, processor_data.system_state));
	DECLARE("USER_STATE",
		offsetof(struct processor, processor_data.user_state));
	DECLARE("CURRENT_STATE",
		offsetof(struct processor, processor_data.current_state));

	DECLARE("SYSTEM_TIMER",
		offsetof(struct thread, system_timer));
	DECLARE("USER_TIMER",
		offsetof(struct thread, user_timer));

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	DECLARE("PRECISE_USER_KERNEL_TIME",
		offsetof(struct thread, precise_user_kernel_time));
#endif

	DECLARE("BA_VIRT_BASE",
		offsetof(struct boot_args, virtBase));
	DECLARE("BA_PHYS_BASE",
		offsetof(struct boot_args, physBase));
	DECLARE("BA_MEM_SIZE",
		offsetof(struct boot_args, memSize));
	DECLARE("BA_TOP_OF_KERNEL_DATA",
		offsetof(struct boot_args, topOfKernelData));

	DECLARE("ENTROPY_INDEX_PTR",
		offsetof(entropy_data_t, index_ptr));
	DECLARE("ENTROPY_BUFFER",
		offsetof(entropy_data_t, buffer));
	DECLARE("ENTROPY_DATA_SIZE", sizeof(struct entropy_data));

	return (0);
}
Example no. 10
int
main(
	int	argc,
	char	**argv)
{

	DECLARE("AST_URGENT",		AST_URGENT);
	DECLARE("AST_BSD",			AST_BSD);

	/* Simple Lock structure */
	DECLARE("SLOCK_ILK",	offsetof(usimple_lock_t, interlock));
#if	MACH_LDEBUG
	DECLARE("SLOCK_TYPE",	offsetof(usimple_lock_t, lock_type));
	DECLARE("SLOCK_PC",	offsetof(usimple_lock_t, debug.lock_pc));
	DECLARE("SLOCK_THREAD",	offsetof(usimple_lock_t, debug.lock_thread));
	DECLARE("SLOCK_DURATIONH",offsetof(usimple_lock_t, debug.duration[0]));
	DECLARE("SLOCK_DURATIONL",offsetof(usimple_lock_t, debug.duration[1]));
	DECLARE("USLOCK_TAG",	USLOCK_TAG);
#endif	/* MACH_LDEBUG */

	/* Mutex structure */
	DECLARE("MUTEX_OWNER", offsetof(lck_mtx_t *, lck_mtx_owner));
	DECLARE("MUTEX_PTR",   offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_STATE", offsetof(lck_mtx_t *, lck_mtx_state));
#ifdef __i386__
	DECLARE("MUTEX_TYPE",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.type));
	DECLARE("MUTEX_PC",		offsetof(lck_mtx_ext_t *, lck_mtx_deb.pc));
	DECLARE("MUTEX_THREAD",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.thread));
	DECLARE("MUTEX_ATTR",	offsetof(lck_mtx_ext_t *, lck_mtx_attr));
	DECLARE("MUTEX_ATTR_DEBUG", LCK_MTX_ATTR_DEBUG);
	DECLARE("MUTEX_ATTR_DEBUGb", LCK_MTX_ATTR_DEBUGb);
	DECLARE("MUTEX_ATTR_STAT", LCK_MTX_ATTR_STAT);
	DECLARE("MUTEX_ATTR_STATb", LCK_MTX_ATTR_STATb);
	DECLARE("MUTEX_TAG",	MUTEX_TAG);
#endif
	DECLARE("MUTEX_IND",	LCK_MTX_TAG_INDIRECT);
	DECLARE("MUTEX_EXT",	LCK_MTX_PTR_EXTENDED);
	DECLARE("MUTEX_ITAG",	offsetof(lck_mtx_t *, lck_mtx_tag));
	DECLARE("MUTEX_PTR",	offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_ASSERT_OWNED",	LCK_MTX_ASSERT_OWNED);
	DECLARE("MUTEX_ASSERT_NOTOWNED",LCK_MTX_ASSERT_NOTOWNED);
	DECLARE("GRP_MTX_STAT_UTIL",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt));
	DECLARE("GRP_MTX_STAT_MISS",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt));
	DECLARE("GRP_MTX_STAT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt));
	
	/* x86 only */
	DECLARE("MUTEX_DESTROYED", LCK_MTX_TAG_DESTROYED);

	/* Per-mutex statistic element */
	DECLARE("MTX_ACQ_TSC",	offsetof(lck_mtx_ext_t *, lck_mtx_stat));

	/* Mutex group statistics elements */
	DECLARE("MUTEX_GRP",	offsetof(lck_mtx_ext_t *, lck_mtx_grp));
	
	/*
	 * The use of this field is somewhat at variance with the alias.
	 */
	DECLARE("GRP_MTX_STAT_DIRECT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt));

	DECLARE("GRP_MTX_STAT_HELD_MAX",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max));
	/* Reader writer lock types */
	DECLARE("RW_SHARED",    LCK_RW_TYPE_SHARED);
	DECLARE("RW_EXCL",      LCK_RW_TYPE_EXCLUSIVE);

	DECLARE("TH_RECOVER",		offsetof(thread_t, recover));
	DECLARE("TH_CONTINUATION",	offsetof(thread_t, continuation));
	DECLARE("TH_KERNEL_STACK",	offsetof(thread_t, kernel_stack));

	DECLARE("TASK_MACH_EXC_PORT",
		offsetof(task_t, exc_actions[EXC_MACH_SYSCALL].port));
	DECLARE("TASK_SYSCALLS_MACH",	offsetof(struct task *, syscalls_mach));
	DECLARE("TASK_SYSCALLS_UNIX",	offsetof(struct task *, syscalls_unix));

	DECLARE("TASK_VTIMERS",			offsetof(struct task *, vtimers));

	/* These fields are being added on demand */
	DECLARE("ACT_MACH_EXC_PORT",
		offsetof(thread_t, exc_actions[EXC_MACH_SYSCALL].port));

	DECLARE("ACT_TASK",	offsetof(thread_t, task));
	DECLARE("ACT_AST",	offsetof(thread_t, ast));
	DECLARE("ACT_PCB",	offsetof(thread_t, machine.pcb));
	DECLARE("ACT_SPF",	offsetof(thread_t, machine.specFlags));
	DECLARE("ACT_MAP",	offsetof(thread_t, map));
	DECLARE("ACT_PCB_ISS", 	offsetof(thread_t, machine.xxx_pcb.iss));
	DECLARE("ACT_PCB_IDS", 	offsetof(thread_t, machine.xxx_pcb.ids));
#if NCOPY_WINDOWS > 0
	DECLARE("ACT_COPYIO_STATE", offsetof(thread_t, machine.copyio_state));
	DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN);
#endif

	DECLARE("MAP_PMAP",	offsetof(vm_map_t, pmap));

#define IEL_SIZE		(sizeof(struct i386_exception_link *))
	DECLARE("IEL_SIZE",	IEL_SIZE);
	DECLARE("IKS_SIZE",	sizeof(struct x86_kernel_state));

	/*
	 * KSS_* are offsets from the top of the kernel stack (cpu_kernel_stack)
	 */
#if defined(__i386__)
	DECLARE("KSS_EBX", IEL_SIZE + offsetof(struct x86_kernel_state *, k_ebx));
	DECLARE("KSS_ESP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_esp));
	DECLARE("KSS_EBP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_ebp));
	DECLARE("KSS_EDI", IEL_SIZE + offsetof(struct x86_kernel_state *, k_edi));
	DECLARE("KSS_ESI", IEL_SIZE + offsetof(struct x86_kernel_state *, k_esi));
	DECLARE("KSS_EIP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_eip));
#elif defined(__x86_64__)
	DECLARE("KSS_RBX", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rbx));
	DECLARE("KSS_RSP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rsp));
	DECLARE("KSS_RBP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rbp));
	DECLARE("KSS_R12", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r12));
	DECLARE("KSS_R13", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r13));
	DECLARE("KSS_R14", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r14));
	DECLARE("KSS_R15", IEL_SIZE + offsetof(struct x86_kernel_state *, k_r15));
	DECLARE("KSS_RIP", IEL_SIZE + offsetof(struct x86_kernel_state *, k_rip));	
#else
#error Unsupported architecture
#endif
	
	DECLARE("PCB_FPS",	offsetof(pcb_t, ifps));
	DECLARE("PCB_ISS",	offsetof(pcb_t, iss));

	DECLARE("DS_DR0",	offsetof(struct x86_debug_state32 *, dr0));
	DECLARE("DS_DR1",	offsetof(struct x86_debug_state32 *, dr1));
	DECLARE("DS_DR2",	offsetof(struct x86_debug_state32 *, dr2));
	DECLARE("DS_DR3",	offsetof(struct x86_debug_state32 *, dr3));
	DECLARE("DS_DR4",	offsetof(struct x86_debug_state32 *, dr4));
	DECLARE("DS_DR5",	offsetof(struct x86_debug_state32 *, dr5));
	DECLARE("DS_DR6",	offsetof(struct x86_debug_state32 *, dr6));
	DECLARE("DS_DR7",	offsetof(struct x86_debug_state32 *, dr7));

	DECLARE("DS64_DR0",	offsetof(struct x86_debug_state64 *, dr0));
	DECLARE("DS64_DR1",	offsetof(struct x86_debug_state64 *, dr1));
	DECLARE("DS64_DR2",	offsetof(struct x86_debug_state64 *, dr2));
	DECLARE("DS64_DR3",	offsetof(struct x86_debug_state64 *, dr3));
	DECLARE("DS64_DR4",	offsetof(struct x86_debug_state64 *, dr4));
	DECLARE("DS64_DR5",	offsetof(struct x86_debug_state64 *, dr5));
	DECLARE("DS64_DR6",	offsetof(struct x86_debug_state64 *, dr6));
	DECLARE("DS64_DR7",	offsetof(struct x86_debug_state64 *, dr7));

	DECLARE("FP_VALID",	offsetof(struct x86_fpsave_state *,fp_valid));

	DECLARE("SS_FLAVOR",	offsetof(x86_saved_state_t *, flavor));
	DECLARE("SS_32",	x86_SAVED_STATE32);
	DECLARE("SS_64",	x86_SAVED_STATE64);

#define R_(x)  offsetof(x86_saved_state_t *, ss_32.x)
	DECLARE("R32_CS",	R_(cs));
	DECLARE("R32_SS",	R_(ss));
	DECLARE("R32_DS",	R_(ds));
	DECLARE("R32_ES",	R_(es));
	DECLARE("R32_FS",	R_(fs));
	DECLARE("R32_GS",	R_(gs));
	DECLARE("R32_UESP",	R_(uesp));
	DECLARE("R32_EBP",	R_(ebp));
	DECLARE("R32_EAX",	R_(eax));
	DECLARE("R32_EBX",	R_(ebx));
	DECLARE("R32_ECX",	R_(ecx));
	DECLARE("R32_EDX",	R_(edx));
	DECLARE("R32_ESI",	R_(esi));
	DECLARE("R32_EDI",	R_(edi));
	DECLARE("R32_TRAPNO",	R_(trapno));
	DECLARE("R32_ERR",	R_(err));
	DECLARE("R32_EFLAGS",	R_(efl));
	DECLARE("R32_EIP",	R_(eip));
	DECLARE("R32_CR2",	R_(cr2));
	DECLARE("ISS32_SIZE",	sizeof (x86_saved_state32_t));

#define R64_(x)  offsetof(x86_saved_state_t *, ss_64.x)
	DECLARE("R64_FS",	R64_(fs));
	DECLARE("R64_GS",	R64_(gs));
	DECLARE("R64_R8",	R64_(r8));
	DECLARE("R64_R9",	R64_(r9));
	DECLARE("R64_R10",	R64_(r10));
	DECLARE("R64_R11",	R64_(r11));
	DECLARE("R64_R12",	R64_(r12));
	DECLARE("R64_R13",	R64_(r13));
	DECLARE("R64_R14",	R64_(r14));
	DECLARE("R64_R15",	R64_(r15));
	DECLARE("R64_RBP",	R64_(rbp));
	DECLARE("R64_RAX",	R64_(rax));
	DECLARE("R64_RBX",	R64_(rbx));
	DECLARE("R64_RCX",	R64_(rcx));
	DECLARE("R64_RDX",	R64_(rdx));
	DECLARE("R64_RSI",	R64_(rsi));
	DECLARE("R64_RDI",	R64_(rdi));
	DECLARE("R64_V_ARG6",	R64_(v_arg6));
	DECLARE("R64_V_ARG7",	R64_(v_arg7));
	DECLARE("R64_V_ARG8",	R64_(v_arg8));
	DECLARE("R64_CS",	R64_(isf.cs));
	DECLARE("R64_SS",	R64_(isf.ss));
	DECLARE("R64_RSP",	R64_(isf.rsp));
	DECLARE("R64_TRAPNO",	R64_(isf.trapno));
	DECLARE("R64_TRAPFN",	R64_(isf.trapfn));
	DECLARE("R64_ERR",	R64_(isf.err));
	DECLARE("R64_RFLAGS",	R64_(isf.rflags));
	DECLARE("R64_RIP",	R64_(isf.rip));
	DECLARE("R64_CR2",	R64_(cr2));
	DECLARE("ISS64_OFFSET",	R64_(isf));
	DECLARE("ISS64_SIZE",	sizeof (x86_saved_state64_t));

#define ISF64_(x)  offsetof(x86_64_intr_stack_frame_t *, x)
	DECLARE("ISF64_TRAPNO",	ISF64_(trapno));
	DECLARE("ISF64_TRAPFN",	ISF64_(trapfn));
	DECLARE("ISF64_ERR",	ISF64_(err));
	DECLARE("ISF64_RIP",	ISF64_(rip));
	DECLARE("ISF64_CS",	ISF64_(cs));
	DECLARE("ISF64_RFLAGS",	ISF64_(rflags));
	DECLARE("ISF64_RSP",	ISF64_(rsp));
	DECLARE("ISF64_SS",	ISF64_(ss));
	DECLARE("ISF64_SIZE",	sizeof(x86_64_intr_stack_frame_t));

	DECLARE("ISC32_OFFSET",	offsetof(x86_saved_state_compat32_t *, isf64));
#define ISC32_(x)  offsetof(x86_saved_state_compat32_t *, isf64.x)
	DECLARE("ISC32_TRAPNO", ISC32_(trapno));
	DECLARE("ISC32_TRAPFN",	ISC32_(trapfn));
	DECLARE("ISC32_ERR",	ISC32_(err));
	DECLARE("ISC32_RIP",	ISC32_(rip));
	DECLARE("ISC32_CS",	ISC32_(cs));
	DECLARE("ISC32_RFLAGS",	ISC32_(rflags));
	DECLARE("ISC32_RSP",	ISC32_(rsp));
	DECLARE("ISC32_SS",	ISC32_(ss));

	DECLARE("NBPG",			I386_PGBYTES);
	DECLARE("PAGE_SIZE",            I386_PGBYTES);
	DECLARE("PAGE_MASK",            I386_PGBYTES-1);
	DECLARE("PAGE_SHIFT",           12);
	DECLARE("NKPT",                 NKPT);
#ifdef __i386__
	DECLARE("KPTDI",                KPTDI);
#endif
	DECLARE("VM_MIN_ADDRESS",	VM_MIN_ADDRESS);
	DECLARE("VM_MAX_ADDRESS",	VM_MAX_ADDRESS);
	DECLARE("KERNELBASE",		VM_MIN_KERNEL_ADDRESS);
	DECLARE("LINEAR_KERNELBASE",	LINEAR_KERNEL_ADDRESS);
	DECLARE("KERNEL_STACK_SIZE",	KERNEL_STACK_SIZE);
#ifdef __i386__
	DECLARE("KERNEL_UBER_BASE_HI32", KERNEL_UBER_BASE_HI32);
#endif

	DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS",  _COMM_PAGE32_BASE_ADDRESS);
	DECLARE("ASM_COMM_PAGE32_START_ADDRESS",  _COMM_PAGE32_START_ADDRESS);
	DECLARE("ASM_COMM_PAGE_SCHED_GEN",  _COMM_PAGE_SCHED_GEN);

	DECLARE("PDESHIFT",	PDESHIFT);
	DECLARE("PTEMASK",	PTEMASK);
	DECLARE("PTEINDX",      PTEINDX);
	DECLARE("INTEL_PTE_PFN",	INTEL_PTE_PFN);
	DECLARE("INTEL_PTE_VALID",	INTEL_PTE_VALID);
	DECLARE("INTEL_PTE_WRITE",	INTEL_PTE_WRITE);
	DECLARE("INTEL_PTE_PS",       INTEL_PTE_PS);
	DECLARE("INTEL_PTE_USER",        INTEL_PTE_USER);
	DECLARE("INTEL_PTE_INVALID",	INTEL_PTE_INVALID);
	DECLARE("NPGPTD", NPGPTD);
#if defined(__x86_64__)
	DECLARE("INITPT_SEG_BASE",INITPT_SEG_BASE);
	DECLARE("INITGDT_SEG_BASE",INITGDT_SEG_BASE);
	DECLARE("SLEEP_SEG_BASE",SLEEP_SEG_BASE);
	DECLARE("PROT_MODE_GDT_SIZE",PROT_MODE_GDT_SIZE);
	DECLARE("KERNEL_PML4_INDEX",KERNEL_PML4_INDEX);
#endif
	DECLARE("IDTSZ",	IDTSZ);
	DECLARE("GDTSZ",	GDTSZ);
	DECLARE("LDTSZ",	LDTSZ);

	DECLARE("KERNEL_DS",	KERNEL_DS);
	DECLARE("USER_CS",	USER_CS);
	DECLARE("USER_DS",	USER_DS);
	DECLARE("KERNEL32_CS",	KERNEL32_CS);
	DECLARE("KERNEL64_CS",  KERNEL64_CS);
	DECLARE("USER64_CS",	USER64_CS);
	DECLARE("KERNEL_TSS",	KERNEL_TSS);
	DECLARE("KERNEL_LDT",	KERNEL_LDT);
#ifdef __i386__
	DECLARE("DF_TSS",	DF_TSS);
	DECLARE("MC_TSS",	MC_TSS);
#if	MACH_KDB
	DECLARE("DEBUG_TSS",	DEBUG_TSS);
#endif	/* MACH_KDB */
	DECLARE("CPU_DATA_GS",	CPU_DATA_GS);
#endif /* __i386__ */
	DECLARE("SYSENTER_CS",	SYSENTER_CS);
	DECLARE("SYSENTER_TF_CS",SYSENTER_TF_CS);
	DECLARE("SYSENTER_DS",	SYSENTER_DS);
	DECLARE("SYSCALL_CS",	SYSCALL_CS);
#ifdef __i386__
	DECLARE("USER_WINDOW_SEL",	USER_WINDOW_SEL);
	DECLARE("PHYS_WINDOW_SEL",	PHYS_WINDOW_SEL);
#endif

        DECLARE("CPU_THIS",
		offsetof(cpu_data_t *, cpu_this));
        DECLARE("CPU_ACTIVE_THREAD",
		offsetof(cpu_data_t *, cpu_active_thread));
        DECLARE("CPU_ACTIVE_STACK",
		offsetof(cpu_data_t *, cpu_active_stack));
        DECLARE("CPU_KERNEL_STACK",
		offsetof(cpu_data_t *, cpu_kernel_stack));
        DECLARE("CPU_INT_STACK_TOP",
		offsetof(cpu_data_t *, cpu_int_stack_top));
#if	MACH_RT
        DECLARE("CPU_PREEMPTION_LEVEL",
		offsetof(cpu_data_t *, cpu_preemption_level));
#endif	/* MACH_RT */
        DECLARE("CPU_HIBERNATE",
		offsetof(cpu_data_t *, cpu_hibernate));
        DECLARE("CPU_INTERRUPT_LEVEL",
		offsetof(cpu_data_t *, cpu_interrupt_level));
        DECLARE("CPU_SIMPLE_LOCK_COUNT",
		offsetof(cpu_data_t *,cpu_simple_lock_count));
        DECLARE("CPU_NUMBER_GS",
		offsetof(cpu_data_t *,cpu_number));
        DECLARE("CPU_RUNNING",
		offsetof(cpu_data_t *,cpu_running));
        DECLARE("CPU_MCOUNT_OFF",
		offsetof(cpu_data_t *,cpu_mcount_off));
	DECLARE("CPU_PENDING_AST",
		offsetof(cpu_data_t *,cpu_pending_ast));
	DECLARE("CPU_DESC_TABLEP",
		offsetof(cpu_data_t *,cpu_desc_tablep));
	DECLARE("CPU_DESC_INDEX",
		offsetof(cpu_data_t *,cpu_desc_index));
	DECLARE("CDI_GDT",
		offsetof(cpu_desc_index_t *,cdi_gdt));
	DECLARE("CDI_IDT",
		offsetof(cpu_desc_index_t *,cdi_idt));
	DECLARE("CPU_PROCESSOR",
		offsetof(cpu_data_t *,cpu_processor));
        DECLARE("CPU_INT_STATE",
		offsetof(cpu_data_t *, cpu_int_state));
        DECLARE("CPU_INT_EVENT_TIME",
		offsetof(cpu_data_t *, cpu_int_event_time));

#ifdef __i386__
        DECLARE("CPU_HI_ISS",
		offsetof(cpu_data_t *, cpu_hi_iss));
#endif
        DECLARE("CPU_TASK_CR3",
		offsetof(cpu_data_t *, cpu_task_cr3));
        DECLARE("CPU_ACTIVE_CR3",
		offsetof(cpu_data_t *, cpu_active_cr3));
        DECLARE("CPU_KERNEL_CR3",
		offsetof(cpu_data_t *, cpu_kernel_cr3));
#ifdef __x86_64__
		DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t *, cpu_tlb_invalid));
#endif

	DECLARE("CPU_IS64BIT",
		offsetof(cpu_data_t *, cpu_is64bit));
	DECLARE("CPU_TASK_MAP",
		offsetof(cpu_data_t *, cpu_task_map));
	DECLARE("TASK_MAP_32BIT",		TASK_MAP_32BIT); 
	DECLARE("TASK_MAP_64BIT",		TASK_MAP_64BIT);
#ifdef __i386__
	DECLARE("TASK_MAP_64BIT_SHARED",	TASK_MAP_64BIT_SHARED); 
#endif
	DECLARE("CPU_UBER_USER_GS_BASE",
		offsetof(cpu_data_t *, cpu_uber.cu_user_gs_base));
	DECLARE("CPU_UBER_ISF",
		offsetof(cpu_data_t *, cpu_uber.cu_isf));
	DECLARE("CPU_UBER_TMP",
		offsetof(cpu_data_t *, cpu_uber.cu_tmp));
	DECLARE("CPU_UBER_ARG_STORE",
		offsetof(cpu_data_t *, cpu_uber_arg_store));
	DECLARE("CPU_UBER_ARG_STORE_VALID",
		offsetof(cpu_data_t *, cpu_uber_arg_store_valid));

	DECLARE("CPU_NANOTIME",
		offsetof(cpu_data_t *, cpu_nanotime));

	DECLARE("CPU_DR7",
		offsetof(cpu_data_t *, cpu_dr7));

	DECLARE("hwIntCnt", 	offsetof(cpu_data_t *,cpu_hwIntCnt));

	DECLARE("enaExpTrace",	enaExpTrace);
	DECLARE("enaExpTraceb",	enaExpTraceb);
	DECLARE("enaUsrFCall",	enaUsrFCall);
	DECLARE("enaUsrFCallb",	enaUsrFCallb);
	DECLARE("enaUsrPhyMp",	enaUsrPhyMp);
	DECLARE("enaUsrPhyMpb",	enaUsrPhyMpb);
	DECLARE("enaDiagSCs",	enaDiagSCs);
	DECLARE("enaDiagSCsb",	enaDiagSCsb);
	DECLARE("enaDiagEM",	enaDiagEM);
	DECLARE("enaDiagEMb",	enaDiagEMb);
	DECLARE("enaNotifyEM",	enaNotifyEM);
	DECLARE("enaNotifyEMb",	enaNotifyEMb);
	DECLARE("dgLock",		offsetof(struct diagWork *, dgLock));
	DECLARE("dgFlags",		offsetof(struct diagWork *, dgFlags));
	DECLARE("dgMisc1",		offsetof(struct diagWork *, dgMisc1));
	DECLARE("dgMisc2",		offsetof(struct diagWork *, dgMisc2));
	DECLARE("dgMisc3",		offsetof(struct diagWork *, dgMisc3));
	DECLARE("dgMisc4",		offsetof(struct diagWork *, dgMisc4));
	DECLARE("dgMisc5",		offsetof(struct diagWork *, dgMisc5));

	DECLARE("INTEL_PTE_KERNEL",	INTEL_PTE_VALID|INTEL_PTE_WRITE);
	DECLARE("PDESHIFT",     PDESHIFT);
	DECLARE("PDESIZE",     PDESIZE);
	DECLARE("PTESIZE",     PTESIZE);
#ifdef __i386__
	DECLARE("PTDPTDI",     PTDPTDI);
	DECLARE("APTDPTDI",     APTDPTDI);
	DECLARE("HIGH_MEM_BASE", HIGH_MEM_BASE);
	DECLARE("HIGH_IDT_BASE", pmap_index_to_virt(HIGH_FIXED_IDT));
#endif

	DECLARE("KERNELBASEPDE",
		(LINEAR_KERNEL_ADDRESS >> PDESHIFT) *
		sizeof(pt_entry_t));

	DECLARE("TSS_ESP0",	offsetof(struct i386_tss *, esp0));
	DECLARE("TSS_SS0",	offsetof(struct i386_tss *, ss0));
	DECLARE("TSS_LDT",	offsetof(struct i386_tss *, ldt));
	DECLARE("TSS_PDBR",	offsetof(struct i386_tss *, cr3));
	DECLARE("TSS_LINK",	offsetof(struct i386_tss *, back_link));

	DECLARE("K_TASK_GATE",	ACC_P|ACC_PL_K|ACC_TASK_GATE);
	DECLARE("K_TRAP_GATE",	ACC_P|ACC_PL_K|ACC_TRAP_GATE);
	DECLARE("U_TRAP_GATE",	ACC_P|ACC_PL_U|ACC_TRAP_GATE);
	DECLARE("K_INTR_GATE",	ACC_P|ACC_PL_K|ACC_INTR_GATE);
	DECLARE("U_INTR_GATE",  ACC_P|ACC_PL_U|ACC_INTR_GATE);
	DECLARE("K_TSS",	ACC_P|ACC_PL_K|ACC_TSS);

	/*
	 *	usimple_lock fields
	 */
	DECLARE("USL_INTERLOCK",	offsetof(usimple_lock_t, interlock));

	DECLARE("INTSTACK_SIZE",	INTSTACK_SIZE);
	DECLARE("KADDR", offsetof(struct boot_args *, kaddr));
	DECLARE("KSIZE", offsetof(struct boot_args *, ksize));
	DECLARE("MEMORYMAP", offsetof(struct boot_args *, MemoryMap));
	DECLARE("DEVICETREEP", offsetof(struct boot_args *, deviceTreeP));

	DECLARE("RNT_TSC_BASE",
		offsetof(rtc_nanotime_t *, tsc_base));
	DECLARE("RNT_NS_BASE",
		offsetof(rtc_nanotime_t *, ns_base));
	DECLARE("RNT_SCALE",
		offsetof(rtc_nanotime_t *, scale));
	DECLARE("RNT_SHIFT",
		offsetof(rtc_nanotime_t *, shift));
	DECLARE("RNT_GENERATION",
		offsetof(rtc_nanotime_t *, generation));

	/* values from kern/timer.h */
#ifdef __LP64__
	DECLARE("TIMER_ALL", offsetof(struct timer *, all_bits));
#else
	DECLARE("TIMER_LOW",	 	offsetof(struct timer *, low_bits));
	DECLARE("TIMER_HIGH",		offsetof(struct timer *, high_bits));
	DECLARE("TIMER_HIGHCHK",	offsetof(struct timer *, high_bits_check));	
#endif
#if !STAT_TIME
	DECLARE("TIMER_TSTAMP",
		offsetof(struct timer *, tstamp));

	DECLARE("THREAD_TIMER",
		offsetof(struct processor *, processor_data.thread_timer));
#endif
	DECLARE("KERNEL_TIMER",
		offsetof(struct processor *, processor_data.kernel_timer));
	DECLARE("SYSTEM_TIMER",
		offsetof(struct thread *, system_timer));
	DECLARE("USER_TIMER",
		offsetof(struct thread *, user_timer));
	DECLARE("SYSTEM_STATE",
			offsetof(struct processor *, processor_data.system_state));
	DECLARE("USER_STATE",
			offsetof(struct processor *, processor_data.user_state));
	DECLARE("IDLE_STATE",
			offsetof(struct processor *, processor_data.idle_state));
	DECLARE("CURRENT_STATE",
			offsetof(struct processor *, processor_data.current_state));

	DECLARE("OnProc", OnProc);


#if	CONFIG_DTRACE
	DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE", LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE);
	DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE);
	DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE);
	DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE", LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE);
	DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE", LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE);
#endif

	return (0);
}
Example no. 11
File: genassym.c Project: armv8/xnu
int main(int argc, char **argv)
{

    DECLARE("AST_URGENT", AST_URGENT);
    DECLARE("AST_BSD", AST_BSD);

    DECLARE("MAX_CPUS", MAX_CPUS);

    /*
     * Mutex structure 
     */
    DECLARE("MUTEX_PTR", offsetof(lck_mtx_t *, lck_mtx_ptr));
    DECLARE("MUTEX_STATE", offsetof(lck_mtx_t *, lck_mtx_state));

    DECLARE("MUTEX_IND", LCK_MTX_TAG_INDIRECT);
    DECLARE("MUTEX_PTR", offsetof(lck_mtx_t *, lck_mtx_ptr));
    DECLARE("MUTEX_ASSERT_OWNED", LCK_MTX_ASSERT_OWNED);
    DECLARE("MUTEX_ASSERT_NOTOWNED", LCK_MTX_ASSERT_NOTOWNED);
    DECLARE("GRP_MTX_STAT_UTIL",
            offsetof(lck_grp_t *,
                     lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt));
    DECLARE("GRP_MTX_STAT_MISS",
            offsetof(lck_grp_t *,
                     lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt));
    DECLARE("GRP_MTX_STAT_WAIT",
            offsetof(lck_grp_t *,
                     lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt));

    /*
     * Per-mutex statistic element 
     */
    DECLARE("MTX_ACQ_TSC", offsetof(lck_mtx_ext_t *, lck_mtx_stat));

    /*
     * Mutex group statistics elements 
     */
    DECLARE("MUTEX_GRP", offsetof(lck_mtx_ext_t *, lck_mtx_grp));

    /*
     * Boot-args 
     */
    DECLARE("BOOT_ARGS_VERSION", offsetof(boot_args *, Version));
    DECLARE("BOOT_ARGS_VIRTBASE", offsetof(boot_args *, virtBase));
    DECLARE("BOOT_ARGS_PHYSBASE", offsetof(boot_args *, physBase));
    DECLARE("BOOT_ARGS_MEMSIZE", offsetof(boot_args *, memSize));
    DECLARE("BOOT_ARGS_TOP_OF_KERNEL", offsetof(boot_args *, topOfKernelData));
    DECLARE("BOOT_ARGS_DEVICETREEP", offsetof(boot_args *, deviceTreeP));

    /*
     * The use of this field is somewhat at variance with the alias.
     */
    DECLARE("GRP_MTX_STAT_DIRECT_WAIT",
            offsetof(lck_grp_t *,
                     lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt));

    DECLARE("GRP_MTX_STAT_HELD_MAX",
            offsetof(lck_grp_t *,
                     lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max));
    /*
     * Reader writer lock types 
     */
    DECLARE("RW_SHARED", LCK_RW_TYPE_SHARED);
    DECLARE("RW_EXCL", LCK_RW_TYPE_EXCLUSIVE);

    DECLARE("TH_RECOVER", offsetof(thread_t, recover));
    DECLARE("TH_CONTINUATION", offsetof(thread_t, continuation));
    DECLARE("TH_KERNEL_STACK", offsetof(thread_t, kernel_stack));
    DECLARE("TH_MUTEX_COUNT", offsetof(thread_t, mutex_count));
    DECLARE("TH_WAS_PROMOTED_ON_WAKEUP",
            offsetof(thread_t, was_promoted_on_wakeup));

    DECLARE("TH_SYSCALLS_MACH", offsetof(thread_t, syscalls_mach));
    DECLARE("TH_SYSCALLS_UNIX", offsetof(thread_t, syscalls_unix));

    DECLARE("TASK_VTIMERS", offsetof(struct task *, vtimers));

    /*
     * These fields are being added on demand 
     */
    DECLARE("MACHINE_THREAD", offsetof(thread_t, machine));
    DECLARE("MACHINE_THREAD_PREEMPT_COUNT",
            offsetof(thread_t, machine.preempt_count));
    DECLARE("MACHINE_THREAD_CPU_DATA", offsetof(thread_t, machine.cpu_data));
    DECLARE("MACHINE_THREAD_CTHREAD_SELF",
            offsetof(thread_t, machine.cthread_self));

    DECLARE("CPU_PENDING_AST", offsetof(cpu_data_t *, cpu_pending_ast));
    DECLARE("CPU_PREEMPT_COUNT", offsetof(cpu_data_t *, cpu_preemption_level));

    DECLARE("CPU_FLEH_RESET", offsetof(cpu_data_t *, fleh_reset));
    DECLARE("CPU_FLEH_UNDEF", offsetof(cpu_data_t *, fleh_undef));
    DECLARE("CPU_FLEH_SWI", offsetof(cpu_data_t *, fleh_swi));
    DECLARE("CPU_FLEH_PREFETCH", offsetof(cpu_data_t *, fleh_prefabt));
    DECLARE("CPU_FLEH_DATAABORT", offsetof(cpu_data_t *, fleh_dataabt));
    DECLARE("CPU_FLEH_DATAEXC", offsetof(cpu_data_t *, fleh_prefabt));
    DECLARE("CPU_FLEH_IRQ", offsetof(cpu_data_t *, fleh_irq));

    DECLARE("CPU_PMAP", offsetof(cpu_data_t *, user_pmap));

    DECLARE("TH_TASK", offsetof(thread_t, task));
    DECLARE("TH_AST", offsetof(thread_t, ast));
    DECLARE("TH_MAP", offsetof(thread_t, map));
    DECLARE("TH_PCB_ISS", offsetof(thread_t, machine.iss));
    DECLARE("TH_PCB_USS", offsetof(thread_t, machine.uss));

#if NCOPY_WINDOWS > 0
    DECLARE("TH_COPYIO_STATE", offsetof(thread_t, machine.copyio_state));
    DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN);
#endif

    DECLARE("MAP_PMAP", offsetof(vm_map_t, pmap));

    DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS);
    DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS);
    DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS);
    DECLARE("LINEAR_KERNELBASE", LINEAR_KERNEL_ADDRESS);
    DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE);

    DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS", _COMM_PAGE32_BASE_ADDRESS);
    DECLARE("ASM_COMM_PAGE32_START_ADDRESS", _COMM_PAGE32_START_ADDRESS);

    DECLARE("CPU_THIS", offsetof(cpu_data_t *, cpu_this));
    DECLARE("CPU_ACTIVE_THREAD", offsetof(cpu_data_t *, cpu_active_thread));
    DECLARE("CPU_ACTIVE_STACK", offsetof(cpu_data_t *, cpu_active_stack));
    DECLARE("CPU_KERNEL_STACK", offsetof(cpu_data_t *, cpu_kernel_stack));
    DECLARE("CPU_INT_STACK_TOP", offsetof(cpu_data_t *, cpu_int_stack_top));

#if	MACH_RT
    DECLARE("CPU_PREEMPTION_LEVEL",
            offsetof(cpu_data_t *, cpu_preemption_level));
#endif                          /* MACH_RT */
    DECLARE("CPU_PROCESSOR", offsetof(cpu_data_t *, cpu_processor));
    DECLARE("CPU_ONFAULT", offsetof(cpu_data_t *, cpu_onfault));

    DECLARE("CPU_INTERRUPT_LEVEL",
            offsetof(cpu_data_t *, cpu_interrupt_level));

    /*
     *  usimple_lock fields
     */
    DECLARE("INTSTACK_SIZE", INTSTACK_SIZE);

    /*
     * values from kern/timer.h 
     */
    DECLARE("TIMER_TSTAMP", offsetof(struct timer *, tstamp));

#if	CONFIG_DTRACE
    DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE);
    DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE",
            LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE);
    DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE);
    DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE);
    DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE);
    DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE);
    DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE);
    DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE);
    DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE);
    DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE);
    DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE",
            LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE);
    DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE);
    DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE",
            LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE);
    DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE);
#endif

    return (0);
}
Example no. 12
int
main(
	int	argc,
	char	**argv)
{

	DECLARE("AST_URGENT",		AST_URGENT);
	DECLARE("AST_BSD",			AST_BSD);

	DECLARE("MAX_CPUS",			MAX_CPUS);

	/* Simple Lock structure */
	DECLARE("SLOCK_ILK",	offsetof(usimple_lock_data_t, interlock));
#if	MACH_LDEBUG
	DECLARE("SLOCK_TYPE",	offsetof(usimple_lock_data_t, lock_type));
	DECLARE("SLOCK_PC",	offsetof(usimple_lock_data_t, debug.lock_pc));
	DECLARE("SLOCK_THREAD",	offsetof(usimple_lock_data_t, debug.lock_thread));
	DECLARE("SLOCK_DURATIONH",offsetof(usimple_lock_data_t, debug.duration[0]));
	DECLARE("SLOCK_DURATIONL",offsetof(usimple_lock_data_t, debug.duration[1]));
	DECLARE("USLOCK_TAG",	USLOCK_TAG);
#endif	/* MACH_LDEBUG */

	/* Mutex structure */
	DECLARE("MUTEX_OWNER", offsetof(lck_mtx_t, lck_mtx_owner));
	DECLARE("MUTEX_PTR",   offsetof(lck_mtx_t, lck_mtx_ptr));
	DECLARE("MUTEX_STATE", offsetof(lck_mtx_t, lck_mtx_state));
	DECLARE("MUTEX_IND",	LCK_MTX_TAG_INDIRECT);
	DECLARE("MUTEX_ASSERT_OWNED",	LCK_MTX_ASSERT_OWNED);
	DECLARE("MUTEX_ASSERT_NOTOWNED",LCK_MTX_ASSERT_NOTOWNED);
	DECLARE("GRP_MTX_STAT_UTIL",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt));
	DECLARE("GRP_MTX_STAT_MISS",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt));
	DECLARE("GRP_MTX_STAT_WAIT",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt));
	
	/* x86 only */
	DECLARE("MUTEX_DESTROYED", LCK_MTX_TAG_DESTROYED);

	/* Per-mutex statistic element */
	DECLARE("MTX_ACQ_TSC",	offsetof(lck_mtx_ext_t, lck_mtx_stat));

	/* Mutex group statistics elements */
	DECLARE("MUTEX_GRP",	offsetof(lck_mtx_ext_t, lck_mtx_grp));
	
	/*
	 * The use of this field is somewhat at variance with the alias.
	 */
	DECLARE("GRP_MTX_STAT_DIRECT_WAIT",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt));

	DECLARE("GRP_MTX_STAT_HELD_MAX",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max));
	/* Reader writer lock types */
	DECLARE("RW_SHARED",    LCK_RW_TYPE_SHARED);
	DECLARE("RW_EXCL",      LCK_RW_TYPE_EXCLUSIVE);

	DECLARE("TH_RECOVER",		offsetof(struct thread, recover));
	DECLARE("TH_CONTINUATION",	offsetof(struct thread, continuation));
	DECLARE("TH_KERNEL_STACK",	offsetof(struct thread, kernel_stack));
	DECLARE("TH_MUTEX_COUNT",	offsetof(struct thread, mutex_count));
	DECLARE("TH_WAS_PROMOTED_ON_WAKEUP", offsetof(struct thread, was_promoted_on_wakeup));
	DECLARE("TH_IOTIER_OVERRIDE",	offsetof(struct thread, iotier_override));

	DECLARE("TH_SYSCALLS_MACH",	offsetof(struct thread, syscalls_mach));
	DECLARE("TH_SYSCALLS_UNIX",	offsetof(struct thread, syscalls_unix));

	DECLARE("TASK_VTIMERS",			offsetof(struct task, vtimers));

	/* These fields are being added on demand */
	DECLARE("TH_TASK",	offsetof(struct thread, task));
	DECLARE("TH_AST",	offsetof(struct thread, ast));
	DECLARE("TH_MAP",	offsetof(struct thread, map));
	DECLARE("TH_SPF",	offsetof(struct thread, machine.specFlags));
	DECLARE("TH_PCB_ISS", 	offsetof(struct thread, machine.iss));
	DECLARE("TH_PCB_IDS", 	offsetof(struct thread, machine.ids));
	DECLARE("TH_PCB_FPS",	offsetof(struct thread, machine.ifps));
#if NCOPY_WINDOWS > 0
	DECLARE("TH_COPYIO_STATE", offsetof(struct thread, machine.copyio_state));
	DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN);
#endif
	DECLARE("TH_RWLOCK_COUNT",	offsetof(struct thread, rwlock_count));

	DECLARE("MAP_PMAP",	offsetof(struct _vm_map, pmap));

#define IEL_SIZE		(sizeof(struct i386_exception_link *))
	DECLARE("IKS_SIZE",	sizeof(struct thread_kernel_state));

	/*
	 * KSS_* are offsets from the top of the kernel stack (cpu_kernel_stack)
	 */
	DECLARE("KSS_RBX",	offsetof(struct thread_kernel_state, machine.k_rbx));
	DECLARE("KSS_RSP",	offsetof(struct thread_kernel_state, machine.k_rsp));
	DECLARE("KSS_RBP",	offsetof(struct thread_kernel_state, machine.k_rbp));
	DECLARE("KSS_R12",	offsetof(struct thread_kernel_state, machine.k_r12));
	DECLARE("KSS_R13",	offsetof(struct thread_kernel_state, machine.k_r13));
	DECLARE("KSS_R14",	offsetof(struct thread_kernel_state, machine.k_r14));
	DECLARE("KSS_R15",	offsetof(struct thread_kernel_state, machine.k_r15));
	DECLARE("KSS_RIP",	offsetof(struct thread_kernel_state, machine.k_rip));
	
	DECLARE("DS_DR0",	offsetof(struct x86_debug_state32, dr0));
	DECLARE("DS_DR1",	offsetof(struct x86_debug_state32, dr1));
	DECLARE("DS_DR2",	offsetof(struct x86_debug_state32, dr2));
	DECLARE("DS_DR3",	offsetof(struct x86_debug_state32, dr3));
	DECLARE("DS_DR4",	offsetof(struct x86_debug_state32, dr4));
	DECLARE("DS_DR5",	offsetof(struct x86_debug_state32, dr5));
	DECLARE("DS_DR6",	offsetof(struct x86_debug_state32, dr6));
	DECLARE("DS_DR7",	offsetof(struct x86_debug_state32, dr7));

	DECLARE("DS64_DR0",	offsetof(struct x86_debug_state64, dr0));
	DECLARE("DS64_DR1",	offsetof(struct x86_debug_state64, dr1));
	DECLARE("DS64_DR2",	offsetof(struct x86_debug_state64, dr2));
	DECLARE("DS64_DR3",	offsetof(struct x86_debug_state64, dr3));
	DECLARE("DS64_DR4",	offsetof(struct x86_debug_state64, dr4));
	DECLARE("DS64_DR5",	offsetof(struct x86_debug_state64, dr5));
	DECLARE("DS64_DR6",	offsetof(struct x86_debug_state64, dr6));
	DECLARE("DS64_DR7",	offsetof(struct x86_debug_state64, dr7));

	DECLARE("FP_VALID",	offsetof(struct x86_fx_thread_state,fp_valid));

	DECLARE("SS_FLAVOR",	offsetof(x86_saved_state_t, flavor));
	DECLARE("SS_32",	x86_SAVED_STATE32);
	DECLARE("SS_64",	x86_SAVED_STATE64);

#define R_(x)  offsetof(x86_saved_state_t, ss_32.x)
	DECLARE("R32_CS",	R_(cs));
	DECLARE("R32_SS",	R_(ss));
	DECLARE("R32_DS",	R_(ds));
	DECLARE("R32_ES",	R_(es));
	DECLARE("R32_FS",	R_(fs));
	DECLARE("R32_GS",	R_(gs));
	DECLARE("R32_UESP",	R_(uesp));
	DECLARE("R32_EBP",	R_(ebp));
	DECLARE("R32_EAX",	R_(eax));
	DECLARE("R32_EBX",	R_(ebx));
	DECLARE("R32_ECX",	R_(ecx));
	DECLARE("R32_EDX",	R_(edx));
	DECLARE("R32_ESI",	R_(esi));
	DECLARE("R32_EDI",	R_(edi));
	DECLARE("R32_TRAPNO",	R_(trapno));
	DECLARE("R32_ERR",	R_(err));
	DECLARE("R32_EFLAGS",	R_(efl));
	DECLARE("R32_EIP",	R_(eip));
	DECLARE("R32_CR2",	R_(cr2));
	DECLARE("ISS32_SIZE",	sizeof (x86_saved_state32_t));

#define R64_(x)  offsetof(x86_saved_state_t, ss_64.x)
	DECLARE("R64_FS",	R64_(fs));
	DECLARE("R64_GS",	R64_(gs));
	DECLARE("R64_R8",	R64_(r8));
	DECLARE("R64_R9",	R64_(r9));
	DECLARE("R64_R10",	R64_(r10));
	DECLARE("R64_R11",	R64_(r11));
	DECLARE("R64_R12",	R64_(r12));
	DECLARE("R64_R13",	R64_(r13));
	DECLARE("R64_R14",	R64_(r14));
	DECLARE("R64_R15",	R64_(r15));
	DECLARE("R64_RBP",	R64_(rbp));
	DECLARE("R64_RAX",	R64_(rax));
	DECLARE("R64_RBX",	R64_(rbx));
	DECLARE("R64_RCX",	R64_(rcx));
	DECLARE("R64_RDX",	R64_(rdx));
	DECLARE("R64_RSI",	R64_(rsi));
	DECLARE("R64_RDI",	R64_(rdi));
	DECLARE("R64_CS",	R64_(isf.cs));
	DECLARE("R64_SS",	R64_(isf.ss));
	DECLARE("R64_RSP",	R64_(isf.rsp));
	DECLARE("R64_TRAPNO",	R64_(isf.trapno));
	DECLARE("R64_TRAPFN",	R64_(isf.trapfn));
	DECLARE("R64_ERR",	R64_(isf.err));
	DECLARE("R64_RFLAGS",	R64_(isf.rflags));
	DECLARE("R64_RIP",	R64_(isf.rip));
	DECLARE("R64_CR2",	R64_(cr2));
	DECLARE("ISS64_OFFSET",	R64_(isf));
	DECLARE("ISS64_SIZE",	sizeof (x86_saved_state64_t));

#define ISF64_(x)  offsetof(x86_64_intr_stack_frame_t, x)
	DECLARE("ISF64_TRAPNO",	ISF64_(trapno));
	DECLARE("ISF64_TRAPFN",	ISF64_(trapfn));
	DECLARE("ISF64_ERR",	ISF64_(err));
	DECLARE("ISF64_RIP",	ISF64_(rip));
	DECLARE("ISF64_CS",	ISF64_(cs));
	DECLARE("ISF64_RFLAGS",	ISF64_(rflags));
	DECLARE("ISF64_RSP",	ISF64_(rsp));
	DECLARE("ISF64_SS",	ISF64_(ss));
	DECLARE("ISF64_SIZE",	sizeof(x86_64_intr_stack_frame_t));

	DECLARE("NBPG",			I386_PGBYTES);
	DECLARE("PAGE_SIZE",            I386_PGBYTES);
	DECLARE("PAGE_MASK",            I386_PGBYTES-1);
	DECLARE("PAGE_SHIFT",           12);
	DECLARE("NKPT",                 NKPT);
	DECLARE("VM_MIN_ADDRESS",	VM_MIN_ADDRESS);
	DECLARE("VM_MAX_ADDRESS",	VM_MAX_ADDRESS);
	DECLARE("KERNELBASE",		VM_MIN_KERNEL_ADDRESS);
	DECLARE("LINEAR_KERNELBASE",	LINEAR_KERNEL_ADDRESS);
	DECLARE("KERNEL_STACK_SIZE",	KERNEL_STACK_SIZE);

	DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS",  _COMM_PAGE32_BASE_ADDRESS);
	DECLARE("ASM_COMM_PAGE32_START_ADDRESS",  _COMM_PAGE32_START_ADDRESS);
	DECLARE("ASM_COMM_PAGE_SCHED_GEN",  _COMM_PAGE_SCHED_GEN);

	DECLARE("KERNEL_PML4_INDEX", KERNEL_PML4_INDEX);
	DECLARE("IDTSZ",	IDTSZ);
	DECLARE("GDTSZ",	GDTSZ);
	DECLARE("LDTSZ",	LDTSZ);

	DECLARE("KERNEL_DS",	KERNEL_DS);
	DECLARE("USER_CS",	USER_CS);
	DECLARE("USER_DS",	USER_DS);
	DECLARE("KERNEL32_CS",	KERNEL32_CS);
	DECLARE("KERNEL64_CS",  KERNEL64_CS);
	DECLARE("USER64_CS",	USER64_CS);
	DECLARE("KERNEL_TSS",	KERNEL_TSS);
	DECLARE("KERNEL_LDT",	KERNEL_LDT);
	DECLARE("SYSENTER_CS",	SYSENTER_CS);
	DECLARE("SYSENTER_TF_CS",SYSENTER_TF_CS);
	DECLARE("SYSENTER_DS",	SYSENTER_DS);
	DECLARE("SYSCALL_CS",	SYSCALL_CS);

        DECLARE("CPU_THIS",
		offsetof(cpu_data_t, cpu_this));
        DECLARE("CPU_ACTIVE_THREAD",
		offsetof(cpu_data_t, cpu_active_thread));
        DECLARE("CPU_ACTIVE_STACK",
		offsetof(cpu_data_t, cpu_active_stack));
        DECLARE("CPU_KERNEL_STACK",
		offsetof(cpu_data_t, cpu_kernel_stack));
        DECLARE("CPU_INT_STACK_TOP",
		offsetof(cpu_data_t, cpu_int_stack_top));
        DECLARE("CPU_PREEMPTION_LEVEL",
		offsetof(cpu_data_t, cpu_preemption_level));
        DECLARE("CPU_HIBERNATE",
		offsetof(cpu_data_t, cpu_hibernate));
        DECLARE("CPU_INTERRUPT_LEVEL",
		offsetof(cpu_data_t, cpu_interrupt_level));
	DECLARE("CPU_NESTED_ISTACK",
	    offsetof(cpu_data_t, cpu_nested_istack));
        DECLARE("CPU_NUMBER_GS",
		offsetof(cpu_data_t,cpu_number));
        DECLARE("CPU_RUNNING",
		offsetof(cpu_data_t,cpu_running));
	DECLARE("CPU_PENDING_AST",
		offsetof(cpu_data_t,cpu_pending_ast));
	DECLARE("CPU_DESC_TABLEP",
		offsetof(cpu_data_t,cpu_desc_tablep));
	DECLARE("CPU_DESC_INDEX",
		offsetof(cpu_data_t,cpu_desc_index));
	DECLARE("CDI_GDT",
		offsetof(cpu_desc_index_t,cdi_gdt));
	DECLARE("CDI_IDT",
		offsetof(cpu_desc_index_t,cdi_idt));
	DECLARE("CPU_PROCESSOR",
		offsetof(cpu_data_t,cpu_processor));
        DECLARE("CPU_INT_STATE",
		offsetof(cpu_data_t, cpu_int_state));
        DECLARE("CPU_INT_EVENT_TIME",
		offsetof(cpu_data_t, cpu_int_event_time));

        DECLARE("CPU_TASK_CR3",
		offsetof(cpu_data_t, cpu_task_cr3));
        DECLARE("CPU_ACTIVE_CR3",
		offsetof(cpu_data_t, cpu_active_cr3));
        DECLARE("CPU_KERNEL_CR3",
		offsetof(cpu_data_t, cpu_kernel_cr3));
	DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t, cpu_tlb_invalid));
	DECLARE("CPU_PAGEZERO_MAPPED",
		offsetof(cpu_data_t, cpu_pagezero_mapped));

	DECLARE("CPU_TASK_MAP",
		offsetof(cpu_data_t, cpu_task_map));
	DECLARE("TASK_MAP_32BIT",		TASK_MAP_32BIT); 
	DECLARE("TASK_MAP_64BIT",		TASK_MAP_64BIT);
	DECLARE("CPU_UBER_USER_GS_BASE",
		offsetof(cpu_data_t, cpu_uber.cu_user_gs_base));
	DECLARE("CPU_UBER_ISF",
		offsetof(cpu_data_t, cpu_uber.cu_isf));
	DECLARE("CPU_UBER_TMP",
		offsetof(cpu_data_t, cpu_uber.cu_tmp));

	DECLARE("CPU_NANOTIME",
		offsetof(cpu_data_t, cpu_nanotime));

	DECLARE("CPU_DR7",
		offsetof(cpu_data_t, cpu_dr7));

	DECLARE("hwIntCnt", 	offsetof(cpu_data_t,cpu_hwIntCnt));
	DECLARE("CPU_ACTIVE_PCID",
		offsetof(cpu_data_t, cpu_active_pcid));
	DECLARE("CPU_KERNEL_PCID",
		offsetof(cpu_data_t, cpu_kernel_pcid));

	DECLARE("CPU_PCID_COHERENTP",
		offsetof(cpu_data_t, cpu_pmap_pcid_coherentp));
	DECLARE("CPU_PCID_COHERENTP_KERNEL",
		offsetof(cpu_data_t, cpu_pmap_pcid_coherentp_kernel));
	DECLARE("CPU_PMAP_PCID_ENABLED",
	    offsetof(cpu_data_t, cpu_pmap_pcid_enabled));

#ifdef	PCID_STATS	
	DECLARE("CPU_PMAP_USER_RETS",
	    offsetof(cpu_data_t, cpu_pmap_user_rets));
	DECLARE("CPU_PMAP_PCID_PRESERVES",
	    offsetof(cpu_data_t, cpu_pmap_pcid_preserves));
	DECLARE("CPU_PMAP_PCID_FLUSHES",
	    offsetof(cpu_data_t, cpu_pmap_pcid_flushes));
#endif
	DECLARE("CPU_TLB_INVALID_LOCAL",
	    offsetof(cpu_data_t, cpu_tlb_invalid_local));
	DECLARE("CPU_TLB_INVALID_GLOBAL",
		offsetof(cpu_data_t, cpu_tlb_invalid_global));
	DECLARE("enaExpTrace",	enaExpTrace);
	DECLARE("enaUsrFCall",	enaUsrFCall);
	DECLARE("enaUsrPhyMp",	enaUsrPhyMp);
	DECLARE("enaDiagSCs",	enaDiagSCs);
	DECLARE("enaDiagEM",	enaDiagEM);
	DECLARE("enaNotifyEM",	enaNotifyEM);
	DECLARE("dgLock",		offsetof(struct diagWork, dgLock));
	DECLARE("dgFlags",		offsetof(struct diagWork, dgFlags));
	DECLARE("dgMisc1",		offsetof(struct diagWork, dgMisc1));
	DECLARE("dgMisc2",		offsetof(struct diagWork, dgMisc2));
	DECLARE("dgMisc3",		offsetof(struct diagWork, dgMisc3));
	DECLARE("dgMisc4",		offsetof(struct diagWork, dgMisc4));
	DECLARE("dgMisc5",		offsetof(struct diagWork, dgMisc5));

	DECLARE("TSS_ESP0",	offsetof(struct i386_tss, esp0));
	DECLARE("TSS_SS0",	offsetof(struct i386_tss, ss0));
	DECLARE("TSS_LDT",	offsetof(struct i386_tss, ldt));
	DECLARE("TSS_PDBR",	offsetof(struct i386_tss, cr3));
	DECLARE("TSS_LINK",	offsetof(struct i386_tss, back_link));

	DECLARE("K_TASK_GATE",	ACC_P|ACC_PL_K|ACC_TASK_GATE);
	DECLARE("K_TRAP_GATE",	ACC_P|ACC_PL_K|ACC_TRAP_GATE);
	DECLARE("U_TRAP_GATE",	ACC_P|ACC_PL_U|ACC_TRAP_GATE);
	DECLARE("K_INTR_GATE",	ACC_P|ACC_PL_K|ACC_INTR_GATE);
	DECLARE("U_INTR_GATE",  ACC_P|ACC_PL_U|ACC_INTR_GATE);
	DECLARE("K_TSS",	ACC_P|ACC_PL_K|ACC_TSS);

	/*
	 *	usimple_lock fields
	 */
	DECLARE("USL_INTERLOCK",	offsetof(usimple_lock_data_t, interlock));

	DECLARE("INTSTACK_SIZE",	INTSTACK_SIZE);
	DECLARE("KADDR", offsetof(struct boot_args, kaddr));
	DECLARE("KSIZE", offsetof(struct boot_args, ksize));
	DECLARE("MEMORYMAP", offsetof(struct boot_args, MemoryMap));
	DECLARE("DEVICETREEP", offsetof(struct boot_args, deviceTreeP));

	DECLARE("RNT_TSC_BASE",
		offsetof(pal_rtc_nanotime_t, tsc_base));
	DECLARE("RNT_NS_BASE",
		offsetof(pal_rtc_nanotime_t, ns_base));
	DECLARE("RNT_SCALE",
		offsetof(pal_rtc_nanotime_t, scale));
	DECLARE("RNT_SHIFT",
		offsetof(pal_rtc_nanotime_t, shift));
	DECLARE("RNT_GENERATION",
		offsetof(pal_rtc_nanotime_t, generation));

	/* values from kern/timer.h */
#ifdef __LP64__
	DECLARE("TIMER_ALL", offsetof(struct timer, all_bits));
#else
	DECLARE("TIMER_LOW",	 	offsetof(struct timer, low_bits));
	DECLARE("TIMER_HIGH",		offsetof(struct timer, high_bits));
	DECLARE("TIMER_HIGHCHK",	offsetof(struct timer, high_bits_check));	
#endif
	DECLARE("TIMER_TSTAMP",
		offsetof(struct timer, tstamp));

	DECLARE("THREAD_TIMER",
		offsetof(struct processor, processor_data.thread_timer));
	DECLARE("KERNEL_TIMER",
		offsetof(struct processor, processor_data.kernel_timer));
	DECLARE("SYSTEM_TIMER",
		offsetof(struct thread, system_timer));
	DECLARE("USER_TIMER",
		offsetof(struct thread, user_timer));
	DECLARE("SYSTEM_STATE",
			offsetof(struct processor, processor_data.system_state));
	DECLARE("USER_STATE",
			offsetof(struct processor, processor_data.user_state));
	DECLARE("IDLE_STATE",
			offsetof(struct processor, processor_data.idle_state));
	DECLARE("CURRENT_STATE",
			offsetof(struct processor, processor_data.current_state));

	DECLARE("OnProc", OnProc);


#if	CONFIG_DTRACE
	DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE", LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE);
	DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE);
	DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE);
	DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE", LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE);
	DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE", LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE);
#endif

	return (0);
}