Example no. 1
void getToken()
{
    char tmp;
    token.clear();                         //clear the token buffer
    while( BLANK(ch) ){                 //skip whitespace
        getch();
    }

    if(last_is_character && left_right_single_quotation){  //previous token was an opening single quote
        last_is_character = false;
        symbol = "CHARACTER";
        while(ch != '\''){              //collect the character literal (in theory only one character between single quotes)
            token += ch;
            getch();
        }
    }
    else if(last_is_string && left_right_double_quotation){ //previous token was an opening double quote
        last_is_string = false;
        symbol = "STRING";
        while(ch != '"'){
            token += ch;
            getch();
        }
    }
    else if( LETTER(ch) ){                  //normal cases, unrelated to quotes: a letter starts an identifier or keyword
        while(NUM_LETTER(ch)){
            token += ch;
            tmp = ch;
            getch();
        }

        //check whether the word is a reserved word
        map<string, string>::iterator it = revWord.find(token);
        if(it != revWord.end()){        //found: it is a reserved word
            symbol = it->second;
            printToken(symbol, token);
        }
        else{                           //not a reserved word: it is an identifier
            symbol = "IDENTIFIER";
            printToken(symbol, token);
        }
    }
    else if( NUM(ch) ){
        while(NUM(ch)){
            token += ch;
            tmp = ch;
            getch();
        }

        symbol = "NUMBER";
        printToken(symbol, token);
    }
    else if(ch == ':'){
        token += ch;
        tmp = ch;
        getch();
        if(ch == '='){
            symbol = "ASSIGNSY";            //:=
            token += ch;
            printToken(symbol, ":=");
            getch();                    //do not put another getch() in the else branch; the next character has already been read
        }
        else{
            symbol = charSymb[tmp];         //:
            // cout << ++counter << " " << symbol << " " << tmp << endl;
        }
    }
    else if(ch == '<'){
        tmp = ch;
        token += ch;
        getch();
        if(ch == '='){
            symbol = "LEQ";                 //<=
            token += ch;
            printToken(symbol, "<=");
            getch();                                //consume the '=' so it is not scanned again
        }
        else if(ch == '>'){
            symbol = "NEQ";                 //<>
            token += ch;
            printToken(symbol, "<>");
            getch();                                //consume the '>' so it is not scanned again
        }
        else{
            symbol = charSymb[tmp];         //<
            // cout << ++counter << " " << symbol << " " << tmp << endl;
        }
    }
    else if(ch == '>'){
        tmp = ch;
        token += ch;
        getch();
        if(ch == '='){
            symbol = "GEQ";                 //>=
            token += ch;
            printToken(symbol, ">=");
            getch();                                //consume the '=' so it is not scanned again
        }
        else{
            symbol = charSymb[tmp];         //>
            // cout << ++counter << " " << symbol << " " << tmp << endl;
        }
    }
    else{                                   //remaining single-character operators
        symbol = charSymb[ch];
        token += ch;
        // cout << ++counter << " " << symbol << " " << ch << endl;
        if(ch == '\''){                     //single and double quotes need some extra bookkeeping
            last_is_character = true;
            left_right_single_quotation = !left_right_single_quotation;
        }
        else if(ch == '"'){
            last_is_string = true;
            left_right_double_quotation = !left_right_double_quotation;
        }
        getch();           //advance past the operator; without this the same character would be scanned again
    }


}
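
The lexer above relies on globals and helpers defined elsewhere in the original program. A minimal sketch of the surrounding declarations it assumes is shown below; the names (ch, token, symbol, revWord, charSymb, getch, printToken and the character-class macros) are taken from the snippet itself, but the definitions here are hypothetical stand-ins rather than the original ones.

#include <cctype>
#include <iostream>
#include <map>
#include <string>
using namespace std;

static string token, symbol;                 // current lexeme and its symbol class
static char ch = ' ';                        // one-character lookahead
static bool last_is_character = false, left_right_single_quotation = false;
static bool last_is_string    = false, left_right_double_quotation = false;
static map<string, string> revWord;          // reserved word -> symbol name
static map<char, string>   charSymb;         // single-char operator -> symbol name

#define BLANK(c)      ((c) == ' ' || (c) == '\t' || (c) == '\n' || (c) == '\r')
#define LETTER(c)     (isalpha((unsigned char)(c)) || (c) == '_')
#define NUM(c)        (isdigit((unsigned char)(c)))
#define NUM_LETTER(c) (LETTER(c) || NUM(c))

void getch()  { ch = (char)cin.get(); }      // read the next input character
void printToken(const string &s, const string &t) { cout << s << " " << t << endl; }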
void foo(void)
{
	OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
	OFFSET(IA32_SIGCONTEXT_bx, sigcontext, bx);
	OFFSET(IA32_SIGCONTEXT_cx, sigcontext, cx);
	OFFSET(IA32_SIGCONTEXT_dx, sigcontext, dx);
	OFFSET(IA32_SIGCONTEXT_si, sigcontext, si);
	OFFSET(IA32_SIGCONTEXT_di, sigcontext, di);
	OFFSET(IA32_SIGCONTEXT_bp, sigcontext, bp);
	OFFSET(IA32_SIGCONTEXT_sp, sigcontext, sp);
	OFFSET(IA32_SIGCONTEXT_ip, sigcontext, ip);
	BLANK();

	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
	OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
	OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
	OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
	OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
	OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
	OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
	BLANK();

	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
	OFFSET(TI_cpu, thread_info, cpu);
	BLANK();

	OFFSET(PT_EBX, pt_regs, bx);
	OFFSET(PT_ECX, pt_regs, cx);
	OFFSET(PT_EDX, pt_regs, dx);
	OFFSET(PT_ESI, pt_regs, si);
	OFFSET(PT_EDI, pt_regs, di);
	OFFSET(PT_EBP, pt_regs, bp);
	OFFSET(PT_EAX, pt_regs, ax);
	OFFSET(PT_DS,  pt_regs, ds);
	OFFSET(PT_ES,  pt_regs, es);
	OFFSET(PT_FS,  pt_regs, fs);
	OFFSET(PT_GS,  pt_regs, gs);
	OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
	OFFSET(PT_EIP, pt_regs, ip);
	OFFSET(PT_CS,  pt_regs, cs);
	OFFSET(PT_EFLAGS, pt_regs, flags);
	OFFSET(PT_OLDESP, pt_regs, sp);
	OFFSET(PT_OLDSS,  pt_regs, ss);
	BLANK();

	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
	BLANK();

	/* Offset from the sysenter stack to tss.sp0 */
	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
		 sizeof(struct tss_struct));

#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
	BLANK();
	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
	OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);

	BLANK();
	OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
	OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
	OFFSET(LGUEST_PAGES_host_cr3, lguest_pages, state.host_cr3);
	OFFSET(LGUEST_PAGES_host_sp, lguest_pages, state.host_sp);
	OFFSET(LGUEST_PAGES_guest_gdt_desc, lguest_pages,state.guest_gdt_desc);
	OFFSET(LGUEST_PAGES_guest_idt_desc, lguest_pages,state.guest_idt_desc);
	OFFSET(LGUEST_PAGES_guest_gdt, lguest_pages, state.guest_gdt);
	OFFSET(LGUEST_PAGES_regs_trapnum, lguest_pages, regs.trapnum);
	OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
	OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
#endif
	BLANK();
	DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
	DEFINE(NR_syscalls, sizeof(syscalls));
}
int main(void)
{
  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
  BLANK();
  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
  DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
  DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
  DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
  DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
  DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
#ifdef CONFIG_SMP
  DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
#endif
#ifdef CONFIG_ARM_THUMBEE
  DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
#endif
#ifdef CONFIG_IWMMXT
  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
#endif
#ifdef CONFIG_CRUNCH
  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
#endif
  BLANK();
  DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
  DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
  DEFINE(S_R2,			offsetof(struct pt_regs, ARM_r2));
  DEFINE(S_R3,			offsetof(struct pt_regs, ARM_r3));
  DEFINE(S_R4,			offsetof(struct pt_regs, ARM_r4));
  DEFINE(S_R5,			offsetof(struct pt_regs, ARM_r5));
  DEFINE(S_R6,			offsetof(struct pt_regs, ARM_r6));
  DEFINE(S_R7,			offsetof(struct pt_regs, ARM_r7));
  DEFINE(S_R8,			offsetof(struct pt_regs, ARM_r8));
  DEFINE(S_R9,			offsetof(struct pt_regs, ARM_r9));
  DEFINE(S_R10,			offsetof(struct pt_regs, ARM_r10));
  DEFINE(S_FP,			offsetof(struct pt_regs, ARM_fp));
  DEFINE(S_IP,			offsetof(struct pt_regs, ARM_ip));
  DEFINE(S_SP,			offsetof(struct pt_regs, ARM_sp));
  DEFINE(S_LR,			offsetof(struct pt_regs, ARM_lr));
  DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
  DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
  DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
  BLANK();
#ifdef CONFIG_CPU_HAS_ASID
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
  BLANK();
#endif
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC,	       	VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
  BLANK();
  DEFINE(SYS_ERROR0,		0x9f0000);
  BLANK();
  DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
  DEFINE(MACHINFO_TYPE,		offsetof(struct machine_desc, nr));
  DEFINE(MACHINFO_NAME,		offsetof(struct machine_desc, name));
  DEFINE(MACHINFO_PHYSIO,	offsetof(struct machine_desc, phys_io));
  DEFINE(MACHINFO_PGOFFIO,	offsetof(struct machine_desc, io_pg_offst));
  BLANK();
  DEFINE(PROC_INFO_SZ,		sizeof(struct proc_info_list));
  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
  DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
  DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
  BLANK();
#ifdef MULTI_DABORT
  DEFINE(PROCESSOR_DABT_FUNC,	offsetof(struct processor, _data_abort));
#endif
#ifdef MULTI_PABORT
  DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
#endif
  return 0; 
}
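
Most of the remaining examples in this listing are Linux kernel asm-offsets generators, all built on the DEFINE, OFFSET and BLANK helpers. The sketch below shows how these helpers are typically defined, modelled on the kernel's include/linux/kbuild.h; exact definitions vary between kernel versions, so treat it as an approximation. The build scripts post-process the emitted "->" marker lines into the generated asm-offsets.h header.

#include <linux/stddef.h>        /* offsetof() */

/* Emit a named constant as a marker line in the generated assembly; the
 * build turns each "->SYM value" marker into a #define in asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* Emit an empty marker, used purely as a visual separator in the output. */
#define BLANK() asm volatile("\n->" : : )

/* Convenience wrapper: the byte offset of member 'mem' in 'struct str'. */
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))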
Example no. 4
int
ReadNetlist (char *filename)
{
  static char *command = NULL;
  char inputline[MAX_NETLIST_LINE_LENGTH + 1];
  char temp[MAX_NETLIST_LINE_LENGTH + 1];
  FILE *fp;
  LibraryMenuType *menu = NULL;
  LibraryEntryType *entry;
  int i, j, lines, kind;
  bool continued;
  bool used_popen = false;
  int retval = 0;

  if (!filename)
    return 1;			/* nothing to do */

  Message (_("Importing PCB netlist %s\n"), filename);

  if (EMPTY_STRING_P (Settings.RatCommand))
    {
      fp = fopen (filename, "r");
      if (!fp)
	{
	  Message("Cannot open %s for reading", filename);
	  return 1;
	}
    }
  else
    {
      used_popen = true;
      free (command);
      command = EvaluateFilename (Settings.RatCommand,
				  Settings.RatPath, filename, NULL);

      /* open pipe to stdout of command */
      if (*command == '\0' || (fp = popen (command, "r")) == NULL)
	{
	  PopenErrorMessage (command);
	  return 1;
	}
    }
  lines = 0;
  /* kind = 0  is net name
   * kind = 1  is route style name
   * kind = 2  is connection
   */
  kind = 0;
  while (fgets (inputline, MAX_NETLIST_LINE_LENGTH, fp))
    {
      size_t len = strlen (inputline);
      /* check for maximum length line */
      if (len)
	{
	  if (inputline[--len] != '\n')
	    Message (_("Line length (%i) exceeded in netlist file.\n"
		       "additional characters will be ignored.\n"),
		     MAX_NETLIST_LINE_LENGTH);
	  else
	    inputline[len] = '\0';
	}
      continued = (inputline[len - 1] == '\\') ? true : false;
      if (continued)
	inputline[len - 1] = '\0';
      lines++;
      i = 0;
      while (inputline[i] != '\0')
	{
	  j = 0;
	  /* skip leading blanks */
	  while (inputline[i] != '\0' && BLANK (inputline[i]))
	    i++;
	  if (kind == 0)
	    {
	      /* add two spaces for included/unincluded */
	      temp[j++] = ' ';
	      temp[j++] = ' ';
	    }
	  while (!BLANK (inputline[i]))
	    temp[j++] = inputline[i++];
	  temp[j] = '\0';
	  while (inputline[i] != '\0' && BLANK (inputline[i]))
	    i++;
	  if (kind == 0)
	    {
	      menu = GetLibraryMenuMemory (&PCB->NetlistLib);
	      menu->Name = strdup (temp);
	      menu->flag = 1;
	      kind++;
	    }
	  else
	    {
	      if (kind == 1 && strchr (temp, '-') == NULL)
		{
		  kind++;
		  menu->Style = strdup (temp);
		}
	      else
		{
		  entry = GetLibraryEntryMemory (menu);
		  entry->ListEntry = strdup (temp);
		}
	    }
	}
      if (!continued)
	kind = 0;
    }
  if (!lines)
    {
      Message (_("Empty netlist file!\n"));
      retval = 1;
    }
  if (used_popen)
    pclose (fp);
  else
    fclose (fp);
  sort_netlist ();
  return retval;
}
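
For context, ReadNetlist() parses PCB's plain-text rats-nest netlist format: each logical line carries a net name (kind 0), an optional route-style name containing no '-' (kind 1), and then REFDES-PIN connections (kind 2), with a trailing backslash continuing a long line. A small hypothetical input in that format:

GND  Signal  U1-7 U2-7 C1-2 \
	C2-2 C3-2
VCC  Power   U1-14 U2-14

Because kind is reset to 0 only when a line does not end in '\', the continuation line contributes additional connections to the same net rather than starting a new one.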
Example no. 5
int main(void)
{
	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
	BLANK();
	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
	BLANK();
	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
	BLANK();
	DEFINE(__TI_task, offsetof(struct thread_info, task));
	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
	DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
	DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
	DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
	DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
	BLANK();
	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
	DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
	DEFINE(__PT_SVCNR, offsetof(struct pt_regs, svcnr));
	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
	BLANK();
	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
	DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
	DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
	BLANK();
	/* timeval/timezone offsets for use by vdso */
	DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count));
	DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
	DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
	DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
	DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
	DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
	/* constants used by the vdso */
	DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
	BLANK();
	/* constants for SIGP */
	DEFINE(__SIGP_STOP, sigp_stop);
	DEFINE(__SIGP_RESTART, sigp_restart);
	DEFINE(__SIGP_SENSE, sigp_sense);
	DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset);
	BLANK();
	/* lowcore offsets */
	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
	DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr));
	DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
	DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
	DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
	DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
	DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid));
	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
	DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id));
	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id));
	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
	DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
	BLANK();
	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
	DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
	DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
	DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
	DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
	DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
	DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
	DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
	DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
	DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
	DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
	DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
	DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer));
	DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
	DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
	DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
	DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
	DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
	DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
	DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
	DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area));
	DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
	DEFINE(__LC_SAVE_AREA_64, offsetof(struct _lowcore, save_area_64));
#ifdef CONFIG_32BIT
	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
#else /* CONFIG_32BIT */
	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
#endif /* CONFIG_32BIT */
	return 0;
}
Example no. 6
int main(void)
{
  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
#ifdef CONFIG_CC_STACKPROTECTOR
  DEFINE(TSK_STACK_CANARY,	offsetof(struct task_struct, stack_canary));
#endif
  BLANK();
  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
  DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
  DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
  DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
  DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
#ifdef CONFIG_VFP
  DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
#ifdef CONFIG_SMP
  DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
#endif
#endif
#ifdef CONFIG_ARM_THUMBEE
  DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
#endif
#ifdef CONFIG_IWMMXT
  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
#endif
#ifdef CONFIG_CRUNCH
  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
#endif
  BLANK();
  DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
  DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
  DEFINE(S_R2,			offsetof(struct pt_regs, ARM_r2));
  DEFINE(S_R3,			offsetof(struct pt_regs, ARM_r3));
  DEFINE(S_R4,			offsetof(struct pt_regs, ARM_r4));
  DEFINE(S_R5,			offsetof(struct pt_regs, ARM_r5));
  DEFINE(S_R6,			offsetof(struct pt_regs, ARM_r6));
  DEFINE(S_R7,			offsetof(struct pt_regs, ARM_r7));
  DEFINE(S_R8,			offsetof(struct pt_regs, ARM_r8));
  DEFINE(S_R9,			offsetof(struct pt_regs, ARM_r9));
  DEFINE(S_R10,			offsetof(struct pt_regs, ARM_r10));
  DEFINE(S_FP,			offsetof(struct pt_regs, ARM_fp));
  DEFINE(S_IP,			offsetof(struct pt_regs, ARM_ip));
  DEFINE(S_SP,			offsetof(struct pt_regs, ARM_sp));
  DEFINE(S_LR,			offsetof(struct pt_regs, ARM_lr));
  DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
  DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
  DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
  BLANK();
#ifdef CONFIG_CACHE_L2X0
  DEFINE(L2X0_R_PHY_BASE,	offsetof(struct l2x0_regs, phy_base));
  DEFINE(L2X0_R_AUX_CTRL,	offsetof(struct l2x0_regs, aux_ctrl));
  DEFINE(L2X0_R_TAG_LATENCY,	offsetof(struct l2x0_regs, tag_latency));
  DEFINE(L2X0_R_DATA_LATENCY,	offsetof(struct l2x0_regs, data_latency));
  DEFINE(L2X0_R_FILTER_START,	offsetof(struct l2x0_regs, filter_start));
  DEFINE(L2X0_R_FILTER_END,	offsetof(struct l2x0_regs, filter_end));
  DEFINE(L2X0_R_PREFETCH_CTRL,	offsetof(struct l2x0_regs, prefetch_ctrl));
  DEFINE(L2X0_R_PWR_CTRL,	offsetof(struct l2x0_regs, pwr_ctrl));
  BLANK();
#endif
#ifdef CONFIG_CPU_HAS_ASID
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
  BLANK();
#endif
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC,	       	VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
  BLANK();
  DEFINE(SYS_ERROR0,		0x9f0000);
  BLANK();
  DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
  DEFINE(MACHINFO_TYPE,		offsetof(struct machine_desc, nr));
  DEFINE(MACHINFO_NAME,		offsetof(struct machine_desc, name));
  BLANK();
  DEFINE(PROC_INFO_SZ,		sizeof(struct proc_info_list));
  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
  DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
  DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
  BLANK();
#ifdef MULTI_DABORT
  DEFINE(PROCESSOR_DABT_FUNC,	offsetof(struct processor, _data_abort));
#endif
#ifdef MULTI_PABORT
  DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
#endif
#ifdef MULTI_CPU
  DEFINE(CPU_SLEEP_SIZE,	offsetof(struct processor, suspend_size));
  DEFINE(CPU_DO_SUSPEND,	offsetof(struct processor, do_suspend));
  DEFINE(CPU_DO_RESUME,		offsetof(struct processor, do_resume));
#endif
#ifdef MULTI_CACHE
  DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
#endif
  BLANK();
  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
#ifdef CONFIG_HIBERNATION
  BLANK();
  DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
  DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
  DEFINE(PBE_NEXT, offsetof(struct pbe, next));
  DEFINE(SAVED_CONTEXT_R0, offsetof(struct saved_context, r0));
  DEFINE(SAVED_CONTEXT_USR, offsetof(struct saved_context, usr_sp));
  DEFINE(SAVED_CONTEXT_SVR, offsetof(struct saved_context, svr_sp));
  DEFINE(SAVED_CONTEXT_ABT, offsetof(struct saved_context, abt_sp));
  DEFINE(SAVED_CONTEXT_IRQ, offsetof(struct saved_context, irq_sp));
  DEFINE(SAVED_CONTEXT_UND, offsetof(struct saved_context, und_sp));
  DEFINE(SAVED_CONTEXT_FIQ, offsetof(struct saved_context, fiq_sp));
  DEFINE(SAVED_CONTEXT_CP15, offsetof(struct saved_context, cp15_control));
#endif

  return 0;
}
Example no. 7
int main(void)
{
	/* struct task_struct */
	OFFSET(TASK_THREAD, task_struct, thread);
	BLANK();

	/* struct thread_struct */
	OFFSET(THREAD_KSP, thread_struct, ksp);
	OFFSET(THREAD_KPSR, thread_struct, kpsr);
	BLANK();

	/* struct pt_regs */
	OFFSET(PT_ORIG_R2, pt_regs, orig_r2);
	OFFSET(PT_ORIG_R7, pt_regs, orig_r7);

	OFFSET(PT_R1, pt_regs, r1);
	OFFSET(PT_R2, pt_regs, r2);
	OFFSET(PT_R3, pt_regs, r3);
	OFFSET(PT_R4, pt_regs, r4);
	OFFSET(PT_R5, pt_regs, r5);
	OFFSET(PT_R6, pt_regs, r6);
	OFFSET(PT_R7, pt_regs, r7);
	OFFSET(PT_R8, pt_regs, r8);
	OFFSET(PT_R9, pt_regs, r9);
	OFFSET(PT_R10, pt_regs, r10);
	OFFSET(PT_R11, pt_regs, r11);
	OFFSET(PT_R12, pt_regs, r12);
	OFFSET(PT_R13, pt_regs, r13);
	OFFSET(PT_R14, pt_regs, r14);
	OFFSET(PT_R15, pt_regs, r15);
	OFFSET(PT_EA, pt_regs, ea);
	OFFSET(PT_RA, pt_regs, ra);
	OFFSET(PT_FP, pt_regs, fp);
	OFFSET(PT_SP, pt_regs, sp);
	OFFSET(PT_GP, pt_regs, gp);
	OFFSET(PT_ESTATUS, pt_regs, estatus);
	DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
	BLANK();

	/* struct switch_stack */
	OFFSET(SW_R16, switch_stack, r16);
	OFFSET(SW_R17, switch_stack, r17);
	OFFSET(SW_R18, switch_stack, r18);
	OFFSET(SW_R19, switch_stack, r19);
	OFFSET(SW_R20, switch_stack, r20);
	OFFSET(SW_R21, switch_stack, r21);
	OFFSET(SW_R22, switch_stack, r22);
	OFFSET(SW_R23, switch_stack, r23);
	OFFSET(SW_FP, switch_stack, fp);
	OFFSET(SW_GP, switch_stack, gp);
	OFFSET(SW_RA, switch_stack, ra);
	DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
	BLANK();

	/* struct thread_info */
	OFFSET(TI_FLAGS, thread_info, flags);
	OFFSET(TI_PREEMPT_COUNT, thread_info, preempt_count);
	BLANK();

	return 0;
}
Example no. 8
void foo(void)
{
	OFFSET(SIGCONTEXT_eax, sigcontext, eax);
	OFFSET(SIGCONTEXT_ebx, sigcontext, ebx);
	OFFSET(SIGCONTEXT_ecx, sigcontext, ecx);
	OFFSET(SIGCONTEXT_edx, sigcontext, edx);
	OFFSET(SIGCONTEXT_esi, sigcontext, esi);
	OFFSET(SIGCONTEXT_edi, sigcontext, edi);
	OFFSET(SIGCONTEXT_ebp, sigcontext, ebp);
	OFFSET(SIGCONTEXT_esp, sigcontext, esp);
	OFFSET(SIGCONTEXT_eip, sigcontext, eip);
	BLANK();

	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
	OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
	OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
	OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
	OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
	OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
	OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
	BLANK();

	OFFSET(TI_task, thread_info, task);
	OFFSET(TI_exec_domain, thread_info, exec_domain);
	OFFSET(TI_flags, thread_info, flags);
	OFFSET(TI_status, thread_info, status);
	OFFSET(TI_preempt_count, thread_info, preempt_count);
	OFFSET(TI_addr_limit, thread_info, addr_limit);
	OFFSET(TI_restart_block, thread_info, restart_block);
	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
	BLANK();

	OFFSET(GDS_size, Xgt_desc_struct, size);
	OFFSET(GDS_address, Xgt_desc_struct, address);
	OFFSET(GDS_pad, Xgt_desc_struct, pad);
	BLANK();

	OFFSET(PT_EBX, pt_regs, ebx);
	OFFSET(PT_ECX, pt_regs, ecx);
	OFFSET(PT_EDX, pt_regs, edx);
	OFFSET(PT_ESI, pt_regs, esi);
	OFFSET(PT_EDI, pt_regs, edi);
	OFFSET(PT_EBP, pt_regs, ebp);
	OFFSET(PT_EAX, pt_regs, eax);
	OFFSET(PT_DS,  pt_regs, xds);
	OFFSET(PT_ES,  pt_regs, xes);
	OFFSET(PT_GS,  pt_regs, xgs);
	OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
	OFFSET(PT_EIP, pt_regs, eip);
	OFFSET(PT_CS,  pt_regs, xcs);
	OFFSET(PT_EFLAGS, pt_regs, eflags);
	OFFSET(PT_OLDESP, pt_regs, esp);
	OFFSET(PT_OLDSS,  pt_regs, xss);
	BLANK();

	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
	OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
	BLANK();

	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

	/* Offset from the sysenter stack to tss.esp0 */
	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
		 sizeof(struct tss_struct));

	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
	DEFINE(VDSO_PRELINK, VDSO_PRELINK);

	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);

	BLANK();
 	OFFSET(PDA_cpu, i386_pda, cpu_number);
	OFFSET(PDA_pcurrent, i386_pda, pcurrent);

#ifdef CONFIG_PARAVIRT
	BLANK();
	OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
	OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
	OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
	OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
#endif
}
Example no. 9
void foo(void)
{
	OFFSET(REGS_A16,	pt_regs, a16);
	OFFSET(REGS_A17,	pt_regs, a17);
	OFFSET(REGS_A18,	pt_regs, a18);
	OFFSET(REGS_A19,	pt_regs, a19);
	OFFSET(REGS_A20,	pt_regs, a20);
	OFFSET(REGS_A21,	pt_regs, a21);
	OFFSET(REGS_A22,	pt_regs, a22);
	OFFSET(REGS_A23,	pt_regs, a23);
	OFFSET(REGS_A24,	pt_regs, a24);
	OFFSET(REGS_A25,	pt_regs, a25);
	OFFSET(REGS_A26,	pt_regs, a26);
	OFFSET(REGS_A27,	pt_regs, a27);
	OFFSET(REGS_A28,	pt_regs, a28);
	OFFSET(REGS_A29,	pt_regs, a29);
	OFFSET(REGS_A30,	pt_regs, a30);
	OFFSET(REGS_A31,	pt_regs, a31);

	OFFSET(REGS_B16,	pt_regs, b16);
	OFFSET(REGS_B17,	pt_regs, b17);
	OFFSET(REGS_B18,	pt_regs, b18);
	OFFSET(REGS_B19,	pt_regs, b19);
	OFFSET(REGS_B20,	pt_regs, b20);
	OFFSET(REGS_B21,	pt_regs, b21);
	OFFSET(REGS_B22,	pt_regs, b22);
	OFFSET(REGS_B23,	pt_regs, b23);
	OFFSET(REGS_B24,	pt_regs, b24);
	OFFSET(REGS_B25,	pt_regs, b25);
	OFFSET(REGS_B26,	pt_regs, b26);
	OFFSET(REGS_B27,	pt_regs, b27);
	OFFSET(REGS_B28,	pt_regs, b28);
	OFFSET(REGS_B29,	pt_regs, b29);
	OFFSET(REGS_B30,	pt_regs, b30);
	OFFSET(REGS_B31,	pt_regs, b31);

	OFFSET(REGS_A0,		pt_regs, a0);
	OFFSET(REGS_A1,		pt_regs, a1);
	OFFSET(REGS_A2,		pt_regs, a2);
	OFFSET(REGS_A3,		pt_regs, a3);
	OFFSET(REGS_A4,		pt_regs, a4);
	OFFSET(REGS_A5,		pt_regs, a5);
	OFFSET(REGS_A6,		pt_regs, a6);
	OFFSET(REGS_A7,		pt_regs, a7);
	OFFSET(REGS_A8,		pt_regs, a8);
	OFFSET(REGS_A9,		pt_regs, a9);
	OFFSET(REGS_A10,	pt_regs, a10);
	OFFSET(REGS_A11,	pt_regs, a11);
	OFFSET(REGS_A12,	pt_regs, a12);
	OFFSET(REGS_A13,	pt_regs, a13);
	OFFSET(REGS_A14,	pt_regs, a14);
	OFFSET(REGS_A15,	pt_regs, a15);

	OFFSET(REGS_B0,		pt_regs, b0);
	OFFSET(REGS_B1,		pt_regs, b1);
	OFFSET(REGS_B2,		pt_regs, b2);
	OFFSET(REGS_B3,		pt_regs, b3);
	OFFSET(REGS_B4,		pt_regs, b4);
	OFFSET(REGS_B5,		pt_regs, b5);
	OFFSET(REGS_B6,		pt_regs, b6);
	OFFSET(REGS_B7,		pt_regs, b7);
	OFFSET(REGS_B8,		pt_regs, b8);
	OFFSET(REGS_B9,		pt_regs, b9);
	OFFSET(REGS_B10,	pt_regs, b10);
	OFFSET(REGS_B11,	pt_regs, b11);
	OFFSET(REGS_B12,	pt_regs, b12);
	OFFSET(REGS_B13,	pt_regs, b13);
	OFFSET(REGS_DP,		pt_regs, dp);
	OFFSET(REGS_SP,		pt_regs, sp);

	OFFSET(REGS_TSR,	pt_regs, tsr);
	OFFSET(REGS_ORIG_A4,	pt_regs, orig_a4);

	DEFINE(REGS__END,	sizeof(struct pt_regs));
	BLANK();

	OFFSET(THREAD_PC,	thread_struct, pc);
	OFFSET(THREAD_B15_14,	thread_struct, b15_14);
	OFFSET(THREAD_A15_14,	thread_struct, a15_14);
	OFFSET(THREAD_B13_12,	thread_struct, b13_12);
	OFFSET(THREAD_A13_12,	thread_struct, a13_12);
	OFFSET(THREAD_B11_10,	thread_struct, b11_10);
	OFFSET(THREAD_A11_10,	thread_struct, a11_10);
	OFFSET(THREAD_RICL_ICL,	thread_struct, ricl_icl);
	BLANK();

	OFFSET(TASK_STATE,	task_struct, state);
	BLANK();

	OFFSET(THREAD_INFO_FLAGS,	thread_info, flags);
	OFFSET(THREAD_INFO_PREEMPT_COUNT, thread_info, preempt_count);
	BLANK();

	/* These would be unnecessary if we ran asm files
	 * through the preprocessor.
	 */
	DEFINE(KTHREAD_SIZE, THREAD_SIZE);
	DEFINE(KTHREAD_SHIFT, THREAD_SHIFT);
	DEFINE(KTHREAD_START_SP, THREAD_START_SP);
	DEFINE(ENOSYS_, ENOSYS);
	DEFINE(NR_SYSCALLS_, __NR_syscalls);

	DEFINE(_TIF_SYSCALL_TRACE, (1<<TIF_SYSCALL_TRACE));
	DEFINE(_TIF_NOTIFY_RESUME, (1<<TIF_NOTIFY_RESUME));
	DEFINE(_TIF_SIGPENDING, (1<<TIF_SIGPENDING));
	DEFINE(_TIF_NEED_RESCHED, (1<<TIF_NEED_RESCHED));
	DEFINE(_TIF_POLLING_NRFLAG, (1<<TIF_POLLING_NRFLAG));

	DEFINE(_TIF_ALLWORK_MASK, TIF_ALLWORK_MASK);
	DEFINE(_TIF_WORK_MASK, TIF_WORK_MASK);
}
Example no. 10
void output_thread_fpu_defines(void)
{
	OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
	OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
	OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
	OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
	OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
	OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
	OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
	OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
	OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
	OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
	OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
	OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
	OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
	OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
	OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
	OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
	OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
	OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
	OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
	OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
	OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
	OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
	OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
	OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
	OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
	OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
	OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
	OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
	OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
	OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
	OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
	OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);

	/* the least significant 64 bits of each FP register */
	OFFSET(THREAD_FPR0_LS64, task_struct,
	       thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR1_LS64, task_struct,
	       thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR2_LS64, task_struct,
	       thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR3_LS64, task_struct,
	       thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR4_LS64, task_struct,
	       thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR5_LS64, task_struct,
	       thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR6_LS64, task_struct,
	       thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR7_LS64, task_struct,
	       thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR8_LS64, task_struct,
	       thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR9_LS64, task_struct,
	       thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR10_LS64, task_struct,
	       thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR11_LS64, task_struct,
	       thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR12_LS64, task_struct,
	       thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR13_LS64, task_struct,
	       thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR14_LS64, task_struct,
	       thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR15_LS64, task_struct,
	       thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR16_LS64, task_struct,
	       thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR17_LS64, task_struct,
	       thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR18_LS64, task_struct,
	       thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR19_LS64, task_struct,
	       thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR20_LS64, task_struct,
	       thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR21_LS64, task_struct,
	       thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR22_LS64, task_struct,
	       thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR23_LS64, task_struct,
	       thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR24_LS64, task_struct,
	       thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR25_LS64, task_struct,
	       thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR26_LS64, task_struct,
	       thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR27_LS64, task_struct,
	       thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR28_LS64, task_struct,
	       thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR29_LS64, task_struct,
	       thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR30_LS64, task_struct,
	       thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
	OFFSET(THREAD_FPR31_LS64, task_struct,
	       thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);

	OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
	OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
	BLANK();
}
int main(void)
{
#ifdef CONFIG_PARAVIRT
	OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
	BLANK();
#endif

#if defined(CONFIG_KVM_GUEST) && defined(CONFIG_PARAVIRT_SPINLOCKS)
	OFFSET(KVM_STEAL_TIME_preempted, kvm_steal_time, preempted);
	BLANK();
#endif

#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
	ENTRY(bx);
	ENTRY(cx);
	ENTRY(dx);
	ENTRY(sp);
	ENTRY(bp);
	ENTRY(si);
	ENTRY(di);
	ENTRY(r8);
	ENTRY(r9);
	ENTRY(r10);
	ENTRY(r11);
	ENTRY(r12);
	ENTRY(r13);
	ENTRY(r14);
	ENTRY(r15);
	ENTRY(flags);
	BLANK();
#undef ENTRY

#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry)
	ENTRY(cr0);
	ENTRY(cr2);
	ENTRY(cr3);
	ENTRY(cr4);
	ENTRY(cr8);
	ENTRY(gdt_desc);
	BLANK();
#undef ENTRY

	OFFSET(TSS_ist, tss_struct, x86_tss.ist);
	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
	BLANK();

#ifdef CONFIG_CC_STACKPROTECTOR
	DEFINE(stack_canary_offset, offsetof(union irq_stack_union, stack_canary));
	BLANK();
#endif

	DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
	DEFINE(NR_syscalls, sizeof(syscalls_64));

	DEFINE(__NR_syscall_compat_max, sizeof(syscalls_ia32) - 1);
	DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));

	return 0;
}
Example no. 12
int main(void)
{
	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
	BLANK();
	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
	BLANK();
	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
	BLANK();
	DEFINE(__TI_task, offsetof(struct thread_info, task));
	DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
	DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
	DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
	DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
	DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
	DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
	BLANK();
	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
	DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm));
	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
	DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags));
	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
	BLANK();
	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
	DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
	DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
	BLANK();
	/* timeval/timezone offsets for use by vdso */
	DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count));
	DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
	DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
	DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
	DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
	DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
	DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
	DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec));
	DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec));
	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
	DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
	DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
	/* constants used by the vdso */
	DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
	DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
	DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
	DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
	DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
	BLANK();
	/* idle data offsets */
	DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
	DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
	DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
	DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
	/* lowcore offsets */
	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
	DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
	DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
	DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
	DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
	DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
	DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
	DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
	DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num));
	DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code));
	DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid));
	DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
	DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id));
	DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
	DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id));
	DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id));
	DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code));
	DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
	DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
	DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
	DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
	DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
	DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
	DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code));
	DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
	DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
	DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
	DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
	DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
	DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
	DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
	DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
	DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
	BLANK();
	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
	DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags));
	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
	DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
	DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
	DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
	DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
	DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
	DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer));
	DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
	DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
	DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
	DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
	DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
	DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
	DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
	DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
	DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
	BLANK();
	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
	DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
	DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
	DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area));
	DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
	DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
	DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
	DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
#ifdef CONFIG_32BIT
	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
#else /* CONFIG_32BIT */
	DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
	DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
	DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
	DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
	DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
#endif /* CONFIG_32BIT */
	return 0;
}
Example no. 13
void foo(void)
{
	DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
	DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
	DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
	DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
	DEFINE(IA64_SIGINFO_SIZE, sizeof (struct siginfo));
	DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));

	BLANK();

	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));

	BLANK();

	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
	DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
	DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
	DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));
	DEFINE(IA64_THREAD_INFO_CPU_OFFSET, offsetof (struct thread_info, cpu));

	BLANK();

	DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));

	BLANK();

	DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
							     group_stop_count));
	DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending));

	BLANK();

	DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
	DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
	DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
	DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
	DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
	DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
	DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
	DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
	DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
	DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
	DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
	DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
	DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
	DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
	DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));

	DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
	DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
	DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
	DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
	DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
	DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
	DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
	DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
	DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
	DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
	DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
	DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
	DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
	DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
	DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
	DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
	DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
	DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
	DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
	DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
	DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
	DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
	DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
	DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
	DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
	DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
	DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
	DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
	DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
	DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
	DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));

	BLANK();

	DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
	DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
	DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
	DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
	DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
	DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
	DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
	DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
	DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
	DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
	DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
	DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
	DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
	DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
	DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
	DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
	DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
	DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
	DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
	DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
	DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
	DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
	DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
	DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
	DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
	DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
	DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
	DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
	DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
	DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
	DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
	DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
	DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
	DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
	DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
	DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
	DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
	DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
	DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
	DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
	DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
	DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));

	BLANK();

	DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
	DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
	DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
	DEFINE(IA64_SIGCONTEXT_AR_RNAT_OFFSET, offsetof (struct sigcontext, sc_ar_rnat));
	DEFINE(IA64_SIGCONTEXT_AR_UNAT_OFFSET, offsetof (struct sigcontext, sc_ar_unat));
	DEFINE(IA64_SIGCONTEXT_B0_OFFSET, offsetof (struct sigcontext, sc_br[0]));
	DEFINE(IA64_SIGCONTEXT_CFM_OFFSET, offsetof (struct sigcontext, sc_cfm));
	DEFINE(IA64_SIGCONTEXT_FLAGS_OFFSET, offsetof (struct sigcontext, sc_flags));
	DEFINE(IA64_SIGCONTEXT_FR6_OFFSET, offsetof (struct sigcontext, sc_fr[6]));
	DEFINE(IA64_SIGCONTEXT_PR_OFFSET, offsetof (struct sigcontext, sc_pr));
	DEFINE(IA64_SIGCONTEXT_R12_OFFSET, offsetof (struct sigcontext, sc_gr[12]));
	DEFINE(IA64_SIGCONTEXT_RBS_BASE_OFFSET,offsetof (struct sigcontext, sc_rbs_base));
	DEFINE(IA64_SIGCONTEXT_LOADRS_OFFSET, offsetof (struct sigcontext, sc_loadrs));

	BLANK();

	DEFINE(IA64_SIGPENDING_SIGNAL_OFFSET, offsetof (struct sigpending, signal));

	BLANK();

	DEFINE(IA64_SIGFRAME_ARG0_OFFSET, offsetof (struct sigframe, arg0));
	DEFINE(IA64_SIGFRAME_ARG1_OFFSET, offsetof (struct sigframe, arg1));
	DEFINE(IA64_SIGFRAME_ARG2_OFFSET, offsetof (struct sigframe, arg2));
	DEFINE(IA64_SIGFRAME_HANDLER_OFFSET, offsetof (struct sigframe, handler));
	DEFINE(IA64_SIGFRAME_SIGCONTEXT_OFFSET, offsetof (struct sigframe, sc));
	BLANK();
	/* for assembly files which can't include sched.h: */
	DEFINE(IA64_CLONE_VFORK, CLONE_VFORK);
	DEFINE(IA64_CLONE_VM, CLONE_VM);

	BLANK();
	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));

	DEFINE(CLONE_SETTLS_BIT, 19);
#if CLONE_SETTLS != (1<<19)
# error "CLONE_SETTLS_BIT incorrect, please fix"
#endif

	BLANK();
	DEFINE(IA64_MCA_TLB_INFO_SIZE, sizeof (struct ia64_mca_tlb_info));
	/* used by head.S */
	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));

	BLANK();
	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
	DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift));
	DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc));
	DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset));
	DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle));
	DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter));
	DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter));
	DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask));
	DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU);
	DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
	DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
}
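For context, the DEFINE/OFFSET/BLANK calls used throughout these examples are commonly implemented roughly as below (in current Linux trees they live in include/linux/kbuild.h; the exact text here is an approximation, not a verbatim copy, and struct demo_regs / demo_offsets are invented for illustration). Compiling such a file with the compiler's -S option leaves "->SYMBOL value" markers in the assembly output, which the build system then converts into #define lines in the generated asm-offsets.h.

#include <stddef.h>			/* offsetof */

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

#define BLANK() \
	asm volatile("\n.ascii \"->\"" : : )

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct demo_regs {			/* invented register frame */
	unsigned long ip;
	unsigned long sp;
	unsigned long flags;
};

void demo_offsets(void)
{
	DEFINE(DEMO_REGS_SIZE, sizeof(struct demo_regs));
	BLANK();
	OFFSET(DEMO_REGS_IP, demo_regs, ip);
	OFFSET(DEMO_REGS_SP, demo_regs, sp);
	OFFSET(DEMO_REGS_FLAGS, demo_regs, flags);
}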
Example no. 14
0
void __dummy__(void)
{
    OFFSET(UREGS_eax, struct cpu_user_regs, eax);
    OFFSET(UREGS_ebx, struct cpu_user_regs, ebx);
    OFFSET(UREGS_ecx, struct cpu_user_regs, ecx);
    OFFSET(UREGS_edx, struct cpu_user_regs, edx);
    OFFSET(UREGS_esi, struct cpu_user_regs, esi);
    OFFSET(UREGS_edi, struct cpu_user_regs, edi);
    OFFSET(UREGS_esp, struct cpu_user_regs, esp);
    OFFSET(UREGS_ebp, struct cpu_user_regs, ebp);
    OFFSET(UREGS_eip, struct cpu_user_regs, eip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
    OFFSET(UREGS_es, struct cpu_user_regs, es);
    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp);
    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_sel, struct vcpu,
           arch.guest_context.event_callback_cs);
    OFFSET(VCPU_event_addr, struct vcpu, 
           arch.guest_context.event_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.guest_context.failsafe_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.guest_context.failsafe_callback_eip);
    OFFSET(VCPU_kernel_ss, struct vcpu,
           arch.guest_context.kernel_ss);
    OFFSET(VCPU_kernel_sp, struct vcpu,
           arch.guest_context.kernel_sp);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    BLANK();

    OFFSET(TSS_ss0, struct tss_struct, ss0);
    OFFSET(TSS_esp0, struct tss_struct, esp0);
    OFFSET(TSS_ss1, struct tss_struct, ss1);
    OFFSET(TSS_esp1, struct tss_struct, esp1);
    DEFINE(TSS_sizeof, sizeof(struct tss_struct));
    BLANK();

    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
    BLANK();

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
#ifndef VMXASSIST
    OFFSET(VCPU_vmx_emul, struct vcpu, arch.hvm_vmx.vmxemul);
#endif
    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    BLANK();

    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
    BLANK();

    OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
    BLANK();

    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();

    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#if PERF_COUNTERS
    DEFINE(PERFC_hypercalls, PERFC_hypercalls);
    DEFINE(PERFC_exceptions, PERFC_exceptions);
    BLANK();
#endif

    DEFINE(FIXMAP_apic_base, fix_to_virt(FIX_APIC_BASE));
    BLANK();

    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
    BLANK();

    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
}
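To make concrete what these OFFSET/DEFINE lines capture, here is a small standalone program with an invented toy_user_regs structure (the layout is illustrative only, not the real cpu_user_regs). It also mirrors the UREGS_kernel_sizeof line above, which reuses the esp offset, apparently because a same-privilege trap frame on 32-bit x86 stops just before esp/ss.

#include <stdio.h>
#include <stddef.h>

struct toy_user_regs {			/* invented stand-in, not the real layout */
	unsigned int eax, ebx, ecx, edx, esi, edi, ebp;
	unsigned int error_code, entry_vector;
	unsigned int eip, cs, eflags;
	unsigned int esp, ss;		/* only pushed for traps from user mode */
};

int main(void)
{
	printf("TOY_UREGS_eip           = %zu\n",
	       offsetof(struct toy_user_regs, eip));
	printf("TOY_UREGS_eflags        = %zu\n",
	       offsetof(struct toy_user_regs, eflags));
	/* Mirrors UREGS_kernel_sizeof above: a same-privilege frame ends
	 * just before esp/ss, so its size equals the offset of esp. */
	printf("TOY_UREGS_kernel_sizeof = %zu\n",
	       offsetof(struct toy_user_regs, esp));
	printf("TOY_UREGS_user_sizeof   = %zu\n",
	       sizeof(struct toy_user_regs));
	return 0;
}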
int main(void)
{
#ifdef CONFIG_PARAVIRT
    OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
    OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
    OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
    OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
    BLANK();
#endif

#ifdef CONFIG_IA32_EMULATION
    OFFSET(TI_sysenter_return, thread_info, sysenter_return);
    BLANK();

#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry)
    ENTRY(ax);
    ENTRY(bx);
    ENTRY(cx);
    ENTRY(dx);
    ENTRY(si);
    ENTRY(di);
    ENTRY(bp);
    ENTRY(sp);
    ENTRY(ip);
    BLANK();
#undef ENTRY

    OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
    BLANK();
#endif

#define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
    ENTRY(bx);
    ENTRY(cx);
    ENTRY(dx);
    ENTRY(sp);
    ENTRY(bp);
    ENTRY(si);
    ENTRY(di);
    ENTRY(r8);
    ENTRY(r9);
    ENTRY(r10);
    ENTRY(r11);
    ENTRY(r12);
    ENTRY(r13);
    ENTRY(r14);
    ENTRY(r15);
    ENTRY(flags);
    BLANK();
#undef ENTRY

#define ENTRY(entry) OFFSET(saved_context_ ## entry, saved_context, entry)
    ENTRY(cr0);
    ENTRY(cr2);
    ENTRY(cr3);
    ENTRY(cr4);
    ENTRY(cr8);
    BLANK();
#undef ENTRY

    OFFSET(TSS_ist, tss_struct, x86_tss.ist);
    BLANK();

    DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
    DEFINE(NR_syscalls, sizeof(syscalls_64));

    DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1);
    DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));

    return 0;
}
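The __NR_syscall_max and NR_syscalls values above are taken with sizeof from arrays named syscalls_64 and syscalls_ia32. One common way to build such an array, and this is an assumption about the surrounding build rather than something visible in the snippet, is a char array with one designated initializer per syscall number, so that sizeof yields the highest number plus one. A self-contained toy version:

#include <stdio.h>

/* One designated initializer per (hypothetical) syscall number. */
#define TOY_SYSCALL(nr) [nr] = 1,

static const char toy_syscalls[] = {
	TOY_SYSCALL(0)		/* e.g. read  */
	TOY_SYSCALL(1)		/* e.g. write */
	TOY_SYSCALL(60)		/* e.g. exit  */
};

int main(void)
{
	/* Mirrors DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1) and
	 * DEFINE(NR_syscalls, sizeof(syscalls_64)) above. */
	printf("toy __NR_syscall_max = %zu\n", sizeof(toy_syscalls) - 1);
	printf("toy NR_syscalls      = %zu\n", sizeof(toy_syscalls));
	return 0;
}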
Example no. 16
0
void foo(void)
{
	DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
	DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
	DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
	DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
	DEFINE(IA64_SIGINFO_SIZE, sizeof (struct siginfo));
	DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));

	BUILD_BUG_ON(sizeof(struct upid) != 32);
	DEFINE(IA64_UPID_SHIFT, 5);

	BLANK();

	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
	DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
#endif

	BLANK();

	DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
	DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
	DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
	DEFINE(IA64_TASK_TGIDLINK_OFFSET, offsetof (struct task_struct, pids[PIDTYPE_PID].pid));
	DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level));
	DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0]));
	DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending));
	DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
	DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
	DEFINE(IA64_TASK_SIGHAND_OFFSET,offsetof (struct task_struct, sighand));
	DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal));
	DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
	DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
	DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));

	BLANK();

	DEFINE(IA64_SIGHAND_SIGLOCK_OFFSET,offsetof (struct sighand_struct, siglock));

	BLANK();

	DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct,
							     group_stop_count));
	DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending));

	BLANK();

	DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
	DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
	DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
	DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
	DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
	DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
	DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
	DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
	DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
	DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
	DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
	DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
	DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
	DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
	DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));

	DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
	DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
	DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
	DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
	DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
	DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
	DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
	DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
	DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
	DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
	DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
	DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
	DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
	DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
	DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
	DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
	DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
	DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
	DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
	DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
	DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
	DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
	DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
	DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
	DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
	DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
	DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
	DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
	DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
	DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
	DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
	DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
	DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
	DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
	DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));

	BLANK();

	DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
	DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
	DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
	DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
	DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
	DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
	DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
	DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
	DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
	DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
	DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
	DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
	DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
	DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
	DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
	DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
	DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
	DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
	DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
	DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
	DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
	DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
	DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
	DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
	DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
	DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
	DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
	DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
	DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
	DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
	DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
	DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
	DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
	DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
	DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
	DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
	DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
	DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
	DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
	DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
	DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
	DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));

	BLANK();

	DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
	DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
	DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
	DEFINE(IA64_SIGCONTEXT_AR_RNAT_OFFSET, offsetof (struct sigcontext, sc_ar_rnat));
	DEFINE(IA64_SIGCONTEXT_AR_UNAT_OFFSET, offsetof (struct sigcontext, sc_ar_unat));
	DEFINE(IA64_SIGCONTEXT_B0_OFFSET, offsetof (struct sigcontext, sc_br[0]));
	DEFINE(IA64_SIGCONTEXT_CFM_OFFSET, offsetof (struct sigcontext, sc_cfm));
	DEFINE(IA64_SIGCONTEXT_FLAGS_OFFSET, offsetof (struct sigcontext, sc_flags));
	DEFINE(IA64_SIGCONTEXT_FR6_OFFSET, offsetof (struct sigcontext, sc_fr[6]));
	DEFINE(IA64_SIGCONTEXT_PR_OFFSET, offsetof (struct sigcontext, sc_pr));
	DEFINE(IA64_SIGCONTEXT_R12_OFFSET, offsetof (struct sigcontext, sc_gr[12]));
	DEFINE(IA64_SIGCONTEXT_RBS_BASE_OFFSET,offsetof (struct sigcontext, sc_rbs_base));
	DEFINE(IA64_SIGCONTEXT_LOADRS_OFFSET, offsetof (struct sigcontext, sc_loadrs));

	BLANK();

	DEFINE(IA64_SIGPENDING_SIGNAL_OFFSET, offsetof (struct sigpending, signal));

	BLANK();

	DEFINE(IA64_SIGFRAME_ARG0_OFFSET, offsetof (struct sigframe, arg0));
	DEFINE(IA64_SIGFRAME_ARG1_OFFSET, offsetof (struct sigframe, arg1));
	DEFINE(IA64_SIGFRAME_ARG2_OFFSET, offsetof (struct sigframe, arg2));
	DEFINE(IA64_SIGFRAME_HANDLER_OFFSET, offsetof (struct sigframe, handler));
	DEFINE(IA64_SIGFRAME_SIGCONTEXT_OFFSET, offsetof (struct sigframe, sc));
	BLANK();
	/* for assembly files which can't include sched.h: */
	DEFINE(IA64_CLONE_VFORK, CLONE_VFORK);
	DEFINE(IA64_CLONE_VM, CLONE_VM);

	BLANK();
	DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET,
	       offsetof (struct cpuinfo_ia64, nsec_per_cyc));
	DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET,
	       offsetof (struct cpuinfo_ia64, ptce_base));
	DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET,
	       offsetof (struct cpuinfo_ia64, ptce_count));
	DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET,
	       offsetof (struct cpuinfo_ia64, ptce_stride));
	BLANK();
	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
	       offsetof (struct timespec, tv_nsec));

	DEFINE(CLONE_SETTLS_BIT, 19);
#if CLONE_SETTLS != (1<<19)
# error "CLONE_SETTLS_BIT incorrect, please fix"
#endif

	BLANK();
	DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
	       offsetof (struct ia64_mca_cpu, mca_stack));
	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
	       offsetof (struct ia64_mca_cpu, init_stack));
	BLANK();
	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
	       offsetof (struct ia64_sal_os_state, os_gp));
	DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
	       offsetof (struct ia64_sal_os_state, proc_state_param));
	DEFINE(IA64_SAL_OS_STATE_SAL_RA_OFFSET,
	       offsetof (struct ia64_sal_os_state, sal_ra));
	DEFINE(IA64_SAL_OS_STATE_SAL_GP_OFFSET,
	       offsetof (struct ia64_sal_os_state, sal_gp));
	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
	       offsetof (struct ia64_sal_os_state, pal_min_state));
	DEFINE(IA64_SAL_OS_STATE_OS_STATUS_OFFSET,
	       offsetof (struct ia64_sal_os_state, os_status));
	DEFINE(IA64_SAL_OS_STATE_CONTEXT_OFFSET,
	       offsetof (struct ia64_sal_os_state, context));
	DEFINE(IA64_SAL_OS_STATE_SIZE,
	       sizeof (struct ia64_sal_os_state));
	BLANK();

	DEFINE(IA64_PMSA_GR_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_gr));
	DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
	DEFINE(IA64_PMSA_PR_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_pr));
	DEFINE(IA64_PMSA_BR0_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_br0));
	DEFINE(IA64_PMSA_RSC_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_rsc));
	DEFINE(IA64_PMSA_IIP_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_iip));
	DEFINE(IA64_PMSA_IPSR_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_ipsr));
	DEFINE(IA64_PMSA_IFS_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_ifs));
	DEFINE(IA64_PMSA_XIP_OFFSET,
	       offsetof (struct pal_min_state_area_s, pmsa_xip));
	BLANK();

	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
	DEFINE(IA64_GTOD_SEQ_OFFSET,
	       offsetof (struct fsyscall_gtod_data_t, seq));
	DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, wall_time));
	DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, monotonic_time));
	DEFINE(IA64_CLKSRC_MASK_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_mask));
	DEFINE(IA64_CLKSRC_MULT_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_mult));
	DEFINE(IA64_CLKSRC_SHIFT_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_shift));
	DEFINE(IA64_CLKSRC_MMIO_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio));
	DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET,
		offsetof (struct fsyscall_gtod_data_t, clk_cycle_last));
	DEFINE(IA64_ITC_JITTER_OFFSET,
		offsetof (struct itc_jitter_data_t, itc_jitter));
	DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
		offsetof (struct itc_jitter_data_t, itc_lastcycle));

#ifdef CONFIG_XEN
	BLANK();

	DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
	DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);

#define DEFINE_MAPPED_REG_OFS(sym, field) \
	DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))

	DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
	DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
	DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
	DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
	DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
	DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
	DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
	DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
	DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
	DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
	DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
	DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
	DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
	DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
	DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
	DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
#endif /* CONFIG_XEN */
}
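The example above pairs BUILD_BUG_ON(sizeof(struct upid) != 32) with DEFINE(IA64_UPID_SHIFT, 5), and guards CLONE_SETTLS_BIT with an #if/#error check. The same kind of invariant can also be written with C11 _Static_assert; the structure and macro values below are local stand-ins invented for this sketch, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

struct toy_upid {		/* stand-in: four 8-byte fields = 32 bytes */
	uint64_t nr;
	uint64_t ns;
	uint64_t node_next;
	uint64_t node_prev;
};

#define TOY_UPID_SHIFT 5	/* log2(32), so element i sits at i << 5 */

_Static_assert(sizeof(struct toy_upid) == (1u << TOY_UPID_SHIFT),
	       "TOY_UPID_SHIFT out of sync with sizeof(struct toy_upid)");

/* Same idea for a flag/bit pair, mirroring the CLONE_SETTLS check above
 * (the value is a local copy used only for this illustration). */
#define TOY_CLONE_SETTLS	(1u << 19)
#define TOY_CLONE_SETTLS_BIT	19

_Static_assert(TOY_CLONE_SETTLS == (1u << TOY_CLONE_SETTLS_BIT),
	       "TOY_CLONE_SETTLS_BIT incorrect");

int main(void)
{
	printf("sizeof(struct toy_upid) = %zu\n", sizeof(struct toy_upid));
	return 0;
}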
Example no. 17
0
void foo(void)
{
	OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
	OFFSET(IA32_SIGCONTEXT_bx, sigcontext, bx);
	OFFSET(IA32_SIGCONTEXT_cx, sigcontext, cx);
	OFFSET(IA32_SIGCONTEXT_dx, sigcontext, dx);
	OFFSET(IA32_SIGCONTEXT_si, sigcontext, si);
	OFFSET(IA32_SIGCONTEXT_di, sigcontext, di);
	OFFSET(IA32_SIGCONTEXT_bp, sigcontext, bp);
	OFFSET(IA32_SIGCONTEXT_sp, sigcontext, sp);
	OFFSET(IA32_SIGCONTEXT_ip, sigcontext, ip);
	BLANK();

	OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
	OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
	OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
	OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask);
	OFFSET(CPUINFO_hard_math, cpuinfo_x86, hard_math);
	OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level);
	OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability);
	OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
	BLANK();

	OFFSET(TI_task, thread_info, task);
	OFFSET(TI_exec_domain, thread_info, exec_domain);
	OFFSET(TI_flags, thread_info, flags);
	OFFSET(TI_status, thread_info, status);
	OFFSET(TI_preempt_count, thread_info, preempt_count);
	OFFSET(TI_addr_limit, thread_info, addr_limit);
	OFFSET(TI_restart_block, thread_info, restart_block);
	OFFSET(TI_sysenter_return, thread_info, sysenter_return);
	OFFSET(TI_cpu, thread_info, cpu);
	BLANK();

	OFFSET(GDS_size, desc_ptr, size);
	OFFSET(GDS_address, desc_ptr, address);
	BLANK();

	OFFSET(PT_EBX, pt_regs, bx);
	OFFSET(PT_ECX, pt_regs, cx);
	OFFSET(PT_EDX, pt_regs, dx);
	OFFSET(PT_ESI, pt_regs, si);
	OFFSET(PT_EDI, pt_regs, di);
	OFFSET(PT_EBP, pt_regs, bp);
	OFFSET(PT_EAX, pt_regs, ax);
	OFFSET(PT_DS,  pt_regs, ds);
	OFFSET(PT_ES,  pt_regs, es);
	OFFSET(PT_FS,  pt_regs, fs);
	OFFSET(PT_GS,  pt_regs, gs);
	OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
	OFFSET(PT_EIP, pt_regs, ip);
	OFFSET(PT_CS,  pt_regs, cs);
	OFFSET(PT_EFLAGS, pt_regs, flags);
	OFFSET(PT_OLDESP, pt_regs, sp);
	OFFSET(PT_OLDSS,  pt_regs, ss);
	BLANK();

	OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
	OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
	BLANK();

	OFFSET(pbe_address, pbe, address);
	OFFSET(pbe_orig_address, pbe, orig_address);
	OFFSET(pbe_next, pbe, next);

	/* Offset from the sysenter stack to tss.sp0 */
	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
		 sizeof(struct tss_struct));

	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
	DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
	DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
	DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
	DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);

	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);

#ifdef CONFIG_PARAVIRT
	BLANK();
	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);

#ifdef CONFIG_PAX_KERNEXEC
	OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
#endif

#endif

#ifdef CONFIG_XEN
	BLANK();
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#endif

#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
	BLANK();
	OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
	OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
	OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);

	BLANK();
	OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
	OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
	OFFSET(LGUEST_PAGES_host_cr3, lguest_pages, state.host_cr3);
	OFFSET(LGUEST_PAGES_host_sp, lguest_pages, state.host_sp);
	OFFSET(LGUEST_PAGES_guest_gdt_desc, lguest_pages,state.guest_gdt_desc);
	OFFSET(LGUEST_PAGES_guest_idt_desc, lguest_pages,state.guest_idt_desc);
	OFFSET(LGUEST_PAGES_guest_gdt, lguest_pages, state.guest_gdt);
	OFFSET(LGUEST_PAGES_regs_trapnum, lguest_pages, regs.trapnum);
	OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
	OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
#endif

	BLANK();
	OFFSET(BP_scratch, boot_params, scratch);
	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
	OFFSET(BP_version, boot_params, hdr.version);
	OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);
}
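The TSS_sysenter_sp0 definition above subtracts sizeof(struct tss_struct) from an offsetof, i.e. it exports a negative displacement measured from the end of the structure (per the source comment, from the sysenter stack). A toy program with invented structures shows both the nested member designator and the subtraction:

#include <stdio.h>
#include <stddef.h>

struct toy_hw_tss {		/* invented hardware-TSS stand-in */
	unsigned int back_link;
	unsigned long sp0;	/* stack pointer loaded on ring transition */
	unsigned int ss0;
};

struct toy_tss {
	struct toy_hw_tss x86_tss;	/* nested like x86_tss above */
	unsigned char io_bitmap[32];
};

int main(void)
{
	/* offsetof accepts nested member designators, as used for
	 * x86_tss.sp0 in the TSS_sysenter_sp0 definition above. */
	printf("offsetof(toy_tss, x86_tss.sp0) = %zu\n",
	       offsetof(struct toy_tss, x86_tss.sp0));

	/* Subtracting the total size gives the (negative) displacement of
	 * sp0 from a pointer just past the structure. */
	printf("sp0 relative to end of toy_tss = %ld\n",
	       (long)offsetof(struct toy_tss, x86_tss.sp0)
		   - (long)sizeof(struct toy_tss));
	return 0;
}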
Example no. 18
0
int main(void)
{
	/* task struct offsets */
	OFFSET(__TASK_stack, task_struct, stack);
	OFFSET(__TASK_thread, task_struct, thread);
	OFFSET(__TASK_pid, task_struct, pid);
	BLANK();
	/* thread struct offsets */
	OFFSET(__THREAD_ksp, thread_struct, ksp);
	OFFSET(__THREAD_sysc_table,  thread_struct, sys_call_table);
	OFFSET(__THREAD_last_break, thread_struct, last_break);
	OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
	OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
	OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
	OFFSET(__THREAD_per_address, thread_struct, per_event.address);
	OFFSET(__THREAD_per_paid, thread_struct, per_event.paid);
	OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
	BLANK();
	/* thread info offsets */
	OFFSET(__TI_flags, task_struct, thread_info.flags);
	BLANK();
	/* pt_regs offsets */
	OFFSET(__PT_ARGS, pt_regs, args);
	OFFSET(__PT_PSW, pt_regs, psw);
	OFFSET(__PT_GPRS, pt_regs, gprs);
	OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
	OFFSET(__PT_INT_CODE, pt_regs, int_code);
	OFFSET(__PT_INT_PARM, pt_regs, int_parm);
	OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long);
	OFFSET(__PT_FLAGS, pt_regs, flags);
	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
	BLANK();
	/* stack_frame offsets */
	OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
	OFFSET(__SF_GPRS, stack_frame, gprs);
	OFFSET(__SF_EMPTY, stack_frame, empty1);
	OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[0]);
	OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[1]);
	OFFSET(__SF_SIE_REASON, stack_frame, empty1[2]);
	BLANK();
	/* timeval/timezone offsets for use by vdso */
	OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
	OFFSET(__VDSO_XTIME_STAMP, vdso_data, xtime_tod_stamp);
	OFFSET(__VDSO_XTIME_SEC, vdso_data, xtime_clock_sec);
	OFFSET(__VDSO_XTIME_NSEC, vdso_data, xtime_clock_nsec);
	OFFSET(__VDSO_XTIME_CRS_SEC, vdso_data, xtime_coarse_sec);
	OFFSET(__VDSO_XTIME_CRS_NSEC, vdso_data, xtime_coarse_nsec);
	OFFSET(__VDSO_WTOM_SEC, vdso_data, wtom_clock_sec);
	OFFSET(__VDSO_WTOM_NSEC, vdso_data, wtom_clock_nsec);
	OFFSET(__VDSO_WTOM_CRS_SEC, vdso_data, wtom_coarse_sec);
	OFFSET(__VDSO_WTOM_CRS_NSEC, vdso_data, wtom_coarse_nsec);
	OFFSET(__VDSO_TIMEZONE, vdso_data, tz_minuteswest);
	OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
	OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
	OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
	OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
	OFFSET(__VDSO_TS_END, vdso_data, ts_end);
	OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
	OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
	OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
	OFFSET(__VDSO_NODE_ID, vdso_per_cpu_data, node_id);
	BLANK();
	/* constants used by the vdso */
	DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
	DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
	DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
	DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
	DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
	BLANK();
	/* idle data offsets */
	OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
	OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit);
	OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
	OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
	BLANK();
	/* hardware defined lowcore locations 0x000 - 0x1ff */
	OFFSET(__LC_EXT_PARAMS, lowcore, ext_params);
	OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr);
	OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code);
	OFFSET(__LC_SVC_ILC, lowcore, svc_ilc);
	OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code);
	OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc);
	OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code);
	OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code);
	OFFSET(__LC_MON_CLASS_NR, lowcore, mon_class_num);
	OFFSET(__LC_PER_CODE, lowcore, per_code);
	OFFSET(__LC_PER_ATMID, lowcore, per_atmid);
	OFFSET(__LC_PER_ADDRESS, lowcore, per_address);
	OFFSET(__LC_EXC_ACCESS_ID, lowcore, exc_access_id);
	OFFSET(__LC_PER_ACCESS_ID, lowcore, per_access_id);
	OFFSET(__LC_OP_ACCESS_ID, lowcore, op_access_id);
	OFFSET(__LC_AR_MODE_ID, lowcore, ar_mode_id);
	OFFSET(__LC_TRANS_EXC_CODE, lowcore, trans_exc_code);
	OFFSET(__LC_MON_CODE, lowcore, monitor_code);
	OFFSET(__LC_SUBCHANNEL_ID, lowcore, subchannel_id);
	OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr);
	OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm);
	OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word);
	OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
	OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
	OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
	OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
	OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
	OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
	OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
	OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
	OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
	OFFSET(__LC_PGM_OLD_PSW, lowcore, program_old_psw);
	OFFSET(__LC_MCK_OLD_PSW, lowcore, mcck_old_psw);
	OFFSET(__LC_IO_OLD_PSW, lowcore, io_old_psw);
	OFFSET(__LC_RST_NEW_PSW, lowcore, restart_psw);
	OFFSET(__LC_EXT_NEW_PSW, lowcore, external_new_psw);
	OFFSET(__LC_SVC_NEW_PSW, lowcore, svc_new_psw);
	OFFSET(__LC_PGM_NEW_PSW, lowcore, program_new_psw);
	OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw);
	OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw);
	/* software defined lowcore locations 0x200 - 0xdff */
	OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync);
	OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async);
	OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart);
	OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags);
	OFFSET(__LC_RETURN_PSW, lowcore, return_psw);
	OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw);
	OFFSET(__LC_SYNC_ENTER_TIMER, lowcore, sync_enter_timer);
	OFFSET(__LC_ASYNC_ENTER_TIMER, lowcore, async_enter_timer);
	OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, mcck_enter_timer);
	OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer);
	OFFSET(__LC_USER_TIMER, lowcore, user_timer);
	OFFSET(__LC_SYSTEM_TIMER, lowcore, system_timer);
	OFFSET(__LC_STEAL_TIMER, lowcore, steal_timer);
	OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer);
	OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
	OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
	OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
	OFFSET(__LC_CURRENT, lowcore, current_task);
	OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
	OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
	OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
	OFFSET(__LC_RESTART_STACK, lowcore, restart_stack);
	OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
	OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
	OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
	OFFSET(__LC_USER_ASCE, lowcore, user_asce);
	OFFSET(__LC_LPP, lowcore, lpp);
	OFFSET(__LC_CURRENT_PID, lowcore, current_pid);
	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
	OFFSET(__LC_GMAP, lowcore, gmap);
	OFFSET(__LC_PASTE, lowcore, paste);
	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
	/* hardware defined lowcore locations 0x1000 - 0x18ff */
	OFFSET(__LC_MCESAD, lowcore, mcesad);
	OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
	OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area);
	OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area);
	OFFSET(__LC_PSW_SAVE_AREA, lowcore, psw_save_area);
	OFFSET(__LC_PREFIX_SAVE_AREA, lowcore, prefixreg_save_area);
	OFFSET(__LC_FP_CREG_SAVE_AREA, lowcore, fpt_creg_save_area);
	OFFSET(__LC_TOD_PROGREG_SAVE_AREA, lowcore, tod_progreg_save_area);
	OFFSET(__LC_CPU_TIMER_SAVE_AREA, lowcore, cpu_timer_save_area);
	OFFSET(__LC_CLOCK_COMP_SAVE_AREA, lowcore, clock_comp_save_area);
	OFFSET(__LC_AREGS_SAVE_AREA, lowcore, access_regs_save_area);
	OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
	OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
	BLANK();
	/* gmap/sie offsets */
	OFFSET(__GMAP_ASCE, gmap, asce);
	OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
	OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20);
	return 0;
}
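The s390 example above carves individually named constants (__SF_SIE_CONTROL, __SF_SIE_SAVEAREA, __SF_SIE_REASON) out of the spare empty1[] array by using array elements as offsetof member designators. A standalone toy version with an invented layout:

#include <stdio.h>
#include <stddef.h>

struct toy_stack_frame {	/* invented layout, not the real s390 one */
	unsigned long back_chain;
	unsigned long gprs[10];
	unsigned long empty1[4];	/* spare slots reused for SIE data */
};

int main(void)
{
	/* Array elements are valid offsetof member designators, which is
	 * how several names can alias successive slots of one array. */
	printf("TOY_SF_EMPTY        = %zu\n",
	       offsetof(struct toy_stack_frame, empty1));
	printf("TOY_SF_SIE_CONTROL  = %zu\n",
	       offsetof(struct toy_stack_frame, empty1[0]));
	printf("TOY_SF_SIE_SAVEAREA = %zu\n",
	       offsetof(struct toy_stack_frame, empty1[1]));
	printf("TOY_SF_SIE_REASON   = %zu\n",
	       offsetof(struct toy_stack_frame, empty1[2]));
	return 0;
}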
Example no. 19
0
int main(void)
{
  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
#ifdef CONFIG_CC_STACKPROTECTOR
  DEFINE(TSK_STACK_CANARY,	offsetof(struct task_struct, stack_canary));
#endif
  BLANK();
  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
  DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
  DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
  DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
  DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
#ifdef CONFIG_VFP
  DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
#ifdef CONFIG_SMP
  DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
#endif
#endif
#ifdef CONFIG_ARM_THUMBEE
  DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
#endif
#ifdef CONFIG_IWMMXT
  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
#endif
#ifdef CONFIG_CRUNCH
  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
#endif
  BLANK();
  DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
  DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
  DEFINE(S_R2,			offsetof(struct pt_regs, ARM_r2));
  DEFINE(S_R3,			offsetof(struct pt_regs, ARM_r3));
  DEFINE(S_R4,			offsetof(struct pt_regs, ARM_r4));
  DEFINE(S_R5,			offsetof(struct pt_regs, ARM_r5));
  DEFINE(S_R6,			offsetof(struct pt_regs, ARM_r6));
  DEFINE(S_R7,			offsetof(struct pt_regs, ARM_r7));
  DEFINE(S_R8,			offsetof(struct pt_regs, ARM_r8));
  DEFINE(S_R9,			offsetof(struct pt_regs, ARM_r9));
  DEFINE(S_R10,			offsetof(struct pt_regs, ARM_r10));
  DEFINE(S_FP,			offsetof(struct pt_regs, ARM_fp));
  DEFINE(S_IP,			offsetof(struct pt_regs, ARM_ip));
  DEFINE(S_SP,			offsetof(struct pt_regs, ARM_sp));
  DEFINE(S_LR,			offsetof(struct pt_regs, ARM_lr));
  DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
  DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
  DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
  BLANK();
#ifdef CONFIG_CACHE_L2X0
  DEFINE(L2X0_R_PHY_BASE,	offsetof(struct l2x0_regs, phy_base));
  DEFINE(L2X0_R_AUX_CTRL,	offsetof(struct l2x0_regs, aux_ctrl));
  DEFINE(L2X0_R_TAG_LATENCY,	offsetof(struct l2x0_regs, tag_latency));
  DEFINE(L2X0_R_DATA_LATENCY,	offsetof(struct l2x0_regs, data_latency));
  DEFINE(L2X0_R_FILTER_START,	offsetof(struct l2x0_regs, filter_start));
  DEFINE(L2X0_R_FILTER_END,	offsetof(struct l2x0_regs, filter_end));
  DEFINE(L2X0_R_PREFETCH_CTRL,	offsetof(struct l2x0_regs, prefetch_ctrl));
  DEFINE(L2X0_R_PWR_CTRL,	offsetof(struct l2x0_regs, pwr_ctrl));
  BLANK();
#endif
#ifdef CONFIG_CPU_HAS_ASID
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
  BLANK();
#endif
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC,	       	VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
  BLANK();
  DEFINE(SYS_ERROR0,		0x9f0000);
  BLANK();
  DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
  DEFINE(MACHINFO_TYPE,		offsetof(struct machine_desc, nr));
  DEFINE(MACHINFO_NAME,		offsetof(struct machine_desc, name));
  BLANK();
  DEFINE(PROC_INFO_SZ,		sizeof(struct proc_info_list));
  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
  DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
  DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
  BLANK();
#ifdef MULTI_DABORT
  DEFINE(PROCESSOR_DABT_FUNC,	offsetof(struct processor, _data_abort));
#endif
#ifdef MULTI_PABORT
  DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
#endif
#ifdef MULTI_CPU
  DEFINE(CPU_SLEEP_SIZE,	offsetof(struct processor, suspend_size));
  DEFINE(CPU_DO_SUSPEND,	offsetof(struct processor, do_suspend));
  DEFINE(CPU_DO_RESUME,		offsetof(struct processor, do_resume));
#endif
#ifdef MULTI_CACHE
  DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
#endif
#ifdef CONFIG_ARM_CPU_SUSPEND
  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
  BLANK();
  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
  BLANK();
  DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
  DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
  BLANK();
#ifdef CONFIG_KVM_ARM_HOST
  DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
  DEFINE(VCPU_MIDR,		offsetof(struct kvm_vcpu, arch.midr));
  DEFINE(VCPU_CP15,		offsetof(struct kvm_vcpu, arch.cp15));
  DEFINE(VCPU_VFP_GUEST,	offsetof(struct kvm_vcpu, arch.vfp_guest));
  DEFINE(VCPU_VFP_HOST,		offsetof(struct kvm_vcpu, arch.host_cpu_context));
  DEFINE(VCPU_REGS,		offsetof(struct kvm_vcpu, arch.regs));
  DEFINE(VCPU_USR_REGS,		offsetof(struct kvm_vcpu, arch.regs.usr_regs));
  DEFINE(VCPU_SVC_REGS,		offsetof(struct kvm_vcpu, arch.regs.svc_regs));
  DEFINE(VCPU_ABT_REGS,		offsetof(struct kvm_vcpu, arch.regs.abt_regs));
  DEFINE(VCPU_UND_REGS,		offsetof(struct kvm_vcpu, arch.regs.und_regs));
  DEFINE(VCPU_IRQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.irq_regs));
  DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
  DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
  DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
  DEFINE(VCPU_HCR,		offsetof(struct kvm_vcpu, arch.hcr));
  DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
  DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
  DEFINE(VGIC_V2_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
  DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
  DEFINE(VGIC_V2_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
  DEFINE(VGIC_V2_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
  DEFINE(VGIC_V2_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
  DEFINE(VGIC_V2_CPU_APR,	offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
  DEFINE(VGIC_V2_CPU_LR,	offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
  DEFINE(VGIC_CPU_NR_LR,	offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
  DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
  DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
  DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
  DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
#endif
  DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
  DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
#endif
  return 0; 
}
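Many entries above sit inside #ifdef CONFIG_* blocks (CONFIG_VFP, CONFIG_KVM_ARM_HOST, and so on), which is the reason these offsets are generated at build time rather than hard-coded in assembly: the structure layout moves with the configuration. A toy demonstration, with an invented structure and option name:

#include <stdio.h>
#include <stddef.h>

struct toy_thread_info {
	unsigned long flags;
#ifdef TOY_WITH_ACCOUNTING	/* stands in for a CONFIG_* option */
	unsigned long ac_stamp;
	unsigned long ac_leave;
#endif
	int cpu;
	int preempt_count;
};

int main(void)
{
	/* Build once with -DTOY_WITH_ACCOUNTING and once without: the two
	 * offsets below move, so assembly that used literal numbers would
	 * silently read the wrong fields. */
	printf("TOY_TI_CPU       = %zu\n",
	       offsetof(struct toy_thread_info, cpu));
	printf("TOY_TI_PRE_COUNT = %zu\n",
	       offsetof(struct toy_thread_info, preempt_count));
	return 0;
}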
int main(void)
{
#define ENTRY(entry) DEFINE(tsk_ ## entry, offsetof(struct task_struct, entry))
	ENTRY(state);
	ENTRY(flags); 
	ENTRY(pid);
	BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(TI_ ## entry, offsetof(struct thread_info, entry))
	ENTRY(flags);
	ENTRY(addr_limit);
	ENTRY(preempt_count);
	ENTRY(status);
#ifdef CONFIG_IA32_EMULATION
	ENTRY(sysenter_return);
#endif
	BLANK();
#undef ENTRY
#ifdef CONFIG_PARAVIRT
	BLANK();
	OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
	OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
	OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
	OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
	OFFSET(PV_CPU_nmi_return, pv_cpu_ops, nmi_return);
	OFFSET(PV_CPU_usergs_sysret32, pv_cpu_ops, usergs_sysret32);
	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
#endif

#ifdef CONFIG_IA32_EMULATION
#define ENTRY(entry) DEFINE(IA32_SIGCONTEXT_ ## entry, offsetof(struct sigcontext_ia32, entry))
	ENTRY(ax);
	ENTRY(bx);
	ENTRY(cx);
	ENTRY(dx);
	ENTRY(si);
	ENTRY(di);
	ENTRY(bp);
	ENTRY(sp);
	ENTRY(ip);
	BLANK();
#undef ENTRY
	DEFINE(IA32_RT_SIGFRAME_sigcontext,
	       offsetof (struct rt_sigframe_ia32, uc.uc_mcontext));
	BLANK();
#endif
	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));
	BLANK();
#define ENTRY(entry) DEFINE(pt_regs_ ## entry, offsetof(struct pt_regs, entry))
	ENTRY(bx);
	ENTRY(cx);
	ENTRY(dx);
	ENTRY(sp);
	ENTRY(bp);
	ENTRY(si);
	ENTRY(di);
	ENTRY(r8);
	ENTRY(r9);
	ENTRY(r10);
	ENTRY(r11);
	ENTRY(r12);
	ENTRY(r13);
	ENTRY(r14);
	ENTRY(r15);
	ENTRY(flags);
	BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(saved_context_ ## entry, offsetof(struct saved_context, entry))
	ENTRY(cr0);
	ENTRY(cr2);
	ENTRY(cr3);
	ENTRY(cr4);
	ENTRY(cr8);
	BLANK();
#undef ENTRY
	DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
	BLANK();
	DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
	BLANK();
	DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);

	BLANK();
	OFFSET(BP_scratch, boot_params, scratch);
	OFFSET(BP_loadflags, boot_params, hdr.loadflags);
	OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
	OFFSET(BP_version, boot_params, hdr.version);
	OFFSET(BP_kernel_alignment, boot_params, hdr.kernel_alignment);

	BLANK();
	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
#ifdef CONFIG_XEN
	BLANK();
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#undef ENTRY
#endif
	return 0;
}
Example no. 21
0
int main(void)
{
  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
  BLANK();
  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
  BLANK();
  DEFINE(THREAD_CPU_CONTEXT,	offsetof(struct task_struct, thread.cpu_context));
  BLANK();
  DEFINE(S_X0,			offsetof(struct pt_regs, regs[0]));
  DEFINE(S_X1,			offsetof(struct pt_regs, regs[1]));
  DEFINE(S_X2,			offsetof(struct pt_regs, regs[2]));
  DEFINE(S_X3,			offsetof(struct pt_regs, regs[3]));
  DEFINE(S_X4,			offsetof(struct pt_regs, regs[4]));
  DEFINE(S_X5,			offsetof(struct pt_regs, regs[5]));
  DEFINE(S_X6,			offsetof(struct pt_regs, regs[6]));
  DEFINE(S_X7,			offsetof(struct pt_regs, regs[7]));
  DEFINE(S_LR,			offsetof(struct pt_regs, regs[30]));
  DEFINE(S_SP,			offsetof(struct pt_regs, sp));
#ifdef CONFIG_COMPAT
  DEFINE(S_COMPAT_SP,		offsetof(struct pt_regs, compat_sp));
#endif
  DEFINE(S_PSTATE,		offsetof(struct pt_regs, pstate));
  DEFINE(S_PC,			offsetof(struct pt_regs, pc));
  DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
  DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
  BLANK();
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
  BLANK();
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC,	       	VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
  BLANK();
  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
  BLANK();
  DEFINE(CLOCK_REALTIME,	CLOCK_REALTIME);
  DEFINE(CLOCK_MONOTONIC,	CLOCK_MONOTONIC);
  DEFINE(CLOCK_REALTIME_RES,	MONOTONIC_RES_NSEC);
  DEFINE(CLOCK_REALTIME_COARSE,	CLOCK_REALTIME_COARSE);
  DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
  DEFINE(CLOCK_COARSE_RES,	LOW_RES_NSEC);
  DEFINE(NSEC_PER_SEC,		NSEC_PER_SEC);
  BLANK();
  DEFINE(VDSO_CS_CYCLE_LAST,	offsetof(struct vdso_data, cs_cycle_last));
  DEFINE(VDSO_XTIME_CLK_SEC,	offsetof(struct vdso_data, xtime_clock_sec));
  DEFINE(VDSO_XTIME_CLK_NSEC,	offsetof(struct vdso_data, xtime_clock_nsec));
  DEFINE(VDSO_XTIME_CRS_SEC,	offsetof(struct vdso_data, xtime_coarse_sec));
  DEFINE(VDSO_XTIME_CRS_NSEC,	offsetof(struct vdso_data, xtime_coarse_nsec));
  DEFINE(VDSO_WTM_CLK_SEC,	offsetof(struct vdso_data, wtm_clock_sec));
  DEFINE(VDSO_WTM_CLK_NSEC,	offsetof(struct vdso_data, wtm_clock_nsec));
  DEFINE(VDSO_TB_SEQ_COUNT,	offsetof(struct vdso_data, tb_seq_count));
  DEFINE(VDSO_CS_MULT,		offsetof(struct vdso_data, cs_mult));
  DEFINE(VDSO_CS_SHIFT,		offsetof(struct vdso_data, cs_shift));
  DEFINE(VDSO_TZ_MINWEST,	offsetof(struct vdso_data, tz_minuteswest));
  DEFINE(VDSO_TZ_DSTTIME,	offsetof(struct vdso_data, tz_dsttime));
  DEFINE(VDSO_USE_SYSCALL,	offsetof(struct vdso_data, use_syscall));
  BLANK();
  DEFINE(TVAL_TV_SEC,		offsetof(struct timeval, tv_sec));
  DEFINE(TVAL_TV_USEC,		offsetof(struct timeval, tv_usec));
  DEFINE(TSPEC_TV_SEC,		offsetof(struct timespec, tv_sec));
  DEFINE(TSPEC_TV_NSEC,		offsetof(struct timespec, tv_nsec));
  BLANK();
  DEFINE(TZ_MINWEST,		offsetof(struct timezone, tz_minuteswest));
  DEFINE(TZ_DSTTIME,		offsetof(struct timezone, tz_dsttime));
  BLANK();
#ifdef CONFIG_KVM_ARM_HOST
  DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
  DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
  DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
  DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
  DEFINE(CPU_SP_EL1,		offsetof(struct kvm_regs, sp_el1));
  DEFINE(CPU_ELR_EL1,		offsetof(struct kvm_regs, elr_el1));
  DEFINE(CPU_SPSR,		offsetof(struct kvm_regs, spsr));
  DEFINE(CPU_SYSREGS,		offsetof(struct kvm_cpu_context, sys_regs));
  DEFINE(VCPU_ESR_EL2,		offsetof(struct kvm_vcpu, arch.fault.esr_el2));
  DEFINE(VCPU_FAR_EL2,		offsetof(struct kvm_vcpu, arch.fault.far_el2));
  DEFINE(VCPU_HPFAR_EL2,	offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
  DEFINE(VCPU_DEBUG_FLAGS,	offsetof(struct kvm_vcpu, arch.debug_flags));
  DEFINE(VCPU_DEBUG_PTR,	offsetof(struct kvm_vcpu, arch.debug_ptr));
  DEFINE(DEBUG_BCR, 		offsetof(struct kvm_guest_debug_arch, dbg_bcr));
  DEFINE(DEBUG_BVR, 		offsetof(struct kvm_guest_debug_arch, dbg_bvr));
  DEFINE(DEBUG_WCR, 		offsetof(struct kvm_guest_debug_arch, dbg_wcr));
  DEFINE(DEBUG_WVR, 		offsetof(struct kvm_guest_debug_arch, dbg_wvr));
  DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
  DEFINE(VCPU_MDCR_EL2,	offsetof(struct kvm_vcpu, arch.mdcr_el2));
  DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
  DEFINE(VCPU_HOST_CONTEXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
  DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
  DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
  DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
  DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
  DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
  DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
  DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
  DEFINE(VGIC_V2_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
  DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
  DEFINE(VGIC_V2_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
  DEFINE(VGIC_V2_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
  DEFINE(VGIC_V2_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
  DEFINE(VGIC_V2_CPU_APR,	offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
  DEFINE(VGIC_V2_CPU_LR,	offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
  DEFINE(VGIC_V3_CPU_SRE,	offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
  DEFINE(VGIC_V3_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
  DEFINE(VGIC_V3_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
  DEFINE(VGIC_V3_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
  DEFINE(VGIC_V3_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
  DEFINE(VGIC_V3_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
  DEFINE(VGIC_V3_CPU_AP0R,	offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
  DEFINE(VGIC_V3_CPU_AP1R,	offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
  DEFINE(VGIC_V3_CPU_LR,	offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
  DEFINE(VGIC_CPU_NR_LR,	offsetof(struct vgic_cpu, nr_lr));
  DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
  DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
#ifdef CONFIG_CPU_PM
  DEFINE(CPU_SUSPEND_SZ,	sizeof(struct cpu_suspend_ctx));
  DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
  DEFINE(MPIDR_HASH_MASK,	offsetof(struct mpidr_hash, mask));
  DEFINE(MPIDR_HASH_SHIFTS,	offsetof(struct mpidr_hash, shift_aff));
  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
  return 0;
}
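Not everything exported here is an offset: the example above also passes plain constants (CLOCK_REALTIME, DMA_TO_DEVICE, NSEC_PER_SEC) through DEFINE, presumably so that hand-written assembly such as the vDSO fast paths, which cannot include the C headers defining them, still sees the authoritative values. A trivial host-side check of two such constants (the numbers printed come from whatever libc is installed):

#include <stdio.h>
#include <time.h>		/* CLOCK_REALTIME, CLOCK_MONOTONIC */

int main(void)
{
	/* In the kernel these go through DEFINE() so the assembly side can
	 * use them; here we just print what this system's headers say. */
	printf("CLOCK_REALTIME  = %d\n", (int)CLOCK_REALTIME);
	printf("CLOCK_MONOTONIC = %d\n", (int)CLOCK_MONOTONIC);
	return 0;
}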
Example no. 22
0
void __dummy__(void)
{
    OFFSET(UREGS_r15, struct cpu_user_regs, r15);
    OFFSET(UREGS_r14, struct cpu_user_regs, r14);
    OFFSET(UREGS_r13, struct cpu_user_regs, r13);
    OFFSET(UREGS_r12, struct cpu_user_regs, r12);
    OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
    OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
    OFFSET(UREGS_r11, struct cpu_user_regs, r11);
    OFFSET(UREGS_r10, struct cpu_user_regs, r10);
    OFFSET(UREGS_r9, struct cpu_user_regs, r9);
    OFFSET(UREGS_r8, struct cpu_user_regs, r8);
    OFFSET(UREGS_rax, struct cpu_user_regs, rax);
    OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
    OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
    OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
    OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
    OFFSET(UREGS_rip, struct cpu_user_regs, rip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
    OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
    OFFSET(UREGS_es, struct cpu_user_regs, es);
    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    OFFSET(irq_caps_offset, struct domain, irq_caps);
    OFFSET(next_in_list_offset, struct domain, next_in_list);
    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_domain, struct vcpu, domain);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.pv_vcpu.trap_bounce);
    OFFSET(VCPU_int80_bounce, struct vcpu, arch.pv_vcpu.int80_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_addr, struct vcpu, arch.pv_vcpu.event_callback_eip);
    OFFSET(VCPU_event_sel, struct vcpu, arch.pv_vcpu.event_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.pv_vcpu.failsafe_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.pv_vcpu.failsafe_callback_cs);
    OFFSET(VCPU_syscall_addr, struct vcpu,
           arch.pv_vcpu.syscall_callback_eip);
    OFFSET(VCPU_syscall32_addr, struct vcpu,
           arch.pv_vcpu.syscall32_callback_eip);
    OFFSET(VCPU_syscall32_sel, struct vcpu,
           arch.pv_vcpu.syscall32_callback_cs);
    OFFSET(VCPU_syscall32_disables_events, struct vcpu,
           arch.pv_vcpu.syscall32_disables_events);
    OFFSET(VCPU_sysenter_addr, struct vcpu,
           arch.pv_vcpu.sysenter_callback_eip);
    OFFSET(VCPU_sysenter_sel, struct vcpu,
           arch.pv_vcpu.sysenter_callback_cs);
    OFFSET(VCPU_sysenter_disables_events, struct vcpu,
           arch.pv_vcpu.sysenter_disables_events);
    OFFSET(VCPU_trap_ctxt, struct vcpu, arch.pv_vcpu.trap_ctxt);
    OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv_vcpu.kernel_sp);
    OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv_vcpu.kernel_ss);
    OFFSET(VCPU_iopl, struct vcpu, arch.pv_vcpu.iopl);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
    OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
    OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
    OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
    DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
    DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    DEFINE(_VGCF_syscall_disables_events,  _VGCF_syscall_disables_events);
    BLANK();

    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
    BLANK();

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
    OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
    OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    BLANK();

    OFFSET(VCPU_nhvm_guestmode, struct vcpu, arch.hvm_vcpu.nvcpu.nv_guestmode);
    OFFSET(VCPU_nhvm_p2m, struct vcpu, arch.hvm_vcpu.nvcpu.nv_p2m);
    OFFSET(VCPU_nsvm_hap_enabled, struct vcpu, arch.hvm_vcpu.nvcpu.u.nsvm.ns_hap_enabled);
    BLANK();

    OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
    BLANK();

    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
    BLANK();

    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
    BLANK();

    OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
    OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
    BLANK();

    OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
    OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
    OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();

    OFFSET(TRAPINFO_eip, struct trap_info, address);
    OFFSET(TRAPINFO_cs, struct trap_info, cs);
    OFFSET(TRAPINFO_flags, struct trap_info, flags);
    DEFINE(TRAPINFO_sizeof, sizeof(struct trap_info));
    BLANK();

    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#ifdef CONFIG_PERF_COUNTERS
    DEFINE(ASM_PERFC_exceptions, PERFC_exceptions);
    BLANK();
#endif

    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
    OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending);
    BLANK();

    OFFSET(CPUINFO_features, struct cpuinfo_x86, x86_capability);
    BLANK();

    OFFSET(MB_flags, multiboot_info_t, flags);
    OFFSET(MB_cmdline, multiboot_info_t, cmdline);
    OFFSET(MB_mem_lower, multiboot_info_t, mem_lower);
    BLANK();

    DEFINE(MB2_fixed_sizeof, sizeof(multiboot2_fixed_t));
    OFFSET(MB2_fixed_total_size, multiboot2_fixed_t, total_size);
    OFFSET(MB2_tag_type, multiboot2_tag_t, type);
    OFFSET(MB2_tag_size, multiboot2_tag_t, size);
    OFFSET(MB2_load_base_addr, multiboot2_tag_load_base_addr_t, load_base_addr);
    OFFSET(MB2_mem_lower, multiboot2_tag_basic_meminfo_t, mem_lower);
    OFFSET(MB2_efi64_st, multiboot2_tag_efi64_t, pointer);
    OFFSET(MB2_efi64_ih, multiboot2_tag_efi64_ih_t, pointer);
    BLANK();

    DEFINE(l2_identmap_sizeof, sizeof(l2_identmap));
    BLANK();

    OFFSET(DOMAIN_vm_assist, struct domain, vm_assist);
}
Example no. 23
/* makeREADFASTA: create the basic components of a FASTA reader. */
READFASTA *makeREADFASTA(){
    register READFASTA *r = BLANK(READFASTA);       /* zero-initialised reader object   */
    r->alloc = READFASTA_CHUNKSIZE;                 /* initial buffer capacity in bytes */
    r->buf = (char*)malloc(sizeof(char)*r->alloc);  /* line/sequence buffer             */
    return r;
}
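Note that BLANK here is clearly not the asm-offsets separator used elsewhere in these examples: it is evidently an allocation helper. The definitions below are assumptions added only so the snippet can be read (and compiled) on its own; they are not taken from the original FASTA-reader headers.

/* Hypothetical support definitions for makeREADFASTA(); assumptions,
 * not the original project's headers. */
#include <stdlib.h>

#define READFASTA_CHUNKSIZE 4096                    /* assumed initial size */
#define BLANK(t) ((t *)calloc(1, sizeof(t)))        /* zeroed allocation    */

typedef struct READFASTA {
    char   *buf;     /* line/sequence buffer, grown in CHUNKSIZE steps */
    size_t  alloc;   /* bytes currently allocated in buf               */
} READFASTA;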
int main(void)
{
  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
  BLANK();
  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
  DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
  DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
  DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
  DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
  DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
#ifdef CONFIG_ARM_THUMBEE
  DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
#endif
#ifdef CONFIG_IWMMXT
  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
#endif
#ifdef CONFIG_CRUNCH
  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
#endif
  BLANK();
  DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
  DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
  DEFINE(S_R2,			offsetof(struct pt_regs, ARM_r2));
  DEFINE(S_R3,			offsetof(struct pt_regs, ARM_r3));
  DEFINE(S_R4,			offsetof(struct pt_regs, ARM_r4));
  DEFINE(S_R5,			offsetof(struct pt_regs, ARM_r5));
  DEFINE(S_R6,			offsetof(struct pt_regs, ARM_r6));
  DEFINE(S_R7,			offsetof(struct pt_regs, ARM_r7));
  DEFINE(S_R8,			offsetof(struct pt_regs, ARM_r8));
  DEFINE(S_R9,			offsetof(struct pt_regs, ARM_r9));
  DEFINE(S_R10,			offsetof(struct pt_regs, ARM_r10));
  DEFINE(S_FP,			offsetof(struct pt_regs, ARM_fp));
  DEFINE(S_IP,			offsetof(struct pt_regs, ARM_ip));
  DEFINE(S_SP,			offsetof(struct pt_regs, ARM_sp));
  DEFINE(S_LR,			offsetof(struct pt_regs, ARM_lr));
  DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
  DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
  DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
  BLANK();
#ifdef CONFIG_CPU_HAS_ASID
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
  BLANK();
#endif
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC,	       	VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
  BLANK();
  DEFINE(SYS_ERROR0,		0x9f0000);
  BLANK();
  DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
  DEFINE(MACHINFO_TYPE,		offsetof(struct machine_desc, nr));
  DEFINE(MACHINFO_NAME,		offsetof(struct machine_desc, name));
  DEFINE(MACHINFO_PHYSIO,	offsetof(struct machine_desc, phys_io));
  DEFINE(MACHINFO_PGOFFIO,	offsetof(struct machine_desc, io_pg_offst));
  BLANK();
  DEFINE(PROC_INFO_SZ,		sizeof(struct proc_info_list));
  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
  DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
  DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
  BLANK();
#ifdef MULTI_DABORT
  DEFINE(PROCESSOR_DABT_FUNC,	offsetof(struct processor, _data_abort));
#endif
#ifdef MULTI_PABORT
  DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
#endif
#ifdef CONFIG_NKERNEL
  DEFINE(ctx_sizeof,            sizeof(NkOsCtx));
  DEFINE(ctx_swi_off,           offsetof(NkOsCtx, os_vectors[2]));
  DEFINE(ctx_irq_off,           offsetof(NkOsCtx, os_vectors[6]));
  DEFINE(ctx_iirq_off,          offsetof(NkOsCtx, os_vectors[10]));
  DEFINE(ctx_xirq_off,          offsetof(NkOsCtx, os_vectors[9]));
  DEFINE(ctx_pending_off,       offsetof(NkOsCtx, pending));
  DEFINE(ctx_enabled_off,       offsetof(NkOsCtx, enabled));
  DEFINE(ctx_sp_svc_off,        offsetof(NkOsCtx, sp_svc));
  DEFINE(ctx_root_dir_off,      offsetof(NkOsCtx, root_dir));
  DEFINE(ctx_domain_off,        offsetof(NkOsCtx, domain));
  DEFINE(ctx_arch_id_off,       offsetof(NkOsCtx, arch_id));
  DEFINE(ctx_vil_off,           offsetof(NkOsCtx, vil));
  DEFINE(ctx_idle_off,          offsetof(NkOsCtx, idle));
  DEFINE(ctx_prio_set_off,      offsetof(NkOsCtx, prio_set));
  DEFINE(ctx_regs_r0_off,       offsetof(NkOsCtx, regs[0]));
  DEFINE(ctx_regs_r4_off,       offsetof(NkOsCtx, regs[4]));
  DEFINE(ctx_regs_r10_off,      offsetof(NkOsCtx, regs[10]));
  DEFINE(ctx_regs_r12_off,      offsetof(NkOsCtx, regs[12]));
  DEFINE(ctx_regs_sp_off,       offsetof(NkOsCtx, regs[13]));
  DEFINE(ctx_regs_lr_off,       offsetof(NkOsCtx, regs[14]));
  DEFINE(ctx_regs_pc_off,       offsetof(NkOsCtx, regs[15]));
  DEFINE(ctx_regs_cpsr_off,     offsetof(NkOsCtx, regs[16]));
  DEFINE(ctx_nkvector_off,      offsetof(NkOsCtx, nkvector));
#endif
  return 0; 
}
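A fair question is why these files are compiled with -S and scraped rather than simply running a program that prints the offsets. The answer is cross-compilation: the offsets must come from the compiler that targets the kernel, and that cross-built binary generally cannot run on the build host. The toy below is therefore only a user-space analogue of the kind of header the process produces; struct thread_info_demo and the TI_*_DEMO names are hypothetical.

/* User-space analogue of an offsets generator.  This is NOT how the
 * kernel does it; it only shows the shape of the generated header. */
#include <stdio.h>
#include <stddef.h>

struct thread_info_demo {
        unsigned long flags;
        int           preempt_count;
        int           cpu;
};

int main(void)
{
        printf("#define TI_FLAGS_DEMO   %zu\n",
               offsetof(struct thread_info_demo, flags));
        printf("#define TI_PREEMPT_DEMO %zu\n",
               offsetof(struct thread_info_demo, preempt_count));
        printf("#define TI_CPU_DEMO     %zu\n",
               offsetof(struct thread_info_demo, cpu));
        return 0;
}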
Example no. 25
void __dummy__(void)
{
    OFFSET(UREGS_r15, struct cpu_user_regs, r15);
    OFFSET(UREGS_r14, struct cpu_user_regs, r14);
    OFFSET(UREGS_r13, struct cpu_user_regs, r13);
    OFFSET(UREGS_r12, struct cpu_user_regs, r12);
    OFFSET(UREGS_rbp, struct cpu_user_regs, rbp);
    OFFSET(UREGS_rbx, struct cpu_user_regs, rbx);
    OFFSET(UREGS_r11, struct cpu_user_regs, r11);
    OFFSET(UREGS_r10, struct cpu_user_regs, r10);
    OFFSET(UREGS_r9, struct cpu_user_regs, r9);
    OFFSET(UREGS_r8, struct cpu_user_regs, r8);
    OFFSET(UREGS_rax, struct cpu_user_regs, rax);
    OFFSET(UREGS_rcx, struct cpu_user_regs, rcx);
    OFFSET(UREGS_rdx, struct cpu_user_regs, rdx);
    OFFSET(UREGS_rsi, struct cpu_user_regs, rsi);
    OFFSET(UREGS_rdi, struct cpu_user_regs, rdi);
    OFFSET(UREGS_error_code, struct cpu_user_regs, error_code);
    OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector);
    OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
    OFFSET(UREGS_rip, struct cpu_user_regs, rip);
    OFFSET(UREGS_cs, struct cpu_user_regs, cs);
    OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
    OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
    OFFSET(UREGS_ss, struct cpu_user_regs, ss);
    OFFSET(UREGS_ds, struct cpu_user_regs, ds);
    OFFSET(UREGS_es, struct cpu_user_regs, es);
    OFFSET(UREGS_fs, struct cpu_user_regs, fs);
    OFFSET(UREGS_gs, struct cpu_user_regs, gs);
    OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
    DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs));
    BLANK();

    OFFSET(irq_caps_offset, struct domain, irq_caps);
    OFFSET(next_in_list_offset, struct domain, next_in_list);
    OFFSET(VCPU_processor, struct vcpu, processor);
    OFFSET(VCPU_domain, struct vcpu, domain);
    OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
    OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
    OFFSET(VCPU_int80_bounce, struct vcpu, arch.int80_bounce);
    OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
    OFFSET(VCPU_event_addr, struct vcpu,
           arch.guest_context.event_callback_eip);
    OFFSET(VCPU_event_sel, struct vcpu,
           arch.guest_context.event_callback_cs);
    OFFSET(VCPU_failsafe_addr, struct vcpu,
           arch.guest_context.failsafe_callback_eip);
    OFFSET(VCPU_failsafe_sel, struct vcpu,
           arch.guest_context.failsafe_callback_cs);
    OFFSET(VCPU_syscall_addr, struct vcpu,
           arch.guest_context.syscall_callback_eip);
    OFFSET(VCPU_syscall32_addr, struct vcpu, arch.syscall32_callback_eip);
    OFFSET(VCPU_syscall32_sel, struct vcpu, arch.syscall32_callback_cs);
    OFFSET(VCPU_syscall32_disables_events, struct vcpu,
           arch.syscall32_disables_events);
    OFFSET(VCPU_sysenter_addr, struct vcpu, arch.sysenter_callback_eip);
    OFFSET(VCPU_sysenter_sel, struct vcpu, arch.sysenter_callback_cs);
    OFFSET(VCPU_sysenter_disables_events, struct vcpu,
           arch.sysenter_disables_events);
    OFFSET(VCPU_gp_fault_addr, struct vcpu,
           arch.guest_context.trap_ctxt[TRAP_gp_fault].address);
    OFFSET(VCPU_gp_fault_sel, struct vcpu,
           arch.guest_context.trap_ctxt[TRAP_gp_fault].cs);
    OFFSET(VCPU_kernel_sp, struct vcpu, arch.guest_context.kernel_sp);
    OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
    OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
    OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
    OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
    OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
    OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
    DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
    DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
    DEFINE(_VGCF_failsafe_disables_events, _VGCF_failsafe_disables_events);
    DEFINE(_VGCF_syscall_disables_events,  _VGCF_syscall_disables_events);
    BLANK();

    OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
    BLANK();

    OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    OFFSET(VCPU_vmx_realmode, struct vcpu, arch.hvm_vmx.vmx_realmode);
    OFFSET(VCPU_vmx_emulate, struct vcpu, arch.hvm_vmx.vmx_emulate);
    OFFSET(VCPU_vm86_seg_mask, struct vcpu, arch.hvm_vmx.vm86_segment_mask);
    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    BLANK();

    OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
    BLANK();

    OFFSET(VMCB_rax, struct vmcb_struct, rax);
    OFFSET(VMCB_rip, struct vmcb_struct, rip);
    OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
    OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
    BLANK();

    OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
    OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
    BLANK();

    OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
    OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
    BLANK();

    OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
    BLANK();

    OFFSET(TRAPBOUNCE_error_code, struct trap_bounce, error_code);
    OFFSET(TRAPBOUNCE_flags, struct trap_bounce, flags);
    OFFSET(TRAPBOUNCE_cs, struct trap_bounce, cs);
    OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
    BLANK();

#if PERF_COUNTERS
    DEFINE(PERFC_hypercalls, PERFC_hypercalls);
    DEFINE(PERFC_exceptions, PERFC_exceptions);
    BLANK();
#endif

    DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
    BLANK();

    OFFSET(CPUINFO_ext_features, struct cpuinfo_x86, x86_capability[1]);
    BLANK();

    OFFSET(MB_flags, multiboot_info_t, flags);
    OFFSET(MB_cmdline, multiboot_info_t, cmdline);
}
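The back half of the pipeline turns the "->SYM value" markers in the generated .s file into #define lines. The real build does this with a small sed script; the program below is only an illustrative C re-implementation of that filtering, reading the assembly on stdin and writing header lines on stdout. File names and behaviour details are assumptions for the sketch.

/* offsets_filter.c -- illustrative stand-in for the sed step that
 * converts "->SYM value expr" markers into asm-offsets.h lines.
 *
 * Usage with the earlier sketch:
 *   gcc -S asm_offsets_demo.c -o - | ./offsets_filter
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];

        while (fgets(line, sizeof(line), stdin)) {
                char *p = strstr(line, ".ascii \"->");
                if (!p)
                        continue;                     /* not a marker line   */
                p += strlen(".ascii \"->");

                char *end = strrchr(p, '"');
                if (end)
                        *end = '\0';                  /* drop closing quote  */

                if (*p == '\0') {
                        putchar('\n');                /* BLANK() separator   */
                } else if (*p == '#') {
                        printf("/*%s */\n", p + 1);   /* COMMENT() text      */
                } else {
                        char sym[128], val[128];
                        /* "SYM $123 expr" -> "#define SYM 123" */
                        if (sscanf(p, "%127s %127s", sym, val) == 2) {
                                char *v = val;
                                if (*v == '$' || *v == '#')
                                        v++;          /* strip immediate prefix */
                                printf("#define %s %s\n", sym, v);
                        }
                }
        }
        return 0;
}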
Example no. 26
int main(void)
{
	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
	DEFINE(TASK_STATE, offsetof(struct task_struct, state));
	DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
	DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
	DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
	DEFINE(TASK_PID, offsetof(struct task_struct, pid));
	BLANK();
	DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
	DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
	DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
	DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
	DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
	DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
	DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
	DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
	DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
	DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
	DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
	DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
	DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
	DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
	DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
	DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
	DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
	DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
	DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
	DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
	DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
	DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
	DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
	DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
	DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
	DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
	DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
	DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
	DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
	DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
	DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
	DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
	DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
	DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
	DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
	DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
	DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
	DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
	DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
	DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
	DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
	DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
	DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
	DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
	DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
	DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
	DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
	DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
	DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
	DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
	DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
	DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
	DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
	DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
	DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
	DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
	DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
	DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
	DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
	DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
	DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
	DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
	DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
	DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
	DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
	DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
	DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
	DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
	DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
	DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
	DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
	DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
	DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
	DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
	DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
	DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
	DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
	DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
	DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
	DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
	DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
	DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
	DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
	DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
	DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
	BLANK();
	DEFINE(TASK_SZ, sizeof(struct task_struct));
	/* TASK_SZ_ALGN includes space for a stack frame. */
	DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
	BLANK();
	DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
	DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
	DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
	DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
	DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
	DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
	DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
	DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
	DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
	DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
	DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
	DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
	DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
	DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
	DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
	DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
	DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
	DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
	DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
	DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
	DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
	DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
	DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
	DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
	DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
	DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
	DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
	DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
	DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
	DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
	DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
	DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
	DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
	DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
	DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
	DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
	DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
	DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
	DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
	DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
	DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
	DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
	DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
	DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
	DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
	DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
	DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
	DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
	DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
	DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
	DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
	DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
	DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
	DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
	DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
	DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
	DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
	DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
	DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
	DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
	DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
	DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
	DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
	DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
	DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
	DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
	DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
	DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
	DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
	DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
	DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
	DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
	DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
	DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
	DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
	DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
	DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
	DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
	DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
	DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
	DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
	DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
	DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
	DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	/* PT_SZ_ALGN includes space for a stack frame. */
	DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
	BLANK();
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
	DEFINE(THREAD_SZ, sizeof(struct thread_info));
	/* THREAD_SZ_ALGN includes space for a stack frame. */
	DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
	BLANK();
	DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
	DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
	DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
	DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
	DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
	DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
	DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
	DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
	DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
	DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
	DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
	DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
	DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
	DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
	DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
	DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
	DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
	DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
	DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
	DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
	DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
	DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
	BLANK();
	DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
	DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
	BLANK();
	DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
	DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
	DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
	DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
	DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
	DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
	DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
	DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
	DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
	DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
	DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
	BLANK();
	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
	BLANK();
	DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
	BLANK();
	return 0;
}
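TASK_SZ_ALGN, PT_SZ_ALGN and THREAD_SZ_ALGN above are commented as "including space for a stack frame": the raw sizeof is padded so assembly code can carve the structure plus a frame off the stack in one aligned step. The parisc helper also accounts for the frame itself; the snippet below only sketches the round-up-to-alignment half of that idiom, and the name and 64-byte constant are assumptions, not the parisc definitions.

/* Sketch of a round-up helper in the spirit of align_frame(); the
 * constant and name are assumed, not taken from the parisc tree. */
#include <assert.h>
#include <stddef.h>

#define FRAME_ALIGN_DEMO 64UL   /* hypothetical frame alignment */

/* Round size up to the next multiple of align (a power of two). */
#define align_frame_demo(size, align)  (((size) + (align) - 1) & ~((align) - 1))

static_assert(align_frame_demo(1,  FRAME_ALIGN_DEMO) == 64,  "small sizes round up");
static_assert(align_frame_demo(64, FRAME_ALIGN_DEMO) == 64,  "exact multiples unchanged");
static_assert(align_frame_demo(65, FRAME_ALIGN_DEMO) == 128, "crosses to next boundary");

int main(void) { return 0; }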
Example no. 27
void output_kvm_defines(void)
{
	COMMENT(" KVM/MIPS Specfic offsets. ");
	DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
	OFFSET(VCPU_RUN, kvm_vcpu, run);
	OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);

	OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
	OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);

	OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
	OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);

	OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
	OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
	OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
	OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);

	OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);

	OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
	OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
	OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
	OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
	OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
	OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
	OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
	OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
	OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
	OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
	OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
	OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
	OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
	OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
	OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
	OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
	OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
	OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
	OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
	OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
	OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
	OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
	OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
	OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
	OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
	OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
	OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
	OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
	OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
	OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
	OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
	OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
	OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
	OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
	OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
	OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
	OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
	OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);

	OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
	OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
	BLANK();
}
int main(void)
{
	COMMENT("This is a comment.");
	/*  might get these from somewhere else.  */
	DEFINE(_PAGE_SIZE, PAGE_SIZE);
	DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
	BLANK();

	COMMENT("Hexagon pt_regs definitions");
	OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr);
	OFFSET(_PT_UGPGP, pt_regs, ugpgp);
	OFFSET(_PT_R3130, pt_regs, r3130);
	OFFSET(_PT_R2928, pt_regs, r2928);
	OFFSET(_PT_R2726, pt_regs, r2726);
	OFFSET(_PT_R2524, pt_regs, r2524);
	OFFSET(_PT_R2322, pt_regs, r2322);
	OFFSET(_PT_R2120, pt_regs, r2120);
	OFFSET(_PT_R1918, pt_regs, r1918);
	OFFSET(_PT_R1716, pt_regs, r1716);
	OFFSET(_PT_R1514, pt_regs, r1514);
	OFFSET(_PT_R1312, pt_regs, r1312);
	OFFSET(_PT_R1110, pt_regs, r1110);
	OFFSET(_PT_R0908, pt_regs, r0908);
	OFFSET(_PT_R0706, pt_regs, r0706);
	OFFSET(_PT_R0504, pt_regs, r0504);
	OFFSET(_PT_R0302, pt_regs, r0302);
	OFFSET(_PT_R0100, pt_regs, r0100);
	OFFSET(_PT_LC0SA0, pt_regs, lc0sa0);
	OFFSET(_PT_LC1SA1, pt_regs, lc1sa1);
	OFFSET(_PT_M1M0, pt_regs, m1m0);
	OFFSET(_PT_PREDSUSR, pt_regs, predsusr);
	OFFSET(_PT_EVREC, pt_regs, hvmer);
	OFFSET(_PT_ER_VMEL, pt_regs, hvmer.vmel);
	OFFSET(_PT_ER_VMEST, pt_regs, hvmer.vmest);
	OFFSET(_PT_ER_VMPSP, pt_regs, hvmer.vmpsp);
	OFFSET(_PT_ER_VMBADVA, pt_regs, hvmer.vmbadva);
	DEFINE(_PT_REGS_SIZE, sizeof(struct pt_regs));
	BLANK();

	COMMENT("Hexagon thread_info definitions");
	OFFSET(_THREAD_INFO_FLAGS, thread_info, flags);
	OFFSET(_THREAD_INFO_PT_REGS, thread_info, regs);
	OFFSET(_THREAD_INFO_SP, thread_info, sp);
	DEFINE(_THREAD_SIZE, THREAD_SIZE);
	BLANK();

	COMMENT("Hexagon hexagon_switch_stack definitions");
	OFFSET(_SWITCH_R1716, hexagon_switch_stack, r1716);
	OFFSET(_SWITCH_R1918, hexagon_switch_stack, r1918);
	OFFSET(_SWITCH_R2120, hexagon_switch_stack, r2120);
	OFFSET(_SWITCH_R2322, hexagon_switch_stack, r2322);

	OFFSET(_SWITCH_R2524, hexagon_switch_stack, r2524);
	OFFSET(_SWITCH_R2726, hexagon_switch_stack, r2726);
	OFFSET(_SWITCH_FP, hexagon_switch_stack, fp);
	OFFSET(_SWITCH_LR, hexagon_switch_stack, lr);
	DEFINE(_SWITCH_STACK_SIZE, sizeof(struct hexagon_switch_stack));
	BLANK();

	COMMENT("Hexagon task_struct definitions");
	OFFSET(_TASK_THREAD_INFO, task_struct, stack);
	OFFSET(_TASK_STRUCT_THREAD, task_struct, thread);

	COMMENT("Hexagon thread_struct definitions");
	OFFSET(_THREAD_STRUCT_SWITCH_SP, thread_struct, switch_sp);

	return 0;
}
Example no. 29
void foo(void)
{
	OFFSET(SIGCONTEXT_d0, sigcontext, d0);
	OFFSET(SIGCONTEXT_d1, sigcontext, d1);
	BLANK();

	OFFSET(TI_task,			thread_info, task);
	OFFSET(TI_exec_domain,		thread_info, exec_domain);
	OFFSET(TI_flags,		thread_info, flags);
	OFFSET(TI_cpu,			thread_info, cpu);
	OFFSET(TI_preempt_count,	thread_info, preempt_count);
	OFFSET(TI_addr_limit,		thread_info, addr_limit);
	OFFSET(TI_restart_block,	thread_info, restart_block);
	BLANK();

	OFFSET(REG_D0,			pt_regs, d0);
	OFFSET(REG_D1,			pt_regs, d1);
	OFFSET(REG_D2,			pt_regs, d2);
	OFFSET(REG_D3,			pt_regs, d3);
	OFFSET(REG_A0,			pt_regs, a0);
	OFFSET(REG_A1,			pt_regs, a1);
	OFFSET(REG_A2,			pt_regs, a2);
	OFFSET(REG_A3,			pt_regs, a3);
	OFFSET(REG_E0,			pt_regs, e0);
	OFFSET(REG_E1,			pt_regs, e1);
	OFFSET(REG_E2,			pt_regs, e2);
	OFFSET(REG_E3,			pt_regs, e3);
	OFFSET(REG_E4,			pt_regs, e4);
	OFFSET(REG_E5,			pt_regs, e5);
	OFFSET(REG_E6,			pt_regs, e6);
	OFFSET(REG_E7,			pt_regs, e7);
	OFFSET(REG_SP,			pt_regs, sp);
	OFFSET(REG_EPSW,		pt_regs, epsw);
	OFFSET(REG_PC,			pt_regs, pc);
	OFFSET(REG_LAR,			pt_regs, lar);
	OFFSET(REG_LIR,			pt_regs, lir);
	OFFSET(REG_MDR,			pt_regs, mdr);
	OFFSET(REG_MCVF,		pt_regs, mcvf);
	OFFSET(REG_MCRL,		pt_regs, mcrl);
	OFFSET(REG_MCRH,		pt_regs, mcrh);
	OFFSET(REG_MDRQ,		pt_regs, mdrq);
	OFFSET(REG_ORIG_D0,		pt_regs, orig_d0);
	OFFSET(REG_NEXT,		pt_regs, next);
	DEFINE(REG__END,		sizeof(struct pt_regs));
	BLANK();

	OFFSET(THREAD_UREGS,		thread_struct, uregs);
	OFFSET(THREAD_PC,		thread_struct, pc);
	OFFSET(THREAD_SP,		thread_struct, sp);
	OFFSET(THREAD_A3,		thread_struct, a3);
	OFFSET(THREAD_USP,		thread_struct, usp);
	OFFSET(THREAD_FRAME,		thread_struct, __frame);
	BLANK();

	DEFINE(CLONE_VM_asm,		CLONE_VM);
	DEFINE(CLONE_FS_asm,		CLONE_FS);
	DEFINE(CLONE_FILES_asm,		CLONE_FILES);
	DEFINE(CLONE_SIGHAND_asm,	CLONE_SIGHAND);
	DEFINE(CLONE_UNTRACED_asm,	CLONE_UNTRACED);
	DEFINE(SIGCHLD_asm,		SIGCHLD);
	BLANK();

	OFFSET(EXEC_DOMAIN_handler,	exec_domain, handler);
	OFFSET(RT_SIGFRAME_sigcontext,	rt_sigframe, uc.uc_mcontext);

	DEFINE(PAGE_SIZE_asm,		PAGE_SIZE);

	OFFSET(__rx_buffer,		mn10300_serial_port, rx_buffer);
	OFFSET(__rx_inp,		mn10300_serial_port, rx_inp);
	OFFSET(__rx_outp,		mn10300_serial_port, rx_outp);
	OFFSET(__tx_info_buffer,	mn10300_serial_port, uart.info);
	OFFSET(__tx_xchar,		mn10300_serial_port, tx_xchar);
	OFFSET(__tx_break,		mn10300_serial_port, tx_break);
	OFFSET(__intr_flags,		mn10300_serial_port, intr_flags);
	OFFSET(__rx_icr,		mn10300_serial_port, rx_icr);
	OFFSET(__tx_icr,		mn10300_serial_port, tx_icr);
	OFFSET(__tm_icr,		mn10300_serial_port, _tmicr);
	OFFSET(__iobase,		mn10300_serial_port, _iobase);

	DEFINE(__UART_XMIT_SIZE,	UART_XMIT_SIZE);
	OFFSET(__xmit_buffer,		uart_state, xmit.buf);
	OFFSET(__xmit_head,		uart_state, xmit.head);
	OFFSET(__xmit_tail,		uart_state, xmit.tail);
}
Example no. 30
int main(void)
{
  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
#ifdef CONFIG_STACKPROTECTOR
  DEFINE(TSK_STACK_CANARY,	offsetof(struct task_struct, stack_canary));
#endif
  BLANK();
  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
  DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
  DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
  DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
  DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
#ifdef CONFIG_VFP
  DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
#ifdef CONFIG_SMP
  DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
#endif
#endif
#ifdef CONFIG_ARM_THUMBEE
  DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
#endif
#ifdef CONFIG_IWMMXT
  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
#endif
#ifdef CONFIG_CRUNCH
  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
#endif
#ifdef CONFIG_STACKPROTECTOR_PER_TASK
  DEFINE(TI_STACK_CANARY,	offsetof(struct thread_info, stack_canary));
#endif
  DEFINE(THREAD_SZ_ORDER,	THREAD_SIZE_ORDER);
  BLANK();
  DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
  DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
  DEFINE(S_R2,			offsetof(struct pt_regs, ARM_r2));
  DEFINE(S_R3,			offsetof(struct pt_regs, ARM_r3));
  DEFINE(S_R4,			offsetof(struct pt_regs, ARM_r4));
  DEFINE(S_R5,			offsetof(struct pt_regs, ARM_r5));
  DEFINE(S_R6,			offsetof(struct pt_regs, ARM_r6));
  DEFINE(S_R7,			offsetof(struct pt_regs, ARM_r7));
  DEFINE(S_R8,			offsetof(struct pt_regs, ARM_r8));
  DEFINE(S_R9,			offsetof(struct pt_regs, ARM_r9));
  DEFINE(S_R10,			offsetof(struct pt_regs, ARM_r10));
  DEFINE(S_FP,			offsetof(struct pt_regs, ARM_fp));
  DEFINE(S_IP,			offsetof(struct pt_regs, ARM_ip));
  DEFINE(S_SP,			offsetof(struct pt_regs, ARM_sp));
  DEFINE(S_LR,			offsetof(struct pt_regs, ARM_lr));
  DEFINE(S_PC,			offsetof(struct pt_regs, ARM_pc));
  DEFINE(S_PSR,			offsetof(struct pt_regs, ARM_cpsr));
  DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
  DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
  DEFINE(SVC_DACR,		offsetof(struct svc_pt_regs, dacr));
  DEFINE(SVC_ADDR_LIMIT,	offsetof(struct svc_pt_regs, addr_limit));
  DEFINE(SVC_REGS_SIZE,		sizeof(struct svc_pt_regs));
  BLANK();
  DEFINE(SIGFRAME_RC3_OFFSET,	offsetof(struct sigframe, retcode[3]));
  DEFINE(RT_SIGFRAME_RC3_OFFSET, offsetof(struct rt_sigframe, sig.retcode[3]));
  BLANK();
#ifdef CONFIG_CACHE_L2X0
  DEFINE(L2X0_R_PHY_BASE,	offsetof(struct l2x0_regs, phy_base));
  DEFINE(L2X0_R_AUX_CTRL,	offsetof(struct l2x0_regs, aux_ctrl));
  DEFINE(L2X0_R_TAG_LATENCY,	offsetof(struct l2x0_regs, tag_latency));
  DEFINE(L2X0_R_DATA_LATENCY,	offsetof(struct l2x0_regs, data_latency));
  DEFINE(L2X0_R_FILTER_START,	offsetof(struct l2x0_regs, filter_start));
  DEFINE(L2X0_R_FILTER_END,	offsetof(struct l2x0_regs, filter_end));
  DEFINE(L2X0_R_PREFETCH_CTRL,	offsetof(struct l2x0_regs, prefetch_ctrl));
  DEFINE(L2X0_R_PWR_CTRL,	offsetof(struct l2x0_regs, pwr_ctrl));
  BLANK();
#endif
#ifdef CONFIG_CPU_HAS_ASID
  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
  BLANK();
#endif
  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
  BLANK();
  DEFINE(VM_EXEC,	       	VM_EXEC);
  BLANK();
  DEFINE(PAGE_SZ,	       	PAGE_SIZE);
  BLANK();
  DEFINE(SYS_ERROR0,		0x9f0000);
  BLANK();
  DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
  DEFINE(MACHINFO_TYPE,		offsetof(struct machine_desc, nr));
  DEFINE(MACHINFO_NAME,		offsetof(struct machine_desc, name));
  BLANK();
  DEFINE(PROC_INFO_SZ,		sizeof(struct proc_info_list));
  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
  DEFINE(PROCINFO_MM_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
  DEFINE(PROCINFO_IO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_io_mmu_flags));
  BLANK();
#ifdef MULTI_DABORT
  DEFINE(PROCESSOR_DABT_FUNC,	offsetof(struct processor, _data_abort));
#endif
#ifdef MULTI_PABORT
  DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
#endif
#ifdef MULTI_CPU
  DEFINE(CPU_SLEEP_SIZE,	offsetof(struct processor, suspend_size));
  DEFINE(CPU_DO_SUSPEND,	offsetof(struct processor, do_suspend));
  DEFINE(CPU_DO_RESUME,		offsetof(struct processor, do_resume));
#endif
#ifdef MULTI_CACHE
  DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
#endif
#ifdef CONFIG_ARM_CPU_SUSPEND
  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
  BLANK();
  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
  BLANK();
  DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
  DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
  BLANK();
#ifdef CONFIG_KVM_ARM_HOST
  DEFINE(VCPU_GUEST_CTXT,	offsetof(struct kvm_vcpu, arch.ctxt));
  DEFINE(VCPU_HOST_CTXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
  DEFINE(CPU_CTXT_VFP,		offsetof(struct kvm_cpu_context, vfp));
  DEFINE(CPU_CTXT_GP_REGS,	offsetof(struct kvm_cpu_context, gp_regs));
  DEFINE(GP_REGS_USR,		offsetof(struct kvm_regs, usr_regs));
#endif
  BLANK();
#ifdef CONFIG_VDSO
  DEFINE(VDSO_DATA_SIZE,	sizeof(union vdso_data_store));
#endif
  BLANK();
#ifdef CONFIG_ARM_MPU
  DEFINE(MPU_RNG_INFO_RNGS,	offsetof(struct mpu_rgn_info, rgns));
  DEFINE(MPU_RNG_INFO_USED,	offsetof(struct mpu_rgn_info, used));

  DEFINE(MPU_RNG_SIZE,		sizeof(struct mpu_rgn));
  DEFINE(MPU_RGN_DRBAR,	offsetof(struct mpu_rgn, drbar));
  DEFINE(MPU_RGN_DRSR,	offsetof(struct mpu_rgn, drsr));
  DEFINE(MPU_RGN_DRACR,	offsetof(struct mpu_rgn, dracr));
  DEFINE(MPU_RGN_PRBAR,	offsetof(struct mpu_rgn, prbar));
  DEFINE(MPU_RGN_PRLAR,	offsetof(struct mpu_rgn, prlar));
#endif
  return 0; 
}