Example #1
/**
 * Sysfs registrations
 */
int flicker_sysfs_init(void)
{
    int rc = 0;
    int i;

    assert(NULL != g_pal);

    if(get_cpu_vendor() == CPU_VENDOR_INTEL) {
        assert(NULL != g_acmod);
        assert(NULL != g_mle_ptab);

        g_acmod_size = 0;
    }

    flicker_sysfs_data_state = IDLE;

    /*
     * Create a simple kobject with the name of "flicker",
     * located under /sys/kernel/
     *
     * As this is a simple directory, no uevent will be sent to
     * userspace.  That is why this function should not be used for
     * any type of dynamic kobjects, where the name and number are
     * not known ahead of time.
     */
    flicker_kobj = kobject_create_and_add("flicker", kernel_kobj);
    if (!flicker_kobj)
        return -ENOMEM;

    /* register the remaining files, rolling back on error */
    if((rc = sysfs_create_file(flicker_kobj, &attr_control.attr))) {
        error("Error [%d] registering flicker sysfs control", rc);
        kobject_put(flicker_kobj); /* drop our reference so the kobject is freed */
        return rc;
    }

    for (i = 0; flicker_bin_attrs[i]; i++) {
        rc = sysfs_create_bin_file(flicker_kobj,
                                   flicker_bin_attrs[i]);
        if (rc) {
            error("Error [%d] registering flicker sysfs binary file", rc);
            while (--i >= 0)
                sysfs_remove_bin_file(flicker_kobj,
                                      flicker_bin_attrs[i]);
            sysfs_remove_file(flicker_kobj, &attr_control.attr);
            kobject_put(flicker_kobj);
            return rc;
        }
    }

    logit("flicker: sysfs entries registered successfully");

    /* On success we intentionally keep the kobject reference so the
     * sysfs entries persist; module teardown should kobject_put() it. */

    return rc;
}
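
For context, attr_control above is presumably a struct kobj_attribute; a minimal sketch under that assumption (the handler names are hypothetical, not taken from the flicker sources):

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* control_show/control_store are hypothetical handler names. */
static ssize_t control_show(struct kobject *kobj, struct kobj_attribute *attr,
                            char *buf)
{
    return sprintf(buf, "%d\n", flicker_sysfs_data_state);
}

static ssize_t control_store(struct kobject *kobj, struct kobj_attribute *attr,
                             const char *buf, size_t count)
{
    /* parse the command written by userspace here */
    return count;
}

static struct kobj_attribute attr_control =
    __ATTR(control, 0644, control_show, control_store);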
Example #2
void AppDelegate::initDeviceInfo()
{
#if (CC_TARGET_PLATFORM == CC_PLATFORM_WIN32)
	// Get machine info
	char buff[64] = "";
	get_cpu_vendor(buff);
	gGameManager->GetMachineInfo()._cpu_vendor = buff;
	get_cpu_brand(buff);
	gGameManager->GetMachineInfo()._cpu_name = buff;
	gGameManager->GetMachineInfo()._os = get_os_version();
#endif

#if (CC_TARGET_PLATFORM == CC_PLATFORM_ANDROID)
	// Get the CPU vendor and the CPU model (brand)
	char cpu_vendor[128] = "";
	char cpu_name[128] = "";
	get_cpu_info(cpu_vendor,cpu_name);
	gGameManager->GetMachineInfo()._cpu_vendor = (cpu_vendor[0] == '\0' ? "Unknown" : cpu_vendor);
	gGameManager->GetMachineInfo()._cpu_name = (cpu_name[0] == '\0' ? "Unknown" : cpu_name);
	CCLOG("---------cpu_vendor=%s cpu_name=%s--------------",cpu_vendor,cpu_name);
	string vendor,num,os,abi;
	get_machine_info(vendor,num,os,abi);
	gGameManager->GetMachineInfo()._vendor = vendor;
	gGameManager->GetMachineInfo()._num = num;
	gGameManager->GetMachineInfo()._os = os;
	gGameManager->GetMachineInfo()._cpu_abi = abi;
#endif

#if (CC_TARGET_PLATFORM == CC_PLATFORM_IOS)
//        UIDevice *device = [[UIDevice alloc] init];
//        NSString *name = device.name;        // the device owner's name
//        NSString *model = [NSString stringWithFormat:@"%@,%@", device.model, device.localizedModel];    // device model + localized model
//        NSString *os = [NSString stringWithFormat:@"%@,%@", device.systemName, device.systemVersion];   // running system name + system version
    
        gGameManager->GetMachineInfo()._vendor = "apple";
        gGameManager->GetMachineInfo()._num = "iphone";
        gGameManager->GetMachineInfo()._os = "ios";
    
        char cpu_vendor[128] = "";
        char cpu_name[128] = "";
        get_cpu_info(cpu_vendor,cpu_name);
        gGameManager->GetMachineInfo()._cpu_vendor = (cpu_vendor[0] == '\0' ? "Unknown" : cpu_vendor);
        gGameManager->GetMachineInfo()._cpu_name = (cpu_name[0] == '\0' ? "Unknown" : cpu_name);
#endif
}
Example #3
static void __init setup_boot_cpu_data(void)
{
	int dummy, eax;

	/* get vendor info */
	cpuid(0, &boot_cpu_data.cpuid_level,
	      (int *)&boot_cpu_data.x86_vendor_id[0],
	      (int *)&boot_cpu_data.x86_vendor_id[8],
	      (int *)&boot_cpu_data.x86_vendor_id[4]);

	/* get cpu type */
	cpuid(1, &eax, &dummy, &dummy, (int *) &boot_cpu_data.x86_capability);
	boot_cpu_data.x86 = (eax >> 8) & 0xf;
	boot_cpu_data.x86_model = (eax >> 4) & 0xf;
	boot_cpu_data.x86_mask = eax & 0xf;

	/* Also determine cpu vendor */
	get_cpu_vendor(&boot_cpu_data);
}
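
The EBX/EDX/ECX store order above is what lays out the 12-byte vendor string correctly; for comparison, a userspace sketch of the same leaf-0 query using the compiler-provided <cpuid.h> header:

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    char vendor[13];

    if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
        return 1;

    /* Leaf 0 returns the vendor string in EBX, EDX, ECX order,
     * matching the 0/4/8 byte offsets used in the kernel code above. */
    memcpy(vendor + 0, &ebx, 4);
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';

    printf("%s\n", vendor);   /* e.g. "GenuineIntel" or "AuthenticAMD" */
    return 0;
}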
Example #4
/**
 * Executes the issued command
 */
void console_exec(char *buf) {
    if(strncmp(buf, "cd", 2) == 0) {
        console_cd(dir, buf);
    } else if(strncmp(buf, "start", 5) == 0) {
        console_start(dir, buf);
    } else if(strncmp(buf, "read", 4) == 0) {
        console_read(dir, buf);
    } else if(strncmp(buf, "write", 5) == 0) {
        console_write(dir, buf);
    } else if(strncmp(buf, "touch", 5) == 0) {
        console_touch(dir, buf);
    } else if(strncmp(buf, "delete", 6) == 0) {
        console_delete(dir, buf);
    } else if(strcmp(buf, "hoho") == 0) {
        printk("hoho\n");
    } else if(strcmp(buf, "help") == 0) {
        printk("Help:\nhoho - prints hoho\nhelp - shows help\nmeminfo - prints RAM info\ncpuinfo - shows CPU info\nls - shows filesystem devices\nread - reads a file\nstart - starts a program\nclear - clears the screen\nhalt - shuts down\nreboot - reboots the pc\n");
    } else if(strcmp(buf, "meminfo") == 0) {
        print_meminfo();
    } else if(strcmp(buf, "cpuinfo") == 0) {
        printk("%s\n", get_cpu_vendor(0));
    } else if(strcmp(buf, "ls") == 0) {
        if(dir[0] == 0) {
            vfs_ls();
        } else {
            vfs_ls_dir(dir);
        }
    } else if(strcmp(buf, "clear") == 0) {
        clear();
    } else if(strcmp(buf, "proc") == 0) {
        print_procs();
    } else if(strcmp(buf, "halt") == 0) {
        printk("Shutting down\n");
        halt();
        while(1);
    } else if(strcmp(buf, "reboot") == 0) {
        printk("Rebooting\n");
        reboot();
    } else {
        printk("Command not found\n");
    }
}
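
The strncmp prefix checks above deliberately match commands followed by arguments (e.g. "cd mydir"). A hypothetical table-driven form of the same dispatch, reusing the console_* handlers:

#include <string.h>

/* Handlers from the original console code. */
extern void console_cd(char *dir, char *buf);
extern void console_start(char *dir, char *buf);
extern void console_read(char *dir, char *buf);
extern void console_write(char *dir, char *buf);
extern void console_touch(char *dir, char *buf);
extern void console_delete(char *dir, char *buf);

struct command {
    const char *name;
    size_t      prefix_len;                 /* length matched by strncmp */
    void      (*handler)(char *dir, char *buf);
};

static const struct command commands[] = {
    { "cd",     2, console_cd     },
    { "start",  5, console_start  },
    { "read",   4, console_read   },
    { "write",  5, console_write  },
    { "touch",  5, console_touch  },
    { "delete", 6, console_delete },
};

/* Returns 1 if a prefix command handled the buffer, 0 otherwise;
 * the exact-match commands would still fall through as before. */
static int dispatch_prefix(char *dir, char *buf)
{
    size_t i;

    for (i = 0; i < sizeof(commands) / sizeof(commands[0]); i++) {
        if (strncmp(buf, commands[i].name, commands[i].prefix_len) == 0) {
            commands[i].handler(dir, buf);
            return 1;
        }
    }
    return 0;
}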
Example #5
static int __init init_flicker(void)
{
  int rv = 0;
  logit("Flicker module initializing.");

  if(get_cpu_vendor() == CPU_VENDOR_INTEL) {
      /* Don't bother to load the module if the platform does not support
       * the TXT extensions.
       *
       * TODO: Is this a better home for the checks in prepare_for_launch?
       */
      rv = txt_verify_platform();
      if (!rv) {
          error("Intel platform does not support TXT extensions.");
          return -ENODEV; /* rv == 0 here would falsely report success */
      }
  } else {
      /* On AMD, we need to clear the Microcode on all CPUs. This
       * introduces the requirement that this module is loaded before
       * CPU hotplug disables all the other CPUs, since otherwise they
       * won't all be cleared. */
      do_amducodeclear();
  }

  /* allocate memory for PAL and (on Intel) ACmod */
  rv = do_allocations();
  if (rv) {
    error("Error during alloc_pal(): rv = [%d]", rv);
    return rv;
  }

  /* Initialize sysfs entries */
  rv = flicker_sysfs_init();
  if(rv) {
    error("Error initializing sysfs: rv = [%d]", rv);
    free_allocations();
    return rv;
  }

  assert(0 == rv);
  return rv;
}
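
A sketch of how this init routine would typically be wired up as a module entry point; flicker_sysfs_cleanup is a hypothetical teardown counterpart, not taken from the sources:

#include <linux/init.h>
#include <linux/module.h>

/* flicker_sysfs_cleanup is assumed to remove the sysfs entries and
 * drop the kobject reference created during init. */
static void __exit exit_flicker(void)
{
    flicker_sysfs_cleanup();
    free_allocations();
}

module_init(init_flicker);
module_exit(exit_flicker);
MODULE_LICENSE("GPL");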
Example #6
static int __init detect_init_APIC (void)
{
	u32 h, l, features;
	extern void get_cpu_vendor(struct cpuinfo_x86*);

	/* Disabled by DMI scan or kernel option? */
	if (enable_local_apic < 0)
		return -1;

	/* Workaround for us being called before identify_cpu(). */
	get_cpu_vendor(&boot_cpu_data);

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
		    (boot_cpu_data.x86 == 15))
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 ||
		    (boot_cpu_data.x86 == 15 && (cpu_has_apic || enable_local_apic > 0)) ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Over-ride BIOS and try to enable LAPIC
		 * only if "lapic" specified
		 */
		if (enable_local_apic != 1)
			goto no_apic;
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 and AMD K7 (Model > 1).
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			apic_printk(APIC_VERBOSE, "Local APIC disabled "
					"by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
			enabled_via_apicbase = 1;
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/* The BIOS may have set up the APIC at some other address */
	rdmsr(MSR_IA32_APICBASE, l, h);
	if (l & MSR_IA32_APICBASE_ENABLE)
		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	apic_printk(APIC_VERBOSE, "Found and enabled local APIC!\n");

	apic_pm_activate();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}
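
Both versions of detect_init_APIC manipulate the IA32_APIC_BASE MSR, whose low word carries the enable bit (bit 11) and the base-address field (bits 12 and up). A standalone sketch of the re-enable step, with constants as defined in the Linux headers:

#include <stdint.h>

#define MSR_IA32_APICBASE_ENABLE  (1u << 11)     /* APIC global enable bit */
#define MSR_IA32_APICBASE_BASE    0xFFFFF000u    /* base-address field */
#define APIC_DEFAULT_PHYS_BASE    0xFEE00000u

/* Pure function mirroring the re-enable logic above: given the low
 * 32 bits of IA32_APIC_BASE, return the value to write back. */
static uint32_t reenable_lapic(uint32_t l)
{
    if (!(l & MSR_IA32_APICBASE_ENABLE)) {
        l &= ~MSR_IA32_APICBASE_BASE;
        l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
    }
    return l;
}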
Example #7
static int __init detect_init_APIC (void)
{
	u32 h, l, features;
	extern void get_cpu_vendor(struct cpuinfo_x86*);

	/* Workaround for us being called before identify_cpu(). */
	get_cpu_vendor(&boot_cpu_data);

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 ||
		    (boot_cpu_data.x86 == 15 && cpu_has_apic) ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 and AMD K7 (Model > 1).
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			printk("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_physical_apicid = 0;
	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	printk("Found and enabled local APIC!\n");

	apic_pm_init1();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}
Example #8
static void update_domain_cpuid_info(struct domain *d,
                                     const xen_domctl_cpuid_t *ctl)
{
    switch ( ctl->input[0] )
    {
    case 0: {
        union {
            typeof(boot_cpu_data.x86_vendor_id) str;
            struct {
                uint32_t ebx, edx, ecx;
            } reg;
        } vendor_id = {
            .reg = {
                .ebx = ctl->ebx,
                .edx = ctl->edx,
                .ecx = ctl->ecx
            }
        };
        int old_vendor = d->arch.x86_vendor;

        d->arch.x86_vendor = get_cpu_vendor(vendor_id.str, gcv_guest);

        if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
        {
            struct vcpu *v;

            for_each_vcpu( d, v )
                hvm_update_guest_vendor(v);
        }

        break;
    }

    case 1:
        d->arch.x86 = (ctl->eax >> 8) & 0xf;
        if ( d->arch.x86 == 0xf )
            d->arch.x86 += (ctl->eax >> 20) & 0xff;
        d->arch.x86_model = (ctl->eax >> 4) & 0xf;
        if ( d->arch.x86 >= 0x6 )
            d->arch.x86_model |= (ctl->eax >> 12) & 0xf0;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_1cd) == LCAP_1cd) )
        {
            uint64_t mask = cpuidmask_defaults._1cd;
            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_1c];
            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_1d];

            /*
             * Must expose hosts HTT and X2APIC value so a guest using native
             * CPUID can correctly interpret other leaves which cannot be
             * masked.
             */
            if ( cpu_has_x2apic )
                ecx |= cpufeat_mask(X86_FEATURE_X2APIC);
            if ( cpu_has_htt )
                edx |= cpufeat_mask(X86_FEATURE_HTT);

            switch ( boot_cpu_data.x86_vendor )
            {
            case X86_VENDOR_INTEL:
                /*
                 * Intel masking MSRs are documented as AND masks.
                 * Experimentally, they are applied after OSXSAVE and APIC
                 * are fast-forwarded from real hardware state.
                 */
                mask &= ((uint64_t)edx << 32) | ecx;

                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
                else
                    ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)edx << 32) | ecx;
                break;

            case X86_VENDOR_AMD:
                mask &= ((uint64_t)ecx << 32) | edx;

                /*
                 * AMD masking MSRs are documented as overrides.
                 * Experimentally, fast-forwarding of the OSXSAVE and APIC
                 * bits from real hardware state only occurs if the MSR has
                 * the respective bits set.
                 */
                if ( ecx & cpufeat_mask(X86_FEATURE_XSAVE) )
                    ecx = cpufeat_mask(X86_FEATURE_OSXSAVE);
                else
                    ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)ecx << 32) | edx;
                break;
            }

            d->arch.pv_domain.cpuidmasks->_1cd = mask;
        }
        break;

    case 6:
        if ( is_pv_domain(d) && ((levelling_caps & LCAP_6c) == LCAP_6c) )
        {
            uint64_t mask = cpuidmask_defaults._6c;

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
                mask &= (~0ULL << 32) | ctl->ecx;

            d->arch.pv_domain.cpuidmasks->_6c = mask;
        }
        break;

    case 7:
        if ( ctl->input[1] != 0 )
            break;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_7ab0) == LCAP_7ab0) )
        {
            uint64_t mask = cpuidmask_defaults._7ab0;
            uint32_t eax = ctl->eax;
            uint32_t ebx = ctl->ebx & pv_featureset[FEATURESET_7b0];

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
                mask &= ((uint64_t)eax << 32) | ebx;

            d->arch.pv_domain.cpuidmasks->_7ab0 = mask;
        }
        break;

    case 0xd:
        if ( ctl->input[1] != 1 )
            break;

        if ( is_pv_domain(d) && ((levelling_caps & LCAP_Da1) == LCAP_Da1) )
        {
            uint64_t mask = cpuidmask_defaults.Da1;
            uint32_t eax = ctl->eax & pv_featureset[FEATURESET_Da1];

            if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
                mask &= (~0ULL << 32) | eax;

            d->arch.pv_domain.cpuidmasks->Da1 = mask;
        }
        break;

    case 0x80000001:
        if ( is_pv_domain(d) && ((levelling_caps & LCAP_e1cd) == LCAP_e1cd) )
        {
            uint64_t mask = cpuidmask_defaults.e1cd;
            uint32_t ecx = ctl->ecx & pv_featureset[FEATURESET_e1c];
            uint32_t edx = ctl->edx & pv_featureset[FEATURESET_e1d];

            /*
             * Must expose hosts CMP_LEGACY value so a guest using native
             * CPUID can correctly interpret other leaves which cannot be
             * masked.
             */
            if ( cpu_has_cmp_legacy )
                ecx |= cpufeat_mask(X86_FEATURE_CMP_LEGACY);

            /* If not emulating AMD, clear the duplicated features in e1d. */
            if ( d->arch.x86_vendor != X86_VENDOR_AMD )
                edx &= ~CPUID_COMMON_1D_FEATURES;

            switch ( boot_cpu_data.x86_vendor )
            {
            case X86_VENDOR_INTEL:
                mask &= ((uint64_t)edx << 32) | ecx;
                break;

            case X86_VENDOR_AMD:
                mask &= ((uint64_t)ecx << 32) | edx;

                /*
                 * Fast-forward bits - Must be set in the masking MSR for
                 * fast-forwarding to occur in hardware.
                 */
                ecx = 0;
                edx = cpufeat_mask(X86_FEATURE_APIC);

                mask |= ((uint64_t)ecx << 32) | edx;
                break;
            }

            d->arch.pv_domain.cpuidmasks->e1cd = mask;
        }
        break;
    }
}
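
The vendor_id union in case 0 relies on the three registers aliasing the string bytes in EBX/EDX/ECX order. A tiny standalone demo of that aliasing (the 12-byte layout is an assumption; the actual x86_vendor_id field is slightly larger):

#include <stdint.h>
#include <stdio.h>

union vendor {
    char str[12];
    struct { uint32_t ebx, edx, ecx; } reg;
};

int main(void)
{
    /* Leaf-0 register values reported by a genuine Intel CPU. */
    union vendor v = { .reg = { .ebx = 0x756e6547,
                                .edx = 0x49656e69,
                                .ecx = 0x6c65746e } };

    printf("%.12s\n", v.str);   /* prints "GenuineIntel" */
    return 0;
}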
Example #9
/**
 * initializes cpuinfo-struct
 * @param print detection-summary is written to stdout when !=0
 */
void init_cpuinfo(cpu_info_t *cpuinfo,int print)
{
    unsigned int i;
    char output[_HW_DETECT_MAX_OUTPUT];

    /* initialize data structure */
    memset(cpuinfo,0,sizeof(cpu_info_t));
    strcpy(cpuinfo->architecture,"unknown");
    strcpy(cpuinfo->vendor,"unknown");
    strcpy(cpuinfo->model_str,"unknown");

    cpuinfo->num_cpus               = num_cpus();
    get_architecture(cpuinfo->architecture, sizeof(cpuinfo->architecture));
    get_cpu_vendor(cpuinfo->vendor, sizeof(cpuinfo->vendor));
    get_cpu_name(cpuinfo->model_str, sizeof(cpuinfo->model_str));
    cpuinfo->family                 = get_cpu_family();
    cpuinfo->model                  = get_cpu_model();
    cpuinfo->stepping               = get_cpu_stepping();
    cpuinfo->num_cores_per_package  = num_cores_per_package();
    cpuinfo->num_threads_per_core   = num_threads_per_core();
    cpuinfo->num_packages           = num_packages();
    cpuinfo->clockrate              = get_cpu_clockrate(1, 0);

    /* setup supported feature list*/
    if(!strcmp(cpuinfo->architecture,"x86_64")) cpuinfo->features   |= X86_64;
    if (feature_available("SMT")) cpuinfo->features                 |= SMT;
    if (feature_available("FPU")) cpuinfo->features                 |= FPU;
    if (feature_available("MMX")) cpuinfo->features                 |= MMX;
    if (feature_available("MMX_EXT")) cpuinfo->features             |= MMX_EXT;
    if (feature_available("SSE")) cpuinfo->features                 |= SSE;
    if (feature_available("SSE2")) cpuinfo->features                |= SSE2;
    if (feature_available("SSE3")) cpuinfo->features                |= SSE3;
    if (feature_available("SSSE3")) cpuinfo->features               |= SSSE3;
    if (feature_available("SSE4.1")) cpuinfo->features              |= SSE4_1;
    if (feature_available("SSE4.2")) cpuinfo->features              |= SSE4_2;
    if (feature_available("SSE4A")) cpuinfo->features               |= SSE4A;
    if (feature_available("ABM")) cpuinfo->features                 |= ABM;
    if (feature_available("POPCNT")) cpuinfo->features              |= POPCNT;
    if (feature_available("AVX")) cpuinfo->features                 |= AVX;
    if (feature_available("AVX2")) cpuinfo->features                |= AVX2;
    if (feature_available("FMA")) cpuinfo->features                 |= FMA;
    if (feature_available("FMA4")) cpuinfo->features                |= FMA4;
    if (feature_available("AES")) cpuinfo->features                 |= AES;
    if (feature_available("AVX512")) cpuinfo->features              |= AVX512;

    /* determine cache details */
    for (i=0; i<(unsigned int)num_caches(0); i++)
    {
        cpuinfo->Cache_shared[cache_level(0,i)-1]=cache_shared(0,i);
        cpuinfo->Cacheline_size[cache_level(0,i)-1]=cacheline_length(0,i);
        if (cpuinfo->Cachelevels < (unsigned int)cache_level(0,i)) { cpuinfo->Cachelevels = cache_level(0,i); }
        switch (cache_type(0,i))
        {
        case UNIFIED_CACHE: {
            cpuinfo->Cache_unified[cache_level(0,i)-1]=1;
            cpuinfo->U_Cache_Size[cache_level(0,i)-1]=cache_size(0,i);
            cpuinfo->U_Cache_Sets[cache_level(0,i)-1]=cache_assoc(0,i);
            break;
        }
        case DATA_CACHE: {
            cpuinfo->Cache_unified[cache_level(0,i)-1]=0;
            cpuinfo->D_Cache_Size[cache_level(0,i)-1]=cache_size(0,i);
            cpuinfo->D_Cache_Sets[cache_level(0,i)-1]=cache_assoc(0,i);
            break;
        }
        case INSTRUCTION_CACHE: {
            cpuinfo->Cache_unified[cache_level(0,i)-1]=0;
            cpuinfo->I_Cache_Size[cache_level(0,i)-1]=cache_size(0,i);
            cpuinfo->I_Cache_Sets[cache_level(0,i)-1]=cache_assoc(0,i);
            break;
        }
        default:
            break;
        }
    }

    /* print a summary */
    if (print)
    {
        fflush(stdout);
        printf("\n  system summary:\n");
        if(cpuinfo->num_packages) printf("    number of processors: %i\n",cpuinfo->num_packages);
        if(cpuinfo->num_cores_per_package) printf("    number of cores per package: %i\n",cpuinfo->num_cores_per_package);
        if(cpuinfo->num_threads_per_core) printf("    number of threads per core: %i\n",cpuinfo->num_threads_per_core);
        if(cpuinfo->num_cpus) printf("    total number of threads: %i\n",cpuinfo->num_cpus);
        printf("\n  processor characteristics:\n");
        printf("    architecture:   %s\n",cpuinfo->architecture);
        printf("    vendor:         %s\n",cpuinfo->vendor);
        printf("    processor-name: %s\n",cpuinfo->model_str);
        printf("    model:          Family %i, Model %i, Stepping %i\n",cpuinfo->family,cpuinfo->model,cpuinfo->stepping);
        printf("    frequency:      %llu MHz\n",cpuinfo->clockrate/1000000);
        fflush(stdout);
        printf("    supported features:\n      -");
        if(cpuinfo->features&X86_64)    printf(" X86_64");
        if(cpuinfo->features&FPU)       printf(" FPU");
        if(cpuinfo->features&MMX)       printf(" MMX");
        if(cpuinfo->features&MMX_EXT)   printf(" MMX_EXT");
        if(cpuinfo->features&SSE)       printf(" SSE");
        if(cpuinfo->features&SSE2)      printf(" SSE2");
        if(cpuinfo->features&SSE3)      printf(" SSE3");
        if(cpuinfo->features&SSSE3)     printf(" SSSE3");
        if(cpuinfo->features&SSE4_1)    printf(" SSE4.1");
        if(cpuinfo->features&SSE4_2)    printf(" SSE4.2");
        if(cpuinfo->features&SSE4A)     printf(" SSE4A");
        if(cpuinfo->features&POPCNT)    printf(" POPCNT");
        if(cpuinfo->features&AVX)       printf(" AVX");
        if(cpuinfo->features&AVX2)      printf(" AVX2");
        if(cpuinfo->features&AVX512)    printf(" AVX512");
        if(cpuinfo->features&FMA)       printf(" FMA");
        if(cpuinfo->features&FMA4)      printf(" FMA4");
        if(cpuinfo->features&AES)       printf(" AES");
        if(cpuinfo->features&SMT)       printf(" SMT");
        printf("    \n");
        if(cpuinfo->Cachelevels)
        {
            printf("    Caches:\n");
            for(i = 0; i < (unsigned int)num_caches(0); i++)
            {
                snprintf(output,sizeof(output),"n/a");
                if (cache_info(0, i, output, sizeof(output)) != -1) printf("      - %s\n",output);
            }
        }
    }
    fflush(stdout);
}
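
A minimal usage sketch, assuming the hardware-detection header that declares cpu_info_t, init_cpuinfo() and the feature-flag constants is available:

#include <stdio.h>
/* #include "hw_detect.h" -- assumed header declaring cpu_info_t,
 * init_cpuinfo() and the feature flags such as AVX2 */

int main(void)
{
    cpu_info_t info;

    init_cpuinfo(&info, 1);   /* nonzero: print the detection summary */

    if (info.features & AVX2)
        printf("AVX2 code path available on %s\n", info.model_str);

    return 0;
}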
Example #10
static int do_allocations(void) {
    uint32_t needed_alloc_size = sizeof(pal_t);

    assert(NULL == g_pal_region);
    assert(NULL == g_pal);
    assert(NULL == g_mle_ptab);
    assert(NULL == g_acmod);

    dbg("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$");

    if(get_cpu_vendor() == CPU_VENDOR_INTEL) {
        dbg("Intel CPU detected; doing additional allocation for ACMod and MLE PTs");
        /* We need more space on Intel for the MLE page tables. */
        needed_alloc_size += sizeof(mle_pt_t);

        /* We need a buffer into which the Authenticated Code module
         * can be stored.  From MLE developer's guide: "System
         * software should not utilize the memory immediately after
         * the SINIT AC module up to the next 4KByte boundary."  We
         * get this for free since kmalloc uses __get_free_pages
         * internally, and always returns 4K-aligned chunks.
         *
         * XXX TODO: Copy it directly into the TXT-specified location
         * without additional buffering. (i.e., the above comment is
         * irrelevant since the AC Mod is copied again before use. XXX
         * is it?) */
        g_acmod = (acmod_t*)kmalloc(sizeof(acmod_t), GFP_KERNEL);

        if(g_acmod == NULL) {
            error("alloc of 0x%08x bytes failed!", sizeof(acmod_t));
            return -ENOMEM;
        }

        dbg("alloc of 0x%08x bytes for acmod at virt 0x%08x.", sizeof(acmod_t), (uint32_t)g_acmod);
        dbg("g_mle_ptab @ 0x%p", g_mle_ptab);
        dbg("->pdpt  @ 0x%p", g_mle_ptab->pdpt);
        dbg("->pd    @ 0x%p", g_mle_ptab->pd);
        dbg("->pt    @ 0x%p", g_mle_ptab->pt);

    }

    dbg("PAGE_SIZE = 0x%08lx (%ld)", PAGE_SIZE, PAGE_SIZE);
    dbg("sizeof(pal_t)= 0x%08x (%d)", sizeof(pal_t), sizeof(pal_t));

    g_pal_region = kmalloc(needed_alloc_size, GFP_KERNEL);

    if(g_pal_region == NULL) {
        error("alloc of %d bytes failed!", needed_alloc_size);
        if(g_acmod) { kfree(g_acmod); g_acmod = NULL; }
        return -ENOMEM;
    }

    dbg("alloc of %d bytes at virt 0x%08x.",
        needed_alloc_size, (uint32_t)g_pal_region);

    /* Verify that we have at least a 128K aligned block */
    if((unsigned long)(g_pal_region) != ((unsigned long)(g_pal_region) & ALIGN_128K)) {
        error("ERROR: memory not aligned!");
        kfree(g_pal_region); g_pal_region = NULL;
        if(g_acmod) { kfree(g_acmod); g_acmod = NULL; }
        return -ENOMEM;
    }

    /* zero the PAL container */ /* slow? necessary? */
    memset(g_pal_region, 0, needed_alloc_size);

    if(needed_alloc_size > sizeof(pal_t)) {
        /* Intel system; assign g_mle_ptab, then g_pal */
        g_mle_ptab = (mle_pt_t*)g_pal_region;
        g_pal = (pal_t*)(g_pal_region + sizeof(mle_pt_t));
        /* safe to print the page-table pointers now that g_mle_ptab is set */
        dbg("g_mle_ptab @ 0x%p", g_mle_ptab);
        dbg("->pdpt  @ 0x%p", g_mle_ptab->pdpt);
        dbg("->pd    @ 0x%p", g_mle_ptab->pd);
        dbg("->pt    @ 0x%p", g_mle_ptab->pt);
    } else {
        /* AMD system; just assign g_pal */
        g_pal = (pal_t*)g_pal_region;
    }

    dbg("g_pal           @ 0x%p", g_pal);
    dbg("g_pal->pal      @ 0x%p", g_pal->pal);
    dbg("&g_pal->reload  @ 0x%p", &(g_pal->reload));
    dbg("g_pal->inputs   @ 0x%p", g_pal->inputs);
    dbg("g_pal->outputs  @ 0x%p", g_pal->outputs);

    build_resume_page_tables(g_pal->pal, g_pal->resume_pagetabs);

    return 0;
}
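
The alignment test near the end is the usual mask idiom; a sketch assuming ALIGN_128K clears the low 17 bits (128 KiB = 2^17):

/* Assumed definition: a mask that clears the low 17 bits. */
#define ALIGN_128K (~((unsigned long)(128 * 1024) - 1))

/* A pointer is 128 KiB-aligned iff masking it changes nothing,
 * which is exactly the comparison do_allocations() performs. */
static int is_128k_aligned(const void *p)
{
    return ((unsigned long)p & ALIGN_128K) == (unsigned long)p;
}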