/* All CPUs including BSP will run the following function. */
void soc_core_init(struct device *cpu)
{
        /* Clear out pending MCEs */
        /* TODO(adurbin): This should only be done on a cold boot. Also, some
         * of these banks are core vs package scope. For now every CPU clears
         * every bank. */
        mca_configure(NULL);

        /* Enable the local CPU apics */
        enable_lapic_tpr();
        setup_lapic();

        /* Configure c-state interrupt response time */
        configure_c_states();

        /* Configure Enhanced SpeedStep and Thermal Sensors */
        configure_misc();

        /* Configure Intel Speed Shift */
        configure_isst();

        /* Enable Direct Cache Access */
        configure_dca_cap();

        /* Set energy policy */
        set_energy_perf_bias(ENERGY_POLICY_NORMAL);

        /* Enable Turbo */
        enable_turbo();
}
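/*
 * For reference, a minimal sketch of what an enable_turbo() along these
 * lines typically does on Intel parts: clear the Turbo Mode Disable bit
 * (bit 38) of IA32_MISC_ENABLE (0x1a0). The msr_t split-register type and
 * the rdmsr()/wrmsr() helpers follow coreboot conventions; the names and
 * shape here are illustrative, not the tree's exact implementation.
 */
#define IA32_MISC_ENABLE        0x1a0
#define H_MISC_DISABLE_TURBO    (1 << 6)        /* bit 38 lives in the high dword */

static void enable_turbo_sketch(void)
{
        msr_t msr = rdmsr(IA32_MISC_ENABLE);

        msr.hi &= ~H_MISC_DISABLE_TURBO;        /* 0 = turbo allowed */
        wrmsr(IA32_MISC_ENABLE, msr);
}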
void restore_defaults(void)
{
        // No one else can enter now.
        in_handler = 1;
        int package;

        // Reset all limits.
        for (package = 0; package < NUM_PACKAGES; package++) {
                write_msr(package, MSR_PKG_POWER_LIMIT, 0x6845000148398);
                /* These are currently locked out:
                write_msr(package, MSR_PP0_POWER_LIMIT, 0);
                #ifdef ARCH_062D
                write_msr(package, MSR_DRAM_POWER_LIMIT, 0);
                #endif
                */
                // The default is turbo boost enabled.
                enable_turbo(package);
        }

        // Close the /dev/cpu/*/msr files, in case they are open.
        finalize_msr();

        // Now exit. Since we never return, in_handler stays 1 until the
        // process exits, so no one else can get in while it is 1.
        //printf("In_handler is %d", in_handler);
        _exit(EXIT_FAILURE);
}
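/*
 * The magic constant 0x6845000148398 written above packs both RAPL package
 * power-limit windows. Below is a sketch of the field layout per the Intel
 * SDM for MSR_PKG_POWER_LIMIT (0x610); the raw power values are in units of
 * 2^-PU watts, where PU comes from MSR_RAPL_POWER_UNIT (0x606). This helper
 * is illustrative and not part of this code base. With the constant above
 * and the common 1/8 W power unit, PL1 decodes to 920/8 = 115 W and PL2 to
 * 1104/8 = 138 W.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_pkg_power_limit(uint64_t v)
{
        printf("PL1 raw power   : %llu\n", (unsigned long long)(v & 0x7fff));
        printf("PL1 enabled     : %llu\n", (unsigned long long)((v >> 15) & 1));
        printf("PL1 clamping    : %llu\n", (unsigned long long)((v >> 16) & 1));
        printf("PL1 time window : %llu\n", (unsigned long long)((v >> 17) & 0x7f));
        printf("PL2 raw power   : %llu\n", (unsigned long long)((v >> 32) & 0x7fff));
        printf("PL2 enabled     : %llu\n", (unsigned long long)((v >> 47) & 1));
        printf("PL2 clamping    : %llu\n", (unsigned long long)((v >> 48) & 1));
        printf("PL2 time window : %llu\n", (unsigned long long)((v >> 49) & 0x7f));
        printf("locked          : %llu\n", (unsigned long long)((v >> 63) & 1));
}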
void rapl_finalize(struct rapl_state_s *s, int reset_limits)
{
        int package;

        if (s == NULL) {
                printf("\n Error: State pointer should not be null. Something went wrong");
                s = &no_caller_rapl_state;
        }
        if (s->f == NULL) {
                printf("\n Error: File pointer should not be null. Something went wrong");
                return;
        }
        if (s->initializedTick) {
                uint64_t tsc = rdtsc();
                // Require at least 10ms between ticks.
                if (tsc_delta(&lastNonzeroTick, &tsc, &tsc_rate) > 0.01)
                        rapl_tick(s, 0);
        }

        for (package = 0; package < NUM_PACKAGES; package++) {
                get_all_status(package, s);
                if (reset_limits) {
                        // Reset all limits.
                        // This is currently the default limit on rzmerl.
                        printf("\nRESETTING LIMITS\n");
                        write_msr(package, MSR_PKG_POWER_LIMIT, APPRO_DEFAULT_PKG_POWER_LIMIT);
                        /* These are currently locked out:
                        write_msr(package, MSR_PP0_POWER_LIMIT, 0);
                        #ifdef ARCH_062D
                        write_msr(package, MSR_DRAM_POWER_LIMIT, 0);
                        #endif
                        */
                        // We had disabled turbo; the default is to leave it enabled.
                        enable_turbo(package);
                }
        }

        // Now the print statement from hell. Call it only if this is not a
        // dry run, i.e. if we are in read-only or read-write mode.
        // Otherwise, the file should stay empty.
        if (s->mode.dry_run_flag == 1 && s->mode.read_only_flag == 0 &&
            s->mode.read_write_flag == 0) {
                fprintf(stdout, "\nIn DRY_RUN mode.\n");
                finalize_msr();
        } else {
                // This is either read-only or read-write mode.
                print_rapl_state(s);
                finalize_msr();
        }
}
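/*
 * tsc_delta() above is assumed to convert a pair of rdtsc() samples into
 * elapsed seconds using a calibrated TSC rate in cycles per second. A
 * minimal sketch under that assumption; the real helper may additionally
 * update lastNonzeroTick or handle counter wraparound.
 */
static double tsc_delta_sketch(const uint64_t *then, const uint64_t *now,
                               const double *rate)
{
        return (double)(*now - *then) / *rate;  /* seconds */
}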
static void baytrail_core_init(device_t cpu)
{
        printk(BIOS_DEBUG, "Init BayTrail core.\n");

        /* On Bay Trail the turbo disable bit is actually scoped at the
         * building block level -- not the package. For non-BSP cores that
         * are within a building block, enable turbo. The cores within the
         * BSP's building block will just see it already enabled and move
         * on. */
        if (lapicid())
                enable_turbo();

        /* Set core MSRs */
        reg_script_run(core_msr_script);

        /* Set this core to max frequency ratio */
        set_max_freq();
}
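/*
 * set_max_freq() requests the highest non-turbo ratio through
 * IA32_PERF_CTL. A generic sketch: on most Intel cores the maximum
 * non-turbo ratio sits in MSR_PLATFORM_INFO (0xce) bits 15:8 and is
 * requested by writing it into IA32_PERF_CTL (0x199) bits 15:8. Bay Trail
 * actually derives its ratios from SoC-specific MSRs, so treat this as an
 * illustration of the mechanism, not the exact implementation.
 */
#define MSR_PLATFORM_INFO       0xce
#define IA32_PERF_CTL           0x199

static void set_max_freq_sketch(void)
{
        msr_t platform_info = rdmsr(MSR_PLATFORM_INFO);
        msr_t perf_ctl;

        perf_ctl.lo = platform_info.lo & 0xff00;        /* max non-turbo ratio */
        perf_ctl.hi = 0;
        wrmsr(IA32_PERF_CTL, perf_ctl);
}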
void soc_init_cpus(device_t dev)
{
        struct bus *cpu_bus = dev->link_list;
        const struct pattrs *pattrs = pattrs_get();
        struct mp_params mp_params;
        void *default_smm_area;
        uint32_t bsmrwac;

        printk(BIOS_SPEW, "%s/%s ( %s )\n", __FILE__, __func__, dev_name(dev));

        /* Set up MTRRs based on physical address size. */
        x86_setup_fixed_mtrrs();
        x86_setup_var_mtrrs(pattrs->address_bits, 2);
        x86_mtrr_check();

        mp_params.num_cpus = pattrs->num_cpus;
        mp_params.parallel_microcode_load = 1;
        mp_params.adjust_apic_id = adjust_apic_id;
        mp_params.flight_plan = &mp_steps[0];
        mp_params.num_records = ARRAY_SIZE(mp_steps);
        mp_params.microcode_pointer = pattrs->microcode_patch;

        default_smm_area = backup_default_smm_area();

        /*
         * Configure the BUNIT to allow dirty cache line evictions in non-SMM
         * mode for the lines that were dirtied while in SMM mode. Otherwise
         * the writes would be silently dropped.
         */
        bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
        iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);

        /* Set package MSRs */
        reg_script_run(package_msr_script);

        /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
        enable_turbo();

        if (mp_init(cpu_bus, &mp_params))
                printk(BIOS_ERR, "MP initialization failure.\n");

        restore_default_smm_area(default_smm_area);
}
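/*
 * mp_steps above is the MP init "flight plan": an ordered array of records,
 * each pairing a callback run on the APs with one run on the BSP, where a
 * blocking record makes the BSP wait until every AP has checked in. A
 * hypothetical two-record plan in the style of the older coreboot MP init
 * API (MP_FR_BLOCK_APS takes ap_func, ap_arg, bsp_func, bsp_arg); the
 * SoC's real mp_steps differs, so all names here are illustrative.
 */
static void per_cpu_setup(void *unused)
{
        /* Per-CPU initialization; runs on each CPU reaching this record. */
}

static void bsp_followup(void *unused)
{
        /* BSP-only work done once all APs have parked. */
}

static struct mp_flight_record mp_steps_sketch[] = {
        MP_FR_BLOCK_APS(per_cpu_setup, NULL, per_cpu_setup, NULL),
        MP_FR_BLOCK_APS(NULL, NULL, bsp_followup, NULL),
};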
static void soc_core_init(device_t cpu)
{
        printk(BIOS_SPEW, "%s/%s ( %s )\n", __FILE__, __func__, dev_name(cpu));
        printk(BIOS_DEBUG, "Init Braswell core.\n");

        /*
         * The turbo disable bit is actually scoped at the building block
         * level -- not the package. For non-BSP cores that are within a
         * building block, enable turbo. The cores within the BSP's building
         * block will just see it already enabled and move on.
         */
        if (lapicid())
                enable_turbo();

        /* Set core MSRs */
        reg_script_run(core_msr_script);

        /* Set this core to max frequency ratio */
        set_max_freq();
}
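/*
 * core_msr_script is a coreboot reg_script: a declarative table of register
 * operations that reg_script_run() walks and applies. A hypothetical
 * example using the reg_script MSR macros (REG_MSR_OR, REG_SCRIPT_END);
 * the MSR and bit chosen here are illustrative, not the SoC's actual
 * script contents.
 */
static const struct reg_script example_msr_script[] = {
        /* Hypothetical: OR a feature-enable bit into an MSR. */
        REG_MSR_OR(0x1a0, (1 << 16)),
        REG_SCRIPT_END,
};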
static void pre_mp_init(void)
{
        uint32_t bsmrwac;

        /* Set up MTRRs based on physical address size. */
        x86_setup_mtrrs_with_detect();
        x86_mtrr_check();

        /*
         * Configure the BUNIT to allow dirty cache line evictions in non-SMM
         * mode for the lines that were dirtied while in SMM mode. Otherwise
         * the writes would be silently dropped.
         */
        bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
        iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);

        /* Set package MSRs */
        reg_script_run(package_msr_script);

        /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
        enable_turbo();
}