Example #1
struct guest_pcore *load_guest_pcore(struct proc *p, int guest_pcoreid,
                                     bool *should_vmresume)
{
	struct guest_pcore *gpc;
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	gpc = lookup_guest_pcore(p, guest_pcoreid);
	if (!gpc)
		return 0;
	assert(pcpui->guest_pcoreid == -1);
	spin_lock(&p->vmm.lock);
	if (gpc->cpu != -1) {
		spin_unlock(&p->vmm.lock);
		return 0;
	}
	gpc->cpu = core_id();
	spin_unlock(&p->vmm.lock);
	/* We've got dibs on the gpc; we don't need to hold the lock any longer. */
	pcpui->guest_pcoreid = guest_pcoreid;
	vmx_load_guest_pcore(gpc, should_vmresume);
	/* Load guest's xcr0 */
	lxcr0(gpc->xcr0);

	/* Manual MSR save/restore */
	write_kern_gsbase(gpc->msr_kern_gs_base);
	if (gpc->msr_star != AKAROS_MSR_STAR)
		write_msr(MSR_STAR, gpc->msr_star);
	if (gpc->msr_lstar != AKAROS_MSR_LSTAR)
		write_msr(MSR_LSTAR, gpc->msr_lstar);
	if (gpc->msr_sfmask != AKAROS_MSR_SFMASK)
		write_msr(MSR_SFMASK, gpc->msr_sfmask);

	return gpc;
}
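The guarded writes above skip a relatively expensive wrmsr when the guest's value already matches the kernel default. A minimal sketch of that pattern as a helper (write_msr_unless is a hypothetical name, not part of the code above):

/* Sketch: write an MSR only when the desired value differs from the value
 * assumed to be currently loaded, saving a wrmsr in the common case. */
static inline void write_msr_unless(uint32_t msr, uint64_t want,
                                    uint64_t assumed)
{
	if (want != assumed)
		write_msr(msr, want);
}

With such a helper, the three conditional writes would become calls like write_msr_unless(MSR_STAR, gpc->msr_star, AKAROS_MSR_STAR).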
Example #2
void unload_guest_pcore(struct proc *p, int guest_pcoreid)
{
	struct guest_pcore *gpc;
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	gpc = lookup_guest_pcore(p, guest_pcoreid);
	assert(gpc);
	spin_lock(&p->vmm.lock);
	assert(gpc->cpu != -1);
	vmx_unload_guest_pcore(gpc);
	gpc->cpu = -1;

	/* Save guest's xcr0 and restore Akaros's default. */
	gpc->xcr0 = rxcr0();
	lxcr0(__proc_global_info.x86_default_xcr0);

	/* We manage these MSRs manually. */
	gpc->msr_kern_gs_base = read_kern_gsbase();
	gpc->msr_star = read_msr(MSR_STAR);
	gpc->msr_lstar = read_msr(MSR_LSTAR);
	gpc->msr_sfmask = read_msr(MSR_SFMASK);

	write_kern_gsbase((uint64_t)pcpui);
	if (gpc->msr_star != AKAROS_MSR_STAR)
		write_msr(MSR_STAR, AKAROS_MSR_STAR);
	if (gpc->msr_lstar != AKAROS_MSR_LSTAR)
		write_msr(MSR_LSTAR, AKAROS_MSR_LSTAR);
	if (gpc->msr_sfmask != AKAROS_MSR_SFMASK)
		write_msr(MSR_SFMASK, AKAROS_MSR_SFMASK);

	/* As soon as we unlock, this gpc can be started on another core */
	spin_unlock(&p->vmm.lock);
	pcpui->guest_pcoreid = -1;
}
Example #3
static void perfmon_enable_event(int event, bool enable)
{
	uint64_t gctrl = read_msr(MSR_CORE_PERF_GLOBAL_CTRL);

	if (enable)
		write_msr(MSR_CORE_PERF_GLOBAL_CTRL, gctrl | (1 << event));
	else
		write_msr(MSR_CORE_PERF_GLOBAL_CTRL, gctrl & ~(1 << event));
}
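Examples #5 and #18 call perfmon_enable_fix_event(), which is not shown in this collection. A plausible sketch, mirroring perfmon_enable_event() and assuming the Intel architectural layout where fixed counter i is enabled by bit 32 + i of the global control MSR:

/* Sketch only: fixed-counter analogue of perfmon_enable_event(). Assumes
 * fixed counter 'event' is controlled by bit (32 + event) of
 * MSR_CORE_PERF_GLOBAL_CTRL. */
static void perfmon_enable_fix_event(int event, bool enable)
{
	uint64_t gctrl = read_msr(MSR_CORE_PERF_GLOBAL_CTRL);

	if (enable)
		write_msr(MSR_CORE_PERF_GLOBAL_CTRL,
				  gctrl | ((uint64_t) 1 << (32 + event)));
	else
		write_msr(MSR_CORE_PERF_GLOBAL_CTRL,
				  gctrl & ~((uint64_t) 1 << (32 + event)));
}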
Example #4
void
clear_all_limits(){
	int package;
	fprintf(stdout,"No, really, clearing all limits.\n");
	for( package=0; package<NUM_PACKAGES; package++ ){
		write_msr( package, MSR_PKG_POWER_LIMIT, 0 );
		write_msr( package, MSR_PP0_POWER_LIMIT, 0 );
		write_msr( package, MSR_DRAM_POWER_LIMIT, 0 );
	}
}
Example #5
static void perfmon_do_cores_alloc(void *opaque)
{
	struct perfmon_alloc *pa = (struct perfmon_alloc *) opaque;
	struct perfmon_cpu_context *cctx = PERCPU_VARPTR(counters_env);
	int i;

	spin_lock_irqsave(&cctx->lock);
	if (perfmon_is_fixed_event(&pa->ev)) {
		uint64_t fxctrl_value = read_msr(MSR_CORE_PERF_FIXED_CTR_CTRL), tmp;

		i = PMEV_GET_EVENT(pa->ev.event);
		if (i >= (int) cpu_caps.fix_counters_x_proc) {
			i = -EINVAL;
		} else if (fxctrl_value & (FIXCNTR_MASK << (i * FIXCNTR_NBITS))) {
			i = -EBUSY;
		} else {
			cctx->fixed_counters[i] = pa->ev;
			PMEV_SET_EN(cctx->fixed_counters[i].event, 1);

			tmp = perfmon_get_fixevent_mask(&pa->ev, i, fxctrl_value);

			perfmon_enable_fix_event(i, TRUE);

			write_msr(MSR_CORE_PERF_FIXED_CTR0 + i,
					  -(int64_t) pa->ev.trigger_count);
			write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL, tmp);
		}
	} else {
		for (i = 0; i < (int) cpu_caps.counters_x_proc; i++) {
			if (cctx->counters[i].event == 0) {
				if (!perfmon_event_available(i))
					warn_once("Counter %d is free but not available", i);
				else
					break;
			}
		}
		if (i < (int) cpu_caps.counters_x_proc) {
			cctx->counters[i] = pa->ev;
			PMEV_SET_EN(cctx->counters[i].event, 1);

			perfmon_enable_event(i, TRUE);

			write_msr(MSR_IA32_PERFCTR0 + i, -(int64_t) pa->ev.trigger_count);
			write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + i,
					  cctx->counters[i].event);
		} else {
			i = -ENOSPC;
		}
	}
	spin_unlock_irqsave(&cctx->lock);

	pa->cores_counters[core_id()] = (counter_t) i;
}
Example #6
void
set_raw_policy( int package, int domain, uint64_t policy ){
	switch( domain ){
		case PP0_DOMAIN:	write_msr( package, MSR_PP0_POLICY, policy );	
					break;
#ifdef ARCH_062A
		case PP1_DOMAIN: 	write_msr( package, MSR_PP1_POLICY, policy );	
					break;
#endif
		default:		assert(0);
					break;
	}
}
Example #7
void restore_defaults()
{
	int package;

	// No one else can enter now.
	in_handler = 1;

	// Reset all limits.
	for(package=0; package<NUM_PACKAGES; package++){
		write_msr( package, MSR_PKG_POWER_LIMIT, 0x6845000148398 );
		/*
		write_msr( package, MSR_PP0_POWER_LIMIT, 0 );
		#ifdef ARCH_062D
		write_msr( package, MSR_DRAM_POWER_LIMIT, 0 );
		#endif
		*/ // These are currently locked out.

		// The default is to leave turbo boost enabled.
		enable_turbo(package);
	}

	// Close the /dev/cpu/msr files, in case they are open.
	finalize_msr();

	// Now exit. Control never returns, so in_handler stays 1 and no one
	// else can enter while it is set.
	//printf("In_handler is %d", in_handler);
	_exit(EXIT_FAILURE);
}
Example #8
int cthd_msr::disable_turbo() {
	int cpu_count = get_no_cpus();
	unsigned long long val;
	int ret;

	for (int i = 0; i < cpu_count; ++i) {
		/*
		 This method is recommended only for BIOS

		 ret = read_msr(i, MSR_IA32_MISC_ENABLE, &val);
		 if (ret < 0)
		 return THD_ERROR;
		 val |= MSR_IA32_MISC_ENABLE_TURBO_DISABLE;

		 ret = write_msr(i, MSR_IA32_MISC_ENABLE, val);
		 if (ret < 0)
		 return THD_ERROR;
		 */
		ret = read_msr(i, MSR_IA32_PERF_CTL, &val);
		if (ret < 0)
			return THD_ERROR;
		val |= TURBO_DISENGAGE_BIT;
		ret = write_msr(i, MSR_IA32_PERF_CTL, val);
		if (ret < 0)
			return THD_ERROR;
	}
	thd_log_info("Turbo disabled \n");

	return THD_SUCCESS;
}
Example #9
int cthd_msr::set_clock_mod_duty_cycle_per_cpu(int cpu, int state) {
	unsigned long long val;
	int ret;

	// First bit is reserved
	state = state << 1;

	ret = read_msr(cpu, MSR_IA32_THERM_CONTROL, &val);
	if (ret < 0)
		return THD_ERROR;

	if (!state) {
		val &= ~MSR_IA32_CLK_MOD_ENABLE;
	} else {
		val |= MSR_IA32_CLK_MOD_ENABLE;
	}
	val &= ~MSR_IA32_CLK_MOD_DUTY_CYCLE_MASK;
	val |= (state & MSR_IA32_CLK_MOD_DUTY_CYCLE_MASK);

	ret = write_msr(cpu, MSR_IA32_THERM_CONTROL, val);
	if (ret < 0) {
		thd_log_warn("set_clock_mod_duty_cycle current set failed to write\n");
		return THD_ERROR;
	}

	return THD_SUCCESS;
}
Example #10
void
disable_turbo(int cpu){
	uint64_t val;
	read_msr( cpu, MSR_MISC_ENABLE, &val );
	// Set bit 38 to 1.
	val |= ((uint64_t)1) << 38;
	write_msr( cpu, MSR_MISC_ENABLE, val );
}
Example #11
void
enable_turbo(int cpu){
	uint64_t val;
	read_msr( cpu, MSR_MISC_ENABLE, &val );
	// Set bit 38 to 0.
	val &= ~(((uint64_t)1) << 38);
	write_msr( cpu, MSR_MISC_ENABLE, val );
}
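Examples #10 and #11 differ only in whether bit 38 of MSR_MISC_ENABLE (the turbo disable bit) gets set or cleared. A hedged one-function version (set_turbo is a hypothetical name):

/* Sketch: combines Examples #10 and #11. Bit 38 of IA32_MISC_ENABLE is
 * "Turbo Mode Disable": setting it disables turbo, clearing it enables. */
void
set_turbo(int cpu, int enable){
	uint64_t val;
	read_msr( cpu, MSR_MISC_ENABLE, &val );
	if(enable)
		val &= ~(((uint64_t)1) << 38);
	else
		val |= ((uint64_t)1) << 38;
	write_msr( cpu, MSR_MISC_ENABLE, val );
}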
Example #12
void
rapl_finalize( struct rapl_state_s *s, int reset_limits){

	int package;

	if( s==NULL ){
		printf("\n Error: State pointer should not be null. Something went wrong.\n");
		s = &no_caller_rapl_state;
	}

	if(s->f == NULL){
		printf("\n Error: File pointer should not be null. Something went wrong.\n");
		return;
	}

	if(s->initializedTick){
		uint64_t tsc = rdtsc();

		// Require at least 10ms between ticks.
		if(tsc_delta(&lastNonzeroTick, &tsc, &tsc_rate) > 0.01)
			rapl_tick(s, 0);
	}

	for(package=0; package<NUM_PACKAGES; package++){
		get_all_status(package, s);

		if(reset_limits){
			// Reset all limits.
			// This is currently the default limit on rzmerl.
			printf("\nRESETTING LIMITS\n");
			write_msr( package, MSR_PKG_POWER_LIMIT, APPRO_DEFAULT_PKG_POWER_LIMIT);
			/*
			write_msr( package, MSR_PP0_POWER_LIMIT, 0 );
#ifdef ARCH_062D
			write_msr( package, MSR_DRAM_POWER_LIMIT, 0 );
#endif
			*/ // These are currently locked out.
			
			//We had disabled turbo. The default is to leave this enabled.
			enable_turbo(package);
		}
	}
	
	// Now the print statement from hell. Call it only when this is not a
	// dry run, i.e. when we are in read-only or read-write mode;
	// otherwise the file should be empty.

	if(s->mode.dry_run_flag == 1 && s->mode.read_only_flag == 0 && s->mode.read_write_flag == 0){
		fprintf(stdout, "\nIn DRY_RUN mode.\n");
		finalize_msr();
	}
	else {
		// This is either read-only or read-write mode.
		print_rapl_state(s);
		finalize_msr();
	}
}
Example #13
void perfmon_interrupt(struct hw_trapframe *hw_tf, void *data)
{
	int i;
	struct perfmon_cpu_context *cctx = PERCPU_VARPTR(counters_env);
	uint64_t gctrl, status;

	spin_lock_irqsave(&cctx->lock);
	/* We need to save the global control status, because we need to disable
	 * counters in order to be able to reset their values.
	 * We will restore the global control status on exit.
	 */
	status = read_msr(MSR_CORE_PERF_GLOBAL_STATUS);
	gctrl = read_msr(MSR_CORE_PERF_GLOBAL_CTRL);
	write_msr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	for (i = 0; i < (int) cpu_caps.counters_x_proc; i++) {
		if (status & ((uint64_t) 1 << i)) {
			if (cctx->counters[i].event) {
				profiler_add_hw_sample(
					hw_tf, perfmon_make_sample_event(cctx->counters + i));
				write_msr(MSR_IA32_PERFCTR0 + i,
						  -(int64_t) cctx->counters[i].trigger_count);
			}
		}
	}
	for (i = 0; i < (int) cpu_caps.fix_counters_x_proc; i++) {
		if (status & ((uint64_t) 1 << (32 + i))) {
			if (cctx->fixed_counters[i].event) {
				profiler_add_hw_sample(
					hw_tf, perfmon_make_sample_event(cctx->fixed_counters + i));
				write_msr(MSR_CORE_PERF_FIXED_CTR0 + i,
						  -(int64_t) cctx->fixed_counters[i].trigger_count);
			}
		}
	}
	write_msr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
	write_msr(MSR_CORE_PERF_GLOBAL_CTRL, gctrl);
	spin_unlock_irqsave(&cctx->lock);

	/* We need to re-arm the IRQ as the PFM IRQ gets masked on trigger.
	 * Note that KVM and real HW seem to do two different things WRT
	 * re-arming the IRQ: KVM does not mask the IRQ, while real HW does.
	 */
	perfmon_arm_irq();
}
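perfmon_arm_irq() itself is not among these examples. A hedged sketch of what re-arming typically means, assuming x2APIC mode (where the LVT entries are MSRs); X2APIC_LVTPC and PMI_VECTOR are assumed names, not taken from the code above:

/* Sketch only: rewrite the local APIC's performance-counter LVT entry
 * with bit 16 (the mask bit) clear, so the next PMI can be delivered. */
static void perfmon_arm_irq(void)
{
	write_msr(X2APIC_LVTPC, PMI_VECTOR);
}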
Example #14
void
set_raw_power_limit( int package, int domain, uint64_t val ){
	switch(domain){
		case PKG_DOMAIN: 	write_msr( package, MSR_PKG_POWER_LIMIT, val );	
					break;
		case PP0_DOMAIN: 	write_msr( package, MSR_PP0_POWER_LIMIT, val );	
					break;
#ifdef ARCH_062A				
		case PP1_DOMAIN: 	write_msr( package, MSR_PP1_POWER_LIMIT, val );	
					break;
#endif
#ifdef ARCH_062D				
		case DRAM_DOMAIN: 	write_msr( package, MSR_DRAM_POWER_LIMIT, val );	
					break;
#endif
		default:		assert(0);					
					break;
	}
}
Example #15
/* Enable prefetch on core2 */
static int enable_prefetch_core2(int core) {

	int fd;
	uint64_t result;
	int begin,end,c;

	printf("Enable all prefetch\n");

	if (core==-1) {
		begin=0;
		end=1024;
	}
	else {
		begin=core;
		end=core;
	}

	for(c=begin;c<=end;c++) {

		fd=open_msr(c);
		if (fd<0) break;

		/* Read original results */
		result=read_msr(fd,CORE2_PREFETCH_MSR);

		printf("\tCore %d old : L2HW=%c L2ADJ=%c DCU=%c DCUIP=%c\n",
			c,
			result&(1ULL<<9)?'N':'Y',
			result&(1ULL<<19)?'N':'Y',
			result&(1ULL<<37)?'N':'Y',
			result&(1ULL<<39)?'N':'Y'
			);

		/* Enable all */
		result &= ~((1ULL<<9)|(1ULL<<19)|(1ULL<<37)|(1ULL<<39));
		result=write_msr(fd,CORE2_PREFETCH_MSR,result);

		/* Verify change */
		result=read_msr(fd,CORE2_PREFETCH_MSR);

		printf("\tCore %d new : L2HW=%c L2ADJ=%c DCU=%c DCUIP=%c\n",
			c,
			result&(1ULL<<9)?'N':'Y',
			result&(1ULL<<19)?'N':'Y',
			result&(1ULL<<37)?'N':'Y',
			result&(1ULL<<39)?'N':'Y'
			);

		close(fd);

	}

	return 0;
}
Example #16
File: vmm.c Project: anandab/akaros
bool emsr_ok(struct emmsr *msr, uint64_t *rcx, uint64_t *rdx,
             uint64_t *rax, uint32_t opcode)
{
	if (opcode == VMM_MSR_EMU_READ) {
		rdmsr(msr->reg, *rdx, *rax);
	} else {
		uint64_t val = (uint64_t) *rdx << 32 | *rax;

		write_msr(msr->reg, val);
	}
	return TRUE;
}
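The MSR calling convention splits the 64-bit value across EDX:EAX, which is why the write path above reassembles val from *rdx and *rax. The read path is the inverse split; a hedged sketch of it without the rdmsr() macro, assuming a read_msr() that returns the full 64-bit value:

	/* Sketch of the read path: split the 64-bit MSR value back into
	 * the guest's EDX (high half) and EAX (low half). */
	uint64_t val = read_msr(msr->reg);

	*rdx = val >> 32;
	*rax = val & 0xffffffff;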
Example #17
/* Enable prefetch on nehalem and newer */
static int enable_prefetch_nhm(int core) {

	int fd;
	int result;
	int begin,end,c;

	printf("Enable all prefetch\n");

	if (core==-1) {
		begin=0;
		end=1024;
	}
	else {
		begin=core;
		end=core;
	}

	for(c=begin;c<=end;c++) {

		fd=open_msr(c);
		if (fd<0) break;

		/* Read original results */
		result=read_msr(fd,NHM_PREFETCH_MSR);

		printf("\tCore %d old : L2HW=%c L2ADJ=%c DCU=%c DCUIP=%c\n",
			c,
			result&0x1?'N':'Y',
			result&0x2?'N':'Y',
			result&0x4?'N':'Y',
			result&0x8?'N':'Y'
			);

		/* Enable all */
		result=write_msr(fd,NHM_PREFETCH_MSR,0x0);

		/* Verify change */
		result=read_msr(fd,NHM_PREFETCH_MSR);

		printf("\tCore %d new : L2HW=%c L2ADJ=%c DCU=%c DCUIP=%c\n",
			c,
			result&0x1?'N':'Y',
			result&0x2?'N':'Y',
			result&0x4?'N':'Y',
			result&0x8?'N':'Y'
			);

		close(fd);

	}

	return 0;
}
Example #18
static void perfmon_do_cores_free(void *opaque)
{
	struct perfmon_alloc *pa = (struct perfmon_alloc *) opaque;
	struct perfmon_cpu_context *cctx = PERCPU_VARPTR(counters_env);
	int err = 0, coreno = core_id();
	counter_t ccno = pa->cores_counters[coreno];

	spin_lock_irqsave(&cctx->lock);
	if (perfmon_is_fixed_event(&pa->ev)) {
		unsigned int ccbitsh = ccno * FIXCNTR_NBITS;
		uint64_t fxctrl_value = read_msr(MSR_CORE_PERF_FIXED_CTR_CTRL);

		if ((ccno >= cpu_caps.fix_counters_x_proc) ||
			!(fxctrl_value & (FIXCNTR_MASK << ccbitsh))) {
			err = -ENOENT;
		} else {
			perfmon_init_event(&cctx->fixed_counters[ccno]);

			perfmon_enable_fix_event((int) ccno, FALSE);

			write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL,
					  fxctrl_value & ~(FIXCNTR_MASK << ccbitsh));
			write_msr(MSR_CORE_PERF_FIXED_CTR0 + ccno, 0);
		}
	} else {
		if (ccno < (int) cpu_caps.counters_x_proc) {
			perfmon_init_event(&cctx->counters[ccno]);

			perfmon_enable_event((int) ccno, FALSE);

			write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + ccno, 0);
			write_msr(MSR_IA32_PERFCTR0 + ccno, 0);
		} else {
			err = -ENOENT;
		}
	}
	spin_unlock_irqsave(&cctx->lock);

	pa->cores_counters[coreno] = (counter_t) err;
}
Example #19
void perfmon_pcpu_init(void)
{
	int i;

	if (!perfmon_supported())
		return;
	/* Enable user level access to the performance counters */
	lcr4(rcr4() | CR4_PCE);

	/* Reset all the counters and selectors to zero. */
	write_msr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	for (i = 0; i < (int) cpu_caps.counters_x_proc; i++) {
		write_msr(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0);
		write_msr(MSR_IA32_PERFCTR0 + i, 0);
	}
	write_msr(MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
	for (i = 0; i < (int) cpu_caps.fix_counters_x_proc; i++)
		write_msr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);

	perfmon_arm_irq();
}
Example #20
unsigned long apic_timer_init(unsigned int vector)
{
	unsigned long start, end;
	unsigned long tmr;

	write_msr(X2APIC_TDCR, 3);

	start = pm_timer_read();
	write_msr(X2APIC_TMICT, 0xffffffff);

	while (pm_timer_read() - start < 100 * NS_PER_MSEC)
		cpu_relax();

	end = pm_timer_read();
	tmr = read_msr(X2APIC_TMCCT);

	divided_apic_freq = (0xffffffffULL - tmr) * NS_PER_SEC / (end - start);

	write_msr(X2APIC_TMICT, 0);
	write_msr(X2APIC_LVTT, vector);

	return (divided_apic_freq * 16 + 500) / 1000;
}
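The calibration above lets the count-down timer run for roughly 100ms against the PM timer and converts the elapsed ticks into a rate. A hedged restatement of the arithmetic, using the same names as above (the factor of 16 follows from TDCR = 3 selecting a divide-by-16 prescaler):

	/* Count-down timer: ticks consumed since writing 0xffffffff. */
	unsigned long long ticks = 0xffffffffULL - tmr;

	/* Ticks per second at the divided (prescaled) rate. */
	divided_apic_freq = ticks * NS_PER_SEC / (end - start);

	/* Undivided frequency, rounded to the nearest kHz. */
	return (divided_apic_freq * 16 + 500) / 1000;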
Example #21
int cthd_msr::disable_turbo_per_cpu(int cpu) {
	unsigned long long val;
	int ret;

	ret = read_msr(cpu, MSR_IA32_PERF_CTL, &val);
	if (ret < 0)
		return THD_ERROR;
	val |= TURBO_DISENGAGE_BIT;
	ret = write_msr(cpu, MSR_IA32_PERF_CTL, val);
	if (ret < 0)
		return THD_ERROR;

	return THD_SUCCESS;
}
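The matching enable path would clear the same bit. A hedged sketch mirroring Example #21 (enable_turbo_per_cpu is an assumed name, not shown in these examples):

/* Sketch: clear the turbo disengage bit in IA32_PERF_CTL to re-enable
 * turbo on one CPU. */
int cthd_msr::enable_turbo_per_cpu(int cpu) {
	unsigned long long val;
	int ret;

	ret = read_msr(cpu, MSR_IA32_PERF_CTL, &val);
	if (ret < 0)
		return THD_ERROR;
	val &= ~TURBO_DISENGAGE_BIT;
	ret = write_msr(cpu, MSR_IA32_PERF_CTL, val);
	if (ret < 0)
		return THD_ERROR;

	return THD_SUCCESS;
}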
Example #22
/**
 * Allocate and initialize a per-CPU structure to be accessible via the
 * GS_KERNEL segment register.
 **/
void kseg_init(void)
{
    kseg_t *kseg;

    kseg = (kseg_t *) malloc(sizeof(kseg_t), FRAME_ATOMIC);
    if (!kseg)
        panic("Cannot allocate kseg.");

    kseg->ustack_rsp = 0;
    kseg->kstack_rsp = 0;
    kseg->fsbase = read_msr(AMD_MSR_FS);

    write_msr(AMD_MSR_GS_KERNEL, (uintptr_t) kseg);
}
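Once stored, the same MSR yields the pointer back. A hedged sketch (kseg_get is a hypothetical helper; in practice the kernel would usually reach the structure through the GS segment after swapgs):

/* Sketch: recover the per-CPU kseg pointer written by kseg_init(). */
kseg_t *kseg_get(void)
{
    return (kseg_t *) read_msr(AMD_MSR_GS_KERNEL);
}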
Example #23
void
rapl_finalize( struct rapl_state_s *s, int reset_limits){

	int socket;
	gettimeofday( &(s->finish), NULL );
	s->elapsed = ts_delta( &(s->prev), &(s->finish) );
	for(socket=0; socket<NUM_PACKAGES; socket++){
		get_all_status(socket, s);

		if(reset_limits){
		  // Reset all limits.
		  write_msr( socket, MSR_PKG_POWER_LIMIT, 0 );
		  write_msr( socket, MSR_PP0_POWER_LIMIT, 0 );
#ifdef ARCH_062D
		  write_msr( socket, MSR_DRAM_POWER_LIMIT, 0 );
#endif
		}
	}
	
	// Now the print statement from hell.
	
	print_rapl_state(s);
	fclose(s->f);
}
Example #24
/** Perform ia32 specific tasks needed before the new thread is scheduled.
 *
 * THREAD is locked and interrupts are disabled.
 */
void before_thread_runs_arch(void)
{
	uintptr_t kstk = (uintptr_t) &THREAD->kstack[STACK_SIZE];
	
#ifndef PROCESSOR_i486
	if (CPU->arch.fi.bits.sep) {
		/* Set kernel stack for CPL3 -> CPL0 switch via SYSENTER */
		write_msr(IA32_MSR_SYSENTER_ESP, kstk - sizeof(istate_t));
	}
#endif
	
	/* Set kernel stack for CPL3 -> CPL0 switch via interrupt */
	CPU->arch.tss->esp0 = kstk;
	CPU->arch.tss->ss0 = GDT_SELECTOR(KDATA_DES);
	
	/* Set up TLS in GS register */
	set_tls_desc(THREAD->arch.tls);
}
Example #25
int probe_smp()
{
	unsigned long long lapic_msr = read_msr(0x1b);
	write_msr(0x1b, (lapic_msr&0xFFFFF000) | 0x800, 0); //set global enable bit for lapic
	unsigned mem_lower = ((CMOS_READ_BYTE(CMOS_BASE_MEMORY+1) << 8) | CMOS_READ_BYTE(CMOS_BASE_MEMORY)) << 10;
	int res=0;
	if(mem_lower < 512*1024 || mem_lower > 640*1024)
		return 0;
	if((unsigned)EBDA_SEG_ADDR > mem_lower - 1024 || (unsigned)EBDA_SEG_ADDR + *((unsigned char *)EBDA_SEG_ADDR) * 1024 > mem_lower)
		res=imps_scan_mptables(mem_lower - 1024, 1024);
	else
		res=imps_scan_mptables(EBDA_SEG_ADDR, 1024);
	if(!res)
		res=imps_scan_mptables(0xF0000, 0x10000);
	if(!res)
		return 0;
	set_ksf(KSF_CPUS_RUNNING);
	printk(5, "[cpu]: CPU%s initialized (boot=%d, #APs=%d: ok)                    \n", num_cpus > 1 ? "s" : "", primary_cpu->apicid, num_booted_cpus);
	return num_booted_cpus > 0;
}
Example #26
int cthd_msr::set_clock_mod_duty_cycle(int state) {
	int cpu_count = get_no_cpus();
	unsigned long long val;
	int ret;

	thd_log_info("Set T stated %d \n", state);
	// First bit is reserved
	state = state << 1;

	for (int i = 0; i < cpu_count; ++i) {
		ret = read_msr(i, MSR_IA32_THERM_CONTROL, &val);
		if (ret < 0) {
			thd_log_debug("set_clock_mod_duty_cycle current MSR read failed\n");
			return THD_ERROR;
		}

		thd_log_debug("set_clock_mod_duty_cycle current %x\n",
				(unsigned int) val);

		if (!state) {
			val &= ~MSR_IA32_CLK_MOD_ENABLE;
		} else {
			val |= MSR_IA32_CLK_MOD_ENABLE;
		}

		val &= ~MSR_IA32_CLK_MOD_DUTY_CYCLE_MASK;
		val |= (state & MSR_IA32_CLK_MOD_DUTY_CYCLE_MASK);

		thd_log_debug("set_clock_mod_duty_cycle current set to %x\n",
				(unsigned int) val);
		ret = write_msr(i, MSR_IA32_THERM_CONTROL, val);
		if (ret < 0) {
			thd_log_warn(
					"set_clock_mod_duty_cycle current set failed to write\n");
			return THD_ERROR;
		}
	}

	return THD_SUCCESS;
}
Example #27
void apic_timer_set(unsigned long timeout_ns)
{
	unsigned long long ticks =
		(unsigned long long)timeout_ns * divided_apic_freq;
	write_msr(X2APIC_TMICT, ticks / NS_PER_SEC);
}
Example #28
static void send_x2apic_ipi(u32 apic_id, u32 icr_lo)
{
	write_msr(MSR_X2APIC_BASE + APIC_REG_ICR,
		  ((unsigned long)apic_id) << 32 | icr_lo);
}
Example #29
static void write_x2apic(unsigned int reg, u32 val)
{
	write_msr(MSR_X2APIC_BASE + reg, val);
}
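The matching read accessor follows the same register layout. A hedged sketch (read_x2apic is an assumed name, and a read_msr() returning the value is assumed):

static u32 read_x2apic(unsigned int reg)
{
	return (u32) read_msr(MSR_X2APIC_BASE + reg);
}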
Example #30
void 
set_power_bounds(){
	int cpu;
	uint64_t msr_pkg_power_limit=-1, msr_pp0_power_limit=-1; 
#ifdef ARCH_062D
	uint64_t msr_dram_power_limit=-1;
#endif
	char *env = NULL;


	// First, check the environment variables.
	env = getenv("MSR_PKG_POWER_LIMIT");
	if(env == NULL){
		fprintf(stderr, "Environment variable MSR_PKG_POWER_LIMIT is not set. Using defaults.\n");
	}else{
		msr_pkg_power_limit = strtoll( env, NULL, 0 );
	}

	env = getenv("MSR_PP0_POWER_LIMIT");
	if(env == NULL){
		fprintf(stderr, "Environment variable MSR_PP0_POWER_LIMIT is not set. Using defaults.\n");
	}else{
		msr_pp0_power_limit = strtoll( env, NULL, 0 );
	}

#ifdef ARCH_062D
	env = getenv("MSR_DRAM_POWER_LIMIT");
	if(env == NULL){
		fprintf(stderr, "Environment variable MSR_DRAM_POWER_LIMIT is not set. Using defaults.\n");
	}else{
		msr_dram_power_limit = strtoll( env, NULL, 0 );
	}
#endif


	// Now write the MSRs. Zero is a valid value.
	for( cpu=0; cpu<NUM_PACKAGES; cpu++ ){
		if( msr_pkg_power_limit != -1 ){
			fprintf(stderr, "%s::%d setting %s to 0x%lx on cpu %d\n", 
				__FILE__, __LINE__, msr2str(MSR_PKG_POWER_LIMIT), msr_pkg_power_limit, cpu);
			write_msr( cpu, MSR_PKG_POWER_LIMIT, msr_pkg_power_limit );
		}
		if( msr_pp0_power_limit != -1 ){
			fprintf(stderr, "%s::%d setting %s to 0x%lx on cpu %d\n", 
				__FILE__, __LINE__, msr2str(MSR_PP0_POWER_LIMIT), msr_pp0_power_limit, cpu);
			write_msr( cpu, MSR_PP0_POWER_LIMIT, msr_pp0_power_limit );
		}
#ifdef ARCH_062D
		if( msr_dram_power_limit != -1 ){
			fprintf(stderr, "%s::%d setting %s to 0x%lx on cpu %d\n", 
				__FILE__, __LINE__, msr2str(MSR_DRAM_POWER_LIMIT), msr_dram_power_limit, cpu);
			write_msr( cpu, MSR_DRAM_POWER_LIMIT, msr_dram_power_limit );
		}
#endif
	}
}
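A hedged usage sketch: the limits come from environment variables, so a caller can pin them before invoking set_power_bounds(). The value below is the default package limit written in Example #7; whether it applies to other hardware is an assumption.

	/* setenv() is declared in <stdlib.h>. */
	setenv("MSR_PKG_POWER_LIMIT", "0x6845000148398", 1);
	set_power_bounds();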