static void set_max_freq(void)
{
    msr_t perf_ctl;
    msr_t msr;

    /* Enable speed step */
    msr = msr_read(MSR_IA32_MISC_ENABLES);
    msr.lo |= (1 << 16);
    msr_write(MSR_IA32_MISC_ENABLES, msr);

    /*
     * Set guaranteed ratio [21:16] from IACORE_RATIOS to bits [15:8] of
     * the PERF_CTL
     */
    msr = msr_read(MSR_IACORE_RATIOS);
    perf_ctl.lo = (msr.lo & 0x3f0000) >> 8;

    /*
     * Set guaranteed vid [21:16] from IACORE_VIDS to bits [7:0] of
     * the PERF_CTL
     */
    msr = msr_read(MSR_IACORE_VIDS);
    perf_ctl.lo |= (msr.lo & 0x7f0000) >> 16;

    perf_ctl.hi = 0;
    msr_write(MSR_IA32_PERF_CTL, perf_ctl);
}
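/*
 * Worked example of the packing above (illustrative values): if
 * IACORE_RATIOS holds guaranteed ratio 0x2A in bits [21:16], then
 * (msr.lo & 0x3f0000) >> 8 = 0x2A00, i.e. ratio 0x2A lands in PERF_CTL
 * bits [15:8]; the guaranteed VID from IACORE_VIDS bits [21:16] is shifted
 * right by 16 into PERF_CTL bits [7:0] the same way.
 */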
static void init_syscall_msr()
{
    u64 sysenter_cs = (u64)GDT_SELECTOR_CODE_K;
    u64 sysenter_esp = (u64)get_my_cpu_area_start_vaddr() + PER_CPU_STACK_TOP_OFFSET;
    u64 sysenter_eip = (u64)(ulong)&sysenter_handler;

    // Initialize sysenter parameters
    msr_write(SYSENTER_MSR_CS, &sysenter_cs);
    msr_write(SYSENTER_MSR_ESP, &sysenter_esp);
    msr_write(SYSENTER_MSR_EIP, &sysenter_eip);
}
static void apic_set_base_addr(struct apic_dev *apic, addr_t addr)
{
    uint64_t data;

    data = msr_read(APIC_BASE_MSR);

    /* Replace the base address, preserving the low 12 flag bits */
    msr_write(APIC_BASE_MSR, (addr & APIC_BASE_ADDR_MASK) | (data & 0xfff));
}
static inline int __flip_bit(u32 msr, u8 bit, bool set)
{
    struct msr m, m1;
    int err = -EINVAL;

    if (bit > 63)
        return err;

    err = msr_read(msr, &m);
    if (err)
        return err;

    m1 = m;
    if (set)
        m1.q |= BIT_64(bit);
    else
        m1.q &= ~BIT_64(bit);

    if (m1.q == m.q)
        return 0;

    err = msr_write(msr, &m1);
    if (err)
        return err;

    return 1;
}
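/*
 * Minimal caller sketch for __flip_bit() above. These wrappers are a
 * reconstruction for illustration (not verified against the surrounding
 * codebase): they return 1 if the MSR changed, 0 if the bit already had
 * the requested value, or a negative error code.
 */
static inline int msr_set_bit(u32 msr, u8 bit)
{
    return __flip_bit(msr, bit, true);
}

static inline int msr_clear_bit(u32 msr, u8 bit)
{
    return __flip_bit(msr, bit, false);
}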
void syscall_setup_for_thread(thread_t *thread)
{
    uint64_t page = 0xFFFFFFFFFL - thread->stack;
    uint64_t addr = page << 12;

    /* Sign-extend to a canonical address if bit 47 is set */
    if (addr & (1L << 47)) {
        addr |= 0xffff000000000000;
    }

    thread_t *ptr = (thread_t *)addr;

    /* LSTAR holds the RIP loaded on syscall entry */
    msr_write(MSR_LSTAR, (uint64_t)&ptr->state.callCode);
}
int hw_l2ca_set(const unsigned l2id,
                const unsigned num_ca,
                const struct pqos_l2ca *ca)
{
    int ret = PQOS_RETVAL_OK;
    unsigned i = 0, count = 0, core = 0;

    if (ca == NULL || num_ca == 0)
        return PQOS_RETVAL_PARAM;

    /**
     * Check if L2 CAT is supported
     */
    ASSERT(m_cap != NULL);
    ret = pqos_l2ca_get_cos_num(m_cap, &count);
    if (ret != PQOS_RETVAL_OK)
        return PQOS_RETVAL_RESOURCE; /* L2 CAT not supported */

    /**
     * Check if class bitmasks are contiguous and
     * if class id's are within allowed range.
     */
    for (i = 0; i < num_ca; i++) {
        if (!is_contiguous(ca[i].ways_mask)) {
            LOG_ERROR("L2 COS%u bit mask is not contiguous!\n",
                      ca[i].class_id);
            return PQOS_RETVAL_PARAM;
        }
        if (ca[i].class_id >= count) {
            LOG_ERROR("L2 COS%u is out of range (COS%u is max)!\n",
                      ca[i].class_id, count - 1);
            return PQOS_RETVAL_PARAM;
        }
    }

    /**
     * Pick one core from the L2 cluster and
     * perform MSR writes to COS registers on the cluster.
     */
    ASSERT(m_cpu != NULL);
    ret = pqos_cpu_get_one_by_l2id(m_cpu, l2id, &core);
    if (ret != PQOS_RETVAL_OK)
        return ret;

    for (i = 0; i < num_ca; i++) {
        uint32_t reg = ca[i].class_id + PQOS_MSR_L2CA_MASK_START;
        uint64_t val = ca[i].ways_mask;
        int retval = MACHINE_RETVAL_OK;

        retval = msr_write(core, reg, val);
        if (retval != MACHINE_RETVAL_OK)
            return PQOS_RETVAL_ERROR;
    }

    return ret;
}
void lapic_enable()
{
    // set bit 11 (global enable) in the APIC_BASE MSR to enable the APIC
    uint32_t base = msr_read(MSR_APIC_BASE);
    base |= 1 << 11;
    msr_write(MSR_APIC_BASE, base);

    // set the software-enable bit (8) in the spurious vector register
    set_reg(LAPIC_REG_SPURIOUS, get_reg(LAPIC_REG_SPURIOUS) | (1 << 8));
}
uint64_t get_microcode_version(void)
{
    uint64_t val;
    uint32_t eax, ebx, ecx, edx;

    /* Per the SDM: clear IA32_BIOS_SIGN_ID, execute CPUID leaf 1,
     * then read the MSR back to get the loaded microcode revision */
    msr_write(MSR_IA32_BIOS_SIGN_ID, 0U);
    cpuid(CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
    val = msr_read(MSR_IA32_BIOS_SIGN_ID);

    return val;
}
/**
 * @brief Reads monitoring event data from given core
 *
 * This function doesn't acquire API lock.
 *
 * @param lcore logical core id
 * @param rmid RMID to be read
 * @param event monitoring event
 * @param value place to store read value
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int mon_read(const unsigned lcore,
                    const pqos_rmid_t rmid,
                    const enum pqos_mon_event event,
                    uint64_t *value)
{
    int retries = 3, retval = PQOS_RETVAL_OK;
    uint32_t reg = 0;
    uint64_t val = 0;

    /**
     * Set event selection register (RMID + event id)
     */
    reg = PQOS_MSR_MON_EVTSEL;
    val = ((uint64_t)rmid) & PQOS_MSR_MON_EVTSEL_RMID_MASK;
    val <<= PQOS_MSR_MON_EVTSEL_RMID_SHIFT;
    val |= ((uint64_t)event) & PQOS_MSR_MON_EVTSEL_EVTID_MASK;
    if (msr_write(lcore, reg, val) != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    /**
     * Read selected data associated with previously selected RMID+event
     */
    reg = PQOS_MSR_MON_QMC;
    do {
        if (msr_read(lcore, reg, &val) != MACHINE_RETVAL_OK) {
            retval = PQOS_RETVAL_ERROR;
            break;
        }
        if ((val & PQOS_MSR_MON_QMC_ERROR) != 0ULL) {
            /**
             * Unsupported event id or RMID selected
             */
            retval = PQOS_RETVAL_ERROR;
            break;
        }
        retries--;
    } while ((val & PQOS_MSR_MON_QMC_UNAVAILABLE) != 0ULL && retries > 0);

    /**
     * Store event value
     */
    if (retval == PQOS_RETVAL_OK)
        *value = (val & PQOS_MSR_MON_QMC_DATA_MASK);
    else
        LOG_WARN("Error reading event %u on core %u (RMID%u)!\n",
                 (unsigned)event, lcore, (unsigned)rmid);

    return retval;
}
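/*
 * Usage sketch for mon_read() above, assuming PQOS_MON_EVENT_L3_OCCUP is
 * the occupancy event id and m_llc_scale_factor caches the CPUID-reported
 * upscaling factor (the latter name is an assumption for illustration):
 * convert the raw counter into bytes of L3 occupancy.
 */
static int read_llc_occupancy(const unsigned lcore, const pqos_rmid_t rmid,
                              uint64_t *bytes)
{
    uint64_t count = 0;
    int ret = mon_read(lcore, rmid, PQOS_MON_EVENT_L3_OCCUP, &count);

    if (ret != PQOS_RETVAL_OK)
        return ret;

    *bytes = count * m_llc_scale_factor; /* counter ticks -> bytes */
    return PQOS_RETVAL_OK;
}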
int main()
{
    void *dev;
    int cnt;
    int i, j;

    // Register parameters and signals
    msr_init(&rtp, NULL);
    msr_reg_rtw_param("/daten/p1(ll=\"1\" ul=\"2\")", "gain", "int", &p1, 1, 1, SS_INT32, var_SCALAR, sizeof(int));
    msr_reg_rtw_param("/daten/p2(init=\"0.5\" ll=\"1\" ul=\"2\")", "gain", "double", &p2, 1, 1, SS_DOUBLE, var_SCALAR, sizeof(double));
    msr_reg_rtw_param("/daten/p3(unit=\"s\")", "gain", "double", &p3, 1, 10, SS_DOUBLE, var_VECTOR, sizeof(double));

    msr_reg_rtw_signal("/kanal/k1", "", "int", (void *)&rtp.k1 - (void *)&rtp, 1, 1, SS_INT32, var_SCALAR, sizeof(int));
    msr_reg_rtw_signal("/kanal/k2", "", "int", (void *)&rtp.k2 - (void *)&rtp, 1, 1, SS_DOUBLE, var_SCALAR, sizeof(double));
    msr_reg_rtw_signal("/kanal/k3", "", "int", (void *)&rtp.k3[0] - (void *)&rtp, 1, 5, SS_DOUBLE, var_VECTOR, sizeof(double));

    msr_reg_enum_list("/Aufzaehlung", "", &en[0], MSR_R | MSR_W, 5, 1, "Eins,Zwei,Drei", NULL, NULL);

    // Fill the channels
    for (i = 0; i < 1000; i++) {
        // printf("index %i\n", i);
        rtp.k1 = i;
        rtp.k2 = i * 0.3;
        for (j = 0; j < 5; j++)
            rtp.k3[j] = j * i;
        msr_update(&rtp);
    }

    // Read back
    dev = msr_open(STDIN_FILENO, STDOUT_FILENO);
    msr_read(dev);
    do {
        cnt = msr_write(dev);
        // printf("Write count: %i\n", cnt);
    } while (cnt > 0);
    msr_close(dev);

    msr_cleanup();
    return 0;
}
/**
 * @brief Writes range of MBA/CAT COS MSR's with \a msr_val value
 *
 * Used as part of CAT/MBA reset process.
 *
 * @param [in] msr_start First MSR to be written
 * @param [in] msr_num Number of MSR's to be written
 * @param [in] coreid Core ID to be used for MSR write operations
 * @param [in] msr_val Value to be written to MSR's
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 * @retval PQOS_RETVAL_ERROR on MSR write error
 */
static int alloc_cos_reset(const unsigned msr_start,
                           const unsigned msr_num,
                           const unsigned coreid,
                           const uint64_t msr_val)
{
    int ret = PQOS_RETVAL_OK;
    unsigned i;

    for (i = 0; i < msr_num; i++) {
        int retval = msr_write(coreid, msr_start + i, msr_val);

        if (retval != MACHINE_RETVAL_OK)
            ret = PQOS_RETVAL_ERROR;
    }

    return ret;
}
static int cos_assoc_set(const unsigned lcore, const unsigned class_id)
{
    const uint32_t reg = PQOS_MSR_ASSOC;
    uint64_t val = 0;
    int ret;

    ret = msr_read(lcore, reg, &val);
    if (ret != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    val &= (~PQOS_MSR_ASSOC_QECOS_MASK);
    val |= (((uint64_t)class_id) << PQOS_MSR_ASSOC_QECOS_SHIFT);

    ret = msr_write(lcore, reg, val);
    if (ret != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    return PQOS_RETVAL_OK;
}
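/*
 * Read-side counterpart sketch for cos_assoc_set() above (a minimal
 * reconstruction, not necessarily the library's own getter): the class id
 * lives in the QECOS bits of the association MSR.
 */
static int cos_assoc_get(const unsigned lcore, unsigned *class_id)
{
    uint64_t val = 0;

    if (msr_read(lcore, PQOS_MSR_ASSOC, &val) != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    *class_id = (unsigned)((val & PQOS_MSR_ASSOC_QECOS_MASK) >>
                           PQOS_MSR_ASSOC_QECOS_SHIFT);
    return PQOS_RETVAL_OK;
}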
/**
 * @brief Disables IA32 performance counters
 *
 * @param num_cores number of cores in \a cores table
 * @param cores table with core id's
 * @param event mask of selected monitoring events
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int ia32_perf_counter_stop(const unsigned num_cores,
                                  const unsigned *cores,
                                  const enum pqos_mon_event event)
{
    int retval = PQOS_RETVAL_OK;
    unsigned i;

    ASSERT(cores != NULL && num_cores > 0);

    if (!(event & (PQOS_PERF_EVENT_LLC_MISS | PQOS_PERF_EVENT_IPC)))
        return retval;

    for (i = 0; i < num_cores; i++) {
        int ret = msr_write(cores[i], IA32_MSR_PERF_GLOBAL_CTRL, 0);

        if (ret != MACHINE_RETVAL_OK)
            retval = PQOS_RETVAL_ERROR;
    }

    return retval;
}
void acrn_update_ucode(struct acrn_vcpu *vcpu, uint64_t v)
{
    uint64_t gva, fault_addr = 0UL;
    struct ucode_header uhdr;
    size_t data_size;
    int32_t err;
    uint32_t err_code;

    spinlock_obtain(&micro_code_lock);

    gva = v - sizeof(struct ucode_header);

    err_code = 0U;
    err = copy_from_gva(vcpu, &uhdr, gva, sizeof(uhdr), &err_code, &fault_addr);
    if (err < 0) {
        if (err == -EFAULT) {
            vcpu_inject_pf(vcpu, fault_addr, err_code);
        }
    } else {
        data_size = get_ucode_data_size(&uhdr) + sizeof(struct ucode_header);
        if (data_size > MICRO_CODE_SIZE_MAX) {
            pr_err("The size of microcode is greater than 0x%x",
                   MICRO_CODE_SIZE_MAX);
        } else {
            err_code = 0U;
            err = copy_from_gva(vcpu, micro_code, gva, data_size,
                                &err_code, &fault_addr);
            if (err < 0) {
                if (err == -EFAULT) {
                    vcpu_inject_pf(vcpu, fault_addr, err_code);
                }
            } else {
                msr_write(MSR_IA32_BIOS_UPDT_TRIG,
                          (uint64_t)micro_code + sizeof(struct ucode_header));
                (void)get_microcode_version();
            }
        }
    }

    spinlock_release(&micro_code_lock);
}
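/*
 * Sketch of the size helper used above, following the Intel microcode
 * header convention (a zero data_size field means the default 2000-byte
 * payload). The field name is an assumption about this codebase's
 * struct ucode_header layout.
 */
static inline size_t get_ucode_data_size(const struct ucode_header *uhdr)
{
    return (uhdr->data_size != 0U) ? (size_t)uhdr->data_size : 2000U;
}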
/**
 * @brief Associates core with RMID at register level
 *
 * This function doesn't acquire API lock
 * and can be used internally when lock is already taken.
 *
 * @param lcore logical core id
 * @param rmid resource monitoring ID
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int mon_assoc_set_nocheck(const unsigned lcore, const pqos_rmid_t rmid)
{
    int ret = 0;
    uint32_t reg = 0;
    uint64_t val = 0;

    reg = PQOS_MSR_ASSOC;
    ret = msr_read(lcore, reg, &val);
    if (ret != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    /* Keep the COS (QECOS) bits, replace the RMID bits */
    val &= PQOS_MSR_ASSOC_QECOS_MASK;
    val |= (uint64_t)(rmid & PQOS_MSR_ASSOC_RMID_MASK);

    ret = msr_write(lcore, reg, val);
    if (ret != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    return PQOS_RETVAL_OK;
}
/**
 * @brief Enables or disables CDP across selected CPU sockets
 *
 * @param [in] sockets_num dimension of \a sockets array
 * @param [in] sockets array with socket ids to change CDP config on
 * @param [in] enable CDP enable/disable flag, 1 - enable, 0 - disable
 *
 * @return Operations status
 * @retval PQOS_RETVAL_OK on success
 * @retval PQOS_RETVAL_ERROR on failure, MSR read/write error
 */
static int cdp_enable(const unsigned sockets_num,
                      const unsigned *sockets,
                      const int enable)
{
    unsigned j = 0;

    ASSERT(sockets_num > 0 && sockets != NULL);

    LOG_INFO("%s CDP across sockets...\n",
             (enable) ? "Enabling" : "Disabling");

    for (j = 0; j < sockets_num; j++) {
        uint64_t reg = 0;
        unsigned core = 0;
        int ret = PQOS_RETVAL_OK;

        ret = pqos_cpu_get_one_core(m_cpu, sockets[j], &core);
        if (ret != PQOS_RETVAL_OK)
            return ret;

        ret = msr_read(core, PQOS_MSR_L3_QOS_CFG, &reg);
        if (ret != MACHINE_RETVAL_OK)
            return PQOS_RETVAL_ERROR;

        if (enable)
            reg |= PQOS_MSR_L3_QOS_CFG_CDP_EN;
        else
            reg &= ~PQOS_MSR_L3_QOS_CFG_CDP_EN;

        ret = msr_write(core, PQOS_MSR_L3_QOS_CFG, reg);
        if (ret != MACHINE_RETVAL_OK)
            return PQOS_RETVAL_ERROR;
    }

    return PQOS_RETVAL_OK;
}
int pqos_l3ca_set(const unsigned socket,
                  const unsigned num_ca,
                  const struct pqos_l3ca *ca)
{
    int ret = PQOS_RETVAL_OK;
    unsigned i = 0, count = 0, core = 0;
    int cdp_enabled = 0;

    _pqos_api_lock();

    ret = _pqos_check_init(1);
    if (ret != PQOS_RETVAL_OK) {
        _pqos_api_unlock();
        return ret;
    }

    if (ca == NULL || num_ca == 0) {
        _pqos_api_unlock();
        return PQOS_RETVAL_PARAM;
    }

    /**
     * Check if class bitmasks are contiguous.
     */
    for (i = 0; i < num_ca; i++) {
        int is_contig = 0;

        if (ca[i].cdp) {
            is_contig = is_contiguous(ca[i].u.s.data_mask) &&
                is_contiguous(ca[i].u.s.code_mask);
        } else {
            is_contig = is_contiguous(ca[i].u.ways_mask);
        }
        if (!is_contig) {
            LOG_ERROR("L3 COS%u bit mask is not contiguous!\n",
                      ca[i].class_id);
            _pqos_api_unlock();
            return PQOS_RETVAL_PARAM;
        }
    }

    ASSERT(m_cap != NULL);
    ret = pqos_l3ca_get_cos_num(m_cap, &count);
    if (ret != PQOS_RETVAL_OK) {
        _pqos_api_unlock();
        return ret; /**< perhaps no L3CA capability */
    }

    if (num_ca > count) {
        _pqos_api_unlock();
        return PQOS_RETVAL_ERROR;
    }

    ret = pqos_l3ca_cdp_enabled(m_cap, NULL, &cdp_enabled);
    if (ret != PQOS_RETVAL_OK) {
        _pqos_api_unlock();
        return ret;
    }

    ASSERT(m_cpu != NULL);
    ret = pqos_cpu_get_cores(m_cpu, socket, 1, &count, &core);
    if (ret != PQOS_RETVAL_OK) {
        _pqos_api_unlock();
        return ret;
    }

    if (cdp_enabled) {
        for (i = 0; i < num_ca; i++) {
            uint32_t reg = (ca[i].class_id * 2) + PQOS_MSR_L3CA_MASK_START;
            int retval = MACHINE_RETVAL_OK;
            uint64_t cmask = 0, dmask = 0;

            if (ca[i].cdp) {
                dmask = ca[i].u.s.data_mask;
                cmask = ca[i].u.s.code_mask;
            } else {
                dmask = ca[i].u.ways_mask;
                cmask = ca[i].u.ways_mask;
            }

            retval = msr_write(core, reg, dmask);
            if (retval != MACHINE_RETVAL_OK) {
                _pqos_api_unlock();
                return PQOS_RETVAL_ERROR;
            }

            retval = msr_write(core, reg + 1, cmask);
            if (retval != MACHINE_RETVAL_OK) {
                _pqos_api_unlock();
                return PQOS_RETVAL_ERROR;
            }
        }
    } else {
        for (i = 0; i < num_ca; i++) {
            uint32_t reg = ca[i].class_id + PQOS_MSR_L3CA_MASK_START;
            uint64_t val = ca[i].u.ways_mask;
            int retval = MACHINE_RETVAL_OK;

            if (ca[i].cdp) {
                LOG_ERROR("Attempting to set CDP COS "
                          "while CDP is disabled!\n");
                _pqos_api_unlock();
                return PQOS_RETVAL_ERROR;
            }

            retval = msr_write(core, reg, val);
            if (retval != MACHINE_RETVAL_OK) {
                _pqos_api_unlock();
                return PQOS_RETVAL_ERROR;
            }
        }
    }

    _pqos_api_unlock();
    return ret;
}
void apic_init(struct cpu *core)
{
    struct apic_dev *apic = NULL;
    ulong_t base_addr;
    uint32_t val;

    apic = (struct apic_dev *)malloc(sizeof(struct apic_dev));
    if (!apic) {
        panic("Could not allocate apic struct\n");
    }
    memset(apic, 0, sizeof(struct apic_dev));
    core->apic = apic;

    if (!check_apic_avail()) {
        panic("No APIC found on core %u, dying\n", core->id);
    }

    /* In response to AMD erratum #663
     * the damn thing may give us lint interrupts
     * even when we have them masked
     */
    if (nk_is_amd() && cpuid_get_family() == 0x15) {
        APIC_DEBUG("Writing Bridge Ctrl MSR for AMD Errata #663\n");
        msr_write(AMD_MSR_NBRIDGE_CTL,
                  msr_read(AMD_MSR_NBRIDGE_CTL) | (1ULL << 23) | (1ULL << 54));
    }

    base_addr = apic_get_base_addr();

    /* idempotent when not compiled as HRT */
    apic->base_addr = pa_to_va(base_addr);

#ifndef NAUT_CONFIG_HVM_HRT
    if (core->is_bsp) {
        /* map in the lapic as uncacheable */
        if (nk_map_page_nocache(apic->base_addr,
                                PTE_PRESENT_BIT | PTE_WRITABLE_BIT,
                                PS_4K) == -1) {
            panic("Could not map APIC\n");
        }
    }
#endif

    apic->version = apic_get_version(apic);
    apic->id = apic_get_id(apic);

#ifndef NAUT_CONFIG_XEON_PHI
    if (apic->version < 0x10 || apic->version > 0x15) {
        panic("Unsupported APIC version (0x%1x)\n", (unsigned)apic->version);
    }
#endif

    val = apic_read(apic, APIC_REG_LDR) & ~APIC_LDR_MASK;
    val |= SET_APIC_LOGICAL_ID(0);
    apic_write(apic, APIC_REG_LDR, val);

    // accept all interrupts
    apic_write(apic, APIC_REG_TPR, apic_read(apic, APIC_REG_TPR) & 0xffffff00);

    // disable timer interrupts initially
    apic_write(apic, APIC_REG_LVTT, APIC_DEL_MODE_FIXED | APIC_LVT_DISABLED);

    // disable perf cntr interrupts
    apic_write(apic, APIC_REG_LVTPC,
               APIC_DEL_MODE_FIXED | APIC_LVT_DISABLED | APIC_PC_INT_VEC);

    // disable thermal interrupts
    apic_write(apic, APIC_REG_LVTTHMR,
               APIC_DEL_MODE_FIXED | APIC_LVT_DISABLED | APIC_THRML_INT_VEC);

    /* do we have AMD extended LVT entries to deal with */
    if (nk_is_amd() && amd_has_ext_lvt(apic)) {
        amd_setup_ext_lvt(apic);
    }

    /* mask 8259a interrupts */
    apic_write(apic, APIC_REG_LVT0, APIC_DEL_MODE_EXTINT | APIC_LVT_DISABLED);

    /* only BSP takes NMI interrupts */
    apic_write(apic, APIC_REG_LVT1,
               APIC_DEL_MODE_NMI | (core->is_bsp ? 0 : APIC_LVT_DISABLED));

    // allow error interrupts
    apic_write(apic, APIC_REG_LVTERR, APIC_DEL_MODE_FIXED | APIC_ERROR_INT_VEC);

    // clear the ESR
    apic_write(apic, APIC_REG_ESR, 0u);

    apic_global_enable();

    // assign interrupt handlers
    if (core->is_bsp) {
        if (register_int_handler(APIC_NULL_KICK_VEC, null_kick, apic) != 0) {
            panic("Could not register null kick interrupt handler\n");
        }
        if (register_int_handler(APIC_SPUR_INT_VEC, spur_int_handler, apic) != 0) {
            panic("Could not register spurious interrupt handler\n");
        }
        if (register_int_handler(APIC_ERROR_INT_VEC, error_int_handler, apic) != 0) {
            panic("Could not register error interrupt handler\n");
            return;
        }
        /* we shouldn't ever get these, but just in case */
        if (register_int_handler(APIC_PC_INT_VEC, pc_int_handler, apic) != 0) {
            panic("Could not register perf counter interrupt handler\n");
            return;
        }
        if (register_int_handler(APIC_THRML_INT_VEC, thermal_int_handler, apic) != 0) {
            panic("Could not register thermal interrupt handler\n");
            return;
        }
        if (register_int_handler(APIC_EXT_LVT_DUMMY_VEC, dummy_int_handler, apic) != 0) {
            panic("Could not register dummy ext lvt handler\n");
            return;
        }
    }

    apic_assign_spiv(apic, APIC_SPUR_INT_VEC);

    /* turn it on */
    apic_sw_enable(apic);

    /* pass in quantum as milliseconds */
#ifndef NAUT_CONFIG_XEON_PHI
    apic_timer_setup(apic, 1000 / NAUT_CONFIG_HZ);
#endif

    apic_dump(apic);
}
/**
 * @brief Sets up IA32 performance counters for IPC and LLC miss ratio events
 *
 * @param num_cores number of cores in \a cores table
 * @param cores table with core id's
 * @param event mask of selected monitoring events
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int ia32_perf_counter_start(const unsigned num_cores,
                                   const unsigned *cores,
                                   const enum pqos_mon_event event)
{
    uint64_t global_ctrl_mask = 0;
    unsigned i;

    ASSERT(cores != NULL && num_cores > 0);

    if (!(event & (PQOS_PERF_EVENT_LLC_MISS | PQOS_PERF_EVENT_IPC)))
        return PQOS_RETVAL_OK;

    if (event & PQOS_PERF_EVENT_IPC)
        global_ctrl_mask |= (0x3ULL << 32); /**< fixed counters 0&1 */

    if (event & PQOS_PERF_EVENT_LLC_MISS)
        global_ctrl_mask |= 0x1ULL; /**< programmable counter 0 */

    if (!m_force_mon) {
        /**
         * Fixed counters are used for IPC calculations.
         * Programmable counters are used for LLC miss calculations.
         * Let's check if they are in use.
         */
        for (i = 0; i < num_cores; i++) {
            uint64_t global_inuse = 0;
            int ret;

            ret = msr_read(cores[i], IA32_MSR_PERF_GLOBAL_CTRL,
                           &global_inuse);
            if (ret != MACHINE_RETVAL_OK)
                return PQOS_RETVAL_ERROR;
            if (global_inuse & global_ctrl_mask) {
                LOG_ERROR("IPC and/or LLC miss performance "
                          "counters already in use!\n");
                return PQOS_RETVAL_PERF_CTR;
            }
        }
    }

    /**
     * - Disable counters in global control and
     *   reset counter values to 0.
     * - Program counters for desired events
     * - Enable counters in global control
     */
    for (i = 0; i < num_cores; i++) {
        const uint64_t fixed_ctrl = 0x33ULL; /**< track usr + os */
        int ret;

        ret = msr_write(cores[i], IA32_MSR_PERF_GLOBAL_CTRL, 0);
        if (ret != MACHINE_RETVAL_OK)
            break;

        if (event & PQOS_PERF_EVENT_IPC) {
            ret = msr_write(cores[i], IA32_MSR_INST_RETIRED_ANY, 0);
            if (ret != MACHINE_RETVAL_OK)
                break;
            ret = msr_write(cores[i], IA32_MSR_CPU_UNHALTED_THREAD, 0);
            if (ret != MACHINE_RETVAL_OK)
                break;
            ret = msr_write(cores[i], IA32_MSR_FIXED_CTR_CTRL, fixed_ctrl);
            if (ret != MACHINE_RETVAL_OK)
                break;
        }

        if (event & PQOS_PERF_EVENT_LLC_MISS) {
            const uint64_t evtsel0_miss = IA32_EVENT_LLC_MISS_MASK |
                (IA32_EVENT_LLC_MISS_UMASK << 8) |
                (1ULL << 16) | (1ULL << 17) | (1ULL << 22);

            ret = msr_write(cores[i], IA32_MSR_PMC0, 0);
            if (ret != MACHINE_RETVAL_OK)
                break;
            ret = msr_write(cores[i], IA32_MSR_PERFEVTSEL0, evtsel0_miss);
            if (ret != MACHINE_RETVAL_OK)
                break;
        }

        ret = msr_write(cores[i], IA32_MSR_PERF_GLOBAL_CTRL,
                        global_ctrl_mask);
        if (ret != MACHINE_RETVAL_OK)
            break;
    }

    if (i < num_cores)
        return PQOS_RETVAL_ERROR;

    return PQOS_RETVAL_OK;
}
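/*
 * Sketch of how the counters programmed above would be consumed
 * (hypothetical helper, not part of the original source): since the fixed
 * counters are zeroed at start, their raw values are deltas, and
 * IPC = retired instructions / unhalted core cycles.
 */
static int compute_ipc(const unsigned lcore, double *ipc)
{
    uint64_t instr = 0, cycles = 0;

    if (msr_read(lcore, IA32_MSR_INST_RETIRED_ANY, &instr) != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;
    if (msr_read(lcore, IA32_MSR_CPU_UNHALTED_THREAD, &cycles) != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    *ipc = (cycles > 0) ? ((double)instr / (double)cycles) : 0.0;
    return PQOS_RETVAL_OK;
}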
void syscall_initialize()
{
    /* STAR[47:32] = kernel CS selector (0x08) loaded on syscall,
     * STAR[63:48] = base selector (0x1b) for sysret */
    uint64_t star = (8L << 0x20) | (0x1bL << 0x30);
    msr_write(MSR_STAR, star);

    // interrupts will be disabled by syscall (SFMASK bit 9 clears RFLAGS.IF)
    msr_write(MSR_SFMASK, 0x200);
}
int hw_mba_set(const unsigned socket,
               const unsigned num_cos,
               const struct pqos_mba *requested,
               struct pqos_mba *actual)
{
    int ret = PQOS_RETVAL_OK;
    unsigned i = 0, count = 0, core = 0, step = 0;
    const struct pqos_capability *mba_cap = NULL;

    if (requested == NULL || num_cos == 0)
        return PQOS_RETVAL_PARAM;

    /**
     * Check if MBA is supported
     */
    ASSERT(m_cap != NULL);
    ret = pqos_cap_get_type(m_cap, PQOS_CAP_TYPE_MBA, &mba_cap);
    if (ret != PQOS_RETVAL_OK)
        return PQOS_RETVAL_RESOURCE; /* MBA not supported */

    count = mba_cap->u.mba->num_classes;
    step = mba_cap->u.mba->throttle_step;

    /**
     * Non-linear mode not currently supported
     */
    if (!mba_cap->u.mba->is_linear) {
        LOG_ERROR("MBA non-linear mode not currently supported!\n");
        return PQOS_RETVAL_RESOURCE;
    }

    /**
     * Check if MBA rate and class
     * id's are within allowed range.
     */
    for (i = 0; i < num_cos; i++) {
        if (requested[i].mb_rate == 0 || requested[i].mb_rate > 100) {
            LOG_ERROR("MBA COS%u rate out of range (from 1-100)!\n",
                      requested[i].class_id);
            return PQOS_RETVAL_PARAM;
        }
        if (requested[i].class_id >= count) {
            LOG_ERROR("MBA COS%u is out of range (COS%u is max)!\n",
                      requested[i].class_id, count - 1);
            return PQOS_RETVAL_PARAM;
        }
    }

    ASSERT(m_cpu != NULL);
    ret = pqos_cpu_get_one_core(m_cpu, socket, &core);
    if (ret != PQOS_RETVAL_OK)
        return ret;

    for (i = 0; i < num_cos; i++) {
        const uint32_t reg = requested[i].class_id + PQOS_MSR_MBA_MASK_START;
        /* Round requested rate to the nearest throttle step and
         * convert it into a delay value */
        uint64_t val = PQOS_MBA_LINEAR_MAX -
            (((requested[i].mb_rate + (step / 2)) / step) * step);
        int retval = MACHINE_RETVAL_OK;

        if (val > mba_cap->u.mba->throttle_max)
            val = mba_cap->u.mba->throttle_max;

        retval = msr_write(core, reg, val);
        if (retval != MACHINE_RETVAL_OK)
            return PQOS_RETVAL_ERROR;

        /**
         * If table to store actual values set is passed,
         * read MSR values and store in table
         */
        if (actual == NULL)
            continue;

        retval = msr_read(core, reg, &val);
        if (retval != MACHINE_RETVAL_OK)
            return PQOS_RETVAL_ERROR;

        actual[i] = requested[i];
        actual[i].mb_rate = (PQOS_MBA_LINEAR_MAX - val);
    }

    return ret;
}
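/*
 * Worked example of the rate-to-delay conversion above, assuming a linear
 * throttle_step of 10 and PQOS_MBA_LINEAR_MAX == 100: a requested rate of
 * 73% rounds (in integer arithmetic) to ((73 + 5) / 10) * 10 = 70, so the
 * COS MSR is written with the delay value 100 - 70 = 30, and the rate
 * reported back through "actual" is 100 - 30 = 70%.
 */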
void apic_set_base(apic_base_t addr)
{
    msr_write(MSR_IA32_APIC_BASE, addr.raw);
}
int pqos_alloc_assoc_set(const unsigned lcore, const unsigned class_id)
{
    int ret = PQOS_RETVAL_OK;
    unsigned num_l2_cos = 0, num_l3_cos = 0;
    const uint32_t reg = PQOS_MSR_ASSOC;
    uint64_t val = 0;

    _pqos_api_lock();

    ret = _pqos_check_init(1);
    if (ret != PQOS_RETVAL_OK) {
        _pqos_api_unlock();
        return ret;
    }

    ASSERT(m_cpu != NULL);
    ret = pqos_cpu_check_core(m_cpu, lcore);
    if (ret != PQOS_RETVAL_OK) {
        _pqos_api_unlock();
        return PQOS_RETVAL_PARAM;
    }

    ASSERT(m_cap != NULL);
    ret = pqos_l3ca_get_cos_num(m_cap, &num_l3_cos);
    if (ret != PQOS_RETVAL_OK && ret != PQOS_RETVAL_RESOURCE) {
        _pqos_api_unlock();
        return ret;
    }

    ret = pqos_l2ca_get_cos_num(m_cap, &num_l2_cos);
    if (ret != PQOS_RETVAL_OK && ret != PQOS_RETVAL_RESOURCE) {
        _pqos_api_unlock();
        return ret;
    }

    if (class_id > num_l3_cos && class_id > num_l2_cos) {
        /* class_id is out of bounds */
        _pqos_api_unlock();
        return PQOS_RETVAL_PARAM;
    }

    ret = msr_read(lcore, reg, &val);
    if (ret != MACHINE_RETVAL_OK) {
        _pqos_api_unlock();
        return PQOS_RETVAL_ERROR;
    }

    val &= (~PQOS_MSR_ASSOC_QECOS_MASK);
    val |= (((uint64_t)class_id) << PQOS_MSR_ASSOC_QECOS_SHIFT);

    ret = msr_write(lcore, reg, val);
    if (ret != MACHINE_RETVAL_OK) {
        _pqos_api_unlock();
        return PQOS_RETVAL_ERROR;
    }

    _pqos_api_unlock();
    return PQOS_RETVAL_OK;
}
static inline void apic_global_enable(void)
{
    msr_write(APIC_BASE_MSR, msr_read(APIC_BASE_MSR) | APIC_GLOBAL_ENABLE);
}