Example #1
static void set_max_freq(void)
{
	msr_t perf_ctl;
	msr_t msr;

	/* Enable speed step */
	msr = msr_read(MSR_IA32_MISC_ENABLES);
	msr.lo |= (1 << 16);
	msr_write(MSR_IA32_MISC_ENABLES, msr);

	/*
	 * Set guaranteed ratio [21:16] from IACORE_RATIOS to bits [15:8] of
	 * the PERF_CTL
	 */
	msr = msr_read(MSR_IACORE_RATIOS);
	perf_ctl.lo = (msr.lo & 0x3f0000) >> 8;

	/*
	 * Set guaranteed vid [21:16] from IACORE_VIDS to bits [7:0] of
	 * the PERF_CTL
	 */
	msr = msr_read(MSR_IACORE_VIDS);
	perf_ctl.lo |= (msr.lo & 0x7f0000) >> 16;
	perf_ctl.hi = 0;

	msr_write(MSR_IA32_PERF_CTL, perf_ctl);
}
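For example, with hypothetical register values of IACORE_RATIOS.lo = 0x00180000 (guaranteed ratio 0x18) and IACORE_VIDS.lo = 0x00380000 (guaranteed VID 0x38), the function above ends up writing PERF_CTL.lo = 0x1838: ratio 0x18 in bits [15:8] and VID 0x38 in bits [7:0].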
Example #2
static void wait_until_idle(void)
{
	uint64_t tsc, pc, res;

	do {
		set_alarm(0, 500 * 1000);

		tsc = msr_read(IA32_TIME_STAMP_COUNTER);
		pc = msr_read(deepest_pc_state);

		while (!alarm_received)
			pause();

		pc = msr_read(deepest_pc_state) - pc;
		tsc = msr_read(IA32_TIME_STAMP_COUNTER) - tsc;

		res = pc * 100 / tsc;

		/*printf("res:%02"PRIu64"\n", res);*/
	} while (res < idle_res && idle_res - res > 3);

	if (res > idle_res && res - idle_res > 3)
		fprintf(stderr, "The calculated idle residency may be too low "
			"(got %02"PRIu64"%%)\n", res);
}
Example #3
void init_cr0_cr4_host_mask(void)
{
	static bool inited = false;
	static uint64_t cr0_host_owned_bits, cr4_host_owned_bits;
	uint64_t fixed0, fixed1;

	if (!inited) {
		/* Read the CR0 fixed0 / fixed1 MSR registers */
		fixed0 = msr_read(MSR_IA32_VMX_CR0_FIXED0);
		fixed1 = msr_read(MSR_IA32_VMX_CR0_FIXED1);

		cr0_host_owned_bits = ~(fixed0 ^ fixed1);
		/* Add the bit hv wants to trap */
		cr0_host_owned_bits |= CR0_TRAP_MASK;
		/* Clear PE/PG from the CR0 always-on bits due to the "unrestricted guest" feature */
		cr0_always_on_mask = fixed0 & (~(CR0_PE | CR0_PG));
		cr0_always_off_mask = ~fixed1;
		/*
		 * SDM 2.5: bits 63:32 of CR0 and CR4 are reserved and must be
		 * written as zero, so merge them into the always-off mask.
		 */
		cr0_always_off_mask |= 0xFFFFFFFF00000000UL;

		/* Read the CR4 fixed0 / fixed1 MSR registers */
		fixed0 = msr_read(MSR_IA32_VMX_CR4_FIXED0);
		fixed1 = msr_read(MSR_IA32_VMX_CR4_FIXED1);

		cr4_host_owned_bits = ~(fixed0 ^ fixed1);
		/* Add the bit hv wants to trap */
		cr4_host_owned_bits |= CR4_TRAP_MASK;
		cr4_always_on_mask = fixed0;
		/* Record the bit fixed to 0 for CR4, including reserved bits */
		cr4_always_off_mask = ~fixed1;
		/*
		 * SDM 2.5: bits 63:32 of CR0 and CR4 are reserved and must be
		 * written as zero, so merge them into the always-off mask.
		 */
		cr4_always_off_mask |= 0xFFFFFFFF00000000UL;
		cr4_always_off_mask |= CR4_RESERVED_MASK;
		inited = true;
	}

	exec_vmwrite(VMX_CR0_GUEST_HOST_MASK, cr0_host_owned_bits);
	/* Output CR0 mask value */
	pr_dbg("CR0 guest-host mask value: 0x%016llx", cr0_host_owned_bits);


	exec_vmwrite(VMX_CR4_GUEST_HOST_MASK, cr4_host_owned_bits);
	/* Output CR4 mask value */
	pr_dbg("CR4 guest-host mask value: 0x%016llx", cr4_host_owned_bits);
}
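The fixed0/fixed1 arithmetic above is easier to follow one bit at a time. Below is a minimal, standalone sketch of the classification rule the function relies on (a FIXED0 bit set to 1 means the CR bit must be 1, a FIXED1 bit cleared to 0 means it must be 0); classify_cr_bit() is a hypothetical helper, not part of the hypervisor code above:

/* Sketch: classify one CR0/CR4 bit from the VMX fixed MSR pair. */
static const char *classify_cr_bit(uint64_t fixed0, uint64_t fixed1, int bit)
{
	uint64_t mask = 1UL << bit;

	if ((fixed0 & mask) != 0UL && (fixed1 & mask) != 0UL)
		return "always on";	/* fixed to 1 */
	if ((fixed0 & mask) == 0UL && (fixed1 & mask) == 0UL)
		return "always off";	/* fixed to 0 */
	return "flexible";		/* fixed0 = 0, fixed1 = 1: guest may own it */
}

The fixed bits are exactly the ones selected by ~(fixed0 ^ fixed1), i.e. the host-owned bits, to which the function then adds whatever bits the hypervisor wants to trap.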
Example #4
static void setup_idle(void)
{
	uint64_t tsc, pc[NUM_PC_STATES], res, best_res;
	int pc_i, best_pc_i = 0, retries, consecutive_not_best;

	for (retries = 0; ; retries++) {

		set_alarm(opts.res_warm_time, 0);
		while (!alarm_received)
			pause();

		set_alarm(opts.res_calc_time, 0);

		tsc = msr_read(IA32_TIME_STAMP_COUNTER);
		for (pc_i = best_pc_i; pc_i < NUM_PC_STATES; pc_i++)
			pc[pc_i] = msr_read(res_msr_addrs[pc_i]);

		while (!alarm_received)
			pause();

		for (pc_i = best_pc_i; pc_i < NUM_PC_STATES; pc_i++)
			pc[pc_i] = msr_read(res_msr_addrs[pc_i]) - pc[pc_i];
		tsc = msr_read(IA32_TIME_STAMP_COUNTER) - tsc;

		for (pc_i = NUM_PC_STATES - 1; pc_i >= best_pc_i; pc_i--)
			if (pc[pc_i] != 0)
				break;
		igt_require_f(pc_i >= 0, "We're not reaching any PC states!\n");

		res = pc[pc_i] * 100 / tsc;

		if (retries == 0 || pc_i > best_pc_i || res > best_res) {
			best_pc_i = pc_i;
			best_res = res;
			consecutive_not_best = 0;
		} else {
			consecutive_not_best++;
			if (consecutive_not_best > 2)
				break;
		}
	}

	deepest_pc_state = res_msr_addrs[best_pc_i];
	idle_res = best_res;

	printf("Stable idle residency retries:\t%d\n", retries);
	printf("Deepest PC state reached when idle:\t%s\n",
	       res_msr_names[best_pc_i]);
	printf("Idle residency for this state:\t%02"PRIu64"%%\n", idle_res);
}
Example #5
static void
apic_set_base_addr (struct apic_dev * apic, addr_t addr)
{
    uint64_t data;
    data = msr_read(APIC_BASE_MSR);
    msr_write(APIC_BASE_MSR, (addr & APIC_BASE_ADDR_MASK) | (data & 0xfff));
}
Example #6
static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine number of MTRRs need to be saved */
	msr = msr_read(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base and mask. +1 for IA32_MTRR_DEF_TYPE */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printf("Cannot mirror all %d msrs.\n", msr_count);
		return -ENOSPC;
	}

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE_MSR(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK_MSR(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	return msr_count;
}
Example #7
int
pqos_alloc_assoc_get(const unsigned lcore,
                     unsigned *class_id)
{
        const struct pqos_capability *l3_cap = NULL;
        const struct pqos_capability *l2_cap = NULL;
        int ret = PQOS_RETVAL_OK;
        const uint32_t reg = PQOS_MSR_ASSOC;
        uint64_t val = 0;

        _pqos_api_lock();

        ret = _pqos_check_init(1);
        if (ret != PQOS_RETVAL_OK) {
                _pqos_api_unlock();
                return ret;
        }

        if (class_id == NULL) {
                _pqos_api_unlock();
                return PQOS_RETVAL_PARAM;
        }

        ASSERT(m_cpu != NULL);
        ret = pqos_cpu_check_core(m_cpu, lcore);
        if (ret != PQOS_RETVAL_OK) {
                _pqos_api_unlock();
                return PQOS_RETVAL_PARAM;
        }

        ASSERT(m_cap != NULL);
        ret = pqos_cap_get_type(m_cap, PQOS_CAP_TYPE_L3CA, &l3_cap);
        if (ret != PQOS_RETVAL_OK && ret != PQOS_RETVAL_RESOURCE) {
                _pqos_api_unlock();
                return ret;
        }

        ret = pqos_cap_get_type(m_cap, PQOS_CAP_TYPE_L2CA, &l2_cap);
        if (ret != PQOS_RETVAL_OK && ret != PQOS_RETVAL_RESOURCE) {
                _pqos_api_unlock();
                return ret;
        }

        if (l2_cap == NULL && l3_cap == NULL) {
                /* no L2/L3 CAT detected */
                _pqos_api_unlock();
                return PQOS_RETVAL_RESOURCE;
        }

        if (msr_read(lcore, reg, &val) != MACHINE_RETVAL_OK) {
                _pqos_api_unlock();
                return PQOS_RETVAL_ERROR;
        }

        val >>= PQOS_MSR_ASSOC_QECOS_SHIFT;
        *class_id = (unsigned) val;

        _pqos_api_unlock();
        return PQOS_RETVAL_OK;
}
Example #8
static uint8_t
apic_is_bsp (struct apic_dev * apic)
{
    uint64_t data;
    data = msr_read(APIC_BASE_MSR);
    return APIC_IS_BSP(data);
}
Example #9
static inline int __flip_bit(u32 msr, u8 bit, bool set)
{
	struct msr m, m1;
	int err = -EINVAL;

	if (bit > 63)
		return err;

	err = msr_read(msr, &m);
	if (err)
		return err;

	m1 = m;
	if (set)
		m1.q |=  BIT_64(bit);
	else
		m1.q &= ~BIT_64(bit);

	if (m1.q == m.q)
		return 0;

	err = msr_write(msr, &m1);
	if (err)
		return err;

	return 1;
}
Example #10
static ulong_t 
apic_get_base_addr (void) 
{
    uint64_t data;
    data = msr_read(APIC_BASE_MSR);

    // we're assuming PAE is on
    return (addr_t)(data & APIC_BASE_ADDR_MASK);
}
Example #11
void lapic_enable() {
    // set bit 11 in the APIC_BASE MSR to enable the APIC
    uint32_t base = msr_read(MSR_APIC_BASE);
    base |= 1<<11;
    msr_write(MSR_APIC_BASE, base);
    
    // set the software-enable bit (8) in the spurious vector
    set_reg(LAPIC_REG_SPURIOUS,
        get_reg(LAPIC_REG_SPURIOUS) | (1<<8));
}
Example #12
uint64_t get_microcode_version(void)
{
	uint64_t val;
	uint32_t eax, ebx, ecx, edx;

	msr_write(MSR_IA32_BIOS_SIGN_ID, 0U);
	cpuid(CPUID_FEATURES, &eax, &ebx, &ecx, &edx);
	val = msr_read(MSR_IA32_BIOS_SIGN_ID);

	return val;
}
Example #13
static unsigned long tsc_freq(void)
{
	msr_t platform_info;
	ulong bclk = bus_freq();

	if (!bclk)
		return 0;

	platform_info = msr_read(MSR_PLATFORM_INFO);

	return bclk * ((platform_info.lo >> 8) & 0xff);
}
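For instance, with a hypothetical 100 MHz bclk and a PLATFORM_INFO[15:8] ratio of 24 (0x18), the function above reports a TSC frequency of 2,400,000,000 Hz.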
Example #14
static uint64_t do_measurement(void (*callback)(void *ptr), void *ptr)
{
	uint64_t tsc, pc;

	wait_until_idle();

	set_alarm(opts.res_warm_time, 0);
	callback(ptr);

	set_alarm(opts.res_calc_time, 0);

	tsc = msr_read(IA32_TIME_STAMP_COUNTER);
	pc = msr_read(deepest_pc_state);

	callback(ptr);

	pc = msr_read(deepest_pc_state) - pc;
	tsc = msr_read(IA32_TIME_STAMP_COUNTER) - tsc;

	return pc * 100 / tsc;
}
Example #15
static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = msr_read(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry */
	entry++;
	return entry;
}
Example #16
/**
 * @brief Reads monitoring event data from given core
 *
 * This function doesn't acquire API lock.
 *
 * @param lcore logical core id
 * @param rmid RMID to be read
 * @param event monitoring event
 * @param value place to store read value
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int
mon_read(const unsigned lcore,
         const pqos_rmid_t rmid,
         const enum pqos_mon_event event,
         uint64_t *value)
{
    int retries = 3, retval = PQOS_RETVAL_OK;
    uint32_t reg = 0;
    uint64_t val = 0;

    /**
     * Set event selection register (RMID + event id)
     */
    reg = PQOS_MSR_MON_EVTSEL;
    val = ((uint64_t)rmid) & PQOS_MSR_MON_EVTSEL_RMID_MASK;
    val <<= PQOS_MSR_MON_EVTSEL_RMID_SHIFT;
    val |= ((uint64_t)event) & PQOS_MSR_MON_EVTSEL_EVTID_MASK;
    if (msr_write(lcore, reg, val) != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    /**
     * read selected data associated with previously selected RMID+event
     */
    reg = PQOS_MSR_MON_QMC;
    do {
        if (msr_read(lcore, reg, &val) != MACHINE_RETVAL_OK) {
            retval = PQOS_RETVAL_ERROR;
            break;
        }
        if ((val&(PQOS_MSR_MON_QMC_ERROR)) != 0ULL) {
            /**
             * Unsupported event id or RMID selected
             */
            retval = PQOS_RETVAL_ERROR;
            break;
        }
        retries--;
    } while ((val&PQOS_MSR_MON_QMC_UNAVAILABLE) != 0ULL && retries > 0);

    /**
     * Store event value
     */
    if (retval == PQOS_RETVAL_OK)
        *value = (val & PQOS_MSR_MON_QMC_DATA_MASK);
    else
        LOG_WARN("Error reading event %u on core %u (RMID%u)!\n",
                 (unsigned) event, lcore, (unsigned) rmid);

    return retval;
}
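A minimal usage sketch, assuming it sits in the same file so it can reach the static mon_read() and the get_event_id() helper used by pqos_core_poll() further down; read_llc_occupancy_raw() is a hypothetical wrapper, and scaling the raw count to bytes is left to the library:

/* Sketch: read the raw LLC occupancy count for one core/RMID pair. */
static int
read_llc_occupancy_raw(const unsigned lcore,
                       const pqos_rmid_t rmid,
                       uint64_t *raw)
{
    return mon_read(lcore, rmid,
                    get_event_id(PQOS_MON_EVENT_L3_OCCUP), raw);
}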
Example #17
static unsigned bus_freq(void)
{
	msr_t clk_info = msr_read(MSR_BSEL_CR_OVERCLOCK_CONTROL);
	switch (clk_info.lo & 0x3) {
	case 0:
		return 83333333;
	case 1:
		return 100000000;
	case 2:
		return 133333333;
	case 3:
		return 116666666;
	default:
		return 0;
	}
}
Example #18
static void northbridge_init(struct udevice *dev, int rev)
{
	u32 bridge_type;

	add_fixed_resources(dev, 6);
	northbridge_dmi_init(dev, rev);

	bridge_type = readl(MCHBAR_REG(0x5f10));
	bridge_type &= ~0xff;

	if ((rev & BASE_REV_MASK) == BASE_REV_IVB) {
		/* Enable Power Aware Interrupt Routing - fixed priority */
		clrsetbits_8(MCHBAR_REG(0x5418), 0xf, 0x4);

		/* 30h for IvyBridge */
		bridge_type |= 0x30;
	} else {
		/* 20h for Sandybridge */
		bridge_type |= 0x20;
	}
	writel(bridge_type, MCHBAR_REG(0x5f10));

	/*
	 * Set bit 0 of BIOS_RESET_CPL to indicate to the CPU
	 * that BIOS has initialized memory and power management
	 */
	setbits_8(MCHBAR_REG(BIOS_RESET_CPL), 1);
	debug("Set BIOS_RESET_CPL\n");

	/* Configure turbo power limits 1ms after reset complete bit */
	mdelay(1);
	set_power_limits(28);

	/*
	 * CPUs with configurable TDP also need power limits set
	 * in MCHBAR.  Use same values from MSR_PKG_POWER_LIMIT.
	 */
	if (cpu_config_tdp_levels()) {
		msr_t msr = msr_read(MSR_PKG_POWER_LIMIT);

		writel(msr.lo, MCHBAR_REG(0x59A0));
		writel(msr.hi, MCHBAR_REG(0x59A4));
	}

	/* Set here before graphics PM init */
	writel(0x00100001, MCHBAR_REG(0x5500));
}
Example #19
static void setup_msr(void)
{
#if 0
	uint64_t control;
	const char *limit;
#endif

	/* Make sure our Kernel supports MSR and the module is loaded. */
	igt_assert(system("modprobe -q msr > /dev/null 2>&1") != -1);

	msr_fd = open("/dev/cpu/0/msr", O_RDONLY);
	igt_assert_f(msr_fd >= 0,
		     "Can't open /dev/cpu/0/msr.\n");

#if 0
	/* FIXME: why is this code not printing the truth? */
	control = msr_read(MSR_PKG_CST_CONFIG_CONTROL);
	printf("Control: 0x016%" PRIx64 "\n", control);
	switch (control & PKG_CST_LIMIT_MASK) {
	case PKG_CST_LIMIT_C0:
		limit = "C0";
		break;
	case PKG_CST_LIMIT_C2:
		limit = "C2";
		break;
	case PKG_CST_LIMIT_C3:
		limit = "C3";
		break;
	case PKG_CST_LIMIT_C6:
		limit = "C6";
		break;
	case PKG_CST_LIMIT_C7:
		limit = "C7";
		break;
	case PKG_CST_LIMIT_C7s:
		limit = "C7s";
		break;
	case PKG_CST_NO_LIMIT:
		limit = "no limit";
		break;
	default:
		limit = "unknown";
		break;
	}
	printf("Package C state limit: %s\n", limit);
#endif
}
Example #20
int main()
{
     void *dev;
     int cnt;
     int i,j;
    // Register parameters and signals
    msr_init(&rtp,NULL);

    msr_reg_rtw_param("/daten/p1(ll=\"1\" ul=\"2\")","gain","int",&p1,1,1,SS_INT32,var_SCALAR,sizeof(int));
    msr_reg_rtw_param("/daten/p2(init=\"0.5\" ll=\"1\" ul=\"2\")","gain","double",&p2,1,1,SS_DOUBLE,var_SCALAR,sizeof(double));
    msr_reg_rtw_param("/daten/p3(unit=\"s\")","gain","double",&p3,1,10,SS_DOUBLE,var_VECTOR,sizeof(double));

    msr_reg_rtw_signal("/kanal/k1","","int",(void *)&rtp.k1 - (void *)&rtp,1,1,SS_INT32,var_SCALAR,sizeof(int));

    msr_reg_rtw_signal("/kanal/k2","","int",(void *)&rtp.k2 - (void *)&rtp,1,1,SS_DOUBLE,var_SCALAR,sizeof(double));

    msr_reg_rtw_signal("/kanal/k3","","int",(void *)&rtp.k3[0] - (void *)&rtp,1,5,SS_DOUBLE,var_VECTOR,sizeof(double));

    msr_reg_enum_list("/Aufzaehlung","",&en[0],MSR_R |MSR_W,5,1,"Eins,Zwei,Drei",NULL,NULL);

    // Fill the channels
    for(i=0;i<1000;i++) {
//	printf("index %i\n",i);
	rtp.k1 = i;
	rtp.k2 = i*0.3;
	for(j=0;j<5;j++)
	    rtp.k3[j] = j*i;
	msr_update(&rtp);
    }

    // Read
    dev = msr_open(STDIN_FILENO,STDOUT_FILENO);

    msr_read(dev);
    do {
	cnt = msr_write(dev);
//	printf("Write count: %i\n",cnt);
    } while(cnt>0); 

    msr_close(dev);

    msr_cleanup();

    return 0;
}
Example #21
/**
 * @brief Gets the COS associated with \a lcore
 *
 * @param [in] lcore lcore to read COS association from
 * @param [out] class_id associated COS
 *
 * @return Operation status
 */
static int
cos_assoc_get(const unsigned lcore, unsigned *class_id)
{
        const uint32_t reg = PQOS_MSR_ASSOC;
        uint64_t val = 0;

        if (class_id == NULL)
                return PQOS_RETVAL_PARAM;

        if (msr_read(lcore, reg, &val) != MACHINE_RETVAL_OK)
                return PQOS_RETVAL_ERROR;

        val >>= PQOS_MSR_ASSOC_QECOS_SHIFT;
        *class_id = (unsigned) val;

        return PQOS_RETVAL_OK;
}
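The get/set pair is typically used together; here is a minimal sketch that temporarily moves a core to another class of service and then restores it, assuming it sits next to the static helpers (cos_assoc_set() is shown in Example #24 below); with_cos() is a hypothetical wrapper:

/* Sketch: run a callback with a temporary COS association, then restore. */
static int
with_cos(const unsigned lcore, const unsigned tmp_cos, void (*fn)(void))
{
        unsigned old_cos = 0;
        int ret = cos_assoc_get(lcore, &old_cos);

        if (ret != PQOS_RETVAL_OK)
                return ret;

        ret = cos_assoc_set(lcore, tmp_cos);
        if (ret != PQOS_RETVAL_OK)
                return ret;

        fn();                                  /* workload to run under tmp_cos */

        return cos_assoc_set(lcore, old_cos); /* restore previous class */
}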
Example #22
/**
 * @brief Reads \a lcore to RMID association
 *
 * @param lcore logical core id
 * @param rmid place to store RMID \a lcore is assigned to
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK success
 */
static int
mon_assoc_get(const unsigned lcore,
              pqos_rmid_t *rmid)
{
    int ret = 0;
    uint32_t reg = PQOS_MSR_ASSOC;
    uint64_t val = 0;

    ASSERT(rmid != NULL);

    ret = msr_read(lcore, reg, &val);
    if (ret != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    val &= PQOS_MSR_ASSOC_RMID_MASK;
    *rmid = (pqos_rmid_t) val;

    return PQOS_RETVAL_OK;
}
Example #23
int
hw_l2ca_get(const unsigned l2id,
            const unsigned max_num_ca,
            unsigned *num_ca,
            struct pqos_l2ca *ca)
{
        int ret = PQOS_RETVAL_OK;
        unsigned i = 0, count = 0;
        unsigned core = 0;

        if (num_ca == NULL || ca == NULL || max_num_ca == 0)
                return PQOS_RETVAL_PARAM;

        ASSERT(m_cap != NULL);
        ret = pqos_l2ca_get_cos_num(m_cap, &count);
        if (ret != PQOS_RETVAL_OK)
                return PQOS_RETVAL_RESOURCE; /* L2 CAT not supported */

        if (max_num_ca < count)
                /* Not enough space to store the classes */
                return PQOS_RETVAL_PARAM;

        ASSERT(m_cpu != NULL);
        ret = pqos_cpu_get_one_by_l2id(m_cpu, l2id, &core);
        if (ret != PQOS_RETVAL_OK)
                return ret;

        for (i = 0; i < count; i++) {
                const uint32_t reg = PQOS_MSR_L2CA_MASK_START + i;
                uint64_t val = 0;
                int retval = msr_read(core, reg, &val);

                if (retval != MACHINE_RETVAL_OK)
                        return PQOS_RETVAL_ERROR;

                ca[i].class_id = i;
                ca[i].ways_mask = val;
        }
        *num_ca = count;

        return ret;
}
Example #24
static int
cos_assoc_set(const unsigned lcore, const unsigned class_id)
{
        const uint32_t reg = PQOS_MSR_ASSOC;
        uint64_t val = 0;
        int ret;

        ret = msr_read(lcore, reg, &val);
        if (ret != MACHINE_RETVAL_OK)
                return PQOS_RETVAL_ERROR;

        val &= (~PQOS_MSR_ASSOC_QECOS_MASK);
        val |= (((uint64_t) class_id) << PQOS_MSR_ASSOC_QECOS_SHIFT);

        ret = msr_write(lcore, reg, val);
        if (ret != MACHINE_RETVAL_OK)
                return PQOS_RETVAL_ERROR;

        return PQOS_RETVAL_OK;
}
Example #25
int hw_mba_get(const unsigned socket,
               const unsigned max_num_cos,
               unsigned *num_cos,
               struct pqos_mba *mba_tab)
{
        int ret = PQOS_RETVAL_OK;
        unsigned i = 0, count = 0, core = 0;

        if (num_cos == NULL || mba_tab == NULL || max_num_cos == 0)
                return PQOS_RETVAL_PARAM;

        ASSERT(m_cap != NULL);
        ret = pqos_mba_get_cos_num(m_cap, &count);
        if (ret != PQOS_RETVAL_OK)
                return ret;             /**< no MBA capability */

        if (count > max_num_cos)
                return PQOS_RETVAL_ERROR;

        ASSERT(m_cpu != NULL);
        ret = pqos_cpu_get_one_core(m_cpu, socket, &core);
        if (ret != PQOS_RETVAL_OK)
                return ret;

        for (i = 0; i < count; i++) {
                const uint32_t reg = PQOS_MSR_MBA_MASK_START + i;
                uint64_t val = 0;
                int retval = msr_read(core, reg, &val);

                if (retval != MACHINE_RETVAL_OK)
                        return PQOS_RETVAL_ERROR;

                mba_tab[i].class_id = i;
                mba_tab[i].mb_rate = (unsigned) PQOS_MBA_LINEAR_MAX - val;
        }
        *num_cos = count;

        return ret;
}
Example #26
/**
 * @brief Associates core with RMID at register level
 *
 * This function doesn't acquire API lock
 * and can be used internally when lock is already taken.
 *
 * @param lcore logical core id
 * @param rmid resource monitoring ID
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int
mon_assoc_set_nocheck(const unsigned lcore,
                      const pqos_rmid_t rmid)
{
    int ret = 0;
    uint32_t reg = 0;
    uint64_t val = 0;

    reg = PQOS_MSR_ASSOC;
    ret = msr_read(lcore, reg, &val);
    if (ret != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    val &= PQOS_MSR_ASSOC_QECOS_MASK;
    val |= (uint64_t)(rmid & PQOS_MSR_ASSOC_RMID_MASK);

    ret = msr_write(lcore, reg, val);
    if (ret != MACHINE_RETVAL_OK)
        return PQOS_RETVAL_ERROR;

    return PQOS_RETVAL_OK;
}
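Together with mon_assoc_get() above, the nocheck setter covers the whole register-level association flow; a minimal sketch, assuming both static helpers live in the same file (assign_and_verify_rmid() is hypothetical):

/* Sketch: associate a core with an RMID, then read it back to confirm. */
static int
assign_and_verify_rmid(const unsigned lcore, const pqos_rmid_t rmid)
{
    pqos_rmid_t readback = 0;
    int ret = mon_assoc_set_nocheck(lcore, rmid);

    if (ret != PQOS_RETVAL_OK)
        return ret;

    ret = mon_assoc_get(lcore, &readback);
    if (ret != PQOS_RETVAL_OK)
        return ret;

    return (readback == rmid) ? PQOS_RETVAL_OK : PQOS_RETVAL_ERROR;
}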
Example #27
/**
 * @brief Enables or disables CDP across selected CPU sockets
 *
 * @param [in] sockets_num dimension of \a sockets array
 * @param [in] sockets array with socket ids to change CDP config on
 * @param [in] enable CDP enable/disable flag, 1 - enable, 0 - disable
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 * @retval PQOS_RETVAL_ERROR on failure, MSR read/write error
 */
static int
cdp_enable(const unsigned sockets_num,
           const unsigned *sockets,
           const int enable)
{
        unsigned j = 0;

        ASSERT(sockets_num > 0 && sockets != NULL);

        LOG_INFO("%s CDP across sockets...\n",
                 (enable) ? "Enabling" : "Disabling");

        for (j = 0; j < sockets_num; j++) {
                uint64_t reg = 0;
                unsigned core = 0;
                int ret = PQOS_RETVAL_OK;

                ret = pqos_cpu_get_one_core(m_cpu, sockets[j], &core);
                if (ret != PQOS_RETVAL_OK)
                        return ret;

                ret = msr_read(core, PQOS_MSR_L3_QOS_CFG, &reg);
                if (ret != MACHINE_RETVAL_OK)
                        return PQOS_RETVAL_ERROR;

                if (enable)
                        reg |= PQOS_MSR_L3_QOS_CFG_CDP_EN;
                else
                        reg &= ~PQOS_MSR_L3_QOS_CFG_CDP_EN;

                ret = msr_write(core, PQOS_MSR_L3_QOS_CFG, reg);
                if (ret != MACHINE_RETVAL_OK)
                        return PQOS_RETVAL_ERROR;
        }

        return PQOS_RETVAL_OK;
}
Example #28
void apic_address_init() {
	_apic_address = msr_read(MSR_IA32_APIC_BASE) & 0xFFFFF000;
}
Example #29
/**
 * @brief Sets up IA32 performance counters for IPC and LLC miss ratio events
 *
 * @param num_cores number of cores in \a cores table
 * @param cores table with core id's
 * @param event mask of selected monitoring events
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int
ia32_perf_counter_start(const unsigned num_cores,
                        const unsigned *cores,
                        const enum pqos_mon_event event)
{
    uint64_t global_ctrl_mask = 0;
    unsigned i;

    ASSERT(cores != NULL && num_cores > 0);

    if (!(event & (PQOS_PERF_EVENT_LLC_MISS | PQOS_PERF_EVENT_IPC)))
        return PQOS_RETVAL_OK;

    if (event & PQOS_PERF_EVENT_IPC)
        global_ctrl_mask |= (0x3ULL << 32); /**< fixed counters 0&1 */

    if (event & PQOS_PERF_EVENT_LLC_MISS)
        global_ctrl_mask |= 0x1ULL;     /**< programmable counter 0 */

    if (!m_force_mon) {
        /**
         * Fixed counters are used for IPC calculations.
         * Programmable counters are used for LLC miss calculations.
         * Let's check if they are in use.
         */
        for (i = 0; i < num_cores; i++) {
            uint64_t global_inuse = 0;
            int ret;

            ret = msr_read(cores[i], IA32_MSR_PERF_GLOBAL_CTRL,
                           &global_inuse);
            if (ret != MACHINE_RETVAL_OK)
                return PQOS_RETVAL_ERROR;
            if (global_inuse & global_ctrl_mask) {
                LOG_ERROR("IPC and/or LLC miss performance "
                          "counters already in use!\n");
                return PQOS_RETVAL_PERF_CTR;
            }
        }
    }

    /**
     * - Disable counters in global control and
     *   reset counter values to 0.
     * - Program counters for desired events
     * - Enable counters in global control
     */
    for (i = 0; i < num_cores; i++) {
        const uint64_t fixed_ctrl = 0x33ULL; /**< track usr + os */
        int ret;

        ret = msr_write(cores[i], IA32_MSR_PERF_GLOBAL_CTRL, 0);
        if (ret != MACHINE_RETVAL_OK)
            break;

        if (event & PQOS_PERF_EVENT_IPC) {
            ret = msr_write(cores[i], IA32_MSR_INST_RETIRED_ANY, 0);
            if (ret != MACHINE_RETVAL_OK)
                break;
            ret = msr_write(cores[i],
                            IA32_MSR_CPU_UNHALTED_THREAD, 0);
            if (ret != MACHINE_RETVAL_OK)
                break;
            ret = msr_write(cores[i],
                            IA32_MSR_FIXED_CTR_CTRL, fixed_ctrl);
            if (ret != MACHINE_RETVAL_OK)
                break;
        }

        if (event & PQOS_PERF_EVENT_LLC_MISS) {
            const uint64_t evtsel0_miss = IA32_EVENT_LLC_MISS_MASK |
                                          (IA32_EVENT_LLC_MISS_UMASK << 8) |
                                          (1ULL << 16) | (1ULL << 17) | (1ULL << 22);

            ret = msr_write(cores[i], IA32_MSR_PMC0, 0);
            if (ret != MACHINE_RETVAL_OK)
                break;
            ret = msr_write(cores[i], IA32_MSR_PERFEVTSEL0,
                            evtsel0_miss);
            if (ret != MACHINE_RETVAL_OK)
                break;
        }

        ret = msr_write(cores[i],
                        IA32_MSR_PERF_GLOBAL_CTRL, global_ctrl_mask);
        if (ret != MACHINE_RETVAL_OK)
            break;
    }

    if (i < num_cores)
        return PQOS_RETVAL_ERROR;

    return PQOS_RETVAL_OK;
}
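The teardown is the inverse set of writes; a minimal sketch, under the assumption that simply clearing the same control MSRs is sufficient (this is not the library's actual stop routine):

/* Sketch: disable the counters programmed above on every monitored core. */
static int
ia32_perf_counter_stop_sketch(const unsigned num_cores, const unsigned *cores)
{
    unsigned i;

    for (i = 0; i < num_cores; i++) {
        /* Clear global enable, fixed-counter control and event select 0 */
        if (msr_write(cores[i], IA32_MSR_PERF_GLOBAL_CTRL, 0) != MACHINE_RETVAL_OK ||
            msr_write(cores[i], IA32_MSR_FIXED_CTR_CTRL, 0) != MACHINE_RETVAL_OK ||
            msr_write(cores[i], IA32_MSR_PERFEVTSEL0, 0) != MACHINE_RETVAL_OK)
            return PQOS_RETVAL_ERROR;
    }

    return PQOS_RETVAL_OK;
}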
Example #30
/**
 * @brief Reads monitoring event data from given core
 *
 * @param p pointer to monitoring structure
 *
 * @return Operation status
 * @retval PQOS_RETVAL_OK on success
 */
static int
pqos_core_poll(struct pqos_mon_data *p)
{
    struct pqos_event_values *pv = &p->values;
    int retval = PQOS_RETVAL_OK;
    unsigned i;

    if (p->event & PQOS_MON_EVENT_L3_OCCUP) {
        uint64_t total = 0;

        for (i = 0; i < p->num_poll_ctx; i++) {
            uint64_t tmp = 0;
            int ret;

            ret = mon_read(p->poll_ctx[i].lcore,
                           p->poll_ctx[i].rmid,
                           get_event_id(PQOS_MON_EVENT_L3_OCCUP),
                           &tmp);
            if (ret != PQOS_RETVAL_OK) {
                retval = PQOS_RETVAL_ERROR;
                goto pqos_core_poll__exit;
            }
            total += tmp;
        }
        pv->llc = total;
    }
    if (p->event & (PQOS_MON_EVENT_LMEM_BW | PQOS_MON_EVENT_RMEM_BW)) {
        uint64_t total = 0, old_value = pv->mbm_local;

        for (i = 0; i < p->num_poll_ctx; i++) {
            uint64_t tmp = 0;
            int ret;

            ret = mon_read(p->poll_ctx[i].lcore,
                           p->poll_ctx[i].rmid,
                           get_event_id(PQOS_MON_EVENT_LMEM_BW),
                           &tmp);
            if (ret != PQOS_RETVAL_OK) {
                retval = PQOS_RETVAL_ERROR;
                goto pqos_core_poll__exit;
            }
            total += tmp;
        }
        pv->mbm_local = total;
        pv->mbm_local_delta = get_delta(old_value, pv->mbm_local);
    }
    if (p->event & (PQOS_MON_EVENT_TMEM_BW | PQOS_MON_EVENT_RMEM_BW)) {
        uint64_t total = 0, old_value = pv->mbm_total;

        for (i = 0; i < p->num_poll_ctx; i++) {
            uint64_t tmp = 0;
            int ret;

            ret = mon_read(p->poll_ctx[i].lcore,
                           p->poll_ctx[i].rmid,
                           get_event_id(PQOS_MON_EVENT_TMEM_BW),
                           &tmp);
            if (ret != PQOS_RETVAL_OK) {
                retval = PQOS_RETVAL_ERROR;
                goto pqos_core_poll__exit;
            }
            total += tmp;
        }
        pv->mbm_total = total;
        pv->mbm_total_delta = get_delta(old_value, pv->mbm_total);
    }
    if (p->event & PQOS_MON_EVENT_RMEM_BW) {
        pv->mbm_remote = 0;
        if (pv->mbm_total > pv->mbm_local)
            pv->mbm_remote = pv->mbm_total - pv->mbm_local;
        pv->mbm_remote_delta = 0;
        if (pv->mbm_total_delta > pv->mbm_local_delta)
            pv->mbm_remote_delta =
                pv->mbm_total_delta - pv->mbm_local_delta;
    }
    if (p->event & PQOS_PERF_EVENT_IPC) {
        /**
         * If multiple cores are monitored in one group,
         * we have to accumulate the values across the group.
         */
        uint64_t unhalted = 0, retired = 0;
        unsigned n;

        for (n = 0; n < p->num_cores; n++) {
            uint64_t tmp = 0;
            int ret = msr_read(p->cores[n],
                               IA32_MSR_INST_RETIRED_ANY, &tmp);
            if (ret != MACHINE_RETVAL_OK) {
                retval = PQOS_RETVAL_ERROR;
                goto pqos_core_poll__exit;
            }
            retired += tmp;

            ret = msr_read(p->cores[n],
                           IA32_MSR_CPU_UNHALTED_THREAD, &tmp);
            if (ret != MACHINE_RETVAL_OK) {
                retval = PQOS_RETVAL_ERROR;
                goto pqos_core_poll__exit;
            }
            unhalted += tmp;
        }

        pv->ipc_unhalted_delta = unhalted - pv->ipc_unhalted;
        pv->ipc_retired_delta = retired - pv->ipc_retired;
        pv->ipc_unhalted = unhalted;
        pv->ipc_retired = retired;
        if (pv->ipc_unhalted_delta == 0)
            pv->ipc = 0.0;
        else
            pv->ipc = (double) pv->ipc_retired_delta /
                      (double) pv->ipc_unhalted_delta;
    }
    if (p->event & PQOS_PERF_EVENT_LLC_MISS) {
        /**
         * If multiple cores are monitored in one group,
         * we have to accumulate the values across the group.
         */
        uint64_t missed = 0;
        unsigned n;

        for (n = 0; n < p->num_cores; n++) {
            uint64_t tmp = 0;
            int ret = msr_read(p->cores[n],
                               IA32_MSR_PMC0, &tmp);
            if (ret != MACHINE_RETVAL_OK) {
                retval = PQOS_RETVAL_ERROR;
                goto pqos_core_poll__exit;
            }
            missed += tmp;
        }

        pv->llc_misses_delta = missed - pv->llc_misses;
        pv->llc_misses = missed;
    }

pqos_core_poll__exit:
    return retval;
}
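A minimal polling sketch, assuming a monitoring group has already been set up and started elsewhere in the library; it only touches fields that pqos_core_poll() above fills in, the poll interval is arbitrary, and it requires <stdio.h> and <unistd.h>:

/* Sketch: poll one group periodically and print IPC and the LLC miss delta. */
static void
poll_loop_sketch(struct pqos_mon_data *grp, unsigned iterations)
{
    unsigned i;

    for (i = 0; i < iterations; i++) {
        if (pqos_core_poll(grp) != PQOS_RETVAL_OK)
            break;

        printf("IPC: %.2f, LLC misses since last poll: %llu\n",
               grp->values.ipc,
               (unsigned long long) grp->values.llc_misses_delta);

        sleep(1);
    }
}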