/*
 * mac_soft_ring_bind
 *
 * Bind a soft ring worker thread to supplied CPU.
 */
cpu_t *
mac_soft_ring_bind(mac_soft_ring_t *ringp, processorid_t cpuid)
{
    cpu_t *cp;
    boolean_t clear = B_FALSE;

    ASSERT(MUTEX_HELD(&cpu_lock));

    if (mac_soft_ring_thread_bind == 0) {
        DTRACE_PROBE1(mac__soft__ring__no__cpu__bound,
            mac_soft_ring_t *, ringp);
        return (NULL);
    }

    cp = cpu_get(cpuid);
    if (cp == NULL || !cpu_is_online(cp))
        return (NULL);

    mutex_enter(&ringp->s_ring_lock);
    ringp->s_ring_state |= S_RING_BOUND;
    if (ringp->s_ring_cpuid != -1)
        clear = B_TRUE;
    ringp->s_ring_cpuid = cpuid;
    mutex_exit(&ringp->s_ring_lock);

    if (clear)
        thread_affinity_clear(ringp->s_ring_worker);

    DTRACE_PROBE2(mac__soft__ring__cpu__bound, mac_soft_ring_t *,
        ringp, processorid_t, cpuid);

    thread_affinity_set(ringp->s_ring_worker, cpuid);

    return (cp);
}
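For comparison, the same clear-then-rebind idea in user space: a minimal sketch using the GNU pthread_setaffinity_np() extension, with a hypothetical worker handle; this is illustrative only and not the illumos thread_affinity_* API. Unlike the kernel routine above, a single call suffices, because setting the affinity mask implicitly replaces any previous binding.

/* Minimal user-space analogue of the bind pattern above (GNU/Linux).
 * "worker" is a hypothetical pthread_t handle. */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

static int
bind_worker_to_cpu(pthread_t worker, int cpuid)
{
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(cpuid, &set);
    /* Overwrites any previous affinity, i.e. "clear" and "set" in one step. */
    return pthread_setaffinity_np(worker, sizeof(set), &set);
}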
/* ------------------------------------------------------------------------*//**
 * @FUNCTION        cpu_online_cores_count_get
 * @BRIEF           return the number of CPU cores online
 * @RETURNS         number of CPU cores online
 * @param[in]       none
 * @DESCRIPTION     return the number of CPU cores online
 *//*------------------------------------------------------------------------ */
unsigned int cpu_online_cores_count_get(void)
{
    unsigned int i, cpu_total_count, cpu_online_count;

    cpu_total_count = cpu_cores_count_get();
    cpu_online_count = 0;
    for (i = 0; i < cpu_total_count; i++) {
        if (cpu_is_online(i) == 1)
            cpu_online_count++;
    }

    return cpu_online_count;
}
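As a sanity check for the hand-rolled loop above, Linux and other POSIX systems expose the same counts through a single sysconf() call. A minimal, self-contained comparison (not part of omapconf):

/* Portable one-call equivalent on Linux/POSIX systems. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    long online = sysconf(_SC_NPROCESSORS_ONLN); /* CPUs currently online */
    long conf = sysconf(_SC_NPROCESSORS_CONF);   /* CPUs configured */

    printf("online: %ld / configured: %ld\n", online, conf);
    return online > 0 ? 0 : 1;
}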
/*
 * Associate a new CPU with a given ino.
 * Operate only on INOs which are already mapped to devices.
 */
int
px_ib_set_intr_target(px_t *px_p, devino_t ino, cpuid_t cpu_id)
{
    dev_info_t *dip = px_p->px_dip;
    cpuid_t old_cpu_id;
    sysino_t sysino;
    int ret = DDI_SUCCESS;
    extern const int _ncpu;
    extern cpu_t *cpu[];

    DBG(DBG_IB, px_p->px_dip, "px_ib_set_intr_target: devino %x "
        "cpu_id %x\n", ino, cpu_id);

    mutex_enter(&cpu_lock);

    /* Convert leaf-wide intr to system-wide intr */
    if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
        ret = DDI_FAILURE;
        goto done;
    }

    if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
        ret = DDI_FAILURE;
        goto done;
    }

    /*
     * Get lock, validate cpu and write it.
     */
    if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) {
        DBG(DBG_IB, dip, "px_ib_set_intr_target: Enabling CPU %d\n",
            cpu_id);
        px_ib_intr_dist_en(dip, cpu_id, ino, B_TRUE);
        px_ib_log_new_cpu(px_p->px_ib_p, old_cpu_id, cpu_id, ino);
    } else { /* Invalid cpu */
        DBG(DBG_IB, dip, "px_ib_set_intr_target: Invalid cpuid %x\n",
            cpu_id);
        ret = DDI_EINVAL;
    }

done:
    mutex_exit(&cpu_lock);
    return (ret);
}
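Note the ordering here: cpu_lock is taken before the target CPU is validated with cpu_is_online(), so the CPU cannot be taken offline between the check and the px_ib_intr_dist_en() call that rewrites the interrupt target.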
void output_cstate_info(FILE *f, struct cpu_topology *topo, int nrcpus)
{
    struct cpuidle_cstates *cstates;
    int i, j;

    cstates = build_cstate_info(nrcpus);
    assert(!is_err(cstates));

    for (i = 0; i < nrcpus; i++) {
        if (!cpu_is_online(topo, i))
            continue;
        fprintf(f, "cpuid %d:\n", i);
        for (j = 0; j < MAXCSTATE; j++) {
            write_cstate_info(f, cstates[i].cstate[j].name,
                cstates[i].cstate[j].target_residency);
        }
    }
}
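Judging by the parser load_and_build_cstate_info() shown later in this section, which expects a "cpuid %d:" header followed by a tab-indented state name line and a tab-indented target-residency line per c-state, each per-CPU block written here looks roughly like this (state names and values illustrative, leading whitespace is a tab):

cpuid 0:
	POLL
	0
	C1
	2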
static void output_pstates(FILE *f, struct init_pstates *initp,
        int nrcpus, struct cpu_topology *topo, double ts)
{
    int cpu;
    unsigned int freq;
    unsigned long ts_sec, ts_usec;

    ts_sec = (unsigned long)ts;
    ts_usec = (ts - ts_sec) * USEC_PER_SEC;

    for (cpu = 0; cpu < nrcpus; cpu++) {
        if (!cpu_is_online(topo, cpu))
            continue;
        freq = initp ? initp->freqs[cpu] : 0;
        fprintf(f, "%16s-%-5d [%03d] .... %5lu.%06lu: cpu_frequency: "
            "state=%u cpu_id=%d\n",
            "idlestat", getpid(), cpu, ts_sec, ts_usec, freq, cpu);
    }
}
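Each emitted line mimics the ftrace cpu_frequency trace-event format (task-pid [cpu] flags timestamp: event). For example, with a frequency of 1200000 as read from cpufreq on cpu 0 at ts = 12.345678, and an illustrative pid of 4242, the format string above produces:

        idlestat-4242  [000] ....    12.345678: cpu_frequency: state=1200000 cpu_id=0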
static struct init_pstates *build_init_pstates(struct cpu_topology *topo)
{
    struct init_pstates *initp;
    int nrcpus, cpu;
    unsigned int *freqs;

    nrcpus = sysconf(_SC_NPROCESSORS_CONF);
    if (nrcpus < 0)
        return NULL;

    initp = calloc(1, sizeof(*initp));
    if (!initp)
        return NULL;

    freqs = calloc(nrcpus, sizeof(*freqs));
    if (!freqs) {
        free(initp);
        return NULL;
    }

    initp->nrcpus = nrcpus;
    initp->freqs = freqs;

    for (cpu = 0; cpu < nrcpus; cpu++) {
        char *fpath;
        unsigned int *freq = &(freqs[cpu]);

        if (!cpu_is_online(topo, cpu))
            continue;

        if (asprintf(&fpath, CPUFREQ_CURFREQ_PATH_FORMAT, cpu) < 0) {
            release_init_pstates(initp);
            return NULL;
        }
        if (read_int(fpath, (int *)freq))
            *freq = 0;
        free(fpath);
    }

    return initp;
}
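idlestat's cpu_is_online() answers from its parsed topology; the Linux kernel exposes the same information under sysfs. A stand-alone sketch (illustrative only, not idlestat's implementation) that reads /sys/devices/system/cpu/cpuN/online; CPUs that cannot be offlined (commonly cpu0) have no such file and are treated as online:

/* Stand-alone sketch: query CPU online state via Linux sysfs.
 * Not idlestat's cpu_is_online(); shown for illustration only. */
#include <stdio.h>

static int sysfs_cpu_is_online(int cpu)
{
    char path[64];
    FILE *f;
    int online = 1; /* no "online" file: CPU cannot be offlined */

    snprintf(path, sizeof(path),
        "/sys/devices/system/cpu/cpu%d/online", cpu);
    f = fopen(path, "r");
    if (!f)
        return online;
    if (fscanf(f, "%d", &online) != 1)
        online = 0;
    fclose(f);
    return online;
}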
DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
{
    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
        return VERR_WRONG_ORDER;

    /*
     * Register the callback building the online cpu set as we do so.
     */
    RTCpuSetEmpty(&g_rtMpSolCpuSet);

    mutex_enter(&cpu_lock);
    register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);

    for (int i = 0; i < (int)RTMpGetCount(); ++i)
        if (cpu_is_online(cpu[i]))
            rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);

    ASMAtomicWriteBool(&g_fSolCpuWatch, true);
    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}
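The loop above primes the notification state by replaying a synthetic CPU_ON event for every CPU that is already online; because it runs under cpu_lock and after register_cpu_setup_func(), no online/offline transition can slip between the snapshot and the registered callback. It also assumes that the cpu[] array index equals the CPU id, the same assumption flagged explicitly in RTTimerStart below.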
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;

    /* One-shot timers are not supported by the cyclic system. */
    if (pTimer->interval == 0)
        return VERR_NOT_SUPPORTED;

    pTimer->fSuspended = false;
    if (pTimer->fAllCpu)
    {
        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
        if (RT_UNLIKELY(!pOmniTimer))
            return VERR_NO_MEMORY;

        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
        {
            RTMemFree(pOmniTimer);
            return VERR_NO_MEMORY;
        }

        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->pOmniTimer = pOmniTimer;
        pOmniTimer->u64When = pTimer->interval + RTTimeNanoTS();

        cyc_omni_handler_t hOmni;
        hOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        hOmni.cyo_offline = NULL;
        hOmni.cyo_arg     = pTimer;

        mutex_enter(&cpu_lock);
        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
        mutex_exit(&cpu_lock);
    }
    else
    {
        int iCpu = SOL_TIMER_ANY_CPU;
        if (pTimer->fSpecificCpu)
        {
            iCpu = pTimer->iCpu;
            if (!RTMpIsCpuOnline(iCpu)) /* ASSUMES: index == cpuid */
                return VERR_CPU_OFFLINE;
        }

        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
        if (RT_UNLIKELY(!pSingleTimer))
            return VERR_NO_MEMORY;

        pTimer->pSingleTimer = pSingleTimer;
        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
        pSingleTimer->hHandler.cyh_arg   = pTimer;
        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;

        mutex_enter(&cpu_lock);
        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
        {
            mutex_exit(&cpu_lock);
            RTMemFree(pSingleTimer);
            pTimer->pSingleTimer = NULL;
            return VERR_CPU_OFFLINE;
        }

        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
        if (pTimer->interval == 0)
        {
            /** @todo use gethrtime_max instead of LLONG_MAX? */
            AssertCompileSize(pSingleTimer->hFireTime.cyt_interval, sizeof(long long));
            pSingleTimer->hFireTime.cyt_interval = LLONG_MAX - pSingleTimer->hFireTime.cyt_when;
        }
        else
            pSingleTimer->hFireTime.cyt_interval = pTimer->interval;

        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
        if (iCpu != SOL_TIMER_ANY_CPU)
            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);

        mutex_exit(&cpu_lock);
    }

    return VINF_SUCCESS;
}
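One detail worth noting: since one-shot timers are rejected at the top of the function (interval == 0 returns VERR_NOT_SUPPORTED), the later interval == 0 branch in the single-timer path, which stretches cyt_interval out to LLONG_MAX, is unreachable in this version and appears to be left over from a one-shot code path.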
/*
 * Associate a new CPU with a given MSI/X.
 * Operate only on MSI/Xs which are already mapped to devices.
 */
int
px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp,
    msinum_t msi_num, cpuid_t cpu_id)
{
    px_ib_t *ib_p = px_p->px_ib_p;
    px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state;
    dev_info_t *dip = px_p->px_dip;
    dev_info_t *rdip = hdlp->ih_dip;
    msiqid_t msiq_id, old_msiq_id;
    pci_msi_state_t msi_state;
    msiq_rec_type_t msiq_rec_type;
    msi_type_t msi_type;
    px_ino_t *ino_p;
    px_ih_t *ih_p, *old_ih_p;
    cpuid_t old_cpu_id;
    hrtime_t start_time, end_time;
    int ret = DDI_SUCCESS;
    extern const int _ncpu;
    extern cpu_t *cpu[];

    DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n",
        msi_num, cpu_id);

    mutex_enter(&cpu_lock);

    /* Check for MSI64 support */
    if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) {
        msiq_rec_type = MSI64_REC;
        msi_type = MSI64_TYPE;
    } else {
        msiq_rec_type = MSI32_REC;
        msi_type = MSI32_TYPE;
    }

    if ((ret = px_lib_msi_getmsiq(dip, msi_num,
        &old_msiq_id)) != DDI_SUCCESS) {
        mutex_exit(&cpu_lock);
        return (ret);
    }

    DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n",
        old_msiq_id);

    if ((ret = px_ib_get_intr_target(px_p,
        px_msiqid_to_devino(px_p, old_msiq_id),
        &old_cpu_id)) != DDI_SUCCESS) {
        mutex_exit(&cpu_lock);
        return (ret);
    }

    DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n",
        old_cpu_id);

    if (cpu_id == old_cpu_id) {
        mutex_exit(&cpu_lock);
        return (DDI_SUCCESS);
    }

    /*
     * Get lock, validate cpu and write it.
     */
    if (!((cpu_id < _ncpu) && (cpu[cpu_id] &&
        cpu_is_online(cpu[cpu_id])))) {
        /* Invalid cpu */
        DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n",
            cpu_id);
        mutex_exit(&cpu_lock);
        return (DDI_EINVAL);
    }

    DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id);

    if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
        msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) {
        DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler "
            "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);
        mutex_exit(&cpu_lock);
        return (ret);
    }

    if ((ret = px_lib_msi_setmsiq(dip, msi_num,
        msiq_id, msi_type)) != DDI_SUCCESS) {
        mutex_exit(&cpu_lock);
        (void) px_rem_msiq_intr(dip, rdip, hdlp,
            msiq_rec_type, msi_num, msiq_id);
        return (ret);
    }

    if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
        px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
        PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) {
        mutex_exit(&cpu_lock);
        (void) px_rem_msiq_intr(dip, rdip, hdlp,
            msiq_rec_type, msi_num, msiq_id);
        return (ret);
    }

    mutex_exit(&cpu_lock);

    /*
     * Remove the old handler, but first ensure it is finished.
     *
     * Each handler sets its PENDING flag before it clears the MSI state.
     * Then it clears that flag when finished. If a re-target occurs while
     * the MSI state is DELIVERED, then it is not yet known which of the
     * two handlers will take the interrupt. So the re-target operation
     * sets a RETARGET flag on both handlers in that case. Monitoring both
     * flags on both handlers then determines when the old handler can be
     * safely removed.
     */
    mutex_enter(&ib_p->ib_ino_lst_mutex);

    ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id));
    old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p,
        hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num);

    ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id));
    ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri),
        rdip, hdlp->ih_inum, msiq_rec_type, msi_num);

    if ((ret = px_lib_msi_getstate(dip, msi_num,
        &msi_state)) != DDI_SUCCESS) {
        (void) px_rem_msiq_intr(dip, rdip, hdlp,
            msiq_rec_type, msi_num, msiq_id);
        mutex_exit(&ib_p->ib_ino_lst_mutex);
        return (ret);
    }

    if (msi_state == PCI_MSI_STATE_DELIVERED) {
        ih_p->ih_intr_flags |= PX_INTR_RETARGET;
        old_ih_p->ih_intr_flags |= PX_INTR_RETARGET;
    }

    start_time = gethrtime();
    while (((ih_p->ih_intr_flags & PX_INTR_RETARGET) &&
        (old_ih_p->ih_intr_flags & PX_INTR_RETARGET)) ||
        (old_ih_p->ih_intr_flags & PX_INTR_PENDING)) {

        /* Wait for one second */
        delay(drv_usectohz(1000000));

        end_time = gethrtime() - start_time;
        if (end_time > px_ib_msix_retarget_timeout) {
            cmn_err(CE_WARN, "MSIX retarget %x is not completed, "
                "even after waiting %llx ticks\n",
                msi_num, end_time);
            break;
        }
    }

    ih_p->ih_intr_flags &= ~(PX_INTR_RETARGET);

    mutex_exit(&ib_p->ib_ino_lst_mutex);

    ret = px_rem_msiq_intr(dip, rdip, hdlp,
        msiq_rec_type, msi_num, old_msiq_id);

    return (ret);
}
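The loop above polls the handler flags once per second and gives up after px_ib_msix_retarget_timeout. The same bounded-poll idiom reduced to portable user-space C, with a hypothetical condition_done() predicate and timeout constant:

/* Generic bounded-wait poll, a user-space sketch of the pattern above.
 * condition_done() and TIMEOUT_NS are hypothetical placeholders. */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define TIMEOUT_NS (10ULL * 1000000000ULL) /* give up after 10 s */

static uint64_t now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static bool wait_until_done(bool (*condition_done)(void))
{
    uint64_t start = now_ns();
    struct timespec second = { .tv_sec = 1, .tv_nsec = 0 };

    while (!condition_done()) {
        nanosleep(&second, NULL); /* wait for one second */
        if (now_ns() - start > TIMEOUT_NS)
            return false; /* timed out */
    }
    return true;
}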
/**
 * load_and_build_cstate_info - load c-state info written to idlestat
 * trace file.
 *
 * @f: the file handle of the idlestat trace file
 * @nrcpus: number of CPUs
 *
 * @return: per-CPU array of structs (success) or ptrerror() (error)
 */
static struct cpuidle_cstates *load_and_build_cstate_info(FILE *f,
    char *buffer, int nrcpus, struct cpu_topology *topo)
{
    int cpu;
    struct cpuidle_cstates *cstates;

    assert(f != NULL);
    assert(buffer != NULL);
    assert(nrcpus > 0);

    cstates = calloc(nrcpus, sizeof(*cstates));
    if (!cstates)
        return ptrerror(__func__);

    for (cpu = 0; cpu < nrcpus; cpu++) {
        int i, read_cpu;
        struct cpuidle_cstate *c;

        cstates[cpu].cstate_max = -1;
        cstates[cpu].current_cstate = -1;

        if (!cpu_is_online(topo, cpu))
            continue;

        if (sscanf(buffer, "cpuid %d:\n", &read_cpu) != 1 ||
            read_cpu != cpu) {
            release_cstate_info(cstates, cpu);
            fprintf(stderr,
                "%s: Error reading trace file\n"
                "Expected: cpuid %d:\n"
                "Read: %s",
                __func__, cpu, buffer);
            return ptrerror(NULL);
        }

        for (i = 0; i < MAXCSTATE; i++) {
            int residency;
            char *name = malloc(128);

            if (!name) {
                release_cstate_info(cstates, cpu);
                return ptrerror(__func__);
            }

            fgets(buffer, BUFSIZE, f);
            sscanf(buffer, "\t%s\n", name);
            fgets(buffer, BUFSIZE, f);
            sscanf(buffer, "\t%d\n", &residency);

            c = &(cstates[cpu].cstate[i]);
            if (!strcmp(name, "(null)")) {
                free(name);
                c->name = NULL;
            } else {
                c->name = name;
            }
            c->data = NULL;
            c->nrdata = 0;
            c->early_wakings = 0;
            c->late_wakings = 0;
            c->avg_time = 0.;
            c->max_time = 0.;
            c->min_time = DBL_MAX;
            c->duration = 0.;
            c->target_residency = residency;
        }
        fgets(buffer, BUFSIZE, f);
    }

    return cstates;
}
/*
 * Associate a new CPU with a given ino.
 * Operate only on inos which are already mapped to devices.
 */
int
ib_set_intr_target(pci_t *pci_p, ib_ino_t ino, int cpu_id)
{
    dev_info_t *dip = pci_p->pci_dip;
    ib_t *ib_p = pci_p->pci_ib_p;
    int ret = DDI_SUCCESS;
    uint32_t old_cpu_id;
    hrtime_t start_time;
    uint64_t imregval;
    uint64_t new_imregval;
    volatile uint64_t *imregp;
    volatile uint64_t *idregp;
    extern const int _ncpu;
    extern cpu_t *cpu[];

    DEBUG2(DBG_IB, dip, "ib_set_intr_target: ino %x cpu_id %x\n",
        ino, cpu_id);

    imregp = (uint64_t *)ib_intr_map_reg_addr(ib_p, ino);
    idregp = IB_INO_INTR_STATE_REG(ib_p, ino);

    /* Save original mapreg value. */
    imregval = *imregp;
    DEBUG1(DBG_IB, dip, "ib_set_intr_target: orig mapreg value: 0x%llx\n",
        imregval);

    /* Operate only on inos which are already enabled. */
    if (!(imregval & COMMON_INTR_MAP_REG_VALID))
        return (DDI_FAILURE);

    /* Is this request a noop? */
    if ((old_cpu_id = ib_map_reg_get_cpu(imregval)) == cpu_id)
        return (DDI_SUCCESS);

    /* Clear the interrupt valid/enable bit for particular ino. */
    DEBUG0(DBG_IB, dip, "Clearing intr_enabled...\n");
    *imregp = imregval & ~COMMON_INTR_MAP_REG_VALID;

    /* Wait until there are no more pending interrupts. */
    start_time = gethrtime();

    DEBUG0(DBG_IB, dip, "About to check for pending interrupts...\n");

    while (IB_INO_INTR_PENDING(idregp, ino)) {
        DEBUG0(DBG_IB, dip, "Waiting for pending ints to clear\n");
        if ((gethrtime() - start_time) < pci_intrpend_timeout) {
            continue;
        } else { /* Timed out waiting. */
            DEBUG0(DBG_IB, dip, "Timed out waiting\n");
            return (DDI_EPENDING);
        }
    }

    new_imregval = *imregp;

    DEBUG1(DBG_IB, dip,
        "after disabling intr, mapreg value: 0x%llx\n", new_imregval);

    /*
     * Get lock, validate cpu and write new mapreg value.
     */
    mutex_enter(&cpu_lock);
    if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) {
        /* Prepare new mapreg value with intr enabled and new cpu_id. */
        new_imregval &= COMMON_INTR_MAP_REG_IGN | COMMON_INTR_MAP_REG_INO;
        new_imregval = ib_get_map_reg(new_imregval, cpu_id);

        DEBUG1(DBG_IB, dip, "Writing new mapreg value:0x%llx\n",
            new_imregval);

        *imregp = new_imregval;

        ib_log_new_cpu(ib_p, old_cpu_id, cpu_id, ino);
    } else {
        /* Invalid cpu. Restore original register image. */
        DEBUG0(DBG_IB, dip,
            "Invalid cpuid: writing orig mapreg value\n");
        *imregp = imregval;
        ret = DDI_EINVAL;
    }
    mutex_exit(&cpu_lock);

    return (ret);
}
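This driver retargets an interrupt by rewriting the mapping register directly under cpu_lock, after first disabling the source and draining pending interrupts. On Linux, the closest user-visible analogue is writing a hexadecimal CPU mask to /proc/irq/<irq>/smp_affinity; a hedged sketch, unrelated to this Solaris code:

/* Sketch: retarget a Linux interrupt from user space by writing a
 * hex CPU mask to /proc/irq/<irq>/smp_affinity (requires root).
 * Illustrative only; assumes cpu_id < 32 so the mask fits one word. */
#include <stdio.h>

static int set_irq_affinity(int irq, unsigned int cpu_id)
{
    char path[64];
    FILE *f;
    int ret;

    snprintf(path, sizeof(path), "/proc/irq/%d/smp_affinity", irq);
    f = fopen(path, "w");
    if (!f)
        return -1;
    /* The file takes a hexadecimal bitmask of allowed CPUs. */
    ret = fprintf(f, "%x\n", 1u << cpu_id) > 0 ? 0 : -1;
    fclose(f);
    return ret;
}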
/* ------------------------------------------------------------------------*//**
 * @FUNCTION        opp_show
 * @BRIEF           show current operating voltages and key clock rates.
 * @RETURNS         0 in case of success
 *                  OMAPCONF_ERR_REG_ACCESS
 *                  OMAPCONF_ERR_CPU
 *                  OMAPCONF_ERR_INTERNAL
 * @param[in,out]   stream: output file stream (opened, != NULL)
 * @DESCRIPTION     show current operating voltages and key clock rates.
 *//*------------------------------------------------------------------------ */
int opp_show(FILE *stream)
{
    int volt, volt2;
    const char *opp_s, *opp_s2;
    int temp;
    int rate_mpu, rate_mpu_por;
    int rate_dsp, rate_iva, rate_gpu;
    int rate_dsp_por, rate_iva_por, rate_gpu_por, rate_aess_por;
    int rate_l3, rate_l3_por;
    int rate_l4, rate_emif, rate_lpddr2, rate_aess, rate_iss,
        rate_fdif, rate_dss, rate_bb2d, rate_hsi;
    mod_module_mode mmode;
    int rate_cal, rate_ipu, rate_c2c;
    char table[TABLE_MAX_ROW][TABLE_MAX_COL][TABLE_MAX_ELT_LEN];
    unsigned int row = 0;
    unsigned int retry_cnt = 0;
    unsigned int found = 0;
    const genlist *voltdm_list;
    int i, vdd_count;
    char voltdm[VOLTDM_MAX_NAME_LENGTH];
    char prev_gov[CPUFREQ_GOV_MAX_NAME_LENGTH],
        prev_gov2[CPUFREQ_GOV_MAX_NAME_LENGTH];
    const char *temp_sensor;

    /*
     * Switch to userspace governor temporarily, so that the OPP cannot
     * change during the audit and falsify it.
     */
    cpufreq_scaling_governor_set("userspace", prev_gov);

    autoadjust_table_init(table);
    row = 0;
    strncpy(table[row][1], "Temperature", TABLE_MAX_ELT_LEN);
    strncpy(table[row][2], "Voltage", TABLE_MAX_ELT_LEN);
    strncpy(table[row][3], "Frequency", TABLE_MAX_ELT_LEN);
    strncpy(table[row][4], "OPerating Point", TABLE_MAX_ELT_LEN);
    row++;

    /*
     * In order to make sure all details (OPP, voltage, clock rates) are
     * coherent (due to potential OPP change in between), must use a loop,
     * checking that OPP and voltage did not change and that at least ONE
     * clock rate is aligned to expected rate for the detected OPP.
     */
    dprintf("%s():\n", __func__);
    voltdm_list = voltdm_list_get();
    if (voltdm_list == NULL)
        return OMAPCONF_ERR_INTERNAL;
    vdd_count = voltdm_count_get();
    if (vdd_count < 0)
        return OMAPCONF_ERR_INTERNAL;
    dprintf("found %d voltage domains\n", vdd_count);
    for (i = 1; i < vdd_count; i++) {
        genlist_get((genlist *) voltdm_list, i, (char *) &voltdm);
        snprintf(table[row][0], TABLE_MAX_ELT_LEN, "%s / VDD_CORE%u",
            voltdm, i);
        dprintf(" %s:\n", voltdm);

        /* Retrieve OPP and clock rates */
        retry_cnt = 0;
        found = 0;
        do {
            dprintf(" TRY #%u:\n", retry_cnt);
            if (retry_cnt == 0) /* Print warning on first try */
                opp_s = opp_get(voltdm, 0);
            else
                opp_s = opp_get(voltdm, 1);
            if (opp_s == NULL) {
                dprintf(" OPP NOT detected!\n");
                opp_s = OPP_UNKNOWN;
            } else {
                dprintf(" OPP detected: %s\n", opp_s);
            }

            volt = voltdm_voltage_get(voltdm);
            dprintf(" Voltage: %duV\n", volt);

            if (strcmp(voltdm, "VDD_MPU") == 0) {
                rate_mpu = mod_clk_rate_get("MPU");
                if (strcmp(opp_s, OPP_UNKNOWN) != 0)
                    rate_mpu_por = mod_por_clk_rate_get("MPU", opp_s);
                else
                    rate_mpu_por = -1;
                dprintf(" MPU Rate: %dKHz, POR Rate: %dKHz\n",
                    rate_mpu, rate_mpu_por);
            } else if ((strcmp(voltdm, "VDD_IVA") == 0) ||
                (strcmp(voltdm, "VDD_MM") == 0)) {
                rate_dsp_por = -1;
                rate_iva_por = -1;
                rate_aess_por = -1;
                rate_gpu_por = -1;
                rate_dsp = mod_clk_rate_get("DSP");
                rate_iva = mod_clk_rate_get("IVA");
                if (cpu_is_omap44xx())
                    rate_aess = mod_clk_rate_get("AESS");
                else if (cpu_is_omap54xx())
                    rate_gpu = mod_clk_rate_get("GPU");
                if (strcmp(opp_s, OPP_UNKNOWN) != 0) {
                    rate_dsp_por = mod_por_clk_rate_get("DSP", opp_s);
                    rate_iva_por = mod_por_clk_rate_get("IVA", opp_s);
                    if (cpu_is_omap44xx())
                        rate_aess_por = mod_por_clk_rate_get("AESS", opp_s);
                    else if (cpu_is_omap54xx())
                        rate_gpu_por = mod_por_clk_rate_get("GPU", opp_s);
                }
                dprintf(" DSP Rate: %dMHz, POR Rate: %dMHz\n",
                    rate_dsp, rate_dsp_por);
                dprintf(" IVA Rate: %dMHz, POR Rate: %dMHz\n",
                    rate_iva, rate_iva_por);
                if (cpu_is_omap44xx()) {
                    dprintf(" AESS Rate: %dMHz, POR Rate: %dMHz\n",
                        rate_aess, rate_aess_por);
                } else if (cpu_is_omap54xx()) {
                    dprintf(" GPU Rate: %dMHz, POR Rate: %dMHz\n",
                        rate_gpu, rate_gpu_por);
                }
            } else if (strcmp(voltdm, "VDD_CORE") == 0) {
                rate_l3 = mod_clk_rate_get("L3");
                if (strcmp(opp_s, OPP_UNKNOWN) != 0)
                    rate_l3_por = mod_por_clk_rate_get("L3", opp_s);
                else
                    rate_l3_por = -1;
                dprintf(" L3_1 Rate: %dMHz, POR Rate: %dMHz\n",
                    rate_l3, rate_l3_por);
                rate_emif = mod_clk_rate_get("EMIF");
                rate_lpddr2 = mod_clk_rate_get("MEM");
                rate_l4 = mod_clk_rate_get("L4");
                if (cpu_is_omap44xx())
                    rate_gpu = mod_clk_rate_get("GPU");
                else if (cpu_is_omap54xx())
                    rate_aess = mod_clk_rate_get("AESS");
                rate_iss = mod_clk_rate_get("ISS");
                rate_fdif = mod_clk_rate_get("FDIF");
                if (!cpu_is_omap44xx())
                    rate_cal = mod_clk_rate_get("CAL");
                else
                    rate_cal = -1;
                rate_ipu = mod_clk_rate_get("IPU");
                rate_dss = mod_clk_rate_get("DSS");
                rate_hsi = mod_clk_rate_get("HSI");
                if (cpu_is_omap4470() || cpu_is_omap54xx())
                    rate_bb2d = mod_clk_rate_get("BB2D");
                else
                    rate_bb2d = -1;
                rate_c2c = mod_clk_rate_get("C2C");
            }

            if (strcmp(opp_s, OPP_UNKNOWN) == 0) {
                dprintf(" Could not detect OPP, aborting for this domain.\n");
                break;
            }

            opp_s2 = opp_get(voltdm, 1);
            if (opp_s2 == NULL) {
                dprintf(" OPP NOT detected! (2)\n");
                opp_s2 = OPP_UNKNOWN;
            } else {
                dprintf(" OPP detected: %s (2)\n", opp_s2);
            }

            volt2 = voltdm_voltage_get(voltdm);
            dprintf(" Voltage (2): %duV\n", volt2);

            if (strcmp(voltdm, "VDD_MPU") == 0) {
                found = ((rate_mpu == rate_mpu_por) &&
                    (strcmp(opp_s, opp_s2) == 0) &&
                    (volt == volt2));
            } else if (strcmp(voltdm, "VDD_IVA") == 0) {
                found = ((strcmp(opp_s, opp_s2) == 0) &&
                    (volt == volt2) &&
                    (((unsigned int) rate_dsp ==
                        (unsigned int) rate_dsp_por) ||
                    ((unsigned int) rate_iva ==
                        (unsigned int) rate_iva_por) ||
                    ((unsigned int) rate_aess ==
                        (unsigned int) rate_aess_por)));
            } else if (strcmp(voltdm, "VDD_MM") == 0) {
                found = ((strcmp(opp_s, opp_s2) == 0) &&
                    (volt == volt2) &&
                    ((rate_dsp == rate_dsp_por) ||
                    (rate_iva == rate_iva_por) ||
                    (rate_gpu == rate_gpu_por)));
            } else if (strcmp(voltdm, "VDD_CORE") == 0) {
                found = ((strcmp(opp_s, opp_s2) == 0) &&
                    (volt == volt2) &&
                    (rate_l3 == rate_l3_por));
            }
            dprintf(" found=%u\n", found);
            retry_cnt++;
        } while ((retry_cnt < OPP_MAX_RETRY) && (found == 0));

        /* Print temperature */
        temp_sensor = temp_sensor_voltdm2sensor(voltdm);
        if (temp_sensor == NULL) {
            snprintf(table[row][1], TABLE_MAX_ELT_LEN, "NA");
        } else {
            temp = temp_sensor_get(temp_sensor);
            if (temp != TEMP_ABSOLUTE_ZERO)
                snprintf(table[row][1], TABLE_MAX_ELT_LEN,
                    "%dC / %dF", temp, celcius2fahrenheit(temp));
            else
                snprintf(table[row][1], TABLE_MAX_ELT_LEN, "NA");
        }

        /* Print voltage */
        if (volt < 0)
            snprintf(table[row][2], TABLE_MAX_ELT_LEN, "NA");
        else if (!cpu_is_omap44xx())
            snprintf(table[row][2], TABLE_MAX_ELT_LEN, "%.3lf V",
                uv2v(volt));
        else
            snprintf(table[row][2], TABLE_MAX_ELT_LEN, "%.6lf V",
                uv2v(volt));

        /* Print OPP */
        if (retry_cnt < OPP_MAX_RETRY) {
            strncpy(table[row][4], opp_s, TABLE_MAX_ELT_LEN);
        } else {
            fprintf(stderr,
                "omapconf: too many %s OPP changes, could not retrieve it!!!\n",
                voltdm);
            strncpy(table[row][4], "ERROR", TABLE_MAX_ELT_LEN);
        }
        row++;

        /* Print clock rates */
        if (strcmp(voltdm, "VDD_MPU") == 0) {
            if (cpu_is_online(1) == 1)
                strncpy(table[row][0], " MPU (CPU1 ON)",
                    TABLE_MAX_ELT_LEN);
            else
                strncpy(table[row][0], " MPU (CPU1 OFF)",
                    TABLE_MAX_ELT_LEN);
            snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                " %-4d MHz", rate_mpu / 1000);
            row += 2;
        } else if ((strcmp(voltdm, "VDD_IVA") == 0) ||
            (strcmp(voltdm, "VDD_MM") == 0)) {
            strncpy(table[row][0], " IVA", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("IVA");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_iva / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_iva / 1000);
            row++;
            if (cpu_is_omap44xx()) {
                strncpy(table[row][0], " AESS", TABLE_MAX_ELT_LEN);
                mmode = mod_mode_get("AESS");
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_aess / 1000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_aess / 1000);
                row++;
            } else if (cpu_is_omap54xx()) {
                strncpy(table[row][0], " GPU", TABLE_MAX_ELT_LEN);
                mmode = mod_mode_get("GPU");
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_gpu / 1000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_gpu / 1000);
                row++;
            }
            strncpy(table[row][0], " DSP", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("DSP");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_dsp / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_dsp / 1000);
            row += 2;
        } else if (strcmp(voltdm, "VDD_CORE") == 0) {
            strncpy(table[row][0], " L3", TABLE_MAX_ELT_LEN);
            snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                " %-4d MHz", rate_l3 / 1000);
            row++;
            strncpy(table[row][0], " DMM/EMIF", TABLE_MAX_ELT_LEN);
            snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                " %-4d MHz", rate_emif / 1000);
            row++;
            strncpy(table[row][0], " LP-DDR2", TABLE_MAX_ELT_LEN);
            snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                " %-4d MHz", rate_lpddr2 / 1000);
            row++;
            strncpy(table[row][0], " L4", TABLE_MAX_ELT_LEN);
            snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                " %-4d MHz", rate_l4 / 1000);
            row++;
            if (cpu_is_omap44xx()) {
                strncpy(table[row][0], " GPU", TABLE_MAX_ELT_LEN);
                mmode = mod_mode_get("GPU");
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_gpu / 1000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_gpu / 1000);
                row++;
            } else if (cpu_is_omap54xx()) {
                strncpy(table[row][0], " AESS", TABLE_MAX_ELT_LEN);
                mmode = mod_mode_get("AESS");
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_aess / 1000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_aess / 1000);
                row++;
            }
            strncpy(table[row][0], " FDIF", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("FDIF");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_fdif / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_fdif / 1000);
            row++;
            if (cpu_is_omap54xx()) {
                strncpy(table[row][0], " CAL", TABLE_MAX_ELT_LEN);
                mmode = mod_mode_get("CAL");
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_cal / 1000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_cal / 1000);
                row++;
            }
            strncpy(table[row][0], " IPU", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("IPU");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_ipu / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_ipu / 1000);
            row++;
            if (cpu_is_omap44xx()) {
                strncpy(table[row][0], " Cortex-M3 Cores",
                    TABLE_MAX_ELT_LEN);
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_ipu / 2000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_ipu / 2000);
                row++;
            } else if (cpu_is_omap54xx()) {
                strncpy(table[row][0], " Cortex-M4 Cores",
                    TABLE_MAX_ELT_LEN);
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_ipu / 2000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_ipu / 2000);
                row++;
            }
            strncpy(table[row][0], " ISS", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("ISS");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_iss / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_iss / 1000);
            row++;
            strncpy(table[row][0], " DSS", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("DSS");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_dss / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_dss / 1000);
            row++;
            if (cpu_is_omap4470() || cpu_is_omap54xx()) {
                strncpy(table[row][0], " BB2D", TABLE_MAX_ELT_LEN);
                mmode = mod_mode_get("BB2D");
                if (mmode == MOD_DISABLED_MODE)
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        "(%-4d MHz) (1)", rate_bb2d / 1000);
                else
                    snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                        " %-4d MHz", rate_bb2d / 1000);
                row++;
            }
            strncpy(table[row][0], " HSI", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("HSI");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_hsi / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_hsi / 1000);
            row++;
            strncpy(table[row][0], " C2C", TABLE_MAX_ELT_LEN);
            mmode = mod_mode_get("C2C");
            if (mmode == MOD_DISABLED_MODE)
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    "(%-4d MHz) (1)", rate_c2c / 1000);
            else
                snprintf(table[row][3], TABLE_MAX_ELT_LEN,
                    " %-4d MHz", rate_c2c / 1000);
            row++;
        }
    }

    /* Display table */
    autoadjust_table_fprint(stream, table, row, 5);
    fprintf(stream, "Notes:\n");
    fprintf(stream, " (1) Module is disabled, rate may not be relevant.\n\n");

    /* Restore CPUFreq governor */
    cpufreq_scaling_governor_set(prev_gov, prev_gov2);

    return 0;
}
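A trivial harness for the entry point above, assuming the omapconf headers and libraries are linked in (a hypothetical driver; omapconf normally reaches opp_show() through its own command dispatch):

/* Hypothetical harness: print the OPP audit table to stdout. */
#include <stdio.h>

extern int opp_show(FILE *stream); /* from omapconf */

int main(void)
{
    return opp_show(stdout);
}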