/*
 * Kernel-thread body for the CPU speed-switch period mode: each time the
 * hrtimer callback raises mt_cpu_ss_timer_flag, toggle TOP_CKMUXSEL bit 2,
 * alternating the CPU clock mux between CLKSQ (bit cleared via the 0x0ff3
 * mask) and ARMPLL (bit set via 0x0004), then re-arm the one-shot timer.
 * Loops until kthread_stop() is called; @unused is ignored. Returns 0.
 */
int mt_cpu_ss_thread_handler(void *unused)
{
    /* flag tracks which clock source was selected last (0 -> next is CLKSQ). */
    kal_uint32 flag = 0;

    do {
        ktime_t ktime = ktime_set(mt_cpu_ss_period_s, mt_cpu_ss_period_ns);

        /* Sleep until the hrtimer callback signals the next toggle. */
        wait_event_interruptible(mt_cpu_ss_timer_waiter, mt_cpu_ss_timer_flag != 0);
        mt_cpu_ss_timer_flag = 0;

        if (!flag) {
            /* Clear TOP_CKMUXSEL bit 2: select CLKSQ as CPU clock source. */
            mt65xx_reg_sync_writel((DRV_Reg32(TOP_CKMUXSEL) & 0x0ff3), TOP_CKMUXSEL);
            flag = 1;
        } else {
            /* Set TOP_CKMUXSEL bit 2: select ARMPLL as CPU clock source. */
            mt65xx_reg_sync_writel((DRV_Reg32(TOP_CKMUXSEL) | 0x0004), TOP_CKMUXSEL);
            flag = 0;
        }

        if (mt_cpu_ss_debug_mode)
            printk("[%s]: TOP_CKMUXSEL = 0x%x\n", __FUNCTION__, DRV_Reg32(TOP_CKMUXSEL));

        /* Re-arm the one-shot timer for the next switch period. */
        hrtimer_start(&mt_cpu_ss_timer, ktime, HRTIMER_MODE_REL);
    } while (!kthread_should_stop());

    return 0;
}
/*
 * Program the two PL310 L2 cache event counters with the event types
 * requested in @l_cfg, caching the selections in the file-scope
 * l2c_cnt0_evt / l2c_cnt1_evt globals. The event-source field of
 * L2X0_EVENT_CNTx_CFG starts at bit 2, hence the << 2.
 */
void mt65xx_mon_set_l2c(struct l2c_cfg *l_cfg)
{
    l2c_cnt0_evt = l_cfg->l2c_evt[0];
    l2c_cnt1_evt = l_cfg->l2c_evt[1];
    mt65xx_reg_sync_writel(l2c_cnt0_evt << 2, PL310_BASE + L2X0_EVENT_CNT0_CFG);
    mt65xx_reg_sync_writel(l2c_cnt1_evt << 2, PL310_BASE + L2X0_EVENT_CNT1_CFG);
}
static ssize_t cpu_ss_mode_write(struct file *file, const char *buffer, unsigned long count, void *data) { int len = 0, mode = 0; char desc[32]; len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1); if (copy_from_user(desc, buffer, len)) { return 0; } desc[len] = '\0'; if (sscanf(desc, "%d", &mode) == 1) { if (mode) { printk("[%s]: config cpu speed switch mode = ARMPLL\n", __FUNCTION__); mt65xx_reg_sync_writel((DRV_Reg32(TOP_CKMUXSEL) | 0x0004), TOP_CKMUXSEL); } else { printk("[%s]: config cpu speed switch mode = CLKSQ\n", __FUNCTION__); mt65xx_reg_sync_writel((DRV_Reg32(TOP_CKMUXSEL) & 0x0ff3), TOP_CKMUXSEL); } return count; } else { printk("[%s]: bad argument!! should be \"1\" or \"0\"\n", __FUNCTION__); } return -EINVAL; }
/*
 * mt_irq_set_polarity: set the interrupt polarity
 * @irq: interrupt id
 * @polarity: interrupt polarity (0 = active low, non-zero = active high)
 *
 * Only SPIs can have their polarity configured; SGIs/PPIs are rejected.
 * The polarity bits live in the INT_POL_CTLx registers (one bit per SPI,
 * 32 SPIs per register); a SET bit means active low.
 */
void mt_irq_set_polarity(unsigned int irq, unsigned int polarity)
{
    unsigned long flags;
    u32 offset, reg_index, value;

    /* SGI/PPI polarity is fixed by the GIC — refuse private interrupts. */
    if (irq < (NR_GIC_SGI + NR_GIC_PPI)) {
        printk(KERN_CRIT "Fail to set polarity of interrupt %d\n", irq);
        return ;
    }

    /* Bit position within the register, and which INT_POL_CTLx register. */
    offset = (irq - GIC_PRIVATE_SIGNALS) & 0x1F;
    reg_index = (irq - GIC_PRIVATE_SIGNALS) >> 5;

    spin_lock_irqsave(&irq_lock, flags);
    if (polarity == 0) {
        /* active low: set the polarity bit */
        value = readl(INT_POL_CTL0 + (reg_index * 4));
        value |= (1 << offset);
        mt65xx_reg_sync_writel(value, (INT_POL_CTL0 + (reg_index * 4)));
    } else {
        /* active high: clear the polarity bit */
        value = readl(INT_POL_CTL0 + (reg_index * 4));
        value &= ~(0x1 << offset);
        mt65xx_reg_sync_writel(value, INT_POL_CTL0 + (reg_index * 4));
    }
    spin_unlock_irqrestore(&irq_lock, flags);
}
/*
 * MET_BM_SetIDSelect: bind EMI bus-monitor counter @counter_num to a bus ID.
 * @counter_num: 1-based counter index (1..BM_COUNTER_MAX)
 * @id: 8-bit bus master ID to match
 * @enable: 1 to enable ID filtering for this counter, 0 to disable
 *
 * Each EMI_BMIDx register packs four 8-bit SELx_ID fields; the per-counter
 * enable bits live in EMI_BMID5 starting at bit 7.
 * Returns BM_REQ_OK on success, BM_ERR_WRONG_REQ on a bad argument.
 */
int MET_BM_SetIDSelect(const unsigned int counter_num, const unsigned int id, const unsigned int enable)
{
    unsigned int value, addr, shift_num;

    if ((counter_num < 1 || counter_num > BM_COUNTER_MAX)
        || (id > 0xFF)
        || (enable > 1)) {
        return BM_ERR_WRONG_REQ;
    }

    /* Four 8-bit ID fields per register, registers spaced 8 bytes apart. */
    addr = EMI_BMID0 + (counter_num - 1) / 4 * 8;
    // field's offset in the target EMI_BMIDx register
    shift_num = ((counter_num - 1) % 4) * 8;
    // clear SELx_ID field
    value = readl(addr) & ~(0xFF << shift_num);
    // set SELx_ID field
    value |= id << shift_num;
    mt65xx_reg_sync_writel(value, addr);

    /* Update the counter's ID-select enable bit in EMI_BMID5 (bits 7..). */
    value = (readl(EMI_BMID5) & ~(1 << (counter_num - 1 + 7)))
            | (enable << (counter_num - 1 + 7));
    mt65xx_reg_sync_writel(value, EMI_BMID5);

    return BM_REQ_OK;
}
/*
 * mtk_thermal_suspend: platform-driver suspend hook for the thermal module.
 * Unless a call is in progress (talking_flag), it stops periodic temperature
 * sensing, gates the thermal/auxadc clocks and powers down the sensor buffer.
 * Optionally notifies the crystal-thermal module, then invokes every
 * registered per-zone suspend callback. Always returns 0.
 */
static int mtk_thermal_suspend(struct platform_device *dev, pm_message_t state)
{
    kal_uint32 i;

    mtktscpu_dprintk("[mtk_thermal_suspend] \n");

    if(talking_flag==false)
    {
        printk("[mtk_thermal_suspend] \n");
        mt65xx_reg_sync_writel(0x00000000, TEMPMONCTL0);    // disable periodoc temperature sensing point 0
        disable_clock(MT_CG_THEM_SW_CG,"THM");
        disable_clock(MT_VCG_AUX_THERM,"THM");  // disable auxadc module.
        mt65xx_reg_sync_writel(DRV_Reg32(TS_CON0) | 0x000000C0, TS_CON0);   // turn off the sensor buffer to save power
    }
#ifdef MTK_CRYSTAL_THERMAL
    /* Warn the crystal-thermal module if the CPU temperature rose well
     * above its minimum before suspend (threshold comparison in m°C —
     * assumption based on units used elsewhere; TODO confirm). */
    if((cpu_env_first == 0) && (mtktscpu_reset_first ==false)){
        if((cpu_curr_temp - cpu_min_temp) > CPU_NOTIFY_THRESHOLD)
        {
            mtkts_crystal_notify(15000);
        }
    }
#endif
    /* Run every registered thermal-zone suspend callback. */
    for(i=0;i< THERMAL_MAX;i++)
    {
        if(thm_suspend_cbk[i] != NULL)
            thm_suspend_cbk[i]();
    }
    return 0;
}
/*
 * smp_init_cpus: early SMP init — count the cores reported by the SCU,
 * clamp to NR_CPUS, mark them possible and install the cross-call hook.
 * Includes a S/W workaround that pulses a reset bit for CPU1 first.
 */
void __init smp_init_cpus(void)
{
    unsigned int i, ncores;

    /*
     * NoteXXX: CPU 1 may not be reset clearly after power-ON.
     *          Need to apply a S/W workaround to manualy reset it first.
     */
    u32 val;
    val = *(volatile u32 *)0xF0009010;
    /* Pulse bit 1 of the reset register: assert, wait, deassert. */
    mt65xx_reg_sync_writel(val | 0x2, 0xF0009010);
    udelay(10);
    mt65xx_reg_sync_writel(val & ~0x2, 0xF0009010);
    udelay(10);

    ncores = scu_get_core_count((void *)SCU_BASE);
    if (ncores > NR_CPUS) {
        printk(KERN_WARNING "SCU core count (%d) > NR_CPUS (%d)\n", ncores, NR_CPUS);
        printk(KERN_WARNING "set nr_cores to NR_CPUS (%d)\n", NR_CPUS);
        ncores = NR_CPUS;
    }

    for (i = 0; i < ncores; i++)
        set_cpu_possible(i, true);

    set_smp_cross_call(irq_raise_softirq);
}
/*
 * spc_clear_irq: acknowledge pending SPC violations by writing back the
 * SMI-abort status bit in DEVAPC3_D0_VIO_STA (write-1-to-clear) and
 * clearing bit 3 of DEVAPC0_DXS_VIO_STA. Always returns 0.
 *
 * Fix: declared with (void) — the original empty parameter list () leaves
 * the argument types unspecified in pre-C23 C.
 */
unsigned int spc_clear_irq(void)
{
    mt65xx_reg_sync_writel(readl(DEVAPC3_D0_VIO_STA) | ABORT_SMI , DEVAPC3_D0_VIO_STA);
    mt65xx_reg_sync_writel(0x8, DEVAPC0_DXS_VIO_STA);
    return 0;
}
/*
 * boot_secondary: release a secondary core from the holding pen.
 * Writes the hardware CPU id into pen_release (flushed to the point of
 * coherency so the powered-down core can see it), programs the boot ROM
 * jump address (magic number on the very first boot, startup trampoline
 * thereafter), powers the core on, kicks it with a cross-call, and waits
 * up to 1s for the core to acknowledge by resetting pen_release to -1.
 * Returns 0 on success, -ENOSYS if the core never acknowledged.
 */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;
    /* First boot uses the magic-number handshake; later boots (hotplug)
     * jump straight to the secondary startup trampoline. */
    static int is_first_boot = 1;

    printk(KERN_CRIT "Boot slave CPU\n");

    /*
     * Set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    HOTPLUG_INFO("boot_secondary, cpu: %d\n", cpu);
    /*
     * The secondary processor is waiting to be released from
     * the holding pen - release it, then wait for it to flag
     * that it has been released by resetting pen_release.
     *
     * Note that "pen_release" is the hardware CPU ID, whereas
     * "cpu" is Linux's internal ID.
     */
    pen_release = cpu;
    /* Push pen_release out to RAM so the (cache-cold) secondary sees it. */
    __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
    outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

    if (is_first_boot) {
        mt65xx_reg_sync_writel(SLAVE_MAGIC_NUM, SLAVE_MAGIC_REG);
        is_first_boot = 0;
    } else {
        mt65xx_reg_sync_writel(virt_to_phys(mt_secondary_startup), BOOTROM_BOOT_ADDR);
    }

    power_on_cpu1();

    smp_cross_call(cpumask_of(cpu));

    /* Poll for the secondary's acknowledgement for at most one second. */
    timeout = jiffies + (1 * HZ);
    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;
        udelay(10);
    }

    /*
     * Now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);

    return pen_release != -1 ? -ENOSYS : 0;
}
/*
 * mt6575_mon_mod_init: module init function.
 * Registers the monitor driver, creates its sysfs attribute files, programs
 * the default ARMv7 PMU and PL310 L2C event selections, and configures the
 * EMI bus monitor counters.
 *
 * Fix: on sysfs-file creation failure the driver registration is now rolled
 * back (driver_unregister) so the module does not stay half-initialized.
 * Returns 0 on success or a negative error code.
 */
static int __init mt6575_mon_mod_init(void)
{
    int ret;

    /* register driver and create sysfs files */
    ret = driver_register(&mt6575_mon_drv);
    if (ret) {
        printk("fail to register mt6575_mon_drv\n");
        return ret;
    }

    /* OR the results together: any single failure makes ret non-zero. */
    ret = driver_create_file(&mt6575_mon_drv, &driver_attr_cpu_cnt0_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_cpu_cnt1_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_cpu_cnt2_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_cpu_cnt3_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_cpu_cnt4_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_cpu_cnt5_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_l2c_cnt0_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_l2c_cnt1_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_bm_master_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_bm_rw_type_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_mon_mode_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_mon_period_evt);
    ret |= driver_create_file(&mt6575_mon_drv, &driver_attr_mon_manual_evt);
    if (ret) {
        printk("fail to create mt6575_mon sysfs files\n");
        /* Roll back the successful registration above. */
        driver_unregister(&mt6575_mon_drv);
        return ret;
    }

    /* SPNIDEN[12] must be 1 for using ARM11 performance monitor unit */
    // *(volatile unsigned int *)0xF702A000 |= 0x1000;

    // set default ARMv7 performance monitor events types
    armV7_perf_mon_select_event(0, DEF_CPU_CNT0_EVT);
    armV7_perf_mon_select_event(1, DEF_CPU_CNT1_EVT);
    armV7_perf_mon_select_event(2, DEF_CPU_CNT2_EVT);
    armV7_perf_mon_select_event(3, DEF_CPU_CNT3_EVT);
    armV7_perf_mon_select_event(4, DEF_CPU_CNT4_EVT);
    armV7_perf_mon_select_event(5, DEF_CPU_CNT5_EVT);

    /* set default L2C counter's events (event field starts at bit 2) */
    mt65xx_reg_sync_writel(DEF_L2C_CNT0_EVT << 2, PL310_BASE + L2X0_EVENT_CNT0_CFG);
    mt65xx_reg_sync_writel(DEF_L2C_CNT1_EVT << 2, PL310_BASE + L2X0_EVENT_CNT1_CFG);

    /* init EMI bus monitor */
    BM_SetReadWriteType(DEF_BM_RW_TYPE);
    BM_SetMonitorCounter(1,
                         BM_MASTER_MULTIMEDIA,
                         BM_TRANS_TYPE_4BEAT | BM_TRANS_TYPE_8Byte | BM_TRANS_TYPE_BURST_WRAP);
    BM_SetMonitorCounter(2,
                         BM_MASTER_AP_MCU,
                         BM_TRANS_TYPE_4BEAT | BM_TRANS_TYPE_8Byte | BM_TRANS_TYPE_BURST_WRAP);
    BM_SetMonitorCounter(3,
                         BM_MASTER_MD_DSP | BM_MASTER_MD_MCU | BM_MASTER_2G_3G_MDDMA,
                         BM_TRANS_TYPE_4BEAT | BM_TRANS_TYPE_8Byte | BM_TRANS_TYPE_BURST_WRAP);
    BM_SetMonitorCounter(4,
                         BM_MASTER_PERI,
                         BM_TRANS_TYPE_4BEAT | BM_TRANS_TYPE_8Byte | BM_TRANS_TYPE_BURST_WRAP);

    return 0;
}
/*
 * mt_devapc_clear_emi_violation: acknowledge pending EMI violations.
 * Both DEVAPC status registers are write-1-to-clear, so a pending bit is
 * cleared by writing the same bit back.
 */
void mt_devapc_clear_emi_violation(void)
{
    unsigned int sta;

    sta = readl(IOMEM(DEVAPC0_D0_VIO_STA_0));
    if (sta & ABORT_EMI_BUS_INTERFACE)
        mt65xx_reg_sync_writel(ABORT_EMI_BUS_INTERFACE, DEVAPC0_D0_VIO_STA_0);

    sta = readl(IOMEM(DEVAPC0_D0_VIO_STA_3));
    if (sta & ABORT_EMI)
        mt65xx_reg_sync_writel(ABORT_EMI, DEVAPC0_D0_VIO_STA_3);
}
/*
 * MTK_SPC_Init: initialize the SPC (device APC) violation reporting path.
 * Clears bit 2 of the four APC control registers, acknowledges any stale
 * DXS/SMI violation status, unmasks the MM (bit 3) and SMI abort sources,
 * then registers the violation ISR. Always returns 0.
 */
int MTK_SPC_Init(void* dev)
{
    SPCMSG("MTK_SPC_init() \n");

    /* Clear bit 2 in each APC control register (0xFFFFFFFF ^ (1<<2) == ~(1<<2)). */
    mt65xx_reg_sync_writel(readl(DEVAPC0_APC_CON) & (0xFFFFFFFF ^ (1<<2)), DEVAPC0_APC_CON);
    mt65xx_reg_sync_writel(readl(DEVAPC3_APC_CON) & (0xFFFFFFFF ^ (1<<2)), DEVAPC3_APC_CON);
    mt65xx_reg_sync_writel(readl(DEVAPC0_PD_APC_CON) & (0xFFFFFFFF ^ (1<<2)), DEVAPC0_PD_APC_CON);
    mt65xx_reg_sync_writel(readl(DEVAPC3_PD_APC_CON) & (0xFFFFFFFF ^ (1<<2)), DEVAPC3_PD_APC_CON);

    /* Acknowledge any stale DXS violation status (write-1-to-clear). */
    mt65xx_reg_sync_writel(0x0000007F, DEVAPC0_DXS_VIO_STA);
    //writel(0x00FF00FB, AP_DEVAPC0_DXS_VIO_MASK); // 0xfb:MM, 0xfd:EMI, 0xf9:Both
    /* Unmask bit 3 of the DXS violation mask. */
    mt65xx_reg_sync_writel(readl(DEVAPC0_DXS_VIO_MASK)&(~0x8), DEVAPC0_DXS_VIO_MASK); // 0xfb:MM, 0xfd:EMI, 0xf9:Both

    /* Clear then unmask the SMI abort violation source. */
    mt65xx_reg_sync_writel(readl(DEVAPC3_D0_VIO_STA) | ABORT_SMI , DEVAPC3_D0_VIO_STA);
    mt65xx_reg_sync_writel(readl(DEVAPC3_D0_VIO_MASK) & ~ABORT_SMI , DEVAPC3_D0_VIO_MASK);

    spc_register_isr(dev);
#ifdef CONFIG_MTK_HIBERNATION
    register_swsusp_restore_noirq_func(ID_M_SPC, spc_pm_restore_noirq, NULL);
#endif
    return 0;
}
/*
 * Ana_Set_Reg: read-modify-write of an analog (audio PMIC) register.
 * Only the bits selected by @mask are affected; they are first cleared
 * and then set to (@value & @mask).
 *
 * NOTE(review): this variant issues TWO register writes — the masked bits
 * are momentarily all-zero on the bus between the clear write and the set
 * write. Presumably acceptable for these registers; confirm before reuse.
 */
void Ana_Set_Reg(kal_uint32 offset, kal_uint32 value, kal_uint32 mask)
{
    volatile kal_uint32 address = (offset);
    volatile kal_uint32 *Analog_Register = (volatile kal_uint32 *)address;
    volatile kal_uint32 val_tmp;

    PRINTK_ANA_REG("Ana_Set_Reg offset=%x, value=%x, mask=%x \n",offset,value,mask);

    /* First write: clear the masked bits. */
    val_tmp = READ_REGISTER_UINT32(Analog_Register);
    val_tmp &= (~mask);
    mt65xx_reg_sync_writel(val_tmp,Analog_Register);

    /* Second write: set the masked bits to the requested value. */
    val_tmp = READ_REGISTER_UINT32(Analog_Register);
    val_tmp |= (value&mask);
    mt65xx_reg_sync_writel(val_tmp,Analog_Register);
}
/*
 * mt_gpio_clear_bit: read-modify-write that clears bit @nr of the GPIO
 * register at @reg, leaving all other bits untouched.
 */
static void mt_gpio_clear_bit(unsigned long nr, unsigned long reg)
{
    mt65xx_reg_sync_writel(__raw_readl(reg) & ~(1L << nr), reg);
}
/*
 * MET_BM_SetMaster: select which bus master(s) EMI monitor counter
 * @counter_num observes.
 * @counter_num: 1-based counter index (1..BM_COUNTER_MAX)
 * @master: 8-bit master-select bitmask
 *
 * Counter 1's master field lives at EMI_BMEN[23:16]; counters 2..N share
 * EMI_MSEL/EMI_MSEL2.. with two 16-bit-spaced fields per register.
 * Returns BM_REQ_OK on success, BM_ERR_WRONG_REQ on a bad counter index.
 */
int MET_BM_SetMaster(const unsigned int counter_num, const unsigned int master)
{
    unsigned int value, addr;
    const unsigned int iMask = 0xFF;

    if (counter_num < 1 || counter_num > BM_COUNTER_MAX) {
        return BM_ERR_WRONG_REQ;
    }

    if (counter_num == 1) {
        /* Counter 1 is special-cased: master select is in EMI_BMEN[23:16]. */
        addr = EMI_BMEN;
        value = (readl(addr) & ~(iMask << 16)) | ((master & iMask) << 16);
    }else {
        /* Counters 2-3 use EMI_MSEL; higher pairs use EMI_MSEL2 + stride. */
        addr = (counter_num <= 3) ? EMI_MSEL : (EMI_MSEL2 + (counter_num / 2 - 2) * 8);

        // clear master and transaction type fields
        value = readl(addr) & ~(iMask << ((counter_num % 2) * 16));

        // set master and transaction type fields
        value |= ((master & iMask) << ((counter_num % 2) * 16));
    }

    mt65xx_reg_sync_writel(value, addr);

    return BM_REQ_OK;
}
/*
 * mt_irq_mask_all: disable all interrupts
 * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
 * Return 0 for success; return negative values for failure.
 * (This is ONLY used for the idle current measurement by the factory mode.)
 *
 * Saves the current GIC enable-set registers into @mask, then writes all-ones
 * to the enable-clear registers (only the final write carries the sync
 * barrier). The saved struct is stamped with header/footer magic values so
 * mt_irq_mask_restore can sanity-check it.
 */
int mt_irq_mask_all(struct mtk_irq_mask *mask)
{
    unsigned long flags;

    if (mask) {
#if defined(CONFIG_FIQ_GLUE)
        /* Keep FIQs quiet while the GIC state is saved and masked. */
        local_fiq_disable();
#endif
        spin_lock_irqsave(&irq_lock, flags);

        /* Snapshot the current enable state for later restore. */
        mask->mask0 = readl(GIC_ICDISER0);
        mask->mask1 = readl(GIC_ICDISER1);
        mask->mask2 = readl(GIC_ICDISER2);
        mask->mask3 = readl(GIC_ICDISER3);
        mask->mask4 = readl(GIC_ICDISER4);

        /* Disable every interrupt; sync barrier only on the last write. */
        writel(0xFFFFFFFF, GIC_ICDICER0);
        writel(0xFFFFFFFF, GIC_ICDICER1);
        writel(0xFFFFFFFF, GIC_ICDICER2);
        writel(0xFFFFFFFF, GIC_ICDICER3);
        mt65xx_reg_sync_writel(0xFFFFFFFF, GIC_ICDICER4);

        spin_unlock_irqrestore(&irq_lock, flags);
#if defined(CONFIG_FIQ_GLUE)
        local_fiq_enable();
#endif

        /* Magic values let the restore path validate the structure. */
        mask->header = IRQ_MASK_HEADER;
        mask->footer = IRQ_MASK_FOOTER;

        return 0;
    } else {
        return -1;
    }
}
/*
 * mt_gpio_set_bit: read-modify-write that sets bit @nr of the GPIO
 * register at @reg, leaving all other bits untouched.
 */
static void mt_gpio_set_bit(unsigned long nr, unsigned long reg)
{
    mt65xx_reg_sync_writel(__raw_readl(reg) | (1L << nr), reg);
}
/* * mt_irq_mask_restore: restore all interrupts * @mask: pointer to struct mtk_irq_mask for storing the original mask value. * Return 0 for success; return negative values for failure. * (This is ONLY used for the idle current measurement by the factory mode.) */ int mt_irq_mask_restore(struct mtk_irq_mask *mask) { unsigned long flags; if (!mask) { return -1; } if (mask->header != IRQ_MASK_HEADER) { return -1; } if (mask->footer != IRQ_MASK_FOOTER) { return -1; } #if defined(CONFIG_FIQ_GLUE) local_fiq_disable(); #endif spin_lock_irqsave(&irq_lock, flags); writel(mask->mask0, GIC_ICDISER0); writel(mask->mask1, GIC_ICDISER1); writel(mask->mask2, GIC_ICDISER2); writel(mask->mask3, GIC_ICDISER3); mt65xx_reg_sync_writel(mask->mask4, GIC_ICDISER4); spin_unlock_irqrestore(&irq_lock, flags); #if defined(CONFIG_FIQ_GLUE) local_fiq_enable(); #endif return 0; }
/*
 * Ana_Set_Reg: read-modify-write of an analog (audio) register with the
 * analog clock held on around the access. Only the bits selected by @mask
 * are changed; unlike the two-write variant elsewhere in the tree, this
 * one computes the final value first and issues a single write. The new
 * value is mirrored into the backup store for restore-after-sleep, and a
 * failure to back it up is logged.
 */
void Ana_Set_Reg(kal_uint32 offset, kal_uint32 value, kal_uint32 mask)
{
    volatile kal_uint32 address = (offset);
    volatile kal_uint32 *Analog_Register = (volatile kal_uint32 *)address;
    volatile kal_uint32 val_tmp;

    /* The analog block needs its clock running for register access. */
    AudDrv_ANA_Clk_On();
    val_tmp = READ_REGISTER_UINT32(Analog_Register);
    val_tmp &= (~mask);
    val_tmp |= (value&mask);
    mt65xx_reg_sync_writel(val_tmp,Analog_Register);
    AudDrv_ANA_Clk_Off();

    //Back Ana Reg
    Ana_Backup_Reg(address,val_tmp);
    if(Ana_Check_Backup_Memory(offset)==0){
        xlog_printk(ANDROID_LOG_INFO, "Sound","Fail to backup Ana Register @Offset=0x%x\n",offset);
    }

    /*
    *Analog_Register &= (~mask);
    dsb();
    *Analog_Register |= (value&mask);
    dsb();
    */
}
/*
 * MTKSetFreqInfo: initialize the GPU DVFS operating point.
 * @freq: requested initial GPU frequency in kHz (overridden to 357500 on
 *        Lenovo 89T boost builds)
 * @tbltype: ignored on entry; forced to TBLTYPE0 before the frequency
 *           table is installed.
 *
 * On MTK_FREQ_OD_INIT builds, frequencies above GPU_DVFS_F5 additionally
 * reprogram CLK_CFG_8 bits [17:16] and set up the non-overdrive fallback
 * frequency. Always returns PVRSRV_OK.
 */
PVRSRV_ERROR MTKSetFreqInfo(unsigned int freq, unsigned int tbltype)
{
    printk(" freq= %d", freq);

#ifdef CONFIG_LENOVO_89T_GPU_BOOST
    /* mt6589t can be boost up to 357.5Mhz */
    freq = 357500;
#endif

#ifndef CONFIG_LENOVO_89T_GPU_BOOST
#if defined(MTK_FREQ_OD_INIT)
    if (freq > GPU_DVFS_F5)
    {
        /* Overdrive path: higher initial frequency at 1.05V. */
        // mt_gpufreq_set_initial(freq, GPU_POWER_VRF18_1_15V);
        mt_gpufreq_set_initial(freq, GPU_POWER_VRF18_1_05V);
        /* Set CLK_CFG_8 bits [17:16] to 0b11 for the GPU clock mux. */
        mt65xx_reg_sync_writel((readl(CLK_CFG_8)&0xffcffff)|0x30000, CLK_CFG_8);
        // mt_gpufreq_keep_frequency_non_OD_init(GPU_MMPLL_D5, GPU_POWER_VRF18_1_15V);
        mt_gpufreq_keep_frequency_non_OD_init(GPU_MMPLL_D5, GPU_POWER_VRF18_1_05V);
    }
    else
#endif
#endif
    {
        /* Non-overdrive path: use the requested frequency as-is. */
        mt_gpufreq_set_initial(freq, GPU_POWER_VRF18_1_05V);
        mt_gpufreq_keep_frequency_non_OD_init(GPU_KEEP_FREQ_NON_OD_BYPASS, GPU_KEEP_VOLT_NON_OD_BYPASS);
    }
    // mt_gpufreq_keep_frequency_non_OD_init(GPU_KEEP_FREQ_NON_OD_BYPASS, GPU_KEEP_VOLT_NON_OD_BYPASS);

    /* The caller-supplied tbltype is deliberately ignored. */
    tbltype = TBLTYPE0;
    MtkInitSetFreqTbl(tbltype);

    return PVRSRV_OK;
}
/*
 * mt_get_cpu_pc: sample the program counter of core @cpu_id via the bus
 * debug monitor. Selects the core's PC source in DBG_MON_CTL, then reads
 * the captured value back from DBG_MON_FLAG.
 *
 * Fix: @cpu_id is signed — the original only checked cpu_id >= NR_CPUS,
 * so a negative id slipped through and offset the register write below
 * the valid range. Returns 0 for any out-of-range id.
 */
u32 mt_get_cpu_pc(int cpu_id)
{
    if (cpu_id < 0 || cpu_id >= NR_CPUS)
        return 0;

    mt65xx_reg_sync_writel(DBG_MON_CTL_CORE0_PC + cpu_id, DBG_MON_CTL);
    return *(volatile u32 *)(DBG_MON_FLAG);
}
/*
 * SMI_Manual_Trigger_Init: configure and start one SMI bus-monitor run.
 * @cfg: mandatory basic configuration (master, port, R/W type, dest, bus)
 * @cfg_ex: optional extended configuration; when NULL, hard-coded defaults
 *          (starvation time 8, starvation/max-phase/DP enables, idle
 *          outstanding threshold 3, idle selection 1) are used instead.
 *
 * Sequence: disable the monitor, pulse the clear bit to zero the counters,
 * program port/type/extended-control registers, then re-enable (0x1 for a
 * GMC bus, 0x3 for AXI). Finally toggles bit 0 of the register at
 * 0xF000882C as a cross-page-prevent workaround.
 */
void SMI_Manual_Trigger_Init(SMIBMCfg *cfg, SMIBMCfg_Ext *cfg_ex)
{
    unsigned long smi_master = cfg->u4Master;
    unsigned long u4SMIBaseAddr = u4SMIBaseAddrArray[smi_master];
    unsigned long u4RegVal;
    //printk("SMI_master %lu , u4SMIBaseAddr 0x%lx \n",smi_master,u4SMIBaseAddr);

    //Disable monitor
    mt65xx_reg_sync_writel(0 , MT6575SMI_MON_ENA(u4SMIBaseAddr));

    //Clear counter (pulse the clear bit: 1 then 0)
    mt65xx_reg_sync_writel(1 , MT6575SMI_MON_CLR(u4SMIBaseAddr));
    mt65xx_reg_sync_writel(0 , MT6575SMI_MON_CLR(u4SMIBaseAddr));

    //Set port
    mt65xx_reg_sync_writel(cfg->u4PortNo , MT6575SMI_MON_PORT(u4SMIBaseAddr));
    mt65xx_reg_sync_writel((((unsigned long)cfg->bRWType << 2) | cfg->bDestType) , MT6575SMI_MON_TYPE(u4SMIBaseAddr));
    //printk("[%lu, %lu, %lu]\n", cfg->u4PortNo, cfg->bRWType, cfg->bDestType);

    if(cfg_ex != NULL)
    {
        /* Pack the extended-control word from the caller's settings. */
        u4RegVal = (((unsigned long)cfg_ex->uStarvationTime << 8) |
                    ((unsigned long)cfg_ex->bStarvationEn << 6) |
                    ((unsigned long)cfg_ex->bMaxPhaseSelection << 5) |
                    ((unsigned long)cfg_ex->bDPSelection << 4) |
                    ((unsigned long)cfg_ex->uIdleOutStandingThresh << 1) |
                    cfg_ex->bIdleSelection);
        //printk("Ex configuration %lx\n", u4RegVal);
    }
    else
    {
        /* Default extended-control word (same field layout as above). */
        u4RegVal = (((unsigned long)8 << 8) |
                    ((unsigned long)1 << 6) |
                    ((unsigned long)1 << 5) |
                    ((unsigned long)1 << 4) |
                    ((unsigned long)3 << 1) |
                    1);
        //printk("default configuration %lx\n", u4RegVal);
    }
    mt65xx_reg_sync_writel(u4RegVal , MT6575SMI_MON_CON(u4SMIBaseAddr));

    // Enable it
    if(cfg->bBusType == 0) //GMC
    {
        //printk("GMC\n");
        mt65xx_reg_sync_writel(0x1 , MT6575SMI_MON_ENA(u4SMIBaseAddr));//G2D
    }
    else //AXI
    {
        //printk("AXI\n");
        mt65xx_reg_sync_writel(0x3 , MT6575SMI_MON_ENA(u4SMIBaseAddr));//GPU
    }

    // Cross Page Prevent: toggle bit 0 of the workaround register.
    *((volatile unsigned int *)0xF000882C) &= ~0x01;
    *((volatile unsigned int *)0xF000882C) |= 0x01;
}
/*
 * SMI_Manual_Trigger_Result: harvest one SMI bus-monitor run into @result.
 * Pauses the monitor, timestamps the snapshot, copies every counter out of
 * the hardware, zeroes the counters, and restarts the monitor with the
 * bus-type-appropriate enable value (0x1 GMC, 0x3 AXI) so a new measurement
 * window begins immediately. @cfg_ex is currently unused.
 */
void SMI_Manual_Trigger_Result(SMIBMCfg *cfg, SMIBMCfg_Ext *cfg_ex, SMIBMResult *result)
{
    unsigned long smi_master = cfg->u4Master;
    unsigned long u4SMIBaseAddr = u4SMIBaseAddrArray[smi_master];
    struct timeval tv;

    //printk("SMI_Manual_Trigger_Result SMI_master %lu , u4SMIBaseAddr 0x%lx \n",smi_master,u4SMIBaseAddr);

    //Pause counter so the snapshot below is self-consistent
    mt65xx_reg_sync_writel(0 , MT6575SMI_MON_ENA(u4SMIBaseAddr));

    //Get time mark
    do_gettimeofday(&tv);

    //Get SMI result
    result->u4ActiveCnt = ioread32(MT6575SMI_MON_ACT_CNT(u4SMIBaseAddr));
    result->u4RequestCnt = ioread32(MT6575SMI_MON_REQ_CNT(u4SMIBaseAddr));
    result->u4IdleCnt = ioread32(MT6575SMI_MON_IDL_CNT(u4SMIBaseAddr));
    result->u4BeatCnt = ioread32(MT6575SMI_MON_BEA_CNT(u4SMIBaseAddr));
    result->u4ByteCnt = ioread32(MT6575SMI_MON_BYT_CNT(u4SMIBaseAddr));
    result->u4CommPhaseAccum = ioread32(MT6575SMI_MON_CP_CNT(u4SMIBaseAddr));
    result->u4DataPhaseAccum = ioread32(MT6575SMI_MON_DP_CNT(u4SMIBaseAddr));
    result->u4MaxCommOrDataPhase = ioread32(MT6575SMI_MON_CDP_MAX(u4SMIBaseAddr));
    result->u4MaxOutTransaction = ioread32(MT6575SMI_MON_COS_MAX(u4SMIBaseAddr));
    result->u4EndTimeSec = tv.tv_sec;
    result->u4EndTimeMicroSec = tv.tv_usec;
    result->cfg = *cfg;
    //result->cfg_ex = *cfg_ex;
    // printk("SMI_Manual_Trigger_Result [%x %x]\n",result->u4ActiveCnt,result->u4RequestCnt);

    //Clear counter (pulse the clear bit: 1 then 0)
    mt65xx_reg_sync_writel(1 , MT6575SMI_MON_CLR(u4SMIBaseAddr));
    mt65xx_reg_sync_writel(0 , MT6575SMI_MON_CLR(u4SMIBaseAddr));

    //Resume counter
    if(cfg->bBusType == 0) //GMC
    {
        mt65xx_reg_sync_writel(0x1 , MT6575SMI_MON_ENA(u4SMIBaseAddr));//G2D
    }
    else //AXI
    {
        mt65xx_reg_sync_writel(0x3 , MT6575SMI_MON_ENA(u4SMIBaseAddr));//GPU
    }
}
/*
 * mt65xx_mon_disable: stop every hardware monitor — the ARM11 performance
 * monitor, the PL310 L2 cache event counters (write 0 to the counter
 * control register), and the EMI bus monitor. Always returns 0.
 */
int mt65xx_mon_disable(void)
{
    disable_arm11_perf_mon();
    /* 0 in L2X0_EVENT_CNT_CTRL disables the L2C event counters. */
    mt65xx_reg_sync_writel(0, PL310_BASE + L2X0_EVENT_CNT_CTRL);
    BM_Pause();

    return 0;
}
/*
 * MET_BM_SetReadWriteType: program the EMI bus-monitor R/W filter.
 * The filter occupies EMI_BMEN bits [5:4]:
 *   00/11 --> count both reads and writes
 *   01    --> reads only
 *   10    --> writes only
 */
void MET_BM_SetReadWriteType(const unsigned int ReadWriteType)
{
    const unsigned int bmen = readl(EMI_BMEN);

    /* ~(0x3 << 4) == 0xFFFFFFCF: clear the 2-bit field, then insert. */
    mt65xx_reg_sync_writel((bmen & ~(0x3U << 4)) | (ReadWriteType << 4), EMI_BMEN);
}
/*
 * config_L2_size: configure the L2 cache/SRAM split in MCUCFG.
 * @size: SZ_256K or SZ_512K; anything else is rejected.
 * Returns 1 on success, -1 on an unsupported size.
 *
 * Fix: the original mask "(~0x7) << L2C_SIZE_CFG_OFF" suffered an operator
 * precedence bug — it cleared every bit BELOW the size field as well as the
 * field itself. The correct field mask is ~(0x7 << L2C_SIZE_CFG_OFF).
 */
int config_L2_size(int size)
{
    volatile unsigned int cache_cfg, ret = 1;

    spin_lock(&cache_cfg_lock);
    cache_cfg = readl(IOMEM(MCUCFG_BASE));
    if (size == SZ_256K) {
        /* Clear only the 3-bit size field, then select 256KB (0x1). */
        cache_cfg &= ~(0x7 << L2C_SIZE_CFG_OFF);
        cache_cfg |= 0x1 << L2C_SIZE_CFG_OFF;
        mt65xx_reg_sync_writel(cache_cfg, MCUCFG_BASE);
    } else if (size == SZ_512K) {
        /* Clear only the 3-bit size field, then select 512KB (0x3). */
        cache_cfg &= ~(0x7 << L2C_SIZE_CFG_OFF);
        cache_cfg |= 0x3 << L2C_SIZE_CFG_OFF;
        mt65xx_reg_sync_writel(cache_cfg, MCUCFG_BASE);
    } else {
        ret = -1;
    }
    spin_unlock(&cache_cfg_lock);

    return ret;
}
int MET_BM_SetLatencyCounter(void) { unsigned int value; value = readl(EMI_BMEN2) & ~(0b11 << 24); //emi_ttype1 -- emi_ttype7 change as total latencies for m0 -- m6, and emi_ttype9 -- emi_ttype15 change as total transaction counts for m0 -- m6 value |= (0b10 << 24); mt65xx_reg_sync_writel(value, EMI_BMEN2); return BM_REQ_OK; }
/*
 * platform_smp_prepare_cpus: mark the first @max_cpus cores present,
 * enable the snoop control unit, and publish the physical address of the
 * secondary startup trampoline in the system-wide jump register so
 * released cores know where to go.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
    int i;

    for (i = 0; i < max_cpus; i++)
        set_cpu_present(i, true);

    scu_enable((void *)SCU_BASE);

    /*
     * write the address of slave startup into the system-wide flags register
     */
    mt65xx_reg_sync_writel(virt_to_phys(mt_secondary_startup), SLAVE_JUMP_REG);
}
/**************************************************************
 * mt hotplug mechanism control interface for procfs test0
 ***************************************************************/
/*
 * procfs read handler for the hotplug test0 node: reports g_test0, dumps
 * stacks on every CPU, and for each of the four cores writes a selector
 * (8..11) into the debug-monitor control register at 0xf0200080 and prints
 * the event/monitor values read back from 0xf0200080/0xf0200084.
 * Returns the number of bytes written into @buf.
 */
static int mt_hotplug_mechanism_read_test0(char *buf, char **start, off_t off, int count, int *eof, void *data)
{
    char *p = buf;

    p += sprintf(p, "%d\n", g_test0);
    *eof = 1;

    HOTPLUG_INFO("mt_hotplug_mechanism_read_test0, hotplug_cpu_count: %d\n", atomic_read(&hotplug_cpu_count));
    on_each_cpu((smp_call_func_t)dump_stack, NULL, 1);

    /* Selector values 8..11 pick the debug event/monitor of CPU0..CPU3. */
    mt65xx_reg_sync_writel(8, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 0, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
    mt65xx_reg_sync_writel(9, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 1, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
    mt65xx_reg_sync_writel(10, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 2, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));
    mt65xx_reg_sync_writel(11, 0xf0200080);
    printk(KERN_EMERG "CPU%u, debug event: 0x%08x, debug monitor: 0x%08x\n", 3, *(volatile u32 *)(0xf0200080), *(volatile u32 *)(0xf0200084));

    return p - buf;
}
static ssize_t cpu_ss_period_mode_write(struct file *file, const char *buffer, unsigned long count, void *data) { int len = 0; char mode[20], desc[32]; ktime_t ktime = ktime_set(mt_cpu_ss_period_s, mt_cpu_ss_period_ns); len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1); if (copy_from_user(desc, buffer, len)) { return 0; } desc[len] = '\0'; if (sscanf(desc, "%s", mode) == 1) { if (!strcmp(mode, "enable")) { printk("[%s]: enable cpu speed switch period mode\n", __FUNCTION__); mt_cpu_ss_period_mode = true; mt_cpu_ss_thread = kthread_run(mt_cpu_ss_thread_handler, 0, "cpu speed switch"); if (IS_ERR(mt_cpu_ss_thread)) { printk("[%s]: failed to create cpu speed switch thread\n", __FUNCTION__); } hrtimer_start(&mt_cpu_ss_timer, ktime, HRTIMER_MODE_REL); return count; } else if (!strcmp(mode, "disable")) { printk("[%s]: disable cpu speed switch period mode\n", __FUNCTION__); mt_cpu_ss_period_mode = false; kthread_stop(mt_cpu_ss_thread); mt65xx_reg_sync_writel((DRV_Reg32(TOP_CKMUXSEL) | 0x0004), TOP_CKMUXSEL); hrtimer_cancel(&mt_cpu_ss_timer); return count; } else { printk("[%s]: bad argument!! should be \"enable\" or \"disable\"\n", __FUNCTION__); } } else { printk("[%s]: bad argument!! should be \"enable\" or \"disable\"\n", __FUNCTION__); } return -EINVAL; }