static irqreturn_t tmu_irq(int irq, void *id)
{
	struct tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + INTSTAT);

	/* When multiple interrupts are pending, the one signalling the
	 * highest temperature is serviced first.
	 */
#if defined(CONFIG_TC_VOLTAGE)
	if (status & INTSTAT_FALL0) {
		pr_info("TC interrupt occurred!\n");
		__raw_writel(INTCLEAR_FALL0, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_TC;
	} else if (status & INTSTAT_RISE2) {
#else
	if (status & INTSTAT_RISE2) {
#endif
		pr_info("Tripping interrupt occurred!\n");
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2, info->tmu_base + INTCLEAR);
	} else if (status & INTSTAT_RISE1) {
		pr_info("Warning interrupt occurred!\n");
		__raw_writel(INTCLEAR_RISE1, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_WARNING;
	} else if (status & INTSTAT_RISE0) {
		pr_info("Throttling interrupt occurred!\n");
		__raw_writel(INTCLEAR_RISE0, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_THROTTLED;
	} else {
		pr_err("%s: TMU interrupt error\n", __func__);
		/* no recognised source; an irq handler returns irqreturn_t */
		return IRQ_NONE;
	}

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			usecs_to_jiffies(1 * 1000));

	return IRQ_HANDLED;
}

static irqreturn_t exynos4210_tmu_irq(int irq, void *id)
{
	struct tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + INTSTAT);
	if (status & INTSTAT2) {
		pr_info("Tripping interrupt occurred!\n");
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR2, info->tmu_base + INTCLEAR);
	} else if (status & INTSTAT1) {
		pr_info("Warning interrupt occurred!\n");
		__raw_writel(INTCLEAR1, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_WARNING;
	} else if (status & INTSTAT0) {
		pr_info("Throttling interrupt occurred!\n");
		__raw_writel(INTCLEAR0, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_THROTTLED;
	} else {
		pr_err("%s: TMU interrupt error\n", __func__);
		/* no recognised source; an irq handler returns irqreturn_t */
		return IRQ_NONE;
	}

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			usecs_to_jiffies(1000));

	return IRQ_HANDLED;
}

static int __devinit tmu_probe(struct platform_device *pdev)
{
	struct tmu_info *info;
	struct resource *res;
	int ret = 0;

	pr_debug("%s: probe=%p\n", __func__, pdev);

	info = kzalloc(sizeof(struct tmu_info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "failed to alloc memory!\n");
		ret = -ENOMEM;
		goto err_nomem;
	}
	pr_debug("TMU: memory allocation successful\n");

	platform_set_drvdata(pdev, info);
	pr_debug("TMU: platform data set\n");

	info->dev = &pdev->dev;
	pr_debug("TMU: copied the dev access information\n");

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "no irq for thermal\n");
		ret = -ENOENT;
		goto err_noirq;
	}
	if (soc_is_exynos4210())
		ret = request_irq(info->irq, exynos4210_tmu_irq,
				IRQF_DISABLED, "tmu interrupt", info);
	else
		ret = request_irq(info->irq, tmu_irq,
				IRQF_DISABLED, "tmu interrupt", info);
	if (ret) {
		dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq, ret);
		goto err_noirq;
	}
	pr_debug("TMU: IRQ granted!\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		ret = -ENODEV;
		goto err_nores;
	}
	pr_debug("TMU: IO memory resource found\n");

	info->ioarea = request_mem_region(res->start, resource_size(res),
					pdev->name);
	if (!(info->ioarea)) {
		dev_err(&pdev->dev, "failed to reserve memory region\n");
		ret = -EBUSY;
		goto err_nores;
	}
	pr_debug("TMU: memory region reserved\n");

	info->tmu_base = ioremap(res->start, resource_size(res));
	if (!(info->tmu_base)) {
		dev_err(&pdev->dev, "failed ioremap()\n");
		ret = -EINVAL;
		goto err_nomap;
	}
	pr_debug("TMU: IO memory remapped\n");

	ret = thermal_create_sysfs_file(&pdev->dev);
	if (ret)
		goto err_sysfs;
	pr_debug("TMU: created sysfs\n");

	tmu_monitor_wq = create_freezable_workqueue("tmu");
	if (!tmu_monitor_wq) {
		dev_err(&pdev->dev, "creation of tmu_monitor_wq failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}
	pr_debug("TMU: workqueue created\n");

	INIT_DELAYED_WORK_DEFERRABLE(&info->polling, tmu_monitor);
	pr_debug("TMU: work created\n");
#ifdef CONFIG_TMU_DEBUG
	INIT_DELAYED_WORK_DEFERRABLE(&info->monitor, cur_temp_monitor);
#endif

	print_temperature_params(info);
	pr_debug("TMU: printed parameters\n");

	ret = tmu_initialize(pdev);
	if (ret < 0)
		goto err_noinit;

#ifdef CONFIG_TMU_DEBUG
	queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
			info->sampling_rate);
#endif
	pr_info("TMU initialization is successful!\n");

	return ret;

err_noinit:
	destroy_workqueue(tmu_monitor_wq);
err_wq:
	thermal_remove_sysfs_file(&pdev->dev);
err_sysfs:
	iounmap(info->tmu_base);
err_nomap:
	release_resource(info->ioarea);
	kfree(info->ioarea);	/* release_resource() does not free it */
err_nores:
	free_irq(info->irq, info);
err_noirq:
	kfree(info);
	info = NULL;
err_nomem:
	dev_err(&pdev->dev, "initialization failed.\n");
	return ret;
}

static int __devexit tmu_remove(struct platform_device *pdev)
{
	struct tmu_info *info = platform_get_drvdata(pdev);

	cancel_delayed_work(&info->polling);
	destroy_workqueue(tmu_monitor_wq);

	thermal_remove_sysfs_file(&pdev->dev);

	iounmap(info->tmu_base);

	release_resource(info->ioarea);
	kfree(info->ioarea);

	/* dev_id must match the one passed to request_irq() */
	free_irq(info->irq, info);

	kfree(info);
	info = NULL;

	pr_info("%s is removed\n", dev_name(&pdev->dev));
	return 0;
}

#ifdef CONFIG_PM
static int tmu_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct tmu_info *info = platform_get_drvdata(pdev);

	pm_tmu_save(info);

	return 0;
}

static int tmu_resume(struct platform_device *pdev)
{
	struct tmu_info *info = platform_get_drvdata(pdev);
#if defined(CONFIG_TC_VOLTAGE)
	struct tmu_data *data = info->dev->platform_data;
#endif

	pm_tmu_restore(info);

#if defined(CONFIG_TC_VOLTAGE)
	/* S/W workaround for fast service when no interrupt occurs,
	 * e.g. the current temperature is already below the TC interrupt
	 * temperature, or keeps rising continuously.
	 */
	mdelay(1);
	if (get_cur_temp(info) <= data->ts.start_tc) {
		disable_irq_nosync(info->irq);
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("%s\n", __func__);
		info->tmu_state = TMU_STATUS_TC;
		already_limit = 1;
		queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
				usecs_to_jiffies(1 * 1000));
	}
#endif

	return 0;
}
#else
#define tmu_suspend	NULL
#define tmu_resume	NULL
#endif

static struct platform_driver tmu_driver = {
	.probe		= tmu_probe,
	.remove		= __devexit_p(tmu_remove),
	.suspend	= tmu_suspend,
	.resume		= tmu_resume,
	.driver		= {
		.name	= "tmu",
		.owner	= THIS_MODULE,
	},
};

static int __init tmu_driver_init(void)
{
	return platform_driver_register(&tmu_driver);
}

late_initcall(tmu_driver_init);
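/*
 * Illustrative sketch (not part of the driver): the deferral pattern both
 * IRQ handlers above follow. The hard handler masks its own line with
 * disable_irq_nosync(), latches the cause in a state field, and defers the
 * real work to a delayed work item; the work function re-enables the line
 * once the condition has been serviced. All names here (my_info, my_irq,
 * my_work_fn) are hypothetical.
 */
struct my_info {
	int irq;
	struct delayed_work work;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_info *info = container_of(to_delayed_work(work),
					    struct my_info, work);

	/* ... poll the hardware and act on the latched state ... */

	enable_irq(info->irq);	/* re-arm the line once serviced */
}

static irqreturn_t my_irq(int irq, void *id)
{
	struct my_info *info = id;

	disable_irq_nosync(irq);	/* mask until the work has run */
	schedule_delayed_work(&info->work, usecs_to_jiffies(1000));
	return IRQ_HANDLED;
}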
static int exynos_tmu_init(struct tmu_info *info)
{
	struct tmu_data *data = info->dev->platform_data;
	unsigned int te_temp, con;
	unsigned int temp_throttle, temp_warning, temp_trip;
	unsigned int hw_temp_trip;
	unsigned int rising_thr = 0, cooling_thr = 0;

	/* must reload to use the efuse value on EXYNOS4212 */
	__raw_writel(TRIMINFO_RELOAD, info->tmu_base + TRIMINFO_CON);

	/* get the compensation parameter */
	te_temp = __raw_readl(info->tmu_base + TRIMINFO);
	info->te1 = te_temp & TRIM_INFO_MASK;
	info->te2 = ((te_temp >> 8) & TRIM_INFO_MASK);

	if ((EFUSE_MIN_VALUE > info->te1) || (info->te1 > EFUSE_MAX_VALUE)
			|| (info->te2 != 0))
		info->te1 = data->efuse_value;

	/* get the rising thresholds and set the interrupt levels */
	temp_throttle = data->ts.start_throttle + info->te1 - TMU_DC_VALUE;
	temp_warning = data->ts.start_warning + info->te1 - TMU_DC_VALUE;
	temp_trip = data->ts.start_tripping + info->te1 - TMU_DC_VALUE;
	hw_temp_trip = data->ts.start_hw_tripping + info->te1 - TMU_DC_VALUE;

	rising_thr = (temp_throttle | (temp_warning << 8) |
			(temp_trip << 16) | (hw_temp_trip << 24));

	__raw_writel(rising_thr, info->tmu_base + THD_TEMP_RISE);

#if defined(CONFIG_TC_VOLTAGE)
	/* get the set temperature for tc_voltage and set the falling
	 * interrupt trigger level
	 */
	cooling_thr = data->ts.start_tc + info->te1 - TMU_DC_VALUE;
#endif
	__raw_writel(cooling_thr, info->tmu_base + THD_TEMP_FALL);

	/* set TMU status */
	info->tmu_state = TMU_STATUS_INIT;

	/* set the frequency levels */
	exynos_cpufreq_get_level(data->cpulimit.throttle_freq,
				&info->throttle_freq);
	exynos_cpufreq_get_level(data->cpulimit.warning_freq,
				&info->warning_freq);

	/* map auto_refresh_rate of normal & tq0 mode */
	info->auto_refresh_tq0 =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0);
	info->auto_refresh_normal =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL);

	/* to poll the current temp, set the sampling rate */
	info->sampling_rate = usecs_to_jiffies(200 * 1000);

#if defined(CONFIG_TC_VOLTAGE)
	/* temperature-compensated voltage */
	if (exynos_find_cpufreq_level_by_volt(data->temp_compensate.arm_volt,
			&info->cpulevel_tc) < 0) {
		pr_err("cpufreq_get_level error\n");
		return -EINVAL;
	}
#ifdef CONFIG_BUSFREQ_OPP
	/* to lock the bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
	if (info->bus_dev < 0) {
		pr_err("Failed to get_dev\n");
		return -EINVAL;
	}
	if (exynos4x12_find_busfreq_by_volt(data->temp_compensate.bus_volt,
			&info->busfreq_tc)) {
		pr_err("get_busfreq_value error\n");
	}
#endif
	if (mali_voltage_lock_init()) {
		pr_err("Failed to initialize mali voltage lock.\n");
		return -EINVAL;
	}

	pr_info("%s: cpufreq_level[%d], busfreq_value[%d]\n",
			__func__, info->cpulevel_tc, info->busfreq_tc);
#endif

	/* the registers can only be set up after the parameter info above
	 * has been fetched
	 */
	/* [28:23] vref, [11:8] slope - tuning parameters */
	__raw_writel(data->slope, info->tmu_base + TMU_CON);

	__raw_writel((CLEAR_RISE_INT | CLEAR_FALL_INT),
			info->tmu_base + INTCLEAR);

	/* TMU core enable and HW tripping enable */
	con = __raw_readl(info->tmu_base + TMU_CON);
	con &= ~(HW_TRIP_MODE);
	con |= (HW_TRIPPING_EN | MUX_ADDR_VALUE << 20 | CORE_EN);

	__raw_writel(con, info->tmu_base + TMU_CON);

	/* temperature sensing takes approximately 940 us, so the first
	 * valid sample is available about 1 ms after the TMU is enabled.
	 */
	mdelay(1);

	te_temp = __raw_readl(S5P_PMU_PS_HOLD_CONTROL);
	te_temp |= S5P_PS_HOLD_EN;
	__raw_writel(te_temp, S5P_PMU_PS_HOLD_CONTROL);

	/* enable the LEV0, LEV1 and LEV2 interrupts */
	__raw_writel(INTEN_RISE0 | INTEN_RISE1 | INTEN_RISE2,
			info->tmu_base + INTEN);

#if defined(CONFIG_TC_VOLTAGE)
	te_temp = __raw_readl(info->tmu_base + INTEN);
	te_temp |= INTEN_FALL0;
	__raw_writel(te_temp, info->tmu_base + INTEN);

	/* S/W workaround for fast service when no interrupt occurs,
	 * e.g. the current temperature is already below the TC interrupt
	 * temperature, or keeps rising continuously.
	 */
	if (get_cur_temp(info) <= data->ts.start_tc) {
		disable_irq_nosync(info->irq);
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("%s\n", __func__);
		info->tmu_state = TMU_STATUS_TC;
		already_limit = 1;
		queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
				usecs_to_jiffies(1000));
	}
#endif

	return 0;
}
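/*
 * Illustrative sketch (not part of the driver): how exynos_tmu_init()
 * packs the four rising trip points into THD_TEMP_RISE. Each threshold in
 * degrees Celsius becomes an 8-bit sensor code by adding the per-chip trim
 * (te1) and subtracting TMU_DC_VALUE; the four codes then occupy one byte
 * lane each. The helper and the numbers below are hypothetical.
 */
static u32 pack_rising_thresholds(u32 throttle_c, u32 warning_c,
				  u32 trip_c, u32 hw_trip_c, u32 te1)
{
	u32 off = te1 - TMU_DC_VALUE;	/* trim offset */

	return (throttle_c + off) |
	       ((warning_c + off) << 8) |
	       ((trip_c + off) << 16) |
	       ((hw_trip_c + off) << 24);
}

/*
 * Example: with te1 = 55 and TMU_DC_VALUE = 25,
 * pack_rising_thresholds(85, 95, 110, 115, 55) yields 0x918C7D73,
 * i.e. codes 0x73, 0x7D, 0x8C, 0x91 from the lowest byte upward.
 */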
static irqreturn_t exynos4x12_tmu_irq_handler(int irq, void *id)
{
	struct s5p_tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT) & 0x1FFFF;
	pr_info("EXYNOS4x12_tmu interrupt: INTSTAT = 0x%08x\n", status);

	/* When multiple interrupts are pending, the one signalling the
	 * highest temperature is serviced first.
	 */
#if defined(CONFIG_TC_VOLTAGE)
	if (status & INTSTAT_FALL0) {
		info->tmu_state = TMU_STATUS_TC;
		__raw_writel(INTCLEARALL,
			info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		exynos_interrupt_enable(info, 0);
	} else if (status & INTSTAT_RISE2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2,
			info->tmu_base + EXYNOS4_TMU_INTCLEAR);
#else
	if (status & INTSTAT_RISE2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2,
			info->tmu_base + EXYNOS4_TMU_INTCLEAR);
#endif
	} else if (status & INTSTAT_RISE1) {
		info->tmu_state = TMU_STATUS_WARNING;
		__raw_writel(INTCLEAR_RISE1,
			info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & INTSTAT_RISE0) {
		info->tmu_state = TMU_STATUS_THROTTLED;
		__raw_writel(INTCLEAR_RISE0,
			info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else {
		pr_err("%s: interrupt error\n", __func__);
		__raw_writel(INTCLEARALL,
			info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		queue_delayed_work_on(0, tmu_monitor_wq,
			&info->polling, info->sampling_rate / 2);
		/* no recognised source; an irq handler returns irqreturn_t */
		return IRQ_NONE;
	}

	/* read the current temperature & save it */
	info->last_temperature = get_curr_temp(info);

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	return IRQ_HANDLED;
}

static irqreturn_t exynos4210_tmu_irq_handler(int irq, void *id)
{
	struct s5p_tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT);
	pr_info("EXYNOS4210_tmu interrupt: INTSTAT = 0x%08x\n", status);

	/* When multiple interrupts are pending, the one signalling the
	 * highest temperature is serviced first.
	 */
	if (status & TMU_INTSTAT2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & TMU_INTSTAT1) {
		info->tmu_state = TMU_STATUS_WARNING;
		__raw_writel(INTCLEAR1, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & TMU_INTSTAT0) {
		info->tmu_state = TMU_STATUS_THROTTLED;
		__raw_writel(INTCLEAR0, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else {
		pr_err("%s: interrupt error\n", __func__);
		__raw_writel(INTCLEARALL,
			info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		queue_delayed_work_on(0, tmu_monitor_wq,
			&info->polling, info->sampling_rate / 2);
		/* no recognised source; an irq handler returns irqreturn_t */
		return IRQ_NONE;
	}

	/* read the current temperature & save it */
	info->last_temperature = get_curr_temp(info);

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	return IRQ_HANDLED;
}

#ifdef CONFIG_TMU_SYSFS
static ssize_t s5p_tmu_show_curr_temp(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct s5p_tmu_info *info = dev_get_drvdata(dev);
	unsigned int curr_temp;

	curr_temp = get_curr_temp(info);
	curr_temp *= 10;	/* export in units of 0.1 degrees Celsius */
	pr_info("curr temp = %u\n", curr_temp);

	return sprintf(buf, "%d\n", curr_temp);
}
static DEVICE_ATTR(curr_temp, S_IRUGO, s5p_tmu_show_curr_temp, NULL);
#endif

static int __devinit s5p_tmu_probe(struct platform_device *pdev)
{
	struct s5p_tmu_info *info;
	struct s5p_platform_tmu *pdata;
	struct resource *res;
	unsigned int mask = (enable_mask & ENABLE_DBGMASK);
	int ret = 0;

	pr_debug("%s: probe=%p\n", __func__, pdev);

	info = kzalloc(sizeof(struct s5p_tmu_info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "failed to alloc memory!\n");
		ret = -ENOMEM;
		goto err_nomem;
	}
	platform_set_drvdata(pdev, info);

	info->dev = &pdev->dev;
	info->tmu_state = TMU_STATUS_INIT;

	/* set the cpufreq limit levels for the 1st & 2nd throttle */
	pdata = info->dev->platform_data;
	if (pdata->cpufreq.limit_1st_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_1st_throttle,
				&info->cpufreq_level_1st_throttle);
	if (pdata->cpufreq.limit_2nd_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_2nd_throttle,
				&info->cpufreq_level_2nd_throttle);

	pr_info("@@@ %s: cpufreq_limit: 1st_throttle: %u, 2nd_throttle = %u\n",
		__func__, info->cpufreq_level_1st_throttle,
		info->cpufreq_level_2nd_throttle);

#if defined(CONFIG_TC_VOLTAGE)
	/* temperature-compensated voltage */
	if (exynos_find_cpufreq_level_by_volt(pdata->temp_compensate.arm_volt,
			&info->cpulevel_tc) < 0) {
		dev_err(&pdev->dev, "cpufreq_get_level error\n");
		ret = -EINVAL;
		goto err_nores;
	}
#ifdef CONFIG_BUSFREQ_OPP
	/* to lock the bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
	if (info->bus_dev < 0) {
		dev_err(&pdev->dev, "Failed to get_dev\n");
		ret = -EINVAL;
		goto err_nores;
	}
	if (exynos4x12_find_busfreq_by_volt(pdata->temp_compensate.bus_volt,
			&info->busfreq_tc)) {
		dev_err(&pdev->dev, "get_busfreq_value error\n");
		ret = -EINVAL;
		goto err_nores;
	}
#endif
	pr_info("%s: cpufreq_level[%u], busfreq_value[%u]\n",
			__func__, info->cpulevel_tc, info->busfreq_tc);
#endif

	/* map auto_refresh_rate of normal & tq0 mode */
	info->auto_refresh_tq0 =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0);
	info->auto_refresh_normal =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL);

	/* to poll the current temp, sample once per second */
	info->sampling_rate = usecs_to_jiffies(1000 * 1000);

	/* 10 sec monitoring period */
	info->monitor_period = usecs_to_jiffies(10000 * 1000);

	/* support test mode */
	if (mask & ENABLE_TEST_MODE)
		set_temperature_params(info);
	else
		print_temperature_params(info);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		ret = -ENODEV;
		goto err_nores;
	}

	info->ioarea = request_mem_region(res->start, resource_size(res),
			pdev->name);
	if (!(info->ioarea)) {
		dev_err(&pdev->dev, "failed to reserve memory region\n");
		ret = -EBUSY;
		goto err_nores;
	}

	info->tmu_base = ioremap(res->start, resource_size(res));
	if (!(info->tmu_base)) {
		dev_err(&pdev->dev, "failed ioremap()\n");
		ret = -ENOMEM;
		goto err_nomap;
	}

	tmu_monitor_wq = create_freezable_workqueue(dev_name(&pdev->dev));
	if (!tmu_monitor_wq) {
		pr_info("Creation of tmu_monitor_wq failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	/* to support periodic temperature monitoring */
	if (mask & ENABLE_TEMP_MON) {
		INIT_DELAYED_WORK_DEFERRABLE(&info->monitor,
				exynos4_poll_cur_temp);
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
				info->monitor_period);
	}
	INIT_DELAYED_WORK_DEFERRABLE(&info->polling,
			exynos4_handler_tmu_state);

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "no irq for thermal %d\n", info->irq);
		ret = -EINVAL;
		goto err_irq;
	}
	if (soc_is_exynos4210())
		ret = request_irq(info->irq, exynos4210_tmu_irq_handler,
				IRQF_DISABLED, "s5p-tmu interrupt", info);
	else
		ret = request_irq(info->irq, exynos4x12_tmu_irq_handler,
				IRQF_DISABLED, "s5p-tmu interrupt", info);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
		goto err_irq;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_temperature);
	if (ret != 0) {
		pr_err("Failed to create temperature file: %d\n", ret);
		goto err_sysfs_file1;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_tmu_state);
	if (ret != 0) {
		pr_err("Failed to create tmu_state file: %d\n", ret);
		goto err_sysfs_file2;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_lot_id);
	if (ret != 0) {
		pr_err("Failed to create lot id file: %d\n", ret);
		goto err_sysfs_file3;
	}

	ret = tmu_initialize(pdev);
	if (ret)
		goto err_init;

#ifdef CONFIG_TMU_SYSFS
	ret = device_create_file(&pdev->dev, &dev_attr_curr_temp);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to create sysfs group\n");
		goto err_init;
	}
#endif

#ifdef CONFIG_TMU_DEBUG
	ret = device_create_file(&pdev->dev, &dev_attr_print_state);
	if (ret) {
		dev_err(&pdev->dev, "Failed to create tmu sysfs group\n");
		return ret;
	}
#endif

#if defined(CONFIG_TC_VOLTAGE)
	/* S/W workaround for fast service when no interrupt occurs,
	 * e.g. the current temperature is already below the TC interrupt
	 * temperature, or keeps rising continuously.
	 */
	if (get_curr_temp(info) <= pdata->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");
	}
#if defined(CONFIG_VIDEO_MALI400MP)
	if (mali_voltage_lock_init())
		pr_err("Failed to initialize mali voltage lock.\n");
#endif
#endif

	/* initialize tmu_state */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	return ret;

err_init:
	device_remove_file(&pdev->dev, &dev_attr_lot_id);
err_sysfs_file3:
	device_remove_file(&pdev->dev, &dev_attr_tmu_state);
err_sysfs_file2:
	device_remove_file(&pdev->dev, &dev_attr_temperature);
err_sysfs_file1:
	if (info->irq >= 0)
		free_irq(info->irq, info);
err_irq:
	destroy_workqueue(tmu_monitor_wq);
err_wq:
	iounmap(info->tmu_base);
err_nomap:
	release_resource(info->ioarea);
	kfree(info->ioarea);
err_nores:
	kfree(info);
	info = NULL;
err_nomem:
	dev_err(&pdev->dev, "initialization failed.\n");
	return ret;
}

static int __devexit s5p_tmu_remove(struct platform_device *pdev)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);

	cancel_delayed_work(&info->polling);
	destroy_workqueue(tmu_monitor_wq);

	device_remove_file(&pdev->dev, &dev_attr_temperature);
	device_remove_file(&pdev->dev, &dev_attr_tmu_state);

	if (info->irq >= 0)
		free_irq(info->irq, info);

	iounmap(info->tmu_base);

	release_resource(info->ioarea);
	kfree(info->ioarea);

	kfree(info);
	info = NULL;

	pr_info("%s is removed\n", dev_name(&pdev->dev));
	return 0;
}

#ifdef CONFIG_PM
static int s5p_tmu_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);

	if (!info)
		return -EAGAIN;

	/* save register values */
	info->reg_save[0] = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL);
	info->reg_save[1] = __raw_readl(info->tmu_base
				+ EXYNOS4_TMU_SAMPLING_INTERNAL);
	info->reg_save[2] = __raw_readl(info->tmu_base
				+ EXYNOS4_TMU_COUNTER_VALUE0);
	info->reg_save[3] = __raw_readl(info->tmu_base
				+ EXYNOS4_TMU_COUNTER_VALUE1);
	info->reg_save[4] = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN);

	if (soc_is_exynos4210()) {
		info->reg_save[5] = __raw_readl(info->tmu_base
					+ EXYNOS4210_TMU_THRESHOLD_TEMP);
		info->reg_save[6] = __raw_readl(info->tmu_base
					+ EXYNOS4210_TMU_TRIG_LEVEL0);
		info->reg_save[7] = __raw_readl(info->tmu_base
					+ EXYNOS4210_TMU_TRIG_LEVEL1);
		info->reg_save[8] = __raw_readl(info->tmu_base
					+ EXYNOS4210_TMU_TRIG_LEVEL2);
		info->reg_save[9] = __raw_readl(info->tmu_base
					+ EXYNOS4210_TMU_TRIG_LEVEL3);
	} else {
		info->reg_save[5] = __raw_readl(info->tmu_base
					+ EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		info->reg_save[6] = __raw_readl(info->tmu_base
					+ EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
#endif
	}

	disable_irq(info->irq);

	return 0;
}

static int s5p_tmu_resume(struct platform_device *pdev)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);
	struct s5p_platform_tmu *data;

	if (!info || !(info->dev))
		return -EAGAIN;

	data = info->dev->platform_data;

	/* restore tmu register values */
	__raw_writel(info->reg_save[0],
			info->tmu_base + EXYNOS4_TMU_CONTROL);
	__raw_writel(info->reg_save[1],
			info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL);
	__raw_writel(info->reg_save[2],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0);
	__raw_writel(info->reg_save[3],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1);

	if (soc_is_exynos4210()) {
		__raw_writel(info->reg_save[5],
			info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
		__raw_writel(info->reg_save[6],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
		__raw_writel(info->reg_save[7],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
		__raw_writel(info->reg_save[8],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
		__raw_writel(info->reg_save[9],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
	} else {
		__raw_writel(info->reg_save[5],
			info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		__raw_writel(info->reg_save[6],
			info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
#endif
	}
	__raw_writel(info->reg_save[4], info->tmu_base + EXYNOS4_TMU_INTEN);

#if defined(CONFIG_TC_VOLTAGE)
	/* S/W workaround for fast service when no interrupt occurs,
	 * e.g. the current temperature is already below the TC interrupt
	 * temperature, or keeps rising continuously.
	 */
	mdelay(1);
	if (get_curr_temp(info) <= data->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");
	}
#endif
	/* find out the tmu_state after wakeup */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, 0);

	return 0;
}
#else
#define s5p_tmu_suspend	NULL
#define s5p_tmu_resume	NULL
#endif

static struct platform_driver s5p_tmu_driver = {
	.probe		= s5p_tmu_probe,
	.remove		= __devexit_p(s5p_tmu_remove),
	.suspend	= s5p_tmu_suspend,
	.resume		= s5p_tmu_resume,
	.driver		= {
		.name	= "s5p-tmu",
		.owner	= THIS_MODULE,
	},
};

static int __init s5p_tmu_driver_init(void)
{
	return platform_driver_register(&s5p_tmu_driver);
}

static void __exit s5p_tmu_driver_exit(void)
{
	platform_driver_unregister(&s5p_tmu_driver);
}

module_init(s5p_tmu_driver_init);
module_exit(s5p_tmu_driver_exit);
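/*
 * Illustrative sketch (not part of the driver): a userspace reader for the
 * curr_temp attribute exported above when CONFIG_TMU_SYSFS is set. Build
 * it as a standalone program; the sysfs path is hypothetical and depends
 * on how the "s5p-tmu" platform device is enumerated on a given board.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/platform/s5p-tmu/curr_temp", "r");
	int temp;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &temp) == 1)
		/* the attribute reports degrees Celsius times ten */
		printf("SoC temperature: %d.%d C\n", temp / 10, temp % 10);
	fclose(f);
	return 0;
}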
static void tmu_monitor(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tmu_info *info = container_of(delayed_work,
			struct tmu_info, polling);
	struct tmu_data *data = info->dev->platform_data;
	int cur_temp;

	cur_temp = get_cur_temp(info);
#ifdef CONFIG_TMU_DEBUG
	cancel_delayed_work(&info->monitor);
	pr_info("Current: %dc, FLAG=%d\n", cur_temp, info->tmu_state);
#endif

	mutex_lock(&tmu_lock);
	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0)
				pr_err("%s\n", __func__);
			info->tmu_state = TMU_STATUS_NORMAL;
			already_limit = 0;
			pr_info("TC limit is released!!\n");
		} else if (cur_temp <= data->ts.start_tc && !already_limit) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("%s\n", __func__);
			already_limit = 1;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
#ifdef CONFIG_TMU_DEBUG
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
				info->sampling_rate);
#endif
		__raw_writel((CLEAR_RISE_INT | CLEAR_FALL_INT),
				info->tmu_base + INTCLEAR);
		enable_irq(info->irq);
		mutex_unlock(&tmu_lock);
		return;
	case TMU_STATUS_THROTTLED:
		if (cur_temp >= data->ts.start_warning) {
			info->tmu_state = TMU_STATUS_WARNING;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			already_limit = 0;
		} else if (cur_temp > data->ts.stop_throttle &&
			cur_temp < data->ts.start_warning && !already_limit) {
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->throttle_freq);
			already_limit = 1;
		} else if (cur_temp <= data->ts.stop_throttle) {
			info->tmu_state = TMU_STATUS_NORMAL;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			pr_info("Freq limit is released!!\n");
			already_limit = 0;
		}
		break;
	case TMU_STATUS_WARNING:
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			already_limit = 0;
		} else if (cur_temp > data->ts.stop_warning &&
			cur_temp < data->ts.start_tripping && !already_limit) {
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->warning_freq);
			already_limit = 1;
		} else if (cur_temp <= data->ts.stop_warning) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			already_limit = 0;
		}
		break;
	case TMU_STATUS_TRIPPED:
		mutex_unlock(&tmu_lock);
		tmu_tripped_cb();
		return;
	default:
		break;
	}

	/* memory throttling */
	if (cur_temp >= data->ts.start_mem_throttle
			&& !(auto_refresh_changed)) {
		pr_info("set auto_refresh 1.95us\n");
		set_refresh_rate(info->auto_refresh_tq0);
		auto_refresh_changed = 1;
	} else if (cur_temp <= (data->ts.stop_mem_throttle)
			&& (auto_refresh_changed)) {
		pr_info("set auto_refresh 3.9us\n");
		set_refresh_rate(info->auto_refresh_normal);
		auto_refresh_changed = 0;
	}

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);
	mutex_unlock(&tmu_lock);
	return;
}
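/*
 * Illustrative sketch (not part of the driver): the hysteresis latch that
 * tmu_monitor() builds from each pair of start and stop thresholds plus
 * the already_limit flag. A limit is applied once when the temperature
 * crosses the start threshold, and released only after it falls below the
 * lower stop threshold, so the driver neither re-applies the lock on every
 * poll nor flaps around a single trip point. All names below are
 * hypothetical.
 */
static int limit_active;	/* plays the role of already_limit */

static void apply_limit(void)   { /* e.g. set a cpufreq upper limit */ }
static void release_limit(void) { /* e.g. free the cpufreq upper limit */ }

static void hysteresis_step(int temp, int start_thr, int stop_thr)
{
	if (temp >= start_thr && !limit_active) {
		apply_limit();
		limit_active = 1;
	} else if (temp <= stop_thr && limit_active) {
		release_limit();
		limit_active = 0;
	}
	/* between stop_thr and start_thr the current state is kept */
}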
static void exynos4_handler_tmu_state(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct s5p_tmu_info *info = container_of(delayed_work,
			struct s5p_tmu_info, polling);
	struct s5p_platform_tmu *data = info->dev->platform_data;
	unsigned int cur_temp;
	static int auto_refresh_changed;
	static int check_handle;
	int trend = 0;
	int cpu = 0;

	mutex_lock(&tmu_lock);

	cur_temp = get_curr_temp(info);
	trend = cur_temp - info->last_temperature;
	pr_debug("curr_temp = %u, temp_diff = %d\n", cur_temp, trend);

	switch (info->tmu_state) {
#if defined(CONFIG_TC_VOLTAGE)
	case TMU_STATUS_TC:
		/* locking takes priority over unlocking */
		if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
		} else if (cur_temp >= data->ts.stop_tc) {
			if (exynos_tc_volt(info, 0) < 0) {
				pr_err("TMU: unlock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_NORMAL;
				pr_info("change state: tc -> normal.\n");
			}
		}
		/* free the upper limit if one is locked */
		if (check_handle) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_TMU);
			check_handle = 0;
		}
		break;
#endif
	case TMU_STATUS_NORMAL:
		/* 1. change state: 1st-throttling */
		if (cur_temp >= data->ts.start_1st_throttle) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: normal->throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temperature compensation is needed */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0) {
				pr_err("TMU: lock error!\n");
			} else {
				info->tmu_state = TMU_STATUS_TC;
				pr_info("change state: normal->tc.\n");
			}
#endif
		/* 2. polling end and uevent */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (cur_temp <= data->ts.stop_mem_throttle)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("normal: free cpufreq_limit & interrupt enable.\n");

			for_each_online_cpu(cpu)
				cpufreq_update_policy(cpu);

			/* clear pending bits to prevent a spurious interrupt */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			exynos_interrupt_enable(info, 1);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;
	case TMU_STATUS_THROTTLED:
		/* 1. change state: 2nd-throttling or warning */
		if (cur_temp >= data->ts.start_2nd_throttle) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: 1st throttle->2nd throttle.\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temperature compensation is needed */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_1st_throttle) &&
			!(check_handle & THROTTLE_FLAG)) {
			if (check_handle & WARNING_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(WARNING_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_1st_throttle);
			check_handle |= THROTTLE_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("throttling: set cpufreq upper limit.\n");
		/* 3. change state: normal */
		} else if ((cur_temp <= data->ts.stop_1st_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_NORMAL;
			pr_info("change state: 1st throttle->normal.\n");
		}
		break;
	case TMU_STATUS_WARNING:
		/* 1. change state: tripping */
		if (cur_temp >= data->ts.start_tripping) {
			info->tmu_state = TMU_STATUS_TRIPPED;
			pr_info("change state: 2nd throttle->trip\n");
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temperature compensation is needed */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. cpufreq limitation and uevent */
		} else if ((cur_temp >= data->ts.start_2nd_throttle) &&
			!(check_handle & WARNING_FLAG)) {
			if (check_handle & THROTTLE_FLAG) {
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_TMU);
				check_handle &= ~(THROTTLE_FLAG);
			}
			exynos_cpufreq_upper_limit(DVFS_LOCK_ID_TMU,
					info->cpufreq_level_2nd_throttle);
			check_handle |= WARNING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
			notify_change_of_tmu_state(info);
			pr_info("2nd throttle: cpufreq is limited.\n");
		/* 3. change state: 1st-throttling */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
			&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_THROTTLED;
			pr_info("change state: 2nd throttle->1st throttle, "
				"and release cpufreq upper limit.\n");
		}
		break;
	case TMU_STATUS_TRIPPED:
		/* 1. call uevent to shut down */
		if ((cur_temp >= data->ts.start_tripping) &&
			(trend > 0) && !(check_handle & TRIPPING_FLAG)) {
			notify_change_of_tmu_state(info);
			pr_info("tripping: waiting for shutdown.\n");
			check_handle |= TRIPPING_FLAG;
			pr_debug("check_handle = %d\n", check_handle);
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temperature compensation is needed */
		} else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
#endif
		/* 2. change state: 2nd-throttling or warning */
		} else if ((cur_temp <= data->ts.stop_2nd_throttle)
				&& (trend < 0)) {
			info->tmu_state = TMU_STATUS_WARNING;
			pr_info("change state: trip->2nd throttle, "
				"note: this occurs only in test mode.\n");
		}
		/* 3. chip protection: kernel panic as a S/W workaround */
		if ((cur_temp >= data->ts.start_emergency) && (trend > 0)) {
			panic("Emergency!!!! tripping is not treated!\n");
			/* unreachable: panic() does not return */
			__raw_writel(INTCLEARALL,
				info->tmu_base + EXYNOS4_TMU_INTCLEAR);
			enable_irq(info->irq);
			mutex_unlock(&tmu_lock);
			return;
		}
		break;
	case TMU_STATUS_INIT:
		/* send the initial tmu status to the platform */
		disable_irq(info->irq);
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temperature compensation is needed */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else if (cur_temp >= data->ts.start_2nd_throttle)
			info->tmu_state = TMU_STATUS_WARNING;
		else if (cur_temp >= data->ts.start_1st_throttle)
			info->tmu_state = TMU_STATUS_THROTTLED;
		else if (cur_temp <= data->ts.stop_1st_throttle)
			info->tmu_state = TMU_STATUS_NORMAL;

		notify_change_of_tmu_state(info);
		pr_info("%s: inform initial state to platform.\n", __func__);
		break;
	default:
		pr_warn("Bug: unknown tmu_state.\n");
		if (cur_temp >= data->ts.start_tripping)
			info->tmu_state = TMU_STATUS_TRIPPED;
#if defined(CONFIG_TC_VOLTAGE)
		/* check whether temperature compensation is needed */
		else if (cur_temp <= data->ts.start_tc) {
			if (exynos_tc_volt(info, 1) < 0)
				pr_err("TMU: lock error!\n");
			else
				info->tmu_state = TMU_STATUS_TC;
		}
#endif
		else
			info->tmu_state = TMU_STATUS_WARNING;
		break;
	} /* end */

	info->last_temperature = cur_temp;

	/* reschedule the next work */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			info->sampling_rate);

	mutex_unlock(&tmu_lock);
	return;
}
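/*
 * Illustrative sketch (not part of the driver): a summary of the bands
 * exynos4_handler_tmu_state() walks through, coldest to hottest. Each
 * state is entered at its start threshold and, usually only while the
 * temperature trend is falling, left at the matching lower stop
 * threshold. The table is a reading aid, not code the driver contains.
 */
static const struct {
	int state;
	const char *action;
} tmu_bands[] = {
	{ TMU_STATUS_TC,        "raise ARM/bus voltage (cold compensation)" },
	{ TMU_STATUS_NORMAL,    "no limits; re-enable interrupts" },
	{ TMU_STATUS_THROTTLED, "apply 1st cpufreq upper limit" },
	{ TMU_STATUS_WARNING,   "apply 2nd, stricter cpufreq upper limit" },
	{ TMU_STATUS_TRIPPED,   "send uevent to request shutdown" },
};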