/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	/* Map CPU0's current max frequency onto its cpufreq cooling level. */
	*state = cpufreq_cooling_get_level(0, cpufreq_quick_get_max(0));
	/* %lu: *state is unsigned long (the old %ld was a sign mismatch, UB) */
	pr_debug("*state=%lu\n", *state);
	return 0;
}
/* thermal framework callbacks */ static int kona_tmon_tz_cdev_bind(struct thermal_zone_device *tz, struct thermal_cooling_device *cdev) { struct kona_tmon_thermal *thermal = tz->devdata; int idx, level, ret = 0; /* check if the cooling device is registered */ if (thermal->freq_cdev != cdev) return 0; for (idx = 0; idx < thermal->active_cnt; idx++) { level = cpufreq_cooling_get_level(0, thermal->pdata->trips[idx].max_freq); if (level == THERMAL_CSTATE_INVALID) continue; ret = thermal_zone_bind_cooling_device(tz, idx, cdev, level, 0); if (ret) { tmon_dbg(TMON_LOG_ERR, "binding colling device (%s) on trip %d: failed\n", cdev->type, idx); goto err; } } return ret; err: for (; idx >= 0; --idx) thermal_zone_unbind_cooling_device(tz, idx, cdev); return ret; }
/* Bind callback functions for thermal zone */ static int exynos_bind(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) { int ret = 0, i, tab_size, level; struct freq_clip_table *tab_ptr, *clip_data; struct exynos_thermal_zone *th_zone = thermal->devdata; struct thermal_sensor_conf *data = th_zone->sensor_conf; tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data; tab_size = data->cooling_data.freq_clip_count; if (tab_ptr == NULL || tab_size == 0) return 0; /* find the cooling device registered*/ for (i = 0; i < th_zone->cool_dev_size; i++) if (cdev == th_zone->cool_dev[i]) break; /* No matching cooling device */ if (i == th_zone->cool_dev_size) return 0; /* Bind the thermal zone to the cpufreq cooling device */ for (i = 0; i < tab_size; i++) { clip_data = (struct freq_clip_table *)&(tab_ptr[i]); level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max); if (level == THERMAL_CSTATE_INVALID) return 0; switch (GET_ZONE(i)) { case MONITOR_ZONE: case WARN_ZONE: if (thermal_zone_bind_cooling_device(thermal, i, cdev, level, 0)) { dev_err(data->dev, "error unbinding cdev inst=%d\n", i); ret = -EINVAL; } th_zone->bind = true; break; default: ret = -EINVAL; } } return ret; }
static struct amlogic_thermal_platform_data * amlogic_thermal_init_from_dts(struct platform_device *pdev, int trim_flag) { int i = 0, ret = -1, val = 0, cells, descend, error = 0; struct property *prop; struct temp_level *tmp_level = NULL; struct amlogic_thermal_platform_data *pdata = NULL; if(!of_property_read_u32(pdev->dev.of_node, "trip_point", &val)){ //INIT FROM DTS pdata=kzalloc(sizeof(*pdata),GFP_KERNEL); if(!pdata){ goto err; } memset((void* )pdata,0,sizeof(*pdata)); ret=of_property_read_u32(pdev->dev.of_node, "#thermal-cells", &val); if(ret){ dev_err(&pdev->dev, "dt probe #thermal-cells failed: %d\n", ret); goto err; } cells=val; /* * process for KEEP_MODE and virtual thermal * Logic: If virtual thermal is enabled, then ignore keep_mode * */ pdata->trim_flag = trim_flag; if (!pdata->trim_flag) { // chip is not trimmed, use virtual thermal aml_virtaul_thermal_probe(pdev, pdata); } else if (of_property_read_bool(pdev->dev.of_node, "keep_mode")) { if (of_property_read_u32(pdev->dev.of_node, "keep_mode_threshold", &pdata->keep_mode_threshold)) { error = 1; } if (of_property_read_u32_array(pdev->dev.of_node, "keep_mode_max_range", pdata->keep_mode_max_range, sizeof(pdata->keep_mode_max_range)/sizeof(u32))) { error = 1; } if (!error && pdata->trim_flag) { // keep mode should not used for virtual thermal right now THERMAL_INFO("keep_mode_max_range: [%7d, %3d, %d, %d]\n", pdata->keep_mode_max_range[0], pdata->keep_mode_max_range[1], pdata->keep_mode_max_range[2], pdata->keep_mode_max_range[3]); pdata->keep_mode = 1; pdata->freq_sample_period = 5; } if (!of_property_read_u32_array(pdev->dev.of_node, "keep_mode_min_range", pdata->keep_mode_min_range, sizeof(pdata->keep_mode_min_range)/sizeof(u32))) { pdata->keep_min_exist = 1; THERMAL_INFO("keep_mode_min_range: [%7d, %3d, %d, %d]\n", pdata->keep_mode_min_range[0], pdata->keep_mode_min_range[1], pdata->keep_mode_min_range[2], pdata->keep_mode_min_range[3]); } } else { THERMAL_INFO("keep_mode is disabled\n"); } 
if(pdata->keep_mode || !pdata->trim_flag){ INIT_DELAYED_WORK(&pdata->thermal_work, thermal_work); schedule_delayed_work(&pdata->thermal_work, msecs_to_jiffies(100)); atomic_set(&freq_update_flag, 0); } prop = of_find_property(pdev->dev.of_node, "trip_point", &val); if (!prop){ dev_err(&pdev->dev, "read %s length error\n","trip_point"); goto err; } if (pdata->keep_mode) { pdata->temp_trip_count = 2; } else { pdata->temp_trip_count=val/cells/sizeof(u32); } tmp_level=kzalloc(sizeof(*tmp_level)*pdata->temp_trip_count,GFP_KERNEL); pdata->tmp_trip=kzalloc(sizeof(struct temp_trip)*pdata->temp_trip_count,GFP_KERNEL); if(!tmp_level){ goto err; } if (pdata->keep_mode) { // keep mode only need one point keep_mode_temp_level_init(pdata, tmp_level); } else { ret=of_property_read_u32_array(pdev->dev.of_node,"trip_point",(u32 *)tmp_level,val/sizeof(u32)); if (ret){ dev_err(&pdev->dev, "read %s data error\n","trip_point"); goto err; } } descend=get_desend(); for (i = 0; i < pdata->temp_trip_count; i++) { pdata->tmp_trip[i].temperature=tmp_level[i].temperature; tmp_level[i].cpu_high_freq=fix_to_freq(tmp_level[i].cpu_high_freq,descend); pdata->tmp_trip[i].cpu_lower_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_high_freq); tmp_level[i].cpu_low_freq=fix_to_freq(tmp_level[i].cpu_low_freq,descend); pdata->tmp_trip[i].cpu_upper_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_low_freq); pdata->tmp_trip[i].gpu_lower_freq=tmp_level[i].gpu_low_freq; pdata->tmp_trip[i].gpu_upper_freq=tmp_level[i].gpu_high_freq; pdata->tmp_trip[i].cpu_core_num=tmp_level[i].cpu_core_num; pdata->tmp_trip[i].gpu_core_num=tmp_level[i].gpu_core_num; } ret= of_property_read_u32(pdev->dev.of_node, "idle_interval", &val); if (ret){ dev_err(&pdev->dev, "read %s error\n","idle_interval"); goto err; } pdata->idle_interval=val; ret=of_property_read_string(pdev->dev.of_node,"dev_name",&pdata->name); if (ret){ dev_err(&pdev->dev, "read %s error\n","dev_name"); goto err; } pdata->mode=THERMAL_DEVICE_ENABLED; 
if(tmp_level) kfree(tmp_level); return pdata; } err: if(tmp_level) kfree(tmp_level); if(pdata) kfree(pdata); pdata= NULL; return pdata; }
static struct amlogic_thermal_platform_data * amlogic_thermal_init_from_dts(struct platform_device *pdev) { int i=0,ret=-1,val=0,cells,descend; struct property *prop; struct temp_level *tmp_level=NULL; struct amlogic_thermal_platform_data *pdata=NULL; if(!of_property_read_u32(pdev->dev.of_node, "trip_point", &val)){ //INIT FROM DTS pdata=kzalloc(sizeof(*pdata),GFP_KERNEL); if(!pdata){ goto err; } memset((void* )pdata,0,sizeof(*pdata)); ret=of_property_read_u32(pdev->dev.of_node, "#thermal-cells", &val); if(ret){ dev_err(&pdev->dev, "dt probe #thermal-cells failed: %d\n", ret); goto err; } printk("#thermal-cells=%d\n",val); cells=val; prop = of_find_property(pdev->dev.of_node, "trip_point", &val); if (!prop){ dev_err(&pdev->dev, "read %s length error\n","trip_point"); goto err; } pdata->temp_trip_count=val/cells/sizeof(u32); printk("pdata->temp_trip_count=%d\n",pdata->temp_trip_count); tmp_level=kzalloc(sizeof(*tmp_level)*pdata->temp_trip_count,GFP_KERNEL); pdata->tmp_trip=kzalloc(sizeof(struct temp_trip)*pdata->temp_trip_count,GFP_KERNEL); if(!tmp_level){ goto err; } ret=of_property_read_u32_array(pdev->dev.of_node,"trip_point",(u32 *)tmp_level,val/sizeof(u32)); if (ret){ dev_err(&pdev->dev, "read %s data error\n","trip_point"); goto err; } descend=get_desend(); for (i = 0; i < pdata->temp_trip_count; i++) { printk("temperature=%d on trip point=%d\n",tmp_level[i].temperature,i); pdata->tmp_trip[i].temperature=tmp_level[i].temperature; printk("fixing high_freq=%d to ",tmp_level[i].cpu_high_freq); tmp_level[i].cpu_high_freq=fix_to_freq(tmp_level[i].cpu_high_freq,descend); pdata->tmp_trip[i].cpu_lower_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_high_freq); printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_high_freq,i,pdata->tmp_trip[i].cpu_lower_level); printk("fixing low_freq=%d to ",tmp_level[i].cpu_low_freq); tmp_level[i].cpu_low_freq=fix_to_freq(tmp_level[i].cpu_low_freq,descend); 
pdata->tmp_trip[i].cpu_upper_level=cpufreq_cooling_get_level(0,tmp_level[i].cpu_low_freq); printk("%d at trip point %d,level=%d\n",tmp_level[i].cpu_low_freq,i,pdata->tmp_trip[i].cpu_upper_level); pdata->tmp_trip[i].gpu_lower_freq=tmp_level[i].gpu_low_freq; pdata->tmp_trip[i].gpu_upper_freq=tmp_level[i].gpu_high_freq; printk("gpu[%d].gpu_high_freq=%d,tmp_level[%d].gpu_high_freq=%d\n",i,tmp_level[i].gpu_high_freq,i,tmp_level[i].gpu_low_freq); pdata->tmp_trip[i].cpu_core_num=tmp_level[i].cpu_core_num; printk("cpu[%d] core num==%d\n",i,pdata->tmp_trip[i].cpu_core_num); pdata->tmp_trip[i].gpu_core_num=tmp_level[i].gpu_core_num; printk("gpu[%d] core num==%d\n",i,pdata->tmp_trip[i].gpu_core_num); } ret= of_property_read_u32(pdev->dev.of_node, "idle_interval", &val); if (ret){ dev_err(&pdev->dev, "read %s error\n","idle_interval"); goto err; } pdata->idle_interval=val; printk("idle interval=%d\n",pdata->idle_interval); ret=of_property_read_string(pdev->dev.of_node,"dev_name",&pdata->name); if (ret){ dev_err(&pdev->dev, "read %s error\n","dev_name"); goto err; } printk("pdata->name:%s\n",pdata->name); pdata->mode=THERMAL_DEVICE_ENABLED; if(tmp_level) kfree(tmp_level); return pdata; } err: if(tmp_level) kfree(tmp_level); if(pdata) kfree(pdata); pdata= NULL; return pdata; }