/*
 * tegra_init_shared_bus_cap - resolve the cap clock for each table entry
 * and expose its refcnt/level sysfs attributes under @cap_kobj.
 *
 * Entries whose clock cannot be resolved are logged and skipped rather
 * than failing the whole init.
 *
 * Returns 0 on success, -EINVAL on invalid arguments (including a NULL
 * @cap_kobj), -ENOMEM if the sysfs files cannot be created.
 */
int __init tegra_init_shared_bus_cap(
	struct core_bus_cap_table *table, int table_size,
	struct kobject *cap_kobj)
{
	int i, j;
	struct clk *c = NULL;

	/*
	 * Validate *all* arguments up front: a NULL cap_kobj is a caller
	 * error (-EINVAL), not an allocation failure, and we should not
	 * mutate the table before rejecting it.
	 */
	if (!table || !table_size || (table_size > MAX_BUS_NUM) || !cap_kobj)
		return -EINVAL;

	for (i = 0, j = 0; i < table_size; i++) {
		c = tegra_get_clock_by_name(table[i].cap_name);
		if (!c) {
			pr_err("%s: failed to initialize %s table\n",
			       __func__, table[i].cap_name);
			continue;	/* best effort: skip this entry */
		}
		table[i].cap_clk = c;
		/* uncapped by default: level at max rate, no users */
		table[i].level = clk_get_max_rate(c);
		table[i].refcnt = 0;
		table[i].refcnt_attr.show = bus_cap_state_show;
		table[i].refcnt_attr.store = bus_cap_state_store;
		table[i].level_attr.show = bus_cap_level_show;
		table[i].level_attr.store = bus_cap_level_store;
		bus_cap_attributes[j++] = &table[i].refcnt_attr.attr;
		bus_cap_attributes[j++] = &table[i].level_attr.attr;
	}
	bus_cap_attributes[j] = NULL;	/* NULL-terminate the attr list */

	if (sysfs_create_files(cap_kobj, bus_cap_attributes))
		return -ENOMEM;
	return 0;
}
int tegra_auto_hotplug_init(struct mutex *cpu_lock) { int err; cpu_clk = clk_get_sys(NULL, "cpu"); cpu_g_clk = clk_get_sys(NULL, "cpu_g"); cpu_lp_clk = clk_get_sys(NULL, "cpu_lp"); if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk)) return -ENOENT; /* * Not bound to the issuer CPU (=> high-priority), has rescue worker * task, single-threaded, freezable. */ cpuquiet_wq = alloc_workqueue( "cpuquiet", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1); if (!cpuquiet_wq) return -ENOMEM; INIT_DELAYED_WORK(&cpuquiet_work, tegra_cpuquiet_work_func); INIT_WORK(&minmax_work, min_max_constraints_workfunc); idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000; idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000; up_delay = msecs_to_jiffies(UP_DELAY_MS); down_delay = msecs_to_jiffies(DOWN_DELAY_MS); cpumask_clear(&cr_online_requests); tegra3_cpu_lock = cpu_lock; cpq_state = INITIAL_STATE; enable = cpq_state == TEGRA_CPQ_DISABLED ? false : true; pr_info("Tegra cpuquiet initialized: %s\n", (cpq_state == TEGRA_CPQ_DISABLED) ? "disabled" : "enabled"); if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier)) pr_err("%s: Failed to register min cpus PM QoS notifier\n", __func__); if (pm_qos_add_notifier(PM_QOS_MAX_ONLINE_CPUS, &max_cpus_notifier)) pr_err("%s: Failed to register max cpus PM QoS notifier\n", __func__); err = cpuquiet_register_driver(&tegra_cpuquiet_driver); if (err) { destroy_workqueue(cpuquiet_wq); return err; } err = tegra_auto_sysfs(); if (err) { cpuquiet_unregister_driver(&tegra_cpuquiet_driver); destroy_workqueue(cpuquiet_wq); } return err; }
/*
 * bus_cap_update - apply the current cap policy for one bus entry.
 *
 * While at least one user holds the cap (refcnt != 0) the cap clock is
 * pinned at the requested level; otherwise it is released back to the
 * clock's maximum rate.  A missing cap clock is silently ignored.
 */
static void bus_cap_update(struct core_bus_cap_table *bus_cap)
{
	struct clk *c = bus_cap->cap_clk;

	if (!c)
		return;

	clk_set_rate(c, bus_cap->refcnt ? bus_cap->level
					: clk_get_max_rate(c));
}
int tegra_auto_hotplug_init(struct mutex *cpu_lock) { /* * Not bound to the issuer CPU (=> high-priority), has rescue worker * task, single-threaded, freezable. */ int i = 0; hotplug_wq = alloc_workqueue( "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1); if (!hotplug_wq) return -ENOMEM; INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func); cpuplug_wq = alloc_workqueue( "cpu-tegra3-plug", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1); if (!cpuplug_wq) return -ENOMEM; INIT_WORK(&cpuplug_work, tegra_auto_cpuplug_work_func); cpu_clk = clk_get_sys(NULL, "cpu"); cpu_g_clk = clk_get_sys(NULL, "cpu_g"); cpu_lp_clk = clk_get_sys(NULL, "cpu_lp"); if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk)) return -ENOENT; idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000; idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000; up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS); up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS); down_delay = msecs_to_jiffies(DOWN_DELAY_MS); is_plugging = false; tegra3_cpu_lock = cpu_lock; hp_state = INITIAL_STATE; mp_state = TEGRA_HP_IDLE; hp_init_stats(); pr_info(CPU_HOTPLUG_TAG"Tegra auto-hotplug initialized: %s\n", (hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled"); for (i = 0; i <= CONFIG_NR_CPUS; i++) { cpu_hp_active_time_stats[i].this_active_Time = 0; cpu_hp_active_time_stats[i].total_active_Time = 0; } pm_debug_cpu_hotplug = printCPUTotalActiveTime; if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier)) pr_err("%s: Failed to register min cpus PM QoS notifier\n", __func__); return 0; }
int tegra_auto_hotplug_init(struct mutex *cpu_lock) { /* * Not bound to the issuer CPU (=> high-priority), has rescue worker * task, single-threaded, freezable. */ hotplug_wq = alloc_workqueue( "cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1); if (!hotplug_wq) return -ENOMEM; INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func); cpu_clk = clk_get_sys(NULL, "cpu"); cpu_g_clk = clk_get_sys(NULL, "cpu_g"); cpu_lp_clk = clk_get_sys(NULL, "cpu_lp"); if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk)) return -ENOENT; idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000; idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000; up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS); up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS); down_delay = msecs_to_jiffies(DOWN_DELAY_MS); tegra3_cpu_lock = cpu_lock; hp_state = INITIAL_STATE; hp_init_stats(); pr_info("Tegra auto-hotplug initialized: %s\n", (hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled"); if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier)) pr_err("%s: Failed to register min cpus PM QoS notifier\n", __func__); rt_cfg_kobj = kobject_create_and_add("rt_config", kernel_kobj); if (!rt_cfg_kobj) { pr_err("cpu_tegra3: failed to create sysfs rt_cfg_kobj object"); return 0; } if (sysfs_create_files(rt_cfg_kobj, rt_cfg_attributes)) { pr_err("tegra3_dvfs: failed to create sysfs rt_cfg_kobj interface"); return 0; } return 0; }
static int init_cpu_edp_limits_calculated(void) { unsigned int max_nr_cpus = num_possible_cpus(); unsigned int temp_idx, n_cores_idx, pwr_idx; unsigned int cpu_g_minf, cpu_g_maxf; unsigned int iddq_mA; unsigned int cpu_speedo_idx; unsigned int cap, limit; struct tegra_edp_limits *edp_calculated_limits; struct tegra_system_edp_entry *power_edp_calc_limits; struct tegra_edp_cpu_leakage_params *params; int ret; struct clk *clk_cpu_g = tegra_get_clock_by_name("cpu_g"); int cpu_speedo_id = tegra_cpu_speedo_id(); /* Determine all inputs to EDP formula */ iddq_mA = tegra_get_cpu_iddq_value(); ret = edp_find_speedo_idx(cpu_speedo_id, &cpu_speedo_idx); if (ret) return ret; switch (tegra_chip_id) { case TEGRA_CHIPID_TEGRA11: params = tegra11x_get_leakage_params(cpu_speedo_idx, NULL); break; case TEGRA_CHIPID_TEGRA3: case TEGRA_CHIPID_TEGRA2: default: return -EINVAL; } edp_calculated_limits = kmalloc(sizeof(struct tegra_edp_limits) * ARRAY_SIZE(temperatures), GFP_KERNEL); BUG_ON(!edp_calculated_limits); power_edp_calc_limits = kmalloc(sizeof(struct tegra_system_edp_entry) * ARRAY_SIZE(power_cap_levels), GFP_KERNEL); BUG_ON(!power_edp_calc_limits); cpu_g_minf = 0; cpu_g_maxf = clk_get_max_rate(clk_cpu_g); freq_voltage_lut_size = (cpu_g_maxf - cpu_g_minf) / FREQ_STEP + 1; freq_voltage_lut = kmalloc(sizeof(struct tegra_edp_freq_voltage_table) * freq_voltage_lut_size, GFP_KERNEL); if (!freq_voltage_lut) { pr_err("%s: failed alloc mem for freq/voltage LUT\n", __func__); return -ENOMEM; } ret = edp_relate_freq_voltage(clk_cpu_g, cpu_speedo_idx, freq_voltage_lut_size, freq_voltage_lut); if (ret) { kfree(freq_voltage_lut); return ret; } if (freq_voltage_lut_size != freq_voltage_lut_size_saved) { /* release previous table if present */ kfree(freq_voltage_lut_saved); /* create table to save */ freq_voltage_lut_saved = kmalloc(sizeof(struct tegra_edp_freq_voltage_table) * freq_voltage_lut_size, GFP_KERNEL); if (!freq_voltage_lut_saved) { pr_err("%s: failed alloc mem for freq/voltage 
LUT\n", __func__); kfree(freq_voltage_lut); return -ENOMEM; } freq_voltage_lut_size_saved = freq_voltage_lut_size; } memcpy(freq_voltage_lut_saved, freq_voltage_lut, sizeof(struct tegra_edp_freq_voltage_table) * freq_voltage_lut_size); /* Calculate EDP table */ for (n_cores_idx = 0; n_cores_idx < max_nr_cpus; n_cores_idx++) { for (temp_idx = 0; temp_idx < ARRAY_SIZE(temperatures); temp_idx++) { edp_calculated_limits[temp_idx].temperature = temperatures[temp_idx]; limit = edp_calculate_maxf(params, temperatures[temp_idx], -1, iddq_mA, n_cores_idx); if (limit == -EINVAL) return -EINVAL; /* apply safety cap if it is specified */ if (n_cores_idx < 4) { cap = params->safety_cap[n_cores_idx]; if (cap && cap < limit) limit = cap; } edp_calculated_limits[temp_idx]. freq_limits[n_cores_idx] = limit; } for (pwr_idx = 0; pwr_idx < ARRAY_SIZE(power_cap_levels); pwr_idx++) { power_edp_calc_limits[pwr_idx].power_limit_100mW = power_cap_levels[pwr_idx] / 100; limit = edp_calculate_maxf(params, 50, power_cap_levels[pwr_idx], iddq_mA, n_cores_idx); if (limit == -EINVAL) return -EINVAL; power_edp_calc_limits[pwr_idx]. freq_limits[n_cores_idx] = limit; } } /* * If this is an EDP table update, need to overwrite old table. * The old table's address must remain valid. */ if (edp_limits != edp_default_limits) { memcpy(edp_limits, edp_calculated_limits, sizeof(struct tegra_edp_limits) * ARRAY_SIZE(temperatures)); kfree(edp_calculated_limits); } else { edp_limits = edp_calculated_limits; edp_limits_size = ARRAY_SIZE(temperatures); } if (power_edp_limits != power_edp_default_limits) { memcpy(power_edp_limits, power_edp_calc_limits, sizeof(struct tegra_system_edp_entry) * ARRAY_SIZE(power_cap_levels)); kfree(power_edp_calc_limits); } else { power_edp_limits = power_edp_calc_limits; power_edp_limits_size = ARRAY_SIZE(power_cap_levels); } kfree(freq_voltage_lut); return 0; }