/*
 * Module-param setter: enable (or re-enable) the ANX7808 cable-detect IRQ.
 *
 * Parses @val as a bool into irq_enabled via param_set_bool().  When the
 * resulting value is true, the chip is re-initialised: if the IRQ was
 * already enabled before this write (old_val), it is first disabled and
 * the TX block powered down so the subsequent init starts from a clean
 * state.  The cable-detect ISR is then invoked once by hand to pick up
 * the current cable state before the IRQ is re-armed.
 *
 * Returns 0 on success, -ENODEV if the driver has not probed yet, or
 * the error from param_set_bool().
 */
static int anx7808_enable_irq(const char *val, const struct kernel_param *kp)
{
	int ret;
	int old_val = irq_enabled;

	if (!the_chip)
		return -ENODEV;

	ret = param_set_bool(val, kp);
	if (ret) {
		/* fixed typo in the error message: "slimpot" -> "slimport" */
		pr_err("failed to enable slimport\n");
		return ret;
	}

	if (irq_enabled) {
		if (old_val) {
			/* already enabled: quiesce before re-initialising */
			disable_irq(the_chip->client->irq);
			sp_tx_power_down_and_init();
			anx7808_unvote_usb_clk(the_chip);
		}
		/* sample the current cable state, then re-arm the IRQ */
		anx7808_cbl_det_isr(the_chip->client->irq, the_chip);
		enable_irq(the_chip->client->irq);
	}
	return 0;
}
static int bluetooth_power_param_set(const char *val, struct kernel_param *kp) { int ret; printk(KERN_DEBUG "%s: previous power_state=%d\n", __func__, bluetooth_power_state); /* lock change of state and reference */ spin_lock(&bt_power_lock); ret = param_set_bool(val, kp); if (power_control) { if (!ret) ret = (*power_control)(bluetooth_power_state); else printk(KERN_ERR "%s param set bool failed (%d)\n", __func__, ret); } else { printk(KERN_INFO "%s: deferring power switch until probe\n", __func__); } spin_unlock(&bt_power_lock); printk(KERN_INFO "%s: current power_state=%d\n", __func__, bluetooth_power_state); return ret; }
static int msm_serial_debug_remove(const char *val, struct kernel_param *kp) { int ret; static int pre_stat = 1; ret = param_set_bool(val, kp); if (ret) return ret; if (pre_stat == *(int *)kp->arg) return 0; pre_stat = *(int *)kp->arg; if (*(int *)kp->arg) { msm_serial_debug_init(init_data.base, init_data.irq, init_data.clk_device, init_data.signal_irq); printk(KERN_INFO "enable FIQ serial debugger\n"); return 0; } #if defined(CONFIG_MSM_SERIAL_DEBUGGER_CONSOLE) unregister_console(&msm_serial_debug_console); #endif free_irq(init_data.signal_irq, 0); msm_fiq_set_handler(NULL, 0); msm_fiq_disable(init_data.irq); msm_fiq_unselect(init_data.irq); clk_disable(debug_clk); printk(KERN_INFO "disable FIQ serial debugger\n"); return 0; }
static int hp_state_set(const char *arg, const struct kernel_param *kp) { int ret = 0; int old_state; if (!tegra3_cpu_lock) return ret; mutex_lock(tegra3_cpu_lock); old_state = hp_state; ret = param_set_bool(arg, kp); /* set idle or disabled only */ if (ret == 0) { if ((hp_state == TEGRA_HP_DISABLED) && (old_state != TEGRA_HP_DISABLED)) pr_info("Tegra auto-hotplug disabled\n"); else if (hp_state != TEGRA_HP_DISABLED) { if (old_state == TEGRA_HP_DISABLED) { pr_info("Tegra auto-hotplug enabled\n"); hp_init_stats(); } /* catch-up with governor target speed */ tegra_cpu_set_speed_cap(NULL); } } else pr_warn("%s: unable to set tegra hotplug state %s\n", __func__, arg); mutex_unlock(tegra3_cpu_lock); return ret; }
/*
 * Parse the bool parameter and, on success, propagate the change by
 * refreshing the subscribe/inform state.
 */
static int set_subscribe_inform_info(const char *val, struct kernel_param *kp)
{
	int err = param_set_bool(val, kp);

	return err ? err : do_refresh(val, kp);
}
int param_set_invbool(const char *val, struct kernel_param *kp) { int boolval, ret; struct kernel_param dummy = { .arg = &boolval }; ret = param_set_bool(val, &dummy); if (ret == 0) *(int *)kp->arg = !boolval; return ret; }
/*
 * sysfs store handler for the vdd-restriction "enabled" attribute.
 *
 * Parses @buf as a bool and applies/removes the voltage (or frequency)
 * restriction on every rail.  A failure on one rail does not abort the
 * loop; the aggregate enabled state is updated from how many rails were
 * actually switched.  Always returns @count so the sysfs write is
 * consumed even when individual rails fail (errors are logged).
 */
static ssize_t vdd_rstr_en_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = 0;
	int i = 0;
	uint8_t en_cnt = 0;
	uint8_t dis_cnt = 0;
	uint32_t val = 0;
	/* zero-init the whole struct: the original left every field but
	 * .arg indeterminate, and param_set_bool() may consult others */
	struct kernel_param kp = { .arg = &val };
	struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);

	mutex_lock(&vdd_rstr_mutex);
	ret = param_set_bool(buf, &kp);
	if (ret) {
		pr_err("Invalid input %s for enabled\n", buf);
		goto done_vdd_rstr_en;
	}

	/* already disabled and asked to disable: nothing to do */
	if ((val == 0) && (en->enabled == 0))
		goto done_vdd_rstr_en;

	for (i = 0; i < rails_cnt; i++) {
		if (rails[i].freq_req == 1 && freq_table_get)
			ret = vdd_restriction_apply_freq(&rails[i],
					(val) ? 0 : -1);
		else
			ret = vdd_restriction_apply_voltage(&rails[i],
					(val) ? 0 : -1);
		/*
		 * Even if fail to set one rail, still try to set the
		 * others. Continue the loop
		 */
		if (ret)
			pr_err("Set vdd restriction for %s failed\n",
					rails[i].name);
		else {
			if (val)
				en_cnt++;
			else
				dis_cnt++;
		}
	}

	/* As long as one rail is enabled, vdd rstr is enabled */
	if (val && en_cnt)
		en->enabled = 1;
	else if (!val && (dis_cnt == rails_cnt))
		en->enabled = 0;

done_vdd_rstr_en:
	mutex_unlock(&vdd_rstr_mutex);
	return count;
}
/*
 * Module-param setter for bluetooth power with direct GPIO sequencing.
 *
 * Parses the bool into bluetooth_power_state under bt_power_lock and,
 * if the platform power_control hook is registered, invokes it.
 * Afterwards the function drives GPIOs 16 and 48 directly to power the
 * module off (state 0) or run its power-on sequence (state != 0).
 * NOTE(review): GPIO numbers 16/48 and the msleep() delays are
 * board-specific magic values — confirm against the board schematic.
 */
static int bluetooth_power_param_set(const char *val, struct kernel_param *kp)
{
	int ret;
	printk(KERN_DEBUG "%s: previous power_state=%d\n",
		__func__, bluetooth_power_state);
	/* lock change of state and reference */
	spin_lock(&bt_power_lock);
	ret = param_set_bool(val, kp);
	if (power_control) {
		if (!ret){
			/* apply the newly parsed state via the platform hook */
			ret = (*power_control)(bluetooth_power_state);
			printk(KERN_ERR "%s: bluetooth power control, return = (%d)\n", __func__, ret);
		}
		else{
			printk(KERN_ERR "%s param set bool failed (%d)\n",
				__func__, ret);
		}
	} else {
		/* probe has not registered a hook yet; state applies later */
		printk(KERN_INFO "%s: deferring power switch until probe\n",
			__func__);
	}
	spin_unlock(&bt_power_lock);
	printk(KERN_INFO "%s: current power_state=%d\n",
		__func__, bluetooth_power_state);
	if(bluetooth_power_state == 0){
		/* power off: drop UART power, GPIO 16 low, settle, GPIO 48 low */
		hsuart_power(0);
		gpio_set_value(16,0);
		msleep(105);
		gpio_set_value(48,0);
	}
	else{
		/* power on: sequence GPIOs 16/48 with the required delays */
		gpio_set_value(16,0);
		msleep(1);
		gpio_set_value(48,1);
		msleep(105);
		gpio_set_value(16,1);
	}
	printk(" ***** ----------- --------- GPIO 48 = %d\n",gpio_get_value(48));
	printk(" ***** ----------- --------- GPIO 16 = %d\n",gpio_get_value(16));
	return ret;
}
/*
 * Parse the bool and, when the OTG wakelock transceiver is registered,
 * re-run handling of its most recent event so the new setting takes
 * effect immediately.
 */
static int set_enabled(const char *val, const struct kernel_param *kp)
{
	int err;

	err = param_set_bool(val, kp);
	if (err)
		return err;

	if (otgwl_xceiv)
		otgwl_handle_event(otgwl_xceiv->last_event);

	return 0;
}
static int lp2_in_idle_set(const char *arg, const struct kernel_param *kp) { #ifdef CONFIG_PM_SLEEP int ret; /* If LP2 in idle is permanently disabled it can't be re-enabled. */ if (lp2_in_idle_modifiable) { ret = param_set_bool(arg, kp); return ret; } #endif return -ENODEV; }
/* This one must be bool. */ int param_set_invbool(const char *val, const struct kernel_param *kp) { int ret; bool boolval; struct kernel_param dummy; dummy.arg = &boolval; dummy.flags = KPARAM_ISBOOL; ret = param_set_bool(val, &dummy); if (ret == 0) *(bool *)kp->arg = !boolval; return ret; }
/* Returns 0, or -errno. arg is in kp->arg. */
/*
 * Setter for the ramdump_enable parameter.  On a successful parse the
 * debugger upload magic is programmed (or cleared) to match the new
 * value.  Unlike the original, a parse failure returns immediately
 * without re-writing the upload magic from the stale value.
 */
static int param_set_ramdump_enable(const char *val,
		const struct kernel_param *kp)
{
	int ret = param_set_bool(val, kp);

	/* don't touch the upload magic if the value failed to parse */
	if (ret)
		return ret;

	printk(KERN_INFO "%s ramdump_enable = %d\n", __func__, ramdump_enable);
	if (ramdump_enable)
		cdebugger_set_upload_magic(0xDEAFABCD);
	else
		cdebugger_set_upload_magic(0);
	return ret;
}
/*
 * "enabled" module-param setter for msm_thermal.  Clearing the flag
 * shuts thermal mitigation down; setting it requires no action beyond
 * recording the value.
 */
static int set_enabled(const char *val, const struct kernel_param *kp)
{
	int err = param_set_bool(val, kp);

	if (enabled)
		pr_info("msm_thermal: no action for enabled = %d\n", enabled);
	else
		disable_msm_thermal();
	pr_info("msm_thermal: enabled = %d\n", enabled);

	return err;
}
/*
 * Setter for the enable_pp parameter.  Disabling the page pools also
 * shrinks them immediately, releasing all pooled pages and logging how
 * many were freed.
 *
 * Returns 0 on success or the param_set_bool() parse error (the
 * original silently discarded parse failures).
 */
static int enable_pp_set(const char *arg, const struct kernel_param *kp)
{
	int total_pages, available_pages;
	int ret;

	ret = param_set_bool(arg, kp);
	if (ret)
		return ret;

	if (!enable_pp) {
		shrink_page_pools(&total_pages, &available_pages);
		pr_info("disabled page pools and released pages, "
			"total_pages_released=%d, free_pages_available=%d",
			total_pages, available_pages);
	}
	return 0;
}
/*
 * Toggle DVFS on the CPU rail: after parsing the bool, disable or
 * re-enable the vdd_cpu rail to match the new value.
 */
int tegra_dvfs_disable_cpu_set(const char *arg, const struct kernel_param *kp)
{
	int err = param_set_bool(arg, kp);

	if (err)
		return err;

	if (tegra_dvfs_cpu_disabled)
		tegra_dvfs_rail_disable(&tegra3_dvfs_rail_vdd_cpu);
	else
		tegra_dvfs_rail_enable(&tegra3_dvfs_rail_vdd_cpu);

	return 0;
}
static int enable_fast_hotplug(const char *val, const struct kernel_param *kp){ int cpu; int ret = param_set_bool(val, kp); if(!fast_hotplug_enabled){ pr_info(HOTPLUG_INFO_TAG"Fast hotplug disabled\n"); mutex_lock(&mutex); flush_workqueue(hotplug_wq); for_each_possible_cpu(cpu){ if(cpu == 0) continue; cpu_up(cpu); } is_sleeping = true; mutex_unlock(&mutex); } else {
/*
 * "enabled" setter: clearing the flag stops msm_thermal mitigation and
 * re-initialises hotplug control; setting it only records the value.
 */
static int __ref set_enabled(const char *val, const struct kernel_param *kp)
{
	int err = param_set_bool(val, kp);

	if (enabled) {
		pr_info("%s: no action for enabled = %d\n",
			KBUILD_MODNAME, enabled);
	} else {
		disable_msm_thermal();
		hotplug_init();
	}
	pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);

	return err;
}
/*
 * One-shot initialiser for the rmnet data driver, exposed as a module
 * parameter.  Re-running it after initialisation is rejected with
 * -EINVAL.
 */
static int rmnet_init(const char *val, const struct kernel_param *kp)
{
	int err;

	if (rmnet_data_init) {
		pr_err("dynamic setting rmnet params currently unsupported\n");
		return -EINVAL;
	}

	err = param_set_bool(val, kp);
	if (err)
		return err;

	rmnet_data_start();
	return err;
}
/*
 * Setter for the shrink_pp parameter.  When set, the page pools are
 * shrunk immediately and the elapsed time plus page counts are logged.
 *
 * Returns 0 on success or the param_set_bool() parse error (the
 * original silently discarded parse failures).
 */
static int shrink_set(const char *arg, const struct kernel_param *kp)
{
	int cpu = smp_processor_id();
	unsigned long long t1, t2;
	int total_pages, available_pages;
	int ret;

	ret = param_set_bool(arg, kp);
	if (ret)
		return ret;

	if (shrink_pp) {
		t1 = cpu_clock(cpu);
		shrink_page_pools(&total_pages, &available_pages);
		t2 = cpu_clock(cpu);
		pr_info("shrink page pools: time=%lldns, "
			"total_pages_released=%d, free_pages_available=%d",
			t2 - t1, total_pages, available_pages);
	}
	return 0;
}
/*
 * "enabled" setter that also restarts temperature polling when the
 * flag transitions from off to on.  Disabling stops msm_thermal
 * mitigation.
 */
static int __cpuinit set_enabled(const char *val, const struct kernel_param *kp)
{
	bool was_enabled = enabled;
	int err = param_set_bool(val, kp);

	if (enabled)
		pr_info("%s: no action for enabled = %d\n",
			KBUILD_MODNAME, enabled);
	else
		disable_msm_thermal();
	pr_info("%s: enabled = %d\n", KBUILD_MODNAME, enabled);

	/* (re)start polling */
	if (enabled && !was_enabled)
		schedule_delayed_work(&check_temp_work, 0);

	return err;
}
/*
 * Module-param setter for the Tegra auto-hotplug state (variant with
 * work-queue cancellation).
 *
 * Transition into DISABLED: the cpu lock is dropped while the hotplug
 * and cpuplug work items are cancelled synchronously (the *_sync calls
 * would deadlock if those work items take the same lock — NOTE(review):
 * confirm against the work handlers), then reacquired to clear the
 * is_plugging flag.  Transition out of DISABLED: statistics are reset,
 * the active-time stamp is taken, and the governor speed cap refreshed.
 */
static int hp_state_set(const char *arg, const struct kernel_param *kp)
{
	int ret = 0;
	int old_state;

	if (!tegra3_cpu_lock)
		return ret;

	mutex_lock(tegra3_cpu_lock);
	old_state = hp_state;
	ret = param_set_bool(arg, kp);

	/* set idle or disabled only */
	if (ret == 0) {
		if ((hp_state == TEGRA_HP_DISABLED) &&
		    (old_state != TEGRA_HP_DISABLED)) {
			/* drop the lock across the synchronous cancels */
			mutex_unlock(tegra3_cpu_lock);
			cancel_delayed_work_sync(&hotplug_work);
			cancel_work_sync(&cpuplug_work);
			mutex_lock(tegra3_cpu_lock);
			if (is_plugging) {
				pr_info(CPU_HOTPLUG_TAG" is_plugging is true, set to false\n");
				is_plugging = false;
			}
			pr_info(CPU_HOTPLUG_TAG" Tegra auto hotplug disabled\n");
		} else if (hp_state != TEGRA_HP_DISABLED) {
			if (old_state == TEGRA_HP_DISABLED) {
				pr_info(CPU_HOTPLUG_TAG" Tegra auto-hotplug enabled\n");
				/* restart hotplug statistics from a clean slate */
				hp_init_stats();
			}
			active_start_time = ktime_get();
			/* catch-up with governor target speed */
			tegra_cpu_set_speed_cap(NULL);
		}
	} else
		pr_warn(CPU_HOTPLUG_TAG" %s: unable to set tegra hotplug state %s\n",
			__func__, arg);

	mutex_unlock(tegra3_cpu_lock);
	return ret;
}
/*
 * Parse the "hideme" bool parameter and hide or re-show this module in
 * the kernel's module lists accordingly.
 */
static int param_kmod_hide(const char *val, struct kernel_param *kp)
{
	int err = param_set_bool(val, kp);

	if (err) {
#ifdef DEBUG
		printk(KERN_ALERT
			"%s error: could not parse LKM hideme parameters\n",
			MODULE_NAME);
#endif
		return err;
	}

	if (hideme)
		module_hide();
	else
		module_show();

	return 0;
}
/*
 * Setter for pwrio_always_on: under the power-rail spinlock, parse the
 * bool and either force every IO rail on or disable the masked rails.
 */
static int pwrio_always_on_set(const char *arg, const struct kernel_param *kp)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&pwr_lock, flags);

	err = param_set_bool(arg, kp);
	if (!err) {
		if (pwrio_always_on)
			pwr_io_enable(0xFFFFFFFF);
		else
			pwr_io_disable(pwrio_disabled_mask);
	}

	spin_unlock_irqrestore(&pwr_lock, flags);
	return err;
}
/*
 * Setter for pwrdet_always_on: under the power-rail spinlock, parse
 * the bool and either start power detection on all rails or latch the
 * current detect state.
 */
static int pwrdet_always_on_set(const char *arg, const struct kernel_param *kp)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&pwr_lock, flags);

	err = param_set_bool(arg, kp);
	if (!err) {
		if (pwrdet_always_on)
			pwr_detect_start(0xFFFFFFFF);
		else
			pwr_detect_latch();
	}

	spin_unlock_irqrestore(&pwr_lock, flags);
	return err;
}
/*
 * Module-param setter gating the keypad interrupt enable (EnableKeyInt).
 *
 * The flag can only be written once: while it is still clear, the value
 * is parsed with param_set_bool().  On a parse failure — or when the
 * shared-memory flag reports a recovery-mode boot — the flag is forced
 * to 1.  Once set, further writes are ignored.  Always returns 0 so
 * module load never fails on this parameter.
 */
static int Q7x27_kybd_param_set(const char *val, struct kernel_param *kp)
{
	int ret=1;
	if(!EnableKeyInt) {
		ret = param_set_bool(val, kp);
		//printk(KERN_ERR "%s: EnableKeyInt= %d\n", __func__, EnableKeyInt);
		fih_printk(Q7x27_kybd_debug_mask, FIH_DEBUG_ZONE_G0,"%s: EnableKeyInt= %d\n", __func__, EnableKeyInt);
		if(ret) {
			/* bad input: fail safe by forcing the key interrupt on */
			//printk(KERN_ERR "%s param set bool failed (%d)\n",
			//	__func__, ret);
			fih_printk(Q7x27_kybd_debug_mask, FIH_DEBUG_ZONE_G0,"%s param set bool failed (%d)\n",__func__, ret);
			EnableKeyInt = 1;
		}
		/* FIH, Debbie, 2010/01/05 { */
		/* modify for key definition of OTA update*/
		else {
			/* recovery-mode boot forces the interrupt on regardless */
			if(fih_read_kpd_from_smem()) {
				fih_printk(Q7x27_kybd_debug_mask, FIH_DEBUG_ZONE_G0,"enter recovery mode and set EnableKeyInt = 1\n");
				EnableKeyInt = 1;
			}
		}
		/* FIH, Debbie, 2010/01/05 } */
		return 0;
	} else {
		/* flag already set: subsequent writes are no-ops */
		//printk(KERN_ERR "has alreay set EnableKeyInt\n");
		fih_printk(Q7x27_kybd_debug_mask, FIH_DEBUG_ZONE_G0,"has alreay set EnableKeyInt\n");
		return 0;
	}
}