/*
 * gpio_keys_check_keytrigger_event - evaluate multi-key trigger combinations.
 *
 * Walks every registered button under kdev->keytrigger_lock and AND-folds
 * button states into two aggregate flags:
 *   reboot  - remains 1 only while every OPT_REBOOT_TRIGGER button is
 *             satisfied (level mode: button down; edge mode: state changed
 *             since the previous scan);
 *   console - remains 1 only while every OPT_CONSOLE_TRIGGER button is down.
 * Changes in the aggregates are broadcast on the corresponding raw notifier
 * chains.  The irqsave lock protects the scan against the key ISR path.
 */
static void gpio_keys_check_keytrigger_event(struct gpio_keys_dev *kdev)
{
	unsigned long flags;
	int i, reboot, console;

	/* Start from "combo satisfied" and AND each relevant button in. */
	reboot = console = 1;

	spin_lock_irqsave(&kdev->keytrigger_lock, flags);

	for (i = 0; i < kdev->nbtns; i++) {
		struct btn_ctxt *btn = &kdev->pbtns[i];
		struct gpio_keys_button *button = btn->btn;

		/* Slots without a bound button are skipped. */
		if( button == NULL )
			continue;

#ifdef CONFIG_GPIO_KEYS_REBOOT_TRIGGER
		if( button->options & OPT_REBOOT_TRIGGER ) {
			if( button->options & OPT_REBOOT_TRIGGER_EDGE ) {
				/* Edge mode: a transition counts, not a level. */
				reboot &=(btn->state != btn->prev_state);
			} else {
				reboot &= btn->state;
			}
		}
#endif
#ifdef CONFIG_GPIO_KEYS_CONSOLE_TRIGGER
		if( button->options & OPT_CONSOLE_TRIGGER )
			console &= btn->state;
#endif
	}

#ifdef CONFIG_GPIO_KEYS_REBOOT_TRIGGER
	/* NOTE(review): the "reboot ||" term fires the chain on every scan
	 * while the combo stays satisfied, not only on a state change —
	 * confirm repeated notifications are intended. */
	if( reboot || kdev->reboot_state != reboot ) {
		//printk("%s: reboot event; reboot=%d\n", __FUNCTION__, reboot);
		kdev->reboot_state = reboot;
		raw_notifier_call_chain(&reboot_key_notifier_list, reboot, NULL);
	}
#endif
#ifdef CONFIG_GPIO_KEYS_CONSOLE_TRIGGER
	/* Console trigger notifies only on an actual aggregate change. */
	if( kdev->console_state != console ) {
		kdev->console_state = console;
		raw_notifier_call_chain(&console_key_notifier_list, console, NULL);
	}
#endif

	spin_unlock_irqrestore(&kdev->keytrigger_lock, flags);
	return;
}
/*
 * grade_notify - publish the grader's current grade on the simon chain.
 *
 * The domain id is smuggled through the notifier's void* argument; the
 * whole broadcast runs under simon_lock.
 */
static void grade_notify(struct tegra_simon_grader *grader)
{
	mutex_lock(&simon_lock);
	raw_notifier_call_chain(&simon_nh, grader->grade,
				(void *)((long)grader->domain));
	mutex_unlock(&simon_lock);
}
/*
 * busfreq_notify - broadcast @event on the busfreq notifier chain.
 *
 * Returns 0 on success, or the negative errno extracted from the chain
 * result via notifier_to_errno().
 */
static int busfreq_notify(enum busfreq_event event)
{
	int chain_ret;

	chain_ret = raw_notifier_call_chain(&busfreq_notifier_chain, event, NULL);

	return notifier_to_errno(chain_ret);
}
/**
 * automotivenevents_notify - notification about wall, cradle
 * and input2 events
 *
 * Broadcasts @reason on the automotiveio chain under the raw spinlock,
 * so callers may invoke it from any context the lock permits.
 */
void automotivenevents_notify(unsigned long reason, void *arg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&automotive_lock, flags);
	/* NOTE(review): @arg is accepted but NULL is passed to the chain
	 * instead — confirm whether arg should be forwarded to listeners. */
	raw_notifier_call_chain(&automotiveio_chain, reason, NULL);
	raw_spin_unlock_irqrestore(&automotive_lock, flags);
}
/*
 * dsa_port_notify - raise event @e on the notifier chain of @dp's tree.
 *
 * Returns 0, or a negative errno decoded from the chain result.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	int err = raw_notifier_call_chain(&dp->ds->dst->nh, e, v);

	return notifier_to_errno(err);
}
/*
 * state_store - sysfs handler for writes to /sys/power/state.
 *
 * Parses the written token (up to the first newline) and triggers the
 * matching power transition: "disk" hibernates; any string from
 * pm_states[] enters that suspend state; with CONFIG_FAST_BOOT, "dmem"
 * arms a fake shutdown and maps to PM_SUSPEND_MEM.
 * Returns @n on success, a negative errno (default -EINVAL) otherwise.
 */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	/* Scan of pm_states[] starts at the shallowest selectable state. */
#ifdef CONFIG_EARLYSUSPEND
	suspend_state_t state = PM_SUSPEND_ON;
#else
	suspend_state_t state = PM_SUSPEND_STANDBY;
#endif
	const char * const *s;
#endif
	char *p;
	int len;
	int error = -EINVAL;

	/* Compare only up to the trailing newline, if any. */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* First, check if we are requested to hibernate */
	if (len == 4 && !strncmp(buf, "disk", len)) {
		error = hibernate();
		goto Exit;
	}

#ifdef CONFIG_SUSPEND
	/* Walk pm_states[] looking for an exact label match. */
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
			break;
	}
#ifdef CONFIG_FAST_BOOT
	if (len == 4 && !strncmp(buf, "dmem", len)) {
		pr_info("%s: fake shut down!!!\n", __func__);
		fake_shut_down = true;
		/* Tell fake-shutdown listeners before entering mem sleep. */
		raw_notifier_call_chain(&fsd_notifier_list,
				FAKE_SHUT_DOWN_CMD_ON, NULL);
		state = PM_SUSPEND_MEM;
		error = 0;
	}
#endif
	if (state < PM_SUSPEND_MAX && *s) {
#ifdef CONFIG_EARLYSUSPEND
		/* Early-suspend builds route the request to the early-suspend
		 * state machine instead of entering the state directly. */
		if (state == PM_SUSPEND_ON || valid_state(state)) {
			error = 0;
			request_suspend_state(state);
		}
#else
		error = enter_state(state);
#endif
	}
#endif

 Exit:
	return error ? error : n;
}
static inline int sock_notifier_notify(unsigned long event, struct sock *sk) { int result; unsigned long flags; read_lock_irqsave(¬ifier_lock, flags); result = raw_notifier_call_chain(¬ifier_list, event, sk); read_unlock_irqrestore(¬ifier_lock, flags); return result; }
/*
 * modem_notify_task - work handler that drains pending modem events.
 *
 * Each bit in vmdata->ntf_flags marks one pending event id; every set bit
 * is cleared atomically and delivered exactly once on vmdata->ntf.
 */
static void modem_notify_task(struct work_struct *work)
{
	int evt;

	for (evt = 0; evt < MDM_EVT_NOTIFY_NUM; evt++) {
		if (!test_and_clear_bit(evt, &vmdata->ntf_flags))
			continue;
		raw_notifier_call_chain(&vmdata->ntf, evt, NULL);
	}
}
/*
 * mm_common_disable_clock - drop one reference on the common MM clock.
 *
 * BUGs if called with no outstanding enable.  When the last reference is
 * released the hardware clock is gated and CLK_DISABLE listeners are told.
 */
void mm_common_disable_clock(struct mm_common *common)
{
	BUG_ON(common->mm_hw_is_on == 0);

	common->mm_hw_is_on--;
	if (common->mm_hw_is_on != 0)
		return;

	if (common->common_clk)
		clk_disable(common->common_clk);

	raw_notifier_call_chain(&common->notifier_head,
				MM_FMWK_NOTIFY_CLK_DISABLE, NULL);
}
void mm_common_enable_clock(struct mm_common *common) { if (common->mm_hw_is_on == 0) { if (common->common_clk) { clk_enable(common->common_clk); if (strncmp(common->mm_name, "mm_h264", 7)) clk_reset(common->common_clk); } raw_notifier_call_chain(&common->notifier_head, \ MM_FMWK_NOTIFY_CLK_ENABLE, NULL); \ } common->mm_hw_is_on++; }
void ipcs_intr_tasklet_handler(unsigned long data) { cp_crashed = 0; if (IpcCPCrashCheck()) { cp_crashed = 1; spin_lock_bh(&cp_state_notifier_lock); raw_notifier_call_chain(&cp_state_notifier_list, IPC_CPSTATE_CRASHED, NULL); spin_unlock_bh(&cp_state_notifier_lock); /* schedule the work on the decidated CP crash dump work queue */ queue_work(g_ipc_info.crash_dump_workqueue, &g_ipc_info.cp_crash_dump_wq); IPC_ProcessEvents(); } else { IPC_ProcessEvents(); wake_unlock(&ipc_wake_lock); } }
static void gpio_keys_fake_off_check(unsigned long _data) { struct input_dev *input = (struct input_dev *)_data; unsigned int type = EV_KEY; if (fake_pressed == false) return ; printk(KERN_DEBUG"keys: make event\n"); fake_shut_down = false; raw_notifier_call_chain(&fsd_notifier_list, FAKE_SHUT_DOWN_CMD_OFF, NULL); input_event(input, type, KEY_FAKE_PWR, 1); input_sync(input); input_event(input, type, KEY_FAKE_PWR, 0); input_sync(input); }
/*
 * mm_fmwk_job_scheduler - work handler driving one MM core's job queue.
 *
 * Takes the highest-priority job from the core's plist and, if the
 * hardware is idle, starts it (arming a timeout deadline) or completes it
 * immediately when the device reports synchronous success.  If the
 * hardware is busy past its deadline the job is aborted.  While hardware
 * stays busy a watchdog timer is re-armed; otherwise the core clock
 * reference taken at entry is dropped.
 */
static void mm_fmwk_job_scheduler(struct work_struct *work)
{
	mm_job_status_e status = MM_JOB_STATUS_INVALID;
	bool is_hw_busy = false;
	struct dev_job_list *job_list_elem;
	struct mm_core *core_dev = container_of(work,
					struct mm_core,
					job_scheduler);
	MM_CORE_HW_IFC *hw_ifc = &core_dev->mm_device;

	/* Nothing queued: nothing to do (clock was never enabled). */
	if (plist_head_empty(&core_dev->job_list))
		return;
	job_list_elem = plist_first_entry(
			&(core_dev->job_list),
			struct dev_job_list, core_list);

	if (mm_core_enable_clock(core_dev))
		goto mm_fmwk_job_scheduler_done;

	is_hw_busy = hw_ifc->mm_get_status(hw_ifc->mm_device_id);
	if (!is_hw_busy) {
		if (job_list_elem->job.size) {
			/* Bookkeeping of cache-clean vs dirty submissions. */
			if (job_list_elem->job.status == MM_JOB_STATUS_READY)
				clean_cnt++;
			if (job_list_elem->job.status == MM_JOB_STATUS_DIRTY) {
				mm_common_cache_clean();
				dirty_cnt++;
				if ((dirty_cnt % 1000) == 0)
					pr_debug("mm jobs dirty=%d, clean=%d\n",
						dirty_cnt, clean_cnt);
			}
			status = hw_ifc->mm_start_job(
					hw_ifc->mm_device_id,
					&job_list_elem->job, 0);
			if (status < MM_JOB_STATUS_SUCCESS) {
				/* Job accepted asynchronously: arm the
				 * timeout deadline and mark hw busy. */
				getnstimeofday(&core_dev->sched_time);
				timespec_add_ns(
					&core_dev->sched_time,
					hw_ifc->mm_timeout * NSEC_PER_MSEC);
				core_dev->mm_core_idle = false;
				is_hw_busy = true;
				pr_debug("job posted ");
				raw_notifier_call_chain(
					&core_dev->mm_common->notifier_head,
					MM_FMWK_NOTIFY_JOB_STARTED, NULL);
			} else {
				/* Completed synchronously: retire it and
				 * reschedule ourselves for the next job. */
				core_dev->mm_core_idle = true;
				job_list_elem->job.status
						= MM_JOB_STATUS_SUCCESS;
				mm_common_job_completion(
					job_list_elem, core_dev);
				SCHEDULER_WORK(core_dev,
					&core_dev->job_scheduler);
			}
		} else {
			/* Zero-size job: nothing to run, retire directly. */
			job_list_elem->job.status
					= MM_JOB_STATUS_SUCCESS;
			mm_common_job_completion(
				job_list_elem, core_dev);
			SCHEDULER_WORK(core_dev,
				&core_dev->job_scheduler);
		}
	} else {
		/* Hardware busy: abort the job if its deadline has passed. */
		struct timespec cur_time;
		getnstimeofday(&cur_time);
		if (timespec_compare(&cur_time, &core_dev->sched_time) > 0) {
			pr_err("abort hw ");
			hw_ifc->mm_abort(hw_ifc->mm_device_id,
					&job_list_elem->job);
			core_dev->mm_core_idle = true;
			is_hw_busy = false;
			SCHEDULER_WORK(core_dev, &core_dev->job_scheduler);
		}
	}

	if (is_hw_busy) {
		/* Keep the clock on and poll again via the watchdog timer. */
		mod_timer(&core_dev->dev_timer,
			jiffies + msecs_to_jiffies(hw_ifc->mm_timer));
		pr_debug("mod_timer %lx %lx",
			jiffies,
			msecs_to_jiffies(hw_ifc->mm_timer));
		return;
	}

mm_fmwk_job_scheduler_done:
	mm_core_disable_clock(core_dev);
}
/*
 * WaitForCpIpc - block (up to ~2 s) until the CP side of IPC initializes.
 *
 * Polls IPC_IsCpIpcInit() every 10 ms (200 tries) unless this is an
 * AP-only boot.  Outcomes of the final status:
 *   1  - CP up: set cp_running and notify IPC_CPSTATE_RUNNING listeners;
 *   0  - timeout: dump the CP reset vector/boot flag from ITCM, then BUG
 *        on normal boots (AP-only boots continue);
 *  -1  - CP crashed: BUG;
 *  -2  - AP/CP IPC version mismatch: logged only.
 */
void WaitForCpIpc(void *pSmBase)
{
	int k = 0, ret = 0;
	void __iomem *cp_boot_base;
	u32 reg_val;

	cp_running = 0;

	IPC_DEBUG(DBG_WARN, "Waiting for CP IPC to init 0x%x\n",
		(unsigned int)pSmBase);

	/* Debug info to show is_ap_only_boot() status */
	if (is_ap_only_boot())
		IPC_DEBUG(DBG_WARN, "AP ONLY BOOT\n");
	else
		IPC_DEBUG(DBG_WARN, "NORMAL BOOT\n");

	if (!is_ap_only_boot()) {
		/* Check for AP_BOOT or NORMAL_BOOT */
		ret = IPC_IsCpIpcInit(pSmBase, IPC_AP_CPU);
		IPC_DEBUG(DBG_WARN, "back from IPC_IsCpIpcInit\n");
		while (ret == 0) {
			/* Wait up to 2s for CP to init */
			if (k++ > 200)
				break;
			else
				msleep(10);
			ret = IPC_IsCpIpcInit(pSmBase, IPC_AP_CPU);
		}
	}

	if (ret == 1) {
		IPC_DEBUG(DBG_WARN, "CP IPC initialized\n");
		spin_lock_bh(&cp_state_notifier_lock);
		cp_running = 1; /* TRUE; */
		raw_notifier_call_chain(&cp_state_notifier_list,
			IPC_CPSTATE_RUNNING, NULL);
		spin_unlock_bh(&cp_state_notifier_lock);
	} else if (ret == 0) {
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "* CP IPC NOT INITIALIZED - SYSTEM BOOTS WITH AP ONLY!!! *\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");

		/* Dump the CP boot state from ITCM for post-mortem. */
		cp_boot_base = ioremap_nocache(MODEM_ITCM_ADDRESS, 0x20);
		if (!cp_boot_base) {
			IPC_DEBUG(DBG_ERROR, "ITCM Addr=0x%x, length=0x%x",
				MODEM_ITCM_ADDRESS, 0x20);
			IPC_DEBUG(DBG_ERROR, "ioremap cp_boot_base error\n");
			return;
		}
		reg_val = readl(cp_boot_base);
		IPC_DEBUG(DBG_ERROR, "reset vector value is 0x%x\n", reg_val);
		/* NOTE(review): this reads at offset 0x20 of a mapping that
		 * is only 0x20 bytes long — confirm the intended length. */
		reg_val = readl(cp_boot_base + 0x20);
		IPC_DEBUG(DBG_ERROR, "CP Boot flag 0x%x\n", reg_val);
		iounmap(cp_boot_base);

		/* SKIP reset is_ap_only_boot() non zero */
		if (!is_ap_only_boot())
			BUG_ON(ret == 0);
	} else if (ret == -1) {
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "* CP CRASHED !!! *\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		BUG_ON(ret);
	} else if (ret == -2) {
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "* AP/CP IPC VERSION NOT MATCH !!! *\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		/* BUG_ON(ret); */
	}
}
/*
 * do_suspend - carry out a full Xen domain suspend/resume cycle.
 *
 * Freezes userspace and kernel threads, quiesces devices and xenstore,
 * runs the hypercall-driven suspend on CPU0 via stop_machine(), then
 * unwinds every stage in reverse order.  si.cancelled reports whether the
 * hypervisor actually suspended us (0) or the suspend was cancelled (1);
 * it selects PMSG_RESTORE vs PMSG_THAW on the way back out.
 */
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

	err = freeze_processes();
	if (err) {
		pr_err("%s: freeze processes failed %d\n", __func__, err);
		goto out;
	}

	err = freeze_kernel_threads();
	if (err) {
		pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
		goto out_thaw;
	}

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		pr_err("dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	xen_arch_suspend();

	si.cancelled = 1;

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	/* Resume console as early as possible. */
	if (!si.cancelled)
		xen_console_resume();

	raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		pr_err("failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

	xen_arch_resume();

out_resume:
	/* A real (non-cancelled) suspend needs xenstore re-established. */
	if (!si.cancelled)
		xs_resume();
	else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

out_thaw:
	thaw_processes();
out:
	shutting_down = SHUTDOWN_INVALID;
}
/*
 * blocking_notifier_call_chain - run a blocking notifier chain.
 *
 * NOTE(review): this delegates to raw_notifier_call_chain() by casting the
 * blocking head to a raw head.  That is only valid if, in this codebase,
 * struct blocking_notifier_head's chain pointer aliases the layout of
 * struct raw_notifier_head and no rwsem protection is required here —
 * confirm against the local notifier type definitions.
 */
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	return raw_notifier_call_chain((struct raw_notifier_head *)nh, val, v);
}
/*
 * clockevents_do_notify - broadcast a clock event change on the
 * clockevents chain.  Caller must hold clockevents_lock; the chain's
 * return value is intentionally ignored.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
	(void)raw_notifier_call_chain(&clockevents_chain, reason, dev);
}
/*
 * proc_cpuinfo_notifier_call_chain - run the /proc/cpuinfo notifier chain
 * with @val and payload @v, returning the raw chain result.
 */
int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
{
	int rc;

	rc = raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);

	return rc;
}
/*
 * WaitForCpIpc - block (up to ~2 s) until the CP side of IPC initializes.
 *
 * Polls IPC_IsCpIpcInit() every 10 ms (200 tries) unless this is an
 * AP-only boot.  Outcomes of the final status:
 *   1  - CP up: set cp_running and notify IPC_CPSTATE_RUNNING listeners;
 *   0  - timeout: log banner, then BUG on normal boots (AP-only continues);
 *  -1  - CP crashed: BUG;
 *  -2  - AP/CP IPC version mismatch: logged only.
 */
void WaitForCpIpc(void *pSmBase)
{
	int k = 0, ret = 0;

	cp_running = 0;

	IPC_DEBUG(DBG_WARN, "Waiting for CP IPC to init ...\n");

	/* Debug info to show is_ap_only_boot() status */
	if (is_ap_only_boot())
		IPC_DEBUG(DBG_WARN, "AP ONLY BOOT\n");
	else
		IPC_DEBUG(DBG_WARN, "NORMAL BOOT\n");

	if (!is_ap_only_boot()) {
		/* Check for AP_BOOT or NORMAL_BOOT */
		ret = IPC_IsCpIpcInit(pSmBase, IPC_AP_CPU);
		IPC_DEBUG(DBG_WARN, "back from IPC_IsCpIpcInit\n");
		while (ret == 0) {
			/* Wait up to 2s for CP to init */
			if (k++ > 200)
				break;
			else
				msleep(10);
			ret = IPC_IsCpIpcInit(pSmBase, IPC_AP_CPU);
		}
	}

	if (ret == 1) {
		IPC_DEBUG(DBG_WARN, "CP IPC initialized\n");
		spin_lock_bh(&cp_state_notifier_lock);
		cp_running = 1; /* TRUE; */
		raw_notifier_call_chain(&cp_state_notifier_list,
			IPC_CPSTATE_RUNNING, NULL);
		spin_unlock_bh(&cp_state_notifier_lock);
	} else if (ret == 0) {
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "* CP IPC NOT INITIALIZED - SYSTEM BOOTS WITH AP ONLY!!! *\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		/* SKIP reset is_ap_only_boot() non zero */
		if (!is_ap_only_boot())
			BUG_ON(ret == 0);
	} else if (ret == -1) {
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "* CP CRASHED !!! *\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		BUG_ON(ret);
	} else if (ret == -2) {
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "* AP/CP IPC VERSION NOT MATCH !!! *\n");
		IPC_DEBUG(DBG_ERROR, "* *\n");
		IPC_DEBUG(DBG_ERROR, "********************************************************************\n");
		//BUG_ON(ret);
	}
}
/*
 * visdn_call_notifiers - broadcast @val with payload @v on the vISDN
 * notifier chain and return the raw chain result.
 */
int visdn_call_notifiers(unsigned long val, void *v)
{
	int rc;

	rc = raw_notifier_call_chain(&visdn_notify_chain, val, v);

	return rc;
}
/*
 * show_cpuinfo - seq_file show callback for /proc/cpuinfo (MIPS).
 *
 * @v encodes the 1-based CPU number; offline CPUs print nothing on SMP.
 * Emits the system/machine type once (CPU 0), then per-CPU model, FPU
 * revision, BogoMIPS, feature flags, watchpoint info, ASEs, shadow
 * register sets, VCE counts, and finally lets platform code append extra
 * lines via the proc_cpuinfo notifier chain.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
	unsigned long n = (unsigned long) v - 1;	/* v is CPU number + 1 */
	unsigned int version = cpu_data[n].processor_id;
	unsigned int fp_vers = cpu_data[n].fpu_id;
	char fmt [64];
	int i;

#ifdef CONFIG_SMP
	if (!cpu_online(n))
		return 0;
#endif

	/*
	 * For the first processor also print the system type
	 */
	if (n == 0) {
		seq_printf(m, "system type\t\t: %s\n", get_system_type());
		if (mips_get_machine_name())
			seq_printf(m, "machine\t\t\t: %s\n",
				mips_get_machine_name());
	}

	seq_printf(m, "processor\t\t: %ld\n", n);
	/* Build the model format at runtime: FPU revision only if present. */
	sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
		cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
	seq_printf(m, fmt, __cpu_name[n],
		(version >> 4) & 0x0f, version & 0x0f,
		(fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
	seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
		cpu_data[n].udelay_val / (500000/HZ),
		(cpu_data[n].udelay_val / (5000/HZ)) % 100);
	seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
	seq_printf(m, "microsecond timers\t: %s\n",
		cpu_has_counter ? "yes" : "no");
	seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
	seq_printf(m, "extra interrupt vector\t: %s\n",
		cpu_has_divec ? "yes" : "no");
	seq_printf(m, "hardware watchpoint\t: %s",
		cpu_has_watch ? "yes, " : "no\n");
	if (cpu_has_watch) {
		seq_printf(m, "count: %d, address/irw mask: [",
			cpu_data[n].watch_reg_count);
		for (i = 0; i < cpu_data[n].watch_reg_count; i++)
			seq_printf(m, "%s0x%04x", i ? ", " : "" ,
				cpu_data[n].watch_reg_masks[i]);
		seq_printf(m, "]\n");
	}
	seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
		cpu_has_mips16 ? " mips16" : "",
		cpu_has_mdmx ? " mdmx" : "",
		cpu_has_mips3d ? " mips3d" : "",
		cpu_has_smartmips ? " smartmips" : "",
		cpu_has_dsp ? " dsp" : "",
		cpu_has_mipsmt ? " mt" : ""
	);
	seq_printf(m, "shadow register sets\t: %d\n", cpu_data[n].srsets);
	seq_printf(m, "kscratch registers\t: %d\n",
		hweight8(cpu_data[n].kscratch_mask));
	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
#if defined(CONFIG_MIPS_MT_SMP)
	if (cpu_has_mipsmt)
		seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
#endif

	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
		cpu_has_vce ? "%u" : "not available");
	seq_printf(m, fmt, 'D', vced_count);
	seq_printf(m, fmt, 'I', vcei_count);

	/* Let platform hooks append their own lines. */
	proc_cpuinfo_notifier_args.m = m;
	proc_cpuinfo_notifier_args.n = n;
	raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
				&proc_cpuinfo_notifier_args);

	seq_printf(m, "\n");

	return 0;
}
/*
 * modemctl_notify_event - broadcast a modem-control event on the CP crash
 * notifier chain; the chain result is intentionally ignored.
 */
void __ref modemctl_notify_event(enum modemctl_event evt)
{
	(void)raw_notifier_call_chain(&cp_crash_notifier, evt, NULL);
}
/*
 * do_suspend - carry out a full Xen domain suspend/resume cycle
 * (CONFIG_PREEMPT-gated process freezing variant).
 *
 * Quiesces devices and xenstore, runs the hypercall-driven suspend on
 * CPU0 via stop_machine(), then unwinds every stage in reverse order.
 * si.cancelled reports whether the hypervisor actually suspended us (0)
 * or the suspend was cancelled (1); it selects PMSG_RESTORE vs PMSG_THAW
 * on the way back out.
 */
static void do_suspend(void)
{
	int err;
	struct suspend_info si;

	shutting_down = SHUTDOWN_SUSPEND;

#ifdef CONFIG_PREEMPT
	/* If the kernel is preemptible, we need to freeze all the processes
	   to prevent them from being in the middle of a pagetable update
	   during suspend. */
	err = freeze_processes();
	if (err) {
		pr_err("%s: freeze failed %d\n", __func__, err);
		goto out;
	}
#endif

	err = dpm_suspend_start(PMSG_FREEZE);
	if (err) {
		pr_err("%s: dpm_suspend_start %d\n", __func__, err);
		goto out_thaw;
	}

	printk(KERN_DEBUG "suspending xenstore...\n");
	xs_suspend();

	err = dpm_suspend_end(PMSG_FREEZE);
	if (err) {
		pr_err("dpm_suspend_end failed: %d\n", err);
		si.cancelled = 0;
		goto out_resume;
	}

	si.cancelled = 1;

	err = stop_machine(xen_suspend, &si, cpumask_of(0));

	/* Resume console as early as possible. */
	if (!si.cancelled)
		xen_console_resume();

	raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);

	dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

	if (err) {
		pr_err("failed to start xen_suspend: %d\n", err);
		si.cancelled = 1;
	}

out_resume:
	/* A real (non-cancelled) suspend needs arch state and xenstore
	 * re-established; a cancelled one only unwinds the xs suspend. */
	if (!si.cancelled) {
		xen_arch_resume();
		xs_resume();
	} else
		xs_suspend_cancel();

	dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);

out_thaw:
#ifdef CONFIG_PREEMPT
	thaw_processes();
out:
#endif
	shutting_down = SHUTDOWN_INVALID;
}
/*
 * modem_state_notifier_call_chain - broadcast a modem state change @val
 * with payload @v and return the raw chain result.
 */
static int modem_state_notifier_call_chain(unsigned long val, void *v)
{
	int rc = raw_notifier_call_chain(&modem_state_chain, val, v);

	return rc;
}