/*
 * Dump a page's key fields and decoded flags to the console.
 * @page:     page to describe
 * @reason:   optional human-readable cause; printed if non-NULL
 * @badflags: mask of flags considered "bad"; any set bits are decoded
 *            a second time under a separate "bad because" banner
 */
void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags)
{
	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
		 page, atomic_read(&page->_count), page_mapcount(page),
		 page->mapping, page->index);
	/* Compile-time check that the name table covers every page flag. */
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
	dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
	if (reason)
		pr_alert("page dumped because: %s\n", reason);
	/* Re-decode only the offending bits for emphasis. */
	if (page->flags & badflags) {
		pr_alert("bad because of flags:\n");
		dump_flags(page->flags & badflags, pageflag_names,
			   ARRAY_SIZE(pageflag_names));
	}
#ifdef CONFIG_MEMCG
	if (page->mem_cgroup)
		pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
#endif
}
/*
 * Ask all secondary CPUs to enter the crash-shutdown handler and spin
 * (bounded) until they have checked into cpus_in_crash.
 */
static void crash_kexec_prepare_cpus(void)
{
	unsigned int msecs;
	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */

	dump_send_ipi(crash_shutdown_secondary);
	smp_wmb();	/* publish our state before secondaries observe the IPI */

	/*
	 * The crash CPU sends an IPI and wait for other CPUs to
	 * respond. Delay of at least 10 seconds.
	 */
	pr_emerg("Sending IPI to other cpus...\n");
	msecs = 10000;
	/* ~1 ms per iteration => roughly a 10 s budget before giving up. */
	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
		cpu_relax();
		mdelay(1);
	}
}
/*
 * Take @cpu out of debug halt and restart it via the debug registers.
 * Logs an emergency message if the core does not report "restarted"
 * within the polling budget.
 */
void arch_restart_cpu(u32 cpu)
{
	u32 timeout, val;

	/* Clear the DSCR debug/halt mode bits (14:13) so the core may run. */
	val = readl(DBG_DSCR(cpu));
	val &= ~((0x1 << 14) | (0x1 << 13));
	writel(val, DBG_DSCR(cpu));

	/* Restart dest cpu via the Debug Run Control Register. */
	writel(0x2, DBG_DRCR(cpu));

	/* Poll the DSCR "restarted" flag (bit 1) with a bounded spin. */
	timeout = 10000;
	do {
		val = readl(DBG_DSCR(cpu));
		if (val & (0x1 << 1))
			break;
	} while (--timeout);

	if (!timeout)
		pr_emerg("Cannot restart cpu%u\n", cpu);	/* %u: cpu is u32 */
}
/*
 * Record the requested reboot mode in S5P_INFORM4 (if any), flush the
 * caches, and reset the SoC.  Never returns: if the first arch_reset()
 * somehow falls through, we keep retrying forever.
 */
static void mx_reboot_internal(const char *cmd)
{
	/* We are never coming back; mask interrupts first. */
	local_irq_disable();

	if (cmd) {
		if (strcmp(cmd, "charge") == 0)
			__raw_writel(REBOOT_MODE_CHARGE, S5P_INFORM4);
		else if (strcmp(cmd, "wipe") == 0)
			__raw_writel(REBOOT_MODE_WIPE, S5P_INFORM4);
		else if (strcmp(cmd, "upgrade") == 0)
			__raw_writel(REBOOT_MODE_UPGRADE, S5P_INFORM4);
	}

	/* Make sure the INFORM register write reaches memory. */
	flush_cache_all();
	outer_flush_all();

	arch_reset(0, 0);

	pr_emerg("%s: waiting for reboot\n", __func__);
	while (1)
		arch_reset(0, 0);
}
/*
 * Run the load/store emulator for @vcpu and translate its result into a
 * RESUME_* code: re-enter the guest, or exit to host userspace with
 * KVM_EXIT_MMIO so qemu can complete the access.
 */
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		/* Fetch the instruction we could not emulate, for the log. */
		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
/*
 * Restart a halted @cpu through the external debug + CTI interface:
 * leave halt-debug mode, pulse a CTI restart event, then poll EDPRSR
 * until the core reports it is running again.
 */
void arch_restart_cpu(u32 cpu)
{
	u32 timeout, val;
	void __iomem *p_dbg_base = DBG_BASE(cpu);
	void __iomem *p_cti_base = CTI_BASE(cpu);

	/* Disable Halt Debug Mode */
	val = readl(p_dbg_base + EDSCR);
	val &= ~(0x1 << 14);
	writel(val, p_dbg_base + EDSCR);

	/* Enable CTI access */
	cti_enable_access(cpu);

	/* Enable CTI */
	writel(0x1, p_cti_base + CTI_CTRL);

	/* ACK the output event */
	writel(0x1, p_cti_base + CTI_INTACK);

	/* Set output channel1 */
	val = readl(p_cti_base + CTI_OUT1EN) | 0x2;
	writel(val, p_cti_base + CTI_OUT1EN);

	/* Trigger pulse event */
	writel(0x2, p_cti_base + CTI_APP_PULSE);

	/* Wait until the CPU leaves the halted state (EDPRSR bit 4 clears). */
	timeout = 10000;
	do {
		val = readl(p_dbg_base + EDPRSR);
		if (!(val & (0x1 << 4)))
			break;
	} while (--timeout);

	if (!timeout)
		pr_emerg("Cannot restart cpu%u\n", cpu);	/* %u: cpu is u32 */
}
/*
 * Handle hardware error interrupts.
 *
 * RTAS check-exception is called to collect data on the exception. If
 * the error is deemed recoverable, we log a warning and return.
 * For nonrecoverable errors, an error is logged and we stop all processing
 * as quickly as possible in order to prevent propagation of the failure.
 */
static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
{
	struct rtas_error_log *rtas_elog;
	int status;
	int fatal;

	spin_lock(&ras_log_buf_lock);

	/* Pull the error details for this interrupt into ras_log_buf. */
	status = rtas_call(ras_check_exception_token, 6, 1, NULL,
			   RTAS_VECTOR_EXTERNAL_INTERRUPT,
			   virq_to_hw(irq),
			   RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
			   __pa(&ras_log_buf),
			   rtas_get_error_log_max());

	rtas_elog = (struct rtas_error_log *)ras_log_buf;

	/* Only a successful call with SYNC-or-worse severity is fatal. */
	if (status == 0 &&
	    rtas_error_severity(rtas_elog) >= RTAS_SEVERITY_ERROR_SYNC)
		fatal = 1;
	else
		fatal = 0;

	/* format and print the extended information */
	log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);

	if (fatal) {
		pr_emerg("Fatal hardware error detected. Check RTAS error"
			 " log for details. Powering off immediately\n");
		emergency_sync();
		kernel_power_off();
	} else {
		pr_err("Recoverable hardware error detected\n");
	}

	spin_unlock(&ras_log_buf_lock);
	return IRQ_HANDLED;
}
static void s5m_rtc_shutdown(struct platform_device *pdev) { struct s5m_rtc_info *info = platform_get_drvdata(pdev); int i; unsigned int val = 0; if (info->wtsr_smpl) { for (i = 0; i < 3; i++) { s5m_rtc_enable_wtsr(info, false); regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val); pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val); if (val & WTSR_ENABLE_MASK) pr_emerg("%s: fail to disable WTSR\n", __func__); else { pr_info("%s: success to disable WTSR\n", __func__); break; } } } /* Disable SMPL when power off */ s5m_rtc_enable_smpl(info, false); }
/* * adds a new device and register it with virtio * appropriate drivers are loaded by the device model */ static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset) { struct kvm_device *kdev; kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); if (!kdev) { pr_emerg("Cannot allocate kvm dev %u type %u\n", offset, d->type); return; } kdev->vdev.dev.parent = kvm_root; kdev->vdev.id.device = d->type; kdev->vdev.config = &kvm_vq_config_ops; kdev->desc = d; kdev->desc_pa = PFN_PHYS(max_pfn) + offset; if (register_virtio_device(&kdev->vdev) != 0) { pr_err("Failed to register kvm device %u type %u\n", offset, d->type); kfree(kdev); } }
/*
 * Early init for the Xilinx AXI timer: map registers, claim the IRQ,
 * verify the hardware has two timers, derive the clock frequency from
 * DT (falling back to the CPU clock), and bring up clocksource +
 * clockevent.  Any unusable configuration is fatal (BUG).
 */
static void __init xilinx_timer_init(struct device_node *timer)
{
	u32 irq;
	u32 timer_num = 1;
	int ret;

	timer_baseaddr = of_iomap(timer, 0);
	if (!timer_baseaddr) {
		pr_err("ERROR: invalid timer base address\n");
		BUG();
	}

	irq = irq_of_parse_and_map(timer, 0);
	if (!irq) {
		/* 0 means the mapping failed; the timer IRQ is mandatory. */
		pr_err("ERROR: could not map timer interrupt\n");
		BUG();
	}

	of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
	if (timer_num) {
		pr_emerg("Please enable two timers in HW\n");
		BUG();
	}

	pr_info("%s: irq=%u\n", timer->full_name, irq);	/* %u: irq is u32 */

	/* If there is clock-frequency property than use it */
	ret = of_property_read_u32(timer, "clock-frequency",
				   &timer_clock_freq);
	if (ret < 0)
		timer_clock_freq = cpuinfo.cpu_clock_freq;

	freq_div_hz = timer_clock_freq / HZ;

	setup_irq(irq, &timer_irqaction);
#ifdef CONFIG_HEART_BEAT
	setup_heartbeat();
#endif
	xilinx_clocksource_init();
	xilinx_clockevent_init();
	timer_initialized = 1;
}
/*
 * Print a call trace for @tsk, starting from @regs if supplied, from
 * the current frame when tsk == current, or from the task's saved
 * context (it is blocked in __switch_to) otherwise.
 */
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (regs) {
		/* Unwind from the exception frame. */
		frame.fp = regs->regs[29];
		frame.sp = regs->sp;
		frame.pc = regs->pc;
	} else if (tsk == current) {
		/* Unwind from right here. */
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}

	pr_emerg("Call trace:\n");
	/* Walk frames until the unwinder reports the end of the stack. */
	while (1) {
		unsigned long where = frame.pc;
		int ret;

		ret = unwind_frame(&frame);
		if (ret < 0)
			break;
		dump_backtrace_entry(where, frame.sp);
	}
}
/*
 * Notifier callback: print the detected CPU capability bitmap.
 * @v and @p are unused; always returns 0 so the notifier chain continues.
 */
static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
{
	/* file-wide pr_fmt adds "CPU features: " prefix */
	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
	return 0;
}
/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	/* Format the caller's message and strip a trailing newline. */
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		printk_safe_flush_on_panic();
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * works in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	/* Call flush even twice. It tries harder with a single online CPU */
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer. Try to acquire the lock then release it regardless of the
	 * result. The release will also print the buffers out. Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic();

	panic_print_sys_info();

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
	/* Spin forever, blinking; irqs back on so watchdogs can be petted. */
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}
/* Trailer printed after an MCE report: stress that this is a hardware fault. */
static void print_mce_tail(void)
{
	pr_emerg("This is not a software problem!\n");
}
/*
 * Load MPI @a from a big-endian byte buffer.
 * @a:       destination MPI (resized as needed)
 * @xbuffer: input bytes, most-significant byte first
 * @nbytes:  number of bytes in @xbuffer
 * @sign:    sign to assign to @a
 *
 * Returns 0 on success, -ENOMEM if the MPI cannot be resized.
 */
int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
{
	const uint8_t *buffer = xbuffer, *p;
	mpi_limb_t alimb;
	int nlimbs;
	int i;

	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
	if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
		return -ENOMEM;
	a->sign = sign;

	/*
	 * Walk backwards from the least-significant byte, packing one full
	 * limb per iteration while more than a limb's worth of bytes
	 * remains (an exactly-full final limb is handled by the tail
	 * below, whose per-byte guards make it safe for both cases).
	 */
	for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		alimb |= (mpi_limb_t) *p-- << 8;
		alimb |= (mpi_limb_t) *p-- << 16;
		alimb |= (mpi_limb_t) *p-- << 24;
		alimb |= (mpi_limb_t) *p-- << 32;
		alimb |= (mpi_limb_t) *p-- << 40;
		alimb |= (mpi_limb_t) *p-- << 48;
		alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	/* Pack the remaining, possibly partial, most-significant limb. */
	if (p >= buffer) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
#elif BYTES_PER_MPI_LIMB == 8
		alimb = (mpi_limb_t) *p--;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 8;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 16;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 24;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 32;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 40;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 48;
		if (p >= buffer)
			alimb |= (mpi_limb_t) *p-- << 56;
#else
#error please implement for this limb size.
#endif
		a->d[i++] = alimb;
	}
	a->nlimbs = i;
	/* i can only disagree with the precomputed nlimbs on a logic error. */
	if (i != nlimbs) {
		pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i,
			 nlimbs);
		BUG();
	}
	return 0;
}
/*
 * Check if the die sensor is cooling down. If it's higher than
 * t_hot since the last throttle then throttle it again.
 * OMAP junction temperature could stay for a long time in an
 * unacceptable temperature range. The idea here is to check after
 * t_hot->throttle the system really came below t_hot else re-throttle
 * and keep doing till it's under t_hot temp range.
 */
static void throttle_delayed_work_fn(struct work_struct *work)
{
	int curr;
	struct omap_temp_sensor *temp_sensor =
				container_of(work, struct omap_temp_sensor,
					     throttle_work.work);
	curr = omap_read_current_temp(temp_sensor);

#ifdef CONFIG_OMAP_TEMP_CONTROL
	if (curr >= temp_limit || curr < 0) {
#else
	if (curr >= BGAP_THRESHOLD_T_HOT || curr < 0) {
#endif
		/* Still too hot (or a read error): throttle again. */
		pr_warn("%s: OMAP temp read %d exceeds the threshold\n",
			__func__, curr);
		omap_thermal_throttle();
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
	} else {
		/* Below threshold: keep polling until t_cold unthrottles us. */
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
	}
}

/* Thermal-shutdown IRQ: restart the system (trimmed parts only). */
static irqreturn_t omap_tshut_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;

	/* Need to handle thermal mgmt in bootloader
	 * to avoid restart again at kernel level
	 */
	if (temp_sensor->is_efuse_valid) {
		pr_emerg("%s: Thermal shutdown reached rebooting device\n",
			 __func__);
		kernel_restart(NULL);
	} else {
		pr_err("%s:Invalid EFUSE, Non-trimmed BGAP\n", __func__);
	}

	return IRQ_HANDLED;
}

/*
 * Thermal-alert IRQ (threaded): throttle on t_hot, unthrottle on
 * t_cold, and flip the BGAP mask bits so the opposite edge fires next.
 */
static irqreturn_t omap_talert_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;
	int t_hot, t_cold, temp_offset;

	t_hot = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
			& OMAP4_HOT_FLAG_MASK;
	t_cold = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
			& OMAP4_COLD_FLAG_MASK;
	temp_offset = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);

	if (t_hot) {
		omap_thermal_throttle();
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
		/* Listen for the cool-down edge next. */
		temp_offset &= ~(OMAP4_MASK_HOT_MASK);
		temp_offset |= OMAP4_MASK_COLD_MASK;
	} else if (t_cold) {
		cancel_delayed_work_sync(&temp_sensor->throttle_work);
		omap_thermal_unthrottle();
		/* Listen for the heat-up edge next. */
		temp_offset &= ~(OMAP4_MASK_COLD_MASK);
		temp_offset |= OMAP4_MASK_HOT_MASK;
	}

	omap_temp_sensor_writel(temp_sensor, temp_offset, BGAP_CTRL_OFFSET);

	return IRQ_HANDLED;
}

/*
 * Probe: map resources, claim both IRQs (alert + GPIO-based tshut),
 * enable and calibrate the bandgap sensor, then expose sysfs controls.
 * The error ladder below unwinds in exact reverse order of acquisition.
 */
static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
	struct omap_temp_sensor *temp_sensor;
	struct resource *mem;
	int ret = 0, val;

	if (!pdata) {
		dev_err(dev, "%s: platform data missing\n", __func__);
		return -EINVAL;
	}

	temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
	if (!temp_sensor)
		return -ENOMEM;

	spin_lock_init(&temp_sensor->lock);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "%s:no mem resource\n", __func__);
		ret = -EINVAL;
		goto plat_res_err;
	}

	temp_sensor->irq = platform_get_irq_byname(pdev, "thermal_alert");
	if (temp_sensor->irq < 0) {
		dev_err(dev, "%s:Cannot get thermal alert irq\n", __func__);
		ret = -EINVAL;
		goto get_irq_err;
	}

	ret = gpio_request_one(OMAP_TSHUT_GPIO, GPIOF_DIR_IN,
			       "thermal_shutdown");
	if (ret) {
		dev_err(dev, "%s: Could not get tshut_gpio\n", __func__);
		goto tshut_gpio_req_err;
	}

	temp_sensor->tshut_irq = gpio_to_irq(OMAP_TSHUT_GPIO);
	if (temp_sensor->tshut_irq < 0) {
		dev_err(dev, "%s:Cannot get thermal shutdown irq\n", __func__);
		ret = -EINVAL;
		goto get_tshut_irq_err;
	}

	temp_sensor->phy_base = pdata->offset;
	temp_sensor->pdev = pdev;
	temp_sensor->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	/*
	 * check if the efuse has a non-zero value if not
	 * it is an untrimmed sample and the temperatures
	 * may not be accurate
	 */
	if (omap_readl(OMAP4_CTRL_MODULE_CORE +
			OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP))
		temp_sensor->is_efuse_valid = 1;

	temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck");
	if (IS_ERR(temp_sensor->clock)) {
		ret = PTR_ERR(temp_sensor->clock);
		pr_err("%s:Unable to get fclk: %d\n", __func__, ret);
		ret = -EINVAL;
		goto clk_get_err;
	}

	/* Init delayed work for throttle decision */
	INIT_DELAYED_WORK(&temp_sensor->throttle_work,
			  throttle_delayed_work_fn);

	platform_set_drvdata(pdev, temp_sensor);

	ret = omap_temp_sensor_enable(temp_sensor);
	if (ret) {
		dev_err(dev, "%s:Cannot enable temp sensor\n", __func__);
		goto sensor_enable_err;
	}

	omap_enable_continuous_mode(temp_sensor);
	omap_configure_temp_sensor_thresholds(temp_sensor);
	/* 1 ms */
	omap_configure_temp_sensor_counter(temp_sensor, 1);

	/* Wait till the first conversion is done wait for at least 1ms */
	mdelay(2);

	/* Read the temperature once due to hw issue*/
	omap_read_current_temp(temp_sensor);

	/* Set 2 seconds time as default counter */
	omap_configure_temp_sensor_counter(temp_sensor,
					   temp_sensor->clk_rate * 2);

	ret = request_threaded_irq(temp_sensor->irq, NULL,
				   omap_talert_irq_handler,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "temp_sensor", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed.\n");
		goto req_irq_err;
	}

	ret = request_threaded_irq(temp_sensor->tshut_irq, NULL,
				   omap_tshut_irq_handler,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "tshut", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed for TSHUT.\n");
		goto tshut_irq_req_err;
	}

	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs files\n");
		goto sysfs_create_err;
	}

	/* unmask the T_COLD and unmask T_HOT at init */
	val = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	val |= OMAP4_MASK_COLD_MASK;
	val |= OMAP4_MASK_HOT_MASK;
	omap_temp_sensor_writel(temp_sensor, val, BGAP_CTRL_OFFSET);

	dev_info(dev, "%s probed", pdata->name);

	temp_sensor_pm = temp_sensor;

#ifdef CONFIG_OMAP_TEMP_CONTROL
	ctrl_sensor = temp_sensor;
	tempcontrol_registerlimit(temp_limit);
#endif

	return 0;

sysfs_create_err:
	free_irq(temp_sensor->tshut_irq, temp_sensor);
	cancel_delayed_work_sync(&temp_sensor->throttle_work);
tshut_irq_req_err:
	free_irq(temp_sensor->irq, temp_sensor);
req_irq_err:
	platform_set_drvdata(pdev, NULL);
	omap_temp_sensor_disable(temp_sensor);
sensor_enable_err:
	clk_put(temp_sensor->clock);
clk_get_err:
	pm_runtime_disable(dev);
get_tshut_irq_err:
	gpio_free(OMAP_TSHUT_GPIO);
tshut_gpio_req_err:
get_irq_err:
plat_res_err:
	kfree(temp_sensor);
	return ret;
}

/* Remove: tear everything down in reverse order of probe. */
static int __devexit omap_temp_sensor_remove(struct platform_device *pdev)
{
	struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev);

	sysfs_remove_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	cancel_delayed_work_sync(&temp_sensor->throttle_work);
	omap_temp_sensor_disable(temp_sensor);
	clk_put(temp_sensor->clock);
	platform_set_drvdata(pdev, NULL);
	if (temp_sensor->irq)
		free_irq(temp_sensor->irq, temp_sensor);
	if (temp_sensor->tshut_irq)
		free_irq(temp_sensor->tshut_irq, temp_sensor);
	kfree(temp_sensor);

	return 0;
}

#ifdef CONFIG_PM
/* Save the BGAP register context ahead of a power transition. */
static void omap_temp_sensor_save_ctxt(struct omap_temp_sensor *temp_sensor)
{
	temp_sensor_context.temp_sensor_ctrl =
	    omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	temp_sensor_context.bg_ctrl =
	    omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	temp_sensor_context.bg_counter =
	    omap_temp_sensor_readl(temp_sensor, BGAP_COUNTER_OFFSET);
	temp_sensor_context.bg_threshold =
	    omap_temp_sensor_readl(temp_sensor, BGAP_THRESHOLD_OFFSET);
	temp_sensor_context.temp_sensor_tshut_threshold =
	    omap_temp_sensor_readl(temp_sensor, BGAP_TSHUT_OFFSET);
}
/**
 * omap_fatal_zone() - Shut-down the system to ensure OMAP Junction
 * Temperature decreases enough
 *
 * @cpu_temp: The current adjusted CPU temperature
 *
 * No return forces a restart of the system
 */
static void omap_fatal_zone(int cpu_temp)
{
	pr_emerg("%s:FATAL ZONE (hot spot temp: %i)\n", __func__, cpu_temp);
	/* kernel_restart() does not return; the line above is the last log. */
	kernel_restart(NULL);
}
static void apanic_mmc_memdump(void) { int rc; struct membank *bank; struct apanic_data *ctx = &drv_ctx; struct memdump_header *hdr = (struct memdump_header *) drv_ctx.bounce; if (!memdump_ctx.hd || !memdump_ctx.mmc_memdump_ops || !memdump_ctx.mmc_memdump_ops->panic_probe) return; memdump_wdt_disable(); if (memdump_ctx.mmc_memdump_ops-> panic_probe(memdump_ctx.hd, memdump_ctx.mmc_memdump_ops->type)) { pr_err("apanic: full memeory dump backing device" " not detected, fail to do memory dump\n"); return; } memset(ctx->bounce, 0, PAGE_SIZE); rc = memdump_ctx.mmc_memdump_ops->panic_write(memdump_ctx. hd, ctx->bounce, 0, PAGE_SIZE); if (rc <= 0) { pr_emerg("apanic: memdump erase header failed (%d)\n", rc); return; } /* no need to flush_cache_all(); */ /* get the last memory bank, probably the only bank */ if (meminfo.nr_banks >= 1 && meminfo.nr_banks < NR_BANKS) bank = &meminfo.bank[meminfo.nr_banks - 1]; rc = memdump_ctx.mmc_memdump_ops->panic_write(memdump_ctx. hd, __va(bank->start), PAGE_SIZE, bank->size); if (rc <= 0) { pr_emerg("apanic: full memory write failed rc=%d)\n", rc); return; } strncpy(hdr->magic, MEMDUMP_MAGIC, MEMDUMP_MAGIC_LEN); hdr->version = PHDR_VERSION; hdr->ts = current_kernel_time(); hdr->sdram_offset = PAGE_SIZE; hdr->sdram_length = bank->size; rc = memdump_ctx.mmc_memdump_ops->panic_write(memdump_ctx. hd, ctx->bounce, 0, PAGE_SIZE); if (rc <= 0) { pr_emerg("apanic: memdump header write failed (%d)\n", rc); return; } pr_emerg("apanic: full memory dump successfully written\n"); }
/* PM porting is needed */
/*
 * Board power-off handler.  Loops forever: pulls PS_HOLD low while the
 * power button is released; if a charger is attached (MUIC builds) or
 * five attempts fail, falls back to a reboot into LP-charging mode.
 */
static void sec_power_off(void)
{
	int poweroff_try = 0;
#if defined(CONFIG_SAMSUNG_MUIC)
	struct power_supply *psy = power_supply_get_by_name("battery");
	union power_supply_propval value;
	bool health = true;

	if (!psy || !psy->get_property) {
		pr_err("%s: fail to get battery psy\n", __func__);
	} else {
		psy->get_property(psy, POWER_SUPPLY_PROP_HEALTH, &value);
		if (value.intval == POWER_SUPPLY_HEALTH_OVERVOLTAGE ||
		    value.intval == POWER_SUPPLY_HEALTH_UNDERVOLTAGE)
			health = false;
	}
#endif

	local_irq_disable();

#if defined(CONFIG_SAMSUNG_MUIC)
	pr_emerg("%s : cable state=%d, health=%d\n", __func__,
		 is_cable_attached, health);
#endif /* CONFIG_SAMSUNG_MUIC */

	while (1) {
		/* Check reboot charging */
#if defined(CONFIG_SAMSUNG_MUIC)
		if ((is_cable_attached && health) || (poweroff_try >= 5)) {
#else
		if (poweroff_try >= 5) {
#endif /* CONFIG_SAMSUNG_MUIC */
#if defined(CONFIG_SAMSUNG_MUIC)
			pr_emerg
			    ("%s: charger connected(%d) or power"
			     "off failed(%d), reboot!\n",
			     __func__, is_cable_attached, poweroff_try);
#else
			pr_emerg
			    ("%s: power off failed(%d), reboot!\n",
			     __func__, poweroff_try);
#endif /* CONFIG_SAMSUNG_MUIC */
			/* To enter LP charging */
			writel(0x0, EXYNOS_INFORM2);

			flush_cache_all();
			outer_flush_all();
			exynos4_restart(0, 0);

			pr_emerg("%s: waiting for reboot\n", __func__);
			while (1)
				;
		}

		/* wait for power button release */
		if (gpio_get_value(GPIO_PMIC_ONOB)) {
			pr_emerg("%s: set PS_HOLD low\n", __func__);

			/* power off code
			 * PS_HOLD Out/High -->
			 * Low PS_HOLD_CONTROL, R/W, 0x1002_330C
			 */
			writel(readl(EXYNOS_PS_HOLD_CONTROL) & 0xFFFFFEFF,
			       EXYNOS_PS_HOLD_CONTROL);

			++poweroff_try;
			/* If execution continues, the PMIC did not cut power. */
			pr_emerg
			    ("%s: Should not reach here! (poweroff_try:%d)\n",
			     __func__, poweroff_try);
		} else {
			/* if power button is not released, wait and check TA again */
			pr_info("%s: PowerButton is not released.\n", __func__);
		}
		mdelay(1000);
	}
}

/* Reboot-mode magic words left for the bootloader in EXYNOS_INFORM3. */
#define REBOOT_MODE_PREFIX	0x12345670
#define REBOOT_MODE_NONE	0
#define REBOOT_MODE_DOWNLOAD	1
#define REBOOT_MODE_UPLOAD	2
#define REBOOT_MODE_CHARGING	3
#define REBOOT_MODE_RECOVERY	4
#define REBOOT_MODE_FOTA	5
#define REBOOT_MODE_FOTA_BL	6	/* update bootloader */
#define REBOOT_MODE_SECURE	7	/* image secure check fail */

#define REBOOT_SET_PREFIX	0xabc00000
#define REBOOT_SET_DEBUG	0x000d0000
#define REBOOT_SET_SWSEL	0x000e0000
#define REBOOT_SET_SUD		0x000f0000

/*
 * Board restart handler: encode @cmd into EXYNOS_INFORM3 for the
 * bootloader, then reset the SoC.  Never returns.
 */
static void sec_reboot(char str, const char *cmd)
{
	local_irq_disable();

	pr_emerg("%s (%d, %s)\n", __func__, str, cmd ? cmd : "(null)");

	writel(0x12345678, EXYNOS_INFORM2);	/* Don't enter lpm mode */

	if (!cmd) {
		writel(REBOOT_MODE_PREFIX | REBOOT_MODE_NONE, EXYNOS_INFORM3);
	} else {
		unsigned long value;
		if (!strcmp(cmd, "fota"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_FOTA,
			       EXYNOS_INFORM3);
		else if (!strcmp(cmd, "fota_bl"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_FOTA_BL,
			       EXYNOS_INFORM3);
		else if (!strcmp(cmd, "recovery"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_RECOVERY,
			       EXYNOS_INFORM3);
		else if (!strcmp(cmd, "download"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_DOWNLOAD,
			       EXYNOS_INFORM3);
		else if (!strcmp(cmd, "upload"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_UPLOAD,
			       EXYNOS_INFORM3);
		else if (!strcmp(cmd, "secure"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_SECURE,
			       EXYNOS_INFORM3);
		/* "debugN"/"swselN"/"sudN" carry a numeric argument. */
		else if (!strncmp(cmd, "debug", 5)
			 && !kstrtoul(cmd + 5, 0, &value))
			writel(REBOOT_SET_PREFIX | REBOOT_SET_DEBUG | value,
			       EXYNOS_INFORM3);
		else if (!strncmp(cmd, "swsel", 5)
			 && !kstrtoul(cmd + 5, 0, &value))
			writel(REBOOT_SET_PREFIX | REBOOT_SET_SWSEL | value,
			       EXYNOS_INFORM3);
		else if (!strncmp(cmd, "sud", 3)
			 && !kstrtoul(cmd + 3, 0, &value))
			writel(REBOOT_SET_PREFIX | REBOOT_SET_SUD | value,
			       EXYNOS_INFORM3);
		else if (!strncmp(cmd, "emergency", 9))
			writel(0, EXYNOS_INFORM3);
		else
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_NONE,
			       EXYNOS_INFORM3);
	}

	flush_cache_all();
	outer_flush_all();
	exynos4_restart(0, 0);

	pr_emerg("%s: waiting for reboot\n", __func__);
	while (1)
		;
}
/*
 * Decode a Northbridge machine-check error: report the node and (when
 * identifiable) the core, print a fixed-meaning cause for well-known
 * extended error codes, otherwise defer to the family-specific decoder.
 */
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	u16 ec = EC(m->status);
	u8 xec = XEC(m->status, 0x1f);
	u32 nbsh = (u32)(m->status >> 32);
	int core = -1;

	pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);

	/* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
	if (c->x86 == 0x10 && c->x86_model > 7) {
		if (nbsh & NBSH_ERR_CPU_VAL)
			core = nbsh & nb_err_cpumask;
	} else {
		u8 assoc_cpus = nbsh & nb_err_cpumask;

		/* Highest set bit identifies the associated core. */
		if (assoc_cpus > 0)
			core = fls(assoc_cpus) - 1;
	}

	if (core >= 0)
		pr_cont(", core %d): ", core);
	else
		pr_cont("): ");

	/* Extended error codes with a family-independent meaning. */
	switch (xec) {
	case 0x2:
		pr_cont("Sync error (sync packets on HT link detected).\n");
		return;

	case 0x3:
		pr_cont("HT Master abort.\n");
		return;

	case 0x4:
		pr_cont("HT Target abort.\n");
		return;

	case 0x7:
		pr_cont("NB Watchdog timeout.\n");
		return;

	case 0x9:
		pr_cont("SVM DMA Exclusion Vector error.\n");
		return;

	default:
		break;
	}

	/* Family-specific decode; complain if the code is not recognized. */
	if (!fam_ops->nb_mce(ec, xec))
		goto wrong_nb_mce;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
		if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
			nb_bus_decoder(node_id, m, nbcfg);

	return;

wrong_nb_mce:
	pr_emerg(HW_ERR "Corrupted NB MCE info?\n");
}
/*
 * Debug/test hook (module param setter): deliberately trigger the fault
 * named by @val.  Every branch below intentionally performs an illegal
 * operation (NULL dereference, double free, use-after-free, OOM, heap
 * corruption, ...) to exercise the crash/recovery machinery — these are
 * NOT bugs to be fixed.  Always returns 0 if the fault does not kill us.
 */
static int force_error(const char *val, struct kernel_param *kp)
{
	pr_emerg("!!!WARN forced error : %s\n", val);

	if (!strncmp(val, "appdogbark", 10)) {
		pr_emerg("Generating an apps wdog bark!\n");
		simulate_apps_wdog_bark();
	} else if (!strncmp(val, "appdogbite", 10)) {
		pr_emerg("Generating an apps wdog bite!\n");
		simulate_apps_wdog_bite();
	} else if (!strncmp(val, "dabort", 6)) {
		pr_emerg("Generating a data abort exception!\n");
		*(unsigned int *)0x0 = 0x0;	/* intentional NULL write */
	} else if (!strncmp(val, "pabort", 6)) {
		pr_emerg("Generating a prefetch abort exception!\n");
		((void (*)(void))0x0)();	/* intentional jump to NULL */
	} else if (!strncmp(val, "undef", 5)) {
		pr_emerg("Generating a undefined instruction exception!\n");
		BUG();
	} else if (!strncmp(val, "bushang", 7)) {
		pr_emerg("Generating a Bus Hang!\n");
		simulate_bus_hang();
	} else if (!strncmp(val, "thermal_reset", 13)) {
		pr_emerg("Generating a thermal reset!\n");
		simulate_msm_thermal_bite();
	} else if (!strncmp(val, "dblfree", 7)) {
		/* intentional double free */
		void *p = kmalloc(sizeof(int), GFP_KERNEL);
		kfree(p);
		msleep(1000);
		kfree(p);
	} else if (!strncmp(val, "danglingref", 11)) {
		/* intentional use-after-free */
		unsigned int *p = kmalloc(sizeof(int), GFP_KERNEL);
		kfree(p);
		*p = 0x1234;
	} else if (!strncmp(val, "lowmem", 6)) {
		/* intentional memory exhaustion; allocations are leaked */
		int i = 0;
		pr_emerg("Allocating memory until failure!\n");
		while (kmalloc(128*1024, GFP_KERNEL))
			i++;
		pr_emerg("Allocated %d KB!\n", i*128);
	} else if (!strncmp(val, "memcorrupt", 10)) {
		/* intentional out-of-bounds write followed by panic */
		int *ptr = kmalloc(sizeof(int), GFP_KERNEL);
		*ptr++ = 4;
		*ptr = 2;
		panic("MEMORY CORRUPTION");
#ifdef CONFIG_SEC_DEBUG_SEC_WDOG_BITE
	} else if (!strncmp(val, "secdogbite", 10)) {
		simulate_secure_wdog_bite();
#endif
	} else {
		pr_emerg("No such error defined for now!\n");
	}

	return 0;
}
/*
 * Stop every CPU but the calling one: first a REBOOT_VECTOR IPI gives
 * the others up to one second to park cleanly, then stragglers are hit
 * with an NMI (unless smp_no_nmi_ipi); finally the local APIC is
 * disabled.
 * @wait: when non-zero, wait indefinitely instead of using timeouts.
 */
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs. Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code. By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1,
				   safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			/* Hope the REBOOT_IRQ is good enough */
			goto finish;

		/* sync above data before sending IRQ */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than a 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
static irqreturn_t tmu_irq(int irq, void *id) { struct tmu_info *info = id; unsigned int status; disable_irq_nosync(irq); status = __raw_readl(info->tmu_base + INTSTAT); /* To handle multiple interrupt pending, * interrupt by high temperature are serviced with priority. */ #if defined(CONFIG_TC_VOLTAGE) if (status & INTSTAT_FALL0) { pr_info("TC interrupt occured..!\n"); __raw_writel(INTCLEAR_FALL0, info->tmu_base + INTCLEAR); info->tmu_state = TMU_STATUS_TC; } else if (status & INTSTAT_RISE2) { #else if (status & INTSTAT_RISE2) { #endif pr_info("Tripping interrupt occured..!\n"); info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR_RISE2, info->tmu_base + INTCLEAR); } else if (status & INTSTAT_RISE1) { pr_info("Warning interrupt occured..!\n"); __raw_writel(INTCLEAR_RISE1, info->tmu_base + INTCLEAR); info->tmu_state = TMU_STATUS_WARNING; } else if (status & INTSTAT_RISE0) { pr_info("Throttling interrupt occured..!\n"); __raw_writel(INTCLEAR_RISE0, info->tmu_base + INTCLEAR); info->tmu_state = TMU_STATUS_THROTTLED; } else { pr_err("%s: TMU interrupt error\n", __func__); return -ENODEV; } queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(1 * 1000)); return IRQ_HANDLED; } static irqreturn_t exynos4210_tmu_irq(int irq, void *id) { struct tmu_info *info = id; unsigned int status; disable_irq_nosync(irq); status = __raw_readl(info->tmu_base + INTSTAT); if (status & INTSTAT2) { pr_info("Tripping interrupt occured..!\n"); info->tmu_state = TMU_STATUS_TRIPPED; __raw_writel(INTCLEAR2, info->tmu_base + INTCLEAR); } else if (status & INTSTAT1) { pr_info("Warning interrupt occured..!\n"); __raw_writel(INTCLEAR1, info->tmu_base + INTCLEAR); info->tmu_state = TMU_STATUS_WARNING; } else if (status & INTSTAT0) { pr_info("Throttling interrupt occured..!\n"); __raw_writel(INTCLEAR0, info->tmu_base + INTCLEAR); info->tmu_state = TMU_STATUS_THROTTLED; } else { pr_err("%s: TMU interrupt error\n", __func__); return -ENODEV; } queue_delayed_work_on(0, 
tmu_monitor_wq, &info->polling, usecs_to_jiffies(1000)); return IRQ_HANDLED; } static int __devinit tmu_probe(struct platform_device *pdev) { struct tmu_info *info; struct resource *res; int ret = 0; pr_debug("%s: probe=%p\n", __func__, pdev); info = kzalloc(sizeof(struct tmu_info), GFP_KERNEL); if (!info) { dev_err(&pdev->dev, "failed to alloc memory!\n"); ret = -ENOMEM; goto err_nomem; } pr_emerg("TMU: Memory Allocation Sucessful\n"); platform_set_drvdata(pdev, info); pr_emerg("TMU: Platform data set\n"); info->dev = &pdev->dev; pr_emerg("TMU: Copied the Dev access Information \n"); info->irq = platform_get_irq(pdev, 0); if (info->irq < 0) { dev_err(&pdev->dev, "no irq for thermal\n"); ret = -ENOENT; goto err_noirq; } if (soc_is_exynos4210()) ret = request_irq(info->irq, exynos4210_tmu_irq, IRQF_DISABLED, "tmu interrupt", info); else ret = request_irq(info->irq, tmu_irq, IRQF_DISABLED, "tmu interrupt", info); if (ret) { dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq, ret); goto err_noirq; } pr_emerg("TMU: IRQ Granted!\n"); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "failed to get memory region resource\n"); ret = -ENODEV; goto err_nores; } pr_emerg("TMU: IO Resource alloced on Memory\n"); info->ioarea = request_mem_region(res->start, res->end-res->start+1, pdev->name); if (!(info->ioarea)) { dev_err(&pdev->dev, "failed to reserve memory region\n"); ret = -EBUSY; goto err_nores; } pr_emerg("TMU: Memory area resersed\n"); info->tmu_base = ioremap(res->start, (res->end - res->start) + 1); if (!(info->tmu_base)) { dev_err(&pdev->dev, "failed ioremap()\n"); ret = -EINVAL; goto err_nomap; } pr_emerg("TMU: IO Memory Remapped\n"); if (thermal_create_sysfs_file(&pdev->dev)) goto err_sysfs; pr_emerg("TMU: Created Sysfs\n"); tmu_monitor_wq = create_freezable_workqueue("tmu"); if (!tmu_monitor_wq) { dev_err(&pdev->dev, "Creation of tmu_monitor_wq failed\n"); ret = -EFAULT; goto err_wq; } pr_emerg("TMU: Workqueue 
Created\n"); INIT_DELAYED_WORK_DEFERRABLE(&info->polling, tmu_monitor); pr_emerg("TMU: Work Created\n"); #ifdef CONFIG_TMU_DEBUG INIT_DELAYED_WORK_DEFERRABLE(&info->monitor, cur_temp_monitor); #endif print_temperature_params(info); pr_emerg("TMU: Printed Parameters\n"); ret = tmu_initialize(pdev); if (ret < 0) goto err_noinit; #ifdef CONFIG_TMU_DEBUG queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor, info->sampling_rate); #endif pr_info("Tmu Initialization is sucessful...!\n"); return ret; err_noinit: destroy_workqueue(tmu_monitor_wq); err_wq: thermal_remove_sysfs_file(&pdev->dev); err_sysfs: iounmap(info->tmu_base); err_nomap: release_resource(info->ioarea); err_nores: free_irq(info->irq, info); err_noirq: kfree(info); info = NULL; err_nomem: dev_err(&pdev->dev, "initialization failed.\n"); return ret; } static int __devinit tmu_remove(struct platform_device *pdev) { struct tmu_info *info = platform_get_drvdata(pdev); cancel_delayed_work(&info->polling); destroy_workqueue(tmu_monitor_wq); thermal_remove_sysfs_file(&pdev->dev); iounmap(info->tmu_base); release_resource(info->ioarea); free_irq(info->irq, (void *)pdev); kfree(info); info = NULL; pr_info("%s is removed\n", dev_name(&pdev->dev)); return 0; } #ifdef CONFIG_PM static int tmu_suspend(struct platform_device *pdev, pm_message_t state) { struct tmu_info *info = platform_get_drvdata(pdev); pm_tmu_save(info); return 0; } static int tmu_resume(struct platform_device *pdev) { struct tmu_info *info = platform_get_drvdata(pdev); #if defined(CONFIG_TC_VOLTAGE) struct tmu_data *data = info->dev->platform_data; #endif pm_tmu_restore(info); #if defined(CONFIG_TC_VOLTAGE) /* s/w workaround for fast service when interrupt is not occured, * such as current temp is lower than tc interrupt temperature * or current temp is continuosly increased. 
*/ mdelay(1); if (get_cur_temp(info) <= data->ts.start_tc) { disable_irq_nosync(info->irq); if (exynos_tc_volt(info, 1) < 0) pr_err("%s\n", __func__); info->tmu_state = TMU_STATUS_TC; already_limit = 1; queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, usecs_to_jiffies(1 * 1000)); } #endif return 0; } #else #define tmu_suspend NULL #define tmu_resume NULL #endif static struct platform_driver tmu_driver = { .probe = tmu_probe, .remove = tmu_remove, .suspend = tmu_suspend, .resume = tmu_resume, .driver = { .name = "tmu", .owner = THIS_MODULE, }, }; static int __init tmu_driver_init(void) { return platform_driver_register(&tmu_driver); } late_initcall(tmu_driver_init);
/*
 * apanic_mmc_logbuf_dump() - at panic time, write the console log plus app
 * and kernel thread dumps to a dedicated eMMC crash partition, followed by
 * a panic_header at offset 0 describing where each section landed.
 *
 * Layout on the partition (offsets in bytes):
 *   [0, 1024)  panic_header (written last, so a partial dump is detectable)
 *   [1024, ..) console log, then app-thread dump, then kthread dump,
 *              each aligned as computed below.
 *
 * Runs in panic context: no locking beyond bust_spinlocks(), and the HW
 * watchdog is touched between long phases to avoid a watchdog reset.
 */
static void apanic_mmc_logbuf_dump(void)
{
	struct apanic_data *ctx = &drv_ctx;
	struct panic_header *hdr = (struct panic_header *) ctx->bounce;
	int console_offset = 0;
	int console_len = 0;
	int threads_offset = 0;
	int threads_len = 0;
	int app_threads_offset = 0;
	int app_threads_len = 0;
	int rc = 0;
	struct timespec now;
	struct timespec uptime;
	struct rtc_time rtc_timestamp;
	struct console *con;

	/* Bail out unless an eMMC panic backend is wired up and present. */
	if (!ctx->hd || !ctx->mmc_panic_ops ||
	    !ctx->mmc_panic_ops->panic_probe)
		return;
	if (ctx->mmc_panic_ops->panic_probe(ctx->hd,
					    ctx->mmc_panic_ops->type)) {
		pr_err("apanic: choose to use mmc, "
		       "but eMMC card not detected\n");
		return;
	}
	console_offset = 1024;

	/* A previous crash is still stored; don't overwrite it. */
	if (ctx->curr.magic) {
		pr_emerg("Crash partition in use!\n");
		return;
	}

	/*
	 * Add timestamp to displays current UTC time and uptime (in seconds).
	 */
	now = current_kernel_time();
	rtc_time_to_tm((unsigned long) now.tv_sec, &rtc_timestamp);
	do_posix_clock_monotonic_gettime(&uptime);
	bust_spinlocks(1);
	pr_emerg("Timestamp = %lu.%03lu\n",
	       (unsigned long) now.tv_sec,
	       (unsigned long) (now.tv_nsec / 1000000));
	pr_emerg("Current Time = "
	       "%02d-%02d %02d:%02d:%lu.%03lu, "
	       "Uptime = %lu.%03lu seconds\n",
	       rtc_timestamp.tm_mon + 1, rtc_timestamp.tm_mday,
	       rtc_timestamp.tm_hour, rtc_timestamp.tm_min,
	       (unsigned long) rtc_timestamp.tm_sec,
	       (unsigned long) (now.tv_nsec / 1000000),
	       (unsigned long) uptime.tv_sec,
	       (unsigned long) (uptime.tv_nsec / USEC_PER_SEC));
	bust_spinlocks(0);

	if (ctx->annotation)
		printk(KERN_EMERG "%s\n", ctx->annotation);

	touch_hw_watchdog();
	/*
	 * Write out the console
	 */
	console_len = apanic_write_console_mmc(console_offset);
	if (console_len < 0) {
		pr_emerg("Error writing console to panic log! (%d)\n",
		       console_len);
		console_len = 0;
	}
	/*
	 * Write out all threads
	 */
	app_threads_offset = (ALIGN(console_offset + console_len,
				    1024) == 0) ? 1024 :
		ALIGN(console_offset + console_len, 1024);

	/* Capture the thread dump via the log buffer: clear it, silence all
	 * consoles so the dump isn't also printed, then snapshot the buffer
	 * back out to the partition. */
	log_buf_clear();
	for (con = console_drivers; con; con = con->next)
		con->flags &= ~CON_ENABLED;

	ctx->buf_offset = app_threads_offset;
	ctx->written = app_threads_offset;
	start_apanic_threads = 1;
	/* NOTE(review): threshold appears to skip the ftrace dump when the
	 * trace buffer is large (> 512K) -- confirm intent. */
	if (tracing_get_trace_buf_size() < (SZ_512K + 1))
		ftrace_dump(1);
	show_state_thread_filter(0, SHOW_APP_THREADS);
	ctx->buf_offset = ALIGN(ctx->written, 512);
	start_apanic_threads = 0;
	ctx->written += apanic_write_console_mmc(ctx->buf_offset);
	app_threads_len = ctx->written - app_threads_offset;

	touch_hw_watchdog();

	/* Same again for kernel threads, in the next aligned region. */
	log_buf_clear();
	threads_offset = ALIGN(ctx->written, 512);
	ctx->buf_offset = threads_offset;
	ctx->written = threads_offset;
	start_apanic_threads = 1;
	show_state_thread_filter(0, SHOW_KTHREADS);
	start_apanic_threads = 0;
	ctx->buf_offset = ALIGN(ctx->written, 512);
	ctx->written += apanic_write_console_mmc(ctx->buf_offset);
	threads_len = ctx->written - threads_offset + 512;

	touch_hw_watchdog();

	/* Re-enable the consoles we silenced above. */
	for (con = console_drivers; con; con = con->next)
		con->flags |= CON_ENABLED;

	/*
	 * Finally write the panic header
	 */
	memset(ctx->bounce, 0, PAGE_SIZE);
	hdr->magic = PANIC_MAGIC;
	hdr->version = PHDR_VERSION;

	hdr->console_offset = console_offset;
	hdr->console_length = console_len;

	hdr->app_threads_offset = app_threads_offset;
	hdr->app_threads_length = app_threads_len;

	hdr->threads_offset = threads_offset;
	hdr->threads_length = threads_len;

	rc = ctx->mmc_panic_ops->panic_write(ctx->hd, ctx->bounce, 0,
					     console_offset);
	if (rc <= 0) {
		pr_emerg("apanic: Header write failed (%d)\n", rc);
		return;
	}

	pr_emerg("apanic: Panic dump successfully written\n");
}
/*
 * twl6030_poweroff_store() - sysfs "store" handler that powers the system
 * off through the TWL6030 PMIC. Any write triggers poweroff; the written
 * value itself is ignored.
 *
 * @dev/@attr: sysfs plumbing (unused).
 * @buf/@size: user data written to the attribute.
 *
 * Returns @size to consume the entire write. The previous code returned a
 * hard-coded 10, which mis-reports the number of consumed bytes whenever
 * the write is not exactly 10 bytes and can make the VFS re-issue the
 * remainder of the buffer.
 */
static ssize_t twl6030_poweroff_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	pr_emerg("\n **** TWL6030 POWER OFF CALLED ****\n");
	twl6030_poweroff();

	return size;
}
/*
 * cypress_touchkey_panic_display() - dump Cypress touchkey firmware/module
 * version info at panic time, using a panic-safe I2C path.
 *
 * @pAdap: panic-safe I2C adapter (GPIO bit-bang) to substitute for the
 *         normal HW adapter; must not be NULL.
 *
 * Runs on the panic path: no locking, and it deliberately rewires the
 * client's adapter/algo in place since the system will not recover.
 */
void cypress_touchkey_panic_display(struct i2c_adapter *pAdap)
{
	u8 mod_ver;
	u8 fw_ver;
	u8 buf[2] = {0,};
	int ret = 0;

	/*
	 * Check driver has been started.
	 */
	if (!(info && info->client && info->client->adapter))
		return;

	/*
	 * If there is an associated LDO check to make sure it is powered, if
	 * not then we can exit as it wasn't powered when panic occurred.
	 */

	/*
	 * If pAdap is NULL then exit with message.
	 */
	if (!pAdap) {
		pr_emerg("\n\n%s Passed NULL pointer!\n", __func__);
		return;
	}

	/*
	 * If pAdap->algo_data is not NULL then this driver is using HW I2C,
	 * then change adapter to use GPIO I2C panic driver.
	 * NB!Will "probably" not work on systems with dedicated I2C pins.
	 */
	if (pAdap->algo_data) {
		info->client->adapter = pAdap;
	} else {
		/*
		 * Otherwise use panic safe SW I2C algo,
		 */
		info->client->adapter->algo = pAdap->algo;
	}

	pr_emerg("\n\n[Display of Cypress Touchkey registers]\n");

	/* Read two bytes starting at the FW version register:
	 * buf[0] = firmware version, buf[1] = module (HW) version. */
	ret = i2c_smbus_read_i2c_block_data(info->client,
			CYPRESS_FW_VER, ARRAY_SIZE(buf), buf);

	/* Expected versions differ by board revision. */
	if (system_rev >= JANICE_R0_3) {
		mod_ver = JANICE_TOUCHKEY_HW03_MOD_VER;
		fw_ver = JANICE_TOUCHKEY_M_04_FW_VER;
	} else {
		mod_ver = JANICE_TOUCHKEY_HW02_MOD_VER;
		fw_ver = JANICE_TOUCHKEY_M_03_FW_VER;
	}

	if (ret != ARRAY_SIZE(buf))
		printk(KERN_ERR "failed to read FW ver.\n");
	else if ((buf[1] == mod_ver) && (buf[0] < fw_ver)) {
		/* Module matches but firmware is older than expected. */
		printk(KERN_DEBUG "[TouchKey] %s : Mod Ver 0x%02x\n",
			__func__, buf[1]);
		printk(KERN_DEBUG "[TouchKey] FW mod 0x%02x, phone 0x%02x\n",
			buf[0], fw_ver);
	}
}
/*
 * dump_mm() - print the interesting fields of an mm_struct for debugging
 * (e.g. from BUG paths in mm/). Output is a single pr_emerg() so lines
 * cannot interleave with other CPUs' output.
 *
 * The format string and argument list are built in lockstep with the same
 * #ifdef conditions; the trailing "%s"/"" pair exists purely so every real
 * argument can end with a comma. Keep the two halves in sync when editing.
 */
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %p\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %p mm_users %d mm_count %d nr_ptes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %p flags %lx core_state %p\n"
#ifdef CONFIG_AIO
		"ioctx_table %p\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %p "
#endif
		"exe_file %p\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %p\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		"tlb_flush_pending %d\n"
#endif
		"%s",	/* This is here to hold the comma */

		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		/* nr_ptes is printed as %lu; read it atomically as long. */
		atomic_long_read((atomic_long_t *)&mm->nr_ptes),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end,
		mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		mm->tlb_flush_pending,
#endif
		""		/* This is here to not have a comma! */
		);

	/* Decode the mm's default VMA flags symbolically. */
	dump_flags(mm->def_flags, vmaflags_names,
			ARRAY_SIZE(vmaflags_names));
}
/*
 * dump_mm() - print the interesting fields of an mm_struct for debugging.
 * Newer variant: uses %px for raw pointers, %pGv to decode def_flags, and
 * mm_pgtables_bytes()/atomic tlb_flush_pending accessors.
 *
 * The format string and the argument list are built under the same #ifdef
 * conditions and must stay in lockstep; keep both halves in sync when
 * adding or removing a field.
 */
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap,
		/* seqnum is printed as %llu; widen explicitly. */
		(long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end,
		mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
/*
 * hv_message_intr() - downcall handler for Tile hypervisor messages.
 *
 * @regs:   interrupted register state (saved/restored as the IRQ regs).
 * @intnum: interrupt number (unused here).
 *
 * Drains all pending hypervisor messages in a loop: tile-to-tile messages
 * carry an IPI tag that is dispatched via evaluate_message(); HV_MSG_INTR
 * messages carry a driver callback pointer that is invoked directly.
 * Panics on receive errors and on a spurious downcall with no messages.
 */
void hv_message_intr(struct pt_regs *regs, int intnum)
{
	/*
	 * We enter with interrupts disabled and leave them disabled,
	 * to match expectations of called functions (e.g.
	 * do_ccupdate_local() in mm/slab.c). This is also consistent
	 * with normal call entry for device interrupts.
	 */

	int message[HV_MAX_MESSAGE_SIZE/sizeof(int)];
	HV_RcvMsgInfo rmi;
	int nmsgs = 0;

	/* Track time spent here in an interrupt context */
	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("hv_message_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	while (1) {
		rmi = hv_receive_message(__get_cpu_var(msg_state),
					 (HV_VirtAddr) message,
					 sizeof(message));
#ifdef CONFIG_DATAPLANE
		if (dataplane_debug(smp_processor_id()))
			pr_warn("cpu %d: dataplane recv msg len %d, src %d\n",
				smp_processor_id(), rmi.msglen, rmi.source);
#endif
		/* msglen == 0: queue drained; < 0: receive error. */
		if (rmi.msglen == 0)
			break;

		if (rmi.msglen < 0)
			panic("hv_receive_message failed: %d", rmi.msglen);

		++nmsgs;

		if (rmi.source == HV_MSG_TILE) {
			int tag;

			/* we just send tags for now */
			BUG_ON(rmi.msglen != sizeof(int));

			tag = message[0];
#ifdef CONFIG_SMP
			evaluate_message(message[0]);
#else
			panic("Received IPI message %d in UP mode", tag);
#endif
		} else if (rmi.source == HV_MSG_INTR) {
			/* Driver interrupt: intarg is a hv_driver_cb whose
			 * callback we invoke with the message's intdata. */
			HV_IntrMsg *him = (HV_IntrMsg *)message;
			struct hv_driver_cb *cb =
				(struct hv_driver_cb *)him->intarg;
			cb->callback(cb, him->intdata);
			__get_cpu_var(irq_stat).irq_hv_msg_count++;
		}
	}

	/*
	 * We shouldn't have gotten a message downcall with no
	 * messages available.
	 */
	if (nmsgs == 0)
		panic("Message downcall invoked with no messages!");

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * mipi_cmd_ili9486_wvga_pt_init() - register the ILI9486 WVGA (320x480)
 * MIPI-DSI command-mode panel with timing/PHY parameters appropriate for
 * the MDP303 or non-MDP303 pipeline.
 *
 * Fixes relative to the previous revision:
 *  - Non-MDP303 branch assigned lcdc.h_pulse_width twice (20 then 8) and
 *    never set lcdc.h_front_porch; by symmetry with the MDP303 branch the
 *    first assignment is now h_front_porch = 20.
 *  - Removed a stray double semicolon after the bl_max assignment.
 *
 * Returns 0 on success or the negative error from device registration.
 */
static int __init mipi_cmd_ili9486_wvga_pt_init(void)
{
	int ret;

	pr_emerg("Flea-mipi_cmd_ili9486_wvga_pt_init \n");

	pinfo.xres = 320;
	pinfo.yres = 480;
	pinfo.type = MIPI_CMD_PANEL;
	pinfo.pdest = DISPLAY_1;
	pinfo.wait_cycle = 0;
	pinfo.bpp = 24;

#ifdef CONFIG_FB_MSM_MDP303
	pinfo.lcdc.h_back_porch = 100;
	pinfo.lcdc.h_front_porch = 100;
	pinfo.lcdc.h_pulse_width = 8;
	pinfo.lcdc.v_back_porch = 20;
	pinfo.lcdc.v_front_porch = 20;
	pinfo.lcdc.v_pulse_width = 1;
#else
	pinfo.lcdc.h_back_porch = 50;
	/* Was a duplicated h_pulse_width assignment; h_front_porch was
	 * never initialized in this branch. */
	pinfo.lcdc.h_front_porch = 20;
	pinfo.lcdc.h_pulse_width = 8;
	pinfo.lcdc.v_back_porch = 10;
	pinfo.lcdc.v_front_porch = 10;
	pinfo.lcdc.v_pulse_width = 5;
#endif /* CONFIG_FB_MSM_MDP303 */

	pinfo.lcdc.border_clr = 0;	/* blk */
	pinfo.lcdc.underflow_clr = 0xff;	/* blue */
	pinfo.lcdc.hsync_skew = 0;
	pinfo.bl_max = DRIVER_MAX_BACKLIGHT_LEVEL;
	pinfo.bl_min = 1;
	pinfo.fb_num = 2;
#ifdef CONFIG_FB_MSM_MDP303
	pinfo.clk_rate = 350000000;
#else
	pinfo.clk_rate = 152000000;
#endif
	pinfo.lcd.refx100 = 6790;	/* adjust refx100 to prevent tearing */

	pinfo.mipi.mode = DSI_CMD_MODE;
	pinfo.mipi.dst_format = DSI_CMD_DST_FORMAT_RGB888;
	pinfo.mipi.vc = 0;
	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
	pinfo.mipi.data_lane0 = TRUE;
#ifdef CONFIG_FB_MSM_MDP303
	pinfo.lcd.vsync_enable = TRUE;
	pinfo.lcd.v_back_porch = 2;
	pinfo.lcd.v_front_porch = 2;
	pinfo.lcd.v_pulse_width = 25;
	pinfo.lcd.vsync_notifier_period = 0;
	pinfo.lcd.hw_vsync_mode = TRUE;

	pinfo.mipi.data_lane1 = FALSE;
	pinfo.mipi.t_clk_post = 0x20;
	pinfo.mipi.t_clk_pre = 0x2F;
	pinfo.mipi.stream = 0;	/* dma_p */
	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.te_sel = 1;	/* TE from vsync gpio */
	pinfo.mipi.interleave_max = 1;
	pinfo.mipi.insert_dcs_cmd = TRUE;
	pinfo.mipi.wr_mem_continue = 0x3c;
	pinfo.mipi.wr_mem_start = 0x2c;
	pinfo.mipi.dsi_phy_db = &dsi_cmd_mode_phy_db;
	pinfo.mipi.tx_eot_append = 0x01;
	pinfo.mipi.rx_eot_ignore = 0;
	pinfo.mipi.dlane_swap = 0x01;
#else
	pinfo.mipi.data_lane1 = FALSE;
	pinfo.mipi.t_clk_post = 0x18;
	pinfo.mipi.t_clk_pre = 0x14;
	pinfo.mipi.stream = 0;	/* dma_p */
	pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
	pinfo.mipi.te_sel = 1;	/* TE from vsycn gpio */
	pinfo.mipi.interleave_max = 1;
	pinfo.mipi.insert_dcs_cmd = TRUE;
	pinfo.mipi.wr_mem_continue = 0x3c;
	pinfo.mipi.wr_mem_start = 0x2c;
	pinfo.mipi.dsi_phy_db = &dsi_cmd_mode_phy_db;
#endif /* CONFIG_FB_MSM_MDP303 */

	pr_emerg("Flea-mipi_cmd_ili9486_wvga_pt_init_set_bl \n");

	ret = mipi_ili9486_device_register(&pinfo, MIPI_DSI_PRIM,
						MIPI_DSI_PANEL_WVGA);
	if (ret)
		pr_err("%s: failed to register device!\n", __func__);

	return ret;
}