int LGE_ErrorHandler_Main(int crash_side, char *message)
{
	char *kmem_buf;

	LG_ErrorHandler_enable = 1;
	raw_local_irq_enable();

	kmem_buf = kmalloc(LGE_ERROR_MAX_ROW * LGE_ERROR_MAX_COLUMN, GFP_ATOMIC);
	if (kmem_buf)	// GFP_ATOMIC allocation may fail; only copy if it succeeded
		memcpy(kmem_buf, message, LGE_ERROR_MAX_ROW * LGE_ERROR_MAX_COLUMN);

	switch (crash_side) {
	case MODEM_CRASH:
		display_info_LCD(crash_side, message);
		break;
	case APPL_CRASH:
		display_info_LCD(crash_side, message);
		break;
	}

	kfree(kmem_buf);	// kfree(NULL) is a no-op

	raw_local_irq_disable();
	preempt_disable();
	mdelay(100);

	while (1) {
		// 1. Check Volume UP Key
		gpio_direction_output(34, 1);
		gpio_direction_output(37, 0);	//volume down
		gpio_direction_output(38, 1);	//volume up
		gpio_direction_input(34);
		if (gpio_get_value(34) == 0) {
			printk("volup\n");
			return SMSM_SYSTEM_REBOOT;	//volume up key is pressed
		}
		mdelay(100);

		// 2. Check Volume DOWN Key
		gpio_direction_output(34, 1);
		gpio_direction_output(37, 1);	//volume down
		gpio_direction_output(38, 0);
		gpio_direction_input(34);
		if (gpio_get_value(34) == 0) {
			printk("voldown\n");
			return SMSM_SYSTEM_REBOOT;	//volume down key is pressed
		}
		mdelay(100);
	}
}
static int rcu_debugfs_show(struct seq_file *m, void *unused)
{
	int cpu, q, s[2], msecs;

	raw_local_irq_disable();
	msecs = div_s64(sched_clock() - rcu_timestamp, NSEC_PER_MSEC);
	raw_local_irq_enable();

	seq_printf(m, "%14u: #batches seen\n", rcu_stats.nbatches);
	seq_printf(m, "%14u: #barriers seen\n", atomic_read(&rcu_stats.nbarriers));
	seq_printf(m, "%14llu: #callbacks invoked\n", rcu_stats.ninvoked);
	seq_printf(m, "%14u: #callbacks left to invoke\n", atomic_read(&rcu_stats.nleft));
	seq_printf(m, "%14u: #msecs since last end-of-batch\n", msecs);
	seq_printf(m, "%14u: #passes forced (0 is best)\n", rcu_stats.nforced);
	seq_printf(m, "\n");

	for_each_online_cpu(cpu)
		seq_printf(m, "%4d ", cpu);
	seq_printf(m, " CPU\n");

	s[1] = s[0] = 0;
	for_each_online_cpu(cpu) {
		struct rcu_data *rd = &rcu_data[cpu];
		int w = ACCESS_ONCE(rd->which) & 1;

		seq_printf(m, "%c%c%c%d ", '-',
			idle_cpu(cpu) ? 'I' : '-',
			rd->wait ? 'W' : '-', w);
		s[w]++;
	}
	seq_printf(m, " FLAGS\n");

	for (q = 0; q < 2; q++) {
		for_each_online_cpu(cpu) {
			struct rcu_data *rd = &rcu_data[cpu];
			struct rcu_list *l = &rd->cblist[q];

			seq_printf(m, "%4d ", l->count);
		}
		seq_printf(m, " Q%d%c\n", q, " *"[s[q] > s[q^1]]);
	}

	seq_printf(m, "\nFLAGS:\n");
	seq_printf(m, " I - cpu idle, 0|1 - Q0 or Q1 is current Q, other is previous Q,\n");
	seq_printf(m, " W - cpu does not permit current batch to end (waiting),\n");
	seq_printf(m, " * - marks the Q that is current for most CPUs.\n");

	return 0;
}
/***
 * panicrpt_goinpanic
 *
 * Display panic info on the LCD.
 * Return value: when it returns '1' (int), panicrpt is doing something.
 */
int panicrpt_goinpanic(int epanicpos, char *pbuf)
{
	if (!ipanicrpt_notwork) {
		if (ipanicrpt_inpanic == 0) {
			ipanicrpt_inpanic = 1;
			panicrpt_displayerr(epanicpos, pbuf);
			// //while(1) { } ;
			preempt_enable();
			raw_local_irq_enable();
			panicrpt_waitinput();
			// machine_emergency_restart(); //
		}
	}
	return ipanicrpt_notwork;
}
int LGE_ErrorHandler_Main(int crash_side, char *message)
{
	char *kmem_buf;

	LG_ErrorHandler_enable = 1;
	raw_local_irq_enable();

	/* [email protected] 10.11.04 S
	 * 0010515: temporary disable CONFIG_FRAMEBUFFER_CONSOLE because of current consumption */
#if 0
	kmem_buf = kmalloc(LGE_ERROR_MAX_ROW * LGE_ERROR_MAX_COLUMN, GFP_ATOMIC);
	memcpy(kmem_buf, message, LGE_ERROR_MAX_ROW * LGE_ERROR_MAX_COLUMN);

	switch (crash_side) {
	case MODEM_CRASH:
		display_info_LCD(crash_side, message);
		break;
	case APPL_CRASH:
		display_info_LCD(crash_side, message);
		break;
	}

	kfree(kmem_buf);
#endif
	/* [email protected] 10.11.04 E */

	raw_local_irq_disable();
	preempt_disable();

	/* BEGIN: 0006765 [email protected] 2010-06-03 */
	/* MODIFY 0006765: Fix : The kernel panic screen is disappeared just after linux kernel gets system panic */
	mdelay(100);

	while (1) {
		// 1. Check Volume Key
		/* [email protected] 10.11.03 S
		   0010460: add HW revision feature to vol key gpio in kernel panic */
#if CONFIG_MACH_LGE_BRYCE
#if defined(LG_HW_REV2)
		gpio_direction_output(19, 1);
		gpio_direction_output(56, 0);	//volume down
		gpio_direction_output(20, 1);	//volume up
		gpio_direction_input(19);
		if (gpio_get_value(19) == 0) {
			printk("### vol down key pressed \n");
			mdelay(100);
			/* [email protected] 10.11.03 S
			   0010472: each volup/voldown key mapped separately to reboot/download mode. */
			return SMSM_SYSTEM_DOWNLOAD;
			/* [email protected] 10.11.03 E */
		}
		mdelay(100);

		gpio_direction_output(19, 1);
		gpio_direction_output(20, 0);
		gpio_direction_output(56, 1);
		gpio_direction_input(19);
		if (gpio_get_value(19) == 0) {
			printk("### vol up key pressed\n");
			mdelay(100);
			return SMSM_SYSTEM_REBOOT;
		}
		mdelay(100);
#endif

#if defined(LG_HW_REV3) || defined(LG_HW_REV4) || defined(LG_HW_REV5) || defined(LG_HW_REV6) || defined(LG_HW_REV7)
		gpio_direction_output(20, 1);
		gpio_direction_output(56, 0);	//volume down
		gpio_direction_output(19, 1);	//volume up
		gpio_direction_input(20);
		if (gpio_get_value(20) == 0) {
			printk("### vol down key pressed \n");
			mdelay(100);
			/* [email protected] 10.11.03 S
			   0010472: each volup/voldown key mapped separately to reboot/download mode. */
			return SMSM_SYSTEM_DOWNLOAD;
			/* [email protected] 10.11.03 E */
		}
		mdelay(100);

		gpio_direction_output(20, 1);
		gpio_direction_output(19, 0);
		gpio_direction_output(56, 1);
		gpio_direction_input(20);
		if (gpio_get_value(20) == 0) {
			printk("### vol up key pressed\n");
			mdelay(100);
			return SMSM_SYSTEM_REBOOT;
		}
		mdelay(100);
#endif
#endif
		/* [email protected] 10.11.03 E */
	}
	/* END: 0006765 [email protected] 2010-06-03 */
}
/*
 * do_page_fault()
 *	Handle all page faults in the system.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long missqw0,
			      unsigned long missqw1)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long acctyp;
	const struct exception_table_entry *fix;
	pgd_t *pgd;
	unsigned long address;
	int user = 0;
	int fault = 0;

	/*
	 * We should be able to handle an interrupt while resolving
	 * a fault; so enable interrupts now.
	 */
	raw_local_irq_enable();

	address = MMU_MISSQW1_VPN_GET(missqw1) << MMU_VPN_SHIFT;
	fault_printk(FAULT_DBG_TRACE,
		"FAULT[%d]: ti[%p], missqw0=%08lx, missqw1=%08lx, addr=%08lx\n",
		raw_smp_processor_id(), (void *)current_thread_info(),
		missqw0, missqw1, address);

	/*
	 * If we're in an interrupt, we must not take a fault.
	 */
	if (in_atomic()) {
		printk(KERN_CRIT "FAULT[%d]: page fault on interrupt stack: preempt_count=%x\n",
			raw_smp_processor_id(), preempt_count());
		goto no_context;
	}

	/*
	 * See if we have a valid mm and what mode the PC was in.
	 */
	tsk = current;
	mm = tsk->mm;
	user = user_mode(regs);

	/*
	 * If there is no mm (e.g. a kernel thread), we must not take a fault.
	 */
	if (!mm) {
		printk(KERN_CRIT "FAULT[%d]: page fault without task mm structure\n",
			raw_smp_processor_id());
		goto no_context;
	}

	/*
	 * See if the faulting address is within the task address space.
	 */
	if (address > TASK_SIZE) {
		printk(KERN_CRIT "FAULT[%d]: page fault beyond user space\n",
			raw_smp_processor_id());
		goto no_context;
	}

	/*
	 * Make sure that the root page directory handed to us by
	 * the hardware is the same as for this task. If not, we failed
	 * to properly update the PGD in switch_mm() or we have a spurious
	 * hardware fault for a different PGD.
	 */
	down_read(&mm->mmap_sem);
	pgd = (pgd_t *)(MMU_MISSQW0_PGD_GET(missqw0) << MMU_MISSQW0_PGD_SHIFT);
	if (mm->pgd != pgd) {
		up_read(&mm->mmap_sem);
		printk(KERN_CRIT "FAULT[%d]: invalid root page table: %p - %p\n",
			raw_smp_processor_id(), mm->pgd, pgd);
		goto no_context;
	}

	/*
	 * Privilege violations are reported as part of the MMU miss information
	 * and not as a trap (as is done on other architectures). Thus we must
	 * check for priv violations and report either a user mode failure
	 * or kill the kernel.
	 */
	if (fault_is_priv(regs, missqw0, missqw1)) {
		fault_printk(FAULT_DBG_ERROR, "FAULT[%d]: privilege violation\n",
			raw_smp_processor_id());
		goto bad_area;
	}

	/*
	 * Read the VMA list and find if we have a VMA covering this
	 * fault address. The find returns the vma where vma->vm_end
	 * > address. Make sure to check the start!
	 *
	 * Make sure that all paths release the semaphore.
	 */
	vma = find_vma(mm, address);
	if (!vma) {
		fault_printk(FAULT_DBG_ERROR, "FAULT[%d]: unable to find address: %lx\n",
			raw_smp_processor_id(), address);
		goto bad_area;
	}

	/*
	 * Is this in a hole in the address space? If so, see if the
	 * stack region can be expanded to cover the hole.
	 */
	if (vma->vm_start > address) {
		if (!(vma->vm_flags & VM_GROWSDOWN)) {
			fault_printk(FAULT_DBG_ERROR, "FAULT[%d]: region is not expandable\n",
				raw_smp_processor_id());
			goto bad_area;
		}
		if (expand_stack(vma, address)) {
			fault_printk(FAULT_DBG_ERROR, "FAULT[%d]: expand_stack failed\n",
				raw_smp_processor_id());
			goto bad_area;
		}
	}

	/*
	 * We have a valid VMA for this address; now determine the type
	 * of access.
	 */
	acctyp = fault_access_type(missqw0, missqw1);
	if ((vma->vm_flags & acctyp) != acctyp) {
		/*
		 * The access type was not allowed for this region,
		 * thus we now have a bad_area.
		 */
		fault_printk(FAULT_DBG_ERROR, "FAULT[%d]: illegal access type: %lx, %lx\n",
			raw_smp_processor_id(), vma->vm_flags & acctyp, acctyp);
		goto bad_area;
	}

	/*
	 * Call the generic fault handler to resolve the fault. If Linux
	 * can not resolve the fault, we must terminate instead of
	 * endlessly retrying the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, (acctyp & VM_WRITE));
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We hit a shared mapping outside of the file, or some
		 * other thing happened to us that made us unable to
		 * handle the page fault gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			up_read(&mm->mmap_sem);
			if (user) {
				printk(KERN_CRIT "FAULT[%d]: killing process group: %s\n",
					raw_smp_processor_id(), current->comm);
				do_group_exit(SIGKILL);
				return;
			}
			printk(KERN_CRIT "FAULT[%d]: OOM in kernel space: %s\n",
				raw_smp_processor_id(), current->comm);
			goto no_context;
		}
		if (fault & VM_FAULT_SIGBUS) {
			fault_printk(FAULT_DBG_ERROR, "FAULT[%d]: handle_mm_fault returned SIGBUS\n",
				raw_smp_processor_id());
			goto bad_area;
		}
		printk(KERN_CRIT "Linux changed the definition of VM_FAULT_ERROR: %x\n",
			fault);
		up_read(&mm->mmap_sem);
		goto no_context;
	}

#if defined(CONFIG_DEBUG_VM)
	/*
	 * Make sure the fault was resolved, if not stop. This should
	 * only happen during bring-up as it would cause an infinite
	 * fault in real-life. The primary cause would be different
	 * views of where an address lies in the 2 level page table.
	 */
	if (!fault_is_resolved(regs, missqw0, missqw1)) {
		printk(KERN_CRIT "do_page_fault: fault not resolved\n");
		goto no_context;
	}
#endif

	/*
	 * Count the fault types.
	 */
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

	up_read(&mm->mmap_sem);
	return;

bad_area:
	/*
	 * Something tried to access memory that is not mappable. If
	 * this is a user space request, we will signal the user. If
	 * this is the kernel, we die (unless we can fix it with the
	 * exception table).
	 */
	up_read(&mm->mmap_sem);
	if (user) {
		struct siginfo si;

		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &si, tsk);
		return;
	}

	/* Fall through */
no_context:
	/*
	 * Since we can enter here for both user and non-user mode without the above
	 * check, we need to re-check that we are not in user mode before trying the
	 * exception table. This prevents a bogus user program from loading a PC with
	 * an exception table address and trying to get the kernel to do "funny" things.
	 *
	 * Try and see if the fault is in the exception table. If found,
	 * move the PC to the fixup location; otherwise terminate.
	 */
	if (!user) {
		fix = search_exception_tables(regs->pc);
		if (fix) {
			regs->pc = fix->fixup;
			return;
		}
	}
	fault_terminate(regs, missqw0, missqw1);
}
int LGE_ErrorHandler_Main(int crash_side, char *message)
{
	ram_console_setpanic();

	if (hidden_reset_enable) {
		if (crash_side == MODEM_CRASH) {
			unsigned *temp;

			printk(KERN_INFO "%s: arm9 has crashed...\n", __func__);
			printk(KERN_INFO "%s\n", message);
			atomic_notifier_call_chain(&panic_notifier_list, 0,
						   "arm9 has crashed...\n");

			temp = lge_get_fb_copy_virt_addr();
			*temp = 0x12345678;
			printk(KERN_INFO "%s: hidden magic %x\n", __func__, temp[0]);

			return SMSM_SYSTEM_REBOOT;
		}
		return 0;
	}

	if (BLUE_ERROR_HANDLER_LEVEL != 0) {
		if (get_suspend_state() != PM_SUSPEND_ON) {
			lcd_suspend = 0;
		}

		switch (crash_side) {
		case MODEM_CRASH:
		case APPL_CRASH:
		case ANDROID_CRASH:
			if (!LG_ErrorHandler_enable) {
				LG_ErrorHandler_enable = 1;
				raw_local_irq_enable();
				if (message != NULL)
					display_info_LCD(crash_side, message);
			}
			break;
		case ANDROID_DISPLAY_INFO:
			if (message != NULL)
				display_info_LCD(crash_side, message);
			return 0;
		default:
			break;
		}
	}

	raw_local_irq_disable();
	preempt_disable();
	smsm_reset_modem(SMSM_APPS_SHUTDOWN);

	if (BLUE_ERROR_HANDLER_LEVEL == 0) {
		mdelay(100);
		return SMSM_SYSTEM_REBOOT;
	}

	while (1) {
		gpio_set_value(36, 0);
		gpio_set_value(32, 1);
		gpio_set_value(33, 1);

		if (gpio_get_value(38) == 0) {
			printk("Pressed Volume up key\n");
			return SMSM_SYSTEM_DOWNLOAD;
		} else if (gpio_get_value(37) == 0) {
			printk("Pressed Volume down key\n");
			return SMSM_SYSTEM_REBOOT;
		}
		mdelay(200);
	}
}
int main()
{
	raw_local_irq_enable();
	return 0;
}