static int rk_lpmode_enter(unsigned long arg) { //RKPM_DDR_PFUN(slp_setting(rkpm_jdg_sram_ctrbits),slp_setting); RKPM_DDR_FUN(slp_setting); local_flush_tlb_all(); flush_cache_all(); outer_flush_all(); outer_disable(); cpu_proc_fin(); //outer_inv_all();// ??? // l2x0_inv_all_pm(); //rk319x is not need flush_cache_all(); rkpm_ddr_printch('d'); //rkpm_udelay(3*10); dsb(); wfi(); rkpm_ddr_printch('D'); return 0; }
/*
 * Reboot the machine: disable caches, install 1:1 mappings for the soft
 * boot, run the registered restart handlers, then fall back to the
 * architecture reset.  If even that fails, report and halt.
 */
void machine_restart(char *cmd)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode_nds32);

	/* Execute kernel restart handler call chain */
	do_kernel_restart(cmd);

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(reboot_mode_nds32);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	pr_info("Reboot failed -- System halted\n");
	while (1)
		;
}
/*
 * The framework loads the hibernation image into a linked list anchored
 * at restore_pblist, for swsusp_arch_resume() to copy back to the proper
 * destinations.
 *
 * To make this work if resume is triggered from initramfs, the
 * pagetables need to be switched to allow writes to kernel mem.
 */
void notrace mtk_arch_restore_image(void)
{
	phys_reset_t phys_reset;
	struct pbe *pbe;

	/* Copy every saved page back to its original destination. */
	for (pbe = restore_pblist; pbe; pbe = pbe->next)
		copy_page(pbe->orig_address, pbe->address);

#if 0
	/* [ALPS01496758] since CA17 has cache bug, replace with the modified
	 * assemlby version */
	/* Clean and invalidate caches */
	flush_cache_all();
	/* Turn off caching */
	cpu_proc_fin();
	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();
#else
	__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2();
#endif

	/* Take out a flat memory mapping. */
	setup_mm_for_reboot();

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);

	/* Return from cpu_suspend/swsusp_arch_suspend */
	phys_reset((unsigned long)virt_to_phys(cpu_resume));

	/* Should never get here. */
	BUG();
}
/*
 * Restart by copying the branch instruction found at 0x03800000 to the
 * reset vector at address 0 and jumping there.  Caches are disabled and a
 * 1:1 mapping installed first so the jump lands on real memory.
 */
void machine_restart(char * __unused)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode);

	/*
	 * copy branch instruction to reset location and call it
	 */
	*(unsigned long *)0 = *(unsigned long *)0x03800000;
	((void (*)(void))0)();

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user! Should never happen...
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1)
		;
}
void machine_kexec(struct kimage *image) { unsigned long page_list; unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; page_list = image->head & PAGE_MASK; /* we need both effective and real address here */ reboot_code_buffer_phys = page_to_pfn(image->control_code_page) << PAGE_SHIFT; reboot_code_buffer = page_address(image->control_code_page); /* Prepare parameters for reboot_code_buffer*/ kexec_start_address = image->start; kexec_indirection_page = page_list; kexec_mach_type = machine_arch_type; kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; /* copy our kernel relocation code to the control code page */ memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); flush_icache_range((unsigned long) reboot_code_buffer, (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); cpu_proc_fin(); setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ cpu_reset(reboot_code_buffer_phys); }
static void __soft_restart(void *addr) { phys_reset_t phys_reset; /* Take out a flat memory mapping. */ setup_mm_for_reboot(); /* Clean and invalidate caches */ flush_cache_all(); /* Turn off caching */ cpu_proc_fin(); /* Push out any further dirty data, and ensure cache is empty */ flush_cache_all(); /* Push out the dirty data from external caches */ outer_disable(); /* Switch to the identity mapping. */ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); phys_reset((unsigned long)addr); /* Should never get here. */ BUG(); }
/*
 * Record the requested reset/boot flags, quiesce the CPU (IRQs off,
 * caches/MMU disabled, 1:1 reboot mapping in place) and trigger the
 * RK28 soft restart.
 */
static void kld_reboot(int reset, int boot)
{
	local_irq_disable();
	kld_set_reset_flag(reset, boot);
	cpu_proc_fin();
	setup_mm_for_reboot('r');
	rk28_soft_restart();
}
void machine_kexec(struct kimage *image) { unsigned long page_list; unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; arch_kexec(); page_list = image->head & PAGE_MASK; /* we need both effective and real address here */ reboot_code_buffer_phys = page_to_pfn(image->control_code_page) << PAGE_SHIFT; reboot_code_buffer = page_address(image->control_code_page); /* Prepare parameters for reboot_code_buffer*/ mem_text_write_kernel_word(&kexec_start_address, image->start); mem_text_write_kernel_word(&kexec_indirection_page, page_list); mem_text_write_kernel_word(&kexec_mach_type, machine_arch_type); mem_text_write_kernel_word(&kexec_boot_atags, image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET); #ifdef CONFIG_KEXEC_HARDBOOT mem_text_write_kernel_word(&kexec_hardboot, image->hardboot); #endif /* copy our kernel relocation code to the control code page */ memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); flush_icache_range((unsigned long) reboot_code_buffer, (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); if (kexec_reinit) kexec_reinit(); local_irq_disable(); local_fiq_disable(); setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ #ifdef CONFIG_KEXEC_HARDBOOT if (image->hardboot && kexec_hardboot_hook) /* Run any final machine-specific shutdown code. */ kexec_hardboot_hook(); #endif flush_cache_all(); outer_flush_all(); outer_disable(); cpu_proc_fin(); outer_inv_all(); flush_cache_all(); __virt_to_phys(cpu_reset)(reboot_code_buffer_phys); }
/*
 * Panic-time restart for MSM: drain the console, mask IRQ/FIQ, clean the
 * caches around disabling them, then invoke the SoC restart.  If the SoC
 * reset does not take effect within a second, report and halt.
 */
static void msm_panic_restart(char mode, const char *cmd)
{
	arm_machine_flush_console();
	local_irq_disable();
	local_fiq_disable();
	flush_cache_all();
	cpu_proc_fin();
	flush_cache_all();
	msm_restart(mode, cmd);
	mdelay(1000);
	printk(KERN_ERR "Reboot failed -- System halted\n");
	while (1)
		;
}
void machine_kexec(struct kimage *image) { unsigned long page_list; unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; arch_kexec(); page_list = image->head & PAGE_MASK; reboot_code_buffer_phys = page_to_pfn(image->control_code_page) << PAGE_SHIFT; reboot_code_buffer = page_address(image->control_code_page); kexec_start_address = image->start; kexec_indirection_page = page_list; kexec_mach_type = machine_arch_type; kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); flush_icache_range((unsigned long) reboot_code_buffer, (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); cpu_proc_fin(); __virt_to_phys(cpu_reset)(reboot_code_buffer_phys); }
/* This shouldn't return. */ int armboot(unsigned char* kerneladdr, int size, unsigned char* cmdline) { struct physlist* addrmap; struct tag* t; unsigned int* asmaddr; int mappos; unsigned int kernaddr; /* At this point, we will allocate 64k for a list of things that need to be mapped later. */ addrmap = (struct physlist*)kmalloc(64 * 1024, GFP_KERNEL); if (!addrmap) return -ENOMEM; /* We start at the first element of the map. */ mappos = 0; /* Now we need to allocate 64kbytes for our tagged list. */ t = (struct tag*)kmalloc(64*1024, GFP_KERNEL); if (!t) return -ENOMEM; /* Set up our tagged list reloc block. */ addrmap[mappos].physaddr = virt_to_phys(t); addrmap[mappos].newphysaddr = TAGBASE; addrmap[mappos].last = 0; /* Now set up the tagged list. */ t->hdr.tag = ATAG_CORE; t->hdr.size = tag_size(tag_core); t->u.core.flags = 0; t->u.core.pagesize = 0x1000; t->u.core.rootdev = 0x00FF; t = tag_next(t); if (cmdline) { t->hdr.tag = ATAG_CMDLINE; t->hdr.size = (sizeof(struct tag_header)+strlen(cmdline)+4) >> 2; strcpy(t->u.cmdline.cmdline, cmdline); t = tag_next(t); } t->hdr.tag = ATAG_MEM; /* We assume that memory is contiguous, */ t->hdr.size = tag_size(tag_mem32); /* and we won't let anyone tell us otherwise. */ t->u.mem.size = (num_physpages >> (20-PAGE_SHIFT)) * 1024*1024; t->u.mem.start = MEMSTART; t = tag_next(t); t->hdr.tag = ATAG_NONE; t->hdr.size = 0; t = tag_next(t); /* Now just fill in the size, and we can begin relocating the kernel. */ addrmap[mappos].size = virt_to_phys(t) - addrmap[mappos].physaddr; mappos++; kernaddr = MEMSTART+0x8000; printk(KERN_EMERG "armboot: Placing new kernel into temporary RAM...\n"); /* Begin relocating. We're going to have to trust that the kernel won't give us any addresses in the first few megs of RAM. */ while(size) { int relocsize; unsigned char* block; /* Make sure it fits within our block... */ relocsize = (size > 64*1024) ? 64*1024 : size; size -= relocsize; /* Allocate said block... 
(always 64k to please your kmallocness) */ block = (unsigned char*)kmalloc(64*1024, GFP_KERNEL); if (!block) return -ENOMEM; /* Copy kernel into said block... */ memcpy(block, kerneladdr, relocsize); /* Set up our allocation tables. */ addrmap[mappos].physaddr = virt_to_phys(block); addrmap[mappos].newphysaddr = kernaddr; addrmap[mappos].size = relocsize; addrmap[mappos].last = 0; mappos++; kernaddr += relocsize; kerneladdr += relocsize; } addrmap[mappos-1].last = 1; /* Nowadays we get an address from the kernel. */ asmaddr = (unsigned int*)kmalloc(64*1024, GFP_KERNEL); memcpy(asmaddr, &armboot_asm, 16384); // 16kb armboot_ptr = (void(*)(int,int,int,int))asmaddr; cpu_proc_fin (); printk(KERN_EMERG "armboot: Booting new kernel...\n"); armboot_ptr(virt_to_phys(addrmap), ((int)&relocdone) - ((int)&armboot_asm) + virt_to_phys(asmaddr), MEMSTART+0x8000, machine_arch_type); return 0; }
struct ipanic_header *ipanic_header_from_sd(unsigned int offset, unsigned int magic) { struct ipanic_data_header *dheader; int dt; char str[256]; size_t size = 0; struct ipanic_header *header; struct ipanic_data_header dheader_header = { .type = IPANIC_DT_HEADER, .offset = offset, .used = sizeof(struct ipanic_header), }; header = (struct ipanic_header *)ipanic_data_from_sd(&dheader_header, 0); if (IS_ERR_OR_NULL((void *)header)) { LOGD("read header failed[%ld]\n", PTR_ERR((void *)header)); header = NULL; } else if (header->magic != magic) { LOGD("no ipanic data[%x]\n", header->magic); kfree(header); header = NULL; ipanic_erase(); } else { for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) { dheader = &header->data_hdr[dt]; if (dheader->valid) { size += snprintf(str + size, 256 - size, "%s[%x@%x],", dheader->name, dheader->used, dheader->offset); } } LOGD("ipanic data available^v^%s^v^\n", str); } return header; } struct aee_oops *ipanic_oops_from_sd(void) { struct aee_oops *oops = NULL; struct ipanic_header *hdr = NULL; struct ipanic_data_header *dheader; char *data; int i; hdr = ipanic_header_from_sd(0, AEE_IPANIC_MAGIC); if (hdr == NULL) { return NULL; } oops = aee_oops_create(AE_DEFECT_FATAL, AE_KE, IPANIC_MODULE_TAG); if (oops == NULL) { LOGE("%s: can not allocate buffer\n", __func__); return NULL; } for (i = IPANIC_DT_HEADER + 1; i < IPANIC_DT_RESERVED31; i++) { dheader = &hdr->data_hdr[i]; if (dheader->valid == 0) { continue; } data = ipanic_data_from_sd(dheader, 1); if (data) { switch (i) { case IPANIC_DT_KERNEL_LOG: oops->console = data; oops->console_len = dheader->used; break; case IPANIC_DT_MINI_RDUMP: oops->mini_rdump = data; oops->mini_rdump_len = dheader->used; break; case IPANIC_DT_MAIN_LOG: oops->android_main = data; oops->android_main_len = dheader->used; break; case IPANIC_DT_SYSTEM_LOG: oops->android_system = data; oops->android_system_len = dheader->used; break; case IPANIC_DT_EVENTS_LOG: /* Todo .. 
*/ break; case IPANIC_DT_RADIO_LOG: oops->android_radio = data; oops->android_radio_len = dheader->used; break; case IPANIC_DT_CURRENT_TSK: memcpy(oops->process_path, data, sizeof(struct aee_process_info)); break; case IPANIC_DT_MMPROFILE: oops->mmprofile = data; oops->mmprofile_len = dheader->used; break; default: LOGI("%s: [%d] NOT USED.\n", __func__, i); } } else { LOGW("%s: read %s failed, %x@%x\n", __func__, dheader->name, dheader->used, dheader->offset); } } return oops; } int ipanic(struct notifier_block *this, unsigned long event, void *ptr) { struct ipanic_data_header *dheader; struct kmsg_dumper dumper; ipanic_atf_log_rec_t atf_log = {ATF_LOG_SIZE, 0, 0}; int dt; int errno; struct ipanic_header *ipanic_hdr; aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_START); aee_rr_rec_exp_type(2); bust_spinlocks(1); spin_lock_irq(&ipanic_lock); aee_disable_api(); mrdump_mini_ke_cpu_regs(NULL); ipanic_mrdump_mini(AEE_REBOOT_MODE_KERNEL_PANIC, "kernel PANIC"); if (!ipanic_data_is_valid(IPANIC_DT_KERNEL_LOG)) { ipanic_klog_region(&dumper); errno = ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper); if (errno == -1) aee_nested_printf("$"); } ipanic_klog_region(&dumper); errno = ipanic_data_to_sd(IPANIC_DT_OOPS_LOG, &dumper); if (errno == -1) aee_nested_printf("$"); ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0); /* kick wdt after save the most critical infos */ ipanic_kick_wdt(); ipanic_data_to_sd(IPANIC_DT_MAIN_LOG, (void *)1); ipanic_data_to_sd(IPANIC_DT_SYSTEM_LOG, (void *)4); ipanic_data_to_sd(IPANIC_DT_EVENTS_LOG, (void *)2); ipanic_data_to_sd(IPANIC_DT_RADIO_LOG, (void *)3); aee_wdt_dump_info(); ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_WDT_LOG, &dumper); #ifdef CONFIG_MTK_WQ_DEBUG mt_dump_wq_debugger(); #endif ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_WQ_LOG, &dumper); ipanic_data_to_sd(IPANIC_DT_MMPROFILE, 0); ipanic_data_to_sd(IPANIC_DT_ATF_LOG, &atf_log); errno = ipanic_header_to_sd(0); if (!IS_ERR(ERR_PTR(errno))) mrdump_mini_ipanic_done(); 
ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_LAST_LOG, &dumper); LOGD("ipanic done^_^"); ipanic_hdr = ipanic_header(); for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) { dheader = &ipanic_hdr->data_hdr[dt]; if (dheader->valid) { LOGD("%s[%x@%x],", dheader->name, dheader->used, dheader->offset); } } LOGD("^_^\n"); aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_DONE); return NOTIFY_DONE; } void ipanic_recursive_ke(struct pt_regs *regs, struct pt_regs *excp_regs, int cpu) { int errno; struct kmsg_dumper dumper; aee_nested_printf("minidump\n"); aee_rr_rec_exp_type(3); bust_spinlocks(1); flush_cache_all(); #ifdef __aarch64__ cpu_cache_off(); #else cpu_proc_fin(); #endif mrdump_mini_ke_cpu_regs(excp_regs); mrdump_mini_per_cpu_regs(cpu, regs); flush_cache_all(); ipanic_mrdump_mini(AEE_REBOOT_MODE_NESTED_EXCEPTION, "Nested Panic"); ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0); ipanic_kick_wdt(); ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper); errno = ipanic_header_to_sd(0); if (!IS_ERR(ERR_PTR(errno))) mrdump_mini_ipanic_done(); if (ipanic_dt_active(IPANIC_DT_RAM_DUMP)) { aee_nested_printf("RAMDUMP.\n"); __mrdump_create_oops_dump(AEE_REBOOT_MODE_NESTED_EXCEPTION, excp_regs, "Nested Panic"); } bust_spinlocks(0); }
/*
 * APM ioctl dispatcher: suspend/sleep control, wakeup-source masking
 * (PWER), power-off, baseband reset, reflash/pass-through reboot via the
 * watchdog, and IPM configuration get/set.
 *
 * NOTE(review): the original line breaks around the `//LIN` trailing
 * comments are ambiguous in this copy — confirm against the upstream file
 * which of the adjacent statements were commented out before reformatting.
 * The REFLASH/PASSTHRU cases deliberately never return: they write a flag
 * word at FLAG_ADDR, arm watchdog match register OSMR3 ~10 ms ahead, put
 * SDRAM into self-refresh (MDREFR) and spin until the watchdog fires.
 */
static int do_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg) { struct apm_user * as; struct pm_dev * pm; struct ipm_config conf; as = filp->private_data; if (check_apm_user(as, "ioctl")) return -EIO; memset(&conf, 0, sizeof(conf)); switch (cmd) { case APM_IOC_SUSPEND: pm_do_suspend(CPUMODE_SLEEP); break; case APM_IOC_SET_WAKEUP: if ((pm = pm_find((pm_dev_t)arg,NULL)) == NULL) return -EINVAL; pm_send(pm,PM_SET_WAKEUP,NULL); break; case APM_IOC_SLEEP: pm_go_sleep(arg); break; case APM_IOC_SET_SPROF_WIN: sleep_to = arg * HZ; APM_DPRINTK("do_ioctl: sleep timeout %ld\n", arg); break; case APM_IOC_WAKEUP_ENABLE: PWER |= arg; APM_DPRINTK("do_ioctl: enable wakeup source:PWER=0x%x\n", PWER); break; case APM_IOC_WAKEUP_DISABLE: PWER &= ~arg; APM_DPRINTK("do_ioctl: disable wakeup source:PWER=0x%x\n", PWER); break; case APM_IOC_POWEROFF: APM_DPRINTK("do_ioctl: do power off\n"); /* here all device should response ok */ pm_send_all(PM_SUSPEND, (void *)3); pm_do_poweroff(); pm_send_all(PM_RESUME, (void *)0); break; case APM_IOC_RESET_BP: APM_DPRINTK("do_ioctl: reset bp\n"); GPCR(GPIO_BB_RESET) = GPIO_bit(GPIO_BB_RESET); mdelay(1); GPSR(GPIO_BB_RESET) = GPIO_bit(GPIO_BB_RESET); break; case APM_IOC_USEROFF_ENABLE: APM_DPRINTK("do_ioctl: useroff support enable\n"); user_off_available = (int)arg; break; case APM_IOC_NOTIFY_BP: break; case APM_IOC_REFLASH: cpu_proc_fin(); *(unsigned long *)(phys_to_virt(FLAG_ADDR)) = REFLASH_FLAG; // power_ic_periph_set_usb_pull_up(0); //LIN mdelay(1000); //LIN let GPIO control the connectivity #include <linux/power_ic.h> set_GPIO_mode(GPIO_USB_READY|GPIO_OUT); clr_GPIO(GPIO_USB_READY); power_ic_periph_set_usb_pull_up(0); mdelay(10); power_ic_set_reg_bit(POWER_IC_REG_EOC_CONN_CONTROL,19,1);//LIN set USB_CNTRL(bit19) to disable emu control mdelay(1000); //LIN power_ic_periph_set_usb_pull_up(1); /* Initialize the watchdog and let it fire */ OWER = OWER_WME; OSSR = OSSR_M3; OSMR3 = OSCR + CLOCK_TICK_RATE/100; /* ... 
in 10 ms */ MDREFR |= MDREFR_SLFRSH; while(1); break; case APM_IOC_PASSTHRU: cpu_proc_fin(); *(unsigned long *)(phys_to_virt(FLAG_ADDR)) = PASS_THRU_FLAG; power_ic_periph_set_usb_pull_up(0); mdelay(1000); power_ic_periph_set_usb_pull_up(1); /* Initialize the watchdog and let it fire */ OWER = OWER_WME; OSSR = OSSR_M3; OSMR3 = OSCR + CLOCK_TICK_RATE/100; /* ... in 10 ms */ MDREFR |= MDREFR_SLFRSH; while(1); break; case APM_IOC_SET_IPROF_WIN: /* set profile window size */ break; case APM_IOC_STARTPMU: if( pipm_start_pmu !=NULL ) pipm_start_pmu(); break; case APM_IOC_GET_IPM_CONFIG: get_ipm_config(&conf); return (copy_to_user((void *)arg, &conf,sizeof(conf)))? -EFAULT:0; break; case APM_IOC_SET_IPM_CONFIG: if(copy_from_user(&conf,(void *)arg,sizeof(conf))) return -EFAULT; return set_ipm_config(&conf); break; default: return -EINVAL; } return 0; }