int mshci_s3c_dma_map_sg(struct mshci_host *host, struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, int flush_type) { struct scatterlist *s; int i, j; static int count=0; if (flush_type == 2) { spin_unlock_irqrestore(&host->lock, host->sl_flags); flush_all_cpu_caches(); outer_flush_all(); spin_lock_irqsave(&host->lock, host->sl_flags); } else if(flush_type == 1) { spin_unlock_irqrestore(&host->lock, host->sl_flags); flush_all_cpu_caches(); spin_lock_irqsave(&host->lock, host->sl_flags); } for_each_sg(sg, s, nents, i) { s->dma_address = mshci_s3c_dma_map_page(dev, sg_page(s), s->offset, s->length, dir, flush_type); if (dma_mapping_error(dev, s->dma_address)) { goto bad_mapping; } }
/*
 * msm7k_fiq_handler - FIQ entry called from the assembly FIQ vector.
 *
 * Rebuilds a pt_regs snapshot for the current CPU from the context the
 * FIQ assembly stub saved into msm_dump_cpu_ctx[], prints a backtrace,
 * and on the first FIQ forwards a secure soft-IRQ to every other online
 * CPU (MSM8625/8625Q only) so they dump their state too.  Finishes with
 * a full cache flush so the dump survives a reset.
 */
void msm7k_fiq_handler(void)
{
	struct pt_regs ctx_regs;
	/* static: shared scratch mask is fine here, FIQs are serialised by the lock */
	static cpumask_t fiq_cpu_mask;
	int this_cpu;
	unsigned long msm_fiq_flags;

	spin_lock_irqsave(&msm_fiq_lock, msm_fiq_flags);
	this_cpu = smp_processor_id();
	pr_info("%s: Fiq is received on CPU%d\n", __func__, this_cpu);
	fiq_counter += 1;
	/* Reconstruct the interrupted context: fiq_r14 holds the preempted PC */
	ctx_regs.ARM_pc = msm_dump_cpu_ctx[this_cpu].fiq_r14;
	ctx_regs.ARM_lr = msm_dump_cpu_ctx[this_cpu].svc_r14;
	ctx_regs.ARM_sp = msm_dump_cpu_ctx[this_cpu].svc_r13;
	ctx_regs.ARM_fp = msm_dump_cpu_ctx[this_cpu].usr_r11;
	unwind_backtrace(&ctx_regs, current);
	if (fiq_counter == 1 && (cpu_is_msm8625() || cpu_is_msm8625q())) {
		/* First FIQ only: ask all other online CPUs to dump as well */
		cpumask_copy(&fiq_cpu_mask, cpu_online_mask);
		cpu_clear(this_cpu, fiq_cpu_mask);
		gic_raise_secure_softirq(&fiq_cpu_mask, GIC_SECURE_SOFT_IRQ);
	}
	/* Push the dump out of L1/L2 so it is visible after reset */
	flush_cache_all();
	outer_flush_all();
	spin_unlock_irqrestore(&msm_fiq_lock, msm_fiq_flags);
	return;
}
/*
 * mb_rproc_kick - notify the remote firmware that new messages are
 * queued on virtqueue @vqid.
 *
 * Caches are flushed first so the firmware sees the vring contents,
 * then the matching GPIO "doorbell" line is pulsed low->high (both
 * lines are driven low, then only the selected one is raised after a
 * settle delay).
 */
static void mb_rproc_kick(struct rproc *rproc, int vqid)
{
	struct device *rdev = rproc->dev.parent;
	struct platform_device *rpdev = to_platform_device(rdev);
	struct mb_rproc_pdata *pdata = platform_get_drvdata(rpdev);

	dev_dbg(rdev, "KICK Firmware to start send messages vqid %d\n", vqid);

	/* make the vring updates visible to the remote side */
	flush_cache_all();
	outer_flush_all();

	/* Send swirq to firmware: drop both doorbell lines first */
	gpio_set_value(pdata->vring0, 0);
	gpio_set_value(pdata->vring1, 0);
	dsb();

	if (vqid) {
		udelay(100);
		gpio_set_value(pdata->vring1, 1);
		dsb();
	} else {
		udelay(500);
		gpio_set_value(pdata->vring0, 1);
		dsb();
	}
}
static void mx2_reboot_internal(const char *cmd) { unsigned long custom_val; local_irq_disable(); if(cmd) { if (strstr(cmd, "charge")) __raw_writel(REBOOT_MODE_CHARGE, S5P_INFORM4); else if (strstr(cmd, "wipe")) __raw_writel(REBOOT_MODE_WIPE, S5P_INFORM4); else if (strstr(cmd, "upgrade")) __raw_writel(REBOOT_MODE_UPGRADE, S5P_INFORM4); else if (strstr(cmd, "custom")) { if (!strict_strtoul(cmd+7, 10, &custom_val)) { __raw_writel(custom_val & CUSTOM_MASK, S5P_INFORM7); /* notify uboot reboot to recovery */ __raw_writel(REBOOT_MODE_UPGRADE, S5P_INFORM4); } /* error cmd reboot to android */ } } flush_cache_all(); outer_flush_all(); arch_reset(0, 0); pr_emerg("%s: waiting for reboot\n", __func__); while (1); }
static int init_reset_vector(void) { /* remap iram to 0x00000000 */ sci_glb_set(REG_AHB_REMAP, BIT(0)); if (!sp_pm_reset_vector) { sp_pm_reset_vector = ioremap(SPRD_RESET_VECTORS, PAGE_SIZE); if (sp_pm_reset_vector == NULL) { printk(KERN_ERR "sp_pm_init: failed to map reset vector\n"); return 0; } } iram_start = (void __iomem *)(SPRD_IRAM_BASE); /* copy sleep code to (IRAM+16K). */ if ((sc8825_standby_iram_end - sc8825_standby_iram + 128) > SLEEP_CODE_SIZE) { panic("##: code size is larger than expected, need more memory!\n"); } memcpy_toio(iram_start, sc8825_standby_iram, SLEEP_CODE_SIZE); /* copy emc re-init code to (IRAM+16k+8K) */; memcpy_toio(iram_start+2*SLEEP_CODE_SIZE, emc_init_repowered, EMC_REINIT_CODE_SIZE); /* just make sure*/ flush_cache_all(); outer_flush_all(); return 0; }
/*
 * fimg2d_check_dma_sync - validate a blit command's DMA ranges and do
 * the cache maintenance required before the G2D engine accesses memory.
 *
 * Returns 0 on success, -EINVAL for an invalid address range, -EFAULT
 * when the page-directory check fails.  Cache maintenance is skipped
 * entirely when CCI snooping keeps the caches coherent (CCI_SNOOP).
 */
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;

	fimg2d_calc_dma_size(cmd);

	if (fimg2d_check_address(cmd))
		return -EINVAL;

	if (fimg2d_check_pgd(mm, cmd))
		return -EFAULT;

#ifndef CCI_SNOOP
	fimg2d_debug("cache flush\n");
	perf_start(cmd, PERF_CACHE);
	if (is_inner_flushall(cmd->dma_all)) {
		/* large transfers: full inner flush is cheaper than per-range */
		inner_touch_range(cmd);
		flush_all_cpu_caches();
	} else {
		inner_flush_clip_range(cmd);
	}
#ifdef CONFIG_OUTER_CACHE
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else
		outer_flush_clip_range(cmd);
#endif
	perf_end(cmd, PERF_CACHE);
#endif
	return 0;
}
/*
 * rk_lpmode_enter - final low-power entry step (cpu_suspend finisher).
 *
 * Applies the SRAM sleep settings, flushes TLB and caches, disables the
 * outer cache and the processor, then executes WFI.  The 'd'/'D'
 * characters bracket the WFI on the debug UART.  Returns 0 after wakeup.
 * Statement order is critical: L1 and L2 must be clean before
 * outer_disable()/cpu_proc_fin().
 */
static int rk_lpmode_enter(unsigned long arg)
{
	//RKPM_DDR_PFUN(slp_setting(rkpm_jdg_sram_ctrbits),slp_setting);
	RKPM_DDR_FUN(slp_setting);

	local_flush_tlb_all();
	flush_cache_all();
	outer_flush_all();
	outer_disable();
	cpu_proc_fin();
	//outer_inv_all();// ???
	// l2x0_inv_all_pm(); //rk319x is not need
	flush_cache_all();

	rkpm_ddr_printch('d');
	//rkpm_udelay(3*10);
	dsb();
	wfi();
	rkpm_ddr_printch('D');
	return 0;
}
/* Architecture-specific restart for Kaen and other boards, where a GPIO line
 * is used to reset CPU and TPM together.
 *
 * Most of this function mimicks arm_machine_restart in process.c, except that
 * that function turns off caching and then flushes the cache one more time,
 * and we do not. This is certainly less clean but unlikely to matter as the
 * additional dirty cache lines do not contain critical data.
 *
 * On boards that don't implement the reset hardware we fall back to the old
 * method.
 */
static void gpio_machine_restart(char mode, const char *cmd)
{
	tegra_pm_flush_console();

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/* We must flush the L2 cache for preserved / kcrashmem */
	outer_flush_all();

	/* Clean and invalidate caches */
	flush_cache_all();

	/* Reboot by resetting CPU and TPM via GPIO */
	gpio_set_value(TEGRA_GPIO_RESET, 0);

	/*
	 * printk should still work with interrupts disabled, but since we've
	 * already flushed this isn't guaranteed to actually make it out.  We'll
	 * print it anyway just in case.  If the GPIO reset did not take effect,
	 * fall back to the legacy restart path.
	 */
	printk(KERN_INFO "restart: trying legacy reboot\n");
	legacy_arm_pm_restart(mode, cmd);
}
/* Called from the FIQ asm handler */
/*
 * msm7k_fiq_handler - FIQ entry: masks and clears the triggering A9->M2A
 * interrupt, flushes caches, dumps the full saved register context from
 * msm_dump_cpu_ctx and unwinds a backtrace from the saved SVC context.
 */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;
	struct pt_regs context_regs;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();
	/* push the dump data out of the caches so it survives a reset */
	flush_cache_all();
	outer_flush_all();
	pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x", __func__, msm_dump_cpu_ctx.usr_r0);
	/* dump every banked register the monitor saved, in one line */
	pr_err("%s msm_dump_cpu_ctx usr_r0:0x%x usr_r1:0x%x usr_r2:0x%x usr_r3:0x%x usr_r4:0x%x usr_r5:0x%x usr_r6:0x%x usr_r7:0x%x usr_r8:0x%x usr_r9:0x%x usr_r10:0x%x usr_r11:0x%x usr_r12:0x%x usr_r13:0x%x usr_r14:0x%x irq_spsr:0x%x irq_r13:0x%x irq_r14:0x%x svc_spsr:0x%x svc_r13:0x%x svc_r14:0x%x abt_spsr:0x%x abt_r13:0x%x abt_r14:0x%x und_spsr:0x%x und_r13:0x%x und_r14:0x%x fiq_spsr:0x%x fiq_r8:0x%x fiq_r9:0x%x fiq_r10:0x%x fiq_r11:0x%x fiq_r12:0x%x fiq_r13:0x%x fiq_r14:0x%x\n",__func__, msm_dump_cpu_ctx.usr_r0,msm_dump_cpu_ctx.usr_r1,msm_dump_cpu_ctx.usr_r2,msm_dump_cpu_ctx.usr_r3, msm_dump_cpu_ctx.usr_r4, msm_dump_cpu_ctx.usr_r5, msm_dump_cpu_ctx.usr_r6, msm_dump_cpu_ctx.usr_r7, msm_dump_cpu_ctx.usr_r8, msm_dump_cpu_ctx.usr_r9, msm_dump_cpu_ctx.usr_r10, msm_dump_cpu_ctx.usr_r11, msm_dump_cpu_ctx.usr_r12, msm_dump_cpu_ctx.usr_r13, msm_dump_cpu_ctx.usr_r14, msm_dump_cpu_ctx.irq_spsr, msm_dump_cpu_ctx.irq_r13, msm_dump_cpu_ctx.irq_r14, msm_dump_cpu_ctx.svc_spsr, msm_dump_cpu_ctx.svc_r13, msm_dump_cpu_ctx.svc_r14, msm_dump_cpu_ctx.abt_spsr,msm_dump_cpu_ctx.abt_r13, msm_dump_cpu_ctx.abt_r14, msm_dump_cpu_ctx.und_spsr,msm_dump_cpu_ctx.und_r13, msm_dump_cpu_ctx.und_r14, msm_dump_cpu_ctx.fiq_spsr,msm_dump_cpu_ctx.fiq_r8, msm_dump_cpu_ctx.fiq_r9, msm_dump_cpu_ctx.fiq_r10, msm_dump_cpu_ctx.fiq_r11, msm_dump_cpu_ctx.fiq_r12, msm_dump_cpu_ctx.fiq_r13, msm_dump_cpu_ctx.fiq_r14);
	context_regs.ARM_sp = msm_dump_cpu_ctx.svc_r14;
	context_regs.ARM_lr = msm_dump_cpu_ctx.svc_r14;
	context_regs.ARM_fp = msm_dump_cpu_ctx.usr_r11; //for the svc r11 is the same with usr r11
	/*
	 * NOTE(review): ARM_pc is set from svc_r14 (the SVC link register),
	 * same as ARM_lr, while the sibling handler variant uses fiq_r14 for
	 * the PC -- looks suspicious; confirm which register the monitor
	 * saves the interrupted PC into.
	 */
	context_regs.ARM_pc = msm_dump_cpu_ctx.svc_r14;
	//dump_stack();
	unwind_backtrace(&context_regs, current);
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif
	return;
}
/*
 * exynos4_cpu_suspend - last step of suspend-to-RAM.
 *
 * On non-4210 SoCs with C2C in use, reprograms the DMC clock gates,
 * PWI divider and PWI clock source first.  Then flushes the outer
 * cache and either asks secure firmware to sleep (TrustZone) or issues
 * the standby signal directly via cpu_do_idle().
 */
void exynos4_cpu_suspend(void)
{
	unsigned int tmp;

	if ((!soc_is_exynos4210()) && (exynos4_is_c2c_use())) {
		/* Gating CLK_IEM_APC & Enable CLK_SSS */
		tmp = __raw_readl(EXYNOS4_CLKGATE_IP_DMC);
		tmp &= ~(0x1 << 17);
		tmp |= (0x1 << 4);
		__raw_writel(tmp, EXYNOS4_CLKGATE_IP_DMC);

		/* Set MAX divider for PWI */
		tmp = __raw_readl(EXYNOS4_CLKDIV_DMC1);
		tmp |= (0xF << 8);
		__raw_writel(tmp, EXYNOS4_CLKDIV_DMC1);

		/* Set clock source for PWI */
		tmp = __raw_readl(EXYNOS4_CLKSRC_DMC);
		tmp &= ~EXYNOS4_CLKSRC_DMC_MASK;
		tmp |= ((0x6 << 16)|(0x1 << 12));
		__raw_writel(tmp, EXYNOS4_CLKSRC_DMC);
	}

	/* L2 must be clean before power-down */
	outer_flush_all();

#ifdef CONFIG_ARM_TRUSTZONE
	exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
#else
	/* issue the standby signal into the pm unit. */
	cpu_do_idle();
#endif
}
/*
 * sec_reboot - machine restart hook: encode the reboot reason from @cmd
 * into the SPRD_INFORM3 scratch register (read by the bootloader), then
 * flush caches and reset.  Does not return.
 *
 * @str: restart mode character passed by the ARM restart path.
 * @cmd: optional command string ("fota", "recovery", "debugN", ...);
 *       NULL selects the default REBOOT_MODE_NONE.
 */
static void sec_reboot(char str, const char *cmd)
{
	local_irq_disable();

	pr_emerg("%s (%d, %s)\n", __func__, str, cmd ? cmd : "(null)");

	/* magic value: tell the bootloader not to enter low-power mode */
	__raw_writel(0x12345678,SPRD_INFORM2); /* Don't enter lpm mode */

	if (!cmd) {
		__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_NONE, SPRD_INFORM3);
	} else {
		unsigned long value;
		if (!strcmp(cmd, "fota"))
			__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_FOTA,
				     SPRD_INFORM3);
		else if (!strcmp(cmd, "fota_bl"))
			__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_FOTA_BL,
				     SPRD_INFORM3);
		else if (!strcmp(cmd, "recovery"))
			__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_RECOVERY,
				     SPRD_INFORM3);
		else if (!strcmp(cmd, "download"))
			__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_DOWNLOAD,
				     SPRD_INFORM3);
		else if (!strcmp(cmd, "upload"))
			__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_UPLOAD,
				     SPRD_INFORM3);
		else if (!strcmp(cmd, "secure"))
			__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_SECURE,
				     SPRD_INFORM3);
		/* "debug"/"swsel"/"sud" carry a numeric argument after the keyword */
		else if (!strncmp(cmd, "debug", 5)
			 && !kstrtoul(cmd + 5, 0, &value))
			__raw_writel(REBOOT_SET_PREFIX | REBOOT_SET_DEBUG | value,
				     SPRD_INFORM3);
		else if (!strncmp(cmd, "swsel", 5)
			 && !kstrtoul(cmd + 5, 0, &value))
			__raw_writel(REBOOT_SET_PREFIX | REBOOT_SET_SWSEL | value,
				     SPRD_INFORM3);
		else if (!strncmp(cmd, "sud", 3)
			 && !kstrtoul(cmd + 3, 0, &value))
			__raw_writel(REBOOT_SET_PREFIX | REBOOT_SET_SUD | value,
				     SPRD_INFORM3);
		else if (!strncmp(cmd, "emergency", 9))
			__raw_writel(0, SPRD_INFORM3);
		else
			__raw_writel(REBOOT_MODE_PREFIX | REBOOT_MODE_NONE,
				     SPRD_INFORM3);
	}

	/* make the INFORM writes visible to the bootloader */
	flush_cache_all();
	outer_flush_all();

	arch_reset(0, cmd);
	mdelay(1000);

	pr_emerg("%s: waiting for reboot\n", __func__);
	while (1);
}
/*
 * machine_crash_swreset - software reset path used on panic.
 *
 * Flushes and disables the caches so RAM contents are consistent for a
 * post-mortem, then triggers the software restart handler.  Does not
 * return.
 */
void machine_crash_swreset(void)
{
	printk(KERN_INFO "Software reset on panic!\n");

	/* L1 clean, L2 clean, then L2 off before the reset */
	flush_cache_all();
	outer_flush_all();
	outer_disable();

	arm_pm_restart_sw(0, NULL);
}
/*
 * sec_reboot - machine restart hook (variant with alarm-boot support):
 * encode the reboot reason from @cmd into SPRD_INFORM3 for the
 * bootloader, flush caches and reset.  Does not return.
 *
 * @str: restart mode character from the ARM restart path.
 * @cmd: optional command string; NULL selects REBOOT_MODE_NONE.
 */
static void sec_reboot(char str, const char *cmd)
{
	local_irq_disable();
	local_fiq_disable();

	pr_emerg("%s (%d, %s)\n", __func__, str, cmd ? cmd : "(null)");

	/* magic value: tell the bootloader not to enter low-power mode */
	writel(0x12345678,SPRD_INFORM2); /* Don't enter lpm mode */

	if (!cmd) {
		writel(REBOOT_MODE_PREFIX | REBOOT_MODE_NONE, SPRD_INFORM3);
	} else {
		unsigned long value;
		if (!strcmp(cmd, "fota"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_ARM11_FOTA,
			       SPRD_INFORM3);
		else if (!strcmp(cmd, "recovery"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_RECOVERY,
			       SPRD_INFORM3);
		else if (!strcmp(cmd, "download"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_DOWNLOAD,
			       SPRD_INFORM3);
		else if (!strcmp(cmd, "upload"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_UPLOAD,
			       SPRD_INFORM3);
#if defined(CONFIG_RTC_CHN_ALARM_BOOT)
		else if (!strcmp(cmd, "alarmboot"))
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_ALARM_BOOT,
			       SPRD_INFORM3);
#endif
		/* "debug"/"swsel"/"sud" carry a numeric argument after the keyword */
		else if (!strncmp(cmd, "debug", 5)
			 && !kstrtoul(cmd + 5, 0, &value))
			writel(REBOOT_SET_PREFIX | REBOOT_SET_DEBUG | value,
			       SPRD_INFORM3);
		else if (!strncmp(cmd, "swsel", 5)
			 && !kstrtoul(cmd + 5, 0, &value))
			writel(REBOOT_SET_PREFIX | REBOOT_SET_SWSEL | value,
			       SPRD_INFORM3);
		else if (!strncmp(cmd, "sud", 3)
			 && !kstrtoul(cmd + 3, 0, &value))
			writel(REBOOT_SET_PREFIX | REBOOT_SET_SUD | value,
			       SPRD_INFORM3);
		else
			writel(REBOOT_MODE_PREFIX | REBOOT_MODE_NONE,
			       SPRD_INFORM3);
	}

	/* make the INFORM writes visible to the bootloader */
	flush_cache_all();
	outer_flush_all();

	arch_reset(0, cmd);

	pr_emerg("%s: waiting for reboot\n", __func__);
	while (1);
}
/*
 * exynos_cpu_suspend - cpu_suspend() finisher: flush all caches, then
 * ask the secure firmware to power the system down.  If the SMC call
 * returns, suspend failed; clear the boot flag in non-secure SYSRAM and
 * return nonzero so the PM core unwinds the suspend.
 */
static int exynos_cpu_suspend(unsigned long arg)
{
	flush_cache_all();
	outer_flush_all();

	exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);

	/* only reached when the firmware sleep request failed */
	pr_info("Failed to suspend the system\n");
	writel(0, sysram_ns_base_addr + EXYNOS_BOOT_FLAG);
	return 1;
}
/*
 * highbank_suspend_finish - cpu_suspend() finisher for Highbank.
 *
 * Flushes and disables the outer cache, signals the power controller
 * that we are suspending, and idles the CPU (the actual power-down
 * happens in hardware).  On wakeup the suspend request is cleared and
 * 0 is returned.
 */
static int highbank_suspend_finish(unsigned long val)
{
	/* L2 must be clean and off before power-down */
	outer_flush_all();
	outer_disable();

	highbank_set_pwr_suspend();
	cpu_do_idle();
	highbank_clear_pwr_request();
	return 0;
}
/* Called from the FIQ bark handler */
/*
 * msm_7k_bark_fin - finish handling a watchdog-bark FIQ: bump the FIQ
 * counter, clear the pending A9->M2A interrupt in the GIC, and flush
 * caches so any state written so far survives a subsequent reset.
 */
void msm_7k_bark_fin(void)
{
	fiq_counter++;
	// local_irq_disable();
	//clear_pending_spi(MSM8625_INT_A9_M2A_2);
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	//gic_clear_spi_pending(MSM8625_INT_A9_M2A_2);
	//local_irq_enable();
	flush_cache_all();
	outer_flush_all();
	return;
}
/*
 * sec_power_off - board power-off handler.  Loops forever: if a charger
 * is attached (or power-off kept failing, or an over-voltage/LP-charge
 * condition applies), reboots into LP-charging instead of powering off;
 * otherwise, once the power button is released, drops PS_HOLD to cut
 * board power.  Does not return.
 */
static void sec_power_off(void)
{
	int poweroff_try = 0;

	local_irq_disable();

	pr_emerg("%s : cable state=%d wpc status=%d\n", __func__,
		 is_cable_attached, is_wpc_cable_attached);

	while (1) {
		/* Check reboot charging */
		if ((is_wpc_cable_attached || is_cable_attached
		     || (poweroff_try >= 5))
		    && (!is_ovlo_state || !lpcharge)) {
			pr_emerg
			    ("%s: charger connected(%d) or power"
			     "off failed(%d), reboot!\n",
			     __func__, is_cable_attached, poweroff_try);

			/* To enter LP charging */
			writel(0x0, EXYNOS_INFORM2);

			flush_cache_all();
			outer_flush_all();
			exynos5_restart(0, 0);

			pr_emerg("%s: waiting for reboot\n", __func__);
			while (1)
				;
		}

		/* wait for power button release */
		if (gpio_get_value(GPIO_nPOWER)) {
			pr_emerg("%s: set PS_HOLD low\n", __func__);

			/* power off code
			 * PS_HOLD Out/High -->
			 * Low PS_HOLD_CONTROL, R/W, 0x1002_330C
			 */
			writel(readl(EXYNOS_PS_HOLD_CONTROL) & 0xFFFFFEFF,
			       EXYNOS_PS_HOLD_CONTROL);

			/* power should be gone now; count the failure if not */
			++poweroff_try;
			pr_emerg
			    ("%s: Should not reach here! (poweroff_try:%d)\n",
			     __func__, poweroff_try);
		} else {
			/* if power button is not released, wait and check TA again */
			pr_info("%s: PowerButton is not released.\n", __func__);
		}
		mdelay(1000);
	}
}
static void cacheperf(void *vbuf, enum cachemaintenance id) { struct timespec beforets; struct timespec afterts; phys_addr_t pbuf = virt_to_phys(vbuf); u32 pbufend, xfer_size, i; long timeval; xfer_size = START_SIZE; while (xfer_size <= END_SIZE) { pbufend = pbuf + xfer_size; timeval = 0; for (i = 0; i < try_cnt; i++) { memset(vbuf, i, xfer_size); getnstimeofday(&beforets); switch (id) { case CM_CLEAN: if (l1) dmac_map_area(vbuf, xfer_size, DMA_TO_DEVICE); if (l2) outer_clean_range(pbuf, pbufend); break; case CM_INV: if (l2) outer_inv_range(pbuf, pbufend); if (l1) dmac_unmap_area(vbuf, xfer_size, DMA_FROM_DEVICE); break; case CM_FLUSH: if (l1) dmac_flush_range(vbuf, (void *)((u32) vbuf + xfer_size)); if (l2) outer_flush_range(pbuf, pbufend); break; case CM_FLUSHALL: if (l1) flush_cache_all(); if (l2) outer_flush_all(); break; } getnstimeofday(&afterts); timeval += update_timeval(beforets, afterts); } printk(KERN_INFO "%lu\n", timeval/try_cnt); xfer_size *= 2; } }
/*
 * machine_kexec - jump into a new kernel image (hardboot-capable variant).
 *
 * Stages the relocation stub in the control page, writes the boot
 * parameters into the stub via mem_text_write_kernel_word (the text may
 * be read-only), runs any hardboot hook, flushes and disables caches,
 * and finally jumps to the physical address of the relocation code.
 * Does not return.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;

	arch_kexec();

	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer*/
	mem_text_write_kernel_word(&kexec_start_address, image->start);
	mem_text_write_kernel_word(&kexec_indirection_page, page_list);
	mem_text_write_kernel_word(&kexec_mach_type, machine_arch_type);
	mem_text_write_kernel_word(&kexec_boot_atags, image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET);
#ifdef CONFIG_KEXEC_HARDBOOT
	mem_text_write_kernel_word(&kexec_hardboot, image->hardboot);
#endif

	/* copy our kernel relocation code to the control code page */
	memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);

	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	if (kexec_reinit)
		kexec_reinit();
	local_irq_disable();
	local_fiq_disable();
	setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
#ifdef CONFIG_KEXEC_HARDBOOT
	if (image->hardboot && kexec_hardboot_hook)
		/* Run any final machine-specific shutdown code. */
		kexec_hardboot_hook();
#endif
	/* clean everything, turn caches off, then invalidate before the jump */
	flush_cache_all();
	outer_flush_all();
	outer_disable();
	cpu_proc_fin();
	outer_inv_all();
	flush_cache_all();
	__virt_to_phys(cpu_reset)(reboot_code_buffer_phys);
}
/*
 * omap_tiler_cache_operation - flush or invalidate the caches covering a
 * user mapping of a TILER 1D buffer.
 *
 * @buffer:  ION buffer (must be mapped cacheable, TILER 1D format).
 * @len:     number of bytes, must fit within the buffer's tiler pages.
 * @vaddr:   user virtual address of the region.
 * @cacheop: CACHE_FLUSH (clean+invalidate) or invalidate.
 *
 * Returns 0 on success, -EINVAL on any validation failure.
 */
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			       unsigned long vaddr, enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;
	phys_addr_t paddr = tiler_virt2phys(vaddr);

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}
	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}
	n_pages = info->n_tiler_pages;
	if (len > (n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
		       __func__);
		return -EINVAL;
	}
	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n", __func__);
		return -EINVAL;
	}

#if 0
	/* disabled full-flush fast path for large requests */
	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}
#endif
	if (cacheop == CACHE_FLUSH) {
		/* inner clean+invalidate, then outer flush */
		flush_cache_user_range(vaddr, vaddr + len);
		outer_flush_range(paddr, paddr + len);
	} else {
		/* invalidate: outer first, then inner via dmac_map_area */
		outer_inv_range(paddr, paddr + len);
		dmac_map_area((const void*) vaddr, len, DMA_FROM_DEVICE);
	}
	return 0;
}
/*
 * sec_debug_set_upload_magic - write the upload-mode magic into the
 * restart-reason address so the bootloader enters upload mode after a
 * reset.  A nonzero @magic also arms the QC download-mode magic.
 * Caches are flushed so the write reaches RAM before any reset.
 */
static void sec_debug_set_upload_magic(unsigned magic)
{
	pr_emerg("(%s) %x\n", __func__, magic);

	if (magic)
		sec_debug_set_qc_dload_magic(1);

	__raw_writel(magic, restart_reason);

	flush_cache_all();
#ifndef CONFIG_ARM64
	/* no outer cache on arm64 builds */
	outer_flush_all();
#endif
}
/*
 * sec_debug_hw_reset - debug-triggered hardware reset: log the kernel
 * version, flush caches so the debug state reaches RAM, and reset via
 * the MSM restart handler.  Does not return.
 */
void sec_debug_hw_reset(void)
{
	pr_emerg("(%s) %s %s\n", __func__, init_uts_ns.name.release,
		 init_uts_ns.name.version);
	pr_emerg("(%s) rebooting...\n", __func__);

	flush_cache_all();
#ifndef CONFIG_ARM64
	/* no outer cache on arm64 builds */
	outer_flush_all();
#endif
	do_msm_restart(0, "sec_debug_hw_reset");

	while (1)
		;
}
/*
 * omap_tiler_cache_operation - flush or invalidate caches for a TILER 1D
 * buffer mapping (variant using the buffer's tiler physical addresses).
 *
 * @buffer:  ION buffer (must be cacheable, TILER 1D format).
 * @len:     number of bytes, must fit within the buffer's tiler pages.
 * @vaddr:   user virtual address of the region.
 * @cacheop: CACHE_FLUSH for clean+invalidate, otherwise invalidate the
 *           outer range only.
 *
 * For requests above FULL_CACHE_FLUSH_THRESHOLD a whole-cache flush on
 * every CPU is cheaper than a ranged operation.  Returns 0 on success,
 * -EINVAL on any validation failure.
 */
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			       unsigned long vaddr, enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}
	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}
	n_pages = info->n_tiler_pages;
	if (len > (n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
		       __func__);
		return -EINVAL;
	}
	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n", __func__);
		return -EINVAL;
	}

	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}

	/*
	 * NOTE(review): the inner cache is cleaned via
	 * flush_cache_user_range() even for the invalidate path -- confirm
	 * whether that is intentional (the sibling variant invalidates via
	 * dmac_map_area instead).
	 */
	flush_cache_user_range(vaddr, vaddr + len);

	if (cacheop == CACHE_FLUSH)
		outer_flush_range(info->tiler_addrs[0],
				  info->tiler_addrs[0] + len);
	else
		outer_inv_range(info->tiler_addrs[0],
				info->tiler_addrs[0] + len);
	return 0;
}
/*
 * sec_peripheral_secure_check_fail - handle a failed peripheral secure
 * check: arm the upload magic (0x77665507), disarm QC download mode,
 * flush caches and reset.  Does not return.
 */
void sec_peripheral_secure_check_fail(void)
{
	sec_debug_set_upload_magic(0x77665507);
	sec_debug_set_qc_dload_magic(0);
	/* NOTE(review): message looks truncated and lacks a KERN_* level --
	 * probably meant to name this function; confirm intent. */
	printk("sec_periphe\n");
	pr_emerg("(%s) rebooting...\n", __func__);
	flush_cache_all();
#ifndef CONFIG_ARM64
	/* no outer cache on arm64 builds */
	outer_flush_all();
#endif
	do_msm_restart(0, "peripheral_hw_reset");

	while (1)
		;
}
/*
 * sirfsoc_pm_enter - platform suspend entry point.
 *
 * Only PM_SUSPEND_MEM is supported: power off peripherals, clean and
 * disable the L2 before the core powers down, suspend via the finisher,
 * and re-enable the L2 on resume.  Returns 0 on success, -EINVAL for
 * any other state.
 */
static int sirfsoc_pm_enter(suspend_state_t state)
{
	if (state != PM_SUSPEND_MEM)
		return -EINVAL;

	sirfsoc_pre_suspend_power_off();

	/* L2 must be clean and off before the power-down sequence */
	outer_flush_all();
	outer_disable();
	cpu_suspend(0, sirfsoc_finish_suspend);
	outer_resume();

	return 0;
}
/*
 * machine_kexec - jump into a new kernel image.
 *
 * Refuses to run with secondary CPUs still online.  Copies the
 * relocation stub into the control page, fills in its boot parameters,
 * sets up an identity mapping, flushes and disables caches, and jumps
 * to the stub at its physical address.  Does not return on success.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;

	if (num_online_cpus() > 1) {
		pr_err("kexec: error: multiple CPUs still online\n");
		return;
	}

	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer*/
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;

	/* copy our kernel relocation code to the control code page */
	memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);

	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	if (kexec_reinit)
		kexec_reinit();
	local_irq_disable();
	local_fiq_disable();
	setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
	/* clean everything, turn caches off, then invalidate before the jump */
	flush_cache_all();
	outer_flush_all();
	outer_disable();
	cpu_proc_fin();
	outer_inv_all();
	flush_cache_all();
	cpu_reset(reboot_code_buffer_phys);
}
/*
 * fimg2d_check_dma_sync - validate the per-image DMA ranges of a blit
 * command against the context's page tables, then perform the cache
 * maintenance needed before the G2D engine runs.
 *
 * Returns 0 on success, -EINVAL for an invalid address range, -EFAULT
 * when any image's pagetable check faults.  Cache maintenance is
 * compiled out when CCI snooping keeps the caches coherent.
 */
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_dma *c;
	enum pt_status pt;
	int i;

	fimg2d_calc_dma_size(cmd);

	if (fimg2d_check_address(cmd))
		return -EINVAL;

	/* verify each image's base range is actually mapped */
	for (i = 0; i < MAX_IMAGES; i++) {
		c = &cmd->dma[i].base;
		if (!c->size)
			continue;

		pt = fimg2d_check_pagetable(mm, c->addr, c->size);
		if (pt == PT_FAULT)
			return -EFAULT;
	}

#ifndef CCI_SNOOP
	fimg2d_debug("cache flush\n");
	perf_start(cmd, PERF_CACHE);
	if (is_inner_flushall(cmd->dma_all)) {
		/* large transfers: full inner flush is cheaper than per-range */
		inner_touch_range(cmd);
		flush_all_cpu_caches();
	} else {
		inner_flush_clip_range(cmd);
	}
#ifdef CONFIG_OUTER_CACHE
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else
		outer_flush_clip_range(cmd);
#endif
	perf_end(cmd, PERF_CACHE);
#endif
	return 0;
}
/* Called from the FIQ asm handler */
/*
 * msm7k_fiq_handler - minimal FIQ handler: mask the triggering A9->M2A
 * interrupt at its chip, clear it in the GIC, and flush all caches so
 * any state written so far survives a subsequent reset.
 */
void msm7k_fiq_handler(void)
{
	struct irq_data *d;
	struct irq_chip *c;

	pr_info("Fiq is received %s\n", __func__);
	fiq_counter++;
	d = irq_get_irq_data(MSM8625_INT_A9_M2A_2);
	c = irq_data_get_irq_chip(d);
	c->irq_mask(d);
	local_irq_disable();

	/* Clear the IRQ from the ENABLE_SET */
	gic_clear_irq_pending(MSM8625_INT_A9_M2A_2);
	local_irq_enable();
	flush_cache_all();
	outer_flush_all();
	return;
}
/*
 * pwrctrl_l2c_disable - flush and power off the L2 (L2X0) cache.
 *
 * Compiled to a no-op when there is no L2X0 cache, or on HI6210 where
 * the L2 does not need this handling.  Otherwise: flush the outer
 * cache, switch the L2 controller off via its control register, and
 * barrier.  Always returns RET_OK.
 */
s32_t pwrctrl_l2c_disable( void_t )
{
#ifdef CONFIG_CACHE_L2X0
#ifdef CHIP_BB_HI6210
	/*no need*/
#else
	/* flush cache all */
	/*l2x0_flush_all();*/
	outer_flush_all();

	/* disable l2x0 cache */
	pwrctrl_write_reg32(ACPU_L2CC_CTRL_VA,PWRCTRL_SWITCH_OFF);

	/* barrier so the disable completes before we return */
	dmb();
#endif
#endif
	return RET_OK;
}
/*
 * mmp_arch_reset - architecture reset for MMP/PXA SoCs.
 *
 * @mode: 's' jumps straight to ROM at address 0; anything else (the
 *        default, including 'w') retries a watchdog reset up to 10
 *        times, flushing caches before each attempt.
 * @cmd:  passed through to the watchdog reset helper.
 *
 * On D2199 boards a panic path first bumps buck1 voltage before the
 * watchdog reset.  Falls through (returns) only if every watchdog
 * attempt fails.
 */
void mmp_arch_reset(char mode, const char *cmd)
{
	int count = 10;
	static unsigned char data;

	if ((!cpu_is_pxa910()) && (!cpu_is_pxa168())
	    && (!cpu_is_pxa988()) && (!cpu_is_pxa986())
	    && (!cpu_is_pxa1088()))
		return;

	printk("%s (%c)\n", __func__, mode);
	switch (mode) {
	case 's':
		/* Jump into ROM at address 0 */
		cpu_reset(0);
		break;
	case 'w':
	default:
#if defined(CONFIG_MFD_D2199)
		if (is_panic) {
			/* dump buck1 voltage */
			d2199_extern_reg_read(D2199_BUCK2PH_BUCK1_REG, &data);
			pr_info("buck1 voltage: 0x%x\n", data);

			d2199_extern_reg_write(D2199_BUCK2PH_BUCK1_REG, 0xd8);

			/* double check */
			d2199_extern_reg_read(D2199_BUCK2PH_BUCK1_REG, &data);
			pr_info("buck1 voltage: 0x%x\n", data);
		}
#endif
		while(count--) {
			/* push dirty lines out before the watchdog fires */
			flush_cache_all();
			outer_flush_all();
			do_wdt_reset(cmd);
			mdelay(1000);
			printk("Watchdog fail...retry\n");
		}
		break;
	}
}