Example #1
static int s3c_pm_enter(suspend_state_t state)
{
	static unsigned long regs_save[16];
	unsigned int gpio;
	/* ensure the debug is initialised (if enabled) */

	s3c_pm_debug_init();

	S3C_PMDBG("%s(%d)\n", __func__, state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__);
		return -EINVAL;
	}

	/* check if we have anything to wake-up with... bad things seem
	 * to happen if you suspend with no wakeup (system will often
	 * require a full power-cycle)
	*/

	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}

	/* store the physical address of the register recovery block */

	s3c_sleep_save_phys = virt_to_phys(regs_save);

	S3C_PMDBG("s3c_sleep_save_phys=0x%08lx\n", s3c_sleep_save_phys);

	/* save all necessary core registers not covered by the drivers */


#if 0
	/* control power of moviNAND at PM and add 700ms delay
	 * for stabilization of moviNAND. */
	gpio = readl(S5PV210_GPJ2DAT);
	writel(gpio & ~0x80, S5PV210_GPJ2DAT);
	mdelay(700);
#endif

	s3c_pm_save_gpios();
	s3c_pm_save_uarts();
	s3c_pm_save_core();

	config_sleep_gpio();

	/* set the irq configuration for wake */

	s3c_pm_configure_extint();

	S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n",
	    s3c_irqwake_intmask, s3c_irqwake_eintmask);

	s3c_pm_arch_prepare_irqs();

	/* call cpu specific preparation */

	pm_cpu_prep();

	/* flush cache back to ram */

	flush_cache_all();

	s3c_pm_check_store();

	/* clear wakeup_stat register for next wakeup reason */
	__raw_writel(__raw_readl(S5P_WAKEUP_STAT), S5P_WAKEUP_STAT);

	/* send the cpu to sleep... */

	s3c_pm_arch_stop_clocks();

	/* s3c_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state and restores it
	 * during the resume.  */

	pmstats->sleep_count++;
	pmstats->sleep_freq = __raw_readl(S5P_CLK_DIV0);
	s3c_cpu_save(regs_save);
	pmstats->wake_count++;
	pmstats->wake_freq = __raw_readl(S5P_CLK_DIV0);

	/* restore the cpu state using the kernel's cpu init code. */

	cpu_init();

	fiq_glue_resume();
	local_fiq_enable();

	s3c_pm_restore_core();
	s3c_pm_restore_uarts();
	s3c_pm_restore_gpios();
	s5pv210_restore_eint_group();

	s3c_pm_debug_init();

	/* restore the system state */

	if (pm_cpu_restore)
		pm_cpu_restore();

	/* check what irq (if any) restored the system */

	s3c_pm_arch_show_resume_irqs();

	S3C_PMDBG("%s: post sleep, preparing to return\n", __func__);

	/* LEDs should now be 1110 */
	s3c_pm_debug_smdkled(1 << 1, 0);

	s3c_pm_check_restore();

	/* ok, let's return from sleep */

	S3C_PMDBG("S3C PM Resume (post-restore)\n");
	return 0;
}
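
Note on the pattern above: as the inline comment says, s3c_cpu_save() is both the way into sleep and the point execution resumes at. Other variants in this collection (see Example #13 below) make the trick explicit by branching on the save routine's return code. A minimal sketch of that pattern, with the names used in Example #13:

	/* Sketch: the save routine returns 0 on the way down; after a
	 * wakeup, execution re-enters past this call with a non-zero
	 * return, so only the first pass performs the actual sleep. */
	if (s3c6410_cpu_save(regs_save) == 0) {
		flush_cache_all();	/* no dirty lines may survive power-off */
		pm_cpu_sleep();		/* does not return on success */
	}
	/* resume path continues here */
	cpu_init();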
Example #2
result_t binary_neuter_xn(void *fb, size_t size) {

	tt_virtual_address_t va, l1, l2, start, end;
	tt_physical_address_t pa;
	tt_translation_table_base_register_t ttbr, ttbr0, ttbr1;
	tt_translation_table_base_control_register_t ttbcr;
	tt_first_level_descriptor_t fld;
	tt_second_level_descriptor_t sld;
	size_t tmp;

	printk(KERN_ALERT "binary_neuter_xn\n");

	printk(KERN_ALERT "this could take some time\n");

	va.all = (u32_t)fb;

	// Set some limits on the range for the brute-force
	// pa-to-va translations. It is assumed that kernel
	// memory is located above the 2GB line, and that the
	// kernel page tables usually sit just above it, so the
	// search runs from 2GB up to 4GB - 1 (matching the
	// values below). The range can be widened, but keep in
	// mind that the translation may then take longer.

	start.all = 2 * (u32_t)ONE_GIGABYTE;
	end.all = (4 * (u32_t)ONE_GIGABYTE - 1);

	ttbr0 = tt_get_ttbr0();
	ttbr1 = tt_get_ttbr1();
	ttbcr = tt_get_ttbcr();
	tt_select_ttbr(va, ttbr0, ttbr1, ttbcr, &ttbr);

	//printk(KERN_ALERT "ttbcr:%08x\n", ttbcr.all);
	//printk(KERN_ALERT "ttbr:%08x\n", ttbr.all);

	tt_ttbr_to_pa(ttbr, &pa);

	gen_pa_to_va(pa, start, end, &l1);

	while(va.all < ((u32_t)fb + size)) {
		tt_get_fld(va, l1, &fld);
		if(tt_fld_is_supersection(fld) == TRUE) {
			fld.supersection.fields.xn = FALSE;
			tt_set_fld(va, l1, fld);
			tmp = TT_SUPERSECTION_SIZE;
		}
		else if(tt_fld_is_section(fld) == TRUE) {
			fld.section.fields.xn = FALSE;
			tt_set_fld(va, l1, fld);
			tmp = TT_SECTION_SIZE;
		}
		else if(tt_fld_is_page_table(fld) == TRUE) {

			//printk(KERN_ALERT "fld:%08x\n", fld.all);

			tt_fld_to_pa(fld, &pa);
			gen_pa_to_va(pa, start, end, &l2);

			tt_get_sld(va, l2, &sld);

			//printk(KERN_ALERT "sld:%08x\n", sld.all);

			if(tt_sld_is_large_page(sld) == TRUE) {
				sld.large_page.fields.xn = FALSE;
				tmp = TT_LARGE_PAGE_SIZE;
			}
			else if(tt_sld_is_small_page(sld) == TRUE) {
				sld.small_page.fields.xn = FALSE;
				tmp = TT_SMALL_PAGE_SIZE;
			}
			else {
				return FAILURE;
			}

			tt_set_sld(va, l2, sld);
		}
		else {
			return FAILURE;
		}

		va.all += tmp;
	}

	tlb_invalidate_entire_unified_tlb();
	flush_cache_all();

	return SUCCESS;
}
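
A usage sketch for binary_neuter_xn(): the call site below is hypothetical and only illustrates the contract (pass the start and size of the region whose XN bits should be cleared; the routine itself already invalidates the TLB and flushes the caches):

	/* Hypothetical call site: make a mapped region executable by
	 * clearing XN in every descriptor that covers it. */
	if (binary_neuter_xn(fb, size) != SUCCESS)
		printk(KERN_ALERT "binary_neuter_xn failed\n");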
Example #3
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}
Example #4
void hi3536_cpu_die(unsigned int cpu)
{
	flush_cache_all();
	hi3536_scu_power_off(cpu);
	BUG();
}
Example #5
int draw_rgb888_screen(void)
{
	struct fb_info *fb = registered_fb[0];
	u32 height = fb->var.yres / 5;
	u32 line = fb->fix.line_length;
	u32 i, j;
#ifndef CONFIG_FRAMEBUFFER_CONSOLE
	struct module *owner;
#endif

	pr_info("##############%s\n", __func__);

#ifndef CONFIG_FRAMEBUFFER_CONSOLE
	owner = fb->fbops->owner;
	if (!try_module_get(owner))
		return -ENODEV;
	if (fb->fbops->fb_open && fb->fbops->fb_open(fb, 0)) {
		module_put(owner);
		return -ENODEV;
	}
#endif
	for (i = 0; i < height; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height; i < height * 2; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height * 2; i < height * 3; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height * 3; i < height * 4; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0xff, 1);
		}
	}

	for (i = height * 4; i < height * 5; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

#if defined(CONFIG_FB_MSM_MIPI_NOVATEK_BOE_CMD_WVGA_PT)
	flush_cache_all();
	outer_flush_all();
#endif
	return 0;
}
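
Since every band above writes the same four bytes per pixel, the per-byte memset() calls can be collapsed into a single 32-bit store per pixel. A minimal sketch, assuming the same 32bpp little-endian framebuffer layout as the code above (the helper name fill_band_u32 is hypothetical):

	/* Hypothetical helper: fill rows [y0, y1) with a packed pixel,
	 * e.g. 0x000000ff for the first band drawn above. */
	static void fill_band_u32(struct fb_info *fb, u32 y0, u32 y1, u32 pixel)
	{
		u32 line = fb->fix.line_length;
		u32 i, j;

		for (i = y0; i < y1; i++) {
			u32 *row = (u32 *)(fb->screen_base + i * line);

			for (j = 0; j < fb->var.xres; j++)
				row[j] = pixel;
		}
	}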
Example #6
static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
{
	size_t left = length;
	phys_addr_t begin = start;

	if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
		if (length > (size_t) L1_FLUSH_ALL) {
			flush_cache_all();
			smp_call_function(
					(smp_call_func_t)__cpuc_flush_kern_all,
					NULL, 1);

			goto outer_cache_ops;
		}
	}

#ifdef CONFIG_HIGHMEM
	do {
		size_t len;
		struct page *page;
		void *vaddr;
		off_t offset;

		page = phys_to_page(start);
		offset = offset_in_page(start);
		len = PAGE_SIZE - offset;

		if (left < len)
			len = left;

		if (PageHighMem(page)) {
			vaddr = kmap(page);
			cache_maint_inner(vaddr + offset, len, op);
			kunmap(page);
		} else {
			vaddr = page_address(page) + offset;
			cache_maint_inner(vaddr, len, op);
		}
		left -= len;
		start += len;
	} while (left);
#else
	cache_maint_inner(phys_to_virt(begin), left, op);
#endif

outer_cache_ops:
	switch (op) {
	case EM_CLEAN:
		outer_clean_range(begin, begin + length);
		break;
	case EM_INV:
		if (length <= L2_FLUSH_ALL) {
			outer_inv_range(begin, begin + length);
			break;
		}
		/* else FALL THROUGH */
	case EM_FLUSH:
		outer_flush_range(begin, begin + length);
		break;
	}
}
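
For context, a caller of cache_maint_phys() picks the op from the DMA direction: clean before a device reads the buffer, invalidate before the CPU reads what a device wrote, flush for bidirectional buffers. A minimal sketch with a hypothetical wrapper name, under the same definitions:

	/* Hypothetical wrapper around cache_maint_phys() choosing the op
	 * from the transfer direction. */
	static void dma_sync_phys(phys_addr_t pa, size_t len, bool to_device)
	{
		cache_maint_phys(pa, len, to_device ? EM_CLEAN : EM_INV);
	}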
Example #7
/* Initializes the SE (SDP, SRAM resize, RPC handler) */
static int tf_se_init(struct tf_comm *comm,
	u32 sdp_backing_store_addr, u32 sdp_bkext_store_addr)
{
	int error;
	unsigned int crc;

	if (comm->se_initialized) {
		dpr_info("%s: SE already initialized... nothing to do\n",
			__func__);
		return 0;
	}

	/* Secure CRC read */
	dpr_info("%s: Secure CRC Read...\n", __func__);

	crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX,
		0, 0, 0, 0, 0, 0);
	pr_info("SMC: SecureCRC=0x%08X\n", crc);

	/*
	 * Flush caches before resize, just to be sure there is no
	 * pending public data writes back to SRAM that could trigger a
	 * security violation once their address space is marked as
	 * secure.
	 */
#define OMAP4_SRAM_PA   0x40300000
#define OMAP4_SRAM_SIZE 0xe000
	flush_cache_all();
	outer_flush_range(OMAP4_SRAM_PA,
			OMAP4_SRAM_PA + OMAP4_SRAM_SIZE);
	wmb();

	/* SRAM resize */
	dpr_info("%s: SRAM resize (52KB)...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		SEC_RAM_SIZE_52KB, 0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SRAM resize OK\n", __func__);
	} else {
		dpr_err("%s: SRAM resize failed [0x%x]\n", __func__, error);
		goto error;
	}

	/* SDP init */
	dpr_info("%s: SDP runtime init..."
		"(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n",
		__func__,
		sdp_backing_store_addr, sdp_bkext_store_addr);
	error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2,
		sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SDP runtime init OK\n", __func__);
	} else {
		dpr_err("%s: SDP runtime init failed [0x%x]\n",
			__func__, error);
		goto error;
	}

	/* RPC init */
	dpr_info("%s: RPC init...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX,
		FLAG_START_HAL_CRITICAL, 1,
		(u32) (u32(*const) (u32, u32, u32, u32)) &rpc_handler, 0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: RPC init OK\n", __func__);
	} else {
		dpr_err("%s: RPC init failed [0x%x]\n", __func__, error);
		goto error;
	}

	comm->se_initialized = true;

	return 0;

error:
	return -EFAULT;
}
Example #8
static int s3c_pm_enter(suspend_state_t state)
{
#ifndef USE_DMA_ALLOC
	static unsigned long regs_save[16];
#endif /* !USE_DMA_ALLOC */
	unsigned int tmp, audiodomain_On;

	/* ensure the debug is initialised (if enabled) */

	s3c_pm_debug_init();

	S3C_PMDBG("%s(%d)\n", __func__, state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__);
		return -EINVAL;
	}

	/* check if we have anything to wake-up with... bad things seem
	 * to happen if you suspend with no wakeup (system will often
	 * require a full power-cycle)
	*/
	s3c_irqwake_intmask = 0xFFFD; // rtc_alarm

	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}

	/* store the physical address of the register recovery block */

#ifndef USE_DMA_ALLOC
	s3c_sleep_save_phys = virt_to_phys(regs_save);
#else
	__raw_writel(phy_regs_save, S5P_INFORM2);
#endif /* !USE_DMA_ALLOC */

	/* set flag for sleep mode; the idle2 flag is also reserved */
	__raw_writel(SLEEP_MODE, S5P_INFORM1);

	S3C_PMDBG("s3c_sleep_save_phys=0x%08lx\n", s3c_sleep_save_phys);

	/* save all necessary core registers not covered by the drivers */

	s3c_pm_save_gpios();
	s3c_pm_save_uarts();
	s3c_pm_save_core();

	s3c_config_sleep_gpio();

	/* set the irq configuration for wake */

	s3c_pm_configure_extint();

	S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n",
			s3c_irqwake_intmask, s3c_irqwake_eintmask);

	/*Set EINT as wake up source*/
#if defined(CONFIG_OPTICAL_GP2A)
	if(gp2a_get_proximity_enable())
	{
		s3c_pm_set_eint(2, 0x4); // Proximity
	}
#endif
	s3c_pm_set_eint(6, 0x4);  // det_3.5
	s3c_pm_set_eint(7, 0x2);  // pmic
	s3c_pm_set_eint(11, 0x2); // onedram
	s3c_pm_set_eint(20, 0x3); // wifi
	s3c_pm_set_eint(21, 0x4); // bt
	s3c_pm_set_eint(22, 0x2); // power key
	s3c_pm_set_eint(23, 0x2); // microusb
	s3c_pm_set_eint(25, 0x4); // volume down
	s3c_pm_set_eint(26, 0x4); // volume up
	s3c_pm_set_eint(28, 0x4); // T_FLASH_DETECT
	s3c_pm_set_eint(29, 0x4); // ok key
	if (get_headset_status() & SEC_HEADSET_4_POLE_DEVICE)
		s3c_pm_set_eint(30, 0x4); // sendend
	else
		s3c_pm_clear_eint(30);

	//s3c_pm_arch_prepare_irqs();
	

	/* call cpu specific preparation */

	pm_cpu_prep();

	/* flush cache back to ram */

	flush_cache_all();

	s3c_pm_check_store();

	__raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK); // 0xFFDD: key, RTC_ALARM

	/*clear for next wakeup*/
	tmp = __raw_readl(S5P_WAKEUP_STAT);
	__raw_writel(tmp, S5P_WAKEUP_STAT);

	//s3c_config_sleep_gpio();

	/* Enable PS_HOLD pin to avoid reset failure */
	__raw_writel((0x5 << 12 | 0x1 << 9 | 0x1 << 8 | 0x1 << 0), S5P_PSHOLD_CONTROL);


	/* send the cpu to sleep... */

	s3c_pm_arch_stop_clocks();

	/* s3c_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state and restores it
	 * during the resume.  */

	s3c_cpu_save(regs_save);

	/* restore the cpu state using the kernel's cpu init code. */

	cpu_init();
	

	/* restore the system state */

	s3c_pm_restore_core();

	/*Reset the uart registers*/
	__raw_writel(0x0, S3C24XX_VA_UART3+S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART3+S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART3+S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART3+S5P_UINTP);
	__raw_writel(0x0, S3C24XX_VA_UART2+S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART2+S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART2+S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART2+S5P_UINTP);
	__raw_writel(0x0, S3C24XX_VA_UART1+S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART1+S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART1+S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART1+S5P_UINTP);
	__raw_writel(0x0, S3C24XX_VA_UART0+S3C2410_UCON);
	__raw_writel(0xf, S3C24XX_VA_UART0+S5P_UINTM);
	__raw_writel(0xf, S3C24XX_VA_UART0+S5P_UINTSP);
	__raw_writel(0xf, S3C24XX_VA_UART0+S5P_UINTP);

	s3c_pm_restore_uarts();
	s3c_pm_restore_gpios();

	tmp = readl(S5P_NORMAL_CFG);
	if(!(tmp & S5PC110_POWER_DOMAIN_AUDIO)) {
		tmp = tmp | S5PC110_POWER_DOMAIN_AUDIO;
		writel(tmp, S5P_NORMAL_CFG);
		audiodomain_On = 1;
	} else {
		audiodomain_On = 0;
	}

	/* enable gpio, uart, mmc */
	tmp = __raw_readl(S5P_OTHERS);
	tmp |= (1<<31) | (1<<30) | (1<<28) | (1<<29);
	__raw_writel(tmp, S5P_OTHERS);

	tmp = readl(S5P_NORMAL_CFG);
	if (audiodomain_On) {
		tmp = tmp & ~S5PC110_POWER_DOMAIN_AUDIO;
		writel(tmp, S5P_NORMAL_CFG);
	}

	/*clear for next wakeup*/
	tmp = __raw_readl(S5P_WAKEUP_STAT);
	//printk("\nS5P_WAKEUP_STAT=%x\n",tmp);
	__raw_writel(tmp, S5P_WAKEUP_STAT);

	printk("wakeup source is 0x%x  \n", tmp);
	printk(" EXT_INT_0_PEND       %x \n", __raw_readl(S5PV210_EINTPEND(0)));
	printk(" EXT_INT_1_PEND       %x \n", __raw_readl(S5PV210_EINTPEND(1)));
	printk(" EXT_INT_2_PEND       %x \n", __raw_readl(S5PV210_EINTPEND(2)));
	printk(" EXT_INT_3_PEND       %x \n", __raw_readl(S5PV210_EINTPEND(3)));

	s3c_pm_clear_eint(21);
//	s3c_pm_clear_eint(22); // to be cleared later

	/* check what irq (if any) restored the system */
	s3c_pm_debug_init();

	s3c_pm_arch_show_resume_irqs();



#if defined(CONFIG_MACH_S5PC110_P1)
	// Set wakeup stat
	s3c_pm_set_wakeup_stat();
#endif // CONFIG_MACH_S5PC110_P1

	//printk("Int pending register before =%d\n",readl(S5PV210_EINTPEND(eint_pend_reg(22))));

	//printk("Int pending register after =%d\n",readl(S5PV210_EINTPEND(eint_pend_reg(22))));

	//S3C_PMDBG("%s: post sleep, preparing to return\n", __func__);
	//printk("%s: post sleep, preparing to return\n", __func__);

	/* LEDs should now be 1110 */
	//s3c_pm_debug_smdkled(1 << 1, 0);


	s3c_pm_check_restore();

	//mdelay(500);

	/* ok, let's return from sleep */
	printk(KERN_ERR "\n%s:%d\n", __func__, __LINE__);

	S3C_PMDBG("S3C PM Resume (post-restore)\n");
	return 0;
}
Example #9
/* callback from assembly code */
void s3c_pm_cb_flushcache(void)
{
	flush_cache_all();
}
Example #10
static int s3c_pm_enter(suspend_state_t state)
{
	/* ensure the debug is initialised (if enabled) */

	s3c_pm_debug_init();

	S3C_PMDBG("%s(%d)\n", __func__, state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__);
		return -EINVAL;
	}

	/* check if we have anything to wake-up with... bad things seem
	 * to happen if you suspend with no wakeup (system will often
	 * require a full power-cycle)
	*/

	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}

	/* save all necessary core registers not covered by the drivers */
	gpio_set_value(EXYNOS4_GPX0(1), 0);
	s3c_gpio_cfgpin(EXYNOS4_GPX0(1), S3C_GPIO_SFN(1));

	s3c_pm_save_gpios();
	s3c_pm_saved_gpios();
	s3c_pm_save_uarts();
	s3c_pm_save_core();

	if (s3c_config_sleep_gpio_table)
		s3c_config_sleep_gpio_table();

	/* set the irq configuration for wake */

	s3c_pm_configure_extint();

	S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n",
	    s3c_irqwake_intmask, s3c_irqwake_eintmask);

	s3c_pm_arch_prepare_irqs();

	
	printk("****************************************************************\n");
		printk("BEFORE SLEEP: WAKEUP_STAT: 0x%x\n", __raw_readl(S5P_WAKEUP_STAT));
		printk("BEFORE SLEEP:  ICCICR_CPU0: 0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+4));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+8));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0xc));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0x10));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0x14));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0x18));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0x1c));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0x40));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0x44));
		printk("BEFORE SLEEP:  0x%x\n", __raw_readl(S5P_VA_GIC_CPU+0xFC));
		printk("****************************************************************\n");

	/* call cpu specific preparation */

	pm_cpu_prep();

	/* flush cache back to ram */

	flush_cache_all();

	s3c_pm_check_store();

	/* send the cpu to sleep... */

	s3c_pm_arch_stop_clocks();

	/* s3c_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state and restores it
	 * during the resume.  */

	s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET);

	/* restore the cpu state using the kernel's cpu init code. */

	cpu_init();
	gpio_set_value(EXYNOS4_GPX0(1), 0);
	s3c_gpio_cfgpin(EXYNOS4_GPX0(1), S3C_GPIO_SFN(1));
	s3c_pm_restore_core();
	s3c_pm_restore_uarts();
	s3c_pm_restore_gpios();
	s3c_pm_restored_gpios();

	s3c_pm_debug_init();

	/* restore the system state */

	if (pm_cpu_restore)
		pm_cpu_restore();

	/* check what irq (if any) restored the system */

	s3c_pm_arch_show_resume_irqs();

	S3C_PMDBG("%s: post sleep, preparing to return\n", __func__);

	/* LEDs should now be 1110 */
	s3c_pm_debug_smdkled(1 << 1, 0);

	s3c_pm_check_restore();

	/* ok, let's return from sleep */

	S3C_PMDBG("S3C PM Resume (post-restore)\n");
	return 0;
}
Example #11
static int s5p6442_pm_enter(suspend_state_t state)
{
	unsigned long regs_save[16];
	unsigned int tmp;
	unsigned int eint_wakeup_mask;

	/* ensure the debug is initialised (if enabled) */

	DBG("s5p6442_pm_enter(%d)\n", state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR PFX "error: no cpu sleep functions set\n");
		return -EINVAL;
	}

	/* prepare check area if configured */
	s5p6442_pm_check_prepare();

	/* store the physical address of the register recovery block */
	s5p6442_sleep_save_phys = virt_to_phys(regs_save);

	printk("s5p6442_sleep_save_phys=0x%08lx\n", s5p6442_sleep_save_phys);

	DEBUG_WAKEUP("[PM-1] EINT0(0x%08x), EINT1(0x%08x), EINT2(0x%08x), EINT3(0x%08x)\n", 
		__raw_readl(S5P64XX_GPA0_BASE+0xF40), __raw_readl(S5P64XX_GPA0_BASE+0xF44),
		__raw_readl(S5P64XX_GPA0_BASE+0xF48), __raw_readl(S5P64XX_GPA0_BASE+0xF4C));

	/* save all necessary core registers not covered by the drivers */

	s5p6442_pm_do_save(gpio_save, ARRAY_SIZE(gpio_save));
	s5p6442_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
	s5p6442_pm_do_save(core_save, ARRAY_SIZE(core_save));
	s5p6442_pm_do_save(sromc_save, ARRAY_SIZE(sromc_save));
	// s5p6442_pm_do_save(onenand_save, ARRAY_SIZE(onenand_save));
	// s5p6442_pm_do_save(uart_save, ARRAY_SIZE(uart_save));

#ifdef S5P6442_POWER_GATING_IROM
	//s5p6442_pwrgate_config(S5P6442_IROM_ID, S5P6442_ACTIVE_MODE);
	tmp = __raw_readl(S5P_NORMAL_CFG);
	tmp |= (1 << S5P6442_IROM_ID);
	__raw_writel(tmp, S5P_NORMAL_CFG);
#endif

	/* ensure INF_REG0  has the resume address */
	__raw_writel(virt_to_phys(s5p6442_cpu_resume), S5P_INFORM0);

//	s5p_pwr_clk_gating_reset();

	/* set the irq configuration for wake */
	// s5p6442_pm_configure_extint();

	/* call cpu specific preparation */

	pm_cpu_prep();

	/* flush cache back to ram */

	flush_cache_all();

	//s5p6442_pm_check_store();

	s5p_config_sleep_gpio();  // for sleep current optimization

	/* set the irq configuration for wake */
	eint_wakeup_mask = s5p6442_pm_configure_extint();

	/* USB & OSC Clock pad Disable */
	tmp = __raw_readl(S5P_SLEEP_CFG);
	tmp &= ~((1 << S5P_SLEEP_CFG_OSC_EN) | (1 << S5P_SLEEP_CFG_USBOSC_EN));
	if(call_state)
	{
		tmp |= ((1 << S5P_SLEEP_CFG_OSC_EN) | (1 << S5P_SLEEP_CFG_USBOSC_EN));
	}
	__raw_writel(tmp , S5P_SLEEP_CFG);

	/* Power mode Config setting */
	tmp = __raw_readl(S5P_PWR_CFG);
	tmp &= S5P_CFG_WFI_CLEAN;
	tmp |= S5P_CFG_WFI_SLEEP;
	__raw_writel(tmp,S5P_PWR_CFG);
#if 0
	/* Set wakeup mask register */
	tmp = 0xFFFF;
	tmp &= ~(1 << 5);	// keypad
	tmp &= ~(1 << 1);
	tmp &= ~(1 << 2);
	__raw_writel(tmp , S5P_WAKEUP_MASK);
	//Save the normal mode configuration of WAKE_UP sources and make EXT Key as a
	//wake up source from suspend mode(Praveen)
#if 1 
	eint_wakeup_mask = __raw_readl(S5P_EINT_WAKEUP_MASK);
	tmp = 0xFFFFFFFF;
	//tmp &= ~(1 << 10);
	tmp &= ~(1 << 22);    //EINT2_6
	//tmp &= ~(1 << 15);
	__raw_writel(tmp, S5P_EINT_WAKEUP_MASK);
#endif
#endif	
	//Removed by Praveen(July 12, 2010)
	//__raw_writel(s3c_irqwake_eintmask,S5P_EINT_WAKEUP_MASK);

	/* send the cpu to sleep... */
	__raw_writel(0xffffffff, S5P64XX_VIC0INTENCLEAR);
	__raw_writel(0xffffffff, S5P64XX_VIC1INTENCLEAR);
	__raw_writel(0xffffffff, S5P64XX_VIC2INTENCLEAR);
	__raw_writel(0xffffffff, S5P64XX_VIC0SOFTINTCLEAR);
	__raw_writel(0xffffffff, S5P64XX_VIC1SOFTINTCLEAR);
	__raw_writel(0xffffffff, S5P64XX_VIC2SOFTINTCLEAR);

	/*  EINTPEND CLEAR */
	__raw_writel(0xFF, S5P64XX_GPA0_BASE + 0xF40);
	__raw_writel(0xFF, S5P64XX_GPA0_BASE + 0xF44);
	__raw_writel(0xFF, S5P64XX_GPA0_BASE + 0xF48);
	__raw_writel(0xFF, S5P64XX_GPA0_BASE + 0xF4C);

#if (CONFIG_BOARD_REVISION == 0x0)  // EVT1 doesn't work 'PDNEN' setting.
	__raw_writel(0x2, S5P64XX_PDNEN);
#endif

	/* SYSC INT Disable */
	tmp = __raw_readl(S5P_OTHERS);
	tmp &= ~(0x3<<8);
	tmp |= S5P_OTHER_SYSC_INTOFF;
	if(call_state)
	{
		tmp |= (0x3<<8);
	}
	__raw_writel(tmp,S5P_OTHERS);

	tmp = __raw_readl(S5P_WAKEUP_STAT);
	__raw_writel(tmp, S5P_WAKEUP_STAT);

	/* s5p6442_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state, so use the return
	 * code to differentiate return from save and return from sleep */

	if (s5p6442_cpu_save(regs_save) == 0) {
		flush_cache_all();
		pm_cpu_sleep();
	}

	/* restore the cpu state */
	cpu_init();

	/* restore the system state */
	s5p6442_pm_do_restore(gpio_save, ARRAY_SIZE(gpio_save));
	s5p6442_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));	
	s5p6442_pm_do_restore_core(core_save, ARRAY_SIZE(core_save));
	s5p6442_pm_do_restore(sromc_save, ARRAY_SIZE(sromc_save));
	//s5p6442_pm_do_restore(onenand_save, ARRAY_SIZE(onenand_save));
	//s5p6442_pm_do_restore(uart_save, ARRAY_SIZE(uart_save));

#if (CONFIG_BOARD_REVISION == 0x0)
	__raw_writel(0x0, S5P64XX_PDNEN);
#endif

	/*enable gpio, uart, mmc*/
	tmp = __raw_readl(S5P_OTHERS);
	tmp |= (1<<31)|(1<<28)|(1<<29);
	__raw_writel(tmp, S5P_OTHERS);

	/* UART INTERRUPT PENDING REG clear */
	tmp = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UINTP);	
	__raw_writel(tmp, S3C24XX_VA_UART0 + S3C2410_UINTP);
	tmp = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UINTP);
	__raw_writel(tmp, S3C24XX_VA_UART1 + S3C2410_UINTP);
	tmp = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UINTP);
	__raw_writel(tmp, S3C24XX_VA_UART2 + S3C2410_UINTP);

	tmp = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UINTSP);
	__raw_writel(tmp, S3C24XX_VA_UART0 + S3C2410_UINTSP);
	tmp = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UINTSP);
	__raw_writel(tmp, S3C24XX_VA_UART1 + S3C2410_UINTSP);
	tmp = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UINTSP);
	__raw_writel(tmp, S3C24XX_VA_UART2 + S3C2410_UINTSP);

	/*
	__raw_writel(0xF, S3C24XX_VA_UART0 + S3C2410_UINTSP);
	__raw_writel(0xF, S3C24XX_VA_UART1 + S3C2410_UINTSP);
	__raw_writel(0xF, S3C24XX_VA_UART2 + S3C2410_UINTSP);
	*/

	/* Enable CLK GATE for UART */
	tmp = __raw_readl(S5P_CLKGATE_IP3);
	tmp |= (S5P_CLKGATE_IP3_UART0 | S5P_CLKGATE_IP3_UART1 | S5P_CLKGATE_IP3_UART2);
	__raw_writel(tmp, S5P_CLKGATE_IP3);

	//[sm.kim: remove inconsistant code
	//tmp = __raw_readl(S5P64XX_EINT2PEND);
	//__raw_writel(tmp, S5P64XX_EINT2PEND);
	//]

	// __raw_writel(__raw_readl(S5P64XX_EINT0MASK)&~(1UL << 1), S5P64XX_EINT0MASK); //unmasking ext. INT

	tmp = __raw_readl(S5P_WAKEUP_STAT);
	DEBUG_WAKEUP("[PM] WAKEUP_STAT (0x%08x)\n", tmp);
	__raw_writel(tmp, S5P_WAKEUP_STAT);

	if( (__raw_readl(S5P64XX_GPA0_BASE+0xF40) & (1 << 7)) ||  // AP_PMIC_IRQ
			(__raw_readl(S5P64XX_GPA0_BASE+0xF48) & (1 << 7)) )  // JACK_nINT
		charger_wakeup = 1;
	else
		charger_wakeup = 0;

	if (__raw_readl(S5P64XX_GPA0_BASE+0xF48) & 0x01)
		wakeup_sdslot = 1;

	if(tmp == 0x00000001)
	{
		if((__raw_readl(S5P64XX_GPA0_BASE+0xF48) & 0x40) ||(__raw_readl(S5P64XX_GPA0_BASE+0xF44) & 0x01) )
			wakeup_flag_for_key = 249;
		else if(__raw_readl(S5P64XX_GPA0_BASE+0xF4C) & 0x01)
			wakeup_flag_for_key = 102;
		else
			wakeup_flag_for_key = 0;
	}

	DEBUG_WAKEUP("[PM-2] EINT0(0x%08x), EINT1(0x%08x), EINT2(0x%08x), EINT3(0x%08x)\n", 
		__raw_readl(S5P64XX_GPA0_BASE+0xF40), __raw_readl(S5P64XX_GPA0_BASE+0xF44),
		__raw_readl(S5P64XX_GPA0_BASE+0xF48), __raw_readl(S5P64XX_GPA0_BASE+0xF4C));
#if 1 //Write back the normal mode configuration for WAKE_UP source(Praveen)
	__raw_writel(eint_wakeup_mask, S5P_EINT_WAKEUP_MASK);
#endif
	// mdelay(10);

	DBG("post sleep, preparing to return\n");

	//s5p6442_pm_check_restore();

	/* ok, let's return from sleep */
	DBG("S5P6442 PM Resume (post-restore)\n");
	return 0;
}
Example #12
static void per_cpu_cache_flush_arm(void *arg)
{
	flush_cache_all();
}
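
per_cpu_cache_flush_arm() matches the smp_call_func_t signature, so it is meant to be run on every core at once. A minimal usage sketch with the standard kernel helper; the final argument 1 makes the call wait until every CPU has finished:

	on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);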
Example #13
static int s3c6410_pm_enter(suspend_state_t state)
{
	unsigned long regs_save[16];
	unsigned int tmp;
	unsigned int wakeup_stat = 0x0;
	unsigned int eint0pend = 0x0;

	/* ensure the debug is initialised (if enabled) */

	DBG("s3c6410_pm_enter(%d)\n", state);


	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR PFX "error: no cpu sleep functions set\n");
		return -EINVAL;
	}

	/* prepare check area if configured */
	s3c6410_pm_check_prepare();

	/* store the physical address of the register recovery block */
	s3c6410_sleep_save_phys = virt_to_phys(regs_save);

	DBG("s3c6410_sleep_save_phys=0x%08lx\n", s3c6410_sleep_save_phys);

	/* save all necessary core registers not covered by the drivers */

	s3c6410_pm_do_save(gpio_save, ARRAY_SIZE(gpio_save));
	s3c6410_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
	s3c6410_pm_do_save(core_save, ARRAY_SIZE(core_save));
	s3c6410_pm_do_save(sromc_save, ARRAY_SIZE(sromc_save));
//bss	s3c6410_pm_do_save(onenand_save, ARRAY_SIZE(onenand_save));
	s3c6410_pm_do_save(uart_save, ARRAY_SIZE(uart_save));

	/* plant raw ARM opcodes (a small resume trampoline) at 0x50008000 */
	__raw_writel(0xE240000C, (phys_to_virt(0x50008000)));
	__raw_writel(0xE5901000, (phys_to_virt(0x50008004)));
	__raw_writel(0xE1a0f001, (phys_to_virt(0x50008008)));
	__raw_writel(0xe320f000, (phys_to_virt(0x5000800C)));
	__raw_writel(0xe320f000, (phys_to_virt(0x50008010)));
	__raw_writel(0xe320f000, (phys_to_virt(0x50008014)));

	/* ensure INF_REG0 has the resume address */
	__raw_writel(virt_to_phys(s3c6410_cpu_resume), S3C_INFORM0);

	/* set the irq configuration for wake */
	s3c6410_pm_configure_extint();

	/* call cpu specific preparation */
	__raw_writel(0xF, S3C_INFORM3);
	pm_cpu_prep();

	/* flush cache back to ram */

	flush_cache_all();

	s3c6410_pm_check_store();

	s3c_config_sleep_gpio();	

	tmp = __raw_readl(S3C64XX_SPCONSLP);
	tmp &= ~(0x3 << 12);
	__raw_writel(tmp | (0x1 << 12), S3C64XX_SPCONSLP);

	/* send the cpu to sleep... */

	__raw_writel(0xffffffff, S3C64XX_VIC0INTENCLEAR);
	__raw_writel(0xffffffff, S3C64XX_VIC1INTENCLEAR);
	__raw_writel(0xffffffff, S3C64XX_VIC0SOFTINTCLEAR);
	__raw_writel(0xffffffff, S3C64XX_VIC1SOFTINTCLEAR);

	/* Unmask clock gating and block power turn on */
	__raw_writel(0x43E00041, S3C_HCLK_GATE); 
	__raw_writel(0xF2040000, S3C_PCLK_GATE);
	__raw_writel(0x80000011, S3C_SCLK_GATE);
	__raw_writel(0x00000000, S3C_MEM0_CLK_GATE);

	__raw_writel(0x1, S3C_OSC_STABLE);
	__raw_writel(0x3, S3C_PWR_STABLE);

	/* Set WFI instruction to SLEEP mode */

	tmp = __raw_readl(S3C_PWR_CFG);
	tmp &= ~(0x3<<5);
	tmp |= (0x3<<5);
	__raw_writel(tmp, S3C_PWR_CFG);

	tmp = __raw_readl(S3C_SLEEP_CFG);
	tmp &= ~(0x61<<0);
	__raw_writel(tmp, S3C_SLEEP_CFG);

	__raw_writel(0x2, S3C64XX_SLPEN);

	/* Clear WAKEUP_STAT register for the next wakeup -jc.lee */
	/* If this register is not cleared, wakeup will fail */
	__raw_writel(__raw_readl(S3C_WAKEUP_STAT), S3C_WAKEUP_STAT);

	/* s3c6410_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state, so use the return
	 * code to differentiate return from save and return from sleep */

	if (s3c6410_cpu_save(regs_save) == 0) {
		flush_cache_all();
		pm_cpu_sleep();
	}

	/* restore the cpu state */
	cpu_init();

	__raw_writel(s3c_eint_mask_val, S3C_EINT_MASK);

	/* restore the system state */
	s3c6410_pm_do_restore_core(core_save, ARRAY_SIZE(core_save));
	s3c6410_pm_do_restore(sromc_save, ARRAY_SIZE(sromc_save));
	s3c6410_pm_do_restore(gpio_save, ARRAY_SIZE(gpio_save));
	s3c6410_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
//bss	s3c6410_pm_do_restore(onenand_save, ARRAY_SIZE(onenand_save));
	s3c6410_pm_do_restore(uart_save, ARRAY_SIZE(uart_save));
	
	__raw_writel(0x0, S3C64XX_SLPEN);

	wakeup_stat = __raw_readl(S3C_WAKEUP_STAT);
	eint0pend = __raw_readl(S3C64XX_EINT0PEND);

	__raw_writel(eint0pend, S3C64XX_EINT0PEND);

	DBG("post sleep, preparing to return\n");

	s3c6410_pm_check_restore();

	extra_eint0pend = eint0pend;
	extra_wakeup_stat = wakeup_stat;

	pr_info("%s: WAKEUP_STAT(0x%08x), EINT0PEND(0x%08x)\n",
			__func__, wakeup_stat, eint0pend);

	s3c_config_wakeup_gpio();	

	/* ok, let's return from sleep */
	DBG("S3C6410 PM Resume (post-restore)\n");
	return 0;
}
Example #14
static int s3c_pm_enter(suspend_state_t state)
{
	static unsigned long regs_save[16];

	

	s3c_pm_debug_init();

	S3C_PMDBG("%s(%d)\n", __func__, state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__);
		return -EINVAL;
	}

	

	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}

	

	s3c_sleep_save_phys = virt_to_phys(regs_save);

	S3C_PMDBG("s3c_sleep_save_phys=0x%08lx\n", s3c_sleep_save_phys);

	

	s3c_pm_save_gpios();
	s3c_pm_save_uarts();
	s3c_pm_save_core();

	

	s3c_pm_configure_extint();

	S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n",
	    s3c_irqwake_intmask, s3c_irqwake_eintmask);

	s3c_pm_arch_prepare_irqs();

	

	pm_cpu_prep();

	

	flush_cache_all();

	s3c_pm_check_store();

	

	s3c_pm_arch_stop_clocks();

	

	s3c_cpu_save(regs_save);

	

	cpu_init();

	

	s3c_pm_restore_core();
	s3c_pm_restore_uarts();
	s3c_pm_restore_gpios();

	s3c_pm_debug_init();

	

	s3c_pm_arch_show_resume_irqs();

	S3C_PMDBG("%s: post sleep, preparing to return\n", __func__);

	
	s3c_pm_debug_smdkled(1 << 1, 0);

	s3c_pm_check_restore();

	

	S3C_PMDBG("S3C PM Resume (post-restore)\n");
	return 0;
}
Example #15
static inline void cpu_enter_lowpower(void)
{
	flush_cache_all();
}
Example #16
static int hisik3_pm_enter(suspend_state_t state)
{
	unsigned long flags = 0;

	switch (state) {
		case PM_SUSPEND_STANDBY:
		case PM_SUSPEND_MEM:
			break;
		default:
			return -EINVAL;
	}

	if (has_wake_lock(WAKE_LOCK_SUSPEND)) {
		printk("hisik3_pm_enter has wake lock\n");
		return -EAGAIN;
	}

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	local_irq_save(flags);

	hisik3_pm_save_gic();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_CACHE_L2X0
	hisik3_pm_disable_l2x0();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*set pmu to low power*/
	pmulowpower(1);

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/* This is a workaround: delay 40ms to make sure LDO0 is
	 * powered off cleanly. */
	mdelay(40);

	/* protect timer0_0 timer0_1 and disable timer0 clk */
	protect_timer0_register();

#ifdef CONFIG_LOWPM_DEBUG
	/*set io to lowpower mode*/
	ioshowstatus(1);
	setiolowpower();
	ioshowstatus(1);

	/*set pmu to low power mode*/
	pmulowpower_show(1);
	pmulowpowerall(1);
	pmulowpower_show(1);
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	edb_putstr("[PM]Enter hilpm_cpu_godpsleep...\r\n");

#ifdef CONFIG_LOWPM_DEBUG
	/*time enable*/
	timer0_0_enable();

	/*rtc*/
	rtc_enable();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	hilpm_cpu_godpsleep();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*
	 * The status has been changed in fastboot, which makes it
	 * differ from the kernel's view, so reinitialize it here.
	 */
	pmctrl_reinit();
	pctrl_reinit();
	sysctrl_reinit();

	/*uart init.*/
	edb_reinit();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_LOWPM_DEBUG
	/*restore debug uart0*/
	debuguart_reinit();

	/*disable timer0*/
	timer0_0_disable();

	/*restore pmu config*/
	pmulowpowerall(0);
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/*PMU regs restore*/
	pmulowpower(0);

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	/* restore timer0_0 timer0_1 and enable timer0 clk */
	restore_timer0_register();

	flush_cache_all();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

#ifdef CONFIG_CACHE_L2X0
	hisik3_pm_enable_l2x0();
#endif

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	hisik3_pm_retore_gic();

	if (unlikely(in_atomic())) {
		pr_warn("PM: in atomic[%08x] at %d\n", preempt_count(), __LINE__);
	}

	local_irq_restore(flags);

	pr_notice("[PM]Restore OK.\r\n");

	return 0;
}
Example #17
static inline void cpu_enter_lowpower(void)
{
	/* Just flush the cache. Changing the coherency is not yet
	 * available on msm. */
	flush_cache_all();
}
Example #18
DAL_STATUS DAL_Printf(const char *fmt, ...)
{
	va_list args;
	uint i;
	DAL_STATUS ret = DAL_STATUS_OK;

	//printk("[MTKFB_DAL] DAL_Printf mfc_handle=0x%08X, fmt=0x%08X\n", mfc_handle, fmt);
	if (NULL == mfc_handle)
		return DAL_STATUS_NOT_READY;

	if (NULL == fmt)
		return DAL_STATUS_INVALID_ARGUMENT;

	DAL_LOCK();

	va_start(args, fmt);
	i = vsprintf(dal_print_buffer, fmt, args);
	BUG_ON(i >= ARRAY_SIZE(dal_print_buffer));
	va_end(args);

	DAL_CHECK_MFC_RET(MFC_Print(mfc_handle, dal_print_buffer));

	flush_cache_all();
/*
    if (LCD_STATE_POWER_OFF == LCD_GetState()) {
        ret = DAL_STATUS_LCD_IN_SUSPEND;
        dal_enable_when_resume = TRUE;
        goto End;
    }
    */
    if (down_interruptible(&sem_early_suspend)) {
        DISP_LOG_PRINT(ANDROID_LOG_INFO, "DAL", "can't get semaphore in DAL_Printf()\n");
        goto End;
    }

#if 0
	if(is_early_suspended){
		up(&sem_early_suspend);
		DISP_LOG_PRINT(ANDROID_LOG_INFO, "DAL", "DAL_Printf in power off\n");
		goto End;
	}
#endif
    up(&sem_early_suspend);

    mutex_lock(&OverlaySettingMutex);

    if (!dal_shown) {
        dal_shown = TRUE;
    }

    //DAL enable, switch ui layer from default 3 to 2
    if(isAEEEnabled==0)
    {
        printk("[DDP] isAEEEnabled from 0 to 1, ASSERT_LAYER=%d, dal_fb_pa %x\n", 
            ASSERT_LAYER, dal_fb_pa);
            
        isAEEEnabled = 1;
        DAL_CHECK_LCD_RET(LCD_Dynamic_Change_FB_Layer(isAEEEnabled)); // default_ui_ layer coniig to changed_ui_layer
        
        DAL_CHECK_MFC_RET(MFC_Open(&mfc_handle, dal_fb_addr,
                                   DAL_WIDTH, DAL_HEIGHT, DAL_BPP,
                                   DAL_FG_COLOR, DAL_BG_COLOR));        
        //DAL_Clean();        
        DAL_CHECK_LCD_RET(LCD_LayerSetAddress(ASSERT_LAYER, dal_fb_pa));
        DAL_CHECK_LCD_RET(LCD_LayerSetAlphaBlending(ASSERT_LAYER, TRUE, 0x80));
        DAL_CHECK_LCD_RET(LCD_LayerSetPitch(ASSERT_LAYER, DAL_WIDTH * DAL_BPP));  
		cached_layer_config[ASSERT_LAYER].fmt= DAL_FORMAT;
		cached_layer_config[ASSERT_LAYER].src_x = 0;
		cached_layer_config[ASSERT_LAYER].src_y = 0;
		cached_layer_config[ASSERT_LAYER].src_w = DAL_WIDTH;
		cached_layer_config[ASSERT_LAYER].src_h = DAL_HEIGHT;
		cached_layer_config[ASSERT_LAYER].dst_x = 0;
		cached_layer_config[ASSERT_LAYER].dst_y = 0;
		cached_layer_config[ASSERT_LAYER].dst_w = DAL_WIDTH;
		cached_layer_config[ASSERT_LAYER].dst_h = DAL_HEIGHT;
        DAL_CHECK_LCD_RET(LCD_LayerEnable(ASSERT_LAYER, TRUE));

        printk("after AEE config LCD layer 3: \n");
        LCD_Dump_Layer_Info();
    }
    atomic_set(&OverlaySettingDirtyFlag, 1);
    atomic_set(&OverlaySettingApplied, 0);
    mutex_unlock(&OverlaySettingMutex);

    DAL_CHECK_DISP_RET(DISP_UpdateScreen(0, 0, 
                                         DAL_WIDTH,
                                         DAL_HEIGHT));
End:
    DAL_UNLOCK();

    return ret;
}
Example #19
struct ipanic_header *ipanic_header_from_sd(unsigned int offset, unsigned int magic)
{
	struct ipanic_data_header *dheader;
	int dt;
	char str[256];
	size_t size = 0;
	struct ipanic_header *header;
	struct ipanic_data_header dheader_header = {
		.type = IPANIC_DT_HEADER,
		.offset = offset,
		.used = sizeof(struct ipanic_header),
	};
	header = (struct ipanic_header *)ipanic_data_from_sd(&dheader_header, 0);
	if (IS_ERR_OR_NULL((void *)header)) {
		LOGD("read header failed[%ld]\n", PTR_ERR((void *)header));
		header = NULL;
	} else if (header->magic != magic) {
		LOGD("no ipanic data[%x]\n", header->magic);
		kfree(header);
		header = NULL;
		ipanic_erase();
	} else {
		for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) {
			dheader = &header->data_hdr[dt];
			if (dheader->valid) {
				size += snprintf(str + size, 256 - size, "%s[%x@%x],",
						 dheader->name, dheader->used, dheader->offset);
			}
		}
		LOGD("ipanic data available^v^%s^v^\n", str);
	}
	return header;
}

struct aee_oops *ipanic_oops_from_sd(void)
{
	struct aee_oops *oops = NULL;
	struct ipanic_header *hdr = NULL;
	struct ipanic_data_header *dheader;
	char *data;
	int i;
	hdr = ipanic_header_from_sd(0, AEE_IPANIC_MAGIC);
	if (hdr == NULL) {
		return NULL;
	}

	oops = aee_oops_create(AE_DEFECT_FATAL, AE_KE, IPANIC_MODULE_TAG);
	if (oops == NULL) {
		LOGE("%s: can not allocate buffer\n", __func__);
		return NULL;
	}

	for (i = IPANIC_DT_HEADER + 1; i < IPANIC_DT_RESERVED31; i++) {
		dheader = &hdr->data_hdr[i];
		if (dheader->valid == 0) {
			continue;
		}
		data = ipanic_data_from_sd(dheader, 1);
		if (data) {
			switch (i) {
			case IPANIC_DT_KERNEL_LOG:
				oops->console = data;
				oops->console_len = dheader->used;
				break;
			case IPANIC_DT_MINI_RDUMP:
				oops->mini_rdump = data;
				oops->mini_rdump_len = dheader->used;
				break;
			case IPANIC_DT_MAIN_LOG:
				oops->android_main = data;
				oops->android_main_len = dheader->used;
				break;
			case IPANIC_DT_SYSTEM_LOG:
				oops->android_system = data;
				oops->android_system_len = dheader->used;
				break;
			case IPANIC_DT_EVENTS_LOG:
				/* Todo .. */
				break;
			case IPANIC_DT_RADIO_LOG:
				oops->android_radio = data;
				oops->android_radio_len = dheader->used;
				break;
			case IPANIC_DT_CURRENT_TSK:
				memcpy(oops->process_path, data, sizeof(struct aee_process_info));
				break;
			case IPANIC_DT_MMPROFILE:
				oops->mmprofile = data;
				oops->mmprofile_len = dheader->used;
				break;
			default:
				LOGI("%s: [%d] NOT USED.\n", __func__, i);
			}
		} else {
			LOGW("%s: read %s failed, %x@%x\n", __func__,
			     dheader->name, dheader->used, dheader->offset);
		}
	}
	return oops;
}

int ipanic(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct ipanic_data_header *dheader;
	struct kmsg_dumper dumper;
	ipanic_atf_log_rec_t atf_log = {ATF_LOG_SIZE, 0, 0};
	int dt;
	int errno;
	struct ipanic_header *ipanic_hdr;
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_START);
	aee_rr_rec_exp_type(2);
	bust_spinlocks(1);
	spin_lock_irq(&ipanic_lock);
	aee_disable_api();
	mrdump_mini_ke_cpu_regs(NULL);
	ipanic_mrdump_mini(AEE_REBOOT_MODE_KERNEL_PANIC, "kernel PANIC");
	if (!ipanic_data_is_valid(IPANIC_DT_KERNEL_LOG)) {
		ipanic_klog_region(&dumper);
		errno = ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper);
		if (errno == -1)
			aee_nested_printf("$");
	}
	ipanic_klog_region(&dumper);
	errno = ipanic_data_to_sd(IPANIC_DT_OOPS_LOG, &dumper);
	if (errno == -1)
		aee_nested_printf("$");
	ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0);
	/* kick wdt after save the most critical infos */
	ipanic_kick_wdt();
	ipanic_data_to_sd(IPANIC_DT_MAIN_LOG, (void *)1);
	ipanic_data_to_sd(IPANIC_DT_SYSTEM_LOG, (void *)4);
	ipanic_data_to_sd(IPANIC_DT_EVENTS_LOG, (void *)2);
	ipanic_data_to_sd(IPANIC_DT_RADIO_LOG, (void *)3);
	aee_wdt_dump_info();
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_WDT_LOG, &dumper);
#ifdef CONFIG_MTK_WQ_DEBUG
	mt_dump_wq_debugger();
#endif
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_WQ_LOG, &dumper);
	ipanic_data_to_sd(IPANIC_DT_MMPROFILE, 0);
	ipanic_data_to_sd(IPANIC_DT_ATF_LOG, &atf_log);
	errno = ipanic_header_to_sd(0);
	if (!IS_ERR(ERR_PTR(errno)))
		mrdump_mini_ipanic_done();
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_LAST_LOG, &dumper);
	LOGD("ipanic done^_^");
	ipanic_hdr = ipanic_header();
	for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) {
		dheader = &ipanic_hdr->data_hdr[dt];
		if (dheader->valid) {
			LOGD("%s[%x@%x],", dheader->name, dheader->used, dheader->offset);
		}
	}
	LOGD("^_^\n");
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_DONE);

	return NOTIFY_DONE;
}

void ipanic_recursive_ke(struct pt_regs *regs, struct pt_regs *excp_regs, int cpu)
{
	int errno;
	struct kmsg_dumper dumper;
	aee_nested_printf("minidump\n");
	aee_rr_rec_exp_type(3);
	bust_spinlocks(1);
	flush_cache_all();
#ifdef __aarch64__
	cpu_cache_off();
#else
	cpu_proc_fin();
#endif
	mrdump_mini_ke_cpu_regs(excp_regs);
	mrdump_mini_per_cpu_regs(cpu, regs);
	flush_cache_all();
	ipanic_mrdump_mini(AEE_REBOOT_MODE_NESTED_EXCEPTION, "Nested Panic");

	ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0);
	ipanic_kick_wdt();
	ipanic_klog_region(&dumper);
	ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper);
	errno = ipanic_header_to_sd(0);
	if (!IS_ERR(ERR_PTR(errno)))
		mrdump_mini_ipanic_done();
	if (ipanic_dt_active(IPANIC_DT_RAM_DUMP)) {
		aee_nested_printf("RAMDUMP.\n");
		__mrdump_create_oops_dump(AEE_REBOOT_MODE_NESTED_EXCEPTION, excp_regs,
					  "Nested Panic");
	}
	bust_spinlocks(0);
}
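
ipanic() above has the notifier-callback signature, so it is wired into the kernel's panic path through the panic notifier chain. A minimal sketch of the registration; the notifier_block and init-function names are hypothetical:

	/* Hypothetical registration: run ipanic() when the kernel panics. */
	static struct notifier_block panic_blk = {
		.notifier_call = ipanic,
	};

	static int __init ipanic_init(void)
	{
		atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
		return 0;
	}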
Example #20
static IMG_RESULT AllocPages(
    SYSMEM_Heap *     heap,
    IMG_UINT32        ui32Size,
    SYSMEMU_sPages *  psPages,
    SYS_eMemAttrib    eMemAttrib
)
{
    IMG_UINT32  ui32NoPages;
    IMG_UINT32  ui32ExamPages;
    IMG_UINT32  i;
    IMG_UINT64  ui64DeviceMemoryBase;
    IMG_PHYSADDR  paCpuPhysAddr;
    IMG_UINT32  ui32Result;
    size_t      physAddrArrSize;

    struct priv_params *  prv = (struct priv_params *)heap->priv;

    /* If we don't know where the memory is...*/
    SYSOSKM_DisableInt();

    /* Calculate required no. of pages...*/
    ui32NoPages = (ui32Size + (HOST_MMU_PAGE_SIZE-1)) / HOST_MMU_PAGE_SIZE;

    /* Loop over allocated pages until we find an unallocated slot big enough for this allocation...*/
    ui32ExamPages = 0;
    while (ui32ExamPages < prv->npages)
    {
        /* If the current page is not allocated and we might have enough remaining to make this allocation...*/
        if (
                (!prv->alloc_pool[prv->cur_index]) &&
                ((prv->cur_index + ui32NoPages) <= prv->npages)
            )
        {
            /* Can we make this allocation...*/
            for (i=0; i<ui32NoPages; i++)
            {
                if (prv->alloc_pool[prv->cur_index+i])
                {
                    break;
                }
            }
            if (i == ui32NoPages)
            {
                /* Yes, mark pages as allocated...*/
                for (i=0; i<ui32NoPages; i++)
                {
                    prv->alloc_pool[prv->cur_index+i] = IMG_TRUE;
                }

                /* Calculate the memory address of the start of the allocation...*/
                //psPages->pvCpuKmAddr = (IMG_VOID *)((IMG_UINTPTR)prv->vstart + (prv->cur_index * HOST_MMU_PAGE_SIZE));
                psPages->pvImplData = (IMG_VOID *)(prv->vstart + (prv->cur_index * HOST_MMU_PAGE_SIZE));

                /* Update the current page index....*/
                prv->cur_index += ui32NoPages;
                if (prv->cur_index >= prv->npages)
                {
                    prv->cur_index = 0;
                }
                break;
            }
        }

        /* Update examined pages and page index...*/
        ui32ExamPages++;
        prv->cur_index++;
        if (prv->cur_index >= prv->npages)
        {
            prv->cur_index = 0;
        }
    }
    SYSOSKM_EnableInt();

    /* Check if allocation failed....*/
    IMG_ASSERT(ui32ExamPages < prv->npages);
    if (ui32ExamPages >= prv->npages)
    {
        /* Failed...*/
        /* dump some fragmentation information */
        int i = 0;
        int nAllocated = 0;
        int n64kBlocks  = 0;    // number of blocks of <16 consecutive pages
        int n128kBlocks = 0;
        int n256kBlocks = 0;
        int nBigBlocks  = 0;    // number of blocks of >=64 consecutive pages
        int nMaxBlocks  = 0;
        int nPages = 0;
        for(i = 0; i < (int)prv->npages; i++)
        {
            IMG_UINT8 isallocated = prv->alloc_pool[i];
            nPages++;
            if(i == prv->npages-1 || isallocated != prv->alloc_pool[i+1])
            {
                if(isallocated)
                    nAllocated += nPages;
                else if(nPages < 16)
                    n64kBlocks++;
                else if(nPages < 32)
                    n128kBlocks++;
                else if(nPages < 64)
                    n256kBlocks++;
                else
                    nBigBlocks++;
                if(nMaxBlocks < nPages)
                    nMaxBlocks = nPages;
                isallocated = prv->alloc_pool[i];
                nPages = 0;
            }
        }
#ifdef printk
        /* hopefully, this will give some idea of the fragmentation of the memory */
        printk("AllocPages not able to allocate memory \n");
        printk("  number available memory areas under 64k:%d\n", n64kBlocks);
        printk("  number available memory areas under 128k:%d\n", n128kBlocks);
        printk("  number available memory areas under 256k:%d\n", n256kBlocks);
        printk("  number available memory areas over 256k:%d\n", nBigBlocks);
        printk("  total allocated memory:%dk/%dk\n", nAllocated*4, prv->npages*4);
#endif


        return IMG_ERROR_OUT_OF_MEMORY;
    }


    paCpuPhysAddr = CpuKmAddrToCpuPAddr(heap, psPages->pvImplData);
    IMG_ASSERT(paCpuPhysAddr != 0);
    if (paCpuPhysAddr == 0)
    {
        return IMG_ERROR_GENERIC_FAILURE;
    }

#ifdef CONFIG_ARM
    /* This flushes the outer cache in ARM, so we avoid memory corruption by late
       flushes of memory previously marked as cached. */
    if ((eMemAttrib & SYS_MEMATTRIB_CACHED) == 0) {
        mb();
        /* the following two calls are somewhat expensive, but are there for defensive reasons */
        flush_cache_all();
        outer_flush_all();
    }
#endif
    {
        IMG_PHYSADDR *      ppaCpuPhysAddrs;
        size_t numPages, pg_i, offset;

        // Memory for physical addresses
        numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;
        physAddrArrSize = sizeof(*ppaCpuPhysAddrs) * numPages;
        ppaCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
        if (!ppaCpuPhysAddrs)
        {
            return IMG_ERROR_OUT_OF_MEMORY;
        }
        for (pg_i = 0, offset = 0; pg_i < numPages; offset += HOST_MMU_PAGE_SIZE, ++pg_i)
        {
                ppaCpuPhysAddrs[pg_i] = paCpuPhysAddr + offset;
        }
        // Set pointer to physical address in structure
        psPages->ppaPhysAddr = ppaCpuPhysAddrs;

    }
    /* Add this to the list of mappable regions...*/
    ui32Result = SYSBRGU_CreateMappableRegion(paCpuPhysAddr, ui32Size, eMemAttrib, psPages, &psPages->hRegHandle);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS) 
    {
        goto error_mappable_region;
    }

#if defined(CLEAR_PAGES)
    if (psPages->pvImplData)
        IMG_MEMSET(psPages->pvImplData, 0, ui32Size);
#endif

    return IMG_SUCCESS;

    /* Error handling. */
error_mappable_region:
    IMG_BIGORSMALL_FREE(physAddrArrSize, psPages->ppaPhysAddr);
    psPages->ppaPhysAddr = IMG_NULL;

    return ui32Result;
}
Example #21
static void per_cpu_cache_flush_arm(void *arg)
{
    PVR_UNREFERENCED_PARAMETER(arg);
    flush_cache_all();
}
Example #22
void __init
iommu_init(int iommund, struct sbus_bus *sbus)
{
	unsigned int impl, vers;
	unsigned long tmp;
	struct iommu_struct *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;
	unsigned long *bitmap;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}
	prom_getproperty(iommund, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs));
	memset(&r, 0, sizeof(r));
	r.flags = iommu_promregs[0].which_io;
	r.start = iommu_promregs[0].phys_addr;
	iommu->regs = (struct iommu_regs *)
		sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
	if(!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
Example #23
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;
				struct bufdesc_ex *ebdp =
					(struct bufdesc_ex *)bdp;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (!skb_defer_rx_timestamp(skb))
				napi_gro_receive(&fep->napi, skb);
		}

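		/* Hand the buffer back to the controller before recycling
		 * the descriptor.
		 */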
		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	return pkt_received;
}
Example #24
static int mx6_suspend_enter(suspend_state_t state)
{
	unsigned int wake_irq_isr[4];
	struct gic_dist_state gds;
	struct gic_cpu_state gcs;

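	/* Bail out early if any enabled wakeup interrupt is already
	 * pending; suspending now would be undone immediately.
	 */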
	wake_irq_isr[0] = __raw_readl(gpc_base +
			GPC_ISR1_OFFSET) & gpc_wake_irq[0];
	wake_irq_isr[1] = __raw_readl(gpc_base +
			GPC_ISR2_OFFSET) & gpc_wake_irq[1];
	wake_irq_isr[2] = __raw_readl(gpc_base +
			GPC_ISR3_OFFSET) & gpc_wake_irq[2];
	wake_irq_isr[3] = __raw_readl(gpc_base +
			GPC_ISR4_OFFSET) & gpc_wake_irq[3];
	if (wake_irq_isr[0] | wake_irq_isr[1] |
			wake_irq_isr[2] | wake_irq_isr[3]) {
		printk(KERN_INFO "There are wakeup irq pending,system resume!\n");
		printk(KERN_INFO "wake_irq_isr[0-3]: 0x%x, 0x%x, 0x%x, 0x%x\n",
				wake_irq_isr[0], wake_irq_isr[1],
				wake_irq_isr[2], wake_irq_isr[3]);
		return 0;
	}
	mx6_suspend_store();

	/* i.MX6dl TO1.0 TKT094231: can't support ARM_POWER_OFF mode */
	if (state == PM_SUSPEND_MEM && cpu_is_mx6dl())
		state = PM_SUSPEND_STANDBY;

	switch (state) {
	case PM_SUSPEND_MEM:
		gpu_power_down();
		usb_power_down_handler();
		mxc_cpu_lp_set(ARM_POWER_OFF);
		break;
	case PM_SUSPEND_STANDBY:
		mxc_cpu_lp_set(STOP_POWER_OFF);
		break;
	default:
		return -EINVAL;
	}

	if (state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY) {
		if (pm_data && pm_data->suspend_enter)
			pm_data->suspend_enter();

		local_flush_tlb_all();
		flush_cache_all();

		if (state == PM_SUSPEND_MEM) {
			/* preserve gic state */
			save_gic_dist_state(0, &gds);
			save_gic_cpu_state(0, &gcs);
		}

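		/* The low-power entry sequence runs from on-chip IRAM, as
		 * DDR may be put into self-refresh while suspended.
		 */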
		suspend_in_iram(state, (unsigned long)iram_paddr,
			(unsigned long)suspend_iram_base);

		if (state == PM_SUSPEND_MEM) {
			/* restore gic registers */
			restore_gic_dist_state(0, &gds);
			restore_gic_cpu_state(0, &gcs);
			usb_power_up_handler();
			gpu_power_up();
		}
		mx6_suspend_restore();

		if (pm_data && pm_data->suspend_exit)
			pm_data->suspend_exit();
	} else {
		cpu_do_idle();
	}

	return 0;
}
Example #25
static int s3c_pm_enter(suspend_state_t state)
{
	/* ensure the debug is initialised (if enabled) */

	s3c_pm_debug_init();

	S3C_PMDBG("%s(%d)\n", __func__, state);

	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__);
		return -EINVAL;
	}

	/* check if we have anything to wake-up with... bad things seem
	 * to happen if you suspend with no wakeup (system will often
	 * require a full power-cycle)
	 */

	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}

	/* save all necessary core registers not covered by the drivers */

	s3c_pm_save_gpios();
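	/* s3c_pm_saved_gpios() is a separate SoC-specific hook run after
	 * the generic GPIO save above, not a misspelling of that call.
	 */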
	s3c_pm_saved_gpios();
	s3c_pm_save_uarts();
	s3c_pm_save_core();

	/* set the irq configuration for wake */

	s3c_pm_configure_extint();

	S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n",
	    s3c_irqwake_intmask, s3c_irqwake_eintmask);

	s3c_pm_arch_prepare_irqs();

	/* call cpu specific preparation */

	pm_cpu_prep();

	/* flush cache back to ram */

	flush_cache_all();

	s3c_pm_check_store();

	/* send the cpu to sleep... */

	s3c_pm_arch_stop_clocks();

	/* this will also act as our return point from when
	 * we resume as it saves its own register state and restores it
	 * during the resume.  */

	cpu_suspend(0, pm_cpu_sleep);

	/* restore the system state */

	s3c_pm_restore_core();
	s3c_pm_restore_uarts();
	s3c_pm_restore_gpios();
	s3c_pm_restored_gpios();

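	/* re-initialise debug output now that the UARTs have been restored */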
	s3c_pm_debug_init();

	/* check what irq (if any) restored the system */

	s3c_pm_arch_show_resume_irqs();

	S3C_PMDBG("%s: post sleep, preparing to return\n", __func__);

	/* LEDs should now be 1110 */
	s3c_pm_debug_smdkled(1 << 1, 0);

	s3c_pm_check_restore();

	/* ok, let's return from sleep */

	S3C_PMDBG("S3C PM Resume (post-restore)\n");
	return 0;
}
Example #26
/* Called from the FIQ bark handler */
void msm_wdog_bark_fin(void)
{
	flush_cache_all();
	pr_crit("\nApps Watchdog bark received - Calling Panic\n");
	panic("Apps Watchdog Bark received\n");
}
Example #27
void flush_cache_mm(struct mm_struct *mm)
{
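	/* no selective per-mm flush on this platform; flush everything */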
	flush_cache_all();
}
Example #28
static void
r5900_flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}
Example #29
/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
Example #30
int update_ddr_freq(int ddr_rate)
{
	int i;
	unsigned int reg;

	if (!can_change_ddr_freq())
		return -1;

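	/* The frequency change executes from on-chip RAM while DDR access
	 * is stalled, so flush the TLBs and caches up front.
	 */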
	local_flush_tlb_all();
	flush_cache_all();

	iram_ddr_settings[0][0] = ddr_settings_size;
	if (ddr_rate == LP_APM_CLK) {
		if (mx50_ddr_type == MX50_LPDDR2) {
			for (i = 0; i < iram_ddr_settings[0][0]; i++) {
				iram_ddr_settings[i + 1][0] =
								lpddr2_24[i][0];
				iram_ddr_settings[i + 1][1] =
								lpddr2_24[i][1];
			}
		} else {
			for (i = 0; i < iram_ddr_settings[0][0]; i++) {
				iram_ddr_settings[i + 1][0]
								= mddr_24[i][0];
				iram_ddr_settings[i + 1][1]
								= mddr_24[i][1];
			}
		}
	} else {
		if (mx50_ddr_type == MX50_DDR2) {
			if (ddr_rate == ddr_low_rate) {
				for (i = 0; i < iram_ddr_settings[0][0]; i++) {
					iram_ddr_settings[i + 1][0] =
							ddr2_133[i][0];
					iram_ddr_settings[i + 1][1] =
							ddr2_133[i][1];
				}
			} else if (ddr_rate == ddr_med_rate) {
				for (i = 0; i < iram_ddr_settings[0][0]; i++) {
					iram_ddr_settings[i + 1][0] =
							ddr2_160[i][0];
					iram_ddr_settings[i + 1][1] =
							ddr2_160[i][1];
				}
			} else {
				for (i = 0; i < iram_ddr_settings[0][0]; i++) {
					iram_ddr_settings[i + 1][0] =
							normal_databahn_settings[i][0];
					iram_ddr_settings[i + 1][1] =
							normal_databahn_settings[i][1];
				}
			}
		} else {
			for (i = 0; i < iram_ddr_settings[0][0]; i++) {
				iram_ddr_settings[i + 1][0] =
						normal_databahn_settings[i][0];
				iram_ddr_settings[i + 1][1] =
						normal_databahn_settings[i][1];
			}
			if (ddr_rate == ddr_med_rate) {
				/* Change the tref setting. */
				for (i = 0; i < iram_ddr_settings[0][0]; i++) {
					if (iram_ddr_settings[i + 1][0] == 0x40) {
						if (mx50_ddr_type == MX50_LPDDR2)
							/* LPDDR2 133MHz. */
							iram_ddr_settings[i + 1][1] =
									0x00050180;
						else
							/* mDDR 133MHz. */
							iram_ddr_settings[i + 1][1] =
									0x00050208;
						break;
					}
				}
			}
		}
	}
	/* Disable all masters from accessing the DDR, leave masters
	 * on port 0 and 1 enabled.
	 */
	reg = __raw_readl(qosc_base + HW_QOS_DISABLE);
	reg |= QoS_DISABLE_MASTERS;
	__raw_writel(reg, qosc_base + HW_QOS_DISABLE_SET);
	udelay(100);

	/* Set the DDR to default freq. */
	change_ddr_freq(ccm_base, databahn_base, ddr_rate,
					iram_ddr_settings);

	/* Enable all masters to access the DDR. */
	__raw_writel(reg, qosc_base + HW_QOS_DISABLE_CLR);

	return 0;
}