Example 1
/*==========================================================================*
 * Name:         start_secondary
 *
 * Description:  This routine activates a secondary processor.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *unused - currently unused.
 *
 * Returns:      0 (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
int __init start_secondary(void *unused)
{
	cpu_init();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		cpu_relax();

	smp_online();

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb_all();

	cpu_idle();
	return 0;
}
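The spin on smp_commenced_mask above is one half of a boot handshake. As a hedged sketch of the boot-CPU side (wake_secondary() is hypothetical; only smp_commenced_mask and the online test come from the example):

int __cpu_up(unsigned int cpu)
{
	wake_secondary(cpu);			/* hypothetical: kick the CPU */
	cpu_set(cpu, smp_commenced_mask);	/* releases the spin loop above */
	while (!cpu_online(cpu))		/* set once smp_online() runs */
		cpu_relax();
	return 0;
}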
Example 2
File: smp.c Project: 24hours/linux
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
	octeon_fixup_irqs();
	local_irq_enable();

	flush_cache_all();
	local_flush_tlb_all();

	return 0;
}
Example 3
/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
	struct task_struct *tsk;
	int i;

	if (ia64_ctx.next > max_ctx)
		ia64_ctx.next = 300;	/* skip daemons */
	ia64_ctx.limit = max_ctx + 1;

	/*
	 * Scan all tasks' mm->context and set the proper safe range.
	 */

	read_lock(&tasklist_lock);
  repeat:
	for_each_process(tsk) {
		if (!tsk->mm)
			continue;
		tsk_context = tsk->mm->context;
		if (tsk_context == ia64_ctx.next) {
			if (++ia64_ctx.next >= ia64_ctx.limit) {
				/* empty range: reset the range limit and start over */
				if (ia64_ctx.next > max_ctx)
					ia64_ctx.next = 300;
				ia64_ctx.limit = max_ctx + 1;
				goto repeat;
			}
		}
		if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
			ia64_ctx.limit = tsk_context;
	}
	read_unlock(&tasklist_lock);
	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
	{
		int cpu = get_cpu(); /* prevent preemption/migration */
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_online(i) && (i != cpu))
				per_cpu(ia64_need_tlb_flush, i) = 1;
		put_cpu();
	}
	local_flush_tlb_all();
}
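The per-CPU ia64_need_tlb_flush flags set above are consumed lazily, one CPU at a time. A sketch of that consumer, modeled on the ia64 mmu-context code (the exact hook point, e.g. the next context switch, is an assumption), re-checks the flag under ia64_ctx.lock before purging:

static inline void delayed_tlb_flush(void)
{
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}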
Example 4
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}
Example 5
void __init davinci_common_init(struct davinci_soc_info *soc_info)
{
	int ret;

	if (!soc_info) {
		ret = -EINVAL;
		goto err;
	}

	memcpy(&davinci_soc_info, soc_info, sizeof(struct davinci_soc_info));

	if (davinci_soc_info.io_desc && (davinci_soc_info.io_desc_num > 0))
		iotable_init(davinci_soc_info.io_desc,
				davinci_soc_info.io_desc_num);

	/*
	 * Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	/*
	 * We want to check CPU revision early for cpu_is_xxxx() macros.
	 * IO space mapping must be initialized before we can do that.
	 */
	ret = davinci_init_id(&davinci_soc_info);
	if (ret < 0)
		goto err;

	if (davinci_soc_info.cpu_clks) {
		ret = davinci_clk_init(davinci_soc_info.cpu_clks);

		if (ret != 0)
			goto err;
	}

	return;

err:
	panic("davinci_common_init: SoC Initialization failed\n");
}
Example 6
static void __init _omap2_map_common_io(void)
{
	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	omap2_check_revision();
	omap_sram_init();
	omapfb_reserve_sdram();
	omap_vram_reserve_sdram();
	dspbridge_reserve_sdram();

#ifdef CONFIG_TF_MSHIELD
	tf_allocate_workspace();
#endif
}
Example 7
File: io.c Project: mozyg/kernel
void __init omap2_map_common_io(void)
{
#if defined(CONFIG_ARCH_OMAP2420)
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap242x_io_desc, ARRAY_SIZE(omap242x_io_desc));
#elif defined(CONFIG_ARCH_OMAP2430)
	iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc));
	iotable_init(omap243x_io_desc, ARRAY_SIZE(omap243x_io_desc));
#endif
	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();
	omap2_check_revision();
	omap_sram_init();
	omapfb_reserve_sdram();
}
Example 8
/*
 *  Maps common IO regions for tcc88xx.
 */
void __init tcc_map_common_io(void)
{
	iotable_init(tcc8800_io_desc, ARRAY_SIZE(tcc8800_io_desc));

	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();
#if defined(__TODO__)
	IO_UTIL_ReadECID();
#endif

	//tcc_reserve_sdram();

	// XXX
	tca_ckc_init();
}
Example 9
/*
 * Maps common IO regions for omap1. This should only get called from
 * board specific init.
 */
void __init omap1_map_common_io(void)
{
	iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));

	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	/* We want to check CPU revision early for cpu_is_omapxxxx() macros.
	 * IO space mapping must be initialized before we can do that.
	 */
	omap_check_revision();

#ifdef CONFIG_ARCH_OMAP730
	if (cpu_is_omap730()) {
		iotable_init(omap730_io_desc, ARRAY_SIZE(omap730_io_desc));
	}
#endif

#ifdef CONFIG_ARCH_OMAP850
	if (cpu_is_omap850()) {
		iotable_init(omap850_io_desc, ARRAY_SIZE(omap850_io_desc));
	}
#endif

#ifdef CONFIG_ARCH_OMAP15XX
	if (cpu_is_omap15xx()) {
		iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
	}
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
	if (cpu_is_omap16xx()) {
		iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
	}
#endif

	omap_sram_init();
	omapfb_reserve_sdram();
}
Example 10
static __init void wire_stupidity_into_tlb(void)
{
#ifdef CONFIG_32BIT
	write_c0_wired(0);
	local_flush_tlb_all();

	/* marvell and extra space */
	add_wired_entry(ENTRYLO(0xf4000000), ENTRYLO(0xf4010000),
	                0xf4000000UL, PM_64K);
	/* fpga, rtc, and uart */
	add_wired_entry(ENTRYLO(0xfc000000), ENTRYLO(0xfd000000),
	                0xfc000000UL, PM_16M);
//	/* m-sys and internal SRAM */
//	add_wired_entry(ENTRYLO(0xfe000000), ENTRYLO(0xff000000),
//	                0xfe000000UL, PM_16M);

	marvell_base = 0xf4000000;
	//mv64340_sram_base = 0xfe000000;	/* Currently unused */
#endif
}
Example 11
void __init davinci_map_common_io(void)
{
	iotable_init(davinci_io_desc, ARRAY_SIZE(davinci_io_desc));
	/*
	* Deep sleep mode IRAM
	*/
	iotable_init(davinci_io_desc_IRAM, ARRAY_SIZE(davinci_io_desc_IRAM));

	/* Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	/* We want to check CPU revision early for cpu_is_xxxx() macros.
	 * IO space mapping must be initialized before we can do that.
	 */
	davinci_check_revision();
}
Example 12
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
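The mp_ops->cpu_disable() call at the top is the per-platform policy hook; Examples 2 and 4 are real implementations of it. A minimal stub that only refuses to offline the boot CPU could be as small as this (the name is hypothetical):

static int generic_cpu_disable(unsigned int cpu)
{
	/* CPU 0 handles boot-time duties and cannot be offlined here. */
	return (cpu == 0) ? -EBUSY : 0;
}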
Example 13
int __init rk29_sram_init(void)
{
	char *start;
	char *end;
	char *ram;

	iotable_init(sram_io_desc, ARRAY_SIZE(sram_io_desc));

	/*
	 * Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but since we're called from map_io(), we
	 * must do it here.
	 */
	local_flush_tlb_all();
	flush_cache_all();

	memset((char *)SRAM_CODE_OFFSET, 0x0, (SRAM_CODE_END - SRAM_CODE_OFFSET + 1));
	memset((char *)SRAM_DATA_OFFSET, 0x0, (SRAM_DATA_END - SRAM_DATA_OFFSET + 1));

	/* Copy code from RAM to SRAM CODE */
	start = &__ssram_code_text;
	end   = &__esram_code_text;
	ram   = &__sram_code_start;
	memcpy(start, ram, (end-start));
	flush_icache_range((unsigned long) start, (unsigned long) end);

	printk("CPU SRAM: copied sram code from %p to %p - %p\n", ram, start, end);

	/* Copy data from RAM to SRAM DATA */
	start = &__ssram_data;
	end   = &__esram_data;
	ram   = &__sram_data_start;
	memcpy(start, ram, (end-start));

	printk("CPU SRAM: copied sram data from %p to %p - %p\n", ram, start, end);

	sram_log_dump();

	return 0;
}
Example 14
File: pm.c Project: Astralix/kernel
static int rk_lpmode_enter(unsigned long arg)
{
	RKPM_DDR_PFUN(slp_setting(rkpm_jdg_sram_ctrbits), slp_setting);

	local_flush_tlb_all();
	flush_cache_all();
	outer_flush_all();
	outer_disable();
	cpu_proc_fin();
	/* outer_inv_all(); */		/* ??? */
	/* l2x0_inv_all_pm(); */	/* not needed on rk319x */
	flush_cache_all();

	rkpm_ddr_printch('d');

	dsb();
	wfi();

	rkpm_ddr_printch('D');
	return 0;
}
Example 15
void __init
ia64_tlb_init (void)
{
	ia64_ptce_info_t ptce_info;
	unsigned long tr_pgbits;
	long status;

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();		/* nuke left overs from bootstrapping... */
}
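For context, the ptce_* values cached above parameterize local_flush_tlb_all() itself, which issues a two-level loop of ptc.e purges. A sketch modeled on the ia64 implementation:

void local_flush_tlb_all(void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);	/* purge one translation-cache entry */
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();	/* serialize; srlz.i implies srlz.d */
}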
Example 16
/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However, our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly the same as above, except the TLB entries take no ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
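A typical caller tears the page tables down first and then purges this CPU's stale translations. A hedged usage sketch (the unmap helper and the va/size arguments are assumptions, not part of the example):

static void teardown_mapping(unsigned long va, unsigned long size)
{
	unmap_kernel_range_noflush(va, size);		/* assumed helper */
	local_flush_tlb_kernel_range(va, va + size);	/* purge stale entries */
}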
Example 17
/*
 * Hide the first two arguments to __cpu_suspend - these are an implementation
 * detail which platform code shouldn't have to know about.
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	int ret;

	if (!idmap_pgd)
		return -EINVAL;

	/*
	 * Provide a temporary page table with an identity mapping for
	 * the MMU-enable code, required for resuming.  On successful
	 * resume (indicated by a zero return code), we need to switch
	 * back to the correct page tables.
	 */
	ret = __cpu_suspend(arg, fn);
	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	return ret;
}
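Platform code passes a "finisher" that actually enters the low-power state; if the finisher returns, the attempt is treated as failed. A hedged sketch (soc_enter_lowpower() is hypothetical):

static int my_soc_finisher(unsigned long arg)
{
	soc_enter_lowpower(arg);	/* hypothetical: point of no return */
	return 1;			/* reached only if entry failed */
}

/* somewhere in the platform suspend path: */
	ret = cpu_suspend(0, my_soc_finisher);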
Example 18
/* change the DDR frequency. */
int update_lpddr2_freq(int ddr_rate)
{
	if (ddr_rate == curr_ddr_rate)
		return 0;

	dev_dbg(busfreq_dev, "\nBus freq set to %d start...\n", ddr_rate);

	/*
	 * Flush the TLB, to ensure no TLB maintenance occurs
	 * when DDR is in self-refresh.
	 */
	local_flush_tlb_all();
	/* Now change DDR frequency. */
	mx6_change_lpddr2_freq(ddr_rate,
		(low_bus_freq_mode | ultra_low_bus_freq_mode),
		reg_addrs);

	curr_ddr_rate = ddr_rate;

	dev_dbg(busfreq_dev, "\nBus freq set to %d done...\n", ddr_rate);

	return 0;
}
Example 19
/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size > TFP_TLB_SIZE / 2) {
		local_flush_tlb_all();
		return;
	}

	local_irq_save(flags);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}

	local_irq_restore(flags);
}
Example 20
void play_dead(void)
{
	unsigned int cpu = smp_processor_id();
	void (*do_play_dead)(void) = (void (*)(void))KSEG1ADDR(__play_dead);

	local_irq_disable();
	if (cpu == 0)
		set_smp_mbox0(0);
	else if (cpu == 1)
		set_smp_mbox1(0);
	else if (cpu == 2)
		set_smp_mbox2(0);
	else if (cpu == 3)
		set_smp_mbox3(0);

	smp_clr_pending(1 << cpu);
	smp_clr_pending(1 << (cpu + 8));
	while (1) {
		while (cpumask_test_cpu(cpu, &cpu_running))
			;
		local_flush_tlb_all();
		blast_icache_jz();
		blast_dcache_jz();

		do_play_dead();
	}
}
Example 21
void restore_processor_state(void)
{
	local_flush_tlb_all();
}
Example 22
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}
Example 23
void flush_tlb_all(void)
{
	smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
	local_flush_tlb_all();
}
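Note the four-argument smp_call_function() here is the old signature; the third ("nonatomic") parameter was dropped in later kernels. A sketch of the same broadcast with the current helper, which runs the callback on every online CPU including the caller and waits for completion:

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}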
Example 24
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}
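The allocation side then pulls pages out of the gen_pool this function fills. A hedged sketch (the uncached_pools array and the retry-on-empty loop are assumptions based on the code above):

unsigned long uncached_alloc_page(int nid)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool = &uncached_pools[nid];

	do {
		uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
		if (uc_addr != 0)
			return uc_addr;	/* already mapped uncached */
	} while (uncached_add_chunk(uc_pool, nid) == 0);

	return 0;	/* pool empty and no chunk could be added */
}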
Example 25
static int __init
expose_p2m_init(void)
{
	unsigned long gpfn;
	unsigned long mfn;
	unsigned long p2m_mfn;

	int error_count = 0;

	const int strides[] = {
		PTRS_PER_PTE, PTRS_PER_PTE/2, PTRS_PER_PTE/3, PTRS_PER_PTE/4,
		L1_CACHE_BYTES/sizeof(pte_t), 1
	};
	int i;

#if 0
	printd("about to call p2m_expose_init()\n");
	if (p2m_expose_init() < 0) {
		printd("p2m_expose_init() failed\n");
		return -EINVAL;
	}
	printd("p2m_expose_init() success\n");
#else
	if (!p2m_initialized) {
		printd("p2m exposure isn't initialized\n");
		return -EINVAL;
	}
#endif

	printd("p2m expose test begins\n");
	for (gpfn = p2m_min_low_pfn; gpfn < p2m_max_low_pfn; gpfn++) {
		mfn = HYPERVISOR_phystomach(gpfn);
		p2m_mfn = p2m_phystomach(gpfn);
		if (mfn != p2m_mfn) {
			printd("gpfn 0x%016lx "
			       "mfn 0x%016lx p2m_mfn 0x%016lx\n",
			       gpfn, mfn, p2m_mfn);
			printd("mpaddr 0x%016lx "
			       "maddr 0x%016lx p2m_maddr 0x%016lx\n",
			       gpfn << PAGE_SHIFT,
			       mfn << PAGE_SHIFT, p2m_mfn << PAGE_SHIFT);

			error_count++;
			if (error_count > 16) {
				printk("too many errors\n");
				return -EINVAL;
			}
		}
	}
	printd("p2m expose test done!\n");

	printk("type     "
	       "stride      "
	       "type     : "
	       "     nsec /  count = "
	       "nsec per conv\n");
	for (i = 0; i < sizeof(strides)/sizeof(strides[0]); i++) {
		int stride = strides[i];
		local_flush_tlb_all();
		do_with_hypercall("cold tlb",
				  p2m_min_low_pfn, p2m_max_low_pfn, stride);
		do_with_hypercall("warm tlb",
				  p2m_min_low_pfn, p2m_max_low_pfn, stride);

		local_flush_tlb_all();
		do_with_table("cold tlb",
			      p2m_min_low_pfn, p2m_max_low_pfn, stride);
		do_with_table("warm tlb",
			      p2m_min_low_pfn, p2m_max_low_pfn, stride);
	}

	return -EINVAL;
}
Example 26
void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}
Example 27
static int mx6_suspend_enter(suspend_state_t state)
{
	unsigned int wake_irq_isr[4];
	unsigned int cpu_type;
	struct gic_dist_state gds;
	struct gic_cpu_state gcs;
	bool arm_pg = false;

	if (cpu_is_mx6q())
		cpu_type = MXC_CPU_MX6Q;
	else if (cpu_is_mx6dl())
		cpu_type = MXC_CPU_MX6DL;
	else
		cpu_type = MXC_CPU_MX6SL;

	wake_irq_isr[0] = __raw_readl(gpc_base +
			GPC_ISR1_OFFSET) & gpc_wake_irq[0];
	wake_irq_isr[1] = __raw_readl(gpc_base +
			GPC_ISR2_OFFSET) & gpc_wake_irq[1];
	wake_irq_isr[2] = __raw_readl(gpc_base +
			GPC_ISR3_OFFSET) & gpc_wake_irq[2];
	wake_irq_isr[3] = __raw_readl(gpc_base +
			GPC_ISR4_OFFSET) & gpc_wake_irq[3];
	if (wake_irq_isr[0] | wake_irq_isr[1] |
			wake_irq_isr[2] | wake_irq_isr[3]) {
		printk(KERN_INFO "There are wakeup irq pending,system resume!\n");
		printk(KERN_INFO "wake_irq_isr[0-3]: 0x%x, 0x%x, 0x%x, 0x%x\n",
				wake_irq_isr[0], wake_irq_isr[1],
				wake_irq_isr[2], wake_irq_isr[3]);
		return 0;
	}
	mx6_suspend_store();

	/*
	 * i.MX6dl TO1.0/i.MX6dq TO1.1/1.0 TKT094231: can't support
	 * ARM_POWER_OFF mode.
	 */
	if (state == PM_SUSPEND_MEM &&
		((mx6dl_revision() == IMX_CHIP_REVISION_1_0) ||
		(cpu_is_mx6q() && mx6q_revision() <= IMX_CHIP_REVISION_1_1))) {
		state = PM_SUSPEND_STANDBY;
	}

	switch (state) {
	case PM_SUSPEND_MEM:
		disp_power_down();
		usb_power_down_handler();
		mxc_cpu_lp_set(ARM_POWER_OFF);
		arm_pg = true;
		break;
	case PM_SUSPEND_STANDBY:
		if (cpu_is_mx6sl()) {
			disp_power_down();
			usb_power_down_handler();
			mxc_cpu_lp_set(STOP_XTAL_ON);
			arm_pg = true;
		} else
			mxc_cpu_lp_set(STOP_POWER_OFF);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * L2 can exit by 'reset' or Inband beacon (from remote EP)
	 * toggling phy_powerdown has same effect as 'inband beacon'
	 * So, toggle bit18 of GPR1, to fix errata
	 * "PCIe PCIe does not support L2 Power Down"
	 */
	__raw_writel(__raw_readl(IOMUXC_GPR1) | (1 << 18), IOMUXC_GPR1);

	if (state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY) {

		local_flush_tlb_all();
		flush_cache_all();

		if (arm_pg) {
			/* preserve gic state */
			save_gic_dist_state(0, &gds);
			save_gic_cpu_state(0, &gcs);
		}

		if (pm_data && pm_data->suspend_enter)
			pm_data->suspend_enter();

		suspend_in_iram(state, (unsigned long)iram_paddr,
			(unsigned long)suspend_iram_base, cpu_type);

		if (pm_data && pm_data->suspend_exit)
			pm_data->suspend_exit();

		/*
		 * Reset the RBC counter. All interrupts should be masked
		 * before the RBC counter is reset, and they will be
		 * unmasked by the mx6_suspend_restore routine below.
		 */
		__raw_writel(0xffffffff, gpc_base + 0x08);
		__raw_writel(0xffffffff, gpc_base + 0x0c);
		__raw_writel(0xffffffff, gpc_base + 0x10);
		__raw_writel(0xffffffff, gpc_base + 0x14);

		/* Clear the RBC counter and RBC_EN bit to disable the
		 * REG_BYPASS_COUNTER. */
		__raw_writel(__raw_readl(MXC_CCM_CCR) &
			~MXC_CCM_CCR_RBC_EN, MXC_CCM_CCR);
		/* Make sure we clear REG_BYPASS_COUNT. */
		__raw_writel(__raw_readl(MXC_CCM_CCR) &
			~MXC_CCM_CCR_REG_BYPASS_CNT_MASK, MXC_CCM_CCR);
		/* Need to wait for a minimum of 2 CLKILS (32KHz) for the
		 * counter to clear and reset.
		 */
		udelay(80);

		if (arm_pg) {
			/* restore gic registers */
			restore_gic_dist_state(0, &gds);
			restore_gic_cpu_state(0, &gcs);
		}
		if (state == PM_SUSPEND_MEM || (cpu_is_mx6sl())) {
			usb_power_up_handler();
			disp_power_up();
		}

		mx6_suspend_restore();

		__raw_writel(BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG,
			anatop_base + HW_ANADIG_ANA_MISC0_CLR);

	} else {
		cpu_do_idle();
	}

	/*
	 * L2 can exit by 'reset' or Inband beacon (from remote EP)
	 * toggling phy_powerdown has same effect as 'inband beacon'
	 * So, toggle bit18 of GPR1, to fix errata
	 * "PCIe PCIe does not support L2 Power Down"
	 */
	__raw_writel(__raw_readl(IOMUXC_GPR1) & (~(1 << 18)), IOMUXC_GPR1);

	return 0;
}
Example 28
static int mx6_suspend_enter(suspend_state_t state)
{
	unsigned int wake_irq_isr[4];
	struct gic_dist_state gds;
	struct gic_cpu_state gcs;

	wake_irq_isr[0] = __raw_readl(gpc_base +
			GPC_ISR1_OFFSET) & gpc_wake_irq[0];
	wake_irq_isr[1] = __raw_readl(gpc_base +
			GPC_ISR2_OFFSET) & gpc_wake_irq[1];
	wake_irq_isr[2] = __raw_readl(gpc_base +
			GPC_ISR3_OFFSET) & gpc_wake_irq[2];
	wake_irq_isr[3] = __raw_readl(gpc_base +
			GPC_ISR4_OFFSET) & gpc_wake_irq[3];
	if (wake_irq_isr[0] | wake_irq_isr[1] |
			wake_irq_isr[2] | wake_irq_isr[3]) {
		printk(KERN_INFO "There are wakeup irq pending,system resume!\n");
		printk(KERN_INFO "wake_irq_isr[0-3]: 0x%x, 0x%x, 0x%x, 0x%x\n",
				wake_irq_isr[0], wake_irq_isr[1],
				wake_irq_isr[2], wake_irq_isr[3]);
		return 0;
	}
	mx6_suspend_store();

	/* i.MX6dl TO1.0 TKT094231: can't support ARM_POWER_OFF mode */
	if (state == PM_SUSPEND_MEM && cpu_is_mx6dl())
		state = PM_SUSPEND_STANDBY;

	switch (state) {
	case PM_SUSPEND_MEM:
		gpu_power_down();
		usb_power_down_handler();
		mxc_cpu_lp_set(ARM_POWER_OFF);
		break;
	case PM_SUSPEND_STANDBY:
		mxc_cpu_lp_set(STOP_POWER_OFF);
		break;
	default:
		return -EINVAL;
	}

	if (state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY) {
		if (pm_data && pm_data->suspend_enter)
			pm_data->suspend_enter();

		local_flush_tlb_all();
		flush_cache_all();

		if (state == PM_SUSPEND_MEM) {
			/* preserve gic state */
			save_gic_dist_state(0, &gds);
			save_gic_cpu_state(0, &gcs);
		}

		suspend_in_iram(state, (unsigned long)iram_paddr,
			(unsigned long)suspend_iram_base);

		if (state == PM_SUSPEND_MEM) {
			/* restore gic registers */
			restore_gic_dist_state(0, &gds);
			restore_gic_cpu_state(0, &gcs);
			usb_power_up_handler();
			gpu_power_up();
		}
		mx6_suspend_restore();

		if (pm_data && pm_data->suspend_exit)
			pm_data->suspend_exit();
	} else {
		cpu_do_idle();
	}

	return 0;
}
Example 29
int swsusp_arch_resume(void)
{
	/* Avoid TLB mismatch during and after kernel resume */
	local_flush_tlb_all();
	return restore_image();
}
Example 30
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}