static inline void disable_early_ack(u32 mc_override)
{
	static u32 override_val;

	override_val = mc_override & (~MC_EMEM_ARB_OVERRIDE_EACK_MASK);
	mc_writel(override_val, MC_EMEM_ARB_OVERRIDE);
	__cpuc_flush_dcache_area(&override_val, sizeof(override_val));
	outer_clean_range(__pa(&override_val), __pa(&override_val + 1));
	override_val |= mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK;
}
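disable_early_ack() above shows the idiom that recurs throughout these examples: clean the lines holding a variable out of the L1 data cache by virtual address, then clean the outer (L2) cache by physical address, so the value reaches RAM where an observer that does not snoop the CPU caches can read it. A minimal standalone sketch of that idiom, assuming an ARM32 kernel with an outer cache; the helper name clean_to_ram is illustrative, not taken from the code above:
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/memory.h>
#include <asm/outercache.h>

/* Illustrative helper: push one object all the way out to main memory. */
static void clean_to_ram(void *vaddr, size_t len)
{
	/* Clean and invalidate the range in the L1 data cache (virtual address). */
	__cpuc_flush_dcache_area(vaddr, len);
	/* Clean the same range in the outer cache (physical address). */
	outer_clean_range(__pa(vaddr), __pa(vaddr) + len);
}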
Example #2
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	//writew((BSYM(virt_to_phys(vexpress_secondary_startup))>>16), SECOND_START_ADDR_HI);
	//writew(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR_LO);
	writel(0xbabe, SECOND_MAGIC_NUMBER_ADRESS);
	writel(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR);
	__cpuc_flush_kern_all();
	pen_release = cpu;

	#if 0  //debug
	unsigned int *ptr=&pen_release;
	printk("pen_release = 0x%08x, addr= 0x%08x, pen_release ptr = 0x%08x\n ",pen_release,&pen_release,*ptr);
	#endif
	
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	smp_cross_call(cpumask_of(cpu));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
	return pen_release != -1 ? -ENOSYS : 0;
}
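boot_secondary() above publishes pen_release to memory and then polls for the secondary CPU to write it back to -1. For context, a sketch of the counterpart commonly found on the secondary side, assuming the same pen_release and boot_lock globals; the write_pen_release() helper here is illustrative:
/* Illustrative helper: publish a new pen_release value out to RAM. */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

/* Sketch of the secondary CPU's side of the handshake. */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/* Let the boot CPU know we are out of the holding pen. */
	write_pen_release(-1);

	/* Synchronise with the boot CPU before continuing. */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}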
static void inner_outer_flush_dcache_area(void *addr, size_t length)
{
	phys_addr_t start, end;

	__cpuc_flush_dcache_area(addr, length);

	start = virt_to_phys(addr);
	end   = start + length;

	outer_cache.flush_range(start, end);
}
Example #4
int meson_trustzone_memconfig(void)
{
	int ret;
	struct memconfig_hal_api_arg arg;
	arg.memconfigbuf_phy_addr = __pa(memsecure);
	arg.memconfigbuf_count = MEMCONFIG_NUM;

	__cpuc_flush_dcache_area(memsecure, sizeof(memsecure));
	outer_clean_range(__pa(memsecure), (__pa(memsecure + MEMCONFIG_NUM)));
	__cpuc_flush_dcache_area(&arg, sizeof(arg));
	outer_clean_range(__pa(&arg), __pa(&arg + 1));

	ret = meson_smc_hal_api(TRUSTZONE_HAL_API_MEMCONFIG, __pa(&arg));

	outer_inv_range(__pa(&arg), __pa(&arg + 1));
	dmac_unmap_area(&arg, sizeof(arg), DMA_FROM_DEVICE);
	outer_inv_range(__pa(memsecure), __pa(memsecure + MEMCONFIG_NUM));
	dmac_unmap_area(memsecure, sizeof(memsecure), DMA_FROM_DEVICE);

	return ret;
}
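meson_trustzone_memconfig() follows the usual discipline for a buffer shared with secure firmware over an SMC: clean the caches before the call so the firmware sees the data just written, then invalidate afterwards so the kernel re-reads the firmware's reply from RAM instead of stale cached lines. A condensed sketch of that pairing with hypothetical helper names, using the same legacy ARM cache calls as the function above:
/* Hypothetical helper: make CPU writes to a shared buffer visible to firmware. */
static void clean_shared_buffer(void *buf, size_t len)
{
	__cpuc_flush_dcache_area(buf, len);
	outer_clean_range(__pa(buf), __pa(buf) + len);
}

/* Hypothetical helper: discard stale cached copies after firmware updates the buffer. */
static void invalidate_shared_buffer(void *buf, size_t len)
{
	outer_inv_range(__pa(buf), __pa(buf) + len);
	dmac_unmap_area(buf, len, DMA_FROM_DEVICE);
}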
Example #5
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	static int cold_boot_done;

	/* Only need to bring cpu out of reset this way once */
	if (cold_boot_done == false) {
		prepare_cold_cpu(cpu);
		cold_boot_done = true;
	}

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu;
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	int flag = 0;
	unsigned long timeout;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		per_cpu(cold_boot_done, cpu) = true;
		init_cpu_debug_counter_for_cold_boot();
	}

	spin_lock(&boot_lock);

	pen_release = cpu_logical_map(cpu);
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		udelay(10);
	}

	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
void bsp_hdmi_set_addr(unsigned int base_addr)
{
	uintptr_t para[5];
	int ret;

	memcpy((void *)HDMI_MAIN_START, (void *)hdmi_mc_table, hdmi_mc_table_size);
	hdmi_main = (int (*)(int type, uintptr_t* para))HDMI_MAIN_START;
	__cpuc_flush_dcache_area((void *)HDMI_MAIN_START, hdmi_mc_table_size);
	__cpuc_flush_icache_all();

	memset(para, 0, 5*sizeof(uintptr_t));
	para[0] = (uintptr_t)base_addr;
	ret = bsp_hdmi_main(HDMI_CODE_SET_ADDR, para);
}
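bsp_hdmi_set_addr() copies executable code into RAM, so it must clean the data cache (so the copy actually reaches memory) and invalidate the instruction cache (so stale instructions are not fetched) before the code is called. A smaller sketch of the same step using the kernel's flush_icache_range() helper on just the copied range instead of a full I-cache flush; the helper name publish_code is illustrative:
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustrative helper: copy code into place and make it safe to execute. */
static void publish_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	/* Cleans the D-cache for the range and invalidates the I-cache behind it. */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}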
/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	
	#if defined(CONFIG_ARCH_JAVA)
	u32 *save_ptr_v = ptr;
	#endif
	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

	cpu_do_suspend(ptr);
	#if defined(CONFIG_ARCH_JAVA)
	__cpuc_flush_dcache_area(save_ptr_v, ptrsz);
	__cpuc_flush_dcache_area(save_ptr,  sizeof(ptr));
	#else
	flush_cache_all();
	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
	#endif
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	edb_putstr("boot_secondary\n");

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	pen_release = cpu;
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	wmb();

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), (GIC_SECURE_INT_FLAG | 1));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz)
{
#ifdef CONFIG_ARM
	__cpuc_flush_dcache_area(vaddr, sz);
	outer_flush_range(paddr, paddr + sz);
#elif defined(CONFIG_ARM64)
	/* FIXME (MID64-46): There's no other suitable cache flush function for ARM64 */
	flush_cache_all();
#elif defined(CONFIG_X86)
	struct scatterlist scl = { 0, };
	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
#else
#error Implement cache maintenance for your architecture here
#endif
}
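kbase_sync_to_cpu() makes device-written data visible to the CPU. The opposite direction, pushing CPU writes out before the device reads them, follows the same per-architecture split; a hedged sketch in the same style (the x86 branch uses dma_sync_sg_for_device() here, which may differ from the real driver):
void kbase_sync_to_memory(phys_addr_t paddr, void *vaddr, size_t sz)
{
#ifdef CONFIG_ARM
	__cpuc_flush_dcache_area(vaddr, sz);
	outer_flush_range(paddr, paddr + sz);
#elif defined(CONFIG_ARM64)
	/* As above: no finer-grained cache flush helper is available here. */
	flush_cache_all();
#elif defined(CONFIG_X86)
	struct scatterlist scl = { 0, };
	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
	dma_sync_sg_for_device(NULL, &scl, 1, DMA_TO_DEVICE);
#else
#error Implement cache maintenance for your architecture here
#endif
}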
Example #12
static int is_slt_scu_state_sync(unsigned long state)
{
    int ret = 1;
    int cpu;

    __cpuc_flush_dcache_area(g_iSCU_State, 2*sizeof(int));

    for(cpu = 0; cpu < NR_CPUS; cpu++)
    {
        if(g_iSCU_State[cpu] != state)
        {
            ret = 0;
            break;
        }
    }

    return ret;
}
/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (page_address(to) != NULL)
#endif
		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}
Example #14
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
Example #15
static void check_and_rewrite_cpu_entry(void)
{
	unsigned int i;
	unsigned int *p = (unsigned int *)0xc0000000;
	int changed = 0;
	unsigned int count = sizeof(cpu_entry_code) / sizeof(cpu_entry_code[0]);

	for (i = 0; i < count; i++) {
		if (cpu_entry_code[i] != p[i]) {
			changed = 1;
			break;
		}
	}

	if (changed != 0) {
		printk(KERN_WARNING "!!!CPU boot warning: cpu entry code has been changed!\n");
		for (i = 0, p = (unsigned int *)0xc0000000; i < count; i++)
			p[i] = cpu_entry_code[i];

		smp_wmb();
		__cpuc_flush_dcache_area((void *)p, sizeof(cpu_entry_code));
		outer_clean_range(__pa(p), __pa(p + count));
	}
}
Example #16
static void pagemap_flush_page(struct page *page)
{
#ifdef CONFIG_HIGHMEM
    void *km = NULL;

    if (!page_address(page)) {
        km = kmap(page);
        if (!km) {
            pr_err("unable to map high page\n");
            return;
        }
    }
#endif

    __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
    outer_flush_range(page_to_phys(page), page_to_phys(page)+PAGE_SIZE);
    wmb();

#ifdef CONFIG_HIGHMEM
    if (km) kunmap(page);
#endif
}
Example #17
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(buf->page);
		BUG_ON(!buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}
Example #18
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_top_pte(vaddr, __pte(0));
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
Example #19
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	int flag = 0;
	unsigned long timeout;

	pr_debug("Starting secondary CPU %d\n", cpu);

	/* Set preset_lpj to avoid subsequent lpj recalculations */
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		per_cpu(cold_boot_done, cpu) = true;
		init_cpu_debug_counter_for_cold_boot();
	}

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu_logical_map(cpu);
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example #20
void eLIBs_CleanFlushDCacheRegion(void *adr, __u32 bytes)
{
	__cpuc_flush_dcache_area(adr, bytes + (1 << 5) * 2 - 2);
}
void eLIBs_CleanFlushDCacheRegion_nand(void *adr, size_t bytes)
{
	__cpuc_flush_dcache_area(adr, bytes + (1 << 5) * 2 - 2);
}
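Both helpers above pad the length by (1 << 5) * 2 - 2 = 62 bytes, just under two 32-byte cache lines, so an unaligned start or end still falls inside the flushed range. An alternative sketch that rounds the range out to whole cache lines explicitly instead of over-flushing by a fixed pad; the 32-byte line size is an assumption carried over from the constant above, and the function name is illustrative:
#include <linux/kernel.h>
#include <asm/cacheflush.h>

#define ASSUMED_L1_LINE_SIZE	32	/* matches the (1 << 5) used above */

/* Illustrative variant: round the range out to whole cache lines, then flush. */
static void clean_flush_dcache_aligned(void *adr, size_t bytes)
{
	unsigned long start = (unsigned long)adr & ~(ASSUMED_L1_LINE_SIZE - 1UL);
	unsigned long end = ALIGN((unsigned long)adr + bytes, ASSUMED_L1_LINE_SIZE);

	__cpuc_flush_dcache_area((void *)start, end - start);
}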
Example #22
static int wait_slt_scu_state_sync(unsigned long state, int wait)
{

    int ret = 0, i;
    unsigned long retry = 0;
    static volatile int get_exit = 0;
    static volatile int cpu_num = 0;
    unsigned long all_cpu_mask = 0;

    int cpu = raw_smp_processor_id();

    //printk("wait_slt_scu_state_sync, cpu%d wait state=%d\n", cpu, state);

    if(cpu_num & (0x1 << cpu))
    {
        //printk(KERN_ERR, "cpu%d already waitting\n", cpu);
        return 0;
    }

    while(cpu_num && get_exit)
    {
        //printk(KERN_INFO, "wait other cpu to finish waiting loop\n");
        mdelay(10);
    }

    spin_lock(&scu_wait_sync_lock);
    cpu_num  |= 0x1 << cpu;
    get_exit = 0;
    __cpuc_flush_dcache_area(&get_exit, sizeof(int));
    __cpuc_flush_dcache_area(&cpu_num, sizeof(int));
    spin_unlock(&scu_wait_sync_lock);

    for(i = 0; i < NR_CPUS; i++)
    {
        all_cpu_mask |= (0x1 << i);
    }

    /* wait all cpu in sync loop */
    while(cpu_num != all_cpu_mask)
    {
        retry++;

        if(retry > 0x10000)
        {
            //printk(KERN_INFO, "scu wait sync state (%d) timeout\n", state);
            goto wait_sync_out;
        }

        if(get_exit)
            break;

        //printk(KERN_INFO, "\n\nretry=0x%08x wait state = %d\n", retry, state);
        //slt_scu_print_state();
        mdelay(1);
    }

    spin_lock(&scu_wait_sync_lock);
    get_exit |= 0x1 << cpu;
    __cpuc_flush_dcache_area(&get_exit, sizeof(int));
    spin_unlock(&scu_wait_sync_lock);


    ret = is_slt_scu_state_sync(state);

    /* make sure all cpu exit wait sync loop
     * check cpu_num is for the case retry timeout
     */
    while(1)
    {
        //printk(KERN_INFO, "wait exit retry\n");
        if(!get_exit ||
           get_exit == all_cpu_mask ||
           cpu_num != all_cpu_mask)
        {
            break;
        }
        mdelay(1);
    }

wait_sync_out:
    spin_lock(&scu_wait_sync_lock);
    cpu_num &= ~(0x01 << cpu);
    __cpuc_flush_dcache_area(&cpu_num, sizeof(int));
    spin_unlock(&scu_wait_sync_lock);

    //printk("cpu%d exit fun, ret=%s\n", cpu, ret ? "pass" : "fail");
    return ret;
}
Example #23
static int slt_scu_test_func(void *data)
{
    int ret = 0, loop, pass;
    int cpu = raw_smp_processor_id();
    unsigned long irq_flag;
    int cpu_cnt;

    unsigned long buf;
    unsigned long *mem_buf = (unsigned long *)data;
    unsigned long retry;

    //spin_lock(&scu_thread_lock[cpu]);
    //local_irq_save(irq_flag);
#if 0
    if(cpu == 0)
    {
        mtk_wdt_enable(WK_WDT_DIS);
    }
#endif

    if(!mem_buf)
    {
        printk(KERN_ERR "failed to allocate memory for cpu scu test\n");
        g_iCPU_PassFail = -1;
        goto scu_thread_out;
    }

    printk("\n>>slt_scu_test_func -- cpu id = %d, mem_buf = %p <<\n", cpu, mem_buf);

    msleep(50);

    if(!wait_slt_scu_state_sync(SCU_STATE_START, 1))
    {
        printk("cpu%d wait SCU_STATE_START timeout\n", cpu);

        goto scu_thread_out;
    }
    g_iCPU_PassFail = 0;
    g_iSCU_PassFail[cpu] = 1;

    for (loop = 0; loop < g_iScuLoopCount; loop++) {

        slt_scu_write_state(cpu, SCU_STATE_EXECUTE);
        spin_lock_irqsave(&scu_thread_irq_lock[cpu], irq_flag);
        if(!wait_slt_scu_state_sync(SCU_STATE_EXECUTE, 1))
        {
            spin_unlock_irqrestore(&scu_thread_irq_lock[cpu], irq_flag);
            printk("cpu%d wait SCU_STATE_EXECUTE timeout\n", cpu);
            goto scu_thread_out;
        }

        g_iSCU_PassFail[cpu] = fp6_scu_start(mem_buf);
        spin_unlock_irqrestore(&scu_thread_irq_lock[cpu], irq_flag);

        __cpuc_flush_dcache_area(g_iSCU_PassFail, 2*sizeof(int));

        printk("\n>>cpu%d scu : fp6_scu_start %s ret=0x%x<<\n", cpu, g_iSCU_PassFail[cpu] != 0xA? "fail" : "pass", g_iSCU_PassFail[cpu]);

        slt_scu_write_state(cpu, SCU_STATE_EXEEND);

        if(!wait_slt_scu_state_sync(SCU_STATE_EXEEND, 1))
        {
            printk("cpu%d wait SCU_STATE_EXEEND timeout\n", cpu);
            goto scu_thread_out;

        }

        if(cpu == 0)
        {
            pass = 1;
            for(cpu_cnt = 0; cpu_cnt < NR_CPUS; cpu_cnt++)
            {
                if(g_iSCU_PassFail[cpu_cnt] != 0xA)
                {
                    pass = 0;
                }
            }

            if(pass)
            {
                g_iCPU_PassFail += 1;
            }
        }
    }

scu_thread_out:

    slt_scu_write_state(cpu, SCU_STATE_IDEL);

    if(cpu == 0)
    {
        if (g_iCPU_PassFail == g_iScuLoopCount) {
            printk("\n>> CPU scu test pass <<\n\n");
        }else {
            printk("\n>> CPU scu test fail (loop count = %d)<<\n\n", g_iCPU_PassFail);
        }
        //mtk_wdt_enable(WK_WDT_EN);
    }

    wait_slt_scu_state_sync(SCU_STATE_IDEL, 1);

    printk("cpu%d scu thread out\n", cpu);

    //local_irq_restore(irq_flag);

    //spin_unlock(&scu_thread_lock[cpu]);
    return 0;

}
unsigned long rknand_dma_flush_dcache(unsigned long ptr, int size, int dir)
{
	/* Over-flush by up to one 64-byte cache line so an unaligned end is covered. */
	__cpuc_flush_dcache_area((void *)ptr, size + 63);
	return (unsigned long)virt_to_phys((void *)ptr);
}
Example #25
void mt_smp_set_boot_addr(u32 addr, int cpu)
{
    boot_addr_array[cpu] = addr;
    __cpuc_flush_dcache_area(boot_addr_array, sizeof(boot_addr_array));
}
/* This is a copy of _ump_osk_msync from drivers/amlogic/gpu/ump/linux/ump_osk_low_level_mem.c
 * with adapted parameters */
static void meson_drm_ump_osk_msync(struct drm_gem_cma_object *cma_obj, void *virt, u32 offset, size_t size, enum drm_meson_msync_op op, struct meson_drm_session_data *session_data)
{
	struct drm_gem_object *gem_obj;
	u32 start_p, end_p;

	/* Flush L1 using virtual address, the entire range in one go.
	 * Only flush if user space process has a valid write mapping on given address. */
	if ((cma_obj) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size))) {
		__cpuc_flush_dcache_area(virt, size);
		DBG_MSG(3, ("meson_drm_ump_osk_msync(): Flushing CPU L1 Cache. CPU address: %x, size: %x\n", virt, size));
	} else {
		if (session_data) {
			if (op == DRM_MESON_MSYNC_FLUSH_L1) {
				DBG_MSG(4, ("meson_drm_ump_osk_msync(): Pending %d L1 cache flushes\n", session_data->has_pending_level1_cache_flush));
				session_data->has_pending_level1_cache_flush = 0;
				level1_cache_flush_all();
				return;
			} else {
				if (session_data->cache_operations_ongoing) { /* This is set in cache_operations_control(start) */
					session_data->has_pending_level1_cache_flush++;
					DBG_MSG(4, ("meson_drm_ump_osk_msync(): Deferring L1 cache flush (%d pending)\n", session_data->has_pending_level1_cache_flush));
				} else {
					/* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
					level1_cache_flush_all();
				}
			}
		} else {
			DBG_MSG(4, ("Unknown state %s %d\n", __FUNCTION__, __LINE__));
			level1_cache_flush_all();
		}
	}

	if (!cma_obj)
		return;
	gem_obj = &cma_obj->base;

	DBG_MSG(3, ("meson_drm_ump_osk_msync(): Flushing CPU L2 Cache\n"));

	/* Flush L2 using physical addresses
	 * Our allocations are always contiguous (GEM CMA), so we have only one mem block */

	if (offset >= gem_obj->size) {
		offset -= gem_obj->size;
		return;
	}

	if (offset) {
		start_p = (u32)cma_obj->paddr + offset;
		/* We'll zero the offset later, after using it to calculate end_p. */
	} else {
		start_p = (u32)cma_obj->paddr;
	}

	if (size < gem_obj->size - offset) {
		end_p = start_p + size;
		size = 0;
	} else {
		if (offset) {
			end_p = start_p + (gem_obj->size - offset);
			size -= gem_obj->size - offset;
			offset = 0;
		} else {
			end_p = start_p + gem_obj->size;
			size -= gem_obj->size;
		}
	}

	switch (op) {
	case DRM_MESON_MSYNC_CLEAN:
		outer_clean_range(start_p, end_p);
		break;
	case DRM_MESON_MSYNC_CLEAN_AND_INVALIDATE:
		outer_flush_range(start_p, end_p);
		break;
	case DRM_MESON_MSYNC_INVALIDATE:
		outer_inv_range(start_p, end_p);
		break;
	default:
		break;
	}

	return;
}