Example no. 1
0
int meson_trustzone_efuse(struct efuse_hal_api_arg* arg)
{
	int ret;
	if (!arg) {
		return -1;
	}
	set_cpus_allowed_ptr(current, cpumask_of(0));
	__cpuc_flush_dcache_area(__va(arg->buffer_phy), arg->size);
	outer_clean_range((arg->buffer_phy), (arg->buffer_phy + arg->size));

	__cpuc_flush_dcache_area(__va(arg->retcnt_phy), sizeof(unsigned int));
	outer_clean_range(arg->retcnt_phy, (arg->retcnt_phy + sizeof(unsigned int)));

	__cpuc_flush_dcache_area((void*)arg, sizeof(struct efuse_hal_api_arg));
	outer_clean_range(__pa(arg), __pa(arg + 1));

	ret = meson_smc_hal_api(TRUSTZONE_HAL_API_EFUSE, __pa(arg));

	if (arg->cmd == EFUSE_HAL_API_READ) {
		outer_inv_range((arg->buffer_phy), (arg->buffer_phy + arg->size));
		dmac_unmap_area(__va(arg->buffer_phy), arg->size, DMA_FROM_DEVICE);
	}
	outer_inv_range((arg->retcnt_phy), (arg->retcnt_phy + sizeof(unsigned int)));
	dmac_unmap_area(__va(arg->retcnt_phy), sizeof(unsigned int), DMA_FROM_DEVICE);

	return ret;
}
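The function above follows a fixed discipline around the SMC call: clean L1 and the outer cache before handing a physical address to the secure world, and invalidate afterwards so the kernel re-reads what the secure side wrote. A minimal sketch of that pattern as two helpers, assuming a lowmem buffer so __pa()/__va() are valid (the helper names are illustrative, not from the source):

/*
 * Sketch of the cache discipline used around SMC calls in these
 * examples (helper names are illustrative).
 */
static void smc_clean_for_secure(void *buf, size_t size)
{
	/* push dirty lines to DRAM so the secure world reads current data */
	__cpuc_flush_dcache_area(buf, size);
	outer_clean_range(__pa(buf), __pa(buf) + size);
}

static void smc_inv_after_secure(void *buf, size_t size)
{
	/* drop stale lines so the kernel re-reads what the secure world wrote */
	outer_inv_range(__pa(buf), __pa(buf) + size);
	dmac_unmap_area(buf, size, DMA_FROM_DEVICE);
}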
/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

	cpu_do_suspend(ptr);

	flush_cache_louis();

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}
void __init pxa_cpu_reset_handler_init(void)
{
	int cpu;
#ifdef CONFIG_TZ_HYPERVISOR
	tzlc_cmd_desc cmd_desc;
	tzlc_handle tzlc_hdl;
#endif

	/* Assign the address for saving reset handler */
	reset_handler_pa = pm_reserve_pa + PAGE_SIZE;
	reset_handler = (u32 *)__arm_ioremap(reset_handler_pa,
						PAGE_SIZE, MT_MEMORY_SO);
	if (reset_handler == NULL)
		panic("failed to remap memory for reset handler!\n");
	memset(reset_handler, 0x0, PAGE_SIZE);

	/* Flush the addr to DDR */
	__cpuc_flush_dcache_area((void *)&reset_handler_pa,
				sizeof(reset_handler_pa));
	outer_clean_range(__pa(&reset_handler_pa),
		__pa(&reset_handler_pa + 1));

/*
 * With TrustZone enabled, CIU_WARM_RESET_VECTOR is used by the TrustZone
 * software, so the kernel uses CIU_SW_SCRATCH_REG to save the cpu reset
 * entry instead.
 */
#ifdef CONFIG_TZ_HYPERVISOR
	tzlc_hdl = pxa_tzlc_create_handle();
	cmd_desc.op = TZLC_CMD_SET_WARM_RESET_ENTRY;
	cmd_desc.args[0] = __pa(pxa988_cpu_reset_entry);
	pxa_tzlc_cmd_op(tzlc_hdl, &cmd_desc);
	pxa_tzlc_destroy_handle(tzlc_hdl);
#else
	/* We will reset from DDR directly by default */
	writel(__pa(pxa988_cpu_reset_entry), CIU_WARM_RESET_VECTOR);
#endif

#ifdef CONFIG_PM
	/* Setup the resume handler for the first core */
	pxa988_set_reset_handler(__pa(pxa988_cpu_resume_handler), 0);
#endif

	/* Setup the handler for secondary cores */
	for (cpu = 1; cpu < CONFIG_NR_CPUS; cpu++)
		pxa988_set_reset_handler(__pa(pxa988_secondary_startup), cpu);

#ifdef CONFIG_HOTPLUG_CPU
	/* Setup the handler for Hotplug cores */
	writel(__pa(pxa988_secondary_startup), &secondary_cpu_handler);
	__cpuc_flush_dcache_area((void *)&secondary_cpu_handler,
				sizeof(secondary_cpu_handler));
	outer_clean_range(__pa(&secondary_cpu_handler),
		__pa(&secondary_cpu_handler + 1));
#endif
}
Example no. 4
0
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 */
	pen_release = cpu;
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	smp_cross_call(cpumask_of(cpu));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		if (pen_release == -1)
			break;
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
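For context, the other half of this holding-pen protocol runs on the secondary CPU: it spins until pen_release matches its ID, then resets pen_release to -1 to signal the boot CPU. A minimal sketch of that counterpart, assuming the same cache helpers (the function name mirrors the generic ARM code of this era, not this specific source):

void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/* signal the boot CPU that we have left the holding pen */
	pen_release = -1;
	smp_wmb();
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}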
Example no. 5
0
/*
 *  ======== MEM_FlushCache ========
 *  Purpose:
 *      Flush cache
 */
void MEM_FlushCache(void *pMemBuf, u32 cBytes, s32 FlushType)
{
	if (cRefs <= 0 || !pMemBuf)
		goto func_end;
	switch (FlushType) {
	/* invalidate only */
	case PROC_INVALIDATE_MEM:
		dmac_inv_range(pMemBuf, pMemBuf + cBytes);
		outer_inv_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				cBytes));
		break;
	/* writeback only */
	case PROC_WRITEBACK_MEM:
		dmac_clean_range(pMemBuf, pMemBuf + cBytes);
		outer_clean_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
		break;
	/* writeback and invalidate */
	case PROC_WRITEBACK_INVALIDATE_MEM:
		dmac_flush_range(pMemBuf, pMemBuf + cBytes);
		outer_flush_range(__pa((u32)pMemBuf), __pa((u32)pMemBuf +
				  cBytes));
		break;
	default:
		GT_1trace(MEM_debugMask, GT_6CLASS, "MEM_FlushCache: invalid "
			  "FlushMemType 0x%x\n", FlushType);
		break;
	}
func_end:
	return;
}
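A hedged usage sketch (the buffer and length names are hypothetical): writing a message buffer back to DRAM before the DSP reads it would use the writeback-only type:

	/* hypothetical call site: make msg_buf visible to the DSP */
	MEM_FlushCache(msg_buf, msg_len, PROC_WRITEBACK_MEM);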
Example no. 6
0
/*
 * omap4_secure_dispatcher: Routine to dispatch low power secure
 * service routines
 *
 * @idx: The HAL API index
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
 *
 * Returns the error value on success/failure
 */
u32 omap4_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
							 u32 arg3, u32 arg4)
{
	u32 ret;
	u32 param[5];

	param[0] = nargs;
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	/* Look up only once */
	if (!l4_secure_clkdm)
		l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");

	/* Put l4_secure to SW_WKUP so that modules are accessible */
	omap2_clkdm_wakeup(l4_secure_clkdm);

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));

	ret = omap_smc2(idx, flag, __pa(param));

	/* Restore HW_SUP so that the module can idle */
	omap2_clkdm_allow_idle(l4_secure_clkdm);

	return ret;
}
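A usage sketch of the dispatcher (the service index, flag, and payload names are hypothetical; real HAL indices are SoC-specific). Note that any buffer passed by physical address through arg1..arg4 must itself be cleaned to DRAM first, exactly as param[] is cleaned above:

	/* hypothetical call: one valid argument, the rest unused */
	ret = omap4_secure_dispatcher(PPA_SERVICE_EXAMPLE, FLAG_START_CRITICAL,
				      1, __pa(payload), 0, 0, 0);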
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update the AuxCoreBoot0 with boot state for secondary core.
	 * omap_secondary_startup() routine will hold the secondary core until
	 * the AuxCoreBoot1 register is updated with the cpu state.
	 * A barrier is added to ensure that the write buffer is drained.
	 */
	flush_cache_all();
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
	omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	smp_cross_call(cpumask_of(cpu));
	set_event();
	flush_cache_all();
	smp_wmb();

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example no. 8
0
/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}
Example no. 9
0
static inline void unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);
		BUG_ON(!buf->page);
		BUG_ON(buf->ptr);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, page_address(buf->page),
			page_to_dma(dev, buf->page),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr;
			unsigned long phys;
			ptr = kmap_atomic(buf->page, KM_BOUNCE_READ) + buf->offset;
			phys = page_to_phys(buf->page) + buf->offset;
			memcpy(ptr, buf->safe, size);
			dmac_clean_range(ptr, ptr + size);
			kunmap_atomic(ptr - buf->offset, KM_BOUNCE_READ);
			outer_clean_range(phys, phys + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_page_dev_to_cpu(dma_to_page(dev, dma_addr),
			      dma_addr & ~PAGE_MASK, size, dir);
	}
}
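For contrast, the map-side counterpart of this bounce path needs no cache maintenance at all: the safe buffer comes from a DMA-coherent pool, so copying toward the device is just a memcpy. A sketch under that assumption (condensed from the shape of the map path, not verbatim source):

	/* map side (sketch): the safe buffer is DMA-coherent, so a plain
	 * copy suffices for DMA_TO_DEVICE and DMA_BIDIRECTIONAL */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(buf->safe, buf->ptr, size);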
Example no. 10
0
/**
 * omap_secure_dispatcher: Routine to dispatch low power secure
 * service routines
 * @idx: The HAL API index
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
 *
 * Returns the non-zero error value on failure.
 */
u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
							 u32 arg3, u32 arg4)
{
	u32 ret;
	u32 param[5];

	param[0] = nargs;
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	if (!l4_secure_clkdm)
		l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");

	clkdm_wakeup(l4_secure_clkdm);

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));
	ret = omap_smc2(idx, flag, __pa(param));

	clkdm_allow_idle(l4_secure_clkdm);

	return ret;
}
/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

	cpu_do_suspend(ptr);

	flush_cache_all();
	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}
Example no. 12
0
void mshci_s3c_dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir, int flush_type)
{
	unsigned long paddr;

	if (dir != DMA_FROM_DEVICE) {
		mshci_s3c_dma_cache_maint_page(page, off, size, dir,
			dmac_map_area, flush_type, 1);

		paddr = page_to_phys(page) + off;
		if (flush_type != 2)
			outer_clean_range(paddr, paddr + size);
		/* FIXME: non-speculating: flush on bidirectional mappings? */
	} else {
		paddr = page_to_phys(page) + off;

		if (flush_type != 2)
			outer_inv_range(paddr, paddr + size);
		/* FIXME: non-speculating: flush on bidirectional mappings? */

		mshci_s3c_dma_cache_maint_page(page, off, size, dir,
			dmac_unmap_area, flush_type, 1);
	}
}
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}
Example no. 14
0
void __init tegra_cpu_reset_handler_init(void)
{
#ifdef CONFIG_SMP
	__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
		*((u32 *)cpu_present_mask);
	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
		virt_to_phys((void *)tegra_secondary_startup);
#endif

#ifdef CONFIG_PM_SLEEP
	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] =
		TEGRA_IRAM_CODE_AREA;
	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] =
		virt_to_phys((void *)tegra_resume);
#endif

	/* Push all of reset handler data out to the L3 memory system. */
	__cpuc_coherent_kern_range(
		(unsigned long)&__tegra_cpu_reset_handler_data[0],
		(unsigned long)&__tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE]);

	outer_clean_range(__pa(&__tegra_cpu_reset_handler_data[0]),
			  __pa(&__tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE]));

	tegra_cpu_reset_handler_enable();
}
Example no. 15
0
void s5p_mfc_cache_clean(const void *start_addr, unsigned long size)
{
	unsigned long paddr;

	dmac_map_area(start_addr, size, DMA_TO_DEVICE);
	/* __pa() is only valid for kernel-linear (lowmem) buffers */
	paddr = __pa((unsigned long)start_addr);
	outer_clean_range(paddr, paddr + size);
}
Example no. 16
0
inline void BCMFASTPATH
osl_cache_flush(void *va, uint size)
{
	unsigned long paddr;
	dmac_map_area(va, size, DMA_TX);
	paddr = __pa(va);
	outer_clean_range(paddr, paddr + size);
}
Example no. 17
0
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them.  However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	writew((BSYM(virt_to_phys(vexpress_secondary_startup))>>16), SECOND_START_ADDR_HI);
	writew(BSYM(virt_to_phys(vexpress_secondary_startup)), SECOND_START_ADDR_LO);
	pen_release = cpu;

#if defined(CONFIG_MSTAR_STR_DBGMSG)
	{
		unsigned int *ptr = (unsigned int *)&pen_release;
		printk("pen_release = 0x%08x, addr = %p, *ptr = 0x%08x\n",
		       pen_release, &pen_release, *ptr);
	}
#endif

	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	smp_cross_call(cpumask_of(cpu));

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}
#if defined(CONFIG_MSTAR_STR_DBGMSG)
	{
		unsigned int *ptr = (unsigned int *)&pen_release;
		printk("pen_release = 0x%08x, addr = %p, *ptr = 0x%08x\n",
		       pen_release, &pen_release, *ptr);
	}
#endif
	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
	return pen_release != -1 ? -ENOSYS : 0;
}
Example no. 18
0
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__cpuc_flush_dcache_area((void *)poke, 8);
	outer_clean_range(__pa(poke), __pa(poke + 2));
}
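A usage sketch (the address and value are hypothetical): arming a poke so that a waking CPU stores 1 to a synchronisation flag before it enters the kernel:

	/* hypothetical: CPU `cpu` in `cluster` writes 1 to sync_phys early in resume */
	mcpm_set_early_poke(cpu, cluster, sync_phys, 1);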
Example no. 19
0
/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void __cpuinit write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
#ifdef NOT_FOR_L4
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
#endif
}
Example no. 20
0
void sunxi_set_cpus_boot_entry(int cpu, void *entry)
{
	if (cpu < NR_CPUS) {
		cpus_boot_entry[cpu] = (void *)(virt_to_phys(entry));
		smp_wmb();
		__cpuc_flush_dcache_area(cpus_boot_entry, NR_CPUS * 4);
		outer_clean_range(__pa(&cpus_boot_entry), __pa(&cpus_boot_entry + 1));
	}
}
int memory_engine_cache(memory_engine_t *engine, uint cmd,
				shm_driver_operation_t op)
{
	int res = 0;
	memory_node_t  *node;
	char tag_clean[] = "clean";
	char tag_invalidate[] = "invalidate";
	char tag_cleanAndinvalidate[] = "clean and invalidate";
	char *ptr_tag;
	if (engine == NULL) {
		return -EINVAL;
	}
	down(&(engine->m_mutex));

	node = memory_engine_lookup_shm_node_for_cache(&(engine->m_shm_root),
						op.m_param3, op.m_param2);
	if ((node == NULL) || (node->m_next_free != NULL)) {
		res = 0;
		if (cmd == SHM_DEVICE_CMD_INVALIDATE) {
			ptr_tag = tag_invalidate;
		} else if (cmd == SHM_DEVICE_CMD_CLEAN) {
			ptr_tag = tag_clean;
		} else {
			ptr_tag = tag_cleanAndinvalidate;
		}

		up(&(engine->m_mutex));
		return res;
	}
	up(&(engine->m_mutex));

	switch (cmd) {
	case SHM_DEVICE_CMD_INVALIDATE:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_FROM_DEVICE);
		outer_inv_range(op.m_param3,
				op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEAN:
		dmac_map_area((const void *)op.m_param1,
			      op.m_param2, DMA_TO_DEVICE);
		outer_clean_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	case SHM_DEVICE_CMD_CLEANANDINVALIDATE:
		dmac_flush_range((const void *)op.m_param1,
				 (const void *)(op.m_param1 +
						op.m_param2));
		outer_flush_range(op.m_param3,
				  op.m_param3 + op.m_param2);
		break;
	default:
		res = -ENOTTY;
	}

	return res;
}
Example no. 22
0
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;
    static int is_first_boot = 1;

    printk(KERN_CRIT "Boot slave CPU\n");

    /*
     * Set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    HOTPLUG_INFO("boot_secondary, cpu: %d\n", cpu);
    /*
     * The secondary processor is waiting to be released from
     * the holding pen - release it, then wait for it to flag
     * that it has been released by resetting pen_release.
     *
     * Note that "pen_release" is the hardware CPU ID, whereas
     * "cpu" is Linux's internal ID.
     */
    pen_release = cpu;
    __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
    outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

    if (is_first_boot)
    {
        mt65xx_reg_sync_writel(SLAVE_MAGIC_NUM, SLAVE_MAGIC_REG);
        is_first_boot = 0;
    }
    else
    {
        mt65xx_reg_sync_writel(virt_to_phys(mt_secondary_startup), BOOTROM_BOOT_ADDR);
    }
    power_on_cpu1();

    smp_cross_call(cpumask_of(cpu));

    timeout = jiffies + (1 * HZ);
    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }

    /*
     * Now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);

    return pen_release != -1 ? -ENOSYS : 0;
}
Example no. 23
0
void s5p_mfc_cache_clean(const void *start_addr, unsigned long size)
{
	unsigned long paddr;
	void *cur_addr, *end_addr;

	dmac_map_area(start_addr, size, DMA_TO_DEVICE);

	cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
	end_addr = cur_addr + PAGE_ALIGN(size);

	while (cur_addr < end_addr) {
		struct page *page = vmalloc_to_page(cur_addr);

		/* check the page before converting: page_to_pfn(NULL)
		 * yields a garbage pfn, not zero */
		if (page) {
			paddr = page_to_pfn(page) << PAGE_SHIFT;
			outer_clean_range(paddr, paddr + PAGE_SIZE);
		}
		cur_addr += PAGE_SIZE;
	}

	/* FIXME: L2 operation optimization */
	/*
	unsigned long start, end, unitsize;
	unsigned long cur_addr, remain;

	dmac_map_area(start_addr, size, DMA_TO_DEVICE);

	cur_addr = (unsigned long)start_addr;
	remain = size;

	start = page_to_pfn(vmalloc_to_page(cur_addr));
	start <<= PAGE_SHIFT;
	if (start & PAGE_MASK) {
		unitsize = min((start | PAGE_MASK) - start + 1, remain);
		end = start + unitsize;
		outer_clean_range(start, end);
		remain -= unitsize;
		cur_addr += unitsize;
	}

	while (remain >= PAGE_SIZE) {
		start = page_to_pfn(vmalloc_to_page(cur_addr));
		start <<= PAGE_SHIFT;
		end = start + PAGE_SIZE;
		outer_clean_range(start, end);
		remain -= PAGE_SIZE;
		cur_addr += PAGE_SIZE;
	}

	if (remain) {
		start = page_to_pfn(vmalloc_to_page(cur_addr));
		start <<= PAGE_SHIFT;
		end = start + remain;
		outer_clean_range(start, end);
	}
	*/

}
Example no. 24
0
static void exynos_mem_paddr_cache_clean(dma_addr_t start, size_t length)
{
	if (length > (size_t) L2_FLUSH_ALL) {
		flush_cache_all();		/* L1 */
		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
		outer_clean_all();		/* L2 */
	} else if (length > (size_t) L1_FLUSH_ALL) {
		dma_addr_t end = start + length - 1;

		flush_cache_all();		/* L1 */
		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
		outer_clean_range(start, end);  /* L2 */
	} else {
		dma_addr_t end = start + length - 1;

		dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
		outer_clean_range(start, end);	/* L2 */
	}
}
static inline void disable_early_ack(u32 mc_override)
{
	static u32 override_val;

	override_val = mc_override & (~MC_EMEM_ARB_OVERRIDE_EACK_MASK);
	mc_writel(override_val, MC_EMEM_ARB_OVERRIDE);
	__cpuc_flush_dcache_area(&override_val, sizeof(override_val));
	outer_clean_range(__pa(&override_val), __pa(&override_val + 1));
	override_val |= mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK;
}
Example no. 26
0
static void cacheperf(void *vbuf, enum cachemaintenance id)
{
	struct timespec beforets;
	struct timespec afterts;
	phys_addr_t pbuf = virt_to_phys(vbuf);
	u32 pbufend, xfer_size, i;
	long timeval;

	xfer_size = START_SIZE;
	while (xfer_size <= END_SIZE) {
		pbufend = pbuf + xfer_size;
		timeval = 0;

		for (i = 0; i < try_cnt; i++) {
			memset(vbuf, i, xfer_size);
			getnstimeofday(&beforets);

			switch (id) {
			case CM_CLEAN:
				if (l1)
					dmac_map_area(vbuf, xfer_size,
							DMA_TO_DEVICE);
				if (l2)
					outer_clean_range(pbuf, pbufend);
				break;
			case CM_INV:
				if (l2)
					outer_inv_range(pbuf, pbufend);
				if (l1)
					dmac_unmap_area(vbuf, xfer_size,
							DMA_FROM_DEVICE);
				break;
			case CM_FLUSH:
				if (l1)
					dmac_flush_range(vbuf,
					(void *)((u32) vbuf + xfer_size));
				if (l2)
					outer_flush_range(pbuf, pbufend);
				break;
			case CM_FLUSHALL:
				if (l1)
					flush_cache_all();
				if (l2)
					outer_flush_all();
				break;
			}
			getnstimeofday(&afterts);
			timeval += update_timeval(beforets, afterts);
		}
		printk(KERN_INFO "%ld\n", timeval / try_cnt);
		xfer_size *= 2;
	}
}
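cacheperf() relies on an update_timeval() helper that is not shown above; a minimal sketch of what it is assumed to compute, the elapsed nanoseconds between the two timespec samples:

static long update_timeval(struct timespec before, struct timespec after)
{
	/* elapsed time in nanoseconds between the two samples */
	return (after.tv_sec - before.tv_sec) * NSEC_PER_SEC +
	       (after.tv_nsec - before.tv_nsec);
}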
Example no. 27
0
int meson_trustzone_memconfig(void)
{
	int ret;
	struct memconfig_hal_api_arg arg;
	arg.memconfigbuf_phy_addr = __pa(memsecure);
	arg.memconfigbuf_count = MEMCONFIG_NUM;

	__cpuc_flush_dcache_area(memsecure, sizeof(memsecure));
	outer_clean_range(__pa(memsecure), (__pa(memsecure + MEMCONFIG_NUM)));
	__cpuc_flush_dcache_area(&arg, sizeof(arg));
	outer_clean_range(__pa(&arg), __pa(&arg + 1));

	ret = meson_smc_hal_api(TRUSTZONE_HAL_API_MEMCONFIG, __pa(&arg));

	outer_inv_range(__pa(&arg), __pa(&arg + 1));
	dmac_unmap_area(&arg, sizeof(arg), DMA_FROM_DEVICE);
	outer_inv_range(__pa(memsecure), __pa(memsecure + MEMCONFIG_NUM));
	dmac_unmap_area(memsecure, sizeof(memsecure), DMA_FROM_DEVICE);

	return ret;
}
Example no. 28
0
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	static int cold_boot_done;

	/* Only need to bring cpu out of reset this way once */
	if (cold_boot_done == false) {
		prepare_cold_cpu(cpu);
		cold_boot_done = true;
	}

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu;
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example no. 29
0
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	int flag = 0;
	unsigned long timeout;

	pr_debug("Starting secondary CPU %d\n", cpu);

	
	/* Preset loops_per_jiffy so the secondary skips calibration */
	preset_lpj = loops_per_jiffy;

	if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
		flag = cold_boot_flags[cpu];
	else
		__WARN();

	if (per_cpu(cold_boot_done, cpu) == false) {
		ret = scm_set_boot_addr((void *)
					virt_to_phys(msm_secondary_startup),
					flag);
		if (ret == 0)
			release_secondary(cpu);
		else
			printk(KERN_DEBUG "Failed to set secondary core boot "
					  "address\n");
		per_cpu(cold_boot_done, cpu) = true;
		init_cpu_debug_counter_for_cold_boot();
	}

	spin_lock(&boot_lock);

	pen_release = cpu_logical_map(cpu);
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

	gic_raise_softirq(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		dmac_inv_range((void *)&pen_release,
			       (void *)(&pen_release + 1));
		udelay(10);
	}

	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
Example no. 30
0
void s5p_mfc_cache_clean(void *alloc_ctx)
{
	struct vb2_cma_phys_buf *buf = (struct vb2_cma_phys_buf *)alloc_ctx;
	void *start_addr;
	unsigned long size;
	unsigned long paddr = (unsigned long)buf->paddr;

	start_addr = phys_to_virt(buf->paddr);
	size = buf->size;

	dmac_map_area(start_addr, size, DMA_TO_DEVICE);
	outer_clean_range(paddr, paddr + size);
}