Example #1
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	/*
	 * The following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page(), which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLB_PAGE.
	 */
	gfp &= ~__GFP_COMP;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
	else
		__dma_free_buffer(page, size);

	return addr;
}
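For context: __dma_alloc() is an internal helper that drivers reach through the generic coherent-DMA API. Below is a minimal sketch of a caller, assuming a hypothetical driver with a valid struct device; the function name, buffer size, and comments are illustrative, not part of the example above.

#include <linux/dma-mapping.h>

static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;	/* bus address to hand to the device */
	void *ring;		/* CPU virtual address */

	/* One page of coherent (uncached or hardware-coherent) memory. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... fill descriptors through 'ring', program 'ring_dma' ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}

On ARM this path lands in __dma_alloc(): on a non-coherent core the returned pointer comes from __dma_alloc_remap(), otherwise straight from page_address().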
Example #2
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);
	else
		__dma_free_buffer(page, size);

	return addr;
}
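Functionally this is the same allocator as Example #1; the only differences are that the __GFP_COMP work-around is absent and the bus address is derived with the older page_to_dma(dev, page) helper rather than pfn_to_dma(dev, page_to_pfn(page)).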
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
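As a usage sketch of the streaming API documented above: a driver maps a buffer, lets the device run, then unmaps it. This is hypothetical driver code; 'dev', 'page', the offset, and the transfer length are assumptions.

#include <linux/dma-mapping.h>

static int example_tx(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t dma;

	/*
	 * Give the buffer to the device; on non-coherent ARM this ends
	 * up in arm_dma_map_page() and performs the cache maintenance.
	 */
	dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... kick off the transfer and wait for it to complete ... */

	/* Hand the buffer back to the CPU (arm_dma_unmap_page() above). */
	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}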
static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);
}
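dma_sync_single_for_device() and its for_cpu counterpart let a driver bounce ownership of a long-lived streaming mapping back and forth instead of remapping it for every transfer. A minimal sketch, with all names ('dev', 'dma', 'len') hypothetical and referring to a mapping created earlier with dma_map_page() or dma_map_single():

#include <linux/dma-mapping.h>

static void example_rx_poll(struct device *dev, dma_addr_t dma, size_t len)
{
	/* Take ownership back so CPU reads see what the device wrote. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

	/* ... inspect or copy the received data here ... */

	/*
	 * Return ownership to the device for the next transfer; on
	 * non-coherent ARM this lands in the function above.
	 */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}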
Example #6
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
    WARN_ON(irqs_disabled());

    if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
        return;

    size = PAGE_ALIGN(size);

    if (!arch_is_coherent())
        __dma_free_remap(cpu_addr, size);

    __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
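The WARN_ON(irqs_disabled()) enforces the rule in the comment: the free path may need to tear down the kernel mapping created at allocation time (__dma_free_remap()), so it must not run with interrupts off. One hypothetical way to honour that from atomic code is to defer the free to process context; every name in this sketch is illustrative.

#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct dma_free_work {
	struct work_struct work;
	struct device *dev;
	size_t size;
	void *cpu_addr;
	dma_addr_t handle;
};

static void dma_free_work_fn(struct work_struct *work)
{
	struct dma_free_work *w = container_of(work, struct dma_free_work, work);

	/* Runs in process context, so the WARN_ON above stays quiet. */
	dma_free_coherent(w->dev, w->size, w->cpu_addr, w->handle);
	kfree(w);
}

static void dma_free_coherent_deferred(struct device *dev, size_t size,
				       void *cpu_addr, dma_addr_t handle)
{
	struct dma_free_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;		/* sketch only; real code must not leak here */

	INIT_WORK(&w->work, dma_free_work_fn);
	w->dev = dev;
	w->size = size;
	w->cpu_addr = cpu_addr;
	w->handle = handle;
	schedule_work(&w->work);
}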
Example #7
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
    unsigned long timeout;

    /*
     * set synchronisation state between this boot processor
     * and the secondary one
     */
    spin_lock(&boot_lock);

    /*
     * The secondary processor is waiting to be released from
     * the holding pen - release it, then wait for it to flag
     * that it has been released by resetting pen_release.
     *
     * Note that "pen_release" is the hardware CPU ID, whereas
     * "cpu" is Linux's internal ID.
     */
    pen_release = cpu;
    smp_wmb();

    clean_dcache_area((void *)&pen_release, sizeof(pen_release));
    outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));

    dsb_sev();

    /*
     * The timeout is deliberately set in jiffies so that slower
     * processors, which must also run with a low HZ, wait longer.
     */
    timeout = jiffies + 128;

    udelay(100);

    /*
     * If the secondary CPU was waiting on WFE, it should already be
     * watching <pen_release>; if it is instead waiting in WFI, send
     * it an IPI to be sure it wakes.
     */
    if (pen_release != -1)
        smp_cross_call(cpumask_of(cpu));

    while (time_before(jiffies, timeout)) {
        smp_rmb();
        if (pen_release == -1)
            break;

        udelay(10);
    }

    if (arch_is_coherent()) {
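        /*
         * Fully coherent platform: outer cache maintenance is not
         * needed here, so stub out the outer_cache callbacks.
         */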
        outer_cache.inv_range = NULL;
        outer_cache.clean_range = NULL;
        outer_cache.flush_range = NULL;
        outer_cache.sync = NULL;
    }

    /*
     * now the secondary core is starting up let it run its
     * calibrations, then wait for it to finish
     */
    spin_unlock(&boot_lock);

    return pen_release != -1 ? -ENOSYS : 0;
}
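For completeness, the secondary CPU's side of this handshake: once released, it must reset pen_release and then synchronise on boot_lock. A sketch of the matching hook as it appears on similar holding-pen platforms (interrupt-controller setup omitted; exact details vary per machine):

void __cpuinit platform_secondary_init(unsigned int cpu)
{
    /*
     * Let the primary processor know we're out of the holding pen;
     * this is the store that the poll loop in boot_secondary()
     * above is waiting to observe.
     */
    pen_release = -1;
    smp_wmb();

    /*
     * Synchronise with the boot thread: don't proceed until
     * boot_secondary() has released boot_lock.
     */
    spin_lock(&boot_lock);
    spin_unlock(&boot_lock);
}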