Example #1
int mv_dma_init(void)
{
#if defined(CONFIG_MV78200) || defined(CONFIG_MV632X)
	if (MV_FALSE == mvSocUnitIsMappedToThisCpu(IDMA))
	{
		printk(KERN_INFO"IDMA is not mapped to this CPU\n");
		return -ENODEV;
	}
#endif
	printk(KERN_INFO "Use IDMA channels %d and %d for enhancing the following function:\n",
                CPY_CHAN1, CPY_CHAN2);
#ifdef CONFIG_MV_IDMA_COPYUSER
        printk(KERN_INFO "  o Copy From/To user space operations.\n");
#endif
#ifdef CONFIG_MV_IDMA_MEMCOPY
	printk(KERN_INFO "  o memcpy() and memmove() operations.\n");
#endif
#ifdef CONFIG_MV_IDMA_MEMZERO
	printk(KERN_INFO "  o memzero() operations.\n");
#endif

#ifdef CONFIG_MV_IDMA_MEMZERO
	DPRINTK(KERN_ERR "ZERO buffer address 0x%08x\n", (u32)dmaMemInitBuff);
	
	asm_memzero(dmaMemInitBuff, sizeof(dmaMemInitBuff));
	dmac_flush_range(dmaMemInitBuff, dmaMemInitBuff + sizeof(dmaMemInitBuff));
#endif

	MV_REG_WRITE(IDMA_BYTE_COUNT_REG(CPY_CHAN1), 0);
	MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(CPY_CHAN1), 0);
	MV_REG_WRITE(IDMA_CTRL_HIGH_REG(CPY_CHAN1), ICCHR_ENDIAN_LITTLE
#ifdef MV_CPU_LE
		| ICCHR_DESC_BYTE_SWAP_EN
#endif
		);
	MV_REG_WRITE(IDMA_CTRL_LOW_REG(CPY_CHAN1), CPY_IDMA_CTRL_LOW_VALUE);

	MV_REG_WRITE(IDMA_BYTE_COUNT_REG(CPY_CHAN2), 0);
	MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(CPY_CHAN2), 0);
	MV_REG_WRITE(IDMA_CTRL_HIGH_REG(CPY_CHAN2), ICCHR_ENDIAN_LITTLE
#ifdef MV_CPU_LE
		| ICCHR_DESC_BYTE_SWAP_EN
#endif
		);
	MV_REG_WRITE(IDMA_CTRL_LOW_REG(CPY_CHAN2), CPY_IDMA_CTRL_LOW_VALUE);

	current_dma_channel = CPY_CHAN1;
	dma_proc_entry = create_proc_entry("dma_copy", S_IFREG | S_IRUGO, NULL);
	if (dma_proc_entry) {
		dma_proc_entry->read_proc = dma_read_proc;
/*		dma_proc_entry->write_proc = dma_write_proc; */
		dma_proc_entry->nlink = 1;
	}

	idma_init = 1;

	return 0;
}
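For context, a brief sketch of how an init routine like this would typically be wired up, assuming the surrounding driver follows the usual kernel module pattern (the registration itself is not part of the listing above):

/* Assumed wiring; the real driver may use a different initcall level. */
module_init(mv_dma_init);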
Example #2
/*=======================================================================*/
void dma_memzero(void *to, __kernel_size_t n)
{
	u32 phys_from, phys_to;
	u32 unaligned_to;
	unsigned long flags;

	DPRINTK("dma_memzero: entering\n");

	/* This is used in the very early stages */
	if (!idma_init)
		return asm_memzero(to, n);

	/* Fall back if the destination buffer is not physically contiguous */
	if (!virt_addr_valid(to)) {
		DPRINTK("Falling back to asm_memzero because of limitations\n");
		return asm_memzero(to, n);
	}

	++dma_memzero_cnt;	

	/*
	 * If the buffer start address is not cache line-aligned, make it so.
	 */
	unaligned_to = (u32)to & 31;
	if (unaligned_to) {
		DPRINTK("Fixing up starting address by %d bytes\n", 32 - unaligned_to);

		asm_memzero(to, 32 - unaligned_to);

		to = (void *)((u32)to + (32 - unaligned_to));

		/* It's OK, n is supposed to be greater than 32 bytes at this point */
		n -= (32 - unaligned_to);
	}

	/*
	 * If the buffer end address is not cache line-aligned, make it so.
	 */
	unaligned_to = ((u32)to + n) & 31;
	if (unaligned_to) {
		u32 tmp_to = (u32)to + (n - unaligned_to);
		DPRINTK("Fixing ending alignment by %d bytes\n", unaligned_to);

		asm_memzero((void *)tmp_to, unaligned_to);

		/* It's OK, n is supposed to be greater than 32 bytes at this point */
		n -= unaligned_to;
	}

	phys_from = physical_address((u32)dmaMemInitBuff, 0);
	phys_to = physical_address((u32)to, 1);

	/*
	 * Prepare the IDMA.
	 */
	if (!phys_from || !phys_to) {
		/* The requested page isn't available, fall back to asm_memzero() */
		DPRINTK(" no physical address, fall back: to %p\n", to);
		return asm_memzero(to, n);
	}

	spin_lock_irqsave(&current->mm->page_table_lock, flags);
	if (idma_busy) {
		/*
		 * The IDMA engine is busy. This can happen when
		 * dma_copy_to/from_user calls arch_copy_to/from_user, which
		 * may cause a page fault that in turn triggers a memcpy or memzero.
		 */
		DPRINTK(" idma is busy...\n");
		spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
		return asm_memzero(to, n);
	}
	idma_busy = 1;

	/* Ensure that the destination region is invalidated */
	mvOsCacheInvalidate(NULL, (void *)to, n);
	
	/* Start DMA */
	DPRINTK(" activate DMA: channel %d from %x with source hold to %x len %x\n",
		CPY_CHAN1, phys_from, phys_to, n);
	mvDmaMemInit(CPY_CHAN1, phys_from, phys_to, n);
	
#ifdef RT_DEBUG
	dma_activations++;
#endif
        
	if (wait_for_idma(CPY_CHAN1))
		BUG();

	DPRINTK("dma_memzero(0x%x, %lu): exiting\n", (u32)to, (unsigned long)n);

	idma_busy = 0;
	spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
}
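A possible call-site pattern, as a hedged sketch: callers would usually route only sufficiently large clears through dma_memzero(), since small ones do not amortize the IDMA setup cost. The wrapper name and threshold below are assumptions for illustration, not part of this driver:

#define DMA_MEMZERO_MIN_LEN 128	/* assumed threshold, tune per platform */

static void my_clear_buffer(void *p, __kernel_size_t n)
{
	/* Small clears stay on the CPU; larger ones use the IDMA path above. */
	if (n < DMA_MEMZERO_MIN_LEN)
		asm_memzero(p, n);
	else
		dma_memzero(p, n);
}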
Example #3
/*=======================================================================*/
void xor_memzero(void *to, __kernel_size_t n)
{
	u32 xor_dma_unaligned_to;
	u32 to_pa;
	int ua = 0;
	int chan;
	struct xor_channel_t *channel;

	DPRINTK("xor_memzero(0x%x, %lu): entering\n", (u32) to, (unsigned long)n);

	if (xor_engine_initialized == 0) {
		DPRINTK(KERN_WARNING " %s: XOR engines not initialized yet\n", __func__);
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}
	if (!virt_addr_valid((u32)to)) {
		DPRINTK("xor_memzero(0x%x, %lu): falling back to asm_memzero\n",
			(u32)to, (unsigned long)n);
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}

	/*
	 * We can only handle fully cache-line-aligned transactions
	 * with the DMA engine: the destination must be cache-line
	 * aligned and the length must be a multiple of the cache line size.
	 */

	to_pa = virt_to_phys(to);

	/*
	 * If the start address is not cache line-aligned, make it so.
	 */
	xor_dma_unaligned_to = (u32)to & 31;
	if (xor_dma_unaligned_to) {
		ua++;
		asm_memzero(to, 32 - xor_dma_unaligned_to);
		to = (void *)((u32)to + 32 - xor_dma_unaligned_to);
		n -= 32 - xor_dma_unaligned_to;
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	xor_dma_unaligned_to = ((u32)to + n) & 31;
	if (xor_dma_unaligned_to) {
		u32 tmp_to = (u32)to + n - xor_dma_unaligned_to;

		asm_memzero((void *)tmp_to, xor_dma_unaligned_to);
		n -= xor_dma_unaligned_to;
		ua++;
	}

	/*
	 * OK! We should now be fully aligned on both ends. 
	 */
	chan = allocate_channel();
	if (chan == -1) {
		DPRINTK("XOR engines are busy, falling back\n");
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}
	if (down_trylock(&meminit_sema)) {
		DPRINTK("meminit is used by one of the XOR engines\n");
		xor_memzero_miss++;
		free_channel(&xor_channel[chan]);
		return asm_memzero(to, n);
	}

	DPRINTK("setting up rest of descriptor for channel %d\n", chan);
        channel = &xor_channel[chan];
	
        /* Ensure that the cache is clean */
	dmac_inv_range(to, to + n);

	channel->chan_active = 1;

	DPRINTK("setting up rest of descriptor\n");
        if( mvXorMemInit(chan, virt_to_phys(to), n, 0, 0) != MV_OK)
        {
            printk(KERN_ERR "%s: DMA memzero operation on channel %d failed. to %p len %d!\n", __func__, chan,
                to, n);
            free_channel(channel);
            up(&meminit_sema);
       	    return asm_memzero(to, n);
        }
	xor_waiton_eng(chan);

	DPRINTK("DMA memzero complete\n");
	/* TODO: check whether the XOR operation failed */
	up(&meminit_sema);
	free_channel(channel);
	xor_memzero_hit++;
	if (ua)
		xor_memzero_unaligned++;

}
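Both memzero variants rely on the same 32-byte cache-line arithmetic: addr & 31 is the offset into a line, 32 - (addr & 31) is the head fixup, and (addr + n) & 31 is the tail fixup. A small standalone illustration of that split, written as plain user-space C and independent of the kernel code above:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative only: split a buffer into an unaligned head, a 32-byte
 * aligned body suitable for the DMA engine, and an unaligned tail.
 * Assumes n is large enough to cover both fixups, as the driver code does. */
static void split_for_dma(uintptr_t addr, size_t n)
{
	size_t head = (32 - (addr & 31)) & 31;	/* bytes needed to reach a line boundary */
	size_t tail = (addr + n) & 31;		/* bytes past the last full line */
	size_t body = n - head - tail;		/* multiple of 32 by construction */

	printf("head=%zu body=%zu tail=%zu\n", head, body, tail);
}

int main(void)
{
	split_for_dma(0x1005, 100);	/* prints head=27 body=64 tail=9 */
	return 0;
}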