Example #1
/*=======================================================================*/
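/*
 * dma_copy_to_user() - copy_to_user() variant that offloads the copy to
 * the IDMA engine. If the engine has not been initialized it falls back
 * to the plain __arch_copy_to_user(); otherwise it bumps the
 * dma_to_user counter and delegates to dma_copy() with to_user = 1.
 */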
unsigned long dma_copy_to_user(void *to, const void *from, unsigned long n)
{
	if(!idma_init)
    		return __arch_copy_to_user((void *)to, (void *)from, n);

	dma_to_user++;
	DPRINTK(KERN_CRIT "dma_copy_to_user(%#10x, %#10x, %lu): entering\n", (u32) to, (u32) from, n);

	return dma_copy(to, from, n, 1);
}
Example #2
/*=======================================================================*/
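/*
 * xor_copy_to_user() - copy_to_user() variant that offloads the copy to
 * the XOR engine. If the engine has not been initialized it falls back
 * to the plain __arch_copy_to_user(); otherwise it delegates to
 * xor_dma_copy() with to_user = 1.
 */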
unsigned long xor_copy_to_user(void *to, const void *from, unsigned long n)
{
	if(!xor_engine_initialized)
    		return __arch_copy_to_user((void *)to, (void *)from, n);

	dma_to_user++;
	DPRINTK(KERN_CRIT "xor_copy_to_user(%#10x, %#10x, %lu): entering\n", (u32) to, (u32) from, n);

	return xor_dma_copy(to, from, n, 1);
}
Example #3
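/*
 * ___copy_to_user() - copy to user space with the destination pages
 * pinned so the bulk of the work can be done with memcpy(). KERNEL_DS
 * destinations are copied directly with memcpy(); copies shorter than
 * 256 bytes use __arch_copy_to_user(). Larger copies pin each
 * destination page for write (dropping mmap_sem and faulting the page
 * in with a one-byte put_user() when pinning fails) and memcpy() at
 * most up to the next page boundary per iteration. Returns the number
 * of bytes that could not be copied.
 */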
unsigned long ___copy_to_user(void *to, const void *from, unsigned long n)
{
	if (get_fs() == KERNEL_DS) {
		memcpy(to, from, n);
		return 0;
	}

	if (n < 256)
		return __arch_copy_to_user(to, from, n);

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (unlikely(!pin_page_for_write(to, &pte, &ptl))) {
			up_read(&current->mm->mmap_sem);
			if (put_user(*((u8 *)from), (u8 *)to))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = ((~((unsigned long)to)) & (PAGE_SIZE - 1)) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy(to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}

out:
	up_read(&current->mm->mmap_sem);

	return n;
}
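The loop above relies on pin_page_for_write(), which is not part of this listing. A rough sketch of what such a helper can look like on this ARM code base (an illustrative reconstruction, not necessarily the exact routine used by these examples): it walks the current process's page tables and only succeeds when a present, young, dirty and writable PTE is already mapped, returning the PTE locked so the caller can safely memcpy() into the page.

static int pin_page_for_write(const void *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	/* Map and lock the PTE; bail out unless it is already writable. */
	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_write(*pte) ||
		     !pte_young(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	/* Hand the locked PTE back; the caller does pte_unmap_unlock(). */
	*ptep = pte;
	*ptlp = ptl;

	return 1;
}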
Example #4
/*=======================================================================*/
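/*
 * dma_copy() - copy between kernel and user space using the IDMA engine.
 * The cache-line-unaligned head and tail of the buffer are copied with
 * the ordinary __arch_copy_*_user() routines, then the aligned middle is
 * split into chunks that never cross a page boundary and queued on
 * alternating IDMA channels. If a chunk is smaller than
 * IDMA_MIN_COPY_CHUNK, a page has no physical mapping, or the kernel
 * address is not in the linear map, the remaining bytes are copied with
 * the ordinary routines instead. Returns the number of bytes that could
 * not be copied.
 */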
static unsigned long dma_copy(void *to, const void *from, unsigned long n, unsigned int to_user)
{
	u32 chunk,i;
	u32 k_chunk = 0;
	u32 u_chunk = 0;
	u32 phys_from, phys_to;
	
        unsigned long flags;
	u32 unaligned_to;
	u32 index = 0;
        u32 temp;

        unsigned long uaddr, kaddr;
        unsigned char kaddr_kernel_static = 0;
	DPRINTK("dma_copy: entering\n");


	/*
	 * The unaligned head and tail are handled separately, since the destination
	 * might share a cache line with data that another process is changing. We must
	 * not invalidate those cache lines, and we cannot flush them either, since
	 * another process (or the exception handler) might fetch the cache line before
	 * we have copied it.
	 */

	/*
	 * Ok, start addr is not cache line-aligned, so we need to make it so.
	 */
	unaligned_to = (u32)to & 31;
	if(unaligned_to)
	{
		DPRINTK("Fixing up starting address %d bytes\n", 32 - unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user(to, from, 32 - unaligned_to)) 
			goto exit_dma; 
		}
		else
		{
		    if(__arch_copy_from_user(to, from, 32 - unaligned_to)) 
			goto exit_dma;
		}

		temp = (u32)to + (32 - unaligned_to);
		to = (void *)temp;
		temp = (u32)from + (32 - unaligned_to);
		from = (void *)temp;

		/* n is expected to be greater than 32 bytes at this point */
		n -= (32 - unaligned_to);
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	unaligned_to = ((u32)to + n) & 31;
	if(unaligned_to)
	{	
		u32 tmp_to = (u32)to + (n - unaligned_to);
		u32 tmp_from = (u32)from + (n - unaligned_to);
		DPRINTK("Fixing ending alignment %d bytes\n", unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto exit_dma;
		}
		else
		{
		    if(__arch_copy_from_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto exit_dma;
		}

		/* n is expected to be greater than 32 bytes at this point */
		n -= unaligned_to;
	}

        if(to_user)
        {
            uaddr = (unsigned long)to;  
            kaddr = (unsigned long)from;
        }
        else
        {
             uaddr = (unsigned long)from;
             kaddr = (unsigned long)to;
        }
        if(virt_addr_valid(kaddr))
        {
            kaddr_kernel_static = 1;
            k_chunk = n;
        }
	else
	{
		DPRINTK("kernel address is not linear, fall back\n");
		goto exit_dma;
	}
         
        spin_lock_irqsave(&current->mm->page_table_lock, flags);
	if (idma_busy)
	{
	    BUG();
	}
	idma_busy = 1;
     
        i = 0;
	while(n > 0)
	{
	    if(k_chunk == 0)
	    {
                /* virtual address */
	        k_chunk = page_remainder((u32)kaddr);
		DPRINTK("kaddr reminder %d \n",k_chunk);
	    }

	    if(u_chunk == 0)
	    {
                u_chunk = page_remainder((u32)uaddr);
                DPRINTK("uaddr reminder %d \n", u_chunk);
            }
        
            chunk = ((u_chunk < k_chunk) ? u_chunk : k_chunk);
            if(n < chunk)
	    {
		chunk = n;
	    }

	    if(chunk == 0)
	    {
	    	break;
	    }
            phys_from = physical_address((u32)from, 0);
            phys_to = physical_address((u32)to, 1);
	    DPRINTK("choose chunk %d \n",chunk);
	    /* if page doesn't exist go out */
	    if ((!phys_from) || (!phys_to))
	    {
		/* The requested page isn't available, fall back to the CPU copy */
		DPRINTK(" no physical address, fall back: from %p , to %p \n", from, to);
		goto wait_for_idmas;
   
	    }
	    /*
	     *  Prepare the IDMA.
	     */
            if (chunk < IDMA_MIN_COPY_CHUNK)
            {
        	DPRINTK(" chunk %d too small , use memcpy \n",chunk);
                /* the "to" address might cross cache line boundary, so part of the line*/  
                /* may be subject to DMA, so we need to wait to last DMA engine to finish */
                if (index > 0)
                {
                    if(wait_for_idma(PREV_CHANNEL(current_dma_channel)))
                    {
	                BUG();
                    }
                }
                

                if(to_user) 
		{
	       	    if(__arch_copy_to_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto wait_for_idmas;
		    }
		}
	        else
		{
	            if(__arch_copy_from_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto wait_for_idmas;	
		    }
		}
           }
            else
            {
                /* 
	 	 * Ensure that the cache is clean:
	 	 *      - from range must be cleaned
        	 *      - to range must be invalidated
	         */
		dmac_flush_range(from, from + chunk);
		dmac_inv_range(to, to + chunk);
               
		if (index > 1)
		{
		    if (wait_for_idma(current_dma_channel))
		    {
			BUG();
			goto unlock_dma;
		    }
		}
		/* Start DMA */
		DPRINTK(" activate DMA: channel %d from %x to %x len %x\n",
			current_dma_channel, phys_from, phys_to, chunk);
		mvDmaTransfer(current_dma_channel, phys_from, phys_to, chunk, 0);
		current_dma_channel = NEXT_CHANNEL(current_dma_channel);
#ifdef RT_DEBUG
		dma_activations++;
#endif
		index++;
            }
                

		/* go to next chunk */
		from += chunk;
		to += chunk;
                kaddr += chunk;
                uaddr += chunk;
		n -= chunk;
		u_chunk -= chunk;
		k_chunk -= chunk;		
	}
        
wait_for_idmas:
        if (index > 1)
        {
	    if(wait_for_idma(current_dma_channel))
            {
	        BUG(); 
            }
        }

        if (index > 0)
        {
            if(wait_for_idma(PREV_CHANNEL(current_dma_channel)))
            {
	        BUG();
            }
        }

unlock_dma:    
	idma_busy = 0;    
        spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
 exit_dma:
        
        DPRINTK("dma_copy(0x%x, 0x%x, %lu): exiting\n", (u32) to,
                (u32) from, n);
       

        if (n != 0)
        {
            if (to_user)
                return __arch_copy_to_user((void *)to, (void *)from, n);
            else
                return __arch_copy_from_user((void *)to, (void *)from, n);
        }
        return 0;
}
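dma_copy() above and xor_dma_copy() in the next example both cap each chunk with a page_remainder() helper that is not shown in these listings. A minimal sketch of what it presumably computes, assuming the standard PAGE_SIZE and PAGE_MASK definitions (an illustration, not the code used by these examples):

/*
 * Illustrative helper (not from the listings above): number of bytes
 * from addr up to the end of the page containing it. Capping a chunk
 * by this value guarantees it never crosses a page boundary, so one
 * physical address is valid for the whole chunk.
 */
static inline u32 page_remainder(u32 addr)
{
	return PAGE_SIZE - (addr & ~PAGE_MASK);
}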
Example #5
/*
 * n must be greater than or equal to 64.
 */
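/*
 * xor_dma_copy() - copy between kernel and user space using two XOR
 * engine channels in DMA mode. The flow mirrors dma_copy(): the
 * cache-line-unaligned head and tail are copied with
 * __arch_copy_*_user(), the aligned middle is split into per-page
 * chunks that ping-pong between the two allocated channels, and any
 * bytes left over after a failure are copied with the ordinary
 * routines. Returns the number of bytes that could not be copied.
 */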
static unsigned long xor_dma_copy(void *to, const void *from, unsigned long n, unsigned int to_user)
{
	u32 chunk,i;
	u32 k_chunk = 0;
	u32 u_chunk = 0;
	u32 phys_from, phys_to;
	
        unsigned long flags;
	u32 unaligned_to;
	u32 index = 0;
        u32 temp;

        unsigned long uaddr, kaddr;
	int     chan1, chan2 = -1;
        int     current_channel;
        struct xor_channel_t *channel;
       
        DPRINTK("xor_dma_copy: entering\n");


        chan1 = allocate_channel();
        if (chan1 != -1)
        {
            chan2 = allocate_channel();
            if(chan2 == -1)
            {
                free_channel(&xor_channel[chan1]);
            }
        }
        if((chan1 == -1) || (chan2 == -1))
        {
            goto exit_dma;
        }
        current_channel = chan1;
	/*
	 * The unaligned head and tail are handled separately, since the destination
	 * might share a cache line with data that another process is changing. We must
	 * not invalidate those cache lines, and we cannot flush them either, since
	 * another process (or the exception handler) might fetch the cache line before
	 * we have copied it.
	 */

	/*
	 * Ok, start addr is not cache line-aligned, so we need to make it so.
	 */
	unaligned_to = (u32)to & 31;
	if(unaligned_to)
	{
		DPRINTK("Fixing up starting address %d bytes\n", 32 - unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user(to, from, 32 - unaligned_to)) 
			goto free_channels; 
		}
		else
		{
		    if(__arch_copy_from_user(to, from, 32 - unaligned_to)) 
			goto free_channels;
		}

		temp = (u32)to + (32 - unaligned_to);
		to = (void *)temp;
		temp = (u32)from + (32 - unaligned_to);
		from = (void *)temp;

		/* n is expected to be greater than 32 bytes at this point */
		n -= (32 - unaligned_to);
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	unaligned_to = ((u32)to + n) & 31;
	if(unaligned_to)
	{	
		u32 tmp_to = (u32)to + (n - unaligned_to);
		u32 tmp_from = (u32)from + (n - unaligned_to);
		DPRINTK("Fixing ending alignment %d bytes\n", unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto free_channels;
		}
		else
		{
		    if(__arch_copy_from_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto free_channels;
		}

		/* n is expected to be greater than 32 bytes at this point */
		n -= unaligned_to;
	}

        if(to_user)
        {
            uaddr = (unsigned long)to;  
            kaddr = (unsigned long)from;
        }
        else
        {
             uaddr = (unsigned long)from;
             kaddr = (unsigned long)to;
        }
        if(virt_addr_valid(kaddr))
        {
            k_chunk = n;
        }
	else
	{
		DPRINTK("kernel address is not linear, fall back\n");
		goto free_channels;		
	}
         
        spin_lock_irqsave(&current->mm->page_table_lock, flags);
     
        i = 0;
	while(n > 0)
	{
	    if(k_chunk == 0)
	    {
                /* virtual address */
	        k_chunk = page_remainder((u32)kaddr);
		DPRINTK("kaddr reminder %d \n",k_chunk);
	    }

	    if(u_chunk == 0)
	    {
                u_chunk = page_remainder((u32)uaddr);
                DPRINTK("uaddr reminder %d \n", u_chunk);
            }
        
            chunk = ((u_chunk < k_chunk) ? u_chunk : k_chunk);
            if(n < chunk)
	    {
		chunk = n;
	    }

	    if(chunk == 0)
	    {
	    	break;
	    }
            phys_from = physical_address((u32)from, 0);
            phys_to = physical_address((u32)to, 1);
	    DPRINTK("choose chunk %d \n",chunk);
	    /* if page doesn't exist go out */
	    if ((!phys_from) || (!phys_to))
	    {
		/* The requested page isn't available, fall back to the CPU copy */
		DPRINTK(" no physical address, fall back: from %p , to %p \n", from, to);
		goto unlock_dma;
   
	    }
	    /*
	     *  Prepare the IDMA.
	     */
            if (chunk < XOR_MIN_COPY_CHUNK)
            {
                int last_chan = chan1;   
        	DPRINTK(" chunk %d too small , use memcpy \n",chunk);
        	
        	if(current_channel == chan1)
                {   
                    last_chan = chan2;
                }
                /* the "to" address might cross cache line boundary, so part of the line*/  
                /* may be subject to DMA, so we need to wait to last DMA engine to finish */
                if(index > 0)
                    xor_waiton_eng(last_chan);

                if(to_user) 
		{
	       	    if(__arch_copy_to_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto unlock_dma;
		    }
		}
	        else
		{
	            if(__arch_copy_from_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto unlock_dma;	
		    }
		}
            }
            else
	    {
		
		    /* 
		    * Ensure that the cache is clean:
		    *      - from range must be cleaned
		    *      - to range must be invalidated
		    */
//		    mvOsCacheFlush(NULL, (void *)from, chunk);
		    dmac_flush_range(from, from + chunk);
		    //	    mvOsCacheInvalidate(NULL, (void *)to, chunk);
		    dmac_inv_range(to, to + chunk);
		    if(index > 0)
		    {
			xor_waiton_eng(current_channel);
		    }
		    channel = &xor_channel[current_channel];
  
		    /* Start DMA */
		    DPRINTK(" activate DMA: channel %d from %x to %x len %x\n",
                            current_channel, phys_from, phys_to, chunk);
		    channel->pDescriptor->srcAdd0 = phys_from;
		    channel->pDescriptor->phyDestAdd = phys_to;
		    channel->pDescriptor->byteCnt = chunk;
		    channel->pDescriptor->phyNextDescPtr = 0;
		    channel->pDescriptor->status = BIT31;
		    channel->chan_active = 1;

		    if( mvXorTransfer(current_channel, MV_DMA, channel->descPhyAddr) != MV_OK)
		    {
			printk(KERN_ERR "%s: DMA copy operation on channel %d failed!\n", __func__, current_channel);
			print_xor_regs(current_channel);
			BUG();
		    }
                
		    if(current_channel == chan1) 
		    {
			current_channel = chan2;
                    }
		    else
		    {
			current_channel = chan1;
		    }
#ifdef RT_DEBUG
		    dma_activations++;
#endif
		    index++;
	    }

		/* go to next chunk */
		from += chunk;
		to += chunk;
                kaddr += chunk;
                uaddr += chunk;
		n -= chunk;
		u_chunk -= chunk;
		k_chunk -= chunk;		
	}
unlock_dma:
        xor_waiton_eng(chan1);
        xor_waiton_eng(chan2);
        spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
free_channels:
        free_channel(&xor_channel[chan1]);
        free_channel(&xor_channel[chan2]);

exit_dma:        
        DPRINTK("xor_dma_copy(0x%x, 0x%x, %lu): exiting\n", (u32) to,
                (u32) from, n);
       
        if (n != 0)
        {
            if (to_user)
                return __arch_copy_to_user((void *)to, (void *)from, n);
            else
                return __arch_copy_from_user((void *)to, (void *)from, n);
        }
        return 0;
}