Example #1
void SndAl_RawSamples( int stream, int samples, int rate, int width, int channels, const byte *data, float volume, int entityNum )
{
	ALuint buffer;
	ALenum format = AL_FORMAT_STEREO16;	// default if width/channels match nothing below
	ALint  state;

	// Work out AL format
	if ( width == 1 )
	{
		if ( channels == 1 )
		{
			format = AL_FORMAT_MONO8;
		}
		else if ( channels == 2 )
		{
			format = AL_FORMAT_STEREO8;
		}
	}
	else if ( width == 2 )
	{
		if ( channels == 1 )
		{
			format = AL_FORMAT_MONO16;
		}
		else if ( channels == 2 )
		{
			format = AL_FORMAT_STEREO16;
		}
	}

	// Create the source if necessary
	if ( source_handle == -1 )
	{
		allocate_channel();

		// Failed?
		if ( source_handle == -1 )
		{
			si.Printf( PRINT_ALL, "Can't allocate streaming source\n" );
			return;
		}
	}

	// Create a buffer, and stuff the data into it
	qalGenBuffers( 1, &buffer );
	qalBufferData( buffer, format, data, ( samples * width * channels ), rate );

	// Shove the data onto the source
	qalSourceQueueBuffers( source, 1, &buffer );

	// Query the source state (informational; playback is actually started below, keyed off is_playing)
	qalGetSourcei( source, AL_SOURCE_STATE, &state );

	// Volume
	qalSourcef( source, AL_GAIN, volume * s_volume->value * s_gain->value );

	if ( !is_playing )
	{
		qalSourcePlay( source );
		is_playing = qtrue;
	}
}
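
For context, a minimal calling sketch. The stream index, rate, and buffer here are hypothetical illustrations (the engine, not this listing, defines the real values), and the byte typedef is assumed from the surrounding engine headers:

	// Feed one chunk of interleaved 16-bit stereo PCM at 22050 Hz to stream 0.
	// With width=2 and channels=2 the function selects AL_FORMAT_STEREO16.
	static byte pcm[ 4096 ];	// filled elsewhere with L/R sample pairs
	int samples = sizeof( pcm ) / ( 2 /* width */ * 2 /* channels */ );

	SndAl_RawSamples( 0, samples, 22050, 2, 2, pcm, 1.0f, -1 );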
Example #2
struct Channel *make_channel(void)
{
	return allocate_channel(TEST_CHANNEL);
}
Example #3
/*=======================================================================*/
void xor_memzero(void *to, __kernel_size_t n)
{
	u32 xor_dma_unaligned_to;
	u32 to_pa;
	int ua = 0;
	int chan;
	struct xor_channel_t *channel;

	DPRINTK("xor_memzero(0x%x, %lu): entering\n", (u32) to, (unsigned long)n);

	if (xor_engine_initialized == 0)
	{
		DPRINTK(KERN_WARNING" %s: xor engines not initialized yet\n", __func__);
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}
	if (!(virt_addr_valid((u32) to))) {
		DPRINTK("xor_memzero(0x%x, %lu): falling back to asm_memzero\n",
			(u32) to, (unsigned long)n);
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}

	/*
	 * We can only handle completely cache-line-aligned transactions
	 * with the DMA engine: dst must be cache-line aligned AND the
	 * length must be a multiple of the cache line size.
	 */

	to_pa = virt_to_phys(to);

	/*
	 * Ok, start addr is not cache line-aligned, so we need to make it so.
	 */
	xor_dma_unaligned_to = (u32) to & 31;
	if (xor_dma_unaligned_to)
	{
		ua++;
		asm_memzero(to, 32 - xor_dma_unaligned_to);
		to = (void *)((u32)to + 32 - xor_dma_unaligned_to);
		n -= 32 - xor_dma_unaligned_to;
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	xor_dma_unaligned_to = ((u32) to + n) & 31;
	if (xor_dma_unaligned_to) {
		u32 tmp_to = (u32) to + n - xor_dma_unaligned_to;
		asm_memzero((void *)tmp_to, xor_dma_unaligned_to);
		n -= xor_dma_unaligned_to;
		ua++;
	}

	/*
	 * OK! We should now be fully aligned on both ends. 
	 */
	chan = allocate_channel();
	if (chan == -1)
	{
		DPRINTK("XOR engines are busy, return\n");
		xor_memzero_miss++;
		return asm_memzero(to, n);
	}
	if (down_trylock(&meminit_sema))
	{
		DPRINTK("meminit is used by one of the XOR engines\n");
		xor_memzero_miss++;
		free_channel(&xor_channel[chan]);
		return asm_memzero(to, n);
	}

	DPRINTK("setting up rest of descriptor for channel %d\n", chan);
        channel = &xor_channel[chan];
	
        /* Ensure that the cache is clean */
	dmac_inv_range(to, to + n);

	channel->chan_active = 1;

	DPRINTK("setting up rest of descriptor\n");
        if( mvXorMemInit(chan, virt_to_phys(to), n, 0, 0) != MV_OK)
        {
            printk(KERN_ERR "%s: DMA memzero operation on channel %d failed. to %p len %d!\n", __func__, chan,
                to, n);
            free_channel(channel);
            up(&meminit_sema);
       	    return asm_memzero(to, n);
        }
	xor_waiton_eng(chan);

	DPRINTK("DMA memzero complete\n");
	/* TODO: check descriptor status to see whether the operation failed */
	up(&meminit_sema);
	free_channel(channel);
	xor_memzero_hit++;
	if (ua)
		xor_memzero_unaligned++;

}
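
The head/tail trimming above is the standard mask arithmetic for a 32-byte cache line. The same idea as a standalone helper, using the driver's u32 type (an illustrative sketch only, not part of the driver):

	/* Shrink [addr, addr+len) to the largest inner region made of whole
	 * 32-byte cache lines; the trimmed head and tail must be handled by
	 * the CPU fallback (asm_memzero above). */
	static void trim_to_cache_lines(u32 addr, u32 len, u32 *out_addr, u32 *out_len)
	{
		u32 head = (32 - (addr & 31)) & 31;	/* bytes before the first full line */
		if (head > len)
			head = len;
		addr += head;
		len -= head;
		len &= ~31;				/* drop the partial tail line */
		*out_addr = addr;
		*out_len = len;
	}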
Example #4
/*
 * n must be greater than or equal to 64.
 */
static unsigned long xor_dma_copy(void *to, const void *from, unsigned long n, unsigned int to_user)
{
	u32 chunk, i;
	u32 k_chunk = 0;
	u32 u_chunk = 0;
	u32 phys_from, phys_to;

	unsigned long flags;
	u32 unaligned_to;
	u32 index = 0;
	u32 temp;

	unsigned long uaddr, kaddr;
	int chan1, chan2 = -1;
	int current_channel;
	struct xor_channel_t *channel;
        DPRINTK("xor_dma_copy: entering\n");


        chan1 = allocate_channel();
        if (chan1 != -1)
        {
            chan2 = allocate_channel();
            if(chan2 == -1)
            {
                free_channel(&xor_channel[chan1]);
            }
        }
        if((chan1 == -1) || (chan2 == -1))
        {
            goto exit_dma;
        }
        current_channel = chan1;
	/*
	 * The unaligned head and tail are handled separately, since the dst may share
	 * a cache line with data being changed by another process: we must not
	 * invalidate those lines, and we cannot flush them either, since another
	 * process (or the exception handler) might fetch the line before we copy it.
	 */

	/*
	 * Ok, start addr is not cache line-aligned, so we need to make it so.
	 */
	unaligned_to = (u32)to & 31;
	if(unaligned_to)
	{
		DPRINTK("Fixing up starting address %d bytes\n", 32 - unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user(to, from, 32 - unaligned_to)) 
			goto free_channels; 
		}
		else
		{
		    if(__arch_copy_from_user(to, from, 32 - unaligned_to)) 
			goto free_channels;
		}

		temp = (u32)to + (32 - unaligned_to);
		to = (void *)temp;
		temp = (u32)from + (32 - unaligned_to);
		from = (void *)temp;

		/* n is assumed to be greater than 32 bytes at this point */
		n -= (32 - unaligned_to);
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	unaligned_to = ((u32)to + n) & 31;
	if(unaligned_to)
	{	
		u32 tmp_to = (u32)to + (n - unaligned_to);
		u32 tmp_from = (u32)from + (n - unaligned_to);
		DPRINTK("Fixing ending alignment %d bytes\n", unaligned_to);

		if(to_user)
		{
		    if(__arch_copy_to_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto free_channels;
		}
		else
		{
		    if(__arch_copy_from_user((void *)tmp_to, (void *)tmp_from, unaligned_to))
			goto free_channels;
		}

		/* n is assumed to be greater than 32 bytes at this point */
		n -= unaligned_to;
	}

	if (to_user)
	{
		uaddr = (unsigned long)to;
		kaddr = (unsigned long)from;
	}
	else
	{
		uaddr = (unsigned long)from;
		kaddr = (unsigned long)to;
	}
	if (virt_addr_valid(kaddr))
	{
		k_chunk = n;
	}
	else
	{
		DPRINTK("kernel address is not linear, fall back\n");
		goto free_channels;
	}
         
	spin_lock_irqsave(&current->mm->page_table_lock, flags);

	i = 0;
	while(n > 0)
	{
	    if (k_chunk == 0)
	    {
		/* virtual address */
		k_chunk = page_remainder((u32)kaddr);
		DPRINTK("kaddr remainder %d \n", k_chunk);
	    }

	    if (u_chunk == 0)
	    {
		u_chunk = page_remainder((u32)uaddr);
		DPRINTK("uaddr remainder %d \n", u_chunk);
	    }
        
            chunk = ((u_chunk < k_chunk) ? u_chunk : k_chunk);
            if(n < chunk)
	    {
		chunk = n;
	    }

	    if(chunk == 0)
	    {
	    	break;
	    }
            phys_from = physical_address((u32)from, 0);
            phys_to = physical_address((u32)to, 1);
	    DPRINTK("choose chunk %d \n",chunk);
	    /* if page doesn't exist go out */
	    if ((!phys_from) || (!phys_to))
	    {
		/* The requested page isn't available, fall back to CPU copy */
		DPRINTK(" no physical address, fall back: from %p , to %p \n", from, to);
		goto unlock_dma;
   
	    }
	    /*
	     *  Prepare the IDMA.
	     */
            if (chunk < XOR_MIN_COPY_CHUNK)
            {
                int last_chan = chan1;
                DPRINTK(" chunk %d too small , use memcpy \n", chunk);

                if (current_channel == chan1)
                {
                    last_chan = chan2;
                }
                /* The "to" address might cross a cache line boundary, so part of the */
                /* line may be subject to DMA; wait for the last DMA engine to finish */
                if (index > 0)
                    xor_waiton_eng(last_chan);

                if(to_user) 
		{
	       	    if(__arch_copy_to_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto unlock_dma;
		    }
		}
	        else
		{
	            if(__arch_copy_from_user((void *)to, (void *)from, chunk)) {
			printk("ERROR: %s %d shouldn't happen\n",__FUNCTION__, __LINE__);	
			goto unlock_dma;	
		    }
		}
            }
            else
	    {
		
		    /* 
		    * Ensure that the cache is clean:
		    *      - from range must be cleaned
		    *      - to range must be invalidated
		    */
//		    mvOsCacheFlush(NULL, (void *)from, chunk);
		    dmac_flush_range(from, from + chunk);
		    //	    mvOsCacheInvalidate(NULL, (void *)to, chunk);
		    dmac_inv_range(to, to + chunk);
		    if(index > 0)
		    {
			xor_waiton_eng(current_channel);
		    }
		    channel = &xor_channel[current_channel];
  
		    /* Start DMA */
		    DPRINTK(" activate DMA: channel %d from %x to %x len %x\n",
                            current_channel, phys_from, phys_to, chunk);
		    channel->pDescriptor->srcAdd0 = phys_from;
		    channel->pDescriptor->phyDestAdd = phys_to;
		    channel->pDescriptor->byteCnt = chunk;
		    channel->pDescriptor->phyNextDescPtr = 0;
		    channel->pDescriptor->status = BIT31;
		    channel->chan_active = 1;

		    if( mvXorTransfer(current_channel, MV_DMA, channel->descPhyAddr) != MV_OK)
		    {
			printk(KERN_ERR "%s: DMA copy operation on channel %d failed!\n", __func__, current_channel);
			print_xor_regs(current_channel);
			BUG();
		    }
                
		    if(current_channel == chan1) 
		    {
			current_channel = chan2;
                    }
		    else
		    {
			current_channel = chan1;
		    }
#ifdef RT_DEBUG
			dma_activations++;
#endif
			index++;
		    
		}

		/* go to next chunk */
		from += chunk;
		to += chunk;
                kaddr += chunk;
                uaddr += chunk;
		n -= chunk;
		u_chunk -= chunk;
		k_chunk -= chunk;		
	}
unlock_dma:
        xor_waiton_eng(chan1);
        xor_waiton_eng(chan2);
        spin_unlock_irqrestore(&current->mm->page_table_lock, flags);
free_channels:
        free_channel(&xor_channel[chan1]);
        free_channel(&xor_channel[chan2]);

exit_dma:        
        DPRINTK("xor_dma_copy(0x%x, 0x%x, %lu): exiting\n", (u32) to,
                (u32) from, n);
       
        if (n != 0)
        {
            if (to_user)
                return __arch_copy_to_user((void *)to, (void *)from, n);
            else
                return __arch_copy_from_user((void *)to, (void *)from, n);
        }
        return 0;
}
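
page_remainder() is not shown in this listing; from its use above it must return the number of bytes from an address to the end of its page. A plausible one-line definition, inferred from usage and the standard kernel PAGE_SIZE/PAGE_MASK macros (an assumption, not the listing's actual code):

	/* Bytes left in the page containing virt. */
	static inline u32 page_remainder(u32 virt)
	{
		return PAGE_SIZE - (virt & ~PAGE_MASK);
	}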
Example #5
/*=======================================================================*/
void *xor_memcpy(void *to, const void *from, __kernel_size_t n)
{
	u32 xor_dma_unaligned_to, xor_dma_unaligned_from;
	void *orig_to = to;
	u32 to_pa, from_pa;
	int ua = 0;
	int chan;
	struct xor_channel_t *channel;

	DPRINTK("xor_memcpy(0x%x, 0x%x, %lu): entering\n", (u32) to, (u32) from,
		(unsigned long)n);

	if (xor_engine_initialized == 0)
	{
		DPRINTK(KERN_WARNING" %s: xor engines not initialized yet\n", __func__);
		xor_dma_miss++;
		return asm_memmove(to, from, n);
	}
	if (!(virt_addr_valid((u32) to) && virt_addr_valid((u32) from))) {
		DPRINTK("xor_memcpy(0x%x, 0x%x, %lu): falling back to memcpy\n",
			(u32) to, (u32) from, (unsigned long)n);
		xor_dma_miss++;
		return asm_memmove(to, from, n);
	}

	/*
	 * We can only handle completely cache-line-aligned transactions
	 * with the DMA engine: source and dst must be cache-line
	 * aligned AND the length must be a multiple of the cache line size.
	 */

	to_pa = virt_to_phys(to);
	from_pa = virt_to_phys((void*)from);

	if (((to_pa + n > from_pa) && (to_pa < from_pa)) ||
	    ((from_pa < to_pa) && (from_pa + n > to_pa))) {
		DPRINTK("overlapping copy region (0x%x, 0x%x, %lu), falling back\n",
		     to_pa, from_pa, (unsigned long)n);
		xor_dma_miss++;
		return asm_memmove(to, from, n);
	}
	/*
	 * Ok, start addr is not cache line-aligned, so we need to make it so.
	 */
	xor_dma_unaligned_to = (u32) to & 31;
	xor_dma_unaligned_from = (u32) from & 31;
	if (xor_dma_unaligned_to | xor_dma_unaligned_from) {
		ua++;
		if (xor_dma_unaligned_from > xor_dma_unaligned_to) {
			asm_memmove(to, from, 32 - xor_dma_unaligned_to);
			to = (void *)((u32)to + 32 - xor_dma_unaligned_to);
			from = (void *)((u32)from + 32 - xor_dma_unaligned_to);
			n -= 32 - xor_dma_unaligned_to;
		} else {
			asm_memmove(to, from, 32 - xor_dma_unaligned_from);
			to = (void *)((u32)to + 32 - xor_dma_unaligned_from);
			from = (void *)((u32)from + 32 - xor_dma_unaligned_from);
			n -= 32 - xor_dma_unaligned_from;
		}
	}

	/*
	 * Ok, we're aligned at the top, now let's check the end
	 * of the buffer and align that. After this we should have
	 * a block that is a multiple of cache line size.
	 */
	xor_dma_unaligned_to = ((u32) to + n) & 31;
	xor_dma_unaligned_from = ((u32) from + n) & 31;
	if (xor_dma_unaligned_to | xor_dma_unaligned_from) {
		ua++;
		if (xor_dma_unaligned_to > xor_dma_unaligned_from) {
			u32 tmp_to = (u32) to + n - xor_dma_unaligned_to;
			u32 tmp_from = (u32) from + n - xor_dma_unaligned_to;

			asm_memmove((void *)tmp_to, (void *)tmp_from,
				   xor_dma_unaligned_to);

			n -= xor_dma_unaligned_to;
		} else {
			u32 tmp_to = (u32) to + n - xor_dma_unaligned_from;
			u32 tmp_from = (u32) from + n - xor_dma_unaligned_from;

			asm_memmove((void *)tmp_to, (void *)tmp_from,
				   xor_dma_unaligned_from);

			n -= xor_dma_unaligned_from;
		}
	}

	/*
	 * OK! We should now be fully aligned on both ends.
	 */
	chan = allocate_channel();
	if (chan == -1)
	{
		DPRINTK("XOR engines are busy, return\n");
		xor_dma_miss++;
		return asm_memmove(to, from, n);
	}
	DPRINTK("setting up rest of descriptor for channel %d\n", chan);
	channel = &xor_channel[chan];
	
	/* Clean the source range so the engine reads current data, and
	 * invalidate the destination so stale lines are not written back
	 * over the DMA result */
	dmac_clean_range(from, from + n);
	dmac_inv_range(to, to + n);

	DPRINTK("setting up rest of descriptor\n");
	// flush the cache to memory before XOR engine touches them
        channel->pDescriptor->srcAdd0 = virt_to_phys((void*)from);
	channel->pDescriptor->phyDestAdd = virt_to_phys(to);
        channel->pDescriptor->byteCnt = n;
        channel->pDescriptor->phyNextDescPtr = 0;
        channel->pDescriptor->status = BIT31;
	channel->chan_active = 1;

	if (mvXorTransfer(chan, MV_DMA, channel->descPhyAddr) != MV_OK)
	{
		printk(KERN_ERR "%s: DMA copy operation on channel %d failed!\n", __func__, chan);
		print_xor_regs(chan);
		BUG();
		free_channel(channel);		/* not reached: BUG() does not return */
		return asm_memmove(to, from, n);
	}
	xor_waiton_eng(chan);


        DPRINTK("DMA copy complete\n");
	// check to see if failed
#if 0
	if (!(channel->pDescriptor->status & BIT30))
        {
            printk(KERN_ERR "%s: DMA copy operation completed with error!\n", __func__);
            printk(" srcAdd %x DestAddr %x, count %x\n", channel->pDescriptor->srcAdd0,
                                                channel->pDescriptor->phyDestAdd, n); 
            print_xor_regs(chan);            
	    BUG();
            free_channel(channel);
       	    return asm_memmove(to, from, n);
        }
#endif
        free_channel(channel);
 
	xor_dma_hit++;
	if (ua)
		xor_dma_unaligned++;

	return orig_to;
}
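
The overlap test near the top of xor_memcpy() is easier to read as an interval-intersection predicate. A standalone restatement of the same logic (note that identical start addresses count as non-overlapping, exactly as in the expanded test above):

	/* Distinct ranges [a, a+n) and [b, b+n) intersect iff the lower one
	 * extends past the start of the higher one. */
	static inline int ranges_overlap(u32 a, u32 b, u32 n)
	{
		if (a == b)
			return 0;
		return (a < b) ? (a + n > b) : (b + n > a);
	}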
Example #6
int xor_mv(unsigned int src_no, unsigned int bytes, void **bh_ptr)
{
	void *bptr = NULL;
	int i;
	u32 *srcAddr;
	int chan;
	struct xor_channel_t *channel;

	if (src_no <= 1)
	{
		printk(KERN_ERR "%s: need more than 1 src for XOR\n",
			__func__);
		BUG();
		return bytes;
	}
	if (xor_engine_initialized == 0)
	{
		printk(KERN_WARNING" %s: xor engines not initialized yet\n", __func__);
		return bytes;
	}

	chan = allocate_channel();
	if (chan == -1)
	{
		DPRINTK("XOR engines are busy, return\n");
		return bytes;
	}
	DPRINTK("setting up rest of descriptor for channel %d\n", chan);
	channel = &xor_channel[chan];
	// flush the cache to memory before the XOR engine touches the buffers
	srcAddr = &(channel->pDescriptor->srcAdd0);
	for (i = src_no-1; i >= 0; i--)
	{
		DPRINTK("flushing source %d\n", i);
		bptr = (bh_ptr[i]);
		/* Buffer 0 is also the destination */
		if (i == 0)
			dmac_flush_range(bptr, bptr + bytes);
		else
			dmac_clean_range(bptr, bptr + bytes);
		srcAddr[i] = virt_to_phys(bh_ptr[i]);
	}

	channel->pDescriptor->phyDestAdd = virt_to_phys(bh_ptr[0]);
	channel->pDescriptor->byteCnt = bytes;
	channel->pDescriptor->phyNextDescPtr = 0;
	channel->pDescriptor->descCommand = (1 << src_no) - 1;
	channel->pDescriptor->status = BIT31;
	channel->chan_active = 1;
	if (mvXorTransfer(chan, MV_XOR, channel->descPhyAddr) != MV_OK)
	{
		printk(KERN_ERR "%s: XOR operation on channel %d failed!\n", __func__, chan);
		print_xor_regs(chan);
		BUG();
		free_channel(channel);		/* not reached: BUG() does not return */
		return bytes;
	}
#ifdef CONFIG_ENABLE_XOR_INTERRUPTS
	wait_event(channel->waitq, ((channel->pDescriptor->status & BIT31) == 0)); /* TODO: add timeout */
#else
        xor_waiton_eng(chan);
#endif
	DPRINTK("XOR complete\n");
#if 0
	if (!(channel->pDescriptor->status & BIT30)) {
	    printk(KERN_ERR "%s: XOR operation completed with error!\n", __func__);
            print_xor_regs(chan);            
	    BUG();
            free_channel(channel);
	    return MV_REG_READ(XOR_BYTE_COUNT_REG(chan));
        }
#endif
	DPRINTK("invalidate result in cache\n");
#if 0
	// invalidate the cache region to destination
        bptr = (bh_ptr[0]);
	dmac_inv_range(bptr, bptr + bytes);
#endif
	free_channel(channel);
	xor_hit++;
	return 0;
}
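
Taken together, Examples #3 through #6 use allocate_channel() in one fixed pattern: claim an engine, program its descriptor, kick the transfer, poll, release. A condensed skeleton of that pattern, using the names from the examples above (fallback() is a hypothetical placeholder for whichever CPU path each caller uses):

	int chan = allocate_channel();		/* returns -1 when every engine is busy */
	if (chan == -1)
		return fallback();		/* hypothetical CPU fallback */
	channel = &xor_channel[chan];
	/* ... fill channel->pDescriptor: source(s), destination, byteCnt ... */
	channel->pDescriptor->status = BIT31;	/* mark the descriptor pending */
	channel->chan_active = 1;
	if (mvXorTransfer(chan, MV_DMA, channel->descPhyAddr) != MV_OK) {
		free_channel(channel);
		return fallback();
	}
	xor_waiton_eng(chan);			/* busy-wait until the engine is idle */
	free_channel(channel);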