Example #1
0
/*
 * This function frees all buffers
 */
static void audio_clear_buf(audio_stream_t * s)
{
	DECLARE_WAITQUEUE(wait, current);
	int frag;

	if (!s->buffers)
		return;

	/* Ensure DMA isn't running */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&s->stop_wq, &wait);
	DCSR(s->dma_ch) = DCSR_STOPIRQEN;
	schedule();
	remove_wait_queue(&s->stop_wq, &wait);

	/* free DMA buffers */
	for (frag = 0; frag < s->nbfrags; frag++) {
		audio_buf_t *b = &s->buffers[frag];
		if (!b->master)
			continue;
		consistent_free(b->data, b->master, b->dma_desc->dsadr);
	}

	/* free descriptor ring */
	if (s->buffers->dma_desc)
		consistent_free(s->buffers->dma_desc, 
				s->nbfrags * s->descs_per_frag * DMA_DESC_SIZE,
				s->dma_desc_phys);

	/* free buffer structure array */
	kfree(s->buffers);
	s->buffers = NULL;
}
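For contrast with the teardown above, a minimal sketch of the per-fragment allocation side, assuming the three-argument consistent_alloc() form used in Example #12 and the data/master/dma_addr bookkeeping fields seen in Example #9; the helper name is hypothetical, not taken from the driver.

/* Hypothetical allocation counterpart (sketch only): allocate one coherent
 * fragment.  consistent_alloc() returns a kernel virtual address and fills
 * in the bus address that the DMA descriptor will point at. */
static int audio_alloc_one_frag(audio_buf_t *b, size_t fragsize)
{
	b->data = consistent_alloc(GFP_KERNEL | GFP_DMA, fragsize, &b->dma_addr);
	if (!b->data)
		return -ENOMEM;
	b->master = fragsize;	/* size recorded so audio_clear_buf() can free it */
	return 0;
}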
Example #2
0
static void audio_clear_buf(audio_stream_t * s)
{
	DPRINTK("audio_clear_buf\n");

	/* ensure DMA won't run anymore */
	s->active = 0;
	s->stopped = 0;
	sa1100_dma_flush_all(s->dma_ch);

	if (s->buffers) {
		int frag;
		for (frag = 0; frag < s->nbfrags; frag++) {
			if (!s->buffers[frag].master)
				continue;
			consistent_free(s->buffers[frag].start,
					s->buffers[frag].master,
					s->buffers[frag].dma_addr);
		}
		kfree(s->buffers);
		s->buffers = NULL;
	}

	s->buf_idx = 0;
	s->buf = NULL;
}
Example #3
0
static void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
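Since the free path above switches on NOT_COHERENT_CACHE, a hedged sketch of the matching allocation wrapper may help; the consistent_alloc() prototype and the use of virt_to_phys() in the cache-coherent branch are assumptions, not quoted from this port.

/* Sketch of the allocation counterpart, assuming consistent_alloc(flag, size,
 * &handle) is available when the cache is not DMA-coherent.  On coherent
 * hardware, plain pages plus the physical address are sufficient. */
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret = (void *)__get_free_pages(flag, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
#endif
}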
Example #4
0
static int pxa250_irda_stop(struct net_device *dev)
{
	struct pxa250_irda *si = dev->priv;
	
	printk(KERN_ERR "Irda stop... RX = %d TX = %d\n",rx_count,tx_count);

	disable_irq(dev->irq);
  	disable_irq(si->fir_irq); 
/*  	pxa250_irda_shutdown(si); */

	/*
	 * If we have been doing DMA receive, make sure we
	 * tidy that up cleanly.
	 */
	if (si->rxskb) {
		dev_kfree_skb(si->rxskb);
		si->rxskb = NULL;
	}

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	consistent_free(si->txbuf_dma_virt, HPSIR_MAX_TXLEN, si->txbuf_dma);
	consistent_free(si->rxbuf_dma_virt, HPSIR_MAX_RXLEN, si->rxbuf_dma);
	pxa_free_dma(si->txdma_ch);
	pxa_free_dma(si->rxdma_ch);

	netif_stop_queue(dev);
	si->open = 0;

	/*
	 * Free resources
	 */
	free_irq(dev->irq, dev);
	free_irq(si->fir_irq, dev);


	MOD_DEC_USE_COUNT;

	return 0;
}
Example #5
0
static int camif_demalloc(camif_cfg_t *cfg)
{
	__D("\n");
#if defined(P_DEDICATED_MEM) || defined(C_DEDICATED_MEM)
	iounmap(cfg->pp_virt_buf);
	cfg->pp_virt_buf = NULL;
#else
	if (cfg->pp_virt_buf) {
		consistent_free(cfg->pp_virt_buf, cfg->pp_totalsize, cfg->pp_phys_buf);
		cfg->pp_virt_buf = NULL;
	}
#endif
	return 0;
}
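A hedged sketch of the allocation side this teardown implies: with dedicated memory the reserved region is only remapped, otherwise a DMA-coherent buffer is allocated.  Apart from the cfg fields already used above, everything here (the helper name, the field types) is an assumption.

/* Sketch only: mirrors camif_demalloc() above.  Assumes pp_phys_buf holds the
 * physical/bus address and pp_totalsize the mapping length. */
static int camif_malloc_sketch(camif_cfg_t *cfg)
{
#if defined(P_DEDICATED_MEM) || defined(C_DEDICATED_MEM)
	/* Reserved region: map it; camif_demalloc() will iounmap() it. */
	cfg->pp_virt_buf = ioremap(cfg->pp_phys_buf, cfg->pp_totalsize);
#else
	/* Coherent buffer: camif_demalloc() releases it with consistent_free(). */
	cfg->pp_virt_buf = consistent_alloc(GFP_KERNEL | GFP_DMA,
					    cfg->pp_totalsize,
					    &cfg->pp_phys_buf);
#endif
	return cfg->pp_virt_buf ? 0 : -ENOMEM;
}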
Example #6
0
static void __exit test_mcspi_exit(void)
{

	spi_unregister_driver(&spitst_spi);
	if (test_mcspi_smp)
		spi_unregister_driver(&spitst_spi2);

	remove_proc_file_entries();

	if (buffers_allocated == 1) {
		if (test_mcspi_smp) {
	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 00))
		consistent_free((void *)spi2_rx_buf_dma_virt2, buffer_size,
				spi2_rx_buf_dma_phys2);
		consistent_free((void *)spi2_tx_buf_dma_virt2, buffer_size,
				spi2_tx_buf_dma_phys2);
	#else
		dma_free_coherent(NULL, buffer_size,
			(void *)spi2_rx_buf_dma_virt2, spi2_rx_buf_dma_phys2);
		dma_free_coherent(NULL, buffer_size,
			(void *)spi2_tx_buf_dma_virt2, spi2_tx_buf_dma_phys2);
	#endif
		}
	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 00))
		consistent_free((void *)spi2_rx_buf_dma_virt2, buffer_size,
				spi2_rx_buf_dma_phys2);
		consistent_free((void *)spi2_tx_buf_dma_virt2, buffer_size,
				spi2_tx_buf_dma_phys2);
	#else

		dma_free_coherent(NULL, buffer_size,
			(void *)spi2_rx_buf_dma_virt2, spi2_rx_buf_dma_phys2);
		dma_free_coherent(NULL, buffer_size,
			(void *)spi2_tx_buf_dma_virt2, spi2_tx_buf_dma_phys2);
	#endif
	}
	return;
}
Example #7
0
static void omap_mmc_slot_cleanup(void)
{
	unsigned long flags;

	local_irq_save(flags);

	omap_mmc_slot_down();

	free_irq(INT_FPGA_CD, &g_omap_mmc_data);
	free_irq(INT_MMC, &g_omap_mmc_data);

	/* Free DMA buffers */
	consistent_free(g_omap_mmc_data.buf_dma_virt, 2048, g_omap_mmc_data.buf_dma_phys);

	local_irq_restore(flags);
}
Example #8
0
/*
 * Destroy a scatter/gather list handle that was created by alloc_dma_handle().
 * The list must be empty (contain no elements).
 */
void
ppc4xx_free_dma_handle(sgl_handle_t handle)
{
	sgl_list_info_t *psgl = (sgl_list_info_t *) handle;

	if (!handle) {
		printk("ppc4xx_free_dma_handle: got NULL\n");
		return;
	} else if (psgl->phead) {
		printk("ppc4xx_free_dma_handle: list not empty\n");
		return;
	} else if (!psgl->dma_addr) {	/* should never happen */
		printk("ppc4xx_free_dma_handle: no dma address\n");
		return;
	}

	consistent_free((void *) psgl);
}
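The comment above only states the empty-list precondition, so a caller-side sketch follows.  The ppc4xx_alloc_dma_handle() prototype and the DMA mode constant are assumptions, not quoted from the 4xx sgdma code.

/* Illustrative pairing only: allocate a handle, run a transfer until the
 * internal element list has drained, then release the handle. */
static int sgl_roundtrip(unsigned int dmanr)
{
	sgl_handle_t handle = NULL;

	if (ppc4xx_alloc_dma_handle(&handle, DMA_MODE_READ, dmanr) != 0)
		return -ENOMEM;

	/* ... add scatter/gather elements, start the transfer, and wait for
	 * completion so that the list is empty again ... */

	ppc4xx_free_dma_handle(handle);	/* rejects NULL and non-empty handles */
	return 0;
}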
Example #9
0
static void audio_discard_buf(audio_stream_t * s)
{
	DPRINTK("audio_discard_buf\n");

	/* ensure DMA isn't using those buffers */
	audio_reset(s);

	if (s->buffers) {
		int frag;
		for (frag = 0; frag < s->nbfrags; frag++) {
			if (!s->buffers[frag].master)
				continue;
			consistent_free(s->buffers[frag].data,
					s->buffers[frag].master,
					s->buffers[frag].dma_addr);
		}
		kfree(s->buffers);
		s->buffers = NULL;
	}
}
Example #10
0
/*
 * Free the head of our info_list and remove it from the list.  Note
 * that this function does *not* unregister_framebuffer().  That is up
 * to the caller to do if it is appropriate.
 */
static void
remove_head_info(void)
{
	struct xilinxfb_info *i;

	/* Pull the head off of info_list. */
	spin_lock(&info_lock);
	i = info_list;
	info_list = i->next;
	spin_unlock(&info_lock);

	if (i->regs) {
		/* Turn off the display; the frame buffer is going away. */
		out_be32(i->regs + REG_CTRL, 0);
		iounmap(i->regs);
	}

	if (i->fb_virt_start)
		consistent_free((void *)i->fb_virt_start);

	kfree(i);
}
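Because the comment stresses that remove_head_info() does not unregister the framebuffer itself, a hypothetical caller is sketched below; the fb_info member name inside struct xilinxfb_info is an assumption.

/* Hypothetical module-exit loop (sketch): unregister each framebuffer first,
 * then let remove_head_info() unmap the registers and free the DMA framebuffer.
 * At module exit there are no concurrent users, so reading info_list without
 * the lock is tolerable for illustration.  ->fb_info is an assumed name. */
static void __exit xilinxfb_cleanup(void)
{
	while (info_list) {
		unregister_framebuffer(&info_list->fb_info);
		remove_head_info();
	}
}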
Example #11
0
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	consistent_free(vaddr);
}
Example #12
0
static int pxa250_irda_start(struct net_device *dev)
{
	struct pxa250_irda *si = dev->priv;
	int err;
	unsigned long flags;
	

	MOD_INC_USE_COUNT;

	__ECHO_IN;
	si->speed = 9600;

	local_irq_save(flags);
	
	err = request_irq(si->fir_irq, pxa250_irda_fir_irq, 0,  dev->name, dev);
	if (err)
		goto err_fir_irq;

	err = request_irq(dev->irq, pxa250_irda_irq, 0, dev->name, dev);
	if (err)
		goto err_irq;

	/*
	 * The interrupt must remain disabled for now.
	 */
	
	disable_irq(dev->irq);
  	disable_irq(si->fir_irq);

	local_irq_restore(flags);


	/* Allocate DMA channel for receiver (not used) */
	err = pxa_request_dma("IrDA receive", DMA_PRIO_LOW, pxa250_irda_rxdma_irq, dev);
	if (err < 0)
		goto err_rx_dma;
	si->rxdma_ch = err;

	DRCMRRXICDR = DRCMR_MAPVLD | si->rxdma_ch;
	

	/* Allocate DMA channel for transmit */
	err = pxa_request_dma("IrDA transmit", DMA_PRIO_LOW, pxa250_irda_txdma_irq , dev);
	if (err < 0 )
	   goto err_tx_dma;

	si->txdma_ch=err;

	/*
	 * Make sure that ICP will be able 
	 * to assert the transmit dma request bit
	 * through the peripherals request bus (PREQ)
	 */
	
	DRCMRTXICDR = DRCMR_MAPVLD | si->txdma_ch;

	DBG("rx(not used) channel=%d tx channel=%d\n",si->rxdma_ch,si->txdma_ch);
	
	/*
	 * Allocate consistent buffers for DMA access.  The buffers must be
	 * aligned and located in a DMA-capable memory region.
	 */
	si->rxbuf_dma_virt = consistent_alloc(GFP_KERNEL | GFP_DMA, HPSIR_MAX_RXLEN, &si->rxbuf_dma);
	if (!si->rxbuf_dma_virt)
		goto err_rxbuf_dma;

	si->txbuf_dma_virt = consistent_alloc(GFP_KERNEL | GFP_DMA, HPSIR_MAX_TXLEN, &si->txbuf_dma);
	if (!si->txbuf_dma_virt)
		goto err_txbuf_dma;

	/* Allocate skb for receiver */
	err = pxa250_irda_rx_alloc(si);
	if (err)
		goto err_rx_alloc;
	
	/*
	 * Setup the serial port for the specified config.
	 */
	err = pxa250_irda_startup(dev);
	if (err)
		goto err_startup;

	pxa250_irda_set_speed(dev,si->speed = 9600);


	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa250");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	si->open = 1;
	enable_irq(dev->irq);
	netif_start_queue(dev);
	return 0;

err_irlap:
	si->open = 0;
	pxa250_sir_irda_shutdown(si);
err_startup:
	dev_kfree_skb(si->rxskb);
err_rx_alloc:
	consistent_free(si->txbuf_dma_virt, HPSIR_MAX_TXLEN, si->txbuf_dma);
err_txbuf_dma:
	consistent_free(si->rxbuf_dma_virt, HPSIR_MAX_RXLEN, si->rxbuf_dma);
err_rxbuf_dma:
	pxa_free_dma(si->txdma_ch);
err_tx_dma:
	pxa_free_dma(si->rxdma_ch);
err_rx_dma:
	free_irq(dev->irq, dev);
err_irq:
	free_irq(si->fir_irq, dev);
err_fir_irq:	
	MOD_DEC_USE_COUNT;
	return err;
}
Example #13
0
void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(vaddr);
}
Example #14
0
void
dma_free_coherent(void *p, int size, void *ptr, dma_addr_t phys)
{
    consistent_free(ptr, size, phys);
}