Exemplo n.º 1
0
/*
 * Claim all Core B resources (SRAM regions, MDMA stream 2 channels),
 * register the misc device and the /proc/coreb status entry.
 * Returns 0 on success, -ENOMEM if any resource cannot be acquired;
 * acquired resources are unwound in reverse order on failure.
 */
int __init bf561_coreb_init(void)
{
	init_waitqueue_head(&coreb_dma_wait);

	/* Request the core memory regions for Core B */
	if (request_mem_region(0xff600000, 0x4000,
		"Core B - Instruction SRAM") == NULL)
		goto exit;

	if (request_mem_region(0xFF610000, 0x4000,
		"Core B - Instruction SRAM") == NULL)
		goto release_instruction_a_sram;

	if (request_mem_region(0xFF500000, 0x8000,
		"Core B - Data Bank B SRAM") == NULL)
		goto release_instruction_b_sram;

	if (request_mem_region(0xff400000, 0x8000,
		"Core B - Data Bank A SRAM") == NULL)
		goto release_data_b_sram;

	if (request_dma(CH_MEM_STREAM2_DEST, "Core B - DMA Destination") < 0)
		goto release_data_a_sram;

	if (request_dma(CH_MEM_STREAM2_SRC, "Core B - DMA Source") < 0)
		goto release_dma_dest;

	set_dma_callback(CH_MEM_STREAM2_DEST, coreb_dma_interrupt, NULL);

	/*
	 * Fix: the return value of misc_register() was ignored, and the
	 * misc device was never deregistered when the /proc entry could
	 * not be created.
	 */
	if (misc_register(&coreb_dev) != 0)
		goto release_dma_src;

	printk(KERN_INFO "Core B: Initializing /proc\n");
	coreb_proc_entry = create_proc_entry("coreb", 0, NULL);
	if (coreb_proc_entry)
	{
		coreb_proc_entry->owner = THIS_MODULE;
		coreb_proc_entry->read_proc = coreb_read_status;
	} else {
		printk(KERN_ERR "Core B: Unable to register /proc/coreb\n");
		goto deregister_misc;
	}
	printk(KERN_INFO "BF561 Core B driver %s initialized.\n", MODULE_VER);
	return 0;

deregister_misc:
	misc_deregister(&coreb_dev);
release_dma_src:
	free_dma(CH_MEM_STREAM2_SRC);
release_dma_dest:
	free_dma(CH_MEM_STREAM2_DEST);
release_data_a_sram:
	release_mem_region(0xff400000, 0x8000);
release_data_b_sram:
	release_mem_region(0xff500000, 0x8000);
release_instruction_b_sram:
	release_mem_region(0xff610000, 0x4000);
release_instruction_a_sram:
	release_mem_region(0xff600000, 0x4000);
exit:
	return -ENOMEM;
}
Exemplo n.º 2
0
/* Request needed resources */
static int znet_request_resources (struct net_device *dev)
{
	struct znet_private *znet = netdev_priv(dev);

	if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev))
		goto failed;
	if (request_dma (znet->rx_dma, "ZNet rx"))
		goto free_irq;
	if (request_dma (znet->tx_dma, "ZNet tx"))
		goto free_rx_dma;
	if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA"))
		goto free_tx_dma;
	if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O"))
		goto free_sia;

	return 0;				/* Happy ! */

 free_sia:
	release_region (znet->sia_base, znet->sia_size);
 free_tx_dma:
	free_dma (znet->tx_dma);
 free_rx_dma:
	free_dma (znet->rx_dma);
 free_irq:
	free_irq (dev->irq, dev);
 failed:
	return -1;
}
Exemplo n.º 3
0
/*
 * Early init for the Blackfin DMA controller: reset per-channel
 * bookkeeping, map each channel to its register block, and reserve the
 * MDMA stream 0 pair for internal dma_memcpy() use.
 */
static int __init blackfin_dma_init(void)
{
	int i;

	printk(KERN_INFO "Blackfin DMA Controller\n");


#if ANOMALY_05000480
	bfin_write_DMAC_TC_PER(0x0111);
#endif

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		atomic_set(&dma_ch[i].chan_status, 0);
		dma_ch[i].regs = dma_io_base_addr[i];
	}

	/*
	 * Fix: the request_dma() results were silently ignored.  At early
	 * init these channels should always be free, but report it loudly
	 * if the reservation ever fails.
	 */
	if (request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy") < 0 ||
	    request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy") < 0)
		printk(KERN_WARNING "blackfin_dma_init: unable to reserve MDMA stream 0 for dma_memcpy\n");

#if defined(CONFIG_DEB_DMA_URGENT)
	bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
			 | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
#endif

	return 0;
}
Exemplo n.º 4
0
/*
 * Bring up one SIR port: claim both UART DMA channels, then either set
 * up the 2D RX DMA machinery (CONFIG_SIR_BFIN_DMA) or attach the RX/TX
 * interrupt handlers.  Returns 0 on success or a negative errno; all
 * resources acquired here are released on failure.
 */
static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
{
#ifdef CONFIG_SIR_BFIN_DMA
	dma_addr_t dma_handle;
#endif /* CONFIG_SIR_BFIN_DMA */

	if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n");
		return -EBUSY;
	}

	if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) {
		dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n");
		free_dma(port->rx_dma_channel);
		return -EBUSY;
	}

#ifdef CONFIG_SIR_BFIN_DMA

	set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
	set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);

	port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
						  &dma_handle, GFP_DMA);
	/* Fix: the allocation result was never checked */
	if (!port->rx_dma_buf.buf) {
		dev_warn(&dev->dev, "Unable to allocate SIR RX DMA buffer\n");
		free_dma(port->tx_dma_channel);
		free_dma(port->rx_dma_channel);
		return -ENOMEM;
	}
	port->rx_dma_buf.head = 0;
	port->rx_dma_buf.tail = 0;
	port->rx_dma_nrows = 0;

	/* 2D autobuffer: one interrupt per filled row of the RX buffer */
	set_dma_config(port->rx_dma_channel,
				set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO,
									INTR_ON_ROW, DIMENSION_2D,
									DATA_SIZE_8, DMA_SYNC_RESTART));
	set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT);
	set_dma_x_modify(port->rx_dma_channel, 1);
	set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT);
	set_dma_y_modify(port->rx_dma_channel, 1);
	set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
	enable_dma(port->rx_dma_channel);

	port->rx_dma_timer.data = (unsigned long)(dev);
	port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;

#else

	if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
		/* Fix: the DMA channels requested above were leaked here */
		free_dma(port->tx_dma_channel);
		free_dma(port->rx_dma_channel);
		return -EBUSY;
	}

	if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
		dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
		free_irq(port->irq, dev);
		/* Fix: the DMA channels requested above were leaked here */
		free_dma(port->tx_dma_channel);
		free_dma(port->rx_dma_channel);
		return -EBUSY;
	}
#endif

	return 0;
}
Exemplo n.º 5
0
int snd_ad1816a_create(struct snd_card *card,
		       unsigned long port, int irq, int dma1, int dma2,
		       struct snd_ad1816a *chip)
{
        static struct snd_device_ops ops = {
		.dev_free =	snd_ad1816a_dev_free,
	};
	int error;

	chip->irq = -1;
	chip->dma1 = -1;
	chip->dma2 = -1;

	if ((chip->res_port = request_region(port, 16, "AD1816A")) == NULL) {
		snd_printk(KERN_ERR "ad1816a: can't grab port 0x%lx\n", port);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	if (request_irq(irq, snd_ad1816a_interrupt, 0, "AD1816A", (void *) chip)) {
		snd_printk(KERN_ERR "ad1816a: can't grab IRQ %d\n", irq);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	chip->irq = irq;
	if (request_dma(dma1, "AD1816A - 1")) {
		snd_printk(KERN_ERR "ad1816a: can't grab DMA1 %d\n", dma1);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	chip->dma1 = dma1;
	if (request_dma(dma2, "AD1816A - 2")) {
		snd_printk(KERN_ERR "ad1816a: can't grab DMA2 %d\n", dma2);
		snd_ad1816a_free(chip);
		return -EBUSY;
	}
	chip->dma2 = dma2;

	chip->card = card;
	chip->port = port;
	spin_lock_init(&chip->lock);

	if ((error = snd_ad1816a_probe(chip))) {
		snd_ad1816a_free(chip);
		return error;
	}

	snd_ad1816a_init(chip);

	/* Register device */
	if ((error = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_ad1816a_free(chip);
		return error;
	}

	return 0;
}
Exemplo n.º 6
0
/*
 * Acquire all resources for one SPORT instance: the pin group, the TX
 * and RX DMA channels (with their callbacks), and the TX/RX error IRQs.
 * Unwinds everything already acquired on failure.  Labels are named
 * after what they release.
 */
static int sport_request_resource(struct sport_device *sport)
{
	struct device *dev = &sport->pdev->dev;
	int ret;

	ret = peripheral_request_list(sport->pin_req, "soc-audio");
	if (ret) {
		dev_err(dev, "Unable to request sport pin\n");
		return ret;
	}

	ret = request_dma(sport->tx_dma_chan, "SPORT TX Data");
	if (ret) {
		dev_err(dev, "Unable to allocate DMA channel for sport tx\n");
		goto err_free_pins;
	}
	set_dma_callback(sport->tx_dma_chan, sport_tx_irq, sport);

	ret = request_dma(sport->rx_dma_chan, "SPORT RX Data");
	if (ret) {
		dev_err(dev, "Unable to allocate DMA channel for sport rx\n");
		goto err_free_tx_dma;
	}
	set_dma_callback(sport->rx_dma_chan, sport_rx_irq, sport);

	ret = request_irq(sport->tx_err_irq, sport_err_irq,
			0, "SPORT TX ERROR", sport);
	if (ret) {
		dev_err(dev, "Unable to allocate tx error IRQ for sport\n");
		goto err_free_rx_dma;
	}

	ret = request_irq(sport->rx_err_irq, sport_err_irq,
			0, "SPORT RX ERROR", sport);
	if (ret) {
		dev_err(dev, "Unable to allocate rx error IRQ for sport\n");
		goto err_free_tx_err_irq;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
err_free_tx_err_irq:
	free_irq(sport->tx_err_irq, sport);
err_free_rx_dma:
	free_dma(sport->rx_dma_chan);
err_free_tx_dma:
	free_dma(sport->tx_dma_chan);
err_free_pins:
	peripheral_free_list(sport->pin_req);
	return ret;
}
Exemplo n.º 7
0
/* Open/initialize the board.  This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine should set everything up anew at each open, even
   registers that "should" only need to be set once at boot, so that
   there is non-reboot way to recover if something goes wrong.
   */
static int
net_open(struct device *dev)
{
	/* Per-device private state and the card's base I/O port. */
	struct net_local *lp = (struct net_local *)dev->priv;
	int ioaddr = dev->base_addr;

	/* This is used if the interrupt line can turned off (shared).
	   See 3c503.c for an example of selecting the IRQ at config-time. */
	/* NOTE(review): these are the legacy two-argument forms of
	   request_irq()/request_dma() (very old kernels); a nonzero
	   return means the line/channel is busy — confirm against the
	   kernel version this file targets. */
	if (request_irq(dev->irq, &net_interrupt)) {
		return -EAGAIN;
	}


	/* Always snarf a DMA channel after the IRQ. */
	if (request_dma(dev->dma)) {
		free_irq(dev->irq);
		return -EAGAIN;
	}
	/* Let the shared interrupt dispatcher find this device by IRQ. */
	irq2dev_map[dev->irq] = dev;

	/* Reset the hardware here. */
	/*chipset_init(dev, 1);*/
	outb(0x00, ioaddr);
	lp->open_time = jiffies;

	/* Pre-netif_start_queue era: mark the device idle and running. */
	dev->tbusy = 0;
	dev->interrupt = 0;
	dev->start = 1;
	return 0;
}
Exemplo n.º 8
0
/*
 * Set up the PPI DMA channel: claim it, allocate the 2D sample buffer,
 * and program the channel for autobuffered 16-bit writes with an
 * interrupt at the end of each row.  Returns 0 on success or a
 * negative errno; nothing is left claimed on failure.
 */
static int dma_init(void) {
    int ret;

    /* Claim the channel before touching any of its registers */
    ret = request_dma(CH_PPI, DRIVER_NAME);
    if (ret < 0) {
        printk(KERN_WARNING DRIVER_NAME ": Could not allocate DMA channel\n");
        return ret;
    }

    /* Keep the channel quiescent while it is being configured */
    disable_dma(CH_PPI);

    /* Backing pages the DMA engine will write into */
    dma_buffer = __get_dma_pages(GFP_KERNEL, page_alloc_order(BUFFER_SIZE * BUFFER_COUNT));
    if (!dma_buffer) {
        printk(KERN_WARNING DRIVER_NAME ": Could not allocate dma_pages\n");
        free_dma(CH_PPI);
        return -ENOMEM;
    }

    /* Invalid caching on the DMA buffer */
    invalidate_dcache_range(dma_buffer, dma_buffer + (BUFFER_SIZE * BUFFER_COUNT));

    /* Program the channel: 2D autobuffer, 16-bit, interrupt per row */
    set_dma_start_addr(CH_PPI, dma_buffer);
    set_dma_config(CH_PPI, (DMAFLOW_AUTO | WNR | RESTART | DI_EN | WDSIZE_16 | DMA2D | DI_SEL));
    set_dma_x_count(CH_PPI, SAMPLES_PER_BUFFER * CHANNELS);
    set_dma_x_modify(CH_PPI, SAMPLE_SIZE);
    set_dma_y_count(CH_PPI, BUFFER_COUNT);
    set_dma_y_modify(CH_PPI, SAMPLE_SIZE);
    set_dma_callback(CH_PPI, &buffer_full_handler, NULL);

    return 0;
}
Exemplo n.º 9
0
/**
 * ccat_dma_init() - Initialize CCAT and host memory for DMA transfer
 * @dma object for management data which will be initialized
 * @channel number of the DMA channel
 * @ioaddr of the pci bar2 configspace used to calculate the address of the pci dma configuration
 * @dev which should be configured for DMA
 */
static int ccat_dma_init(struct ccat_dma_mem *const dma, size_t channel,
			 void __iomem * const bar2,
			 struct ccat_eth_fifo *const fifo)
{
	/* Per-channel DMA config registers sit at bar2 + 0x1000, one
	 * 64-bit slot per channel. */
	void __iomem *const ioaddr = bar2 + 0x1000 + (sizeof(u64) * channel);
	const dma_addr_t phys = CCAT_ALIGN_CHANNEL(dma->phys, channel);
	/* Upper 32 address bits; 0 when dma_addr_t is itself 32-bit
	 * (the sizeof check also keeps the >>32 shift well-defined). */
	const u32 phys_hi = (sizeof(phys) > sizeof(u32)) ? phys >> 32 : 0;
	fifo->dma.start = CCAT_ALIGN_CHANNEL(dma->base, channel);

	fifo_set_end(fifo, CCAT_ALIGNMENT);
	if (request_dma(channel, KBUILD_MODNAME)) {
		pr_info("request dma channel %llu failed\n", (u64) channel);
		return -EINVAL;
	}

	/** bit 0 enables 64 bit mode on ccat */
	/* i.e. bit 0 is set iff there are non-zero high address bits. */
	iowrite32((u32) phys | ((phys_hi) > 0), ioaddr);
	iowrite32(phys_hi, ioaddr + 4);

	pr_debug
	    ("DMA%llu mem initialized\n base:         0x%p\n start:        0x%p\n phys:         0x%09llx\n pci addr:     0x%01x%08x\n size:         %llu |%llx bytes.\n",
	     (u64) channel, dma->base, fifo->dma.start, (u64) dma->phys,
	     ioread32(ioaddr + 4), ioread32(ioaddr),
	     (u64) dma->size, (u64) dma->size);
	return 0;
}
Exemplo n.º 10
0
/*
 * Hook up the PVR2 DMA interrupt, claim the cascade channel and
 * register the DMA controller.  Fix: the request_dma() result was
 * silently ignored; a failure is now propagated to the caller.
 */
static int __init pvr2_dma_init(void)
{
	int ret;

	setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);

	ret = request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");
	if (ret < 0)
		return ret;

	return register_dmac(&pvr2_dma_info);
}
Exemplo n.º 11
0
int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
{
	struct labpc_private *devpriv = dev->private;
	void *dma_buffer;
	unsigned long dma_flags;
	int ret;

	if (dma_chan != 1 && dma_chan != 3)
		return -EINVAL;

	dma_buffer = kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!dma_buffer)
		return -ENOMEM;

	ret = request_dma(dma_chan, dev->board_name);
	if (ret) {
		kfree(dma_buffer);
		return ret;
	}

	devpriv->dma_buffer = dma_buffer;
	devpriv->dma_chan = dma_chan;
	devpriv->dma_addr = virt_to_bus(devpriv->dma_buffer);

	dma_flags = claim_dma_lock();
	disable_dma(devpriv->dma_chan);
	set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
	release_dma_lock(dma_flags);

	return 0;
}
Exemplo n.º 12
0
/*
 * Check whether DMA resource @idx of @dev can be used: valid channel
 * number, not reserved, no conflict inside the device, not in use by
 * the system or by another PnP device.  Returns 1 if usable, 0 if not.
 */
int pnp_check_dma(struct pnp_dev * dev, int idx)
{
#ifndef CONFIG_IA64
	int tmp;
	struct pnp_dev *tdev;
	unsigned long * dma = &dev->res.dma_resource[idx].start;

	/* if the resource doesn't exist, don't complain about it */
	if (cannot_compare(dev->res.dma_resource[idx].flags))
		return 1;

	/*
	 * check if the resource is valid: ISA DMA channels are 0-7 with
	 * channel 4 reserved for the cascade.  Fix: *dma is unsigned, so
	 * the old "*dma < 0" test was always false and has been dropped.
	 */
	if (*dma == 4 || *dma > 7)
		return 0;

	/* check if the resource is reserved */
	for (tmp = 0; tmp < 8; tmp++) {
		if (pnp_reserve_dma[tmp] == *dma)
			return 0;
	}

	/* check for internal conflicts */
	for (tmp = 0; tmp < PNP_MAX_DMA && tmp != idx; tmp++) {
		if (dev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
			if (dev->res.dma_resource[tmp].start == *dma)
				return 0;
		}
	}

	/* check if the resource is already in use, skip if the
	 * device is active because it itself may be in use */
	if(!dev->active) {
		if (request_dma(*dma, "pnp"))
			return 0;
		free_dma(*dma);
	}

	/* check for conflicts with other pnp devices */
	pnp_for_each_dev(tdev) {
		if (tdev == dev)
			continue;
		for (tmp = 0; tmp < PNP_MAX_DMA; tmp++) {
			if (tdev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
				if (cannot_compare(tdev->res.dma_resource[tmp].flags))
					continue;
				if ((tdev->res.dma_resource[tmp].start == *dma))
					return 0;
			}
		}
	}

	return 1;
#else
	/* IA64 hasn't legacy DMA */
	return 0;
#endif
}
Exemplo n.º 13
0
/*
 * Try to set up ISA DMA for the host.  On any failure the host keeps
 * dma == -1 and the driver falls back to FIFO (PIO) transfers, so this
 * function never reports an error to its caller.
 * NOTE(review): "dma" here is a file-scope variable (presumably a
 * module parameter) — confirm against the rest of the file.
 */
static void wbsd_init_dma(struct wbsd_host* host)
{
	host->dma = -1;
	
	if (dma < 0)
		return;
	
	if (request_dma(dma, DRIVER_NAME))
		goto err;
	
	/*
	 * We need to allocate a special buffer in
	 * order for ISA to be able to DMA to it.
	 */
	host->dma_buffer = kmalloc(65536,
		GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
	if (!host->dma_buffer)
		goto free;

	/*
	 * Translate the address to a physical address.
	 */
	host->dma_addr = isa_virt_to_bus(host->dma_buffer);
			
	/*
	 * ISA DMA must be aligned on a 64k basis.
	 */
	if ((host->dma_addr & 0xffff) != 0)
		goto kfree;
	/*
	 * ISA cannot access memory above 16 MB.
	 */
	else if (host->dma_addr >= 0x1000000)
		goto kfree;

	host->dma = dma;
	
	return;
	
kfree:
	/*
	 * If we've gotten here then there is some kind of alignment bug
	 */
	/* (The cleanup below is only reached if BUG_ON is compiled out.) */
	BUG_ON(1);
	
	kfree(host->dma_buffer);
	host->dma_buffer = NULL;

free:
	free_dma(dma);

err:
	printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
		"Falling back on FIFO.\n", dma);
}
Exemplo n.º 14
0
/*
 * Probe for an XT hard-disk controller, claim its I/O ports, IRQ and
 * DMA channel, and register the detected drives with the (pre-2.6)
 * block layer globals.  All failures are reported but leave earlier
 * acquisitions in place — this is one-shot boot-time init.
 */
static void __init xd_geninit (void)
{
	u8 i,controller;
	unsigned int address;

	/* Default block size of 1 KiB for every possible minor. */
	for(i=0;i<(XD_MAXDRIVES << 6);i++)
		xd_blocksizes[i] = 1024;
		
	blksize_size[MAJOR_NR] = xd_blocksizes;

	if (xd_detect(&controller,&address)) {
		printk(KERN_INFO "Detected a%s controller (type %d) at address %06x\n",
			xd_sigs[controller].name,controller,address);
		if (!request_region(xd_iobase,4, "xd")) {
			printk(KERN_ERR "xd: Ports at 0x%x are not available\n", xd_iobase);
			return;
		}
		if (controller)
			xd_sigs[controller].init_controller(address);
		xd_drives = xd_initdrives(xd_sigs[controller].init_drive);
		
		printk(KERN_INFO "Detected %d hard drive%s (using IRQ%d & DMA%d)\n",
			xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
		for (i = 0; i < xd_drives; i++)
			printk(KERN_INFO " xd%c: CHS=%d/%d/%d\n",'a'+i,
				xd_info[i].cylinders,xd_info[i].heads,
				xd_info[i].sectors);

	}
	if (xd_drives) {
		/* NOTE(review): if the DMA request fails the IRQ is freed
		 * again but xd_drives stays non-zero — presumably later
		 * code copes with the missing IRQ/DMA; verify. */
		if (!request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) {
			if (request_dma(xd_dma,"xd")) {
				printk(KERN_ERR "xd: unable to get DMA%d\n",xd_dma);
				free_irq(xd_irq, NULL);
			}
		}
		else
			printk(KERN_ERR "xd: unable to get IRQ%d\n",xd_irq);
	}

	/* xd_maxsectors depends on controller - so set after detection */
	for(i=0; i<(XD_MAXDRIVES << 6); i++) xd_maxsect[i] = xd_maxsectors;
	max_sectors[MAJOR_NR] = xd_maxsect;

	for (i = 0; i < xd_drives; i++) {
		xd_valid[i] = 1;
		register_disk(&xd_gendisk, MKDEV(MAJOR_NR,i<<6), 1<<6, &xd_fops,
				xd_info[i].heads * xd_info[i].cylinders *
				xd_info[i].sectors);
	}

	xd_gendisk.nr_real = xd_drives;

}
Exemplo n.º 15
0
/*
 * One-shot driver init: register the char device, install the DMA
 * interrupt handler, route/unmask the DMA interrupt in the SIM, claim
 * the DMA channel and program the default sample clock.
 * Fix: failures now unwind what was already acquired (previously the
 * chrdev stayed registered after an IRQ failure, and both the chrdev
 * and the IRQ leaked after a DMA failure).
 */
void dac0800_init(void)
{
	volatile unsigned char	*mbarp, *dmap;
	unsigned long		imr;
	unsigned int		icr;
	int			result;

	/* Register dac0800 as character device */
	result = register_chrdev(DAC0800_MAJOR, "dac0800", &dac0800_fops);
	if (result < 0) {
		printk(KERN_WARNING "DAC0800: can't get major %d\n",
			DAC0800_MAJOR);
		return;
	}

	printk ("DAC0800: Copyright (C) 1999, Greg Ungerer "
		"([email protected])\n");

	/* Install ISR (interrupt service routine) */
	result = request_irq(DAC0800_VEC, dac0800_isr, SA_INTERRUPT,
		"DAC0800", NULL);
	if (result) {
		printk ("DAC0800: IRQ %d already in use\n", DAC0800_VEC);
		unregister_chrdev(DAC0800_MAJOR, "dac0800");
		return;
	}

	/* Set interrupt vector location */
	dmap = (volatile unsigned char *) dma_base_addr[DAC0800_DMA_CHAN];
	dmap[MCFDMA_DIVR] = DAC0800_VEC;

	/* Set interrupt level and priority */
	switch (DAC0800_DMA_CHAN) {
	case 1:  icr = MCFSIM_DMA1ICR ; imr = MCFSIM_IMR_DMA1; break;
	case 2:  icr = MCFSIM_DMA2ICR ; imr = MCFSIM_IMR_DMA2; break;
	case 3:  icr = MCFSIM_DMA3ICR ; imr = MCFSIM_IMR_DMA3; break;
	default: icr = MCFSIM_DMA0ICR ; imr = MCFSIM_IMR_DMA0; break;
	}

	mbarp = (volatile unsigned char *) MCF_MBAR;
	mbarp[icr] = MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1;
	mcf_setimr(mcf_getimr() & ~imr);

	/* Request DMA channel */
	printk ("DAC0800: requesting DMA channel %d\n", DAC0800_DMA_CHAN);
	result = request_dma(DAC0800_DMA_CHAN, "dac0800");
	if (result) {
		printk ("DAC0800: dma channel %d already in use\n",
			DAC0800_DMA_CHAN);
		free_irq(DAC0800_VEC, NULL);
		unregister_chrdev(DAC0800_MAJOR, "dac0800");
		return;
	}

	/* Program default timer rate */
	dac0800_setclk(8000);
}
Exemplo n.º 16
0
/*
 * Claim the card's ISA DMA channel and then the driver's own DMA
 * buffers.  Returns 1 on success, 0 on failure (with the channel
 * released again if only the buffer step failed).
 */
static	int	sscape_pnp_alloc_dma(sscape_info* devc)
{
	if (request_dma(devc->dma, "sscape"))
		return 0;

	if (!sscape_alloc_dma(devc)) {
		/* Buffers unavailable: give the channel back. */
		free_dma(devc->dma);
		return 0;
	}

	return 1;
}
Exemplo n.º 17
0
/*
 * Reserve DMA channel @chn for @deviceID and mark it available in the
 * sound layer's allocation map.  Returns 0 on success or the nonzero
 * error from request_dma().
 */
int
sound_alloc_dma (int chn, char *deviceID)
{
  int             err = request_dma (chn, deviceID);

  if (err != 0)
    return err;

  dma_alloc_map[chn] = DMA_MAP_FREE;
  return 0;
}
/*
 * Early init for the Blackfin DMA controller: reset each channel's
 * status, map it to its register block, and reserve the MDMA stream 0
 * pair for internal dma_memcpy() use.
 */
static int __init blackfin_dma_init(void)
{
	int chan;

	printk(KERN_INFO "Blackfin DMA Controller\n");

	for (chan = 0; chan < MAX_DMA_CHANNELS; chan++) {
		dma_ch[chan].regs = dma_io_base_addr[chan];
		atomic_set(&dma_ch[chan].chan_status, 0);
	}

	/* Mark MEMDMA Channel 0 as requested since we're using it internally */
	request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
	request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");

#if defined(CONFIG_DEB_DMA_URGENT)
	bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
			 | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
#endif

	return 0;
}
Exemplo n.º 19
0
/*
 * Claim the page-operations DMA channel and, once it is secured,
 * switch the page primitives over to their DMA-backed variants.
 */
static int __init pg_dma_init(void)
{
    int ret = request_dma(dma_channel, "page ops");

    if (ret)
        return ret;

    copy_page = copy_page_dma;
    clear_page = clear_page_dma;

    return 0;
}
Exemplo n.º 20
0
static int __init pvr2_dma_init(void)
{
	int i, base;

	/* Route the PVR2 DMA interrupt and claim the cascade channel.
	 * NOTE(review): the request_dma() result is ignored here. */
	setup_irq(HW_EVENT_PVR2_DMA, &pvr2_dma_irq);
	request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");

	/* PVR2 cascade comes after on-chip DMAC */
	base = ONCHIP_NR_DMA_CHANNELS;

	for (i = 0; i < PVR2_NR_DMA_CHANNELS; i++)
		dma_info[base + i].ops = &pvr2_dma_ops;

	/* NOTE(review): register_dmac() is passed &pvr2_dma_ops here,
	 * while a sibling version of this function registers
	 * &pvr2_dma_info — confirm which type register_dmac() expects
	 * in this tree. */
	return register_dmac(&pvr2_dma_ops);
}
/*
 * Bring up the SH7760 DMABRG: allocate the handler table, reserve DMAC
 * channel 0, program the bridge/DMAC registers, and attach the three
 * DMABRG interrupt handlers.  On IRQ failure the already-attached IRQs
 * and the handler table are released (register state and the DMAC
 * channel reservation are deliberately left as-is).
 */
static int __init dmabrg_init(void)
{
	unsigned long or;
	int ret;

	dmabrg_handlers = kzalloc(10 * sizeof(struct dmabrg_handler),
				  GFP_KERNEL);
	if (!dmabrg_handlers)
		return -ENOMEM;

#ifdef CONFIG_SH_DMA
	/* request DMAC channel 0 before anyone else can get it */
	ret = request_dma(0, "DMAC 0 (DMABRG)");
	if (ret < 0)
		printk(KERN_INFO "DMABRG: DMAC ch0 not reserved!\n");
#endif

	__raw_writel(0, DMABRGCR);
	__raw_writel(0, DMACHCR0);
	__raw_writel(0x94000000, DMARSRA);	/* enable DMABRG in DMAC 0 */

	/* enable DMABRG mode, enable the DMAC */
	or = __raw_readl(DMAOR);
	__raw_writel(or | DMAOR_BRG | DMAOR_DMEN, DMAOR);

	ret = request_irq(DMABRGI0, dmabrg_irq, 0,
			"DMABRG USB address error", NULL);
	if (ret)
		goto out0;

	ret = request_irq(DMABRGI1, dmabrg_irq, 0,
			"DMABRG Transfer End", NULL);
	if (ret)
		goto out1;

	ret = request_irq(DMABRGI2, dmabrg_irq, 0,
			"DMABRG Transfer Half", NULL);
	if (ret == 0)
		return ret;

	/* Third IRQ failed: unwind the first two, then the table. */
	free_irq(DMABRGI1, NULL);
out1:	free_irq(DMABRGI0, NULL);
out0:	kfree(dmabrg_handlers);
	return ret;
}
Exemplo n.º 22
0
/*
 * Char-device open: reset layer-1 and DMA state, allocate the DMA
 * fragment buffer, claim the DMA channel and IRQ, then start input.
 * Legacy (2.0/2.2-era) code: kfree_s(), MOD_INC_USE_COUNT, and a
 * request_region() whose result is not checked — presumably the void-
 * returning variant of that era; verify against the target kernel.
 */
static int hfmodem_open(struct inode *inode, struct file *file)
{
    struct hfmodem_state *dev = &hfmodem_state[0];

    /* Only one opener at a time, and only once probing attached scops. */
    if (dev->active)
        return -EBUSY;
    if (!dev->scops)
        return -EPERM;
    /*
     * clear vars
     */
    memset(&dev->l1, 0, sizeof(dev->l1));
    dev->dma.last_dmaptr = 0;
    dev->dma.lastfrag = 0;
    dev->dma.fragptr = 0;
    dev->dma.ptt_frames = 0;
    /*
     * allocate memory
     */
    if (!(dev->dma.buf = kmalloc(HFMODEM_FRAGSIZE * (HFMODEM_NUMFRAGS+HFMODEM_EXCESSFRAGS), GFP_KERNEL | GFP_DMA)))
        return -ENOMEM;
    /*
     * allocate resources
     */
    if (request_dma(dev->io.dma, hfmodem_drvname)) {
        kfree_s(dev->dma.buf, HFMODEM_FRAGSIZE * (HFMODEM_NUMFRAGS+HFMODEM_EXCESSFRAGS));
        return -EBUSY;
    }
    if (request_irq(dev->io.irq, hfmodem_interrupt, SA_INTERRUPT, hfmodem_drvname, dev)) {
        free_dma(dev->io.dma);
        kfree_s(dev->dma.buf, HFMODEM_FRAGSIZE * (HFMODEM_NUMFRAGS+HFMODEM_EXCESSFRAGS));
        return -EBUSY;
    }
    request_region(dev->io.base_addr, dev->scops->extent, hfmodem_drvname);

    /* clear requests */
    dev->active++;
    MOD_INC_USE_COUNT;
    hfmodem_refclock_init(dev);
    output_open(dev);
    dev->scops->init(dev);
    dev->scops->prepare_input(dev);
    dev->scops->trigger_input(dev);
    return 0;
}
Exemplo n.º 23
0
/**
 * test_request_free_channels - Tests request_dma for all the channels. It
 * assumes all channels are free. It requests all the channels and expect to
 * get 0 as return value
 *
 * returns: 	0 - success
 * 		-1 - failure
 */
/*
 * Request every channel in [0, TEST_MAX_CHANNELS), all assumed free,
 * and expect each request_dma() to return 0.
 * Returns 0 when every request succeeded, -1 otherwise.
 */
static int test_request_free_channels(void)
{
	unsigned int chan;
	int result = 0;

	PDBG("inside test_request_free_channels\n");

	for (chan = 0; chan < TEST_MAX_CHANNELS; chan++) {
		int st = request_dma(chan, DRIVER_NAME);

		PDBG("request_dma(%d) free = %d %s\n", chan, st,
		     st == 0 ? PASS : FAIL);
		if (st != 0)
			result = -1;
	}
	PINFO("test_request_free_channels %s\n", (result ? FAIL : PASS));

	return result;
}
Exemplo n.º 24
0
/*
 * Claim the PPI's DMA channel and attach @handler as its callback;
 * when error reporting is enabled (ppi->err_int), also claim the error
 * IRQ, releasing the DMA channel again if that fails.
 * Returns 0 on success or a negative errno.
 */
static int ppi_attach_irq(struct ppi_if *ppi, irq_handler_t handler)
{
	const struct ppi_info *info = ppi->info;
	int ret;

	ret = request_dma(info->dma_ch, "PPI_DMA");
	if (ret) {
		pr_err("Unable to allocate DMA channel for PPI\n");
		return ret;
	}
	set_dma_callback(info->dma_ch, handler, ppi);

	/* The error interrupt is optional. */
	if (!ppi->err_int)
		return ret;

	ret = request_irq(info->irq_err, ppi_irq_err, 0, "PPI ERROR", ppi);
	if (ret) {
		pr_err("Unable to allocate IRQ for PPI\n");
		free_dma(info->dma_ch);
	}
	return ret;
}
Exemplo n.º 25
0
/**
 * test_request_invalid_channels - Tests request_dma for all the channels that
 * are out of the valid channel range. It expects request_dma returns -EINVAL.
 *
 * returns: 	0 - success
 * 		-1 - failure
 */
/*
 * Request a sample of out-of-range channel numbers and expect each
 * request_dma() to reject them with -EINVAL.
 * Returns 0 when every request was rejected, -1 otherwise.
 * Fix: the array length was hard-coded as 8 in two places; it is now
 * derived from the array itself.
 */
static int test_request_invalid_channels(void)
{
	int status = 0;
	unsigned int i;
	int st;
	unsigned int chan2test[] = {
		MAX_DMA_CHANNELS,
		MAX_DMA_CHANNELS + 1,
		MAX_DMA_CHANNELS + 2,
		MAX_DMA_CHANNELS + 3,
		MAX_DMA_CHANNELS * 10,
		MAX_DMA_CHANNELS * 10 + 1,
		MAX_DMA_CHANNELS * 10 + 2,
		MAX_DMA_CHANNELS * 10 + 3,
	};
	const unsigned int nr_chans = sizeof(chan2test) / sizeof(chan2test[0]);

	PDBG("inside test_request_invalid_channels\n");

	for (i = 0; i < nr_chans; i++) {
		st = request_dma(chan2test[i], DRIVER_NAME);
		if (st == -EINVAL) {
			PDBG("request_dma(%d) invalid = %d %s\n",
			     chan2test[i], st, PASS);
		} else {
			PDBG("request_dma(%d) invalid = %d %s\n",
			     chan2test[i], st, FAIL);
			status = -1;
		}
	}

	PINFO("test_request_invalid_channels %s\n", (status ? FAIL : PASS));

	return status;
}
/*
 * Allocate and initialize a SoundBlaster DSP instance: grab the IRQ,
 * and (except for the PCI-based ALS4000) the I/O region plus the 8-bit
 * and optional 16-bit ISA DMA channels, probe the DSP, and register it
 * as a low-level sound device.  On any failure snd_sbdsp_free() undoes
 * whatever was claimed, guided by the -1 markers set below.
 * Returns 0 and stores the chip in *r_chip, or a negative errno.
 */
int snd_sbdsp_create(struct snd_card *card,
		     unsigned long port,
		     int irq,
		     irqreturn_t (*irq_handler)(int, void *, struct pt_regs *),
		     int dma8,
		     int dma16,
		     unsigned short hardware,
		     struct snd_sb **r_chip)
{
	struct snd_sb *chip;
	int err;
	static struct snd_device_ops ops = {
		.dev_free =	snd_sbdsp_dev_free,
	};

	snd_assert(r_chip != NULL, return -EINVAL);
	*r_chip = NULL;
	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;
	spin_lock_init(&chip->reg_lock);
	spin_lock_init(&chip->open_lock);
	spin_lock_init(&chip->midi_input_lock);
	spin_lock_init(&chip->mixer_lock);
	/* Mark all resources unclaimed so snd_sbdsp_free() knows what
	 * to release after a partial failure. */
	chip->irq = -1;
	chip->dma8 = -1;
	chip->dma16 = -1;
	chip->port = port;
	
	/* ALS4000 is PCI-based and needs a shared interrupt line. */
	if (request_irq(irq, irq_handler, hardware == SB_HW_ALS4000 ?
			SA_INTERRUPT | SA_SHIRQ : SA_INTERRUPT,
			"SoundBlaster", (void *) chip)) {
		snd_printk(KERN_ERR "sb: can't grab irq %d\n", irq);
		snd_sbdsp_free(chip);
		return -EBUSY;
	}
	chip->irq = irq;

	/* ALS4000 has no ISA port/DMA resources to claim. */
	if (hardware == SB_HW_ALS4000)
		goto __skip_allocation;
	
	if ((chip->res_port = request_region(port, 16, "SoundBlaster")) == NULL) {
		snd_printk(KERN_ERR "sb: can't grab port 0x%lx\n", port);
		snd_sbdsp_free(chip);
		return -EBUSY;
	}

#ifdef CONFIG_ISA
	if (dma8 >= 0 && request_dma(dma8, "SoundBlaster - 8bit")) {
		snd_printk(KERN_ERR "sb: can't grab DMA8 %d\n", dma8);
		snd_sbdsp_free(chip);
		return -EBUSY;
	}
	chip->dma8 = dma8;
	if (dma16 >= 0) {
		/* 16-bit DMA must be a high channel (5-7) except on ALS100. */
		if (hardware != SB_HW_ALS100 && (dma16 < 5 || dma16 > 7)) {
			/* no duplex */
			dma16 = -1;
		} else if (request_dma(dma16, "SoundBlaster - 16bit")) {
			snd_printk(KERN_ERR "sb: can't grab DMA16 %d\n", dma16);
			snd_sbdsp_free(chip);
			return -EBUSY;
		}
	}
	chip->dma16 = dma16;
#endif

      __skip_allocation:
	chip->card = card;
	chip->hardware = hardware;
	if ((err = snd_sbdsp_probe(chip)) < 0) {
		snd_sbdsp_free(chip);
		return err;
	}
	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_sbdsp_free(chip);
		return err;
	}
	*r_chip = chip;
	return 0;
}
Exemplo n.º 27
0
/*
 * net_device open for the 3c505: reset the adapter, claim the IRQ, the
 * DMA channel and a DMA bounce buffer, push the memory/82586
 * configuration commands to the card, prime the receive queue, and
 * start the transmit queue.  Returns 0 on success or a negative errno
 * with everything acquired here released again.
 */
static int elp_open(struct net_device *dev)
{
	elp_device *adapter;
	int retval;

	adapter = dev->priv;

	if (elp_debug >= 3)
		printk(KERN_DEBUG "%s: request to open device\n", dev->name);

	/*
	 * make sure we actually found the device
	 */
	if (adapter == NULL) {
		printk(KERN_ERR "%s: Opening a non-existent physical device\n", dev->name);
		return -EAGAIN;
	}
	/*
	 * disable interrupts on the board
	 */
	outb_control(0, dev);

	/*
	 * clear any pending interrupts
	 */
	inb_command(dev->base_addr);
	adapter_reset(dev);

	/*
	 * no receive PCBs active
	 */
	adapter->rx_active = 0;

	adapter->busy = 0;
	adapter->send_pcb_semaphore = 0;
	adapter->rx_backlog.in = 0;
	adapter->rx_backlog.out = 0;

	spin_lock_init(&adapter->lock);

	/*
	 * install our interrupt service routine
	 */
	if ((retval = request_irq(dev->irq, &elp_interrupt, 0, dev->name, dev))) {
		printk(KERN_ERR "%s: could not allocate IRQ%d\n", dev->name, dev->irq);
		return retval;
	}
	if ((retval = request_dma(dev->dma, dev->name))) {
		free_irq(dev->irq, dev);
		printk(KERN_ERR "%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
		return retval;
	}
	/* ISA DMA bounce buffer for packet transfers. */
	adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
	if (!adapter->dma_buffer) {
		printk(KERN_ERR "%s: could not allocate DMA buffer\n", dev->name);
		free_dma(dev->dma);
		free_irq(dev->irq, dev);
		return -ENOMEM;
	}
	adapter->dmaing = 0;

	/*
	 * enable interrupts on the board
	 */
	outb_control(CMDE, dev);

	/*
	 * configure adapter memory: we need 10 multicast addresses, default==0
	 */
	if (elp_debug >= 3)
		printk(KERN_DEBUG "%s: sending 3c505 memory configuration command\n", dev->name);
	adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
	adapter->tx_pcb.data.memconf.cmd_q = 10;
	adapter->tx_pcb.data.memconf.rcv_q = 20;
	adapter->tx_pcb.data.memconf.mcast = 10;
	adapter->tx_pcb.data.memconf.frame = 20;
	adapter->tx_pcb.data.memconf.rcv_b = 20;
	adapter->tx_pcb.data.memconf.progs = 0;
	adapter->tx_pcb.length = sizeof(struct Memconf);
	adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
	if (!send_pcb(dev, &adapter->tx_pcb))
		printk(KERN_ERR "%s: couldn't send memory configuration command\n", dev->name);
	else {
		/* Busy-wait for the command acknowledgement (bounded). */
		unsigned long timeout = jiffies + TIMEOUT;
		while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout));
		if (time_after_eq(jiffies, timeout))
			TIMEOUT_MSG(__LINE__);
	}


	/*
	 * configure adapter to receive broadcast messages and wait for response
	 */
	if (elp_debug >= 3)
		printk(KERN_DEBUG "%s: sending 82586 configure command\n", dev->name);
	adapter->tx_pcb.command = CMD_CONFIGURE_82586;
	adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
	adapter->tx_pcb.length = 2;
	adapter->got[CMD_CONFIGURE_82586] = 0;
	if (!send_pcb(dev, &adapter->tx_pcb))
		printk(KERN_ERR "%s: couldn't send 82586 configure command\n", dev->name);
	else {
		/* Busy-wait for the command acknowledgement (bounded). */
		unsigned long timeout = jiffies + TIMEOUT;
		while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
		if (time_after_eq(jiffies, timeout))
			TIMEOUT_MSG(__LINE__);
	}

	/* enable burst-mode DMA */
	/* outb(0x1, dev->base_addr + PORT_AUXDMA); */

	/*
	 * queue receive commands to provide buffering
	 */
	prime_rx(dev);
	if (elp_debug >= 3)
		printk(KERN_DEBUG "%s: %d receive PCBs active\n", dev->name, adapter->rx_active);

	/*
	 * device is now officially open!
	 */

	netif_start_queue(dev);
	return 0;
}
/*
 * Function called when the module is initialized
 */
/*
 * Module init for the OMAP DMA self-test: configure TRANSFER_COUNT
 * transfers, request a channel and buffers for each, start them all,
 * then poll until every transfer has finished (or the poll budget is
 * exhausted) and record the verdict via set_test_passed().
 *
 * Fix: a module init must return a negative errno on failure — the
 * old code returned 1, which the module loader reports confusingly.
 * TODO(review): channels/buffers acquired before a failure are not
 * released here; the teardown API for this wrapper isn't visible in
 * this file — confirm whether module exit handles it.
 */
static int __init dma_module_init(void)
{
	int error;
	int i;

	transfers[0].data_type = OMAP_DMA_DATA_TYPE_S8;
	/*transfers[1].data_type = OMAP_DMA_DATA_TYPE_S16;
	transfers[2].data_type = OMAP_DMA_DATA_TYPE_S32;*/

	for (i = 0; i < TRANSFER_COUNT; i++) {
		/* Create the transfer for the test */
		transfers[i].device_id = OMAP_DMA_NO_DEVICE;
		transfers[i].sync_mode = OMAP_DMA_SYNC_ELEMENT;
		transfers[i].data_burst = OMAP_DMA_DATA_BURST_DIS;
		transfers[i].endian_type = DMA_TEST_LITTLE_ENDIAN;
		transfers[i].addressing_mode = OMAP_DMA_AMODE_POST_INC;
		transfers[i].dst_addressing_mode = OMAP_DMA_AMODE_POST_INC;
		transfers[i].priority = DMA_CH_PRIO_HIGH;
		transfers[i].buffers.buf_size = (1024 * 1024);

		/* Request a dma transfer */
		error = request_dma(&transfers[i]);
		if (error) {
			set_test_passed(0);
			return error < 0 ? error : -EBUSY;
		}

		/* Request 2 buffer for the transfer and fill them */
		error = create_transfer_buffers(&(transfers[i].buffers));
		if (error) {
			set_test_passed(0);
			return error < 0 ? error : -ENOMEM;
		}
		fill_source_buffer(&(transfers[i].buffers));

		/* Setup the dma transfer parameters */
		setup_dma_transfer(&transfers[i]);
	}

	for (i = 0; i < TRANSFER_COUNT; i++) {
		/* Start the transfers */
		start_dma_transfer(&transfers[i]);
	}

	/* Poll if the all the transfers have finished */
	for (i = 0; i < TRANSFER_POLL_COUNT; i++) {
		if (get_transfers_finished()) {
			mdelay(TRANSFER_POLL_TIME);
			check_test_passed();
			break;
		} else {
			mdelay(TRANSFER_POLL_TIME);
		}
	}

	/* This will happen if the poll retries have been reached */
	if (i == TRANSFER_POLL_COUNT) {
		set_test_passed(0);
		return -ETIMEDOUT;
	}

	return 0;
}
Exemplo n.º 29
0
static int
powertecscsi_probe(struct expansion_card *ec)
{
	struct Scsi_Host *host;
    	struct powertec_info *info;
    	unsigned long base;
	int ret;

	base = ecard_address(ec, ECARD_IOC, ECARD_FAST);

	request_region(base + POWERTEC_FAS216_OFFSET,
		       16 << POWERTEC_FAS216_SHIFT, "powertec2-fas");

	host = scsi_register(&powertecscsi_template,
			     sizeof (struct powertec_info));
	if (!host) {
		ret = -ENOMEM;
		goto out_region;
	}

	host->io_port	  = base;
	host->irq	  = ec->irq;
	host->dma_channel = ec->dma;

	ec->irqaddr	= (unsigned char *)ioaddr(base + POWERTEC_INTR_STATUS);
	ec->irqmask	= POWERTEC_INTR_BIT;
	ec->irq_data	= (void *)(base + POWERTEC_INTR_CONTROL);
	ec->ops		= (expansioncard_ops_t *)&powertecscsi_ops;

	info = (struct powertec_info *)host->hostdata;
	info->ec = ec;
	info->term_port = base + POWERTEC_TERM_CONTROL;
	powertecscsi_terminator_ctl(host, term[ec->slot_no]);

	info->info.scsi.io_port		= host->io_port + POWERTEC_FAS216_OFFSET;
	info->info.scsi.io_shift	= POWERTEC_FAS216_SHIFT;
	info->info.scsi.irq		= host->irq;
	info->info.ifcfg.clockrate	= 40; /* MHz */
	info->info.ifcfg.select_timeout	= 255;
	info->info.ifcfg.asyncperiod	= 200; /* ns */
	info->info.ifcfg.sync_max_depth	= 7;
	info->info.ifcfg.cntl3		= CNTL3_BS8 | CNTL3_FASTSCSI | CNTL3_FASTCLK;
	info->info.ifcfg.disconnect_ok	= 1;
	info->info.ifcfg.wide_max_size	= 0;
	info->info.ifcfg.capabilities	= 0;
	info->info.dma.setup		= powertecscsi_dma_setup;
	info->info.dma.pseudo		= NULL;
	info->info.dma.stop		= powertecscsi_dma_stop;

	ret = fas216_init(host);
	if (ret)
		goto out_free;

	ret = request_irq(host->irq, powertecscsi_intr,
			  SA_INTERRUPT, "powertec", &info->info);
	if (ret) {
		printk("scsi%d: IRQ%d not free: %d\n",
		       host->host_no, host->irq, ret);
		goto out_release;
	}

	if (host->dma_channel != NO_DMA) {
		if (request_dma(host->dma_channel, "powertec")) {
			printk("scsi%d: DMA%d not free, using PIO\n",
			       host->host_no, host->dma_channel);
			host->dma_channel = NO_DMA;
		} else {
			set_dma_speed(host->dma_channel, 180);
			info->info.ifcfg.capabilities |= FASCAP_DMA;
		}
	}

	ret = fas216_add(host);
	if (ret == 0)
		goto out;

	if (host->dma_channel != NO_DMA)
		free_dma(host->dma_channel);
	free_irq(host->irq, host);

 out_release:
	fas216_release(host);

 out_free:
	scsi_unregister(host);

 out_region:
	release_region(base + POWERTEC_FAS216_OFFSET,
		       16 << POWERTEC_FAS216_SHIFT);

 out:
	return ret;
}
Exemplo n.º 30
0
/*
 * Write up to dev->fifo_depth bytes from userspace into the FIFO with a
 * one-shot PL330 DMA transfer.
 *
 * Allocates a coherent bounce buffer, copies the user data into it, arms
 * and starts the DMA channel, then sleeps until the done/fault callback
 * clears dev->busy.
 *
 * Returns the number of bytes queued on success, or a negative errno
 * (-EINTR, -ENOMEM, -EBUSY, -EFAULT, -ERESTARTSYS) on failure.
 */
ssize_t xfifo_dma_write(struct file *filp, const char __user *buf, size_t count,
        loff_t *f_pos)
{
        struct xfifo_dma_dev *dev = filp->private_data;
        size_t transfer_size;

        int retval = 0;

        if (mutex_lock_interruptible(&dev->mutex)) {
                return -EINTR;
        }

        dev->writes++;

        /* Clamp the request to what the hardware FIFO can hold. */
        transfer_size = count;
        if (count > dev->fifo_depth) {
                transfer_size = dev->fifo_depth;
        }

        /* Allocate a DMA buffer for the transfer */
        dev->buffer_v_addr = dma_alloc_coherent(&dev->pdev->dev, transfer_size,
                &dev->buffer_d_addr, GFP_KERNEL);
        if (!dev->buffer_v_addr) {
                dev_err(&dev->pdev->dev,
                        "coherent DMA buffer allocation failed\n");
                retval = -ENOMEM;
                goto fail_buffer;
        }

        PDEBUG("dma buffer alloc - d @0x%0x v @0x%0x\n",
                (u32)dev->buffer_d_addr, (u32)dev->buffer_v_addr);

        if (request_dma(dev->dma_channel, MODULE_NAME)) {
                dev_err(&dev->pdev->dev,
                        "unable to alloc DMA channel %d\n",
                        dev->dma_channel);
                retval = -EBUSY;
                goto fail_client_data;
        }

        /*
         * Load the DMA buffer with the user data before arming the channel;
         * copy_from_user() can fail and must be checked, otherwise we would
         * DMA uninitialised memory to the device.
         */
        if (copy_from_user(dev->buffer_v_addr, buf, transfer_size)) {
                retval = -EFAULT;
                goto fail_copy;
        }

        dev->busy = 1;
        dev->count = transfer_size;

        /* Program the PL330 channel: direction, buffer, size, callbacks. */
        set_dma_mode(dev->dma_channel, DMA_MODE_WRITE);
        set_dma_addr(dev->dma_channel, dev->buffer_d_addr);
        set_dma_count(dev->dma_channel, transfer_size);
        set_pl330_client_data(dev->dma_channel, dev->client_data);
        set_pl330_done_callback(dev->dma_channel,
                xfifo_dma_done_callback, dev);
        set_pl330_fault_callback(dev->dma_channel,
                xfifo_dma_fault_callback, dev);
        set_pl330_incr_dev_addr(dev->dma_channel, 0);

        xfifo_dma_reset_fifo();
        /* Kick off the DMA */
        enable_dma(dev->dma_channel);

        mutex_unlock(&dev->mutex);

        if (wait_event_interruptible(xfifo_dma_wait, dev->busy == 0)) {
                /*
                 * Interrupted with the transfer possibly still in flight:
                 * stop the channel before its buffer is freed below, or the
                 * hardware would keep reading freed memory.
                 */
                disable_dma(dev->dma_channel);
                retval = -ERESTARTSYS;
        } else {
                retval = transfer_size;
        }

        /* Deallocate the DMA buffer and free the channel */
        free_dma(dev->dma_channel);

        dma_free_coherent(&dev->pdev->dev, transfer_size, dev->buffer_v_addr,
                dev->buffer_d_addr);

        PDEBUG("dma write %zu bytes\n", transfer_size);

        return retval;

fail_copy:
        free_dma(dev->dma_channel);
fail_client_data:
        dma_free_coherent(&dev->pdev->dev, transfer_size, dev->buffer_v_addr,
                dev->buffer_d_addr);
fail_buffer:
        mutex_unlock(&dev->mutex);
        return retval;
}