Example #1
0
static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
				      const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long base = ide_pci_dma_base(hwif, d);

	if (base == 0)
		return -1;

	hwif->dma_base = base;

	if (ide_pci_check_simplex(hwif, d) < 0)
		return -1;

	if (ide_pci_set_master(dev, d->name) < 0)
		return -1;

	if (!hwif->channel)
		outb(inb(base + 2) & 0x60, base + 2);

	printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
			 hwif->name, base, base + 7);

	if (ide_allocate_dma_engine(hwif))
		return -1;

	return 0;
}
Example #2
0
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	_auide_hwif *auide = &auide_hwif;
	dbdev_tab_t source_dev_tab, target_dev_tab;
	u32 dev_id, tsize, devwidth, flags;

	dev_id	 = IDE_DDMA_REQ;

	tsize    =  8; /*  1 */
	devwidth = 32; /* 16 */

#ifdef IDE_AU1XXX_BURSTMODE 
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev( &source_dev_tab,
			      dev_id,
			      tsize, devwidth, DEV_FLAGS_OUT | flags);
	auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );

	/* setup dev_tab for rx channel */
	auide_init_dbdma_dev( &source_dev_tab,
			      dev_id,
			      tsize, devwidth, DEV_FLAGS_IN | flags);
	auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab );
	
	/* We also need to add a target device for the DMA */
	auide_init_dbdma_dev( &target_dev_tab,
			      (u32)DSCR_CMD0_ALWAYS,
			      tsize, devwidth, DEV_FLAGS_ANYUSE);
	auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);	
 
	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
						 auide->tx_dev_id,
						 auide_ddma_tx_callback,
						 (void*)auide);
 
	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 auide->target_dev_id,
						 auide_ddma_rx_callback,
						 (void*)auide);

	auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							     NUM_DESCRIPTORS);
	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							     NUM_DESCRIPTORS);

	/* FIXME: check return value */
	(void)ide_allocate_dma_engine(hwif);
	
	au1xxx_dbdma_start( auide->tx_chan );
	au1xxx_dbdma_start( auide->rx_chan );
 
	return 0;
} 
Example #3
0
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	_auide_hwif *auide = &auide_hwif;
	dbdev_tab_t source_dev_tab, target_dev_tab;
	u32 dev_id, tsize, devwidth, flags;

	dev_id	 = hwif->ddma_id;

	tsize    =  8; 
	devwidth = 32; 

#ifdef IDE_AU1XXX_BURSTMODE 
	flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
#else
	flags = DEV_FLAGS_SYNC;
#endif

	
	/* setup dev_tab for tx channel */
	auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
			     DEV_FLAGS_OUT | flags, auide->regbase);
	auide->tx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	/* setup dev_tab for rx channel */
	auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth,
			     DEV_FLAGS_IN | flags, auide->regbase);
	auide->rx_dev_id = au1xxx_ddma_add_device(&source_dev_tab);

	/* We also need to add a target device for the DMA */
	auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS, tsize,
			     devwidth, DEV_FLAGS_ANYUSE, auide->regbase);
	auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab);

	/* Get a channel for TX */
	auide->tx_chan = au1xxx_dbdma_chan_alloc(auide->target_dev_id,
						 auide->tx_dev_id,
						 auide_ddma_tx_callback,
						 (void *)auide);

	/* Get a channel for RX */
	auide->rx_chan = au1xxx_dbdma_chan_alloc(auide->rx_dev_id,
						 auide->target_dev_id,
						 auide_ddma_rx_callback,
						 (void *)auide);

	auide->tx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->tx_chan,
							     NUM_DESCRIPTORS);
	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
							     NUM_DESCRIPTORS);

	
	/* FIXME: check return value */
	(void)ide_allocate_dma_engine(hwif);
	
	au1xxx_dbdma_start( auide->tx_chan );
	au1xxx_dbdma_start( auide->rx_chan );
 
	return 0;
} 
Example #4
0
static int palm_bk3710_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);

	if (ide_allocate_dma_engine(hwif))
		return -1;

	hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;

	return 0;
}
Example #5
0
static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
					  const struct ide_port_info *d)
{

	if (ide_allocate_dma_engine(hwif))
		return -1;

	hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;

	return 0;
}
Example #6
0
static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
					  const struct ide_port_info *d)
{
#ifdef CONFIG_DEBUG_PRINTK
	printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
#else
	;
#endif

	if (ide_allocate_dma_engine(hwif))
		return -1;

	hwif->dma_base = hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;

	return 0;
}
Example #7
0
/* Creates a DMA map for the scatter-gather list entries */
static int __devinit ide_dma_sgiioc4(ide_hwif_t *hwif,
				     const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
	int num_ports = sizeof(struct ioc4_dma_regs);
	void *pad;

	printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);

	if (request_mem_region(dma_base, num_ports, hwif->name) == NULL) {
		printk(KERN_ERR "%s(%s) -- ERROR: addresses 0x%08lx to 0x%08lx "
		       "already in use\n", __func__, hwif->name,
		       dma_base, dma_base + num_ports - 1);
		return -1;
	}

	hwif->dma_base = (unsigned long)hwif->io_ports.irq_addr +
			 IOC4_DMA_OFFSET;

	hwif->sg_max_nents = IOC4_PRD_ENTRIES;

	hwif->prd_max_nents = IOC4_PRD_ENTRIES;
	hwif->prd_ent_size = IOC4_PRD_BYTES;

	if (ide_allocate_dma_engine(hwif))
		goto dma_pci_alloc_failure;

	pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
				   (dma_addr_t *)&hwif->extra_base);
	if (pad) {
		ide_set_hwifdata(hwif, pad);
		return 0;
	}

	ide_release_dma_engine(hwif);

	printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
	       __func__, hwif->name);
	printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);

dma_pci_alloc_failure:
	release_mem_region(dma_base, num_ports);

	return -1;
}
Example #8
0
static int __devinit scc_init_dma(ide_hwif_t *hwif,
				  const struct ide_port_info *d)
{
	return ide_allocate_dma_engine(hwif);
}