static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	resource_size_t csr_base, mem_base;
	unsigned long csr_len, mem_len;
	struct denali_nand_info *denali;

	denali = kzalloc(sizeof(*denali), GFP_KERNEL);
	if (!denali)
		return -ENOMEM;

	ret = pci_enable_device(dev);
	if (ret) {
		printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
		goto failed_alloc_memory;
	}

	if (id->driver_data == INTEL_CE4100) {
		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
			printk(KERN_ERR "Intel CE4100 only supports"
					" ONFI timing mode 1 or below\n");
			ret = -EINVAL;
			goto failed_enable_dev;
		}
		denali->platform = INTEL_CE4100;
		mem_base = pci_resource_start(dev, 0);
		mem_len = pci_resource_len(dev, 1);
		csr_base = pci_resource_start(dev, 1);
		csr_len = pci_resource_len(dev, 1);
	} else {
		denali->platform = INTEL_MRST;
		csr_base = pci_resource_start(dev, 0);
		csr_len = pci_resource_len(dev, 0);
		mem_base = pci_resource_start(dev, 1);
		mem_len = pci_resource_len(dev, 1);
		if (!mem_len) {
			mem_base = csr_base + csr_len;
			mem_len = csr_len;
		}
	}

	
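	/* is 32-bit DMA supported? */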
	ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "Spectra: no usable DMA configuration\n");
		goto failed_enable_dev;
	}
	denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
					     DENALI_BUF_SIZE,
					     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
		dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
		goto failed_enable_dev;
	}

	pci_set_master(dev);
	denali->dev = &dev->dev;
	denali->mtd.dev.parent = &dev->dev;

	ret = pci_request_regions(dev, DENALI_NAND_NAME);
	if (ret) {
		printk(KERN_ERR "Spectra: Unable to request memory regions\n");
		goto failed_dma_map;
	}

	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
	if (!denali->flash_reg) {
		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
		ret = -ENOMEM;
		goto failed_req_regions;
	}

	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
	if (!denali->flash_mem) {
		printk(KERN_ERR "Spectra: ioremap_nocache failed!\n");
		ret = -ENOMEM;
		goto failed_remap_reg;
	}

	denali_hw_init(denali);
	denali_drv_init(denali);

	if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
			DENALI_NAND_NAME, denali)) {
		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto failed_remap_mem;
	}

	
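	/* now that the ISR is registered, enable controller interrupts */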
	denali_set_intr_modes(denali, true);

	pci_set_drvdata(dev, denali);

	denali->mtd.name = "denali-nand";
	denali->mtd.owner = THIS_MODULE;
	denali->mtd.priv = &denali->nand;

	
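	/* low-level chip access callbacks used by the NAND core */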
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.waitfunc = denali_waitfunc;

	if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
		ret = -ENODEV;
		printk(KERN_ERR "Spectra: device size not supported by this "
			"version of MTD.\n");
		goto failed_req_irq;
	}

	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
	denali->nand.chipsize <<= (denali->devnum - 1);
	denali->nand.page_shift += (denali->devnum - 1);
	denali->nand.pagemask = (denali->nand.chipsize >>
						denali->nand.page_shift) - 1;
	denali->nand.bbt_erase_shift += (denali->devnum - 1);
	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
	denali->nand.chip_shift += (denali->devnum - 1);
	denali->mtd.writesize <<= (denali->devnum - 1);
	denali->mtd.oobsize <<= (denali->devnum - 1);
	denali->mtd.erasesize <<= (denali->devnum - 1);
	denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
	denali->bbtskipbytes *= denali->devnum;


	
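	/* bad block table descriptors (main and mirror copy) */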
	denali->nand.bbt_td = &bbt_main_descr;
	denali->nand.bbt_md = &bbt_mirror_descr;

	
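	/* keep the bad block table in flash, but skip the scan at init */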
	denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
	denali->nand.options |= NAND_SKIP_BBTSCAN;
	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;

	if (denali->nand.cellinfo & 0xc &&
			(denali->mtd.oobsize > (denali->bbtskipbytes +
			ECC_15BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE)))) {
		
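		/* MLC device with enough spare area: use the stronger 15-bit ECC */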
		denali->nand.ecc.strength = 15;
		denali->nand.ecc.layout = &nand_15bit_oob;
		denali->nand.ecc.bytes = ECC_15BITS;
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
	} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
			ECC_8BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE))) {
		printk(KERN_ERR "Your NAND chip OOB is not large enough to"
				" contain 8bit ECC correction codes\n");
		ret = -EINVAL;
		goto failed_req_irq;
	} else {
		denali->nand.ecc.strength = 8;
		denali->nand.ecc.layout = &nand_8bit_oob;
		denali->nand.ecc.bytes = ECC_8BITS;
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
	}

	denali->nand.ecc.bytes *= denali->devnum;
	denali->nand.ecc.strength *= denali->devnum;
	denali->nand.ecc.layout->eccbytes *=
		denali->mtd.writesize / ECC_SECTOR_SIZE;
	denali->nand.ecc.layout->oobfree[0].offset =
		denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
	denali->nand.ecc.layout->oobfree[0].length =
		denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
		denali->bbtskipbytes;

	denali->totalblks = denali->mtd.size >>
				denali->nand.phys_erase_shift;
	denali->blksperchip = denali->totalblks / denali->nand.numchips;

	denali->nand.ecc.calculate = denali_ecc_calculate;
	denali->nand.ecc.correct = denali_ecc_correct;
	denali->nand.ecc.hwctl = denali_ecc_hwctl;

	
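	/* hook up the controller-specific ECC page and OOB accessors */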
	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
	denali->nand.ecc.read_page = denali_read_page;
	denali->nand.ecc.read_page_raw = denali_read_page_raw;
	denali->nand.ecc.write_page = denali_write_page;
	denali->nand.ecc.write_page_raw = denali_write_page_raw;
	denali->nand.ecc.read_oob = denali_read_oob;
	denali->nand.ecc.write_oob = denali_write_oob;
	denali->nand.erase_cmd = denali_erase;

	if (nand_scan_tail(&denali->mtd)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	ret = mtd_device_register(&denali->mtd, NULL, 0);
	if (ret) {
		dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
				ret);
		goto failed_req_irq;
	}
	return 0;

failed_req_irq:
	denali_irq_cleanup(dev->irq, denali);
failed_remap_mem:
	iounmap(denali->flash_mem);
failed_remap_reg:
	iounmap(denali->flash_reg);
failed_req_regions:
	pci_release_regions(dev);
failed_dma_map:
	dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
failed_enable_dev:
	pci_disable_device(dev);
failed_alloc_memory:
	kfree(denali);
	return ret;
}
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
						dev->name);
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}
Example #3
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; 
		}

		desc = rx_desc_ptr(port, n);
#if 0 
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			
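			/* no skb available, return the descriptor to the RX-free queue */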
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		
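		/* process the received frame */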
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data,
					RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		
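		/* put the new buffer on the RX-free queue */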
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	
}
void rknand_dma_unmap_single(unsigned long ptr, int size, int dir)
{
	dma_unmap_single(NULL, (dma_addr_t)ptr, size,
			 dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
Example #5
/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	/* Connect job ring interrupt handler. */
	for_each_possible_cpu(i)
		tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
			     (unsigned long)dev);

	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    "caam-jr", dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		irq_dispose_mapping(jrp->irq);
		jrp->irq = 0;
		return -EINVAL;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
			       GFP_KERNEL | GFP_DMA);
	jrp->outring = kzalloc(sizeof(struct jr_outentry) *
			       JOBR_DEPTH, GFP_KERNEL | GFP_DMA);

	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
			       GFP_KERNEL);

	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
	    (jrp->entinfo == NULL)) {
		dev_err(dev, "can't allocate job rings for %d\n",
			jrp->ridx);
		return -ENOMEM;
	}

	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	inpbusaddr = dma_map_single(dev, jrp->inpring,
				    sizeof(u32 *) * JOBR_DEPTH,
				    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, inpbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map input ring\n");
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	outbusaddr = dma_map_single(dev, jrp->outring,
				    sizeof(struct jr_outentry) * JOBR_DEPTH,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, outbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map output ring\n");
		dma_unmap_single(dev, inpbusaddr,
				 sizeof(u32 *) * JOBR_DEPTH,
				 DMA_TO_DEVICE);
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	jrp->assign = JOBR_UNASSIGNED;
	return 0;
}
Example #6
/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}
Example #7
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt  */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}
Example #8
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}
Example #9
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
				RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received;		/* not all work done */
}
Example #10
static void
fec_enet_tx(struct net_device *ndev)
{
    struct	fec_enet_private *fep;
    struct bufdesc *bdp;
    unsigned short status;
    struct	sk_buff	*skb;

    fep = netdev_priv(ndev);
    spin_lock(&fep->hw_lock);
    bdp = fep->dirty_tx;

    while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
        if (bdp == fep->cur_tx && fep->tx_full == 0)
            break;

        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                         FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
        bdp->cbd_bufaddr = 0;

        skb = fep->tx_skbuff[fep->skb_dirty];
        /* Check for errors. */
        if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                      BD_ENET_TX_RL | BD_ENET_TX_UN |
                      BD_ENET_TX_CSL)) {
            ndev->stats.tx_errors++;
            if (status & BD_ENET_TX_HB)  /* No heartbeat */
                ndev->stats.tx_heartbeat_errors++;
            if (status & BD_ENET_TX_LC)  /* Late collision */
                ndev->stats.tx_window_errors++;
            if (status & BD_ENET_TX_RL)  /* Retrans limit */
                ndev->stats.tx_aborted_errors++;
            if (status & BD_ENET_TX_UN)  /* Underrun */
                ndev->stats.tx_fifo_errors++;
            if (status & BD_ENET_TX_CSL) /* Carrier lost */
                ndev->stats.tx_carrier_errors++;
        } else {
            ndev->stats.tx_packets++;
        }

        if (status & BD_ENET_TX_READY)
            printk("HEY! Enet xmit interrupt and TX_READY.\n");

        /* Deferred means some collisions occurred during transmit,
         * but we eventually sent the packet OK.
         */
        if (status & BD_ENET_TX_DEF)
            ndev->stats.collisions++;

        /* Free the sk buffer associated with this last transmit */
        dev_kfree_skb_any(skb);
        fep->tx_skbuff[fep->skb_dirty] = NULL;
        fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

        /* Update pointer to next buffer descriptor to be transmitted */
        if (status & BD_ENET_TX_WRAP)
            bdp = fep->tx_bd_base;
        else
            bdp++;

        /* Since we have freed up a buffer, the ring is no longer full
         */
        if (fep->tx_full) {
            fep->tx_full = 0;
            if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
        }
    }
    fep->dirty_tx = bdp;
    spin_unlock(&fep->hw_lock);
}
Example #11
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *ndev)
{
    struct fec_enet_private *fep = netdev_priv(ndev);
    const struct platform_device_id *id_entry =
        platform_get_device_id(fep->pdev);
    struct bufdesc *bdp;
    unsigned short status;
    struct	sk_buff	*skb;
    ushort	pkt_len;
    __u8 *data;

#ifdef CONFIG_M532x
    flush_cache_all();
#endif

    spin_lock(&fep->hw_lock);

    /* First, grab all of the stats for the incoming packet.
     * These get messed up if we get called due to a busy condition.
     */
    bdp = fep->cur_rx;

    while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

        /* Since we have allocated space to hold a complete frame,
         * the last indicator should be set.
         */
        if ((status & BD_ENET_RX_LAST) == 0)
            printk("FEC ENET: rcv is not +last\n");

        if (!fep->opened)
            goto rx_processing_done;

        /* Check for errors. */
        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                      BD_ENET_RX_CR | BD_ENET_RX_OV)) {
            ndev->stats.rx_errors++;
            if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                /* Frame too long or too short. */
                ndev->stats.rx_length_errors++;
            }
            if (status & BD_ENET_RX_NO)	/* Frame alignment */
                ndev->stats.rx_frame_errors++;
            if (status & BD_ENET_RX_CR)	/* CRC Error */
                ndev->stats.rx_crc_errors++;
            if (status & BD_ENET_RX_OV)	/* FIFO overrun */
                ndev->stats.rx_fifo_errors++;
        }

        /* Report late collisions as a frame error.
         * On this error, the BD is closed, but we don't know what we
         * have in the buffer.  So, just drop this frame on the floor.
         */
        if (status & BD_ENET_RX_CL) {
            ndev->stats.rx_errors++;
            ndev->stats.rx_frame_errors++;
            goto rx_processing_done;
        }

        /* Process the incoming frame. */
        ndev->stats.rx_packets++;
        pkt_len = bdp->cbd_datlen;
        ndev->stats.rx_bytes += pkt_len;
        data = (__u8*)__va(bdp->cbd_bufaddr);

        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                         FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

        if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
            swap_buffer(data, pkt_len);

        /* This does 16 byte alignment, exactly what we need.
         * The packet length includes FCS, but we don't want to
         * include that when passing upstream as it messes up
         * bridging applications.
         */
        skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

        if (unlikely(!skb)) {
            printk("%s: Memory squeeze, dropping packet.\n",
                   ndev->name);
            ndev->stats.rx_dropped++;
        } else {
            skb_reserve(skb, NET_IP_ALIGN);
            skb_put(skb, pkt_len - 4);	/* Make room */
            skb_copy_to_linear_data(skb, data, pkt_len - 4);
            skb->protocol = eth_type_trans(skb, ndev);
            netif_rx(skb);
        }

        bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
                                          FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
        /* Clear the status flags for this buffer */
        status &= ~BD_ENET_RX_STATS;

        /* Mark the buffer empty */
        status |= BD_ENET_RX_EMPTY;
        bdp->cbd_sc = status;

        /* Update BD pointer to next entry */
        if (status & BD_ENET_RX_WRAP)
            bdp = fep->rx_bd_base;
        else
            bdp++;
        /* Doing this here will keep the FEC running while we process
         * incoming frames.  On a heavily loaded network, we should be
         * able to keep up at the expense of system resources.
         */
        writel(0, fep->hwp + FEC_R_DES_ACTIVE);
    }
    fep->cur_rx = bdp;

    spin_unlock(&fep->hw_lock);
}
Example #12
File: en_tx.c Project: 8l/akaros
static uint32_t mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, uint8_t owner, uint64_t timestamp)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	void *end = ring->buf + ring->buf_size;
	struct block *block = tx_info->block;
	int nr_maps = tx_info->nr_maps;
	int i;

#if 0 // AKAROS_PORT
	/* We do not touch skb here, so prefetch skb->users location
	 * to speedup consume_skb()
	 */
	prefetchw(&skb->users);

	if (unlikely(timestamp)) {
		struct skb_shared_hwtstamps hwts;

		mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp);
		skb_tstamp_tx(skb, &hwts);
	}
#endif

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear)
				dma_unmap_single(priv->ddev,
						tx_info->map0_dma,
						tx_info->map0_byte_count,
						PCI_DMA_TODEVICE);
			else
				dma_unmap_page(priv->ddev,
					       tx_info->map0_dma,
					       tx_info->map0_byte_count,
					       PCI_DMA_TODEVICE);
			for (i = 1; i < nr_maps; i++) {
				data++;
				dma_unmap_page(priv->ddev,
					(dma_addr_t)be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
			}
		}
	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear)
				dma_unmap_single(priv->ddev,
						tx_info->map0_dma,
						tx_info->map0_byte_count,
						PCI_DMA_TODEVICE);
			else
				dma_unmap_page(priv->ddev,
					       tx_info->map0_dma,
					       tx_info->map0_byte_count,
					       PCI_DMA_TODEVICE);
			for (i = 1; i < nr_maps; i++) {
				data++;
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				dma_unmap_page(priv->ddev,
					(dma_addr_t)be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
			}
		}
	}
	freeb(block);
	return tx_info->nr_txbb;
}
Example #13
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt() || oops_in_progress)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %zu byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
Example #14
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
				   const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpm_register_hardware(dev, &tpm_ibmvtpm);
	if (!chip) {
		dev_err(dev, "tpm_register_hardware failed\n");
		return -ENODEV;
	}

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;
	chip->vendor.data = (void *)ibmvtpm;

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return rc;
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	tpm_remove_hardware(dev);

	return rc;
}
Example #15
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point empty slot. We tell hardware the first
	 * slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CPTCFG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

      out:
	return ring;

      err_free_ringmemory:
	free_ringmemory(ring);
      err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
      err_kfree_meta:
	kfree(ring->meta);
      err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
Example #17
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
Example #18
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}
Example #19
/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * descriptor */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
			priv->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
			priv->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				priv->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				priv->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				priv->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				priv->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
			if (!nskb) {
				/* forget packet, just rearm desc */
				priv->stats.rx_dropped++;
				continue;
			}

			/* since we're copying the data, we can align
			 * them properly */
			skb_reserve(nskb, NET_IP_ALIGN);
			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += len;
		dev->last_rx = jiffies;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
				ENETDMA_CHANCFG_REG(priv->rx_chan));
	}

	return processed;
}
Example #20
/* NAPI receive function */
static int fs_enet_rx_napi(struct net_device *dev, int *budget)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	int rx_work_limit = 0;	/* pacify gcc */

	rx_work_limit = min(dev->quota, *budget);

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			/* napi, got packet but no quota */
			if (--rx_work_limit < 0)
				break;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	dev->quota -= received;
	*budget -= received;

	if (rx_work_limit < 0)
		return 1;	/* not done */

	/* done */
	netif_rx_complete(dev);

	(*fep->ops->napi_enable_rx)(dev);

	return 0;
}
Example #21
static void handle_endpoint(struct usb_info *ui, unsigned bit)
{
	struct msm_endpoint *ept = ui->ept + bit;
	struct msm_request *req;
	unsigned long flags;
	unsigned info;

#if 0
	INFO("handle_endpoint() %d %s req=%p(%08x)\n",
		ept->num, (ept->flags & EPT_FLAG_IN) ? "in" : "out",
		ept->req, ept->req ? ept->req->item_dma : 0);
#endif

	/* expire all requests that are no longer active */
	spin_lock_irqsave(&ui->lock, flags);
	while ((req = ept->req)) {
		info = req->item->info;

		/* if we've processed all live requests, time to
		 * restart the hardware on the next non-live request
		 */
		if (!req->live) {
			usb_ept_start(ept);
			break;
		}

		/* if the transaction is still in-flight, stop here */
		if (info & INFO_ACTIVE)
			break;

		/* advance ept queue to the next request */
		ept->req = req->next;
		if (ept->req == 0)
			ept->last = 0;

		dma_unmap_single(NULL, req->dma, req->req.length,
				 (ept->flags & EPT_FLAG_IN) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (info & (INFO_HALTED | INFO_BUFFER_ERROR | INFO_TXN_ERROR)) {
			/* XXX pass on more specific error code */
			req->req.status = -EIO;
			req->req.actual = 0;
			INFO("msm72k_udc: ept %d %s error. info=%08x\n",
			       ept->num,
			       (ept->flags & EPT_FLAG_IN) ? "in" : "out",
			       info);
		} else {
			req->req.status = 0;
			req->req.actual =
				req->req.length - ((info >> 16) & 0x7FFF);
		}
		req->busy = 0;
		req->live = 0;

		if (req->req.complete) {
			spin_unlock_irqrestore(&ui->lock, flags);
			req->req.complete(&ept->ep, &req->req);
			spin_lock_irqsave(&ui->lock, flags);
		}
	}
	spin_unlock_irqrestore(&ui->lock, flags);
}
Example #22
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int credits = 0;
	int ret;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	if (ep->tx_credit_flow_enabled) {
		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credits < credits) {
			spin_unlock_bh(&htc->tx_lock);
			ret = -EAGAIN;
			goto err_pull;
		}
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);
	}

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, skb_cb->paddr);
	if (ret) {
		ret = -EIO;
		goto err_credits;
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	if (ep->tx_credit_flow_enabled) {
		spin_lock_bh(&htc->tx_lock);
		ep->tx_credits += credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d reverted %d credits back (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);

		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->ar);
	}
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}
Example #23
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 status, void *areq)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as the
 *                 "desc" argument passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	unsigned long flags;
	int head, tail, desc_size;
	dma_addr_t desc_dma, inpbusaddr;

	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);

	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	dma_sync_single_for_device(dev, inpbusaddr,
					sizeof(dma_addr_t) * JOBR_DEPTH,
					DMA_TO_DEVICE);
	spin_lock_irqsave(&jrp->inplock, flags);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_irqrestore(&jrp->inplock, flags);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	dma_sync_single_for_device(dev, inpbusaddr,
					sizeof(dma_addr_t) * JOBR_DEPTH,
					DMA_TO_DEVICE);

	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_irqrestore(&jrp->inplock, flags);

	return 0;
}
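
A minimal usage sketch for caam_jr_enqueue() follows; it is not part of the driver above. The jr_result struct, the jr_done() callback and the submit_desc_and_wait() helper are illustrative assumptions, and only the enqueue/callback contract comes from the kerneldoc.

/* Hypothetical caller of caam_jr_enqueue(); assumes <linux/completion.h>.
 * jr_result, jr_done() and submit_desc_and_wait() are illustrative names,
 * not part of the CAAM driver.
 */
struct jr_result {
	struct completion done;
	u32 status;
};

static void jr_done(struct device *dev, u32 *desc, u32 status, void *areq)
{
	struct jr_result *res = areq;

	res->status = status;		/* untranslated CAAM status word */
	complete(&res->done);
}

static int submit_desc_and_wait(struct device *jrdev, u32 *desc)
{
	struct jr_result res;
	int ret;

	init_completion(&res.done);

	/* desc must live in DMA-able memory, per the kerneldoc above */
	ret = caam_jr_enqueue(jrdev, desc, jr_done, &res);
	if (ret)
		return ret;	/* -EBUSY: ring full, -EIO: map failure */

	wait_for_completion(&res.done);
	return res.status ? -EIO : 0;
}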
struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();
	static DEFINE_PER_CPU(int, last_cpu);

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
	 * and dma-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
Example #25
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	dma_addr_t outbusaddr;
	void *userarg;
	unsigned long flags;

	outbusaddr = rd_reg64(&jrp->rregs->outring_base);

	spin_lock_irqsave(&jrp->outlock, flags);

	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {

		hw_idx = jrp->out_ring_read_index;
		dma_sync_single_for_cpu(dev, outbusaddr,
					sizeof(struct jr_outentry) * JOBR_DEPTH,
					DMA_FROM_DEVICE);

		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();
			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		spin_lock_irqsave(&jrp->outlock, flags);

		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
Example #26
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		/* Prepare the BD for next cycle. netif_receive_skb()
		 * only if new skb was allocated and mapped to avoid holes
		 * in the RX fifo.
		 */
		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
		if (unlikely(!skb)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot allocate skb\n");
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		addr = dma_map_single(&ndev->dev, (void *)skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot map dma buffer\n");
			dev_kfree_skb(skb);
			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_dropped++;
			continue;
		}

		/* unmap previously mapped skb */
		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb_put(rx_buff->skb, pktlen);
		rx_buff->skb->dev = ndev;
		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);

		netif_receive_skb(rx_buff->skb);

		rx_buff->skb = skb;
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
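
For context, a hedged sketch of how a NAPI poll handler typically drives a budget-based Rx routine like arc_emac_rx(); the priv->napi and priv->ndev fields and the name emac_poll_sketch() are assumptions here, and the device-specific Rx interrupt re-enable step is only noted in a comment.

/* Hypothetical NAPI poll wrapper around arc_emac_rx(); the private-struct
 * field names are assumptions, not taken from the code above.
 */
static int emac_poll_sketch(struct napi_struct *napi, int budget)
{
	struct arc_emac_priv *priv =
		container_of(napi, struct arc_emac_priv, napi);
	struct net_device *ndev = priv->ndev;	/* assumed back-pointer */
	int work_done;

	work_done = arc_emac_rx(ndev, budget);
	if (work_done < budget) {
		/* all pending BDs handled: leave polling mode; the Rx
		 * interrupt would be re-enabled here (device specific) */
		napi_complete(napi);
	}

	return work_done;
}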
Example #27
static void myri_rx(struct myri_eth *mp, struct net_device *dev)
{
	struct recvq __iomem *rq = mp->rq;
	struct recvq __iomem *rqa = mp->rqack;
	int entry		= sbus_readl(&rqa->head);
	int limit		= sbus_readl(&rqa->tail);
	int drops;

	DRX(("entry[%d] limit[%d] ", entry, limit));
	if (entry == limit)
		return;
	drops = 0;
	DRX(("\n"));
	while (entry != limit) {
		struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
		u32 csum		= sbus_readl(&rxdack->csum);
		int len			= sbus_readl(&rxdack->myri_scatters[0].len);
		int index		= sbus_readl(&rxdack->ctx);
		struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
		struct sk_buff *skb	= mp->rx_skbs[index];

		/* Ack it. */
		sbus_writel(NEXT_RX(entry), &rqa->head);

		/* Check for errors. */
		DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
		dma_sync_single_for_cpu(&mp->myri_op->dev,
					sbus_readl(&rxd->myri_scatters[0].addr),
					RX_ALLOC_SIZE, DMA_FROM_DEVICE);
		if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
			DRX(("ERROR["));
			dev->stats.rx_errors++;
			if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
				DRX(("BAD_LENGTH] "));
				dev->stats.rx_length_errors++;
			} else {
				DRX(("NO_PADDING] "));
				dev->stats.rx_frame_errors++;
			}

			/* Return it to the LANAI. */
	drop_it:
			drops++;
			DRX(("DROP "));
			dev->stats.rx_dropped++;
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
			goto next;
		}

		DRX(("len[%d] ", len));
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;
			u32 dma_addr;

			DRX(("BIGBUFF "));
			new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				DRX(("skb_alloc(FAILED) "));
				goto drop_it;
			}
			dma_unmap_single(&mp->myri_op->dev,
					 sbus_readl(&rxd->myri_scatters[0].addr),
					 RX_ALLOC_SIZE,
					 DMA_FROM_DEVICE);
			mp->rx_skbs[index] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, RX_ALLOC_SIZE);
			dma_addr = dma_map_single(&mp->myri_op->dev,
						  new_skb->data,
						  RX_ALLOC_SIZE,
						  DMA_FROM_DEVICE);
			sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			/* Trim the original skb for the netif. */
			DRX(("trim(%d) ", len));
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len);

			DRX(("SMALLBUFF "));
			if (copy_skb == NULL) {
				DRX(("dev_alloc_skb(FAILED) "));
				goto drop_it;
			}
			/* DMA sync already done above. */
			copy_skb->dev = dev;
			DRX(("resv_and_put "));
			skb_put(copy_skb, len);
			skb_copy_from_linear_data(skb, copy_skb->data, len);

			/* Reuse original ring buffer. */
			DRX(("reuse "));
			dma_sync_single_for_device(&mp->myri_op->dev,
						   sbus_readl(&rxd->myri_scatters[0].addr),
						   RX_ALLOC_SIZE,
						   DMA_FROM_DEVICE);
			sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
			sbus_writel(index, &rxd->ctx);
			sbus_writel(1, &rxd->num_sg);
			sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);

			skb = copy_skb;
		}

		/* Just like the happy meal we get checksums from this card. */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */

		skb->protocol = myri_type_trans(skb, dev);
		DRX(("prot[%04x] netif_rx ", skb->protocol));
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	next:
		DRX(("NEXT\n"));
		entry = NEXT_RX(entry);
	}
}
Example #28
int i2s_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	int i ;
	unsigned long flags, data;
	i2s_config_type* ptri2s_config;
	    
	ptri2s_config = filp->private_data;
	switch (cmd) {
	case I2S_SRATE:
		spin_lock_irqsave(&ptri2s_config->lock, flags);
		{
			data = *(unsigned long*)(RALINK_SYSCTL_BASE+0x834);
			data |=(1<<17);
	    		*(unsigned long*)(RALINK_SYSCTL_BASE+0x834) = data;
	    		
	    		data = *(unsigned long*)(RALINK_SYSCTL_BASE+0x834);
			data &=~(1<<17);
	    		*(unsigned long*)(RALINK_SYSCTL_BASE+0x834) = data;
	    		
	    		audiohw_preinit();
		}	
		if((arg>MAX_SRATE_HZ)||(arg<MIN_SRATE_HZ))
		{
			MSG("audio sampling rate %u should be %d ~ %d Hz\n", (u32)arg, MIN_SRATE_HZ, MAX_SRATE_HZ);
			/* don't leave the switch with the lock still held */
			spin_unlock_irqrestore(&ptri2s_config->lock, flags);
			break;
		}
		ptri2s_config->srate = arg;
		MSG("set audio sampling rate to %d Hz\n", ptri2s_config->srate);
		spin_unlock_irqrestore(&ptri2s_config->lock, flags);
		break;
	case I2S_TX_VOL:
		spin_lock_irqsave(&ptri2s_config->lock, flags);
		if((int)arg > 127)
		{
			ptri2s_config->txvol = 127;
		}
		else if((int)arg < 96)
		{
			ptri2s_config->txvol = 96;
		}
		else
			ptri2s_config->txvol = arg;
		
		spin_unlock_irqrestore(&ptri2s_config->lock, flags);
		break;
	case I2S_RX_VOL:
		spin_lock_irqsave(&ptri2s_config->lock, flags);
		if((int)arg > 63)
		{
			ptri2s_config->rxvol = 63;
		}
		else if((int)arg < 0)
		{
			ptri2s_config->rxvol = 0;
		}
		else
			ptri2s_config->rxvol = arg;

		spin_unlock_irqrestore(&ptri2s_config->lock, flags);
		break;	
	case I2S_TX_ENABLE:
		spin_lock_irqsave(&ptri2s_config->lock, flags);
		MSG("I2S_TXENABLE\n");

		/* allocate tx buffer */
		ptri2s_config->pPage0TxBuf8ptr = (u8*)pci_alloc_consistent(NULL, I2S_PAGE_SIZE*2 , &i2s_txdma_addr);
		if(ptri2s_config->pPage0TxBuf8ptr==NULL)
		{
			MSG("Allocate Tx Page Buffer Failed\n");
			/* release the lock before bailing out of the ioctl */
			spin_unlock_irqrestore(&ptri2s_config->lock, flags);
			return -ENOMEM;
		}
		ptri2s_config->pPage1TxBuf8ptr = ptri2s_config->pPage0TxBuf8ptr + I2S_PAGE_SIZE;
		for( i = 0 ; i < MAX_I2S_PAGE ; i ++ )
		{
#if defined(CONFIG_I2S_MMAP)
			ptri2s_config->pMMAPTxBufPtr[i] = ptri2s_config->pMMAPBufPtr[i];
#else

			if(ptri2s_config->pMMAPTxBufPtr[i]==NULL)
				ptri2s_config->pMMAPTxBufPtr[i] = kmalloc(I2S_PAGE_SIZE, GFP_KERNEL);
#endif
		}
#if defined(I2S_FIFO_MODE)
#else
		GdmaI2sTx((u32)ptri2s_config->pPage0TxBuf8ptr, I2S_FIFO_WREG, 0, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
		GdmaI2sTx((u32)ptri2s_config->pPage1TxBuf8ptr, I2S_FIFO_WREG, 1, I2S_PAGE_SIZE, i2s_dma_tx_handler, i2s_unmask_handler);
#endif	
		
		i2s_reset_tx_config(ptri2s_config);
		ptri2s_config->bTxDMAEnable = 1;
		i2s_tx_config(ptri2s_config);
		
		if(ptri2s_config->bRxDMAEnable==0)
			i2s_clock_enable(ptri2s_config);

		audiohw_set_lineout_vol(1, ptri2s_config->txvol, ptri2s_config->txvol);
		
		i2s_tx_enable(ptri2s_config);
#if defined(I2S_FIFO_MODE)
#else
		GdmaUnMaskChannel(GDMA_I2S_TX0);
#endif
		data = i2s_inw(RALINK_REG_INTENA);
		data |=0x0400;
	    	i2s_outw(RALINK_REG_INTENA, data);
	
	    	MSG("I2S_TXENABLE done\n");
		spin_unlock_irqrestore(&ptri2s_config->lock, flags);
		break;
	case I2S_TX_DISABLE:
		spin_lock_irqsave(&ptri2s_config->lock, flags);
		MSG("I2S_TXDISABLE\n");
		i2s_tx_disable(ptri2s_config);
		i2s_reset_tx_config(ptri2s_config);
		if(ptri2s_config->bRxDMAEnable==0)
			i2s_clock_disable(ptri2s_config);
		//i2s_tx_disable(ptri2s_config);
		if(ptri2s_config->bRxDMAEnable==0)
		{
			data = i2s_inw(RALINK_REG_INTENA);
			data &= 0xFFFFFBFF;
		    i2s_outw(RALINK_REG_INTENA, data);
		}
		
		for( i = 0 ; i < MAX_I2S_PAGE ; i ++ )
		{
			if(ptri2s_config->pMMAPTxBufPtr[i] != NULL)
			{
#if defined(CONFIG_I2S_MMAP)
				dma_unmap_single(NULL, i2s_mmap_addr[i], I2S_PAGE_SIZE, DMA_TO_DEVICE);
#endif
				kfree(ptri2s_config->pMMAPTxBufPtr[i]);		
				ptri2s_config->pMMAPTxBufPtr[i] = NULL;
			}
		}
		pci_free_consistent(NULL, I2S_PAGE_SIZE*2, ptri2s_config->pPage0TxBuf8ptr, i2s_txdma_addr);
		ptri2s_config->pPage0TxBuf8ptr = NULL;
		spin_unlock_irqrestore(&ptri2s_config->lock, flags);
		break;
	case I2S_RX_ENABLE:

		spin_lock_irqsave(&ptri2s_config->lock, flags);
		MSG("I2S_RXENABLE\n");
		
		/* allocate rx buffer */
		ptri2s_config->pPage0RxBuf8ptr = (u8*)pci_alloc_consistent(NULL, I2S_PAGE_SIZE*2 , &i2s_rxdma_addr);
		if(ptri2s_config->pPage0RxBuf8ptr==NULL)
		{
			MSG("Allocate Rx Page Buffer Failed\n");
			/* release the lock before bailing out of the ioctl */
			spin_unlock_irqrestore(&ptri2s_config->lock, flags);
			return -ENOMEM;
		}
		ptri2s_config->pPage1RxBuf8ptr = ptri2s_config->pPage0RxBuf8ptr + I2S_PAGE_SIZE;
		
		for( i = 0 ; i < MAX_I2S_PAGE ; i ++ )
		{
			if(ptri2s_config->pMMAPRxBufPtr[i]==NULL)
				ptri2s_config->pMMAPRxBufPtr[i] = kmalloc(I2S_PAGE_SIZE, GFP_KERNEL);
		}
#if defined(I2S_FIFO_MODE)
#else		
		GdmaI2sRx(I2S_RX_FIFO_RREG, (u32)ptri2s_config->pPage0RxBuf8ptr, 0, I2S_PAGE_SIZE, i2s_dma_rx_handler, i2s_unmask_handler);
		GdmaI2sRx(I2S_RX_FIFO_RREG, (u32)ptri2s_config->pPage1RxBuf8ptr, 1, I2S_PAGE_SIZE, i2s_dma_rx_handler, i2s_unmask_handler);
#endif
		i2s_reset_rx_config(ptri2s_config);
		ptri2s_config->bRxDMAEnable = 1;
		i2s_rx_config(ptri2s_config);
#if defined(I2S_FIFO_MODE)
#else		
		GdmaUnMaskChannel(GDMA_I2S_RX0);
#endif
		if(ptri2s_config->bTxDMAEnable==0)
			i2s_clock_enable(ptri2s_config);
#if defined(CONFIG_I2S_TXRX)
		audiohw_set_linein_vol(ptri2s_config->rxvol,  ptri2s_config->rxvol);
#endif
		i2s_rx_enable(ptri2s_config);

		data = i2s_inw(RALINK_REG_INTENA);
		data |=0x0400;
	    	i2s_outw(RALINK_REG_INTENA, data);
		spin_unlock_irqrestore(&ptri2s_config->lock, flags);

		break;
	case I2S_RX_DISABLE:
		spin_lock_irqsave(&ptri2s_config->lock, flags);
		MSG("I2S_RXDISABLE\n");
		i2s_reset_rx_config(ptri2s_config);
		if(ptri2s_config->bTxDMAEnable==0)
			i2s_clock_disable(ptri2s_config);
		i2s_rx_disable(ptri2s_config);
		if(ptri2s_config->bRxDMAEnable==0)
		{
			data = i2s_inw(RALINK_REG_INTENA);
			data &= 0xFFFFFBFF;
	    	i2s_outw(RALINK_REG_INTENA, data);
		}
		
		for( i = 0 ; i < MAX_I2S_PAGE ; i ++ )
		{
			if(ptri2s_config->pMMAPRxBufPtr[i] != NULL)
				kfree(ptri2s_config->pMMAPRxBufPtr[i]);		
			ptri2s_config->pMMAPRxBufPtr[i] = NULL;
		}
		
		pci_free_consistent(NULL, I2S_PAGE_SIZE*2, ptri2s_config->pPage0RxBuf8ptr, i2s_rxdma_addr);
		ptri2s_config->pPage0RxBuf8ptr = NULL;
		spin_unlock_irqrestore(&ptri2s_config->lock, flags);
		break;
	case I2S_PUT_AUDIO:
		//MSG("I2S_PUT_AUDIO\n");	
#if defined(I2S_FIFO_MODE)		
		{
			
			long* pData ;
			//MSG("I2S_PUT_AUDIO FIFO\n");
			copy_from_user(ptri2s_config->pMMAPTxBufPtr[0], (char*)arg, I2S_PAGE_SIZE);
			pData = ptri2s_config->pMMAPTxBufPtr[0];
			for(i = 0 ; i < I2S_PAGE_SIZE>>2 ; i++ )	
			{
				int j;
				unsigned long status = i2s_inw(I2S_FF_STATUS);
				while((status&0x0F)==0)
				{
					for(j = 0 ; j < 50 ; j++);
					status = i2s_inw(I2S_FF_STATUS);
				}
				*((volatile uint32_t *)(I2S_TX_FIFO_WREG)) = cpu_to_le32(*pData);
				if(i==16)
					MSG("I2S_PUT_AUDIO FIFO[0x%08X]\n", *pData);
				pData++;
				
					
			}
		}
		break;
#else		
		do{
			spin_lock_irqsave(&ptri2s_config->lock, flags);
			
			if(((ptri2s_config->tx_w_idx+4)%MAX_I2S_PAGE)!=ptri2s_config->tx_r_idx)
			{
				ptri2s_config->tx_w_idx = (ptri2s_config->tx_w_idx+1)%MAX_I2S_PAGE;	
				//printk("put TB[%d] for user write\n",ptri2s_config->tx_w_idx);
#if defined(CONFIG_I2S_MMAP)
				put_user(ptri2s_config->tx_w_idx, (int*)arg);
#else
				copy_from_user(ptri2s_config->pMMAPTxBufPtr[ptri2s_config->tx_w_idx], (char*)arg, I2S_PAGE_SIZE);
#endif
				pi2s_status->txbuffer_len++;
				spin_unlock_irqrestore(&ptri2s_config->lock, flags);	
				break;
			}
			else
			{
				/* Buffer Full */
				//printk("TBF tr=%d, tw=%d\n", ptri2s_config->tx_r_idx, ptri2s_config->tx_w_idx);
				pi2s_status->txbuffer_ovrun++;
				spin_unlock_irqrestore(&ptri2s_config->lock, flags);
				interruptible_sleep_on(&(ptri2s_config->i2s_tx_qh));
				
			}
		}while(1);
		break;
#endif
	case I2S_GET_AUDIO:
#if defined(I2S_FIFO_MODE)			
		{
			
			long* pData ;
			
			
			pData = ptri2s_config->pMMAPRxBufPtr[0];
			for(i = 0 ; i < I2S_PAGE_SIZE>>2 ; i++ )	
			{
				int j;
				unsigned long status = i2s_inw(I2S_FF_STATUS);
				while((status&0x0F0)==0)
				{
					for(j = 0 ; j < 50 ; j++);
					status = i2s_inw(I2S_FF_STATUS);
				}
				
				*pData = i2s_inw(I2S_RX_FIFO_RREG);
				if(i==16)
					MSG("I2S_GET_AUDIO FIFO[0x%08X]\n", *pData);
				pData++;
			}
			
			copy_to_user((char*)arg, ptri2s_config->pMMAPRxBufPtr[0], I2S_PAGE_SIZE);
		}
		break;
#else		
		do{
			spin_lock_irqsave(&ptri2s_config->lock, flags);
			
			if(ptri2s_config->rx_r_idx!=ptri2s_config->rx_w_idx)
			{			
				copy_to_user((char*)arg, ptri2s_config->pMMAPRxBufPtr[ptri2s_config->rx_r_idx], I2S_PAGE_SIZE);
				ptri2s_config->rx_r_idx = (ptri2s_config->rx_r_idx+1)%MAX_I2S_PAGE;
				pi2s_status->rxbuffer_len--;
				spin_unlock_irqrestore(&ptri2s_config->lock, flags);	
				break;
			}
			else
			{
				/* Buffer Full */
				//printk("RBF rr=%d, rw=%d\n", ptri2s_config->rx_r_idx, ptri2s_config->rx_w_idx);
				pi2s_status->rxbuffer_ovrun++;
				spin_unlock_irqrestore(&ptri2s_config->lock, flags);
				interruptible_sleep_on(&(ptri2s_config->i2s_rx_qh));
				
			}
		}while(1);
		break;
#endif		
	case I2S_DEBUG_CLKGEN:
	case I2S_DEBUG_INLBK:
	case I2S_DEBUG_EXLBK:
	case I2S_DEBUG_CODECBYPASS:	
	case I2S_DEBUG_FMT:
	case I2S_DEBUG_RESET:
		i2s_debug_cmd(cmd, arg);
		break;							
	default :
		MSG("i2s_ioctl: command format error\n");
	}

	return 0;
}
Example #29
static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
{
	struct sk_buff *skb = si->rxskb;
	dma_addr_t dma_addr;
	unsigned int len, stat, data;

	if (!skb) {
		printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
		return;
	}

	/*
	 * Get the current data position.
	 */
	dma_addr = sa1100_get_dma_pos(si->rxdma);
	len = dma_addr - si->rxbuf_dma;
	if (len > HPSIR_MAX_RXLEN)
		len = HPSIR_MAX_RXLEN;
	dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE);

	do {
		/*
		 * Read Status, and then Data.
		 */
		stat = Ser2HSSR1;
		rmb();
		data = Ser2HSDR;

		if (stat & (HSSR1_CRE | HSSR1_ROR)) {
			si->stats.rx_errors++;
			if (stat & HSSR1_CRE)
				si->stats.rx_crc_errors++;
			if (stat & HSSR1_ROR)
				si->stats.rx_frame_errors++;
		} else
			skb->data[len++] = data;

		/*
		 * If we hit the end of frame, there's
		 * no point in continuing.
		 */
		if (stat & HSSR1_EOF)
			break;
	} while (Ser2HSSR0 & HSSR0_EIF);

	if (stat & HSSR1_EOF) {
		si->rxskb = NULL;

		skb_put(skb, len);
		skb->dev = dev;
		skb->mac.raw = skb->data;
		skb->protocol = htons(ETH_P_IRDA);
		si->stats.rx_packets++;
		si->stats.rx_bytes += len;

		/*
		 * Before we pass the buffer up, allocate a new one.
		 */
		sa1100_irda_rx_alloc(si);

		netif_rx(skb);
		dev->last_rx = jiffies;
	} else {
		/*
		 * Remap the buffer.
		 */
		si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
						HPSIR_MAX_RXLEN,
						DMA_FROM_DEVICE);
	}
}
Example #30
/**
 *	ps3stor_setup - Setup a storage device before use
 *	@dev: Pointer to a struct ps3_storage_device
 *	@handler: Pointer to an interrupt handler
 *
 *	Returns 0 for success, or an error code
 */
int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
{
	int error, res, alignment;
	enum ps3_dma_page_size page_size;

	error = ps3_open_hv_device(&dev->sbd);
	if (error) {
		dev_err(&dev->sbd.core,
			"%s:%u: ps3_open_hv_device failed %d\n", __func__,
			__LINE__, error);
		goto fail;
	}

	error = ps3_sb_event_receive_port_setup(&dev->sbd, PS3_BINDING_CPU_ANY,
						&dev->irq);
	if (error) {
		dev_err(&dev->sbd.core,
			"%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
		       __func__, __LINE__, error);
		goto fail_close_device;
	}

	error = request_irq(dev->irq, handler, IRQF_DISABLED,
			    dev->sbd.core.driver->name, dev);
	if (error) {
		dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n",
			__func__, __LINE__, error);
		goto fail_sb_event_receive_port_destroy;
	}

	alignment = min(__ffs(dev->bounce_size),
			__ffs((unsigned long)dev->bounce_buf));
	if (alignment < 12) {
		dev_err(&dev->sbd.core,
			"%s:%u: bounce buffer not aligned (%lx at 0x%p)\n",
			__func__, __LINE__, dev->bounce_size, dev->bounce_buf);
		error = -EINVAL;
		goto fail_free_irq;
	} else if (alignment < 16)
		page_size = PS3_DMA_4K;
	else
		page_size = PS3_DMA_64K;
	dev->sbd.d_region = &dev->dma_region;
	ps3_dma_region_init(&dev->sbd, &dev->dma_region, page_size,
			    PS3_DMA_OTHER, dev->bounce_buf, dev->bounce_size);
	res = ps3_dma_region_create(&dev->dma_region);
	if (res) {
		dev_err(&dev->sbd.core, "%s:%u: cannot create DMA region\n",
			__func__, __LINE__);
		error = -ENOMEM;
		goto fail_free_irq;
	}

	dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
	dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
					 dev->bounce_size, DMA_BIDIRECTIONAL);
	if (!dev->bounce_dma) {
		dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
			__func__, __LINE__);
		error = -ENODEV;
		goto fail_free_dma;
	}

	error = ps3stor_probe_access(dev);
	if (error) {
		dev_err(&dev->sbd.core, "%s:%u: No accessible regions found\n",
			__func__, __LINE__);
		goto fail_unmap_dma;
	}
	return 0;

fail_unmap_dma:
	dma_unmap_single(&dev->sbd.core, dev->bounce_dma, dev->bounce_size,
			 DMA_BIDIRECTIONAL);
fail_free_dma:
	ps3_dma_region_free(&dev->dma_region);
fail_free_irq:
	free_irq(dev->irq, dev);
fail_sb_event_receive_port_destroy:
	ps3_sb_event_receive_port_destroy(&dev->sbd, dev->irq);
fail_close_device:
	ps3_close_hv_device(&dev->sbd);
fail:
	return error;
}
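
To illustrate the contract described in the kerneldoc, a hedged sketch of a caller: ps3xx_interrupt() and ps3xx_setup_example() are hypothetical names, and the use of dev->done assumes the completion that PS3 storage drivers conventionally signal from their IRQ handlers.

/* Hypothetical caller of ps3stor_setup(); assumes <linux/interrupt.h> and
 * a struct completion 'done' in struct ps3_storage_device, as PS3 storage
 * drivers typically use.
 */
static irqreturn_t ps3xx_interrupt(int irq, void *data)
{
	struct ps3_storage_device *dev = data;

	/* signal whoever is waiting for the outstanding storage request */
	complete(&dev->done);
	return IRQ_HANDLED;
}

static int ps3xx_setup_example(struct ps3_storage_device *dev)
{
	int error;

	error = ps3stor_setup(dev, ps3xx_interrupt);
	if (error)
		return error;

	/* bounce buffer is now DMA-mapped and accessible regions probed */
	return 0;
}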