Example #1
0
static int sound_alloc_dmap(struct dma_buffparms *dmap)
{
	char *start_addr, *end_addr;
	int dma_pagesize;
	int sz, size;
	struct page *page;

	dmap->mapping_flags &= ~DMA_MAP_MAPPED;

	if (dmap->raw_buf != NULL)
		return 0;	/* Already done */
	if (dma_buffsize < 4096)
		dma_buffsize = 4096;
	dma_pagesize = (dmap->dma < 4) ? (64 * 1024) : (128 * 1024);
	
	/*
	 *	Now check for the Cyrix problem.
	 */
	 
	if (isa_dma_bridge_buggy == 2)
		dma_pagesize = 32768;
	 
	dmap->raw_buf = NULL;
	dmap->buffsize = dma_buffsize;
	if (dmap->buffsize > dma_pagesize)
		dmap->buffsize = dma_pagesize;
	start_addr = NULL;
	/*
	 * Now loop until we get a free buffer. Try a smaller buffer if the
	 * allocation fails. Don't accept a buffer smaller than 8k, for
	 * performance reasons.
	 */
	while (start_addr == NULL && dmap->buffsize > PAGE_SIZE) {
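		/* find the smallest page order sz such that PAGE_SIZE << sz covers the requested buffer size */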
		for (sz = 0, size = PAGE_SIZE; size < dmap->buffsize; sz++, size <<= 1);
		dmap->buffsize = PAGE_SIZE * (1 << sz);
		start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA|__GFP_NOWARN, sz);
		if (start_addr == NULL)
			dmap->buffsize /= 2;
	}

	if (start_addr == NULL) {
		printk(KERN_WARNING "Sound error: Couldn't allocate DMA buffer\n");
		return -ENOMEM;
	} else {
		/* make some checks */
		end_addr = start_addr + dmap->buffsize - 1;

		if (debugmem)
			printk(KERN_DEBUG "sound: start 0x%lx, end 0x%lx\n", (long) start_addr, (long) end_addr);
		
		/* now check if it fits into the same dma-pagesize */

		if (((long) start_addr & ~(dma_pagesize - 1)) != ((long) end_addr & ~(dma_pagesize - 1))
		    || end_addr >= (char *) (MAX_DMA_ADDRESS)) {
			printk(KERN_ERR "sound: Got invalid address 0x%lx for %db DMA-buffer\n", (long) start_addr, dmap->buffsize);
			return -EFAULT;
		}
	}
	dmap->raw_buf = start_addr;
	dmap->raw_buf_phys = dma_map_single(NULL, start_addr, dmap->buffsize, DMA_BIDIRECTIONAL);

	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
		SetPageReserved(page);
	return 0;
}
Example #2
0
int serial8250_request_dma(struct uart_8250_port *p)
{
    struct uart_8250_dma	*dma = p->dma;
    dma_cap_mask_t		mask;

    /* Default slave configuration parameters */
    dma->rxconf.direction		= DMA_DEV_TO_MEM;
    dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
    dma->rxconf.src_addr		= p->port.mapbase + UART_RX;

    dma->txconf.direction		= DMA_MEM_TO_DEV;
    dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
    dma->txconf.dst_addr		= p->port.mapbase + UART_TX;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* Get a channel for RX */
    dma->rxchan = dma_request_slave_channel_compat(mask,
                  dma->fn, dma->rx_param,
                  p->port.dev, "rx");
    if (!dma->rxchan)
        return -ENODEV;

    dmaengine_slave_config(dma->rxchan, &dma->rxconf);

    /* Get a channel for TX */
    dma->txchan = dma_request_slave_channel_compat(mask,
                  dma->fn, dma->tx_param,
                  p->port.dev, "tx");
    if (!dma->txchan) {
        dma_release_channel(dma->rxchan);
        return -ENODEV;
    }

    dmaengine_slave_config(dma->txchan, &dma->txconf);

    /* RX buffer */
    if (!dma->rx_size)
        dma->rx_size = PAGE_SIZE;

    dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
                                     &dma->rx_addr, GFP_KERNEL);
    if (!dma->rx_buf)
        goto err;

    /* TX buffer */
    dma->tx_addr = dma_map_single(dma->txchan->device->dev,
                                  p->port.state->xmit.buf,
                                  UART_XMIT_SIZE,
                                  DMA_TO_DEVICE);
    if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
        dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
                          dma->rx_buf, dma->rx_addr);
        goto err;
    }

    dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

    return 0;
err:
    dma_release_channel(dma->rxchan);
    dma_release_channel(dma->txchan);

    return -ENOMEM;
}
Example #3
0
/*
 * This routine will assign vrings allocated in host/IO memory. Code in
 * virtio_ring.c, however, continues to access this IO memory as if it were
 * local memory, without IO accessors.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);

	info = va + _vr_size;
	magic = ioread32(&info->magic);

	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					     sizeof(struct vring_used_elem) *
					     le16_to_cpu(config.num));
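	/* allocate the used ring in local memory; its DMA address is handed to the device below */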
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	vdev->used_virt[index] = used;
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto unmap;
	}

	vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
			       (void __force *)va, vop_notify, callback,
			       name, used);
	if (!vq) {
		err = -ENOMEM;
		goto free_used;
	}

	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					    vdev->used_size[index],
					    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	writeq(vdev->used[index], &vqconfig->used_address);

	vq->priv = vdev;
	return vq;
del_vq:
	vring_del_virtqueue(vq);
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
unmap:
	vpdev->hw_ops->unmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}
Example #4
0
int usb_ept_queue_xfer(struct msm_endpoint *ept, struct usb_request *_req)
{
	unsigned long flags;
	struct msm_request *req = to_msm_request(_req);
	struct msm_request *last;
	struct usb_info *ui = ept->ui;
	struct ept_queue_item *item = req->item;
	unsigned length = req->req.length;

	if (length > 0x4000)
		return -EMSGSIZE;

	spin_lock_irqsave(&ui->lock, flags);

	if (req->busy) {
		req->req.status = -EBUSY;
		spin_unlock_irqrestore(&ui->lock, flags);
		INFO("usb_ept_queue_xfer() tried to queue busy request\n");
		return -EBUSY;
	}

	if (!ui->online && (ept->num != 0)) {
		req->req.status = -ESHUTDOWN;
		spin_unlock_irqrestore(&ui->lock, flags);
		INFO("usb_ept_queue_xfer() called while offline\n");
		return -ESHUTDOWN;
	}

	req->busy = 1;
	req->live = 0;
	req->next = 0;
	req->req.status = -EBUSY;

	req->dma = dma_map_single(NULL, req->req.buf, length,
				  (ept->flags & EPT_FLAG_IN) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* prepare the transaction descriptor item for the hardware */
	item->next = TERMINATE;
	item->info = INFO_BYTES(length) | INFO_IOC | INFO_ACTIVE;
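	/* up to four 4 KiB page pointers cover the mapped buffer (its length is capped at 16 KiB above) */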
	item->page0 = req->dma;
	item->page1 = (req->dma + 0x1000) & 0xfffff000;
	item->page2 = (req->dma + 0x2000) & 0xfffff000;
	item->page3 = (req->dma + 0x3000) & 0xfffff000;

	/* Add the new request to the end of the queue */
	last = ept->last;
	if (last) {
		/* Already requests in the queue. add us to the
		 * end, but let the completion interrupt actually
		 * start things going, to avoid hw issues
		 */
		last->next = req;

		/* only modify the hw transaction next pointer if
		 * that request is not live
		 */
		if (!last->live)
			last->item->next = req->item_dma;
	} else {
		/* queue was empty -- kick the hardware */
		ept->req = req;
		usb_ept_start(ept);
	}
	ept->last = req;

	spin_unlock_irqrestore(&ui->lock, flags);
	return 0;
}
Example #5
0
static int octeon_mgmt_open(struct net_device *netdev)
{
    struct octeon_mgmt *p = netdev_priv(netdev);
    int port = p->port;
    union cvmx_mixx_ctl mix_ctl;
    union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
    union cvmx_mixx_oring1 oring1;
    union cvmx_mixx_iring1 iring1;
    union cvmx_agl_gmx_prtx_cfg prtx_cfg;
    union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
    union cvmx_mixx_irhwm mix_irhwm;
    union cvmx_mixx_orhwm mix_orhwm;
    union cvmx_mixx_intena mix_intena;
    struct sockaddr sa;

    /* Allocate ring buffers.  */
    p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         GFP_KERNEL);
    if (!p->tx_ring)
        return -ENOMEM;
    p->tx_ring_handle =
        dma_map_single(p->dev, p->tx_ring,
                       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                       DMA_BIDIRECTIONAL);
    p->tx_next = 0;
    p->tx_next_clean = 0;
    p->tx_current_fill = 0;


    p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                         GFP_KERNEL);
    if (!p->rx_ring)
        goto err_nomem;
    p->rx_ring_handle =
        dma_map_single(p->dev, p->rx_ring,
                       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                       DMA_BIDIRECTIONAL);

    p->rx_next = 0;
    p->rx_next_fill = 0;
    p->rx_current_fill = 0;

    octeon_mgmt_reset_hw(p);

    mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

    /* Bring it out of reset if needed. */
    if (mix_ctl.s.reset) {
        mix_ctl.s.reset = 0;
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
        do {
            mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
        } while (mix_ctl.s.reset);
    }

    agl_gmx_inf_mode.u64 = 0;
    agl_gmx_inf_mode.s.en = 1;
    cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

    oring1.u64 = 0;
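    /* ring base addresses are programmed in units of 8 bytes, hence the shift by 3 */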
    oring1.s.obase = p->tx_ring_handle >> 3;
    oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
    cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

    iring1.u64 = 0;
    iring1.s.ibase = p->rx_ring_handle >> 3;
    iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
    cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

    /* Disable packet I/O. */
    prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
    prtx_cfg.s.en = 0;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

    memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
    octeon_mgmt_set_mac_address(netdev, &sa);

    octeon_mgmt_change_mtu(netdev, netdev->mtu);

    /*
     * Enable the port HW. Packets are not allowed until
     * cvmx_mgmt_port_enable() is called.
     */
    mix_ctl.u64 = 0;
    mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
    mix_ctl.s.en = 1;           /* Enable the port */
    mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
    /* MII CB-request FIFO programmable high watermark */
    mix_ctl.s.mrq_hwm = 1;
    cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
            || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
        /*
         * Force compensation values, as they are not
         * determined properly by HW
         */
        union cvmx_agl_gmx_drv_ctl drv_ctl;

        drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
        if (port) {
            drv_ctl.s.byp_en1 = 1;
            drv_ctl.s.nctl1 = 6;
            drv_ctl.s.pctl1 = 6;
        } else {
            drv_ctl.s.byp_en = 1;
            drv_ctl.s.nctl = 6;
            drv_ctl.s.pctl = 6;
        }
        cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
    }

    octeon_mgmt_rx_fill_ring(netdev);

    /* Clear statistics. */
    /* Clear on read. */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

    cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
    cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
    cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

    /* Clear any pending interrupts */
    cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

    if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
                    netdev)) {
        dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
        goto err_noirq;
    }

    /* Interrupt every single RX packet */
    mix_irhwm.u64 = 0;
    mix_irhwm.s.irhwm = 0;
    cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

    /* Interrupt when we have 1 or more packets to clean.  */
    mix_orhwm.u64 = 0;
    mix_orhwm.s.orhwm = 1;
    cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

    /* Enable receive and transmit interrupts */
    mix_intena.u64 = 0;
    mix_intena.s.ithena = 1;
    mix_intena.s.othena = 1;
    cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);


    /* Enable packet I/O. */

    rxx_frm_ctl.u64 = 0;
    rxx_frm_ctl.s.pre_align = 1;
    /*
     * When set, disables the length check for non-min sized pkts
     * with padding in the client data.
     */
    rxx_frm_ctl.s.pad_len = 1;
    /* When set, disables the length check for VLAN pkts */
    rxx_frm_ctl.s.vlan_len = 1;
    /* When set, PREAMBLE checking is  less strict */
    rxx_frm_ctl.s.pre_free = 1;
    /* Control Pause Frames can match station SMAC */
    rxx_frm_ctl.s.ctl_smac = 0;
    /* Control Pause Frames can match globally assign Multicast address */
    rxx_frm_ctl.s.ctl_mcst = 1;
    /* Forward pause information to TX block */
    rxx_frm_ctl.s.ctl_bck = 1;
    /* Drop Control Pause Frames */
    rxx_frm_ctl.s.ctl_drp = 1;
    /* Strip off the preamble */
    rxx_frm_ctl.s.pre_strp = 1;
    /*
     * This port is configured to send PREAMBLE+SFD to begin every
     * frame.  GMX checks that the PREAMBLE is sent correctly.
     */
    rxx_frm_ctl.s.pre_chk = 1;
    cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

    /* Enable the AGL block */
    agl_gmx_inf_mode.u64 = 0;
    agl_gmx_inf_mode.s.en = 1;
    cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

    /* Configure the port duplex and enables */
    prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
    prtx_cfg.s.tx_en = 1;
    prtx_cfg.s.rx_en = 1;
    prtx_cfg.s.en = 1;
    p->last_duplex = 1;
    prtx_cfg.s.duplex = p->last_duplex;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

    p->last_link = 0;
    netif_carrier_off(netdev);

    if (octeon_mgmt_init_phy(netdev)) {
        dev_err(p->dev, "Cannot initialize PHY.\n");
        goto err_noirq;
    }

    netif_wake_queue(netdev);
    napi_enable(&p->napi);

    return 0;
err_noirq:
    octeon_mgmt_reset_hw(p);
    dma_unmap_single(p->dev, p->rx_ring_handle,
                     ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                     DMA_BIDIRECTIONAL);
    kfree(p->rx_ring);
err_nomem:
    dma_unmap_single(p->dev, p->tx_ring_handle,
                     ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                     DMA_BIDIRECTIONAL);
    kfree(p->tx_ring);
    return -ENOMEM;
}
Example #6
0
int msm_bam_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct bam_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb = NULL;
	dma_addr_t dma_address;
	struct tx_pkt_info *pkt;

	if (id >= BAM_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!bam_mux_initialized)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&bam_ch[id].lock, flags);
	if (!bam_ch_is_open(id)) {
		spin_unlock_irqrestore(&bam_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, bam_ch[id].status);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&bam_ch[id].lock, flags);

	/* if the skb does not have enough tailroom for padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb() plus memcpy() is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			return -ENOMEM;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct bam_mux_hdr *)skb_push(skb, sizeof(struct bam_mux_hdr));

	/* the caller should allocate room for the hdr and padding;
	   the hdr is fine, the padding is tricky */
	hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
	hdr->cmd = BAM_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
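	/* pad the payload out to a 4-byte boundary */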
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);

	pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
	if (pkt == NULL) {
		pr_err("%s: mem alloc for tx_pkt_info failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		return -ENOMEM;
	}

	dma_address = dma_map_single(NULL, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (!dma_address) {
		pr_err("%s: dma_map_single() failed\n", __func__);
		if (new_skb)
			dev_kfree_skb_any(new_skb);
		kfree(pkt);
		return -ENOMEM;
	}
	pkt->skb = skb;
	pkt->dma_address = dma_address;
	pkt->is_cmd = 0;
	INIT_WORK(&pkt->work, bam_mux_write_done);
	rc = sps_transfer_one(bam_tx_pipe, dma_address, skb->len,
				pkt, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
	return rc;
}
Example #7
0
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;

	dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
						dma_to_memory ? "rx" : "tx");
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		dev_err(tup->uport.dev,
			"DMA channel alloc failed: %d\n", ret);
		return ret;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				 &dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
	} else {
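		/* TX reuses the UART xmit circular buffer, so just map it for the device */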
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		dma_buf = tup->uport.state->xmit.buf;
	}

	if (dma_to_memory) {
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
	} else {
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		goto scrub;
	}

	if (dma_to_memory) {
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}
	return 0;

scrub:
	dma_release_channel(dma_chan);
	return ret;
}
Example #8
0
/**
 * arc_emac_rx - processing of Rx packets.
 * @ndev:	Pointer to the network device.
 * @budget:	How many BDs to process on 1 call.
 *
 * returns:	Number of processed BDs
 *
 * Iterate through Rx BDs and deliver received packets to the upper layer.
 */
static int arc_emac_rx(struct net_device *ndev, int budget)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct net_device_stats *stats = &ndev->stats;
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		unsigned int pktlen, info = le32_to_cpu(rxbd->info);
		struct sk_buff *skb;
		dma_addr_t addr;

		if (unlikely((info & OWN_MASK) == FOR_EMAC))
			break;

		/* Make a note that we saw a packet at this BD.
		 * So next time, driver starts from this + 1
		 */
		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;

		if (unlikely((info & FIRST_OR_LAST_MASK) !=
			     FIRST_OR_LAST_MASK)) {
			/* We pre-allocate buffers of MTU size so incoming
			 * packets won't be split/chained.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "incomplete packet received\n");

			/* Return ownership to EMAC */
			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
			stats->rx_errors++;
			stats->rx_length_errors++;
			continue;
		}

		pktlen = info & LEN_MASK;
		stats->rx_packets++;
		stats->rx_bytes += pktlen;
		skb = rx_buff->skb;
		skb_put(skb, pktlen);
		skb->dev = ndev;
		skb->protocol = eth_type_trans(skb, ndev);

		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);

		/* Prepare the BD for next cycle */
		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb)) {
			stats->rx_errors++;
			/* Because receive_skb is below, increment rx_dropped */
			stats->rx_dropped++;
			continue;
		}

		/* receive_skb only if new skb was allocated to avoid holes */
		netif_receive_skb(skb);

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			stats->rx_errors++;
			continue;
		}
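		/* remember the new mapping so it can be unmapped on the next pass */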
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
	}

	return work_done;
}
Example #9
0
/**
 * arc_emac_open - Open the network device.
 * @ndev:	Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the EMAC device and starts the Tx queue.
 * It also connects to the phy device.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = priv->phy_dev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	phy_dev->advertising &= phy_dev->supported;

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		     (RX_BD_NUM << 24) |	/* RX BD table length */
		     (TX_BD_NUM << 16) |	/* TX BD table length */
		     TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start_aneg(priv->phy_dev);

	netif_start_queue(ndev);

	return 0;
}
Example #10
0
static int tegra_startup(struct uart_port *u)
{
    struct tegra_uart_port *t = container_of(u,
                                struct tegra_uart_port, uport);
    int ret = 0;

    t = container_of(u, struct tegra_uart_port, uport);
    sprintf(t->port_name, "tegra_uart_%d", u->line);

    t->use_tx_dma = false;
    if (!TX_FORCE_PIO) {
        t->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
                                               "uart_tx_%d", u->line);
        if (t->tx_dma)
            t->use_tx_dma = true;
        else
            pr_err("%s: failed to allocate TX DMA.\n", __func__);
    }
    if (t->use_tx_dma) {
        t->tx_dma_req.instance = u->line;
        t->tx_dma_req.complete = tegra_tx_dma_complete_callback;
        t->tx_dma_req.to_memory = 0;

        t->tx_dma_req.dest_addr = (unsigned long)t->uport.mapbase;
        t->tx_dma_req.dest_wrap = 4;
        t->tx_dma_req.source_wrap = 0;
        t->tx_dma_req.source_bus_width = 32;
        t->tx_dma_req.dest_bus_width = 8;
        t->tx_dma_req.req_sel = dma_req_sel[t->uport.line];
        t->tx_dma_req.dev = t;
        t->tx_dma_req.size = 0;
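        /* map the UART xmit circular buffer for TX DMA */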
        t->xmit_dma_addr = dma_map_single(t->uport.dev,
                                          t->uport.state->xmit.buf, UART_XMIT_SIZE,
                                          DMA_TO_DEVICE);
    }
    t->tx_in_progress = 0;

    t->use_rx_dma = false;
    if (!RX_FORCE_PIO) {
        if (!tegra_uart_init_rx_dma(t))
            t->use_rx_dma = true;
    }

    ret = tegra_uart_hw_init(t);
    if (ret)
        goto fail;

    dev_dbg(u->dev, "Requesting IRQ %d\n", u->irq);
    msleep(1);

    ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
                      t->port_name, t);
    if (ret) {
        dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
        goto fail;
    }
    dev_dbg(u->dev,"Started UART port %d\n", u->line);

    return 0;
fail:
    dev_err(u->dev, "Tegra UART startup failed\n");
    return ret;
}
Example #11
0
struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;
	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skb buffer.
	 */
	frame_size = queue->data_size + queue->desc_size + queue->winfo_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested head room and size the data area so the
	 * requested tail room remains available as well.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
		dma_addr_t skb_dma;

		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
			dev_kfree_skb_any(skb);
			return NULL;
		}

		skbdesc->skb_dma = skb_dma;
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
Example #12
0
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
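	/* ring base addresses are programmed in units of 8 bytes, hence the shift by 3 */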
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (netdev->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);

		/* MII clocks counts are based on the 125Mhz
		 * reference, which has an 8nS period. So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small.  Recommended
		 * workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is  less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (netdev->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator. The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(netdev->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
Example #13
0
static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
{
	struct davinci_spi *davinci_spi;
	int int_status = 0;
	int count;
	u8 conv = 1;
	u8 tmp;
	u32 data1_reg_val;
	struct davinci_spi_dma *davinci_spi_dma;
	int word_len, data_type, ret;
	unsigned long tx_reg, rx_reg;
	struct davinci_spi_platform_data *pdata;
	struct device *sdev;

	davinci_spi = spi_master_get_devdata(spi->master);
	pdata = davinci_spi->pdata;
	sdev = davinci_spi->bitbang.master->dev.parent;

	BUG_ON(davinci_spi->dma_channels == NULL);

	davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];

	tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
	rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;

	/* used for macro defs */
	davinci_spi->tx = t->tx_buf;
	davinci_spi->rx = t->rx_buf;

	/* convert len to words based on bits_per_word */
	conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
	davinci_spi->count = t->len / conv;

	INIT_COMPLETION(davinci_spi->done);

	init_completion(&davinci_spi_dma->dma_rx_completion);
	init_completion(&davinci_spi_dma->dma_tx_completion);

	word_len = conv * 8;

	if (word_len <= 8)
		data_type = DAVINCI_DMA_DATA_TYPE_S8;
	else if (word_len <= 16)
		data_type = DAVINCI_DMA_DATA_TYPE_S16;
	else if (word_len <= 32)
		data_type = DAVINCI_DMA_DATA_TYPE_S32;
	else
		return -1;

	ret = davinci_spi_bufs_prep(spi, davinci_spi);
	if (ret)
		return ret;

	/* Put delay val if required */
	iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
			(pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
			davinci_spi->base + SPIDELAY);

	count = davinci_spi->count;	/* the number of elements */
	data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;

	if (!spi->controller_data)
		tmp = 0x1 << (1 - spi->chip_select);
	else
		tmp = CS_DEFAULT;

	data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;

	data1_reg_val |= spi->chip_select << SPIDAT1_DFSEL_SHIFT;
	/* the number of elements */

	clear_io_bits(davinci_spi->base + SPIDEF, 1<<spi->chip_select);

	data1_reg_val |= spi->chip_select << SPIDAT1_DFSEL_SHIFT;

	/* disable all interrupts for dma transfers */
	clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
	/* Disable SPI to write configuration bits in SPIDAT */
	clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
	iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
	/* Enable SPI */
	set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);

	while ((ioread32(davinci_spi->base + SPIBUF)
				& SPIBUF_RXEMPTY_MASK) == 0)
		cpu_relax();


	if (t->tx_buf != NULL) {
		t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes TX buffer\n",
					count);
			return -1;
		}
		edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, data_type,
			count, 1, 0, ASYNC);
		edma_set_dest(davinci_spi_dma->dma_tx_channel,
				tx_reg, INCR, W8BIT);
		edma_set_src(davinci_spi_dma->dma_tx_channel,
				t->tx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
		edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
	} else {
		/* We need TX clocking for RX transaction */
		t->tx_dma = dma_map_single(&spi->dev,
				(void *)davinci_spi->tmp_buf, count + 1,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes TX tmp buffer\n",
					count);
			return -1;
		}
		edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
				data_type, count + 1, 1, 0, ASYNC);
		edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg,
				INCR, W8BIT);
		edma_set_src(davinci_spi_dma->dma_tx_channel,
				t->tx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_tx_channel,
				data_type, 0);
		edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
	}

	if (t->rx_buf != NULL) {
		/* initiate transaction */
		iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);

		t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
					count);
			if (t->tx_buf != NULL)
				dma_unmap_single(NULL, t->tx_dma,
						 count, DMA_TO_DEVICE);
			return -1;
		}
		edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, data_type,
				count, 1, 0, ASYNC);
		edma_set_src(davinci_spi_dma->dma_rx_channel,
				rx_reg, INCR, W8BIT);
		edma_set_dest(davinci_spi_dma->dma_rx_channel,
				t->rx_dma, INCR, W8BIT);
		edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
		edma_set_dest_index(davinci_spi_dma->dma_rx_channel, data_type, 0);
	}

	if ((t->tx_buf != NULL) || (t->rx_buf != NULL))
		edma_start(davinci_spi_dma->dma_tx_channel);

	if (t->rx_buf != NULL)
		edma_start(davinci_spi_dma->dma_rx_channel);

	if ((t->rx_buf != NULL) || (t->tx_buf != NULL))
		davinci_spi_set_dma_req(spi, 1);

	if (t->tx_buf != NULL)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_tx_completion);

	if (t->rx_buf != NULL)
		wait_for_completion_interruptible(
				&davinci_spi_dma->dma_rx_completion);

	if (t->tx_buf != NULL)
		dma_unmap_single(NULL, t->tx_dma, count, DMA_TO_DEVICE);
	else
		dma_unmap_single(NULL, t->tx_dma, count + 1, DMA_TO_DEVICE);

	if (t->rx_buf != NULL)
		dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);

	/*
	 * Check for bit error, desync error, parity error, timeout error and
	 * receive overflow errors
	 */
	int_status = ioread32(davinci_spi->base + SPIFLG);

	ret = davinci_spi_check_error(davinci_spi, int_status);
	if (ret != 0)
		return ret;

	/* SPI Framework maintains the count only in bytes so convert back */
	davinci_spi->count *= conv;

	return t->len;
}
Example #14
0
static int
talitos_process(device_t dev, struct cryptop *crp, int hint)
{
	int i, err = 0, ivsize;
	struct talitos_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t iv;
	struct talitos_session *ses;
	struct talitos_desc *td;
	unsigned long flags;
	/* descriptor mappings */
	int hmac_key, hmac_data, cipher_iv, cipher_key,
		in_fifo, out_fifo, cipher_iv_out;
	static int chsel = -1;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		return EINVAL;
	}
	crp->crp_etype = 0;
	if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		return EINVAL;
	}

	ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];

        /* enter the channel scheduler */
	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

	/* reuse channel that already had/has requests for the required EU */
	for (i = 0; i < sc->sc_num_channels; i++) {
		if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
			break;
	}
	if (i == sc->sc_num_channels) {
		/*
		 * haven't seen this algo in the last sc_num_channels requests
		 * or more; use round robin in this case.
		 * NB: sc->sc_num_channels must be a power of 2
		 */
		chsel = (chsel + 1) & (sc->sc_num_channels - 1);
	} else {
		/*
		 * matches channel with same target execution unit;
		 * use same channel in this case
		 */
		chsel = i;
	}
	sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;

        /* release the channel scheduler lock */
	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);

	/* acquire the selected channel fifo lock */
	spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);

	/* find and reserve next available descriptor-cryptop pair */
	for (i = 0; i < sc->sc_chfifo_len; i++) {
		if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
			/*
			 * ensure correct descriptor formation by
			 * avoiding inadvertently setting "optional" entries
			 * e.g. not using "optional" dptr2 for MD/HMAC descs
			 */
			memset(&sc->sc_chnfifo[chsel][i].cf_desc,
				0, sizeof(*td));
			/* reserve it with done notification request bit */
			sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
				TALITOS_DONE_NOTIFY;
			break;
		}
	}
	spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);

	if (i == sc->sc_chfifo_len) {
		/* fifo full */
		err = ERESTART;
		goto errout;
	}

	td = &sc->sc_chnfifo[chsel][i].cf_desc;
	sc->sc_chnfifo[chsel][i].cf_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;
	/* prevent compiler warning */
	hmac_key = 0;
	hmac_data = 0;
	if (crd2 == NULL) {
		td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
		/* assign descriptor dword ptr mappings for this desc. type */
		cipher_iv = 1;
		cipher_key = 2;
		in_fifo = 3;
		cipher_iv_out = 5;
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			out_fifo = 5;
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			out_fifo = 4;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
			err = EINVAL;
			goto errout;
		}
	} else {
		if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
			td->hdr |= TD_TYPE_IPSEC_ESP;
		} else {
			DPRINTF("unimplemented: multiple descriptor ipsec\n");
			err = EINVAL;
			goto errout;
		}
		/* assign descriptor dword ptr mappings for this desc. type */
		hmac_key = 0;
		hmac_data = 1;
		cipher_iv = 2;
		cipher_key = 3;
		in_fifo = 4;
		out_fifo = 5;
		cipher_iv_out = 6;
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd1->crd_alg == CRYPTO_MD5 ||
                     crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		     crd2->crd_alg == CRYPTO_3DES_CBC ||
		     crd2->crd_alg == CRYPTO_AES_CBC ||
		     crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		     crd1->crd_alg == CRYPTO_ARC4 ||
		     crd1->crd_alg == CRYPTO_3DES_CBC ||
		     crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
                     crd2->crd_alg == CRYPTO_MD5 ||
                     crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/* We cannot order the SEC as requested */
			printk("%s: cannot do the order\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
	}
	/* assign in_fifo and out_fifo based on input/output struct type */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk("%s: skb frags unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = skb->len;
		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = skb->len;
		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
			skb->len, DMA_TO_DEVICE);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		struct uio *uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk("%s: iov frags unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
			err = EINVAL;
			goto errout;
		}
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		/* crp_olen is never set; always use crp_ilen */
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			uiop->uio_iov->iov_base,
			crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	} else {
		/* using contig buffers */
		td->ptr[in_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[in_fifo].len = crp->crp_ilen;
		td->ptr[out_fifo].ptr = dma_map_single(NULL,
			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
		td->ptr[out_fifo].len = crp->crp_ilen;
	}
	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			td->hdr |= TALITOS_MODE0_DEU_3DES;
			/* FALLTHROUGH */
		case CRYPTO_DES_CBC:
			td->hdr |= TALITOS_SEL0_DEU
				|  TALITOS_MODE0_DEU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_DEU_ENC;
			ivsize = 2*sizeof(u_int32_t);
			DPRINTF("%cDES ses %d ch %d len %d\n",
				(td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		case CRYPTO_AES_CBC:
			td->hdr |= TALITOS_SEL0_AESU
				|  TALITOS_MODE0_AESU_CBC;
			if (enccrd->crd_flags & CRD_F_ENCRYPT)
				td->hdr |= TALITOS_MODE0_AESU_ENC;
			ivsize = 4*sizeof(u_int32_t);
			DPRINTF("AES  ses %d ch %d len %d\n",
				(u32)TALITOS_SESSION(crp->crp_sid),
				chsel, td->ptr[in_fifo].len);
			break;
		default:
			printk("%s: unimplemented enccrd->crd_alg %d\n",
					device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
			err = EINVAL;
			goto errout;
		}
		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			td->hdr |= TALITOS_DIR_OUTBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				iv = (caddr_t) ses->ses_iv;
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
		} else {
			td->hdr |= TALITOS_DIR_INBOUND;
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				iv = enccrd->crd_iv;
				bcopy(enccrd->crd_iv, iv, ivsize);
			} else {
				iv = (caddr_t) ses->ses_iv;
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
		}
		td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
			DMA_TO_DEVICE);
		td->ptr[cipher_iv].len = ivsize;
		/*
		 * we don't need the cipher iv out length/pointer
		 * field to do ESP IPsec. Therefore we set the len field as 0,
		 * which tells the SEC not to do anything with this len/ptr
		 * field. Previously, when the length/pointer was pointing to the
		 * iv, it gave us corruption of packets.
		 */
		td->ptr[cipher_iv_out].len = 0;
	}
	if (enccrd && maccrd) {
		/* this is ipsec only for now */
		td->hdr |= TALITOS_SEL1_MDEU
			|  TALITOS_MODE1_MDEU_INIT
			|  TALITOS_MODE1_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case	CRYPTO_MD5:
				td->hdr |= TALITOS_MODE1_MDEU_MD5;
				break;
			case	CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
				break;
			case	CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1;
				break;
			case	CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				printk("%s: cannot do the order\n",
						device_get_nameunit(sc->sc_cdev));
				err = EINVAL;
				goto errout;
		}
		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			/*
			 * The offset from hash data to the start of
			 * crypt data is the difference in the skips.
			 */
			/* ipsec only for now */
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
			td->ptr[in_fifo].ptr  += enccrd->crd_skip;
			td->ptr[in_fifo].len  =  enccrd->crd_len;
			td->ptr[out_fifo].ptr += enccrd->crd_skip;
			td->ptr[out_fifo].len =  enccrd->crd_len;
			/* bytes of HMAC to postpend to ciphertext */
			td->ptr[out_fifo].extent =  ses->ses_mlen;
			td->ptr[hmac_data].ptr += maccrd->crd_skip;
			td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
		}
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
					device_get_nameunit(sc->sc_cdev));
		}
	}
	if (!enccrd && maccrd) {
		/* single MD5 or SHA */
		td->hdr |= TALITOS_SEL0_MDEU
				|  TALITOS_MODE0_MDEU_INIT
				|  TALITOS_MODE0_MDEU_PAD;
		switch (maccrd->crd_alg) {
			case	CRYPTO_MD5:
				td->hdr |= TALITOS_MODE0_MDEU_MD5;
				DPRINTF("MD5  ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case	CRYPTO_MD5_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
				break;
			case	CRYPTO_SHA1:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1;
				DPRINTF("SHA1 ses %d ch %d len %d\n",
					(u32)TALITOS_SESSION(crp->crp_sid),
					chsel, td->ptr[in_fifo].len);
				break;
			case	CRYPTO_SHA1_HMAC:
				td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
				break;
			default:
				/* We cannot order the SEC as requested */
				DPRINTF("cannot do the order\n");
				err = EINVAL;
				goto errout;
		}

		if (crp->crp_flags & CRYPTO_F_IOV)
			td->ptr[out_fifo].ptr += maccrd->crd_inject;

		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
			td->ptr[hmac_key].ptr = dma_map_single(NULL,
				ses->ses_hmac, ses->ses_hmac_len,
				DMA_TO_DEVICE);
			td->ptr[hmac_key].len = ses->ses_hmac_len;
		}
	}
	else {
		/* using process key (session data has duplicate) */
		td->ptr[cipher_key].ptr = dma_map_single(NULL,
			enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
			DMA_TO_DEVICE);
		td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
	}
	/* descriptor complete - GO! */
	return talitos_submit(sc, td, chsel);

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
Example #15
0
/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
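		/* chain each descriptor to the next, wrapping at the end to form a ring */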
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN |
					  CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
Example #16
0
void mali_meson_poweron(void)
{
    unsigned long flags;
    u32 p, p_aligned;
    dma_addr_t p_phy;
    int i;
    unsigned int_mask;
    
    if ((last_power_mode != -1) && (last_power_mode != MALI_POWER_MODE_DEEP_SLEEP)) {
        return;
    }

    if (READ_MALI_REG(MALI_PP_PP_VERSION) != MALI_PP_PP_VERSION_MAGIC) {
        printk("mali_meson_poweron: Mali APB bus access failed.");
        return;
    }

    if (READ_MALI_REG(MALI_MMU_DTE_ADDR) != 0) {
        printk("mali_meson_poweron: Mali is not really powered off.");
        return;
    }

    p = (u32)kcalloc(4096 * 4, 1, GFP_KERNEL);
    if (!p) {
        printk("mali_meson_poweron: NOMEM in meson_poweron\n");
        return;
    }

    p_aligned = __ALIGN_MASK(p, 4096);

    /* DTE */
    *(u32 *)(p_aligned) = (virt_to_phys((void *)p_aligned) + OFFSET_MMU_PTE) | MMU_FLAG_DTE_PRESENT;
    /* PTE */
    for (i=0; i<1024; i++) {
        *(u32 *)(p_aligned + OFFSET_MMU_PTE + i*4) = 
            (virt_to_phys((void *)p_aligned) + OFFSET_MMU_VIRTUAL_ZERO + 4096 * i) |
            MMU_FLAG_PTE_PAGE_PRESENT |
            MMU_FLAG_PTE_RD_PERMISSION;
    }

    /* command & data */
    memcpy((void *)(p_aligned + OFFSET_MMU_VIRTUAL_ZERO), poweron_data, 4096);

    p_phy = dma_map_single(NULL, (void *)p_aligned, 4096 * 3, DMA_TO_DEVICE);
    
    /* Set up Mali GP MMU */
    WRITE_MALI_REG(MALI_MMU_DTE_ADDR, p_phy);
    WRITE_MALI_REG(MALI_MMU_CMD, 0);

    if ((READ_MALI_REG(MALI_MMU_STATUS) & 1) != 1) {
        printk("mali_meson_poweron: MMU enabling failed.\n");
    }

    /* Set up Mali command registers */
    WRITE_MALI_REG(MALI_APB_GP_VSCL_START, 0);
    WRITE_MALI_REG(MALI_APB_GP_VSCL_END, 0x38);
    WRITE_MALI_REG(MALI_APB_GP_INT_MASK, 0x3ff);

    spin_lock_irqsave(&lock, flags);

    int_mask = READ_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK);

    /* Set up ARM Mali interrupt */
    WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
    SET_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

    /* Start GP */
    WRITE_MALI_REG(MALI_APB_GP_CMD, 1);

    for (i = 0; i<100; i++)
        udelay(500);

    /* check Mali GP interrupt */
    if (READ_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT) & (1<<16)) {
        printk("mali_meson_poweron: Interrupt received.\n");
    } else {
        printk("mali_meson_poweron: No interrupt received.\n");
    }

    WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
    CLEAR_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

    /* force reset GP */
    WRITE_MALI_REG(MALI_APB_GP_CMD, 1 << 5);

    /* stop MMU paging and reset */
    WRITE_MALI_REG(MALI_MMU_CMD, 1);
    WRITE_MALI_REG(MALI_MMU_CMD, 1 << 6);

    WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK, int_mask);

    spin_unlock_irqrestore(&lock, flags);

    dma_unmap_single(NULL, p_phy, 4096 * 3, DMA_TO_DEVICE);

    kfree((void *)p);
}
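The mapping above passes NULL as the device, which older ARM kernels tolerated; current kernels require a real struct device so the right dma_map_ops and mask are used. A hedged sketch of the same mapping with a device pointer (the name mali_dev is hypothetical):

    /* mali_dev is a placeholder for the platform device's struct device */
    p_phy = dma_map_single(mali_dev, (void *)p_aligned, 4096 * 3,
                           DMA_TO_DEVICE);
    if (dma_mapping_error(mali_dev, p_phy)) {
        kfree((void *)p);
        return;
    }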
/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);

	phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(priv->bus->dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

 out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
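iwl_enqueue_hcmd() records the header mapping with dma_unmap_addr_set()/dma_unmap_len_set(); a sketch of the matching release the response path would perform on out_meta, using only those stored values (placement in the response handler is assumed):

	/* undo the command-header mapping recorded in out_meta */
	dma_unmap_single(priv->bus->dev,
			 dma_unmap_addr(out_meta, mapping),
			 dma_unmap_len(out_meta, len),
			 DMA_BIDIRECTIONAL);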
static int greth_init_rings(struct greth_private *greth)
{
	struct sk_buff *skb;
	struct greth_bd *rx_bd, *tx_bd;
	u32 dma_addr;
	int i;

	rx_bd = greth->rx_bd_base;
	tx_bd = greth->tx_bd_base;

	/* Initialize descriptor rings and buffers */
	if (greth->gbit_mac) {

		for (i = 0; i < GRETH_RXBD_NUM; i++) {
			skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
			if (skb == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			dma_addr = dma_map_single(greth->dev,
						  skb->data,
						  MAX_FRAME_SIZE+NET_IP_ALIGN,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth->rx_skbuff[i] = skb;
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}

	} else {

		/* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
		for (i = 0; i < GRETH_RXBD_NUM; i++) {

			greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->rx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->rx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&rx_bd[i].addr, dma_addr);
			greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
		}
		for (i = 0; i < GRETH_TXBD_NUM; i++) {

			greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

			if (greth->tx_bufs[i] == NULL) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Error allocating DMA ring.\n");
				goto cleanup;
			}

			dma_addr = dma_map_single(greth->dev,
						  greth->tx_bufs[i],
						  MAX_FRAME_SIZE,
						  DMA_TO_DEVICE);

			if (dma_mapping_error(greth->dev, dma_addr)) {
				if (netif_msg_ifup(greth))
					dev_err(greth->dev, "Could not create initial DMA mapping\n");
				goto cleanup;
			}
			greth_write_bd(&tx_bd[i].addr, dma_addr);
			greth_write_bd(&tx_bd[i].stat, 0);
		}
	}
	greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
		       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

	/* Initialize pointers. */
	greth->rx_cur = 0;
	greth->tx_next = 0;
	greth->tx_last = 0;
	greth->tx_free = GRETH_TXBD_NUM;

	/* Initialize descriptor base address */
	GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
	GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

	return 0;

cleanup:
	greth_clean_rings(greth);
	return -ENOMEM;
}
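greth_clean_rings() is only referenced above; a plausible sketch of the RX unmapping it has to mirror for the gigabit case, assuming the same fields, helpers and constants used in greth_init_rings():

	struct greth_bd *rx_bd = greth->rx_bd_base;
	int i;

	for (i = 0; i < GRETH_RXBD_NUM; i++) {
		if (!greth->rx_skbuff[i])
			continue;
		/* release the streaming mapping set up at ring init */
		dma_unmap_single(greth->dev,
				 greth_read_bd(&rx_bd[i].addr),
				 MAX_FRAME_SIZE + NET_IP_ALIGN,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(greth->rx_skbuff[i]);
		greth->rx_skbuff[i] = NULL;
	}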
Exemple #19
0
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry = lp->next_tx;

	if (sonic_debug > 2)
		;	/* verbose debug printk elided in this excerpt */

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (!laddr) {
		/* error printk elided in this excerpt */
		dev_kfree_skb(skb);
		return NETDEV_TX_BUSY;
	}

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	/*
	 * Must set tx_skb[entry] only after clearing status, and
	 * before clearing EOL and before stopping queue
	 */
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
				  sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		if (sonic_debug > 3)
			;	/* debug printk elided */
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else netif_start_queue(dev);

	if (sonic_debug > 2)
		;	/* debug printk elided */

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	return NETDEV_TX_OK;
}
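The transmit path above stashes the mapping, length and skb per slot; a hedged sketch of how the TX-completion interrupt would release them, using only the lp->tx_* fields stored by sonic_send_packet():

	/* in the TX-completion interrupt, release what was stored above */
	dma_unmap_single(lp->device, lp->tx_laddr[entry],
			 lp->tx_len[entry], DMA_TO_DEVICE);
	dev_kfree_skb_irq(lp->tx_skb[entry]);
	lp->tx_skb[entry] = NULL;	/* slot is free for the next packet */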
Exemple #20
0
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
Exemple #21
0
/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {

		if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
			if (data->blksz & 0x3) {
				pr_debug("Unsupported block size\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
			if (data->flags & MMC_DATA_STREAM) {
				pr_debug("Stream commands not supported\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
		mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
		mr |= (block_length << 16);
		mr |= AT91_MCI_PDCMODE;
		at91_mci_write(host, AT91_MCI_MR, mr);

		if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
			at91_mci_write(host, AT91_MCI_BLKR,
				AT91_MCI_BLKR_BCNT(blocks) |
				AT91_MCI_BLKR_BLKLEN(block_length));

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				/*
				 * at91mci MCI1 rev2xx Data Write Operation and
				 * number of bytes erratum
				 */
				if (at91mci_is_mci1rev2xx())
					if (host->total_length < 12)
						host->total_length = 12;

				host->buffer = kmalloc(host->total_length, GFP_KERNEL);
				if (!host->buffer) {
					pr_debug("Can't alloc tx buffer\n");
					cmd->error = -ENOMEM;
					mmc_request_done(host->mmc, host->request);
					return;
				}

				at91_mci_sg_to_dma(host, data);

				host->physical_address = dma_map_single(NULL,
						host->buffer, host->total_length,
						DMA_TO_DEVICE);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
						host->total_length : host->total_length / 4);

				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}
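The bounce buffer mapped for the write path above has to be released once the PDC transfer finishes; a minimal sketch of that teardown, assuming the same host fields (the completion handler itself is not shown here, and the NULL device mirrors the example's own usage):

	/* once the PDC signals completion of the TX transfer */
	dma_unmap_single(NULL, host->physical_address,
			 host->total_length, DMA_TO_DEVICE);
	kfree(host->buffer);
	host->buffer = NULL;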
Exemple #22
0
/**
 * ibmvtpm_crq_process - Process responded crq
 * @crq		crq to be processed
 * @ibmvtpm	vtpm device struct
 *
 * Return value:
 *	Nothing
 */
static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
				struct ibmvtpm_dev *ibmvtpm)
{
	int rc = 0;

	switch (crq->valid) {
	case VALID_INIT_CRQ:
		switch (crq->msg) {
		case INIT_CRQ_RES:
			dev_info(ibmvtpm->dev, "CRQ initialized\n");
			rc = ibmvtpm_crq_send_init_complete(ibmvtpm);
			if (rc)
				dev_err(ibmvtpm->dev, "Unable to send CRQ init complete rc=%d\n", rc);
			return;
		case INIT_CRQ_COMP_RES:
			dev_info(ibmvtpm->dev,
				 "CRQ initialization completed\n");
			return;
		default:
			dev_err(ibmvtpm->dev, "Unknown crq message type: %d\n", crq->msg);
			return;
		}
	case IBMVTPM_VALID_CMD:
		switch (crq->msg) {
		case VTPM_GET_RTCE_BUFFER_SIZE_RES:
			if (be16_to_cpu(crq->len) <= 0) {
				dev_err(ibmvtpm->dev, "Invalid rtce size\n");
				return;
			}
			ibmvtpm->rtce_size = be16_to_cpu(crq->len);
			ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
						    GFP_ATOMIC);
			if (!ibmvtpm->rtce_buf) {
				dev_err(ibmvtpm->dev, "Failed to allocate memory for rtce buffer\n");
				return;
			}

			ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
				ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
				DMA_BIDIRECTIONAL);

			if (dma_mapping_error(ibmvtpm->dev,
					      ibmvtpm->rtce_dma_handle)) {
				kfree(ibmvtpm->rtce_buf);
				ibmvtpm->rtce_buf = NULL;
				dev_err(ibmvtpm->dev, "Failed to dma map rtce buffer\n");
			}

			return;
		case VTPM_GET_VERSION_RES:
			ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
			return;
		case VTPM_TPM_COMMAND_RES:
			/* len of the data in rtce buffer */
			ibmvtpm->res_len = be16_to_cpu(crq->len);
			ibmvtpm->tpm_processing_cmd = false;
			wake_up_interruptible(&ibmvtpm->wq);
			return;
		default:
			return;
		}
	}
	return;
}
static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	struct ath_vif *avp;
	struct sk_buff *skb;
	struct ath_txq *cabq;
	struct ieee80211_tx_info *info;
	int cabq_depth;

	ath9k_reset_beacon_status(sc);

	avp = (void *)vif->drv_priv;
	cabq = sc->beacon.cabq;

	if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
		return NULL;

	/* Release the old beacon first */

	bf = avp->av_bcbuf;
	skb = bf->bf_mpdu;
	if (skb) {
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		bf->bf_buf_addr = 0;
	}

	/* Get a new beacon from mac80211 */

	skb = ieee80211_beacon_get(hw, vif);
	bf->bf_mpdu = skb;
	if (skb == NULL)
		return NULL;
	((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
		avp->tsf_adjust;

	info = IEEE80211_SKB_CB(skb);
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		/*
		 * TODO: make sure the seq# gets assigned properly (vs. other
		 * TX frames)
		 */
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		dev_kfree_skb_any(skb);
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(common, "dma_mapping_error on beaconing\n");
		return NULL;
	}

	skb = ieee80211_get_buffered_bc(hw, vif);

	/*
	 * if the CABQ traffic from previous DTIM is pending and the current
	 *  beacon is also a DTIM.
	 *  1) if there is only one vif let the cab traffic continue.
	 *  2) if there are more than one vif and we are using staggered
	 *     beacons, then drain the cabq by dropping all the frames in
	 *     the cabq so that the current vifs cab traffic can be scheduled.
	 */
	spin_lock_bh(&cabq->axq_lock);
	cabq_depth = cabq->axq_depth;
	spin_unlock_bh(&cabq->axq_lock);

	if (skb && cabq_depth) {
		if (sc->nvifs > 1) {
			ath_dbg(common, BEACON,
				"Flushing previous cabq traffic\n");
			ath_draintxq(sc, cabq, false);
		}
	}

	ath_beacon_setup(sc, avp, bf, info->control.rates[0].idx);

	while (skb) {
		ath_tx_cabq(hw, skb);
		skb = ieee80211_get_buffered_bc(hw, vif);
	}

	return bf;
}
Exemple #24
0
/**
 * tpm_ibmvtpm_probe - ibm vtpm initialize entry point
 * @vio_dev:	vio device struct
 * @id:		vio device id struct
 *
 * Return value:
 *	0 - Success
 *	Non-zero - Failure
 */
static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
				   const struct vio_device_id *id)
{
	struct ibmvtpm_dev *ibmvtpm;
	struct device *dev = &vio_dev->dev;
	struct ibmvtpm_crq_queue *crq_q;
	struct tpm_chip *chip;
	int rc = -ENOMEM, rc1;

	chip = tpmm_chip_alloc(dev, &tpm_ibmvtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	ibmvtpm = kzalloc(sizeof(struct ibmvtpm_dev), GFP_KERNEL);
	if (!ibmvtpm) {
		dev_err(dev, "kzalloc for ibmvtpm failed\n");
		goto cleanup;
	}

	ibmvtpm->dev = dev;
	ibmvtpm->vdev = vio_dev;

	crq_q = &ibmvtpm->crq_queue;
	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
	if (!crq_q->crq_addr) {
		dev_err(dev, "Unable to allocate memory for crq_addr\n");
		goto cleanup;
	}

	crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
	ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
						 CRQ_RES_BUF_SIZE,
						 DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
		dev_err(dev, "dma mapping failed\n");
		goto cleanup;
	}

	rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
				ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
	if (rc == H_RESOURCE)
		rc = ibmvtpm_reset_crq(ibmvtpm);

	if (rc) {
		dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
		goto reg_crq_cleanup;
	}

	rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
			 tpm_ibmvtpm_driver_name, ibmvtpm);
	if (rc) {
		dev_err(dev, "Error %d register irq 0x%x\n", rc, vio_dev->irq);
		goto init_irq_cleanup;
	}

	rc = vio_enable_interrupts(vio_dev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto init_irq_cleanup;
	}

	init_waitqueue_head(&ibmvtpm->wq);

	crq_q->index = 0;

	dev_set_drvdata(&chip->dev, ibmvtpm);

	spin_lock_init(&ibmvtpm->rtce_lock);

	rc = ibmvtpm_crq_send_init(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_version(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	rc = ibmvtpm_crq_get_rtce_size(ibmvtpm);
	if (rc)
		goto init_irq_cleanup;

	return tpm_chip_register(chip);
init_irq_cleanup:
	do {
		rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
	} while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
reg_crq_cleanup:
	dma_unmap_single(dev, ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
cleanup:
	if (ibmvtpm) {
		if (crq_q->crq_addr)
			free_page((unsigned long)crq_q->crq_addr);
		kfree(ibmvtpm);
	}

	return rc;
}
Exemple #25
0
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	struct bgmac_dma_desc *dma_desc;
	struct bgmac_slot_info *slot;
	u32 ctl0, ctl1;
	int free_slots;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
		goto err_stop_drop;
	}

	if (ring->start <= ring->end)
		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
	else
		free_slots = ring->start - ring->end;
	if (free_slots == 1) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->end];
	slot->skb = skb;
	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
			  ring->mmio_base);
		goto err_stop_drop;
	}

	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
	if (ring->end == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;

	dma_desc = ring->cpu_base;
	dma_desc += ring->end;
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point to the next empty slot. We tell hardware
	 * the first slot it should *not* read.
	 */
	if (++ring->end >= BGMAC_TX_RING_SLOTS)
		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));

	/* Always keep one slot free to allow detecting bugged calls. */
	if (--free_slots == 1)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_stop_drop:
	netif_stop_queue(net_dev);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Exemple #26
0
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
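The fragment jumps to an err: label that is not included; a plausible partial unwind using the names from the fragment (the sgl entries mapped after index bufs would need the same treatment):

err:
	/* undo whatever assoc/iv mappings were established before the failure */
	for (i = 0; i < bufs; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	return -ENOMEM;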
Exemple #27
0
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dma_chan) {
		dev_err(tup->uport.dev,
			"Dma channel is not available, will try later\n");
		return -EPROBE_DEFER;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				 &dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		dma_buf = tup->uport.state->xmit.buf;
	}

	dma_sconfig.slave_id = tup->dma_req_sel;
	if (dma_to_memory) {
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
	} else {
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		goto scrub;
	}

	if (dma_to_memory) {
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}
	return 0;

scrub:
	dma_release_channel(dma_chan);
	return ret;
}
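The TX branch above maps the circular xmit buffer without checking the result; a minimal sketch of the check, reusing the same variables and the error convention of the RX branch (placement and message are assumptions):

		dma_phys = dma_map_single(tup->uport.dev,
					  tup->uport.state->xmit.buf,
					  UART_XMIT_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(tup->uport.dev, dma_phys)) {
			dev_err(tup->uport.dev, "dma_map_single tx failed\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
		dma_buf = tup->uport.state->xmit.buf;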
Exemple #28
0
/* Function to perform hardware set up */
int isp_af_configure(struct af_configuration *afconfig)
{
	int result;
	int buff_size, i;
	unsigned int busyaf;
	struct af_configuration *af_curr_cfg = af_dev_configptr->config;

	if (NULL == afconfig) {
		printk(KERN_ERR "Null argument in configuration. \n");
		return -EINVAL;
	}

	memcpy(af_curr_cfg, afconfig, sizeof(struct af_configuration));
	/* Get the value of PCR register */
	busyaf = isp_reg_readl(OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR);

	if ((busyaf & AF_BUSYAF) == AF_BUSYAF) {
		DPRINTK_ISP_AF("AF_register_setup_ERROR : Engine Busy");
		DPRINTK_ISP_AF("\n Configuration cannot be done ");
		return -AF_ERR_ENGINE_BUSY;
	}

	/* Check IIR Coefficient and start Values */
	result = isp_af_check_iir();
	if (result < 0)
		return result;

	/* Check Paxel Values */
	result = isp_af_check_paxel();
	if (result < 0)
		return result;

	/* Check HMF Threshold Values */
	if (af_curr_cfg->hmf_config.threshold > AF_THRESHOLD_MAX) {
		DPRINTK_ISP_AF("Error : HMF Threshold is incorrect");
		return -AF_ERR_THRESHOLD;
	}

	/* Compute buffer size */
	buff_size = (af_curr_cfg->paxel_config.hz_cnt + 1) *
		(af_curr_cfg->paxel_config.vt_cnt + 1) * AF_PAXEL_SIZE;

	afstat.curr_cfg_buf_size = buff_size;
	/* Deallocate the previous buffers */
	if (afstat.stats_buf_size && buff_size > afstat.stats_buf_size) {
		isp_af_enable(0);
		for (i = 0; i < H3A_MAX_BUFF; i++) {
			ispmmu_kunmap(afstat.af_buff[i].ispmmu_addr);
			free_pages_exact((void *)afstat.af_buff[i].virt_addr,
					afstat.min_buf_size);
			afstat.af_buff[i].virt_addr = 0;
		}
		afstat.stats_buf_size = 0;
	}

	if (!afstat.af_buff[0].virt_addr) {
		afstat.stats_buf_size = buff_size;
		afstat.min_buf_size = PAGE_ALIGN(afstat.stats_buf_size);

		for (i = 0; i < H3A_MAX_BUFF; i++) {
			afstat.af_buff[i].virt_addr =
				(unsigned long)alloc_pages_exact(
					afstat.min_buf_size,
					GFP_KERNEL | GFP_DMA);
			if (afstat.af_buff[i].virt_addr == 0) {
				printk(KERN_ERR "Can't acquire memory for "
				       "buffer[%d]\n", i);
				return -ENOMEM;
			}
			afstat.af_buff[i].phy_addr = dma_map_single(NULL,
					(void *)afstat.af_buff[i].virt_addr,
					afstat.min_buf_size,
					DMA_FROM_DEVICE);
			afstat.af_buff[i].addr_align =
				afstat.af_buff[i].virt_addr;
			while ((afstat.af_buff[i].addr_align & 0xFFFFFFC0) !=
			       afstat.af_buff[i].addr_align)
				afstat.af_buff[i].addr_align++;
			afstat.af_buff[i].ispmmu_addr =
				ispmmu_kmap(afstat.af_buff[i].phy_addr,
					    afstat.min_buf_size);
		}
		isp_af_unlock_buffers();
		isp_af_link_buffers();

		/* First active buffer */
		if (active_buff == NULL)
			active_buff = &afstat.af_buff[0];
		isp_af_set_address(active_buff->ispmmu_addr);
	}

	result = isp_af_register_setup(af_dev_configptr);
	if (result < 0)
		return result;
	af_dev_configptr->size_paxel = buff_size;
	atomic_inc(&afstat.config_counter);
	afstat.initialized = 1;
	afstat.frame_count = 1;
	active_buff->frame_num = 1;
	/* Set configuration flag to indicate HW setup done */
	if (af_curr_cfg->af_config)
		isp_af_enable(1);
	else
		isp_af_enable(0);

	/* Success */
	return 0;
}
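The statistics buffers are mapped once with DMA_FROM_DEVICE and then reused frame after frame, so ownership has to bounce between device and CPU. A hedged sketch of that sync around parsing buffer i, using the afstat fields from above (the NULL device again mirrors the example):

	/* hand the filled buffer back to the CPU before parsing it */
	dma_sync_single_for_cpu(NULL, afstat.af_buff[i].phy_addr,
				afstat.min_buf_size, DMA_FROM_DEVICE);
	/* ... read the AF statistics here ... */
	/* then return ownership to the device for the next frame */
	dma_sync_single_for_device(NULL, afstat.af_buff[i].phy_addr,
				   afstat.min_buf_size, DMA_FROM_DEVICE);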
/**
 * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:	crq_queue to initialize and register
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
int ibmvscsi_init_crq_queue(struct crq_queue *queue,
			    struct ibmvscsi_host_data *hostdata,
			    int max_requests)
{
	int rc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	gather_partition_info();
	set_adapter_info(hostdata);

	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_Resource) 
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvscsi_reset_crq_queue(queue,
					      hostdata);

	if (rc == 2) {
		/* Adapter is good, but other end is not ready */
		printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n");
	} else if (rc != 0) {
		printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	if (request_irq(vdev->irq,
			ibmvscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
		printk(KERN_ERR "ibmvscsi: couldn't register irq 0x%x\n",
		       vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		printk(KERN_ERR "ibmvscsi:  Error %d enabling interrupts!!!\n",
		       rc);
		goto req_irq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
		     (unsigned long)hostdata);

	return 0;

      req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_Busy) || (H_isLongBusy(rc)));
      reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
	free_page((unsigned long)queue->msgs);
      malloc_failed:
	return -1;
}
Exemple #30
0
/*
 * tx request callback
 */
static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	int ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure  the tx hw queue  is not full,  should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= DMADESC_ESOP_MASK |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= DMADESC_WRAP_MASK;
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->tx_chan));

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}