static int __devinit sonic_probe1(struct net_device *dev)
{
	static unsigned version_printed;
	unsigned int silicon_revision;
	unsigned int val;
	struct sonic_local *lp = netdev_priv(dev);
	int err = -ENODEV;
	int i;

	if (!request_mem_region(dev->base_addr, SONIC_MEM_SIZE, jazz_sonic_string))
		return -EBUSY;

	/*
	 * get the Silicon Revision ID. If this is one of the known
	 * ones, assume that we found a SONIC ethernet controller at
	 * the expected location.
	 */
	silicon_revision = SONIC_READ(SONIC_SR);
	if (sonic_debug > 1)
		printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);

	i = 0;
	while (known_revisions[i] != 0xffff &&
	       known_revisions[i] != silicon_revision)
		i++;

	if (known_revisions[i] == 0xffff) {
		printk("SONIC ethernet controller not found (0x%4x)\n",
		       silicon_revision);
		goto out;
	}

	if (sonic_debug  &&  version_printed++ == 0)
		printk(version);

	printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ",
	       dev_name(lp->device), dev->base_addr);

	/*
	 * Put the sonic into software reset, then
	 * retrieve and print the ethernet address.
	 */
	SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
	SONIC_WRITE(SONIC_CEP,0);
	for (i=0; i<3; i++) {
		val = SONIC_READ(SONIC_CAP0-i);
		dev->dev_addr[i*2] = val;
		dev->dev_addr[i*2+1] = val >> 8;
	}

	err = -ENOMEM;

	/* Initialize the device structure. */

	lp->dma_bitmode = SONIC_BITMODE32;

	/* Allocate the entire chunk of memory for the descriptors.
           Note that this cannot cross a 64K boundary. */
	if ((lp->descriptors = dma_alloc_coherent(lp->device,
				SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
				&lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
		printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
		       dev_name(lp->device));
		goto out;
	}

	/* Now set up the pointers to point to the appropriate places */
	lp->cda = lp->descriptors;
	lp->tda = lp->cda + (SIZEOF_SONIC_CDA
	                     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
	                     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
	                     * SONIC_BUS_SCALE(lp->dma_bitmode));

	lp->cda_laddr = lp->descriptors_laddr;
	lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
	                     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
	                     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
	                     * SONIC_BUS_SCALE(lp->dma_bitmode));

	dev->netdev_ops = &sonic_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	/*
	 * clear tally counter
	 */
	SONIC_WRITE(SONIC_CRCT,0xffff);
	SONIC_WRITE(SONIC_FAET,0xffff);
	SONIC_WRITE(SONIC_MPT,0xffff);

	return 0;
out:
	release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
	return err;
}
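
The probe above acquires two resources, the MMIO region and the coherent descriptor block, but its counterpart is not shown. A minimal teardown sketch, assuming the same sonic_local layout as in the example; the function name and its call site are hypothetical:

static void sonic_cleanup(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* release resources in the reverse order the probe acquired them */
	dma_free_coherent(lp->device,
			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			  lp->descriptors, lp->descriptors_laddr);
	release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
}
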
Example #2
File: qp.c Project: Distefano/linux
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
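
The ladder of labels at the end of create_qp() is the standard kernel unwind idiom: every successful allocation gains a label, and a failure at step N jumps to the label that releases steps N-1 down to 1 in reverse order. A stripped-down sketch of the same shape, with hypothetical names and only kzalloc()/kfree() involved:

struct three_bufs {
	void *a, *b, *c;
};

static int three_bufs_init(struct three_bufs *t)
{
	int ret = -ENOMEM;

	t->a = kzalloc(64, GFP_KERNEL);
	if (!t->a)
		return ret;

	t->b = kzalloc(64, GFP_KERNEL);
	if (!t->b)
		goto free_a;		/* undo step 1 only */

	t->c = kzalloc(64, GFP_KERNEL);
	if (!t->c)
		goto free_b;		/* undo steps 2 and 1 */

	return 0;

free_b:
	kfree(t->b);
free_a:
	kfree(t->a);
	return ret;
}
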
Example #3
/*
 *  This function will allocate both the DMA descriptor structure, and the
 *  buffers it contains.  These are used to contain the descriptors used
 *  by the system.
*/
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
    struct ath_common *common = ath9k_hw_common(sc->sc_ah);
    u8 *ds;
    struct ath_buf *bf;
    int i, bsize, error, desc_len;

    ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
            name, nbuf, ndesc);

    INIT_LIST_HEAD(head);

    if (is_tx)
        desc_len = sc->sc_ah->caps.tx_desc_len;
    else
        desc_len = sizeof(struct ath_desc);

    /* ath_desc must be a multiple of DWORDs */
    if ((desc_len % 4) != 0) {
        ath_err(common, "ath_desc not DWORD aligned\n");
        BUG_ON((desc_len % 4) != 0);
        error = -ENOMEM;
        goto fail;
    }

    dd->dd_desc_len = desc_len * nbuf * ndesc;

    /*
     * Need additional DMA memory because we can't use
     * descriptors that cross the 4K page boundary. Assume
     * one skipped descriptor per 4K page.
     */
    if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
        u32 ndesc_skipped =
            ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
        u32 dma_len;

        while (ndesc_skipped) {
            dma_len = ndesc_skipped * desc_len;
            dd->dd_desc_len += dma_len;

            ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
        }
    }

    /* allocate descriptors */
    dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                     &dd->dd_desc_paddr, GFP_KERNEL);
    if (dd->dd_desc == NULL) {
        error = -ENOMEM;
        goto fail;
    }
    ds = (u8 *) dd->dd_desc;
    ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
            name, ds, (u32) dd->dd_desc_len,
            ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

    /* allocate buffers */
    bsize = sizeof(struct ath_buf) * nbuf;
    bf = kzalloc(bsize, GFP_KERNEL);
    if (bf == NULL) {
        error = -ENOMEM;
        goto fail2;
    }
    dd->dd_bufptr = bf;

    for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
        bf->bf_desc = ds;
        bf->bf_daddr = DS2PHYS(dd, ds);

        if (!(sc->sc_ah->caps.hw_caps &
                ATH9K_HW_CAP_4KB_SPLITTRANS)) {
            /*
             * Skip descriptor addresses which can cause 4KB
             * boundary crossing (addr + length) with a 32 dword
             * descriptor fetch.
             */
            while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                BUG_ON((caddr_t) bf->bf_desc >=
                       ((caddr_t) dd->dd_desc +
                        dd->dd_desc_len));

                ds += (desc_len * ndesc);
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);
            }
        }
        list_add_tail(&bf->list, head);
    }
    return 0;
fail2:
    dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                      dd->dd_desc_paddr);
fail:
    memset(dd, 0, sizeof(*dd));
    return error;
}
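
The padding loop in this example relies on two macros whose definitions are not shown. As a rough illustration of what they plausibly compute (assumptions, not the driver's actual definitions): a descriptor address fails the boundary check when a maximum-size fetch starting there would cross a 4096-byte page, and one skipped descriptor is assumed per 4 KB of ring space. With desc_len = 128 and 512 single-descriptor buffers, dd_desc_len starts at 65536, the first pass adds 16 * 128 = 2048 bytes of slack, and 2048 / 4096 = 0 ends the loop.

/* Hypothetical stand-ins for the boundary macros, for illustration only;
 * 0x80 is the assumed worst-case fetch size (32 dwords). */
#define DESC_4KB_BOUND_CHECK(daddr)	(((daddr) & 0xfff) > (0x1000 - 0x80))
#define DESC_4KB_BOUND_NUM_SKIPPED(len)	((len) / 4096)
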
Example #4
File: dma_lib.c Project: E-LLP/n900
/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
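
A hypothetical caller sketch for the wrapper above: the returned virtual address is what the CPU writes through, while the handle is what gets programmed into the engine. The channel pointer, the 64 KiB size, and the assumption that the library offers a matching free helper are illustrative only; absent such a helper, the buffer would have to be released with dma_free_coherent() against the DMA engine's struct device.

static int example_setup_ring(struct pasemi_dmachan *chan)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = pasemi_dma_alloc_buf(chan, 64 * 1024, &ring_dma);
	if (!ring)
		return -ENOMEM;

	/* ... fill descriptors through 'ring', hand 'ring_dma' to hardware ... */
	return 0;
}
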
Example #5
static int __init sonic_probe1(struct net_device *dev)
{
	static unsigned version_printed = 0;
	unsigned int silicon_revision;
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int base_addr = dev->base_addr;
	int i;
	int err = 0;

	if (!request_mem_region(base_addr, 0x100, xtsonic_string))
		return -EBUSY;

	/*
	 * get the Silicon Revision ID. If this is one of the known
	 * ones, assume that we found a SONIC ethernet controller at
	 * the expected location.
	 */
	silicon_revision = SONIC_READ(SONIC_SR);
	if (sonic_debug > 1)
		printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);

	i = 0;
	while ((known_revisions[i] != 0xffff) &&
			(known_revisions[i] != silicon_revision))
		i++;

	if (known_revisions[i] == 0xffff) {
		printk("SONIC ethernet controller not found (0x%04x)\n",
				silicon_revision);
		err = -ENODEV;
		goto out;
	}

	if (sonic_debug  &&  version_printed++ == 0)
		printk(version);

	/*
	 * Put the sonic into software reset, then retrieve ethernet address.
	 * Note: we are assuming that the boot-loader has initialized the cam.
	 */
	SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
	SONIC_WRITE(SONIC_DCR,
		    SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
	SONIC_WRITE(SONIC_CEP,0);
	SONIC_WRITE(SONIC_IMR,0);

	SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
	SONIC_WRITE(SONIC_CEP,0);

	for (i=0; i<3; i++) {
		unsigned int val = SONIC_READ(SONIC_CAP0-i);
		dev->dev_addr[i*2] = val;
		dev->dev_addr[i*2+1] = val >> 8;
	}

	/* Initialize the device structure. */

	lp->dma_bitmode = SONIC_BITMODE32;

	/*
	 *  Allocate local private descriptor areas in uncached space.
	 *  The entire structure must be located within the same 64kb segment.
	 *  A simple way to ensure this is to allocate twice the
	 *  size of the structure -- given that the structure is
	 *  much less than 64 kB, at least one of the halves of
	 *  the allocated area will be contained entirely in 64 kB.
	 *  We also allocate extra space for a pointer to allow freeing
	 *  this structure later on (in xtsonic_cleanup_module()).
	 */
	lp->descriptors =
		dma_alloc_coherent(lp->device,
			SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			&lp->descriptors_laddr, GFP_KERNEL);

	if (lp->descriptors == NULL) {
		printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
		       dev_name(lp->device));
		err = -ENOMEM;
		goto out;
	}

	lp->cda = lp->descriptors;
	lp->tda = lp->cda + (SIZEOF_SONIC_CDA
			     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
			     * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
			     * SONIC_BUS_SCALE(lp->dma_bitmode));

	/* get the virtual dma address */

	lp->cda_laddr = lp->descriptors_laddr;
	lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
				         * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
					 * SONIC_BUS_SCALE(lp->dma_bitmode));
	lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
					 * SONIC_BUS_SCALE(lp->dma_bitmode));

	dev->netdev_ops		= &xtsonic_netdev_ops;
	dev->watchdog_timeo	= TX_TIMEOUT;

	/*
	 * clear tally counter
	 */
	SONIC_WRITE(SONIC_CRCT,0xffff);
	SONIC_WRITE(SONIC_FAET,0xffff);
	SONIC_WRITE(SONIC_MPT,0xffff);

	return 0;
out:
	release_mem_region(dev->base_addr, 0x100);
	return err;
}
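
The long block comment in this probe describes allocating twice the descriptor area so that at least one half lies entirely inside a single 64 kB segment, yet the call below it allocates only one structure's worth. A sketch of the trick the comment describes, assuming the descriptor area is well under 32 kB; the helper itself is hypothetical:

static void *sonic_alloc_within_64k(struct sonic_local *lp, size_t size,
				    dma_addr_t *laddr)
{
	dma_addr_t base_laddr;
	void *base;

	base = dma_alloc_coherent(lp->device, 2 * size, &base_laddr,
				  GFP_KERNEL);
	if (!base)
		return NULL;

	/* If the first half straddles a 64 kB boundary, the second half
	 * cannot (size is far below 32 kB), so hand out that half instead. */
	if (((base_laddr & 0xffff) + size) > 0x10000) {
		*laddr = base_laddr + size;
		return base + size;
	}
	*laddr = base_laddr;
	return base;
}
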
Example #6
static int mmc_dma_setup(struct mmci_platform_data *plat)
{
	u32 llptrrx, llptrtx;
	int ret = 0;

	/*
	 * There is a quirk with the LPC32XX and SD burst DMA. DMA sg
	 * transfers where DMA is the flow controller will not transfer
	 * the last few bytes to or from the SD card controller and
	 * memory. For RX, the last few bytes in the SD transfer can be
	 * forced out with a software DMA burst request. For TX, this
	 * can't be done, so TX sg support cannot be supported. For TX,
	 * a temporary bouncing buffer is used if more than 1 sg segment
	 * is passed in the data request. The bouncing buffer will get a
	 * contiguous copy of the TX data and it will be used instead.
	 */

	if (plat->dma_tx_size) {
		/* Use pre-allocated memory for the DMA Tx buffer */
		dmac_drvdat.dma_handle_tx = (dma_addr_t)plat->dma_tx_v_base;
		dmac_drvdat.dma_v_base = plat->dma_tx_v_base;
		dmac_drvdat.preallocated_tx_buf = 1;
	} else {
		/* Allocate a chunk of memory for the DMA TX buffers */
		dmac_drvdat.dma_v_base = dma_alloc_coherent(dmac_drvdat.dev,
			DMA_BUFF_SIZE, &dmac_drvdat.dma_handle_tx, GFP_KERNEL);
		dmac_drvdat.preallocated_tx_buf = 0;
	}

	if (dmac_drvdat.dma_v_base == NULL) {
		dev_err(dmac_drvdat.dev, "error getting DMA region\n");
		ret = -ENOMEM;
		goto dma_no_tx_buff;
	}
	dev_info(dmac_drvdat.dev, "DMA buffer: phy:%p, virt:%p\n",
		(void *) dmac_drvdat.dma_handle_tx,
		dmac_drvdat.dma_v_base);

	/* Setup TX DMA channel */
	dmac_drvdat.dmacfgtx.ch = DMA_CH_SDCARD_TX;
	dmac_drvdat.dmacfgtx.tc_inten = 0;
	dmac_drvdat.dmacfgtx.err_inten = 0;
	dmac_drvdat.dmacfgtx.src_size = 4;
	dmac_drvdat.dmacfgtx.src_inc = 1;
	dmac_drvdat.dmacfgtx.src_bsize = DMAC_CHAN_SRC_BURST_8;
	dmac_drvdat.dmacfgtx.src_prph = DMAC_SRC_PERIP(DMA_PERID_SDCARD);
	dmac_drvdat.dmacfgtx.dst_size = 4;
	dmac_drvdat.dmacfgtx.dst_inc = 0;
	dmac_drvdat.dmacfgtx.dst_bsize = DMAC_CHAN_DEST_BURST_8;
	dmac_drvdat.dmacfgtx.dst_prph = DMAC_DEST_PERIP(DMA_PERID_SDCARD);
	dmac_drvdat.dmacfgtx.flowctrl = DMAC_CHAN_FLOW_P_M2P;
	if (lpc178x_dma_ch_get(
		&dmac_drvdat.dmacfgtx, "dma_sd_tx", NULL, NULL) < 0)
	{
		dev_err(dmac_drvdat.dev,
			"Error setting up SD card TX DMA channel\n");
		ret = -ENODEV;
		goto dma_no_txch;
	}

	/* Allocate a linked list for DMA support */
	llptrtx = lpc178x_dma_alloc_llist(
		dmac_drvdat.dmacfgtx.ch, NR_SG * 2);
	if (llptrtx == 0) {
		dev_err(dmac_drvdat.dev,
			"Error allocating list buffer (MMC TX)\n");
		ret = -ENOMEM;
		goto dma_no_txlist;
	}

	/* Setup RX DMA channel */
	dmac_drvdat.dmacfgrx.ch = DMA_CH_SDCARD_RX;
	dmac_drvdat.dmacfgrx.tc_inten = 0;
	dmac_drvdat.dmacfgrx.err_inten = 0;
	dmac_drvdat.dmacfgrx.src_size = 4;
	dmac_drvdat.dmacfgrx.src_inc = 0;
	dmac_drvdat.dmacfgrx.src_bsize = DMAC_CHAN_SRC_BURST_8;
	dmac_drvdat.dmacfgrx.src_prph = DMAC_SRC_PERIP(DMA_PERID_SDCARD);
	dmac_drvdat.dmacfgrx.dst_size = 4;
	dmac_drvdat.dmacfgrx.dst_inc = 1;
	dmac_drvdat.dmacfgrx.dst_bsize = DMAC_CHAN_DEST_BURST_8;
	dmac_drvdat.dmacfgrx.dst_prph = DMAC_DEST_PERIP(DMA_PERID_SDCARD);
	dmac_drvdat.dmacfgrx.flowctrl = DMAC_CHAN_FLOW_D_P2M;
	if (lpc178x_dma_ch_get(
		&dmac_drvdat.dmacfgrx, "dma_sd_rx", NULL, NULL) < 0)
	{
		dev_err(dmac_drvdat.dev,
			"Error setting up SD card RX DMA channel\n");
		ret = -ENODEV;
		goto dma_no_rxch;
	}

	/* Allocate a linked list for DMA support */
	llptrrx = lpc178x_dma_alloc_llist(
		dmac_drvdat.dmacfgrx.ch, NR_SG * 2);
	if (llptrrx == 0) {
		dev_err(dmac_drvdat.dev,
			"Error allocating list buffer (MMC RX)\n");
		ret = -ENOMEM;
		goto dma_no_rxlist;
	}

	return 0;

dma_no_rxlist:
	lpc178x_dma_ch_put(dmac_drvdat.dmacfgrx.ch);
	dmac_drvdat.dmacfgrx.ch = -1;
dma_no_rxch:
	lpc178x_dma_dealloc_llist(dmac_drvdat.dmacfgtx.ch);
dma_no_txlist:
	lpc178x_dma_ch_put(dmac_drvdat.dmacfgtx.ch);
	dmac_drvdat.dmacfgtx.ch = -1;
dma_no_txch:
	if (!dmac_drvdat.preallocated_tx_buf) {
		dma_free_coherent(dmac_drvdat.dev, DMA_BUFF_SIZE,
			dmac_drvdat.dma_v_base,
			dmac_drvdat.dma_handle_tx);
	}
dma_no_tx_buff:
	return ret;
}
Example #7
/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	int i;

	lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
	if (!lp->rx_skb) {
		dev_err(&ndev->dev,
				"can't allocate memory for DMA RX buffer\n");
		goto out;
	}
	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v) {
		dev_err(&ndev->dev,
				"unable to allocate DMA TX buffer descriptors");
		goto out;
	}
	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v) {
		dev_err(&ndev->dev,
				"unable to allocate DMA RX buffer descriptors");
		goto out;
	}

	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
	}

	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);

		if (skb == 0) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			goto out;
		}
		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     XTE_MAX_JUMBO_FRAME_SIZE,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
	}

	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN);
	/* 0x10220483 */
	/* 0x00100483 */
	lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
					  CHNL_CTRL_IRQ_EN |
					  CHNL_CTRL_IRQ_DLY_EN |
					  CHNL_CTRL_IRQ_COAL_EN |
					  CHNL_CTRL_IRQ_IOE);
	/* 0xff010283 */

	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
Example #8
static int cfv_create_genpool(struct cfv_info *cfv)
{
	int err;

	/* dma_alloc can only allocate whole pages, and we need a more
	 * fine grained allocation so we use genpool. We ask for space needed
	 * by IP and a full ring. If the dma allocation fails we retry with a
	 * smaller allocation size.
	 */
	err = -ENOMEM;
	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
		return -EINVAL;

	for (;;) {
		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
			netdev_info(cfv->ndev, "Not enough device memory\n");
			return -ENOMEM;
		}

		cfv->alloc_addr = dma_alloc_coherent(
						cfv->vdev->dev.parent->parent,
						cfv->allocsz, &cfv->alloc_dma,
						GFP_ATOMIC);
		if (cfv->alloc_addr)
			break;

		cfv->allocsz = (cfv->allocsz * 3) >> 2;
	}

	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
		   cfv->allocsz);

	/* Allocate on 128 bytes boundaries (1 << 7)*/
	cfv->genpool = gen_pool_create(7, -1);
	if (!cfv->genpool)
		goto err;

	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
				cfv->allocsz, -1);
	if (err)
		goto err;

	/* Reserve some memory for low memory situations. If we hit the roof
	 * in the memory pool, we stop TX flow and release the reserve.
	 */
	cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
					   cfv->reserved_size);
	if (!cfv->reserved_mem) {
		err = -ENOMEM;
		goto err;
	}

	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
	return 0;
err:
	cfv_destroy_genpool(cfv);
	return err;
}
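
The retry loop above shrinks the request by a quarter on every failed attempt (allocsz = allocsz * 3 / 4), so a 1 MiB request falls to 768 KiB, then 576 KiB, and so on, until it either succeeds or drops below the per-CPU reserve floor. The same back-off isolated into a helper, with hypothetical names:

static void *dma_alloc_coherent_backoff(struct device *dev, size_t *sz,
					size_t floor, dma_addr_t *dma)
{
	void *va;

	while (*sz > floor) {
		va = dma_alloc_coherent(dev, *sz, dma, GFP_KERNEL);
		if (va)
			return va;	/* *sz holds the size that worked */
		*sz = (*sz * 3) >> 2;	/* shrink by 25 % and retry */
	}
	return NULL;
}
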
Example #9
/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
				GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
			GFP_KERNEL);

	if (!mdp->rx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
			rx_ringsize);
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
			GFP_KERNEL);
	if (!mdp->tx_ring) {
		dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
			tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);

	return ret;
}
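
As a worked instance of the rx_buf_sz formula explained in the comment: with an MTU of 1500 (above the 1492 threshold) the buffer becomes ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes, i.e. the encapsulated frame rounded up to an 8-byte multiple plus the 2 reserved bytes and the 16-byte status word; an MTU of 1492 or less simply uses PKT_BUF_SZ, and rpadir hardware adds NET_IP_ALIGN on top.
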
Example #10
/* Initialize the CPM Ethernet on SCC.  If EPPC-Bug loaded us, or performed
 * some other network I/O, a whole bunch of this has already been set up.
 * It is no big deal if we do it again, we just have to disable the
 * transmit and receive to make sure we don't catch the CPM with some
 * inconsistent control information.
 */
static int __init scc_enet_init(void)
{
	struct net_device *dev;
	struct scc_enet_private *cep;
	int i, j, k, err;
	uint dp_offset;
	unsigned char	*eap, *ba;
	dma_addr_t	mem_addr;
	bd_t		*bd;
	volatile	cbd_t		*bdp;
	volatile	cpm8xx_t	*cp;
	volatile	scc_t		*sccp;
	volatile	scc_enet_t	*ep;
	volatile	immap_t		*immap;

	cp = cpmp;	/* Get pointer to Communication Processor */

	immap = (immap_t *)(mfspr(SPRN_IMMR) & 0xFFFF0000);	/* and to internal registers */

	bd = (bd_t *)__res;

	dev = alloc_etherdev(sizeof(*cep));
	if (!dev)
		return -ENOMEM;

	cep = dev->priv;
	spin_lock_init(&cep->lock);

	/* Get pointer to SCC area in parameter RAM.
	*/
	ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);

	/* And another to the SCC register area.
	*/
	sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]);
	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */

	/* Disable receive and transmit in case EPPC-Bug started it.
	*/
	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Cookbook style from the MPC860 manual.....
	 * Not all of this is necessary if EPPC-Bug has initialized
	 * the network.
	 * So far we are lucky, all board configurations use the same
	 * pins, or at least the same I/O Port for these functions.....
	 * It can't last though......
	 */

#if (defined(PA_ENET_RXD) && defined(PA_ENET_TXD))
	/* Configure port A pins for Txd and Rxd.
	*/
	immap->im_ioport.iop_papar |=  (PA_ENET_RXD | PA_ENET_TXD);
	immap->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD);
	immap->im_ioport.iop_paodr &=                ~PA_ENET_TXD;
#elif (defined(PB_ENET_RXD) && defined(PB_ENET_TXD))
	/* Configure port B pins for Txd and Rxd.
	*/
	immap->im_cpm.cp_pbpar |=  (PB_ENET_RXD | PB_ENET_TXD);
	immap->im_cpm.cp_pbdir &= ~(PB_ENET_RXD | PB_ENET_TXD);
	immap->im_cpm.cp_pbodr &=		 ~PB_ENET_TXD;
#else
#error Exactly ONE pair of PA_ENET_[RT]XD, PB_ENET_[RT]XD must be defined
#endif

#if defined(PC_ENET_LBK)
	/* Configure port C pins to disable External Loopback
	 */
	immap->im_ioport.iop_pcpar &= ~PC_ENET_LBK;
	immap->im_ioport.iop_pcdir |=  PC_ENET_LBK;
	immap->im_ioport.iop_pcso  &= ~PC_ENET_LBK;
	immap->im_ioport.iop_pcdat &= ~PC_ENET_LBK;	/* Disable Loopback */
#endif	/* PC_ENET_LBK */

#ifdef PE_ENET_TCLK
	/* Configure port E for TCLK and RCLK.
	*/
	cp->cp_pepar |=  (PE_ENET_TCLK | PE_ENET_RCLK);
	cp->cp_pedir &= ~(PE_ENET_TCLK | PE_ENET_RCLK);
	cp->cp_peso  &= ~(PE_ENET_TCLK | PE_ENET_RCLK);
#else
	/* Configure port A for TCLK and RCLK.
	*/
	immap->im_ioport.iop_papar |=  (PA_ENET_TCLK | PA_ENET_RCLK);
	immap->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK);
#endif

	/* Configure port C pins to enable CLSN and RENA.
	*/
	immap->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA);
	immap->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA);
	immap->im_ioport.iop_pcso  |=  (PC_ENET_CLSN | PC_ENET_RENA);

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	cp->cp_sicr &= ~SICR_ENET_MASK;
	cp->cp_sicr |=  SICR_ENET_CLKRT;

	/* Manual says set SDDR, but I can't find anything with that
	 * name.  I think it is a misprint, and should be SDCR.  This
	 * has already been set by the communication processor initialization.
	 */

	/* Allocate space for the buffer descriptors in the DP ram.
	 * These are relative offsets in the DP ram address space.
	 * Initialize base addresses for the buffer descriptors.
	 */
	dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
	ep->sen_genscc.scc_rbase = dp_offset;
	cep->rx_bd_base = cpm_dpram_addr(dp_offset);

	dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
	ep->sen_genscc.scc_tbase = dp_offset;
	cep->tx_bd_base = cpm_dpram_addr(dp_offset);

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	/* Issue init Rx BD command for SCC.
	 * Manual says to perform an Init Rx parameters here.  We have
	 * to perform both Rx and Tx because the SCC may have been
	 * already running.
	 * In addition, we have to do it later because we don't yet have
	 * all of the BD control/status set properly.
	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_RX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);
	 */

	/* Initialize function code registers for big-endian.
	*/
	ep->sen_genscc.scc_rfcr = SCC_EB;
	ep->sen_genscc.scc_tfcr = SCC_EB;

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

	/* Set CRC preset and mask.
	*/
	ep->sen_cpres = 0xffffffff;
	ep->sen_cmask = 0xdebb20e3;

	ep->sen_crcec = 0;	/* CRC Error counter */
	ep->sen_alec = 0;	/* alignment error counter */
	ep->sen_disfc = 0;	/* discard frame counter */

	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
	ep->sen_retlim = 15;	/* Retry limit threshold */

	ep->sen_maxflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
	ep->sen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */

	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */

	/* Clear hash tables.
	*/
	ep->sen_gaddr1 = 0;
	ep->sen_gaddr2 = 0;
	ep->sen_gaddr3 = 0;
	ep->sen_gaddr4 = 0;
	ep->sen_iaddr1 = 0;
	ep->sen_iaddr2 = 0;
	ep->sen_iaddr3 = 0;
	ep->sen_iaddr4 = 0;

	/* Set Ethernet station address.
	 */
	eap = (unsigned char *)&(ep->sen_paddrh);
	for (i=5; i>=0; i--)
		*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];

	ep->sen_pper = 0;	/* 'cause the book says so */
	ep->sen_taddrl = 0;	/* temp address (LSB) */
	ep->sen_taddrm = 0;
	ep->sen_taddrh = 0;	/* temp address (MSB) */

	/* Now allocate the host memory pages and initialize the
	 * buffer descriptors.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page.
		*/
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	k = 0;
	for (i=0; i<CPM_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		*/
		ba = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE,
				&mem_addr, GFP_KERNEL);
		if (ba == NULL) {
			/* minimal failure handling; DP RAM and pages from
			 * earlier iterations are not unwound here */
			free_netdev(dev);
			return -ENOMEM;
		}

		/* Initialize the BD for every fragment in the page.
		*/
		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_bufaddr = mem_addr;
			cep->rx_vaddr[k++] = ba;
			mem_addr += CPM_ENET_RX_FRSIZE;
			ba += CPM_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	*/
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Let's re-initialize the channel now.  We have to do it later
	 * than the manual describes because we have just now finished
	 * the BD initialization.
	 */
	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	cep->skb_cur = cep->skb_dirty = 0;

	sccp->scc_scce = 0xffff;	/* Clear any pending events */

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer we have also set the
	 * interrupt flag.
	 */
	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Install our interrupt handler.
	*/
	cpm_install_handler(CPMVEC_ENET, scc_enet_interrupt, dev);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	sccp->scc_gsmrh = 0;
	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	*/
	sccp->scc_dsr = 0xd555;

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	sccp->scc_psmr = (SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* It is now OK to enable the Ethernet transmitter.
	 * Unfortunately, there are board implementation differences here.
	 */
#if   (!defined (PB_ENET_TENA) &&  defined (PC_ENET_TENA) && !defined (PE_ENET_TENA))
	immap->im_ioport.iop_pcpar |=  PC_ENET_TENA;
	immap->im_ioport.iop_pcdir &= ~PC_ENET_TENA;
#elif ( defined (PB_ENET_TENA) && !defined (PC_ENET_TENA) && !defined (PE_ENET_TENA))
	cp->cp_pbpar |= PB_ENET_TENA;
	cp->cp_pbdir |= PB_ENET_TENA;
#elif ( !defined (PB_ENET_TENA) && !defined (PC_ENET_TENA) && defined (PE_ENET_TENA))
	cp->cp_pepar |=  PE_ENET_TENA;
	cp->cp_pedir &= ~PE_ENET_TENA;
	cp->cp_peso  |=  PE_ENET_TENA;
#else
#error Configuration Error: define exactly ONE of PB_ENET_TENA, PC_ENET_TENA, PE_ENET_TENA
#endif

#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC)
	/* And while we are here, set the configuration to enable ethernet.
	*/
	*((volatile uint *)RPX_CSR_ADDR) &= ~BCSR0_ETHLPBK;
	*((volatile uint *)RPX_CSR_ADDR) |=
			(BCSR0_ETHEN | BCSR0_COLTESTDIS | BCSR0_FULLDPLXDIS);
#endif

#ifdef CONFIG_BSEIP
	/* BSE uses port B and C for PHY control.
	*/
	cp->cp_pbpar &= ~(PB_BSE_POWERUP | PB_BSE_FDXDIS);
	cp->cp_pbdir |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);
	cp->cp_pbdat |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);

	immap->im_ioport.iop_pcpar &= ~PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcdir |= PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcso &= ~PC_BSE_LOOPBACK;
	immap->im_ioport.iop_pcdat &= ~PC_BSE_LOOPBACK;
#endif

#ifdef CONFIG_FADS
	cp->cp_pbpar |= PB_ENET_TENA;
	cp->cp_pbdir |= PB_ENET_TENA;

	/* Enable the EEST PHY.
	*/
	*((volatile uint *)BCSR1) &= ~BCSR1_ETHEN;
#endif

#ifdef CONFIG_MPC885ADS

	/* Deassert PHY reset and enable the PHY.
	 */
	{
		volatile uint __iomem *bcsr = ioremap(BCSR_ADDR, BCSR_SIZE);
		uint tmp;

		tmp = in_be32(bcsr + 1 /* BCSR1 */);
		tmp |= BCSR1_ETHEN;
		out_be32(bcsr + 1, tmp);
		tmp = in_be32(bcsr + 4 /* BCSR4 */);
		tmp |= BCSR4_ETH10_RST;
		out_be32(bcsr + 4, tmp);
		iounmap(bcsr);
	}

	/* On MPC885ADS SCC ethernet PHY defaults to the full duplex mode
	 * upon reset. SCC is set to half duplex by default. So this
	 * inconsistency should be better fixed by the software.
	 */
#endif

	dev->base_addr = (unsigned long)ep;
#if 0
	dev->name = "CPM_ENET";
#endif

	/* The CPM Ethernet specific entries in the device structure. */
	dev->open = scc_enet_open;
	dev->hard_start_xmit = scc_enet_start_xmit;
	dev->tx_timeout = scc_enet_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->stop = scc_enet_close;
	dev->get_stats = scc_enet_get_stats;
	dev->set_multicast_list = set_multicast_list;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	/* And last, enable the transmit and receive processing.
	*/
	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	printk("%s: CPM ENET Version 0.2 on SCC%d, ", dev->name, SCC_ENET+1);
	for (i=0; i<5; i++)
		printk("%02x:", dev->dev_addr[i]);
	printk("%02x\n", dev->dev_addr[5]);

	return 0;
}
Example #11
static int nusmart_pcm_probe(struct snd_soc_platform *platform)
{
	int ret = 0;

	DBG_PRINT("nusmart_pcm_probe\n");

	pl330_data.base = __io_address(NS115_DMA_330S_BASE);
	ret = request_irq(IRQ_NS115_DMA330_INTR6, nusmart_pl330_interrupt, IRQF_SHARED,
			"alsa_pl330_dmac",  &dma_chan_param);
	if (ret)
	{
		DBG_PRINT("request irq failed\n");
		goto out;
	}
	ret = request_irq(IRQ_NS115_DMA330_INTR7, nusmart_pl330_interrupt, IRQF_SHARED,
			"alsa_pl330_dmac",  &dma_chan_param);
	if (ret)
	{
		DBG_PRINT("request irq failed\n");
		goto err_free_irq1;
	}

	prtd_record = kzalloc(sizeof(*prtd_record), GFP_KERNEL);
	if (prtd_record == NULL) {
		DBG_PRINT("nusmart_pcm_probe can not alloc nusmart_runtime_data for record\n");
		ret = -ENOMEM;
		goto err_free_irq0;
	}

	prtd_record->desc_pool_virt = dma_alloc_coherent(NULL, PL330_POOL_SIZE, &(prtd_record->lli_start), GFP_KERNEL);

	if(prtd_record->desc_pool_virt == NULL)
	{
		DBG_PRINT("nusmart_pcm_probe can not alloc dma descriptor for record\n");
		ret = -ENOMEM;
		goto err_free_prtd_record;
	}

	spin_lock_init(&prtd_record->lock);

	prtd_playback = kzalloc(sizeof(*prtd_playback), GFP_KERNEL);
	if (prtd_playback == NULL) {
		DBG_PRINT("nusmart_pcm_probe can not alloc nusmart_runtime_data for playback\n");
		ret = -ENOMEM;
		goto err_free_prtd_record_pool;
	}

	prtd_playback->desc_pool_virt = dma_alloc_coherent(NULL, PL330_POOL_SIZE, &(prtd_playback->lli_start), GFP_KERNEL);

	if(prtd_playback->desc_pool_virt == NULL)
	{
		DBG_PRINT("nusmart_pcm_probe can not alloc dma descriptor for record\n");
		ret = -ENOMEM;
		goto err_free_prtd_playback;
	}

	spin_lock_init(&prtd_playback->lock);

	goto out;

err_free_prtd_playback:
	kfree(prtd_playback);

err_free_prtd_record_pool:
	dma_free_coherent(NULL, PL330_POOL_SIZE, prtd_record->desc_pool_virt, prtd_record->lli_start);

err_free_prtd_record:
	kfree(prtd_record);

err_free_irq0:
	free_irq(IRQ_NS115_DMA330_INTR7, &dma_chan_param);

err_free_irq1:
	free_irq(IRQ_NS115_DMA330_INTR6, &dma_chan_param);

out:
	return ret;
}
Example #12
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	struct rproc *vrp_rproc;
	void *bufs_va;
	void *cpu_addr; /* buffer virtual address */
	void *cpu_addr_dma; /* buffer DMA address' virtual address conversion */
	void *rbufs_guest_addr_kva;
	int err = 0, i;
	size_t total_buf_space;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need less buffers if vrings are small */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers is dedicated for RX */
	vrp->rbufs = bufs_va;

	/* and half is dedicated for TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	vrp_rproc = vdev_to_rproc(vdev);
	rbufs_guest_addr_kva = vrp->rbufs;
	if (vrp_rproc->ops->kva_to_guest_addr_kva) {
		rbufs_guest_addr_kva = vrp_rproc->ops->kva_to_guest_addr_kva(vrp_rproc, vrp->rbufs, vrp->rvq);
	}
	/* set up the receive buffers */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;
		cpu_addr_dma = rbufs_guest_addr_kva + i*RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr_dma, RPMSG_BUF_SIZE);

		err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/* tell the remote processor it can start sending messages */
	virtqueue_kick(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}
Example #13

/*!
 * Enable encoder task
 * @param private       struct cam_data * mxc capture instance
 *
 * @return  status
 */
static int csi_enc_enabling_tasks(void *private)
{
	cam_data *cam = (cam_data *) private;
	int err = 0;
	CAMERA_TRACE("IPU:In csi_enc_enabling_tasks\n");

	cam->dummy_frame.vaddress = dma_alloc_coherent(0,
			       PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage),
			       &cam->dummy_frame.paddress,
			       GFP_DMA | GFP_KERNEL);
	if (cam->dummy_frame.vaddress == 0) {
		pr_err("ERROR: v4l2 capture: Allocate dummy frame "
		       "failed.\n");
		return -ENOBUFS;
	}
	cam->dummy_frame.buffer.type = V4L2_BUF_TYPE_PRIVATE;
	cam->dummy_frame.buffer.length =
	    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
	cam->dummy_frame.buffer.m.offset = cam->dummy_frame.paddress;

	ipu_clear_irq(IPU_IRQ_CSI0_OUT_EOF);
	err = ipu_request_irq(IPU_IRQ_CSI0_OUT_EOF,
			      csi_enc_callback, 0, "Mxc Camera", cam);
	if (err != 0) {
Example #14
static int __devinit mxs_hsadc_probe(struct platform_device *pdev)
{
	struct mxs_hsadc_data *pd;
	struct resource *res;
	int rlevel = 0;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (pd)
		rlevel++;
	else
		goto quit;

	pd->dev = &pdev->dev;
	platform_set_drvdata(pdev, pd);
	pd->pdev = pdev;
	rlevel++;
	
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		goto quit;
	pd->hsadc_base = ioremap(res->start, resource_size(res));
	if (pd->hsadc_base)
		rlevel++;
	else
		goto quit;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res == NULL)
		goto quit;
	pd->pwm_base = ioremap(res->start, resource_size(res));
	if (pd->pwm_base)
		rlevel++;
	else
		goto quit;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res)
		pd->dev_irq = res->start;
	else
		goto quit;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (res)
		pd->dma_irq = res->start;
	else
		goto quit;

	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (res)
		pd->dma_ch = res->start;
	else
		goto quit;

	pd->ref_hsadc_clk = clk_get(NULL, "ref_hsadc");
	if (!IS_ERR(pd->ref_hsadc_clk))
		rlevel++;
	else
		goto quit;

	pd->hsadc_clk = clk_get(NULL, "hsadc");
	if (!IS_ERR(pd->hsadc_clk))
		rlevel++;
	else
		goto quit;

	pd->pwm_clk = clk_get(NULL, "pwm");
	if (!IS_ERR(pd->pwm_clk))
		rlevel++;
	else
		goto quit;

	clk_enable(pd->ref_hsadc_clk);
	clk_enable(pd->hsadc_clk);
	clk_enable(pd->pwm_clk);
	rlevel++;

	clk_set_rate(pd->ref_hsadc_clk, REF_HSADC_FREQ);
	clk_set_rate(pd->hsadc_clk, HSADC_FREQ);

	if (request_irq(pd->dma_irq, hsadc_dma_isr, 0, "hsadc dma", pd))
		goto quit;
	else
		rlevel++;

	if (request_irq(pd->dev_irq, hsadc_isr, 0, "hsadc irq", pd))
		goto quit;
	else
		rlevel++;

	if (mxs_dma_request(pd->dma_ch, pd->dev, "hsadc"))
		goto quit;
	else
		rlevel++;

	mxs_dma_disable(pd->dma_ch);

	pd->desc = mxs_dma_alloc_desc();
	if (pd->desc==NULL)
		goto quit;
	
	memset(&pd->desc->cmd, 0, sizeof(pd->desc->cmd));
	rlevel++;

	pd->buf = dma_alloc_coherent(NULL, DMA_BUF_SIZE, &pd->buf_phy, GFP_KERNEL);
	if(!pd->buf)
		goto quit;
	
	rlevel++;

	if(hsadc_init_cdev(pd))
		goto quit;

#if HSADC_DEBUG
	printk(KERN_INFO "%s> probe successed.\n", HSADC_DEVICE_NAME);
#endif

	return 0;

quit:
	pr_err("%s quit at rlevel %d\n", __func__, rlevel);
	switch (rlevel) {
	case 14:
		hsadc_cleanup_cdev(pd);
	case 13:
		if (pd->buf_phy)
			dma_free_coherent(NULL, DMA_BUF_SIZE, pd->buf, pd->buf_phy);
	case 12:
		if (pd->desc)
			mxs_dma_free_desc(pd->desc);
	case 11:
		mxs_dma_release(pd->dma_ch, pd->dev);
	case 10:
		free_irq(pd->dev_irq, pd);
	case 9:
		free_irq(pd->dma_irq, pd);
	case 8:
		clk_disable(pd->pwm_clk);
		clk_disable(pd->hsadc_clk);
		clk_disable(pd->ref_hsadc_clk);
	case 7:
		clk_put(pd->pwm_clk);
	case 6:
		clk_put(pd->hsadc_clk);
	case 5:
		clk_put(pd->ref_hsadc_clk);
	case 4:
		iounmap(pd->pwm_base);
	case 3:
		iounmap(pd->hsadc_base);
	case 2:
		platform_set_drvdata(pdev, NULL);
	case 1:
		kfree(pd);
	case 0:
	default:
		return -ENODEV;
	}
}
Example #15
static int srp_indirect_data(struct scst_cmd *sc, struct srp_cmd *cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len, sg_cnt = 0;
	u32 tsize = 0;
	enum dma_data_direction dma_dir;

	iue = scst_cmd_get_tgt_priv(sc);
	if (dir == DMA_TO_DEVICE) {
		scst_cmd_get_write_fields(sc, &sg, &sg_cnt);
		tsize = scst_cmd_get_bufflen(sc);
		dma_dir = DMA_FROM_DEVICE;
	} else {
		sg = scst_cmd_get_sg(sc);
		sg_cnt = scst_cmd_get_sg_cnt(sc);
		tsize = scst_cmd_get_adjusted_resp_data_len(sc);
		dma_dir = DMA_TO_DEVICE;
	}

	dprintk("%p %u %u %d %d\n", iue, tsize, be32_to_cpu(id->len),
		be32_to_cpu(cmd->data_in_desc_cnt),
		be32_to_cpu(cmd->data_out_desc_cnt));

	len = min(tsize, be32_to_cpu(id->len));

	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			eprintk("Can't get dma memory %u\n", id->table_desc.len);
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
		err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			eprintk("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		eprintk("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, sg_cnt, dma_dir);
		if (!nsg) {
			eprintk("fail to map %p %d\n", iue, sg_cnt);
			err = -ENOMEM;
			goto free_mem;
		}
	}

	err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, dma_dir);

free_mem:
	if (token && dma_map)
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);

	return err;
}
Example #16
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
			    const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0, rb_size;
	int ret;

	ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
				       &trans_pcie->ctxt_info_dma_addr,
				       GFP_KERNEL);
	if (!ctxt_info)
		return -ENOMEM;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
	}

	BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
	control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
			(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
			 IWL_CTXT_INFO_RB_CB_SIZE_POS) |
			(rb_size << IWL_CTXT_INFO_RB_SIZE_POS);
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	iwl_enable_interrupts(trans);

	/* Configure debug, if exists */
	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}
Example #17
static int q6audio_init(void)
{
	struct audio_client *ac = 0;
	int res;

	mutex_lock(&audio_lock);
	if (ac_control) {
		res = 0;
		goto done;
	}

	pr_info("audio: init: codecs\n");
	icodec_rx_clk = clk_get(0, "icodec_rx_clk");
	icodec_tx_clk = clk_get(0, "icodec_tx_clk");
	ecodec_clk = clk_get(0, "ecodec_clk");
	sdac_clk = clk_get(0, "sdac_clk");
	audio_data = dma_alloc_coherent(NULL, 4096, &audio_phys, GFP_KERNEL);
	if (!audio_data) {
		pr_err("audio_init: cannot allocate DMA buffer\n");
		res = -ENOMEM;
		goto done;
	}

	adsp = dal_attach(AUDIO_DAL_DEVICE, AUDIO_DAL_PORT,
			  callback, 0);
	if (!adsp) {
		pr_err("audio_init: cannot attach to adsp\n");
		res = -ENODEV;
		goto done;
	}
	pr_info("audio: init: INIT\n");
	audio_init(adsp);
	dal_trace(adsp);

	ac = audio_client_alloc(0);
	if (!ac) {
		pr_err("audio_init: cannot allocate client\n");
		res = -ENOMEM;
		goto done;
	}

	pr_info("audio: init: OPEN control\n");
	if (audio_open_control(ac)) {
		pr_err("audio_init: cannot open control channel\n");
		res = -ENODEV;
		goto done;
	}

	pr_info("audio: init: attach ACDB\n");
	acdb = dal_attach(ACDB_DAL_DEVICE, ACDB_DAL_PORT, 0, 0);
	if (!acdb) {
		pr_err("audio_init: cannot attach to acdb channel\n");
		res = -ENODEV;
		goto done;
	}

	pr_info("audio: init: attach ADIE\n");
	adie = dal_attach(ADIE_DAL_DEVICE, ADIE_DAL_PORT, 0, 0);
	if (!adie) {
		pr_err("audio_init: cannot attach to adie\n");
		res = -ENODEV;
		goto done;
	}
	if (analog_ops->init)
		analog_ops->init();

	res = 0;
	ac_control = ac;

	wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "audio_pcm_idle");
	wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "audio_pcm_suspend");
done:
	if ((res < 0) && ac)
		audio_client_free(ac);
	mutex_unlock(&audio_lock);

	return res;
}
Example #18
static int pruss_probe(struct platform_device *dev)
{
	struct uio_info *p;
	struct uio_pruss_dev *gdev;
	struct resource *regs_prussio;
	int ret = -ENODEV, cnt = 0, len;
	struct uio_pruss_pdata *pdata = dev->dev.platform_data;

	gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	gdev->info = kzalloc(sizeof(*p) * MAX_PRUSS_EVT, GFP_KERNEL);
	if (!gdev->info) {
		kfree(gdev);
		return -ENOMEM;
	}
	/* Power on PRU in case its not done as part of boot-loader */
	gdev->pruss_clk = clk_get(&dev->dev, "pruss");
	if (IS_ERR(gdev->pruss_clk)) {
		dev_err(&dev->dev, "Failed to get clock\n");
		kfree(gdev->info);
		kfree(gdev);
		ret = PTR_ERR(gdev->pruss_clk);
		return ret;
	} else {
		clk_enable(gdev->pruss_clk);
	}

	regs_prussio = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!regs_prussio) {
		dev_err(&dev->dev, "No PRUSS I/O resource specified\n");
		goto out_free;
	}

	if (!regs_prussio->start) {
		dev_err(&dev->dev, "Invalid memory resource\n");
		goto out_free;
	}

	if (pdata->sram_pool) {
		gdev->sram_pool = pdata->sram_pool;
		gdev->sram_vaddr =
			gen_pool_alloc(gdev->sram_pool, sram_pool_sz);
		if (!gdev->sram_vaddr) {
			dev_err(&dev->dev, "Could not allocate SRAM pool\n");
			goto out_free;
		}
		gdev->sram_paddr =
			gen_pool_virt_to_phys(gdev->sram_pool,
					      gdev->sram_vaddr);
	}

	gdev->ddr_vaddr = dma_alloc_coherent(&dev->dev, extram_pool_sz,
				&(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA);
	if (!gdev->ddr_vaddr) {
		dev_err(&dev->dev, "Could not allocate external memory\n");
		goto out_free;
	}

	len = resource_size(regs_prussio);
	gdev->prussio_vaddr = ioremap(regs_prussio->start, len);
	if (!gdev->prussio_vaddr) {
		dev_err(&dev->dev, "Can't remap PRUSS I/O  address range\n");
		goto out_free;
	}

	gdev->pintc_base = pdata->pintc_base;
	gdev->hostirq_start = platform_get_irq(dev, 0);

	for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
		p->mem[0].addr = regs_prussio->start;
		p->mem[0].size = resource_size(regs_prussio);
		p->mem[0].memtype = UIO_MEM_PHYS;

		p->mem[1].addr = gdev->sram_paddr;
		p->mem[1].size = sram_pool_sz;
		p->mem[1].memtype = UIO_MEM_PHYS;

		p->mem[2].addr = gdev->ddr_paddr;
		p->mem[2].size = extram_pool_sz;
		p->mem[2].memtype = UIO_MEM_PHYS;

		p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt);
		p->version = DRV_VERSION;

		/* Register PRUSS IRQ lines */
		p->irq = gdev->hostirq_start + cnt;
		p->handler = pruss_handler;
		p->priv = gdev;

		ret = uio_register_device(&dev->dev, p);
		if (ret < 0)
			goto out_free;
	}

	platform_set_drvdata(dev, gdev);
	return 0;

out_free:
	pruss_cleanup(dev, gdev);
	return ret;
}
Example #19
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) /* assumed guard: the excerpt's opening #if was lost; __devinit exists only before 3.8 */
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	mutex_init(&udev->lock);
	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev,  DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
	udev->info.open = igbuio_pci_open;
	udev->info.release = igbuio_pci_release;
	udev->info.priv = udev;
	udev->pdev = dev;

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	/*
	 * Doing a harmless dma mapping for attaching the device to
	 * the iommu identity mapping if kernel boots with iommu=pt.
	 * Note this is not a problem if there is no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
			GFP_KERNEL);
	if (map_addr)
		memset(map_addr, 0, 1024);

	if (!map_addr)
		dev_info(&dev->dev, "dma mapping failed\n");
	else {
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);

		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
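
The probe above enables the device, maps the BARs, creates a sysfs group, and registers a UIO device; a matching remove callback would unwind those steps in reverse. The following is only a sketch built from the helpers already used in the probe, not the driver's actual remove function.

static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	/* undo the successful probe steps in reverse order */
	uio_unregister_device(&udev->info);
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}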
Example #20
static int __init goldfish_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct goldfish_mmc_host *host = NULL;
	struct resource *res;
	int ret = 0;
	int irq;
	dma_addr_t buf_addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev);
	if (mmc == NULL) {
		ret = -ENOMEM;
		goto err_alloc_host_failed;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
#ifdef CONFIG_ARM
	host->reg_base = (void __iomem *)IO_ADDRESS(res->start - IO_START);
	host->virt_base = dma_alloc_writecombine(&pdev->dev, BUFFER_SIZE,
						 &buf_addr, GFP_KERNEL);
#elif defined(CONFIG_X86)
	/*
	 * Use NULL for dev for ISA-like devices
	 */
	host->reg_base = ioremap(res->start, res->end - res->start + 1);
	host->virt_base =
	    dma_alloc_coherent(NULL, BUFFER_SIZE, &buf_addr, GFP_KERNEL);
#else
#error NOT SUPPORTED
#endif
	if (!host->virt_base) {
		ret = -EBUSY;
		goto dma_alloc_failed;
	}
	host->phys_base = buf_addr;

	host->id = pdev->id;
	host->irq = irq;

	mmc->ops = &goldfish_mmc_ops;
	mmc->f_min = 400000;
	mmc->f_max = 24000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;

	/* Use scatterlist DMA to reduce per-transfer costs.
	 * NOTE max_seg_size assumption that small blocks aren't
	 * normally used (except e.g. for reading SD registers).
	 */
	mmc->max_phys_segs = 32;
	mmc->max_hw_segs = 32;
	mmc->max_blk_size = 2048;	/* MMC_BLOCK_LENGTH is 11 bits (+1) */
	mmc->max_blk_count = 2048;	/* MMC_BLOCK_COUNT is 11 bits (+1) */
	mmc->max_req_size = BUFFER_SIZE;
	mmc->max_seg_size = mmc->max_req_size;

	ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto err_request_irq_failed;

	host->dev = &pdev->dev;
	platform_set_drvdata(pdev, host);

	ret = device_create_file(&pdev->dev, &dev_attr_cover_switch);
	if (ret)
		dev_warn(mmc_dev(host->mmc),
			 "Unable to create sysfs attributes\n");

	GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base);
	GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE,
			   MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA |
			   MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT);

	mmc_add_host(mmc);

	return 0;

err_request_irq_failed:
#ifdef	CONFIG_ARM
	dma_free_writecombine(&pdev->dev, BUFFER_SIZE, host->virt_base,
			      host->phys_base);
#elif defined(CONFIG_X86)
	dma_free_coherent(NULL, BUFFER_SIZE, host->virt_base, host->phys_base);
#else
#error NOT SUPPORTED
#endif
dma_alloc_failed:
	mmc_free_host(host->mmc);
err_alloc_host_failed:
	return ret;
}
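
A removal path for this probe would release the same resources in reverse order, using the same CONFIG_ARM/CONFIG_X86 split as the allocation. The sketch below is an assumption modelled on the probe, not the driver's own goldfish_mmc_remove().

static int goldfish_mmc_remove(struct platform_device *pdev)
{
	struct goldfish_mmc_host *host = platform_get_drvdata(pdev);

	device_remove_file(&pdev->dev, &dev_attr_cover_switch);
	mmc_remove_host(host->mmc);
	free_irq(host->irq, host);
#ifdef CONFIG_ARM
	dma_free_writecombine(&pdev->dev, BUFFER_SIZE, host->virt_base,
			      host->phys_base);
#else
	dma_free_coherent(NULL, BUFFER_SIZE, host->virt_base, host->phys_base);
	iounmap(host->reg_base);
#endif
	mmc_free_host(host->mmc);
	return 0;
}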
Example #21
/* remember to add cleanup code (above) if you add anything here */
static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
{
	int i;

	/* QTDs for control/bulk/intr transfers */
	ehci->qtd_pool = dma_pool_create ("ehci_qtd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_qtd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qtd_pool) {
		goto fail;
	}

	/* QHs for control/bulk/intr transfers */
	ehci->qh_pool = dma_pool_create ("ehci_qh",
			ehci_to_hcd(ehci)->self.controller,
			sizeof(struct ehci_qh_hw),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->qh_pool) {
		goto fail;
	}
	ehci->async = ehci_qh_alloc (ehci, flags);
	if (!ehci->async) {
		goto fail;
	}

	/* ITD for high speed ISO transfers */
	ehci->itd_pool = dma_pool_create ("ehci_itd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_itd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->itd_pool) {
		goto fail;
	}

	/* SITD for full/low speed split ISO transfers */
	ehci->sitd_pool = dma_pool_create ("ehci_sitd",
			ehci_to_hcd(ehci)->self.controller,
			sizeof (struct ehci_sitd),
			32 /* byte alignment (for hw parts) */,
			4096 /* can't cross 4K */);
	if (!ehci->sitd_pool) {
		goto fail;
	}

	/* Hardware periodic table */
	ehci->periodic = (__le32 *)
		dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller,
			ehci->periodic_size * sizeof(__le32),
			&ehci->periodic_dma, 0);
	if (ehci->periodic == NULL) {
		goto fail;
	}
	for (i = 0; i < ehci->periodic_size; i++)
		ehci->periodic [i] = EHCI_LIST_END(ehci);

	/* software shadow of hardware table */
	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
	if (ehci->pshadow != NULL)
		return 0;

fail:
	ehci_dbg (ehci, "couldn't init memory\n");
	ehci_mem_cleanup (ehci);
	return -ENOMEM;
}
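
Each failure path above jumps to a single cleanup routine, so ehci_mem_cleanup() must be safe to call with only some of the pools created. A sketch of what such a cleanup has to do is shown below; the NULL checks are what make it safe after an early goto fail, and the async QH teardown (which goes through the QH refcounting helpers) is omitted here.

static void ehci_mem_cleanup(struct ehci_hcd *ehci)
{
	/* pools and buffers may or may not have been created yet */
	if (ehci->qtd_pool)
		dma_pool_destroy(ehci->qtd_pool);
	ehci->qtd_pool = NULL;

	if (ehci->qh_pool)
		dma_pool_destroy(ehci->qh_pool);
	ehci->qh_pool = NULL;

	if (ehci->itd_pool)
		dma_pool_destroy(ehci->itd_pool);
	ehci->itd_pool = NULL;

	if (ehci->sitd_pool)
		dma_pool_destroy(ehci->sitd_pool);
	ehci->sitd_pool = NULL;

	if (ehci->periodic)
		dma_free_coherent(ehci_to_hcd(ehci)->self.controller,
				  ehci->periodic_size * sizeof(__le32),
				  ehci->periodic, ehci->periodic_dma);
	ehci->periodic = NULL;

	kfree(ehci->pshadow);
	ehci->pshadow = NULL;
}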
Example #22
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len;

	if (dma_map || ext_desc) {
		iue = (struct iu_entry *) sc->SCp.ptr;
		sg = sc->request_buffer;

		dprintk("%p %u %u %d %d\n",
			iue, sc->request_bufflen, id->len,
			cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
	}

	nmd = id->table_desc.len / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
				&token, GFP_KERNEL);
		if (!md) {
			eprintk("Can't get dma memory %u\n", id->table_desc.len);
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, id->table_desc.len);
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = id->table_desc.len;
		err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      id->table_desc.len);
		if (err) {
			eprintk("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		eprintk("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
		if (!nsg) {
			eprintk("fail to map %p %d\n", iue, sc->use_sg);
			err = -EIO;
			goto free_mem;
		}
		len = min(sc->request_bufflen, id->len);
	} else
		len = id->len;

	err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
	if (token && dma_map)
		dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);

	return err;
}
Example #23
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
{
	int ret = 0;
	bool used = false;
	long val;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/* This shouldn't happen */
	if (WARN_ON(mode != CS_MODE_SYSFS))
		return -EINVAL;

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	val = local_xchg(&drvdata->mode, mode);
	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (val == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->buf == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	memset(drvdata->vaddr, 0, drvdata->size);

	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
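
The comments above describe a common pattern: allocate the coherent buffer with the spinlock dropped, re-take the lock, publish the buffer only if nobody raced in ahead, and free the spare copy outside the lock. Reduced to its core with a hypothetical structure (mydev and its fields are illustrative only, not part of the TMC driver):

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

struct mydev {
	spinlock_t lock;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
};

static int mydev_ensure_buf(struct mydev *d, struct device *dev)
{
	unsigned long flags;
	dma_addr_t paddr = 0;
	void *vaddr = NULL;

	spin_lock_irqsave(&d->lock, flags);
	if (!d->vaddr) {
		/* dma_alloc_coherent() may sleep: drop the lock first */
		spin_unlock_irqrestore(&d->lock, flags);
		vaddr = dma_alloc_coherent(dev, d->size, &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;
		spin_lock_irqsave(&d->lock, flags);
	}

	if (!d->vaddr && vaddr) {
		/* we won the race: publish the buffer */
		d->vaddr = vaddr;
		d->paddr = paddr;
		vaddr = NULL;
	}
	spin_unlock_irqrestore(&d->lock, flags);

	/* someone else allocated first: free the spare copy outside the lock */
	if (vaddr)
		dma_free_coherent(dev, d->size, vaddr, paddr);

	return 0;
}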
Example #24
static int __init sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;

	switch (sdma->version) {
	case 1:
		sdma->num_events = 32;
		break;
	case 2:
		sdma->num_events = 48;
		break;
	default:
		dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
		return -ENODEV;
	}

	clk_enable(sdma->clk);

	/* Be sure SDMA has not started yet */
	__raw_writel(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(NULL,
			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);

	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->num_events; i++)
		__raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		__raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel(&sdma->channel[0]);
	if (ret)
		goto err_dma_alloc;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	__raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	__raw_writel(0, sdma->regs + SDMA_H_CONFIG);

	__raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Set bits of CONFIG register with given context switching mode */
	__raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk);

	return 0;

err_dma_alloc:
	clk_disable(sdma->clk);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}
Example #25
static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	RTSX_DEBUGP("Realtek PCI-E card reader detected\n");

	err = pci_enable_device(pci);
	if (err < 0) {
		printk(KERN_ERR "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		printk(KERN_ERR "PCI request regions for %s failed!\n", CR_DRIVER_NAME);
		pci_disable_device(pci);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		printk(KERN_ERR "Unable to allocate the scsi host\n");
		pci_release_regions(pci);
		pci_disable_device(pci);
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
	if (dev->chip == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	spin_lock_init(&dev->reg_lock);
	mutex_init(&(dev->dev_mutex));
	sema_init(&(dev->sema), 0);
	init_completion(&(dev->notify));
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	printk(KERN_INFO "Resource length: 0x%x\n", (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
	if (dev->remap_addr == NULL) {
		printk(KERN_ERR "ioremap error\n");
		err = -ENXIO;
		goto errout;
	}

	/* Using "unsigned long" cast here to eliminate gcc warning in 64-bit system */
	printk(KERN_INFO "Original address: 0x%lx, remapped address: 0x%lx\n",
			(unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN,
			&(dev->rtsx_resv_buf_addr), GFP_KERNEL);
	if (dev->rtsx_resv_buf == NULL) {
		printk(KERN_ERR "alloc dma buffer fail\n");
		err = -ENXIO;
		goto errout;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;

	rtsx_init_options(dev->chip);

	printk(KERN_INFO "pci->irq = %d\n", pci->irq);

	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto errout;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		printk(KERN_ERR "Unable to add the scsi host\n");
		goto errout;
	}

	rtsx_init_chip(dev->chip);

	/* Start up our control thread */
	th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the control thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-scanning thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the scanning thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	/* Start up the polling thread */
	th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-polling thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the polling thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	pci_set_drvdata(pci, dev);

	return 0;

	/* We come here if there are any problems */
errout:
	printk(KERN_ERR "rtsx_probe() failed\n");
	release_everything(dev);

	return err;
}
Example #26
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	u32 ncfgr;
        
	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);

	/* TODO: check that rx/tx_ring_dma are dcache-line aligned */

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
	netdev->enetaddr[0]=0x30;
	netdev->enetaddr[1]=0x31;
	netdev->enetaddr[2]=0x32;
	netdev->enetaddr[3]=0x33;
	netdev->enetaddr[4]=0x34;
	netdev->enetaddr[5]=0x35;
	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB) 
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif 
	return 0;
}
Example #27
File: alloc.c Project: Addision/LVS
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
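
The err_free path relies on mlx4_buf_free() coping with a partially built buffer. The counterpart is not shown above; roughly, it has to distinguish the direct and paged cases and skip page-list entries that were never allocated. A sketch, not the verbatim mlx4 code:

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1) {
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	} else {
		/* unmap the 64-bit vmap() view if it was created */
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}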
Example #28
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}
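
Since mace_open() allocates both DMA rings and grabs two interrupt lines, the close path has to release them all. The following is a sketch of what that teardown might look like; the register writes simply undo the "turn it on" step above and are assumptions, not the driver's verbatim mace_close().

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable transmit and receive */
	mb->imr = 0xFF;		/* mask all interrupts */

	mace_dma_off(dev);

	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	return 0;
}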
Example #29
static int msm72k_probe(struct platform_device *pdev)
{
	struct usb_info *ui;
	struct msm_hsusb_gadget_platform_data *pdata;
	struct msm_otg *otg;
	int retval;

	INFO("msm72k_probe\n");
	ui = kzalloc(sizeof(struct usb_info), GFP_KERNEL);
	if (!ui)
		return -ENOMEM;

	ui->pdev = pdev;

	if (pdev->dev.platform_data) {
		pdata = pdev->dev.platform_data;
		ui->phy_reset = pdata->phy_reset;
		ui->phy_init_seq = pdata->phy_init_seq;
		ui->chg_init = pdata->chg_init;
		ui->chg_connected = pdata->chg_connected;
		ui->chg_vbus_draw = pdata->chg_vbus_draw;
		ui->usb_connected = pdata->usb_connected;
	}

	if (ui->chg_init)
		ui->chg_init(1);

	ui->buf = dma_alloc_coherent(&pdev->dev, 4096, &ui->dma, GFP_KERNEL);
	if (!ui->buf)
		return usb_free(ui, -ENOMEM);

	ui->pool = dma_pool_create("msm72k_udc", NULL, 32, 32, 0);
	if (!ui->pool)
		return usb_free(ui, -ENOMEM);


	/* FIXME: dmb() cannot be called from interrupt context
	 * the first time; need to verify how this should be fixed.
	 */
	dmb();

	ui->xceiv = otg_get_transceiver();
	if (!ui->xceiv)
		return usb_free(ui, -ENODEV);

	otg = to_msm_otg(ui->xceiv);
	ui->addr = otg->regs;

	ui->gadget.ops = &msm72k_ops;
	ui->gadget.is_dualspeed = 1;
	device_initialize(&ui->gadget.dev);
	strcpy(ui->gadget.dev.bus_id, "gadget");
	ui->gadget.dev.parent = &pdev->dev;
	ui->gadget.dev.dma_mask = pdev->dev.dma_mask;

	the_usb_info = ui;

	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, DRIVER_NAME,
					PM_QOS_DEFAULT_VALUE);
	pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ, DRIVER_NAME,
					PM_QOS_DEFAULT_VALUE);
	usb_debugfs_init(ui);

	usb_prepare(ui);

	retval = otg_set_peripheral(ui->xceiv, &ui->gadget);
	if (retval) {
		pr_err("%s: Cannot bind the transceiver, retval:(%d)\n",
				__func__, retval);
		return usb_free(ui, retval);
	}

	return 0;
}
Example #30
/*
 * img_update_realloc: This function allocates the contiguous pages to
 * accommodate the requested size of data. The memory address and size
 * values are stored globally and on every call to this function the new
 * size is checked to see if more data is required than the existing size.
 * If so, the previous memory is freed and a new allocation is done to
 * accommodate the new size. If the incoming size is less than the
 * already allocated size, that memory is reused. This function is
 * called with lock held and returns with lock held.
 */
static int img_update_realloc(unsigned long size)
{
	unsigned char *image_update_buffer = NULL;
	unsigned long rc;
	unsigned long img_buf_phys_addr;
	int ordernum;
	int dma_alloc = 0;

	/*
	 * check if a buffer of sufficient size has already
	 * been allocated
	 */
	if (rbu_data.image_update_buffer_size >= size) {
		/*
		 * check for corruption
		 */
		if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
			printk(KERN_ERR "dell_rbu:%s: corruption "
				"check failed\n", __func__);
			return -EINVAL;
		}
		/*
		 * we have a valid pre-allocated buffer with
		 * sufficient size
		 */
		return 0;
	}

	/*
	 * free any previously allocated buffer
	 */
	img_update_free();

	spin_unlock(&rbu_data.lock);

	ordernum = get_order(size);
	image_update_buffer =
		(unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);

	img_buf_phys_addr =
		(unsigned long) virt_to_phys(image_update_buffer);

	if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
		free_pages((unsigned long) image_update_buffer, ordernum);
		ordernum = -1;
		image_update_buffer = dma_alloc_coherent(NULL, size,
			&dell_rbu_dmaaddr, GFP_KERNEL);
		dma_alloc = 1;
	}

	spin_lock(&rbu_data.lock);

	if (image_update_buffer != NULL) {
		rbu_data.image_update_buffer = image_update_buffer;
		rbu_data.image_update_buffer_size = size;
		rbu_data.bios_image_size =
			rbu_data.image_update_buffer_size;
		rbu_data.image_update_ordernum = ordernum;
		rbu_data.dma_alloc = dma_alloc;
		rc = 0;
	} else {
		pr_debug("Not enough memory for image update:"
			"size = %ld\n", size);
		rc = -ENOMEM;
	}

	return rc;
}
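
img_update_free(), called above before every reallocation, has to undo whichever allocation path was taken: free_pages() for the __get_free_pages() case, dma_free_coherent() otherwise, as recorded in rbu_data.dma_alloc. A sketch based on the fields stored above (dell_rbu_dmaaddr is assumed to hold the DMA handle from the dma_alloc_coherent() call):

static void img_update_free(void)
{
	if (!rbu_data.image_update_buffer)
		return;

	/* wipe the staged BIOS image before releasing the memory */
	memset(rbu_data.image_update_buffer, 0,
	       rbu_data.image_update_buffer_size);

	if (rbu_data.dma_alloc)
		dma_free_coherent(NULL, rbu_data.bios_image_size,
				  rbu_data.image_update_buffer,
				  dell_rbu_dmaaddr);
	else
		free_pages((unsigned long)rbu_data.image_update_buffer,
			   rbu_data.image_update_ordernum);

	rbu_data.image_update_buffer = NULL;
	rbu_data.image_update_buffer_size = 0;
	rbu_data.bios_image_size = 0;
	rbu_data.dma_alloc = 0;
}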