Example no. 1
0
int smsspi_register(void)
{
	struct smsdevice_params_t params;
	int ret = 0;
	struct _spi_device_st *spi_device;
	struct _spi_dev_cb_st common_cb;

	PDEBUG("entering \n");
	printk(KERN_WARNING"enter smsspi_register\n");

	spi_device = kmalloc(sizeof(struct _spi_device_st), GFP_KERNEL);
	if (!spi_device) {
		printk(KERN_ERR "%s: failed to allocate spi_device\n", __func__);
		return -ENOMEM;
	}
	spi_dev = spi_device;

	INIT_LIST_HEAD(&spi_device->txqueue);


	spi_device->txbuf =
	    dma_alloc_coherent(NULL, MAX(TX_BUFFER_SIZE, PAGE_SIZE),
			       &spi_device->txbuf_phy_addr,
			       GFP_KERNEL | GFP_DMA);
	if (!spi_device->txbuf) {
		printk(KERN_INFO "%s dma_alloc_coherent(...) failed\n",
		       __func__);
		ret = -ENOMEM;
		goto txbuf_error;
	}

	printk(KERN_INFO "smsmdtv: spi_device->txbuf = 0x%x  spi_device->txbuf_phy_addr= 0x%x\n",
                        (unsigned int)spi_device->txbuf, spi_device->txbuf_phy_addr);

	

	spi_device->phy_dev =
	    smsspiphy_init(NULL, smsspi_int_handler, spi_device);
	if (!spi_device->phy_dev) {
		printk(KERN_INFO "%s smsspiphy_init(...) failed\n", __func__);
		ret = -ENODEV;
		goto phy_error;
	}

	common_cb.allocate_rx_buf = allocate_rx_buf;
	common_cb.free_rx_buf = free_rx_buf;
	common_cb.msg_found_cb = msg_found;
	common_cb.transfer_data_cb = smsspibus_xfer;

	ret =
	    smsspicommon_init(&spi_device->dev, spi_device, spi_device->phy_dev,
			      &common_cb);
	if (ret) {
		printk(KERN_INFO "%s smsspiphy_init(...) failed\n", __func__);
		goto common_error;
	}

	/* register in smscore */
	memset(&params, 0, sizeof(params));
	params.context = spi_device;
	params.device = &smsspi_device.dev;
	params.buffer_size = RX_BUFFER_SIZE;
	params.num_buffers = NUM_RX_BUFFERS;
	params.flags = SMS_DEVICE_NOT_READY;
	params.sendrequest_handler = smsspi_write;
	strcpy(params.devpath, "spi");
	params.device_type = default_type;

	if (0) {
		/* device family */
		/* params.setmode_handler = smsspi_setmode; */
	} else {
		params.flags =
		    SMS_DEVICE_FAMILY2 | SMS_DEVICE_NOT_READY |
		    SMS_ROM_NO_RESPONSE;
		params.preload_handler = smsspi_preload;
		params.postload_handler = smsspi_postload;
	}

	ret = smscore_register_device(&params, &spi_device->coredev);
	if (ret < 0) {
		printk(KERN_INFO "%s smscore_register_device(...) failed\n",
		       __func__);
		goto reg_device_error;
	}

	ret = smscore_start_device(spi_device->coredev);
	if (ret < 0) {
		printk(KERN_INFO "%s smscore_start_device(...) failed\n",
		       __func__);
		goto start_device_error;
	}
	spi_resume_fail = 0;
	spi_suspended = 0;

#if 0 /* ZTE: added by wangtao for cmmbtest 20110712 */
	{
		smsspibus_ssp_resume(spi_dev->phy_dev);
		/* smsspi_preload(spi_dev); */
		printk(KERN_INFO "%s [test] smsspi_preload test end\n", __func__);
	}
#endif

	printk(KERN_WARNING "exit smsspi_register\n");

	PDEBUG("exiting\n");
	return 0;

start_device_error:
	smscore_unregister_device(spi_device->coredev);

reg_device_error:

common_error:
	smsspiphy_deinit(spi_device->phy_dev);

phy_error:
	dma_free_coherent(NULL, MAX(TX_BUFFER_SIZE, PAGE_SIZE), spi_device->txbuf,
			  spi_device->txbuf_phy_addr);

txbuf_error:
	kfree(spi_device);

	PDEBUG("exiting error %d\n", ret);

	return ret;
}
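Every example in this collection leans on the same kernel goto-unwind idiom: each successfully acquired resource gets a cleanup label, and a failure at step N jumps to the label that releases steps N-1 back down to 1, so every exit path returns a negative errno with nothing leaked. A minimal sketch of the idiom, assuming hypothetical foo_* helpers (they stand in for smsspiphy_init() and friends, and are not APIs from the driver above):

#include <linux/dma-mapping.h>

#define FOO_BUF_SIZE 4096	/* hypothetical size */

struct foo_dev {
	void *buf;
	dma_addr_t buf_dma;
};

/* Hypothetical acquisition steps; placeholders only. */
int foo_attach_phy(struct foo_dev *foo);
void foo_detach_phy(struct foo_dev *foo);
int foo_register(struct foo_dev *foo);

static int foo_init(struct device *dev, struct foo_dev *foo)
{
	int ret;

	foo->buf = dma_alloc_coherent(dev, FOO_BUF_SIZE,
				      &foo->buf_dma, GFP_KERNEL);
	if (!foo->buf)
		return -ENOMEM;		/* nothing to unwind yet */

	ret = foo_attach_phy(foo);
	if (ret)
		goto free_buf;		/* undo step 1 only */

	ret = foo_register(foo);
	if (ret)
		goto detach_phy;	/* undo step 2, then step 1 */

	return 0;

detach_phy:
	foo_detach_phy(foo);
free_buf:
	dma_free_coherent(dev, FOO_BUF_SIZE, foo->buf, foo->buf_dma);
	return ret;			/* always a negative errno here */
}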
Example no. 2
0
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
Example no. 3
0
static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
                             struct srp_indirect_buf *id,
                             enum dma_data_direction dir, srp_rdma_t rdma_io,
                             int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct srp_direct_buf *md = NULL;
    struct scatterlist dummy, *sg = NULL;
    dma_addr_t token = 0;
    int err = 0;
    int nmd, nsg = 0, len;

    if (dma_map || ext_desc) {
        iue = (struct iu_entry *) sc->SCp.ptr;
        sg = scsi_sglist(sc);

        dprintk("%p %u %u %d %d\n",
                iue, scsi_bufflen(sc), id->len,
                cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
    }

    nmd = id->table_desc.len / sizeof(struct srp_direct_buf);

    if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
            (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
        md = &id->desc_list[0];
        goto rdma;
    }

    if (ext_desc && dma_map) {
        md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
                                &token, GFP_KERNEL);
        if (!md) {
            eprintk("Can't get dma memory %u\n", id->table_desc.len);
            return -ENOMEM;
        }

        sg_init_one(&dummy, md, id->table_desc.len);
        sg_dma_address(&dummy) = token;
        sg_dma_len(&dummy) = id->table_desc.len;
        err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
                      id->table_desc.len);
        if (err) {
            eprintk("Error copying indirect table %d\n", err);
            goto free_mem;
        }
    } else {
        eprintk("This command uses external indirect buffer\n");
        return -EINVAL;
    }

rdma:
    if (dma_map) {
        nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
                         DMA_BIDIRECTIONAL);
        if (!nsg) {
            eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
            err = -EIO;
            goto free_mem;
        }
        len = min(scsi_bufflen(sc), id->len);
    } else
        len = id->len;

    err = rdma_io(sc, sg, nsg, md, nmd, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
    if (token && dma_map)
        dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);

    return err;
}
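The notable move in srp_indirect_data is fetching the external descriptor table through a generic sg-based transfer callback: it allocates a coherent buffer, wraps it in a one-entry scatterlist via sg_init_one(), and fills in the DMA address and length by hand, since coherent memory is already mapped and needs no dma_map_sg(). A reduced sketch of just that wrapping step (fetch_into_coherent is a hypothetical name):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void *fetch_into_coherent(struct device *dev, size_t len,
				 dma_addr_t *token)
{
	struct scatterlist sg;
	void *buf;

	buf = dma_alloc_coherent(dev, len, token, GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Dress the coherent buffer up as a scatterlist so a generic
	 * sg-based helper can fill it; the dma fields are set by hand
	 * because no dma_map_sg() call is needed here. */
	sg_init_one(&sg, buf, len);
	sg_dma_address(&sg) = *token;
	sg_dma_len(&sg) = len;

	/* ... hand &sg (nents = 1) to the transfer callback ... */
	return buf;
}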
Example no. 4
0
int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
	u32 first_evt_idx)
{
	int res;
	struct ipa_mem_buffer mem;
	struct IpaHwMhiInitCmdData_t *init_cmd_data;
	struct IpaHwMhiMsiCmdData_t *msi_cmd;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	ipa_inc_client_enable_clks();

	res = ipa_uc_update_hw_flags(0);
	if (res) {
		IPAERR("ipa_uc_update_hw_flags failed %d\n", res);
		goto disable_clks;
	}

	mem.size = sizeof(*init_cmd_data);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}
	memset(mem.base, 0, mem.size);
	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
	init_cmd_data->msiAddress = msi->addr_low;
	init_cmd_data->mmioBaseAddress = mmio_addr;
	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
	init_cmd_data->firstChannelIndex = first_ch_idx;
	init_cmd_data->firstEventRingIndex = first_evt_idx;
	res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
		false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}

	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	mem.size = sizeof(*msi_cmd);
	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
		GFP_KERNEL);
	if (!mem.base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
		res = -ENOMEM;
		goto disable_clks;
	}

	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
	msi_cmd->msiAddress_hi = msi->addr_hi;
	msi_cmd->msiAddress_low = msi->addr_low;
	msi_cmd->msiData = msi->data;
	msi_cmd->msiMask = msi->mask;
	res = ipa_uc_send_cmd((u32)mem.phys_base,
		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
			mem.phys_base);
		goto disable_clks;
	}

	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);

	res = 0;

disable_clks:
	ipa_dec_client_disable_clks();
	return res;

}
Example no. 5
0
/*
 *  This function will allocate both the DMA descriptor structure, and the
 *  buffers it contains.  These are used to contain the descriptors used
 *  by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}
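The skip loop in ath_descdma_setup exists because, without ATH9K_HW_CAP_4KB_SPLITTRANS, the hardware must not fetch a descriptor that straddles a 4 KB page. In isolation the boundary test looks roughly like the sketch below; this is an assumption about what a check such as ATH_DESC_4KB_BOUND_CHECK computes, not the driver's actual macro:

#include <linux/types.h>

/*
 * Hedged sketch: true if a descriptor of 'len' bytes starting at bus
 * address 'daddr' would cross a 4 KB boundary. The real ath9k macro
 * may use a different formulation.
 */
static bool desc_crosses_4k(dma_addr_t daddr, u32 len)
{
	return ((daddr & 0xFFF) + len) > 0x1000;
}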
/* Allocate and initialize rx and tx DMA chains */
static inline int stmp_appuart_dma_init(struct stmp_appuart_port *s)
{
	int err = 0;
	struct stmp3xxx_dma_descriptor *t = &s->tx_desc;
#ifndef RX_CHAIN
	struct stmp3xxx_dma_descriptor *r = &s->rx_desc;
#else
	int i;
#endif

	err = stmp3xxx_dma_request(s->dma_rx, s->dev, s->dev->bus_id);
	if (err)
		goto out;
	err = stmp3xxx_dma_request(s->dma_tx, s->dev, s->dev->bus_id);
	if (err)
		goto out1;

#ifndef RX_CHAIN
	err = stmp3xxx_dma_allocate_command(s->dma_rx, r);
	if (err)
		goto out2;
#endif
	err = stmp3xxx_dma_allocate_command(s->dma_tx, t);
	if (err)
		goto out3;
	t->virtual_buf_ptr = dma_alloc_coherent(s->dev,
					   TX_BUFFER_SIZE,
					   &t->command->buf_ptr,
					   GFP_DMA);
	if (!t->virtual_buf_ptr) {
		err = -ENOMEM;
		goto out4;
	}
#ifdef DEBUG
	memset(t->virtual_buf_ptr, 0x4B, TX_BUFFER_SIZE);
#endif

#ifndef RX_CHAIN
	r->virtual_buf_ptr = dma_alloc_coherent(s->dev,
					   RX_BUFFER_SIZE,
					   &r->command->buf_ptr,
					   GFP_DMA);
	if (!r->virtual_buf_ptr) {
		err = -ENOMEM;
		goto out5;
	}
#ifdef DEBUG
	memset(r->virtual_buf_ptr, 0x4C, RX_BUFFER_SIZE);
#endif
#else
	stmp3xxx_dma_make_chain(s->dma_rx, &s->rx_chain, s->rxd, RX_CHAIN);
	for (i = 0; i < RX_CHAIN; i++) {
		struct stmp3xxx_dma_descriptor *r = s->rxd + i;

		r->command->cmd =
			BF_APBX_CHn_CMD_XFER_COUNT(RX_BUFFER_SIZE) |
			BF_APBX_CHn_CMD_CMDWORDS(1) |
			BM_APBX_CHn_CMD_WAIT4ENDCMD |
			BM_APBX_CHn_CMD_SEMAPHORE |
			BM_APBX_CHn_CMD_IRQONCMPLT |
			BM_APBX_CHn_CMD_CHAIN |
			BF_APBX_CHn_CMD_COMMAND(
				BV_APBX_CHn_CMD_COMMAND__DMA_WRITE);
		r->virtual_buf_ptr = dma_alloc_coherent(s->dev,
					   RX_BUFFER_SIZE,
					   &r->command->buf_ptr,
					   GFP_DMA);
		r->command->pio_words[0] = /* BM_UARTAPP_CTRL0_RUN | */
			BF_UARTAPP_CTRL0_XFER_COUNT(RX_BUFFER_SIZE)|
			BM_UARTAPP_CTRL0_RXTO_ENABLE |
			BF_UARTAPP_CTRL0_RXTIMEOUT(3);
	}
#endif
	return 0;

	/*
	 * would be necessary on other error paths

	dma_free_coherent( s->dev, RX_BUFFER_SIZE, r->virtual_buf_ptr,
			   r->command->buf_ptr);
	*/
out5:
	dma_free_coherent(s->dev, TX_BUFFER_SIZE, t->virtual_buf_ptr,
			   t->command->buf_ptr);
out4:
	stmp3xxx_dma_free_command(s->dma_tx, t);
out3:
#ifndef RX_CHAIN
	stmp3xxx_dma_free_command(s->dma_rx, r);
#endif
out2:
	stmp3xxx_dma_release(s->dma_tx);
out1:
	stmp3xxx_dma_release(s->dma_rx);
out:
	WARN_ON(err);
	return err;
}
Example no. 7
0
int cppi41_dma_block_init(u8 dma_num, u8 q_mgr, u8 num_order,
                          u32 *sched_tbl, u8 tbl_size)
{
    const struct cppi41_dma_block *dma_block;
    unsigned num_desc, num_reg;
    void *ptr;
    int error, i;
    u16 q_num;
    u32 val;

    if (dma_num >= cppi41_num_dma_block ||
            q_mgr >= cppi41_num_queue_mgr ||
            !tbl_size || sched_tbl == NULL)
        return -EINVAL;

    error = cppi41_queue_alloc(CPPI41_FREE_DESC_QUEUE |
                               CPPI41_UNASSIGNED_QUEUE, q_mgr, &q_num);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
               "descriptor queue.\n", __func__);
        return error;
    }
    DBG("Teardown descriptor queue %d in queue manager 0 "
        "allocated\n", q_num);

    /*
     * Tell the hardware about the Teardown descriptor
     * queue manager and queue number.
     */
    dma_block = &cppi41_dma_block[dma_num];
    cppi_writel((q_mgr << DMA_TD_DESC_QMGR_SHIFT) |
                (q_num << DMA_TD_DESC_QNUM_SHIFT),
                dma_block->global_ctrl_base +
                DMA_TEARDOWN_FREE_DESC_CTRL_REG);
    DBG("Teardown free descriptor control @ %p, value: %x\n",
        dma_block->global_ctrl_base + DMA_TEARDOWN_FREE_DESC_CTRL_REG,
        cppi_readl(dma_block->global_ctrl_base +
                   DMA_TEARDOWN_FREE_DESC_CTRL_REG));

    num_desc = 1 << num_order;
    dma_teardown[dma_num].rgn_size = num_desc *
                                     sizeof(struct cppi41_teardown_desc);

    /* Pre-allocate teardown descriptors. */
    ptr = dma_alloc_coherent(NULL, dma_teardown[dma_num].rgn_size,
                             &dma_teardown[dma_num].phys_addr,
                             GFP_KERNEL | GFP_DMA);
    if (ptr == NULL) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate teardown "
               "descriptors.\n", __func__);
        error = -ENOMEM;
        goto free_queue;
    }
    dma_teardown[dma_num].virt_addr = ptr;

    error = cppi41_mem_rgn_alloc(q_mgr, dma_teardown[dma_num].phys_addr, 5,
                                 num_order, &dma_teardown[dma_num].mem_rgn);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to allocate queue manager "
               "memory region for teardown descriptors.\n", __func__);
        goto free_mem;
    }

    error = cppi41_queue_init(&dma_teardown[dma_num].queue_obj, 0, q_num);
    if (error) {
        printk(KERN_ERR "ERROR: %s: Unable to initialize teardown "
               "free descriptor queue.\n", __func__);
        goto free_rgn;
    }

    dma_teardown[dma_num].q_num = q_num;
    dma_teardown[dma_num].q_mgr = q_mgr;
    dma_teardown[dma_num].num_desc = num_desc;
    /*
     * Push all teardown descriptors to the free teardown queue
     * for the CPPI 4.1 system.
     */
    cppi41_init_teardown_queue(dma_num);

    /* Initialize the DMA scheduler. */
    num_reg = (tbl_size + 3) / 4;
    for (i = 0; i < num_reg; i++) {
        val = sched_tbl[i];
        cppi_writel(val, dma_block->sched_table_base +
                    DMA_SCHED_TABLE_WORD_REG(i));
        DBG("DMA scheduler table @ %p, value written: %x\n",
            dma_block->sched_table_base + DMA_SCHED_TABLE_WORD_REG(i),
            val);
    }

    cppi_writel((tbl_size - 1) << DMA_SCHED_LAST_ENTRY_SHIFT |
                DMA_SCHED_ENABLE_MASK,
                dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG);
    DBG("DMA scheduler control @ %p, value: %x\n",
        dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG,
        cppi_readl(dma_block->sched_ctrl_base + DMA_SCHED_CTRL_REG));

    return 0;

free_rgn:
    cppi41_mem_rgn_free(q_mgr, dma_teardown[dma_num].mem_rgn);
free_mem:
    dma_free_coherent(NULL, dma_teardown[dma_num].rgn_size,
                      dma_teardown[dma_num].virt_addr,
                      dma_teardown[dma_num].phys_addr);
free_queue:
    cppi41_queue_free(q_mgr, q_num);
    return error;
}
Example no. 8
0
static long mxc_ipu_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int ret = 0;

	switch (cmd) {
	case IPU_INIT_CHANNEL:
		{
			ipu_channel_parm parm;

			if (copy_from_user
					(&parm, (ipu_channel_parm *) arg,
					 sizeof(ipu_channel_parm)))
				return -EFAULT;

			if (!parm.flag) {
				ret =
					ipu_init_channel(parm.channel,
							&parm.params);
			} else {
				ret = ipu_init_channel(parm.channel, NULL);
			}
		}
		break;
	case IPU_UNINIT_CHANNEL:
		{
			ipu_channel_t ch;
			int __user *argp = (void __user *)arg;
			if (get_user(ch, argp))
				return -EFAULT;
			ipu_uninit_channel(ch);
		}
		break;
	case IPU_INIT_CHANNEL_BUFFER:
		{
			ipu_channel_buf_parm parm;
			if (copy_from_user
				(&parm, (ipu_channel_buf_parm *) arg,
				sizeof(ipu_channel_buf_parm)))
				return -EFAULT;

			ret =
				ipu_init_channel_buffer(
						parm.channel, parm.type,
						parm.pixel_fmt,
						parm.width, parm.height,
						parm.stride,
						parm.rot_mode,
						parm.phyaddr_0,
						parm.phyaddr_1,
						parm.u_offset,
						parm.v_offset);

		}
		break;
	case IPU_UPDATE_CHANNEL_BUFFER:
		{
			ipu_channel_buf_parm parm;
			if (copy_from_user
				(&parm, (ipu_channel_buf_parm *) arg,
				sizeof(ipu_channel_buf_parm)))
				return -EFAULT;

			if ((parm.phyaddr_0 != (dma_addr_t) NULL)
				&& (parm.phyaddr_1 == (dma_addr_t) NULL)) {
				ret =
					ipu_update_channel_buffer(
							parm.channel,
							parm.type,
							parm.bufNum,
							parm.phyaddr_0);
			} else if ((parm.phyaddr_0 == (dma_addr_t) NULL)
				&& (parm.phyaddr_1 != (dma_addr_t) NULL)) {
				ret =
					ipu_update_channel_buffer(
							parm.channel,
							parm.type,
							parm.bufNum,
							parm.phyaddr_1);
			} else {
				ret = -1;
			}

		}
		break;
	case IPU_SELECT_CHANNEL_BUFFER:
		{
			ipu_channel_buf_parm parm;
			if (copy_from_user
				(&parm, (ipu_channel_buf_parm *) arg,
				sizeof(ipu_channel_buf_parm)))
				return -EFAULT;

			ret =
				ipu_select_buffer(parm.channel,
					parm.type, parm.bufNum);

		}
		break;
	case IPU_SELECT_MULTI_VDI_BUFFER:
		{
			uint32_t parm;
			if (copy_from_user
				(&parm, (uint32_t *) arg,
				sizeof(uint32_t)))
				return -EFAULT;

			ret = ipu_select_multi_vdi_buffer(parm);
		}
		break;
	case IPU_LINK_CHANNELS:
		{
			ipu_channel_link link;
			if (copy_from_user
				(&link, (ipu_channel_link *) arg,
				sizeof(ipu_channel_link)))
				return -EFAULT;

			ret = ipu_link_channels(link.src_ch,
				link.dest_ch);

		}
		break;
	case IPU_UNLINK_CHANNELS:
		{
			ipu_channel_link link;
			if (copy_from_user
				(&link, (ipu_channel_link *) arg,
				sizeof(ipu_channel_link)))
				return -EFAULT;

			ret = ipu_unlink_channels(link.src_ch,
				link.dest_ch);

		}
		break;
	case IPU_ENABLE_CHANNEL:
		{
			ipu_channel_t ch;
			int __user *argp = (void __user *)arg;
			if (get_user(ch, argp))
				return -EFAULT;
			ipu_enable_channel(ch);
		}
		break;
	case IPU_DISABLE_CHANNEL:
		{
			ipu_channel_info info;
			if (copy_from_user
				(&info, (ipu_channel_info *) arg,
				 sizeof(ipu_channel_info)))
				return -EFAULT;

			ret = ipu_disable_channel(info.channel,
				info.stop);
		}
		break;
	case IPU_ENABLE_IRQ:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ipu_enable_irq(irq);
		}
		break;
	case IPU_DISABLE_IRQ:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ipu_disable_irq(irq);
		}
		break;
	case IPU_CLEAR_IRQ:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ipu_clear_irq(irq);
		}
		break;
	case IPU_FREE_IRQ:
		{
			ipu_irq_info info;

			if (copy_from_user
					(&info, (ipu_irq_info *) arg,
					 sizeof(ipu_irq_info)))
				return -EFAULT;

			ipu_free_irq(info.irq, info.dev_id);
			irq_info[info.irq].irq_pending = 0;
		}
		break;
	case IPU_REQUEST_IRQ_STATUS:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ret = ipu_get_irq_status(irq);
		}
		break;
	case IPU_REGISTER_GENERIC_ISR:
		{
			ipu_event_info info;
			if (copy_from_user
					(&info, (ipu_event_info *) arg,
					 sizeof(ipu_event_info)))
				return -EFAULT;

			ret =
				ipu_request_irq(info.irq,
					mxc_ipu_generic_handler,
					0, "video_sink", info.dev);
			if (ret == 0)
				init_waitqueue_head(&(irq_info[info.irq].waitq));
		}
		break;
	case IPU_GET_EVENT:
		/*
		 * User will have to allocate an event_type structure
		 * and pass the pointer in arg.
		 */
		{
			ipu_event_info info;
			int r = -1;

			if (copy_from_user
					(&info, (ipu_event_info *) arg,
					 sizeof(ipu_event_info)))
				return -EFAULT;

			r = get_events(&info);
			if (r == -1) {
				if ((file->f_flags & O_NONBLOCK) &&
					(irq_info[info.irq].irq_pending == 0))
					return -EAGAIN;
				wait_event_interruptible_timeout(irq_info[info.irq].waitq,
						(irq_info[info.irq].irq_pending != 0), 2 * HZ);
				r = get_events(&info);
			}
			ret = -1;
			if (r == 0) {
				if (!copy_to_user((ipu_event_info *) arg,
					&info, sizeof(ipu_event_info)))
					ret = 0;
			}
		}
		break;
	case IPU_ALOC_MEM:
		{
			ipu_mem_info info;
			if (copy_from_user
					(&info, (ipu_mem_info *) arg,
					 sizeof(ipu_mem_info)))
				return -EFAULT;

			info.vaddr = dma_alloc_coherent(0,
					PAGE_ALIGN(info.size),
					&info.paddr,
					GFP_DMA | GFP_KERNEL);
			if (info.vaddr == 0) {
				printk(KERN_ERR "dma alloc failed!\n");
				return -ENOBUFS;
			}
			if (copy_to_user((ipu_mem_info *) arg, &info,
					sizeof(ipu_mem_info)) > 0)
				return -EFAULT;
		}
		break;
	case IPU_FREE_MEM:
		{
			ipu_mem_info info;
			if (copy_from_user
					(&info, (ipu_mem_info *) arg,
					 sizeof(ipu_mem_info)))
				return -EFAULT;

			if (info.vaddr)
				dma_free_coherent(0, PAGE_ALIGN(info.size),
					info.vaddr, info.paddr);
			else
				return -EFAULT;
		}
		break;
	case IPU_IS_CHAN_BUSY:
		{
			ipu_channel_t chan;
			if (copy_from_user
					(&chan, (ipu_channel_t *)arg,
					 sizeof(ipu_channel_t)))
				return -EFAULT;

			if (ipu_is_channel_busy(chan))
				ret = 1;
			else
				ret = 0;
		}
		break;
	case IPU_CALC_STRIPES_SIZE:
		{
			ipu_stripe_parm stripe_parm;

			if (copy_from_user (&stripe_parm, (ipu_stripe_parm *)arg,
					 sizeof(ipu_stripe_parm)))
				return -EFAULT;
			ipu_calc_stripes_sizes(stripe_parm.input_width,
						stripe_parm.output_width,
						stripe_parm.maximal_stripe_width,
						stripe_parm.cirr,
						stripe_parm.equal_stripes,
						stripe_parm.input_pixelformat,
						stripe_parm.output_pixelformat,
						&stripe_parm.left,
						&stripe_parm.right);
			if (copy_to_user((ipu_stripe_parm *) arg, &stripe_parm,
					sizeof(ipu_stripe_parm)) > 0)
				return -EFAULT;
		}
		break;
	case IPU_UPDATE_BUF_OFFSET:
		{
			ipu_buf_offset_parm offset_parm;

			if (copy_from_user (&offset_parm, (ipu_buf_offset_parm *)arg,
					 sizeof(ipu_buf_offset_parm)))
				return -EFAULT;
			ret = ipu_update_channel_offset(offset_parm.channel,
							offset_parm.type,
							offset_parm.pixel_fmt,
							offset_parm.width,
							offset_parm.height,
							offset_parm.stride,
							offset_parm.u_offset,
							offset_parm.v_offset,
							offset_parm.vertical_offset,
							offset_parm.horizontal_offset);
		}
		break;
	case IPU_CSC_UPDATE:
		{
			int param[5][3];
			ipu_csc_update csc;
			if (copy_from_user(&csc, (void *) arg,
					   sizeof(ipu_csc_update)))
				return -EFAULT;
			if (copy_from_user(&param[0][0], (void *) csc.param,
					   sizeof(param)))
				return -EFAULT;
			ipu_set_csc_coefficients(csc.channel, param);
		}
		break;
	default:
		break;
	}
	return ret;
}
Example no. 9
0
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}
static int vdmafb_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct vdmafb_dev *fbdev;
	struct resource *res;
	int fbsize;
	struct backlight_properties props;
	struct backlight_device *bl;

	fbdev = devm_kzalloc(&pdev->dev, sizeof(*fbdev), GFP_KERNEL);
	if (!fbdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, fbdev);

	fbdev->info.fbops = &vdmafb_ops;
	fbdev->info.device = &pdev->dev;
	fbdev->info.par = fbdev;

	fbdev->dma_template = devm_kzalloc(&pdev->dev,
		sizeof(struct dma_interleaved_template) +
		sizeof(struct data_chunk), GFP_KERNEL);
	if (!fbdev->dma_template)
		return -ENOMEM;

	vdmafb_init_var(fbdev, pdev);
	vdmafb_init_fix(fbdev);

	/* Request I/O resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "I/O resource request failed\n");
		return -ENXIO;
	}
	res->flags &= ~IORESOURCE_CACHEABLE;
	fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fbdev->regs))
		return PTR_ERR(fbdev->regs);

	/* Allocate framebuffer memory */
	fbsize = fbdev->info.fix.smem_len;
	fbdev->fb_virt = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(fbsize),
					    &fbdev->fb_phys, GFP_KERNEL);
	if (!fbdev->fb_virt) {
		dev_err(&pdev->dev,
			"Frame buffer memory allocation failed\n");
		return -ENOMEM;
	}
	fbdev->info.fix.smem_start = fbdev->fb_phys;
	fbdev->info.screen_base = fbdev->fb_virt;
	fbdev->info.pseudo_palette = fbdev->pseudo_palette;

	pr_debug("%s virt=%p phys=%x size=%d\n", __func__,
		fbdev->fb_virt, fbdev->fb_phys, fbsize);

	/* Clear framebuffer */
	memset_io(fbdev->fb_virt, 0, fbsize);

	fbdev->dma = dma_request_slave_channel(&pdev->dev, "video");
	if (IS_ERR_OR_NULL(fbdev->dma)) {
		if (fbdev->dma)
			ret = PTR_ERR(fbdev->dma);
		else
			ret = -EPROBE_DEFER;
		dev_err(&pdev->dev, "Failed to allocate DMA channel (%d).\n", ret);
		goto err_dma_free;
	}

	/* Setup and enable the framebuffer */
	vdmafb_setupfb(fbdev);

	ret = fb_alloc_cmap(&fbdev->info.cmap, 256, 0);
	if (ret) {
		dev_err(&pdev->dev, "fb_alloc_cmap failed\n");
	}

	/* Register framebuffer */
	ret = register_framebuffer(&fbdev->info);
	if (ret) {
		dev_err(&pdev->dev, "Framebuffer registration failed\n");
		goto err_channel_free;
	}

	/* Register backlight */
	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 1023;
	bl = backlight_device_register("backlight", &pdev->dev, fbdev,
				       &vdmafb_bl_ops, &props);
	if (IS_ERR(bl)) {
		dev_err(&pdev->dev, "error %ld on backlight register\n",
				PTR_ERR(bl));
	} else {
		fbdev->backlight = bl;
		bl->props.power = FB_BLANK_UNBLANK;
		bl->props.fb_blank = FB_BLANK_UNBLANK;
		bl->props.brightness = vdmafb_bl_get_brightness(bl);
	}

	return 0;

err_channel_free:
	dma_release_channel(fbdev->dma);
err_dma_free:
	dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbsize), fbdev->fb_virt,
			  fbdev->fb_phys);

	return ret;
}
Example no. 11
0
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 src_crc32;
	u32 dst_crc32;

	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_src_phys_addr, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	get_random_bytes(src_addr, size);
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_dst_phys_addr, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "failed to allocate destination address\n");
		ret = false;
		goto err_orig_src_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_dst_addr,
			  orig_dst_phys_addr);

err_orig_src_addr:
	dma_free_coherent(dev, size + alignment, orig_src_addr,
			  orig_src_phys_addr);

err:
	return ret;
}
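pci_endpoint_test_copy guarantees an aligned bus address by over-allocating size + alignment bytes, aligning the physical address up, and sliding the virtual pointer forward by the same offset so the CPU and device views stay in step. The arithmetic in isolation, as a sketch (align must be a power of two, which is what IS_ALIGNED() and ALIGN() assume):

#include <linux/kernel.h>
#include <linux/dma-mapping.h>

/*
 * Sketch: with size + align bytes allocated, some address within the
 * first 'align' bytes is align-aligned, and 'size' bytes starting
 * there still fit inside the allocation.
 */
static void *align_coherent(void *orig_virt, dma_addr_t orig_phys,
			    size_t align, dma_addr_t *aligned_phys)
{
	size_t offset = 0;

	if (align && !IS_ALIGNED(orig_phys, align))
		offset = ALIGN(orig_phys, align) - orig_phys;

	*aligned_phys = orig_phys + offset;
	return orig_virt + offset;	/* virtual side moves identically */
}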
Example no. 12
0
ssize_t xfifo_dma_write(struct file *filp, const char __user *buf, size_t count,
        loff_t *f_pos)
{
        struct xfifo_dma_dev *dev = filp->private_data;
        size_t transfer_size;
 
        int retval = 0;
 
        if (mutex_lock_interruptible(&dev->mutex)) {
                return -EINTR;
        }
 
        dev->writes++;
 
        transfer_size = count;
        if (count > dev->fifo_depth) {
                transfer_size = dev->fifo_depth;
        }
 
        /* Allocate a DMA buffer for the transfer */
        dev->buffer_v_addr = dma_alloc_coherent(&dev->pdev->dev, transfer_size,
                &dev->buffer_d_addr, GFP_KERNEL);
        if (!dev->buffer_v_addr) {
                dev_err(&dev->pdev->dev,
                        "coherent DMA buffer allocation failed\n");
                retval = -ENOMEM;
                goto fail_buffer;
        }
 
        PDEBUG("dma buffer alloc - d @0x%0x v @0x%0x\n",
                (u32)dev->buffer_d_addr, (u32)dev->buffer_v_addr);
 
        if (request_dma(dev->dma_channel, MODULE_NAME)) {
                dev_err(&dev->pdev->dev,
                        "unable to alloc DMA channel %d\n",
                        dev->dma_channel);
                retval = -EBUSY;
                goto fail_client_data;
        }
 
        dev->busy = 1;
        dev->count = transfer_size;
 
        set_dma_mode(dev->dma_channel, DMA_MODE_WRITE);
        set_dma_addr(dev->dma_channel, dev->buffer_d_addr);
        set_dma_count(dev->dma_channel, transfer_size);
        set_pl330_client_data(dev->dma_channel, dev->client_data);
        set_pl330_done_callback(dev->dma_channel,
                xfifo_dma_done_callback, dev);
        set_pl330_fault_callback(dev->dma_channel,
                xfifo_dma_fault_callback, dev);
        set_pl330_incr_dev_addr(dev->dma_channel, 0);
 
        /* Load our DMA buffer with the user data */
        if (copy_from_user(dev->buffer_v_addr, buf, transfer_size)) {
                retval = -EFAULT;
                free_dma(dev->dma_channel);
                goto fail_client_data;
        }
 
        xfifo_dma_reset_fifo();
        /* Kick off the DMA */
        enable_dma(dev->dma_channel);
 
        mutex_unlock(&dev->mutex);
 
        wait_event_interruptible(xfifo_dma_wait, dev->busy == 0);
 
        /* Deallocate the DMA buffer and free the channel */
        free_dma(dev->dma_channel);
 
        dma_free_coherent(&dev->pdev->dev, dev->count, dev->buffer_v_addr,
                dev->buffer_d_addr);
 
        PDEBUG("dma write %d bytes\n", transfer_size);
 
        return transfer_size;
 
fail_client_data:
        dma_free_coherent(&dev->pdev->dev, transfer_size, dev->buffer_v_addr,
                dev->buffer_d_addr);
fail_buffer:
        mutex_unlock(&dev->mutex);
        return retval;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
                                  struct vm_area_struct *vma)
{
    struct videobuf_dma_contig_memory *mem;
    struct videobuf_mapping *map;
    unsigned int first;
    int retval;
    unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;

    dev_dbg(q->dev, "%s\n", __func__);
    if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
        return -EINVAL;

    /* look for first buffer to map */
    for (first = 0; first < VIDEO_MAX_FRAME; first++) {
        if (!q->bufs[first])
            continue;

        if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
            continue;
        if (q->bufs[first]->boff == offset)
            break;
    }
    if (VIDEO_MAX_FRAME == first) {
        dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
                offset);
        return -EINVAL;
    }

    /* create mapping + update buffer list */
    map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
    if (!map)
        return -ENOMEM;

    q->bufs[first]->map = map;
    map->start = vma->vm_start;
    map->end = vma->vm_end;
    map->q = q;

    q->bufs[first]->baddr = vma->vm_start;

    mem = q->bufs[first]->priv;
    BUG_ON(!mem);
    MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

    mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
    mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                    &mem->dma_handle, GFP_KERNEL);
    if (!mem->vaddr) {
        dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
                mem->size);
        goto error;
    }
    dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
            mem->vaddr, mem->size);

    /* Try to remap memory */

    size = vma->vm_end - vma->vm_start;
    size = (size < mem->size) ? size : mem->size;

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    retval = remap_pfn_range(vma, vma->vm_start,
                             mem->dma_handle >> PAGE_SHIFT,
                             size, vma->vm_page_prot);
    if (retval) {
        dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
        dma_free_coherent(q->dev, mem->size,
                          mem->vaddr, mem->dma_handle);
        goto error;
    }

    vma->vm_ops          = &videobuf_vm_ops;
    vma->vm_flags       |= VM_DONTEXPAND;
    vma->vm_private_data = map;

    dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
            map, q, vma->vm_start, vma->vm_end,
            (long int) q->bufs[first]->bsize,
            vma->vm_pgoff, first);

    videobuf_vm_open(vma);

    return 0;

error:
    q->bufs[first]->map = NULL;
    kfree(map);
    return -ENOMEM;
}
Example no. 14
0
/* Handle probe of a modem device */
static int sproc_probe(struct platform_device *pdev)
{
	struct ste_modem_device *mdev =
		container_of(pdev, struct ste_modem_device, pdev);
	struct sproc *sproc;
	struct rproc *rproc;
	int err;

	dev_dbg(&mdev->pdev.dev, "probe ste-modem\n");

	if (!mdev->ops.setup || !mdev->ops.kick || !mdev->ops.kick_subscribe ||
	    !mdev->ops.power) {
		dev_err(&mdev->pdev.dev, "invalid mdev ops\n");
		return -EINVAL;
	}

	rproc = rproc_alloc(&mdev->pdev.dev, mdev->pdev.name, &sproc_ops,
			    SPROC_MODEM_FIRMWARE, sizeof(*sproc));
	if (!rproc)
		return -ENOMEM;

	sproc = rproc->priv;
	sproc->mdev = mdev;
	sproc->rproc = rproc;
	mdev->drv_data = sproc;

	/* Provide callback functions to modem device */
	sproc->mdev->ops.setup(sproc->mdev, &sproc_dev_cb);

	/* Set the STE-modem specific firmware handler */
	rproc->fw_ops = &sproc_fw_ops;

	/*
	 * STE-modem requires the firmware to be located
	 * at the start of the shared memory region. So we need to
	 * reserve space for firmware at the start.
	 */
	sproc->fw_addr = dma_alloc_coherent(rproc->dev.parent, SPROC_FW_SIZE,
					    &sproc->fw_dma_addr,
					    GFP_KERNEL);
	if (!sproc->fw_addr) {
		sproc_err(sproc, "Cannot allocate memory for fw\n");
		err = -ENOMEM;
		goto free_rproc;
	}

	/* Register as a remoteproc device */
	err = rproc_add(rproc);
	if (err)
		goto free_mem;

	return 0;

free_mem:
	dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE,
			  sproc->fw_addr, sproc->fw_dma_addr);
free_rproc:
	/* Reset device data upon error */
	mdev->drv_data = NULL;
	rproc_put(rproc);
	return err;
}
Example no. 15
0
static int init_tdm(struct tdm_priv *priv)
{
	u8 *buf;
	int i;
	int buf_size;
	dma_addr_t physaddr = 0;
	int ret = 0;
	struct tdm_adapter *adap;

	if (!priv) {
		pr_err("%s: Invalid handle\n", __func__);
		return -EINVAL;
	}

	adap = priv->adap;

	/*
	 * Allocate memory for the Rx/Tx buffers according to the active
	 * time slots:
	 * BufferSize = NUM_OF_TDM_BUF * NUM_SAMPLES_PER_FRAME *
	 *              slot_width * num_ch
	 */
	/* Allocate the Rx buffer */
	buf_size = TDM_BUF_SIZE(adap->adapt_cfg.num_ch,
				adap->adapt_cfg.slot_width,
				adap->adapt_cfg.num_frames);
	buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_ip;
	}
	priv->dma_input_paddr = physaddr;
	priv->dma_input_vaddr = buf;
	priv->tdm_input_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES);

	/* Allocate the Tx buffer */
	buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_op;
	}
	priv->dma_output_paddr = physaddr;
	priv->dma_output_vaddr = buf;
	priv->tdm_output_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES);

	/* allocate memory for TCD buffer descriptors */
	buf = dma_alloc_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE,
		&physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_rx;
	}

	memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE);
	priv->dma_rx_tcd_paddr = physaddr;
	priv->dma_rx_tcd_vaddr = buf;
	for (i = 0; i < NUM_OF_TDM_BUF; i++) {
		priv->dma_rx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES);
		buf += TCD_BUFFER_SIZE;
	}

	buf = dma_alloc_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE,
				 &physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_tx;
	}
	memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE);
	priv->dma_tx_tcd_paddr = physaddr;
	priv->dma_tx_tcd_vaddr = buf;
	for (i = 0; i < NUM_OF_TDM_BUF; i++) {
		priv->dma_tx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES);
		buf += TCD_BUFFER_SIZE;
	}

	priv->phase_rx = 0;
	priv->phase_tx = 0;
	return 0;

err_alloc_tx:
	dma_free_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE,
		priv->dma_rx_tcd_vaddr, priv->dma_rx_tcd_paddr);
err_alloc_rx:
	dma_free_coherent(priv->device, buf_size, priv->dma_output_vaddr,
			  priv->dma_output_paddr);
err_alloc_op:
	dma_free_coherent(priv->device, buf_size, priv->dma_input_vaddr,
			  priv->dma_input_paddr);
err_alloc_ip:
	return ret;
}
Example no. 16
0
static int mxc_ipu_ioctl(struct inode *inode, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int ret = 0;

	switch (cmd) {

	case IPU_INIT_CHANNEL:
		{
			ipu_channel_parm parm;
			if (copy_from_user
			    (&parm, (ipu_channel_parm *) arg,
			     sizeof(ipu_channel_parm))) {
				return -EFAULT;
			}
			if (!parm.flag) {
				ret =
				    ipu_init_channel(parm.channel,
						     &parm.params);
			} else {
				ret = ipu_init_channel(parm.channel, NULL);
			}
		}
		break;

	case IPU_UNINIT_CHANNEL:
		{
			ipu_channel_t ch;
			int __user *argp = (void __user *)arg;
			if (get_user(ch, argp))
				return -EFAULT;
			ipu_uninit_channel(ch);
		}
		break;

	case IPU_INIT_CHANNEL_BUFFER:
		{
			ipu_channel_buf_parm parm;
			if (copy_from_user
			    (&parm, (ipu_channel_buf_parm *) arg,
			     sizeof(ipu_channel_buf_parm))) {
				return -EFAULT;
			}
			ret =
			    ipu_init_channel_buffer(parm.channel, parm.type,
						    parm.pixel_fmt,
						    parm.width, parm.height,
						    parm.stride,
						    parm.rot_mode,
						    parm.phyaddr_0,
						    parm.phyaddr_1,
						    parm.u_offset,
						    parm.v_offset);

		}
		break;

	case IPU_UPDATE_CHANNEL_BUFFER:
		{
			ipu_channel_buf_parm parm;
			if (copy_from_user
			    (&parm, (ipu_channel_buf_parm *) arg,
			     sizeof(ipu_channel_buf_parm))) {
				return -EFAULT;
			}
			if ((parm.phyaddr_0 != (dma_addr_t) NULL)
			    && (parm.phyaddr_1 == (dma_addr_t) NULL)) {
				ret =
				    ipu_update_channel_buffer(parm.channel,
							      parm.type,
							      parm.bufNum,
							      parm.phyaddr_0);
			} else if ((parm.phyaddr_0 == (dma_addr_t) NULL)
				   && (parm.phyaddr_1 != (dma_addr_t) NULL)) {
				ret =
				    ipu_update_channel_buffer(parm.channel,
							      parm.type,
							      parm.bufNum,
							      parm.phyaddr_1);
			} else {
				ret = -1;
			}

		}
		break;
	case IPU_SELECT_CHANNEL_BUFFER:
		{
			ipu_channel_buf_parm parm;
			if (copy_from_user
			    (&parm, (ipu_channel_buf_parm *) arg,
			     sizeof(ipu_channel_buf_parm))) {
				return -EFAULT;
			}
			ret =
			    ipu_select_buffer(parm.channel, parm.type,
					      parm.bufNum);

		}
		break;
	case IPU_LINK_CHANNELS:
		{
			ipu_channel_link link;
			if (copy_from_user
			    (&link, (ipu_channel_link *) arg,
			     sizeof(ipu_channel_link))) {
				return -EFAULT;
			}
			ret = ipu_link_channels(link.src_ch, link.dest_ch);

		}
		break;
	case IPU_UNLINK_CHANNELS:
		{
			ipu_channel_link link;
			if (copy_from_user
			    (&link, (ipu_channel_link *) arg,
			     sizeof(ipu_channel_link))) {
				return -EFAULT;
			}
			ret = ipu_unlink_channels(link.src_ch, link.dest_ch);

		}
		break;
	case IPU_ENABLE_CHANNEL:
		{
			ipu_channel_t ch;
			int __user *argp = (void __user *)arg;
			if (get_user(ch, argp))
				return -EFAULT;
			ipu_enable_channel(ch);
		}
		break;
	case IPU_DISABLE_CHANNEL:
		{
			ipu_channel_info info;
			if (copy_from_user
			    (&info, (ipu_channel_info *) arg,
			     sizeof(ipu_channel_info))) {
				return -EFAULT;
			}
			ret = ipu_disable_channel(info.channel, info.stop);
		}
		break;
	case IPU_ENABLE_IRQ:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ipu_enable_irq(irq);
		}
		break;
	case IPU_DISABLE_IRQ:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ipu_disable_irq(irq);
		}
		break;
	case IPU_CLEAR_IRQ:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ipu_clear_irq(irq);
		}
		break;
	case IPU_FREE_IRQ:
		{
			ipu_irq_info info;
			if (copy_from_user
			    (&info, (ipu_irq_info *) arg,
			     sizeof(ipu_irq_info))) {
				return -EFAULT;
			}
			ipu_free_irq(info.irq, info.dev_id);
		}
		break;
	case IPU_REQUEST_IRQ_STATUS:
		{
			uint32_t irq;
			int __user *argp = (void __user *)arg;
			if (get_user(irq, argp))
				return -EFAULT;
			ret = ipu_get_irq_status(irq);
		}
		break;
	case IPU_SDC_INIT_PANEL:
		{
			ipu_sdc_panel_info sinfo;
			if (copy_from_user
			    (&sinfo, (ipu_sdc_panel_info *) arg,
			     sizeof(ipu_sdc_panel_info))) {
				return -EFAULT;
			}
			ret =
			    ipu_sdc_init_panel(sinfo.panel, sinfo.pixel_clk,
					       sinfo.width, sinfo.height,
					       sinfo.pixel_fmt,
					       sinfo.hStartWidth,
					       sinfo.hSyncWidth,
					       sinfo.hEndWidth,
					       sinfo.vStartWidth,
					       sinfo.vSyncWidth,
					       sinfo.vEndWidth, sinfo.signal);
		}
		break;
	case IPU_SDC_SET_WIN_POS:
		{
			ipu_sdc_window_pos pos;
			if (copy_from_user
			    (&pos, (ipu_sdc_window_pos *) arg,
			     sizeof(ipu_sdc_window_pos))) {
				return -EFAULT;
			}
			ret =
			    ipu_disp_set_window_pos(pos.channel, pos.x_pos,
						   pos.y_pos);

		}
		break;
	case IPU_SDC_SET_GLOBAL_ALPHA:
		{
			ipu_sdc_global_alpha g;
			if (copy_from_user
			    (&g, (ipu_sdc_global_alpha *) arg,
			     sizeof(ipu_sdc_global_alpha))) {
				return -EFAULT;
			}
			ret = ipu_sdc_set_global_alpha(g.enable, g.alpha);
		}
		break;
	case IPU_SDC_SET_COLOR_KEY:
		{
			ipu_sdc_color_key c;
			if (copy_from_user
			    (&c, (ipu_sdc_color_key *) arg,
			     sizeof(ipu_sdc_color_key))) {
				return -EFAULT;
			}
			ret =
			    ipu_sdc_set_color_key(c.channel, c.enable,
						  c.colorKey);
		}
		break;
	case IPU_SDC_SET_BRIGHTNESS:
		{
			uint8_t b;
			int __user *argp = (void __user *)arg;
			if (get_user(b, argp))
				return -EFAULT;
			ret = ipu_sdc_set_brightness(b);

		}
		break;
	case IPU_REGISTER_GENERIC_ISR:
		{
			ipu_event_info info;
			if (copy_from_user
			    (&info, (ipu_event_info *) arg,
			     sizeof(ipu_event_info))) {
				return -EFAULT;
			}
			ret =
			    ipu_request_irq(info.irq, mxc_ipu_generic_handler,
					    0, "video_sink", info.dev);
		}
		break;
	case IPU_GET_EVENT:
		/*
		 * User will have to allocate an event_type structure
		 * and pass the pointer in arg.
		 */
		{
			ipu_event_info info;
			int r = -1;

			if (copy_from_user
					(&info, (ipu_event_info *) arg,
					 sizeof(ipu_event_info)))
				return -EFAULT;

			r = get_events(&info);
			if (r == -1) {
				wait_event_interruptible_timeout(waitq,
						(pending_events != 0), 2 * HZ);
				r = get_events(&info);
			}
			ret = -1;
			if (r == 0) {
				if (!copy_to_user((ipu_event_info *) arg,
					&info, sizeof(ipu_event_info)))
					ret = 0;
			}
		}
		break;
	case IPU_ADC_WRITE_TEMPLATE:
		{
			ipu_adc_template temp;
			if (copy_from_user
			    (&temp, (ipu_adc_template *) arg, sizeof(temp))) {
				return -EFAULT;
			}
			ret =
			    ipu_adc_write_template(temp.disp, temp.pCmd,
						   temp.write);
		}
		break;
	case IPU_ADC_UPDATE:
		{
			ipu_adc_update update;
			if (copy_from_user
			    (&update, (ipu_adc_update *) arg, sizeof(update))) {
				return -EFAULT;
			}
			ret =
			    ipu_adc_set_update_mode(update.channel, update.mode,
						    update.refresh_rate,
						    update.addr, update.size);
		}
		break;
	case IPU_ADC_SNOOP:
		{
			ipu_adc_snoop snoop;
			if (copy_from_user
			    (&snoop, (ipu_adc_snoop *) arg, sizeof(snoop))) {
				return -EFAULT;
			}
			ret =
			    ipu_adc_get_snooping_status(snoop.statl,
							snoop.stath);
		}
		break;
	case IPU_ADC_CMD:
		{
			ipu_adc_cmd cmd;
			if (copy_from_user
			    (&cmd, (ipu_adc_cmd *) arg, sizeof(cmd))) {
				return -EFAULT;
			}
			ret =
			    ipu_adc_write_cmd(cmd.disp, cmd.type, cmd.cmd,
					      cmd.params, cmd.numParams);
		}
		break;
	case IPU_ADC_INIT_PANEL:
		{
			ipu_adc_panel panel;
			if (copy_from_user
			    (&panel, (ipu_adc_panel *) arg, sizeof(panel))) {
				return -EFAULT;
			}
			ret =
			    ipu_adc_init_panel(panel.disp, panel.width,
					       panel.height, panel.pixel_fmt,
					       panel.stride, panel.signal,
					       panel.addr, panel.vsync_width,
					       panel.mode);
		}
		break;
	case IPU_ADC_IFC_TIMING:
		{
			ipu_adc_ifc_timing t;
			if (copy_from_user
			    (&t, (ipu_adc_ifc_timing *) arg, sizeof(t))) {
				return -EFAULT;
			}
			ret =
			    ipu_adc_init_ifc_timing(t.disp, t.read,
						    t.cycle_time, t.up_time,
						    t.down_time,
						    t.read_latch_time,
						    t.pixel_clk);
		}
		break;
	case IPU_CSI_INIT_INTERFACE:
		{
			ipu_csi_interface c;
			if (copy_from_user
			    (&c, (ipu_csi_interface *) arg, sizeof(c)))
				return -EFAULT;
			ret =
			    ipu_csi_init_interface(c.width, c.height,
						   c.pixel_fmt, c.signal);
		}
		break;
	case IPU_CSI_ENABLE_MCLK:
		{
			ipu_csi_mclk m;
			if (copy_from_user(&m, (ipu_csi_mclk *) arg, sizeof(m)))
				return -EFAULT;
			ret = ipu_csi_enable_mclk(m.src, m.flag, m.wait);
		}
		break;
	case IPU_CSI_READ_MCLK_FLAG:
		{
			ret = ipu_csi_read_mclk_flag();
		}
		break;
	case IPU_CSI_FLASH_STROBE:
		{
			bool strobe;
			int __user *argp = (void __user *)arg;
			if (get_user(strobe, argp))
				return -EFAULT;
			ipu_csi_flash_strobe(strobe);
		}
		break;
	case IPU_CSI_GET_WIN_SIZE:
		{
			ipu_csi_window_size w;
			int dummy = 0;
			ipu_csi_get_window_size(&w.width, &w.height, dummy);
			if (copy_to_user
			    ((ipu_csi_window_size *) arg, &w, sizeof(w)))
				return -EFAULT;
		}
		break;
	case IPU_CSI_SET_WIN_SIZE:
		{
			ipu_csi_window_size w;
			int dummy = 0;
			if (copy_from_user
			    (&w, (ipu_csi_window_size *) arg, sizeof(w)))
				return -EFAULT;
			ipu_csi_set_window_size(w.width, w.height, dummy);
		}
		break;
	case IPU_CSI_SET_WINDOW:
		{
			ipu_csi_window p;
			int dummy = 0;
			if (copy_from_user
			    (&p, (ipu_csi_window *) arg, sizeof(p)))
				return -EFAULT;
			ipu_csi_set_window_pos(p.left, p.top, dummy);
		}
		break;
	case IPU_PF_SET_PAUSE_ROW:
		{
			uint32_t p;
			int __user *argp = (void __user *)arg;
			if (get_user(p, argp))
				return -EFAULT;
			ret = ipu_pf_set_pause_row(p);
		}
		break;
	case IPU_ALOC_MEM:
		{
			ipu_mem_info info;
			if (copy_from_user
					(&info, (ipu_mem_info *) arg,
					 sizeof(ipu_mem_info)))
				return -EFAULT;

			info.vaddr = dma_alloc_coherent(0,
					PAGE_ALIGN(info.size),
					&info.paddr,
					GFP_DMA | GFP_KERNEL);
			if (info.vaddr == 0) {
				printk(KERN_ERR "dma alloc failed!\n");
				return -ENOBUFS;
			}
			if (copy_to_user((ipu_mem_info *) arg, &info,
					sizeof(ipu_mem_info)) > 0)
				return -EFAULT;
		}
		break;
	case IPU_FREE_MEM:
		{
			ipu_mem_info info;
			if (copy_from_user
					(&info, (ipu_mem_info *) arg,
					 sizeof(ipu_mem_info)))
				return -EFAULT;

			if (info.vaddr != 0)
				dma_free_coherent(0, PAGE_ALIGN(info.size),
						info.vaddr, info.paddr);
			else
				return -EFAULT;
		}
		break;
	case IPU_IS_CHAN_BUSY:
		{
			ipu_channel_t chan;
			if (copy_from_user
					(&chan, (ipu_channel_t *)arg,
					 sizeof(ipu_channel_t)))
				return -EFAULT;

			if (ipu_is_channel_busy(chan))
				ret = 1;
			else
				ret = 0;
		}
		break;
	default:
		break;

	}
	return ret;
}
Example no. 17
0
static int init_tdm(struct tdm_priv *priv)
{
	u8 *buf;
	int i;
	int buf_size;
	dma_addr_t physaddr = 0;
	int ret = 0;

	/*
	 * Allocate memory for the Rx/Tx buffers according to the active
	 * time slots:
	 * BufferSize = NUM_OF_TDM_BUF * NUM_OF_FRAMES * Active_CH
	 */
	buf_size = TDM_BUF_SIZE(priv->cfg.num_ch, priv->cfg.ch_width,
				priv->cfg.num_frames);
	buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_ip;
	}
	priv->dma_input_paddr = physaddr;
	priv->dma_input_vaddr = buf;
	priv->tdm_input_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES);

	buf = dma_alloc_coherent(priv->device, buf_size, &physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_op;
	}
	priv->dma_output_paddr = physaddr;
	priv->dma_output_vaddr = buf;
	priv->tdm_output_data = ALIGN_ADDRESS(buf, ALIGNED_8_BYTES);

	/* allocate memory for TCD buffer descriptors */
	buf = dma_alloc_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE,
		&physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_rx;
	}

	memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE);
	priv->dma_rx_tcd_paddr = physaddr;
	priv->dma_rx_tcd_vaddr = buf;
	for (i = 0; i < NUM_OF_TDM_BUF; i++) {
		priv->dma_rx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES);
		buf += TCD_BUFFER_SIZE;
	}

	buf = dma_alloc_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE,
				 &physaddr, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_alloc_tx;
	}
	memset(buf, 0, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE);
	priv->dma_tx_tcd_paddr = physaddr;
	priv->dma_tx_tcd_vaddr = buf;
	for (i = 0; i < NUM_OF_TDM_BUF; i++) {
		priv->dma_tx_tcd[i] = ALIGN_ADDRESS(buf, ALIGNED_32_BYTES);
		buf += TCD_BUFFER_SIZE;
	}

	priv->phase_rx = 0;
	priv->phase_tx = 0;
	return 0;

err_alloc_tx:
	dma_free_coherent(priv->device, NUM_OF_TDM_BUF * TCD_BUFFER_SIZE,
		priv->dma_rx_tcd_vaddr, priv->dma_rx_tcd_paddr);
err_alloc_rx:
	dma_free_coherent(priv->device, buf_size, priv->dma_output_vaddr,
			  priv->dma_output_paddr);
err_alloc_op:
	dma_free_coherent(priv->device, buf_size, priv->dma_input_vaddr,
			  priv->dma_input_paddr);
err_alloc_ip:
	return ret;
}
Example no. 18
0
static bool device_init_rings(struct vnt_private *pDevice)
{
	void *vir_pool;

	/*allocate all RD/TD rings a single pool*/
	vir_pool = dma_zalloc_coherent(&pDevice->pcid->dev,
					 pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
					 pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
					 pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
					 pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
					 &pDevice->pool_dma, GFP_ATOMIC);
	if (vir_pool == NULL) {
		dev_err(&pDevice->pcid->dev, "allocate desc dma memory failed\n");
		return false;
	}

	pDevice->aRD0Ring = vir_pool;
	pDevice->aRD1Ring = vir_pool +
		pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc);

	pDevice->rd0_pool_dma = pDevice->pool_dma;
	pDevice->rd1_pool_dma = pDevice->rd0_pool_dma +
		pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc);

	pDevice->tx0_bufs = dma_zalloc_coherent(&pDevice->pcid->dev,
						  pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
						  pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
						  CB_BEACON_BUF_SIZE +
						  CB_MAX_BUF_SIZE,
						  &pDevice->tx_bufs_dma0,
						  GFP_ATOMIC);
	if (pDevice->tx0_bufs == NULL) {
		dev_err(&pDevice->pcid->dev, "allocate buf dma memory failed\n");

		dma_free_coherent(&pDevice->pcid->dev,
				    pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
				    pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
				    pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
				    pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
				    vir_pool, pDevice->pool_dma
			);
		return false;
	}

	pDevice->td0_pool_dma = pDevice->rd1_pool_dma +
		pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);

	pDevice->td1_pool_dma = pDevice->td0_pool_dma +
		pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);

	/* vir_pool: pvoid type */
	pDevice->apTD0Rings = vir_pool
		+ pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc)
		+ pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);

	pDevice->apTD1Rings = vir_pool
		+ pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc)
		+ pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc)
		+ pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);

	pDevice->tx1_bufs = pDevice->tx0_bufs +
		pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;

	pDevice->tx_beacon_bufs = pDevice->tx1_bufs +
		pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;

	pDevice->pbyTmpBuff = pDevice->tx_beacon_bufs +
		CB_BEACON_BUF_SIZE;

	pDevice->tx_bufs_dma1 = pDevice->tx_bufs_dma0 +
		pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;

	pDevice->tx_beacon_dma = pDevice->tx_bufs_dma1 +
		pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;

	return true;
}
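
The matching release path has to repeat the same size sums, since dma_free_coherent needs the exact allocation size. A sketch under the same field assumptions (the name device_free_rings is an assumption):

static void device_free_rings(struct vnt_private *pDevice)
{
	/* the descriptor pool: size must match the allocation above */
	dma_free_coherent(&pDevice->pcid->dev,
			  pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
			  pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
			  pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
			  pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
			  pDevice->aRD0Ring, pDevice->pool_dma);

	/* the Tx buffer pool, if it was allocated */
	if (pDevice->tx0_bufs)
		dma_free_coherent(&pDevice->pcid->dev,
				  pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
				  pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
				  CB_BEACON_BUF_SIZE +
				  CB_MAX_BUF_SIZE,
				  pDevice->tx0_bufs, pDevice->tx_bufs_dma0);
}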
/*!
 * PrpENC enable channel setup function
 *
 * @param cam       struct cam_data * mxc capture instance
 *
 * @return  status
 */
static int prp_enc_setup(cam_data *cam)
{
	ipu_channel_params_t enc;
	int err = 0;
	dma_addr_t dummy;
#ifdef CONFIG_MXC_MIPI_CSI2
	void *mipi_csi2_info;
	int ipu_id;
	int csi_id;
#endif

	CAMERA_TRACE("In prp_enc_setup\n");
	if (!cam) {
		printk(KERN_ERR "cam private is NULL\n");
		return -ENXIO;
	}
	/* cam is only dereferenced after the NULL check above */
	dummy = cam->dummy_frame.buffer.m.offset;
	memset(&enc, 0, sizeof(ipu_channel_params_t));

	ipu_csi_get_window_size(cam->ipu, &enc.csi_prp_enc_mem.in_width,
				&enc.csi_prp_enc_mem.in_height, cam->csi);

	enc.csi_prp_enc_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
	enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.width;
	enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.height;
	enc.csi_prp_enc_mem.csi = cam->csi;
	if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
		enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.height;
		enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.width;
	}

	switch (cam->v2f.fmt.pix.pixelformat) {
	case V4L2_PIX_FMT_YUV420:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV420P;
		pr_info("YUV420\n");
		break;
	case V4L2_PIX_FMT_YVU420:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YVU420P;
		pr_info("YVU420\n");
		break;
	case V4L2_PIX_FMT_YUV422P:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV422P;
		pr_info("YUV422P\n");
		break;
	case V4L2_PIX_FMT_YUYV:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUYV;
		pr_info("YUYV\n");
		break;
	case V4L2_PIX_FMT_UYVY:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_UYVY;
		pr_info("UYVY\n");
		break;
	case V4L2_PIX_FMT_NV12:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_NV12;
		pr_info("NV12\n");
		break;
	case V4L2_PIX_FMT_BGR24:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR24;
		pr_info("BGR24\n");
		break;
	case V4L2_PIX_FMT_RGB24:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB24;
		pr_info("RGB24\n");
		break;
	case V4L2_PIX_FMT_RGB565:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB565;
		pr_info("RGB565\n");
		break;
	case V4L2_PIX_FMT_BGR32:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR32;
		pr_info("BGR32\n");
		break;
	case V4L2_PIX_FMT_RGB32:
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB32;
		pr_info("RGB32\n");
		break;
	default:
		printk(KERN_ERR "format not supported\n");
		return -EINVAL;
	}

#ifdef CONFIG_MXC_MIPI_CSI2
	mipi_csi2_info = mipi_csi2_get_info();

	if (mipi_csi2_info) {
		if (mipi_csi2_get_status(mipi_csi2_info)) {
			ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
			csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);

			if (cam->ipu == ipu_get_soc(ipu_id)
				&& cam->csi == csi_id) {
				enc.csi_prp_enc_mem.mipi_en = true;
				enc.csi_prp_enc_mem.mipi_vc =
				mipi_csi2_get_virtual_channel(mipi_csi2_info);
				enc.csi_prp_enc_mem.mipi_id =
				mipi_csi2_get_datatype(mipi_csi2_info);

				mipi_csi2_pixelclk_enable(mipi_csi2_info);
			} else {
				enc.csi_prp_enc_mem.mipi_en = false;
				enc.csi_prp_enc_mem.mipi_vc = 0;
				enc.csi_prp_enc_mem.mipi_id = 0;
			}
		} else {
			enc.csi_prp_enc_mem.mipi_en = false;
			enc.csi_prp_enc_mem.mipi_vc = 0;
			enc.csi_prp_enc_mem.mipi_id = 0;
		}
	}
#endif

	err = ipu_init_channel(cam->ipu, CSI_PRP_ENC_MEM, &enc);
	if (err != 0) {
		printk(KERN_ERR "ipu_init_channel %d\n", err);
		return err;
	}

	grotation = cam->rotation;
	if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
		if (cam->rot_enc_bufs_vaddr[0]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[0],
					  cam->rot_enc_bufs_vaddr[0],
					  cam->rot_enc_bufs[0]);
		}
		if (cam->rot_enc_bufs_vaddr[1]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[1],
					  cam->rot_enc_bufs_vaddr[1],
					  cam->rot_enc_bufs[1]);
		}
		cam->rot_enc_buf_size[0] =
		    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
		cam->rot_enc_bufs_vaddr[0] =
		    (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[0],
					       &cam->rot_enc_bufs[0],
					       GFP_DMA | GFP_KERNEL);
		if (!cam->rot_enc_bufs_vaddr[0]) {
			printk(KERN_ERR "alloc enc_bufs0\n");
			return -ENOMEM;
		}
		cam->rot_enc_buf_size[1] =
		    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
		cam->rot_enc_bufs_vaddr[1] =
		    (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[1],
					       &cam->rot_enc_bufs[1],
					       GFP_DMA | GFP_KERNEL);
		if (!cam->rot_enc_bufs_vaddr[1]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[0],
					  cam->rot_enc_bufs_vaddr[0],
					  cam->rot_enc_bufs[0]);
			cam->rot_enc_bufs_vaddr[0] = NULL;
			cam->rot_enc_bufs[0] = 0;
			printk(KERN_ERR "alloc enc_bufs1\n");
			return -ENOMEM;
		}

		err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
					      IPU_OUTPUT_BUFFER,
					      enc.csi_prp_enc_mem.out_pixel_fmt,
					      enc.csi_prp_enc_mem.out_width,
					      enc.csi_prp_enc_mem.out_height,
					      enc.csi_prp_enc_mem.out_width,
					      IPU_ROTATE_NONE,
					      cam->rot_enc_bufs[0],
					      cam->rot_enc_bufs[1], 0, 0, 0);
		if (err != 0) {
			printk(KERN_ERR "CSI_PRP_ENC_MEM err\n");
			return err;
		}

		err = ipu_init_channel(cam->ipu, MEM_ROT_ENC_MEM, NULL);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM channel err\n");
			return err;
		}

		err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
					      IPU_INPUT_BUFFER,
					      enc.csi_prp_enc_mem.out_pixel_fmt,
					      enc.csi_prp_enc_mem.out_width,
					      enc.csi_prp_enc_mem.out_height,
					      enc.csi_prp_enc_mem.out_width,
					      cam->rotation,
					      cam->rot_enc_bufs[0],
					      cam->rot_enc_bufs[1], 0, 0, 0);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM input buffer\n");
			return err;
		}

		err =
		    ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
					    IPU_OUTPUT_BUFFER,
					    enc.csi_prp_enc_mem.out_pixel_fmt,
					    enc.csi_prp_enc_mem.out_height,
					    enc.csi_prp_enc_mem.out_width,
					    cam->v2f.fmt.pix.bytesperline /
					    bytes_per_pixel(enc.csi_prp_enc_mem.
							    out_pixel_fmt),
					    IPU_ROTATE_NONE,
					    dummy, dummy, 0,
					    cam->offset.u_offset,
					    cam->offset.v_offset);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM output buffer\n");
			return err;
		}

		err = ipu_link_channels(cam->ipu,
					CSI_PRP_ENC_MEM, MEM_ROT_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR
			       "link CSI_PRP_ENC_MEM-MEM_ROT_ENC_MEM\n");
			return err;
		}

		err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
			return err;
		}
		err = ipu_enable_channel(cam->ipu, MEM_ROT_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel MEM_ROT_ENC_MEM\n");
			return err;
		}

		ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
				  IPU_OUTPUT_BUFFER, 0);
		ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
				  IPU_OUTPUT_BUFFER, 1);
	} else {
		err =
		    ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
					    IPU_OUTPUT_BUFFER,
					    enc.csi_prp_enc_mem.out_pixel_fmt,
					    enc.csi_prp_enc_mem.out_width,
					    enc.csi_prp_enc_mem.out_height,
					    cam->v2f.fmt.pix.bytesperline /
					    bytes_per_pixel(enc.csi_prp_enc_mem.
							    out_pixel_fmt),
					    cam->rotation,
					    dummy, dummy, 0,
					    cam->offset.u_offset,
					    cam->offset.v_offset);
		if (err != 0) {
			printk(KERN_ERR "CSI_PRP_ENC_MEM output buffer\n");
			return err;
		}
		err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
			return err;
		}
	}

	return err;
}
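
A plausible disable path for the channels configured above (the function name is hypothetical; it simply reverses the link/enable/init steps using the same IPU calls):

static int prp_enc_teardown(cam_data *cam)
{
	int err = 0;

	if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
		ipu_unlink_channels(cam->ipu, CSI_PRP_ENC_MEM,
				    MEM_ROT_ENC_MEM);
		ipu_disable_channel(cam->ipu, MEM_ROT_ENC_MEM, true);
	}
	err = ipu_disable_channel(cam->ipu, CSI_PRP_ENC_MEM, true);
	ipu_uninit_channel(cam->ipu, CSI_PRP_ENC_MEM);
	if (cam->rotation >= IPU_ROTATE_90_RIGHT)
		ipu_uninit_channel(cam->ipu, MEM_ROT_ENC_MEM);
	return err;
}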
Example n. 20
0
File: dim2.c Project: krzk/linux
static void dma_free(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	dma_free_coherent(dev, size, mbo->virt_address, mbo->bus_address);
}
Example n. 21
0
/**
 * sunxi_free_one_event_buffer - Frees one event buffer
 * @otgc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 */
static void sunxi_free_one_event_buffer(struct sunxi_otgc *otgc,
		struct sunxi_otgc_event_buffer *evt)
{
	dma_free_coherent(otgc->dev, evt->length, evt->buf, evt->dma);
	kfree(evt);
}
Example n. 22
0
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
Example n. 23
0
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(IRQ_STUART);
	disable_irq(IRQ_ICP);

	err = -EBUSY;
	si->rxdma = pxa_request_dma("FICP_RX", DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
	if (si->rxdma < 0)
		goto err_rx_dma;

	si->txdma = pxa_request_dma("FICP_TX", DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
	if (si->txdma < 0)
		goto err_tx_dma;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL);
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL);
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(IRQ_STUART);
	enable_irq(IRQ_ICP);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	pxa_free_dma(si->txdma);
err_tx_dma:
	pxa_free_dma(si->rxdma);
err_rx_dma:
	free_irq(IRQ_ICP, dev);
err_irq2:
	free_irq(IRQ_STUART, dev);
err_irq1:

	return err;
}
Example n. 24
0
void plat_free_consistent_dmaable_memory(struct device *dev, u32 size, void * addr,u32 dma_addr)
{
	dma_free_coherent(dev, size, addr, dma_addr);
}
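
A matching allocation wrapper would look like the sketch below (hypothetical; note that shuttling dma_addr_t around as a u32 only works on 32-bit platforms, which is presumably why the free wrapper above gets away with it):

void *plat_alloc_consistent_dmaable_memory(struct device *dev, u32 size,
					   u32 *dma_addr)
{
	dma_addr_t handle;
	void *vaddr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);

	*dma_addr = (u32)handle;	/* 32-bit DMA addresses assumed */
	return vaddr;
}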
Example n. 25
0
/*
 * Probe for NAND controller
 */
static int __init lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	int res;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (rc == NULL) {
		dev_err(&pdev->dev,"No memory resource found for"
			" device\n");
		return -ENXIO;
	}

	/* Allocate memory for the device structure (and zero it) */
	host = kzalloc(sizeof(struct lpc32xx_nand_host), GFP_KERNEL);
	if (!host) {
		dev_err(&pdev->dev,"failed to allocate device structure\n");
		return -ENOMEM;
	}
	host->io_base_dma = (dma_addr_t) rc->start;

	host->io_base = ioremap(rc->start, resource_size(rc));
	if (host->io_base == NULL) {
		dev_err(&pdev->dev,"ioremap failed\n");
		res = -EIO;
		goto err_exit1;
	}

	host->ncfg = pdev->dev.platform_data;
	if (!host->ncfg) {
		dev_err(&pdev->dev,"Missing platform data\n");
		res = -ENOENT;
		goto err_exit1;
	}

	mtd = &host->mtd;
	chip = &host->nand_chip;
	chip->priv = host;
	mtd->priv = chip;
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, "nand_ck");
	if (IS_ERR(host->clk)) {
		 dev_err(&pdev->dev,"Clock failure\n");
		res = -ENOENT;
		goto err_exit2;
	}
	clk_enable(host->clk);

	/* Set NAND IO addresses and command/ready functions */
	chip->IO_ADDR_R = SLC_DATA(host->io_base);
	chip->IO_ADDR_W = SLC_DATA(host->io_base);
	chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->dev_ready = lpc32xx_nand_device_ready;
	chip->chip_delay = 20; /* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);
	lpc32xx_wp_disable(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->read_byte = lpc32xx_nand_read_byte;
	chip->read_buf = lpc32xx_nand_read_buf;
	chip->write_buf = lpc32xx_nand_write_buf;
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
	chip->ecc.correct   = nand_correct_data;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
	chip->verify_buf = lpc32xx_verify_buf;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = dma_alloc_coherent(&pdev->dev, host->dma_buf_len,
		&host->data_buf_dma, GFP_KERNEL);
	if (host->data_buf == NULL) {
		dev_err(&pdev->dev, "Error allocating memory\n");
		res = -ENOMEM;
		goto err_exit3;
	}

	/* Get free DMA channel and alloc DMA descriptor link list */
	res = lpc32xx_nand_dma_setup(host, LPC32XX_MAX_DMA_DESCRIPTORS);
	if (res) {
		res = -EIO;
		goto err_exit4;
	}

	init_waitqueue_head(&host->dma_waitq);

	/* Find NAND device */
	if (nand_scan_ident(mtd, 1)) {
		res = -ENXIO;
		goto err_exit5;
	}

	/* OOB and ECC CPU and DMA work areas */
	host->ecc_buf_dma = host->data_buf_dma + LPC32XX_DMA_DATA_SIZE;
	host->ecc_buf = (uint32_t *) (host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		chip->ecc.layout = &lpc32xx_nand_oob_16;

	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = chip->ecc.postpad = 0;

	/* Avoid extra scan if using BBT, setup BBT support */
	if (host->ncfg->use_bbt) {
		chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;

		/*
		 * Use a custom BBT marker setup for small page FLASH that
		 * won't interfere with the ECC layout. Large and huge page
		 * FLASH use the standard layout.
		 */
		if (mtd->writesize <= 512) {
			chip->bbt_td = &bbt_smallpage_main_descr;
			chip->bbt_md = &bbt_smallpage_mirror_descr;
		}
	}

	/*
	 * Fills out all the uninitialized function pointers with the defaults
	 */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_exit5;
	}

	/* Standard layout in FLASH for bad block tables */
	if (host->ncfg->use_bbt) {
		if (nand_default_bbt(mtd) < 0)
			dev_err(&pdev->dev, "Error initializing default bad"
				" block tables\n");
	}

	res = lpc32xx_add_partitions(host);
	if (!res)
		return res;

	nand_release(mtd);

err_exit5:
	/* Free the DMA channel used by us */
	lpc32xx_dma_ch_disable(host->dmach);
	lpc32xx_dma_dealloc_llist(host->dmach);
	lpc32xx_dma_ch_put(host->dmach);
	host->dmach = -1;
err_exit4:
	dma_free_coherent(&pdev->dev, host->dma_buf_len,
		host->data_buf, host->data_buf_dma);
err_exit3:
	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);
err_exit2:
	lpc32xx_wp_enable(host);
	iounmap(host->io_base);
err_exit1:
	kfree(host);

	return res;
}
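
For symmetry, a plausible remove path that mirrors the probe unwind above (hypothetical; it reuses only helpers already visible in the probe):

static int __devexit lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	nand_release(&host->mtd);

	/* release the DMA channel and descriptor list */
	lpc32xx_dma_ch_disable(host->dmach);
	lpc32xx_dma_dealloc_llist(host->dmach);
	lpc32xx_dma_ch_put(host->dmach);

	dma_free_coherent(&pdev->dev, host->dma_buf_len,
		host->data_buf, host->data_buf_dma);

	clk_disable(host->clk);
	clk_put(host->clk);
	platform_set_drvdata(pdev, NULL);

	lpc32xx_wp_enable(host);
	iounmap(host->io_base);
	kfree(host);

	return 0;
}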
Example n. 26
0
int ali_openVG_release(struct inode *inode, struct file *filp)
{
	struct ali_openVG_handle *handle = filp->private_data;
	struct list_head *head = handle->mem_list;
	struct list_head *allocated_head = handle->allocated_list;
#if MMU_ENABLED		
	struct ali_ovg_mmuAllocator *temp;	
	int i;
#endif

	ovg_precord();
	
	OVG_PRINTK("Ovg kernel driver close!\n");
	/* memory session begin */

	if (memfp.ovg_session_end)
		memfp.ovg_session_end(filp);

#if MMU_ENABLED
	temp = mmuSubsystem.first_allocator;
	while (temp) {
		if (temp->ops->ovg_session_end)
			temp->ops->ovg_session_end(filp);
		temp = temp->next;
	}

	ali_ovg_mmuRelease(filp);

#endif

#if MMU_ENABLED || MMU_DBG_ENABLED
	for (i = 0; i < MAX_PDT_NUM; i++)
		if (pdtRecord[i].tgid == current->tgid)
			break;
	if (i == MAX_PDT_NUM) {
		/* no entry found: don't index pdtRecord[] out of range */
		printk("No pdt found\n");
	} else if (--pdtRecord[i].ref == 0) {
		/* the physical address is recovered by masking the kernel
		 * virtual address, relying on this SoC's fixed virt-to-phys
		 * mapping for coherent memory */
		dma_free_coherent(NULL, PAGE_SIZE, handle->pdt,
				  ((unsigned int)handle->pdt & 0x1FFFFFFF));
		ovg_memory_release_dbg(PAGE_SIZE);
		pdtRecord[i].tgid = 0;
		pdtRecord[i].pdt = 0;
	}
#endif

	kfree(head);
	kfree(allocated_head);
	
	kfree(handle);
	
	ovg_memory_release_dbg(sizeof(struct list_head));
	ovg_memory_release_dbg(sizeof(struct list_head));
	ovg_memory_release_dbg(sizeof(struct ali_openVG_handle));
	
#if MEMORY_DEBUG
	OVG_PRINTK("memory allocate %d , memory release %d, leak %d \n"
		,memory_usage, memory_release, (memory_usage - memory_release) );
#endif	
	return 0;
}
Example n. 27
0
/**
 * dwc3_free_one_event_buffer - Frees one event buffer
 * @dwc: Pointer to our controller context structure
 * @evt: Pointer to event buffer to be freed
 */
static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
		struct dwc3_event_buffer *evt)
{
	dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
}
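
Its allocation counterpart in the same driver follows the usual alloc/free pairing; a sketch based on the historical dwc3 core, so treat the details as approximate:

static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
		unsigned length)
{
	struct dwc3_event_buffer *evt;

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return ERR_PTR(-ENOMEM);

	evt->dwc = dwc;
	evt->length = length;
	evt->buf = dma_alloc_coherent(dwc->dev, length, &evt->dma, GFP_KERNEL);
	if (!evt->buf) {
		kfree(evt);
		return ERR_PTR(-ENOMEM);
	}

	return evt;
}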
Example n. 28
0
static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
{
	struct fb_info *info;
	struct sh_mobile_lcdc_priv *priv;
	struct sh_mobile_lcdc_info *pdata = pdev->dev.platform_data;
	struct resource *res;
	int error;
	void *buf;
	int i, j;

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i = platform_get_irq(pdev, 0);
	if (!res || i < 0) {
		dev_err(&pdev->dev, "cannot get platform resources\n");
		return -ENOENT;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, priv);

	error = request_irq(i, sh_mobile_lcdc_irq, IRQF_DISABLED,
			    dev_name(&pdev->dev), priv);
	if (error) {
		dev_err(&pdev->dev, "unable to request irq\n");
		goto err1;
	}

	priv->irq = i;
	atomic_set(&priv->hw_usecnt, -1);

	j = 0;
	for (i = 0; i < ARRAY_SIZE(pdata->ch); i++) {
		struct sh_mobile_lcdc_chan *ch = priv->ch + j;

		ch->lcdc = priv;
		memcpy(&ch->cfg, &pdata->ch[i], sizeof(pdata->ch[i]));

		error = sh_mobile_lcdc_check_interface(ch);
		if (error) {
			dev_err(&pdev->dev, "unsupported interface type\n");
			goto err1;
		}
		init_waitqueue_head(&ch->frame_end_wait);
		init_completion(&ch->vsync_completion);
		ch->pan_offset = 0;

		/* probe the backlight if there is one defined */
		if (ch->cfg.bl_info.max_brightness)
			ch->bl = sh_mobile_lcdc_bl_probe(&pdev->dev, ch);

		switch (pdata->ch[i].chan) {
		case LCDC_CHAN_MAINLCD:
			ch->enabled = 1 << 1;
			ch->reg_offs = lcdc_offs_mainlcd;
			j++;
			break;
		case LCDC_CHAN_SUBLCD:
			ch->enabled = 1 << 2;
			ch->reg_offs = lcdc_offs_sublcd;
			j++;
			break;
		}
	}

	if (!j) {
		dev_err(&pdev->dev, "no channels defined\n");
		error = -EINVAL;
		goto err1;
	}

	/* for dual channel LCDC (MAIN + SUB) force shared bpp setting */
	if (j == 2)
		priv->forced_bpp = pdata->ch[0].bpp;

	priv->base = ioremap_nocache(res->start, resource_size(res));
	if (!priv->base) {
		error = -ENOMEM;
		goto err1;
	}

	error = sh_mobile_lcdc_setup_clocks(pdev, pdata->clock_source, priv);
	if (error) {
		dev_err(&pdev->dev, "unable to setup clocks\n");
		goto err1;
	}

	priv->meram_dev = pdata->meram_dev;

	for (i = 0; i < j; i++) {
		struct fb_var_screeninfo *var;
		const struct fb_videomode *lcd_cfg, *max_cfg = NULL;
		struct sh_mobile_lcdc_chan *ch = priv->ch + i;
		struct sh_mobile_lcdc_chan_cfg *cfg = &ch->cfg;
		const struct fb_videomode *mode = cfg->lcd_cfg;
		unsigned long max_size = 0;
		int k;
		int num_cfg;

		ch->info = framebuffer_alloc(0, &pdev->dev);
		if (!ch->info) {
			dev_err(&pdev->dev, "unable to allocate fb_info\n");
			error = -ENOMEM;
			break;
		}

		info = ch->info;
		var = &info->var;
		info->fbops = &sh_mobile_lcdc_ops;
		info->par = ch;

		mutex_init(&ch->open_lock);

		for (k = 0, lcd_cfg = mode;
		     k < cfg->num_cfg && lcd_cfg;
		     k++, lcd_cfg++) {
			unsigned long size = lcd_cfg->yres * lcd_cfg->xres;
			/* NV12 buffers must have even number of lines */
			if ((cfg->nonstd) && cfg->bpp == 12 &&
					(lcd_cfg->yres & 0x1)) {
				dev_err(&pdev->dev, "yres must be multiple of 2"
						" for YCbCr420 mode.\n");
				error = -EINVAL;
				goto err1;
			}

			if (size > max_size) {
				max_cfg = lcd_cfg;
				max_size = size;
			}
		}

		if (!mode)
			max_size = MAX_XRES * MAX_YRES;
		else if (max_cfg)
			dev_dbg(&pdev->dev, "Found largest videomode %ux%u\n",
				max_cfg->xres, max_cfg->yres);

		info->fix = sh_mobile_lcdc_fix;
		info->fix.smem_len = max_size * 2 * cfg->bpp / 8;

		 /* Only pan in 2 line steps for NV12 */
		if (cfg->nonstd && cfg->bpp == 12)
			info->fix.ypanstep = 2;

		if (!mode) {
			mode = &default_720p;
			num_cfg = 1;
		} else {
			num_cfg = cfg->num_cfg;
		}

		fb_videomode_to_modelist(mode, num_cfg, &info->modelist);

		fb_videomode_to_var(var, mode);
		var->width = cfg->lcd_size_cfg.width;
		var->height = cfg->lcd_size_cfg.height;
		/* Default Y virtual resolution is 2x panel size */
		var->yres_virtual = var->yres * 2;
		var->activate = FB_ACTIVATE_NOW;

		error = sh_mobile_lcdc_set_bpp(var, cfg->bpp, cfg->nonstd);
		if (error)
			break;

		buf = dma_alloc_coherent(&pdev->dev, info->fix.smem_len,
					 &ch->dma_handle, GFP_KERNEL);
		if (!buf) {
			dev_err(&pdev->dev, "unable to allocate buffer\n");
			error = -ENOMEM;
			break;
		}

		info->pseudo_palette = &ch->pseudo_palette;
		info->flags = FBINFO_FLAG_DEFAULT;

		error = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
		if (error < 0) {
			dev_err(&pdev->dev, "unable to allocate cmap\n");
			dma_free_coherent(&pdev->dev, info->fix.smem_len,
					  buf, ch->dma_handle);
			break;
		}

		info->fix.smem_start = ch->dma_handle;
		if (var->nonstd)
			info->fix.line_length = var->xres;
		else
			info->fix.line_length = var->xres * (cfg->bpp / 8);

		info->screen_base = buf;
		info->device = &pdev->dev;
		ch->display_var = *var;
	}

	if (error)
		goto err1;

	error = sh_mobile_lcdc_start(priv);
	if (error) {
		dev_err(&pdev->dev, "unable to start hardware\n");
		goto err1;
	}

	for (i = 0; i < j; i++) {
		struct sh_mobile_lcdc_chan *ch = priv->ch + i;

		info = ch->info;

		if (info->fbdefio) {
			ch->sglist = vmalloc(sizeof(struct scatterlist) *
					info->fix.smem_len >> PAGE_SHIFT);
			if (!ch->sglist) {
				dev_err(&pdev->dev, "cannot allocate sglist\n");
				error = -ENOMEM;
				goto err1;
			}
		}

		info->bl_dev = ch->bl;

		error = register_framebuffer(info);
		if (error < 0)
			goto err1;

		dev_info(info->dev,
			 "registered %s/%s as %dx%d %dbpp.\n",
			 pdev->name,
			 (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
			 "mainlcd" : "sublcd",
			 info->var.xres, info->var.yres,
			 ch->cfg.bpp);

		/* deferred io mode: disable clock to save power */
		if (info->fbdefio || info->state == FBINFO_STATE_SUSPENDED)
			sh_mobile_lcdc_clk_off(priv);
	}
Example n. 29
0
/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
		      __func__ , iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -ENODEV;
		goto err_out;
	}
	/*
	 *  Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = netdev_priv(dev);
	spin_lock_init(&self->lock);

	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baudrate */

	/* FIXME: The HP HDLS-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HDLS-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);
	
	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384; 
	self->tx_buff.truesize = 4000;
	
	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);
	
	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	dev->netdev_ops	= &w83977_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;
	
	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:	
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}
static int te_create_free_cmd_list(struct tlk_device *dev)
{
	int cmd_desc_count, ret = 0;
	struct te_cmd_req_desc *req_desc;
	struct te_cmd_req_desc_compat *req_desc_compat;
	int bitmap_size;
	bool use_reqbuf;

	/*
	 * Check if new shared req/param register SMC is supported.
	 *
	 * If it is, TLK can map in the shared req/param buffers and do_smc
	 * only needs to send the offsets within each (with cache coherency
	 * being maintained by HW through an NS mapping).
	 *
	 * If the SMC support is not yet present, then fallback to the old
	 * mode of writing to an uncached buffer to maintain coherency (and
	 * phys addresses are passed in do_smc).
	 */
	dev->req_param_buf = NULL;
	use_reqbuf = !tlk_generic_smc(dev->tlk_info,
					TE_SMC_REGISTER_REQ_BUF, 0, 0, 0);

	if (use_reqbuf) {
		dev->req_param_buf = kmalloc((2 * PAGE_SIZE), GFP_KERNEL);

		if (!dev->req_param_buf) {
			ret = -ENOMEM;
			goto error;
		}

		/* requests in the first page, params in the second */
		dev->req_addr   = (struct te_request *) dev->req_param_buf;
		dev->param_addr = (struct te_oper_param *)
					(dev->req_param_buf + PAGE_SIZE);

		tlk_generic_smc(dev->tlk_info, TE_SMC_REGISTER_REQ_BUF,
				(uintptr_t)dev->req_addr, (2 * PAGE_SIZE), 0);
	} else {
		dev->req_addr = dma_alloc_coherent(NULL, PAGE_SIZE,
					&dev->req_addr_phys, GFP_KERNEL);
		dev->param_addr = dma_alloc_coherent(NULL, PAGE_SIZE,
					&dev->param_addr_phys, GFP_KERNEL);

#ifdef CONFIG_TRUSTY
		dev->param_pages = dma_alloc_coherent(NULL, PAGE_SIZE,
					&dev->param_pages_phys, GFP_KERNEL);
		if (!dev->param_pages) {
			ret = -ENOMEM;
			goto error;
		}

		if (dev->param_pages_phys & 0xFFFFFFFF00000000) {
			pr_err("Unsupported address range\n");
			dma_free_coherent(NULL, PAGE_SIZE,
					  dev->param_pages,
					  dev->param_pages_phys);
			ret = -ENOMEM;
			goto error;
		}

		dev->param_pages_size = PAGE_SIZE;
		dev->param_pages_tail = 0;
#endif
	}

	if (!dev->req_addr || !dev->param_addr) {
		ret = -ENOMEM;
		goto error;
	}

	dev->req_addr_compat = (struct te_request_compat *)
					dev->req_addr;
	dev->param_addr_compat = (struct te_oper_param_compat *)
					dev->param_addr;
	/* alloc param bitmap allocator */
	bitmap_size = BITS_TO_LONGS(TE_PARAM_MAX) * sizeof(long);
	dev->param_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->param_bitmap) {
		ret = -ENOMEM;
		goto error;
	}

	for (cmd_desc_count = 0;
		cmd_desc_count < TE_CMD_DESC_MAX; cmd_desc_count++) {

		req_desc = kzalloc(sizeof(struct te_cmd_req_desc), GFP_KERNEL);
		if (req_desc == NULL) {
			pr_err("Failed to allocate cmd req descriptor\n");
			ret = -ENOMEM;
			goto error;
		}
		req_desc->req_addr = dev->req_addr + cmd_desc_count;
		INIT_LIST_HEAD(&(req_desc->list));

		/* Add the cmd param descriptor to free list */
		list_add_tail(&req_desc->list, &(dev->free_cmd_list));
	}

	for (cmd_desc_count = 0;
		cmd_desc_count < TE_CMD_DESC_MAX_COMPAT; cmd_desc_count++) {

		req_desc_compat = kzalloc(sizeof(struct te_cmd_req_desc_compat),
				GFP_KERNEL);
		if (req_desc_compat == NULL) {
			pr_err("Failed to allocate cmd req descriptor\n");
			ret = -ENOMEM;
			goto error;
		}
		req_desc_compat->req_addr =
			dev->req_addr_compat + cmd_desc_count;
		INIT_LIST_HEAD(&(req_desc_compat->list));

		/* Add the cmd param descriptor to free list */
		list_add_tail(&req_desc_compat->list, &(dev->free_cmd_list));
	}

error:
	return ret;
}
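
Finally, a plausible teardown counterpart for the setup above (the name is hypothetical; it frees the request/param storage the way it was allocated and drains the descriptor free list, assuming both descriptor flavors embed their list_head at the same offset):

static void te_destroy_free_cmd_list(struct tlk_device *dev)
{
	struct list_head *pos, *tmp;

	/* drain both kinds of descriptors from the shared free list */
	list_for_each_safe(pos, tmp, &dev->free_cmd_list) {
		list_del(pos);
		kfree(list_entry(pos, struct te_cmd_req_desc, list));
	}

	kfree(dev->param_bitmap);

	if (dev->req_param_buf) {
		kfree(dev->req_param_buf);
	} else {
		dma_free_coherent(NULL, PAGE_SIZE, dev->req_addr,
				  dev->req_addr_phys);
		dma_free_coherent(NULL, PAGE_SIZE, dev->param_addr,
				  dev->param_addr_phys);
	}
}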