Example no. 1
0
static int omap_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	struct omap_dma_channel_params dma_params;
	int bytes;

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!prtd->dma_data)
		return 0;

	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.data_type			= dma_data->data_type;
	dma_params.trigger			= dma_data->dma_req;
	dma_params.sync_mode			= dma_data->sync_mode;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_params.src_amode		= OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_amode		= OMAP_DMA_AMODE_CONSTANT;
		dma_params.src_or_dst_synch	= OMAP_DMA_DST_SYNC;
		dma_params.src_start		= runtime->dma_addr;
		dma_params.dst_start		= dma_data->port_addr;
		dma_params.dst_port		= OMAP_DMA_PORT_MPUI;
		dma_params.dst_fi		= dma_data->packet_size;
	} else {
		dma_params.src_amode		= OMAP_DMA_AMODE_CONSTANT;
		dma_params.dst_amode		= OMAP_DMA_AMODE_POST_INC;
		dma_params.src_or_dst_synch	= OMAP_DMA_SRC_SYNC;
		dma_params.src_start		= dma_data->port_addr;
		dma_params.dst_start		= runtime->dma_addr;
		dma_params.src_port		= OMAP_DMA_PORT_MPUI;
		dma_params.src_fi		= dma_data->packet_size;
	}
	/*
	 * Set DMA transfer frame size equal to ALSA period size and frame
	 * count as no. of ALSA periods. Then with DMA frame interrupt enabled,
	 * we can transfer the whole ALSA buffer with a single DMA transfer but
	 * still get an interrupt at each period boundary.
	 */
	bytes = snd_pcm_lib_period_bytes(substream);
	dma_params.elem_count	= bytes >> dma_data->data_type;
	dma_params.frame_count	= runtime->periods;
	omap_set_dma_params(prtd->dma_ch, &dma_params);

	if (cpu_is_omap1510())
		omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ |
			      OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ);
	else
		omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);

	if (!cpu_class_is_omap1()) {
		omap_set_dma_src_burst_mode(prtd->dma_ch,
						OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(prtd->dma_ch,
						OMAP_DMA_DATA_BURST_16);
	}

	return 0;
}
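
A side note on the arithmetic above: bytes >> dma_data->data_type works because, in the usual plat/dma.h encoding, OMAP_DMA_DATA_TYPE_S8/S16/S32 are 0, 1 and 2, i.e. log2 of the element size in bytes. A minimal standalone sketch of that mapping, with hypothetical period values:

#include <stdio.h>

/* Worked example (hypothetical values): the data_type constants act as
 * log2(element size in bytes), so shifting converts bytes to elements. */
int main(void)
{
	unsigned int period_bytes = 4096;	/* hypothetical ALSA period */
	unsigned int periods = 8;		/* hypothetical period count */
	unsigned int data_type = 1;		/* OMAP_DMA_DATA_TYPE_S16 */

	printf("elem_count  = %u elements per frame\n",
	       period_bytes >> data_type);
	printf("frame_count = %u frames (one per ALSA period)\n", periods);
	/* One DMA transfer of frame_count frames covers the whole ring
	 * buffer, while OMAP_DMA_FRAME_IRQ fires at each period boundary. */
	return 0;
}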
Example no. 2
0
/**
 * @brief omap_modify_dma_chain_params : Modify the chain's parameters
 * after they have been set. Don't do this while the DMA is running!
 *
 * @param chain_id - Chained logical channel id.
 * @param params - Channel parameters to apply to every channel in the chain.
 *
 * @return - Success : 0
 * 	     Failure : -EINVAL
 */
int omap_modify_dma_chain_params(int chain_id,
								 struct omap_dma_channel_params params)
{
	int *channels;
	u32 i;
	
	/* Check for input params */
	if (unlikely(chain_id < 0 || chain_id >= dma_lch_count)) {
		IOLog("Invalid chain id\n");
		return -EINVAL;
	}
	
	/* Check if the chain exists */
	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
		IOLog("Chain doesn't exists\n");
		return -EINVAL;
	}
	channels = dma_linked_lch[chain_id].linked_dmach_q;
	
	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
		/*
		 * Client drivers set the common parameters now, so that
		 * later only the relevant fields (src_start, dest_start
		 * and element count) need to be updated.
		 */
		omap_set_dma_params(channels[i], &params);
	}
	
	return 0;
}
Example no. 3
0
static int omap_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	struct omap_dma_channel_params dma_params;
	int sync_mode;

	memset(&dma_params, 0, sizeof(dma_params));

	/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
	if (cpu_is_omap34xx() &&
			(prtd->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD))
		sync_mode = OMAP_DMA_SYNC_FRAME;
	else
		sync_mode = OMAP_DMA_SYNC_ELEMENT;

	/*
	 * Note: Regardless of interface data formats supported by OMAP McBSP
	 * or EAC blocks, internal representation is always fixed 16-bit/sample
	 */
	dma_params.data_type			= OMAP_DMA_DATA_TYPE_S16;
	dma_params.trigger			= dma_data->dma_req;
	dma_params.sync_mode			= sync_mode;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_params.src_amode		= OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_amode		= OMAP_DMA_AMODE_CONSTANT;
		dma_params.src_or_dst_synch	= OMAP_DMA_DST_SYNC;
		dma_params.src_start		= runtime->dma_addr;
		dma_params.dst_start		= dma_data->port_addr;
		dma_params.dst_port		= OMAP_DMA_PORT_MPUI;
	} else {
		dma_params.src_amode		= OMAP_DMA_AMODE_CONSTANT;
		dma_params.dst_amode		= OMAP_DMA_AMODE_POST_INC;
		dma_params.src_or_dst_synch	= OMAP_DMA_SRC_SYNC;
		dma_params.src_start		= dma_data->port_addr;
		dma_params.dst_start		= runtime->dma_addr;
		dma_params.src_port		= OMAP_DMA_PORT_MPUI;
	}
	/*
	 * Set DMA transfer frame size equal to ALSA period size and frame
	 * count as no. of ALSA periods. Then with DMA frame interrupt enabled,
	 * we can transfer the whole ALSA buffer with a single DMA transfer but
	 * still get an interrupt at each period boundary.
	 */
	dma_params.elem_count	= snd_pcm_lib_period_bytes(substream) / 2;
	dma_params.frame_count	= runtime->periods;
	omap_set_dma_params(prtd->dma_ch, &dma_params);

	omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);

	omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);

	return 0;
}
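Example no. 4
0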
static int abe_dbg_start_dma(struct omap_abe *abe, int circular)
{
    struct omap_dma_channel_params dma_params;
    int err;

    /* start the DMA in either :-
     *
     * 1) circular buffer mode where the DMA will restart when it get to
     *    the end of the buffer.
     * 2) default mode, where DMA stops at the end of the buffer.
     */

    abe->debugfs.dma_req = OMAP44XX_DMA_ABE_REQ_7;
    err = omap_request_dma(abe->debugfs.dma_req, "ABE debug",
                           abe_dbg_dma_irq, abe, &abe->debugfs.dma_ch);
    if (err < 0)
        return err;
    if (abe->debugfs.circular) {
        /*
         * Link channel with itself so DMA doesn't need any
         * reprogramming while looping the buffer
         */
        omap_dma_link_lch(abe->debugfs.dma_ch, abe->debugfs.dma_ch);
    }

    memset(&dma_params, 0, sizeof(dma_params));
    dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
    dma_params.trigger = abe->debugfs.dma_req;
    dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
    dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
    dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
    dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
    dma_params.src_start = OMAP_ABE_D_DEBUG_FIFO_ADDR +
        ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU;
    dma_params.dst_start = abe->debugfs.buffer_addr;
    dma_params.src_port = OMAP_DMA_PORT_MPUI;
    dma_params.src_ei = 1;
    dma_params.src_fi = 1 - abe->debugfs.elem_bytes;

    /* element size in bytes converted to 32-bit words */
    dma_params.elem_count = abe->debugfs.elem_bytes >> 2;
    dma_params.frame_count =
        abe->debugfs.buffer_bytes / abe->debugfs.elem_bytes;
    omap_set_dma_params(abe->debugfs.dma_ch, &dma_params);

    omap_enable_dma_irq(abe->debugfs.dma_ch, OMAP_DMA_FRAME_IRQ);
    omap_set_dma_src_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16);
    omap_set_dma_dest_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16);

    abe->debugfs.reader_offset = 0;

    pm_runtime_get_sync(abe->dev);
    omap_start_dma(abe->debugfs.dma_ch);
    return 0;
}
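
A rough illustration of the sizing logic above, with hypothetical values: each DMA frame drains one elem_bytes-sized chunk of the ABE debug FIFO, the capture buffer holds frame_count such frames, and in circular mode the self-linked channel restarts the whole block with no reprogramming. (The negative-going src_fi of 1 - elem_bytes rewinds the source over the FIFO window at each frame boundary; see the address-arithmetic sketch after Example no. 6.)

#include <stdio.h>

/* Hypothetical sizes, only to show how elem_count/frame_count derive. */
int main(void)
{
	unsigned int elem_bytes = 128;		/* one FIFO drain per frame */
	unsigned int buffer_bytes = 8192;	/* hypothetical capture buffer */

	printf("elem_count  = %u 32-bit words per frame\n", elem_bytes >> 2);
	printf("frame_count = %u frames per buffer\n",
	       buffer_bytes / elem_bytes);
	/* With the channel linked to itself, the DMA restarts the same
	 * block after the last frame, giving circular capture for free. */
	return 0;
}
Example no. 5
0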
static int hist_buf_dma(struct ispstat *hist)
{
    dma_addr_t dma_addr = hist->active_buf->dma_addr;

    if (unlikely(!dma_addr)) {
        dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
        hist_reset_mem(hist);
        return STAT_NO_BUF;
    }

    isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
    isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
                ISPHIST_CNT_CLEAR);
    omap3isp_flush(hist->isp);
    hist->dma_config.dst_start = dma_addr;
    hist->dma_config.elem_count = hist->buf_size / sizeof(u32);
    omap_set_dma_params(hist->dma_ch, &hist->dma_config);

    omap_start_dma(hist->dma_ch);

    return STAT_BUF_WAITING_DMA;
}
Example no. 6
0
static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch		*chdat = to_chdat(channel);
	struct tusb_omap_dma		*tusb_dma = chdat->tusb_dma;
	struct musb			*musb = chdat->musb;
	struct device			*dev = musb->controller;
	struct musb_hw_ep		*hw_ep = chdat->hw_ep;
	void __iomem			*mbase = musb->mregs;
	void __iomem			*ep_conf = hw_ep->conf;
	dma_addr_t			fifo = hw_ep->fifo_sync;
	struct omap_dma_channel_params	dma_params;
	u32				dma_remaining;
	int				src_burst, dst_burst;
	u16				csr;
	int				ch;
	s8				dmareq;
	s8				sync_dev;

	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
	 * register which will cause missed DMA interrupt. We could try to
	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
	 * register is corrupt, and we won't know if the DMA worked.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Because of HW issue #10, it seems like mixing sync DMA and async
	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
	 * using the channel for DMA.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		dev_dbg(musb->controller, "Busy %s dma ch%i, not using: %08x\n",
			chdat->tx ? "tx" : "rx", chdat->ch,
			dma_remaining);
		return false;
	}

	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	if (tusb_dma->multichannel) {
		ch = chdat->ch;
		dmareq = chdat->dmareq;
		sync_dev = chdat->sync_dev;
	} else {
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (tusb_dma->ch < 0) {
			/* REVISIT: This should get blocked earlier, happens
			 * with MSC ErrorRecoveryTest
			 */
			WARN_ON(1);
			return false;
		}

		ch = tusb_dma->ch;
		dmareq = tusb_dma->dmareq;
		sync_dev = tusb_dma->sync_dev;
		omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Since we're recycling dma areas, we need to clean or invalidate */
	if (chdat->tx)
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_TO_DEVICE);
	else
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_FROM_DEVICE);

	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
	if ((dma_addr & 0x3) == 0) {
		dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
		dma_params.elem_count = 8;		/* Elements in frame */
	} else {
		dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
		dma_params.elem_count = 16;		/* Elements in frame */
		fifo = hw_ep->fifo_async;
	}

	dma_params.frame_count	= chdat->transfer_len / 32; /* Burst sz frame */

	dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		ch, dma_addr, chdat->transfer_len, len,
		chdat->transfer_packet_sz, packet_sz);

	/*
	 * Prepare omap DMA for transfer
	 */
	if (chdat->tx) {
		dma_params.src_amode	= OMAP_DMA_AMODE_POST_INC;
		dma_params.src_start	= (unsigned long)dma_addr;
		dma_params.src_ei	= 0;
		dma_params.src_fi	= 0;

		dma_params.dst_amode	= OMAP_DMA_AMODE_DOUBLE_IDX;
		dma_params.dst_start	= (unsigned long)fifo;
		dma_params.dst_ei	= 1;
		dma_params.dst_fi	= -31;	/* Loop 32 byte window */

		dma_params.trigger	= sync_dev;
		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
		dma_params.src_or_dst_synch	= 0;	/* Dest sync */

		src_burst = OMAP_DMA_DATA_BURST_16;	/* 16x32 read */
		dst_burst = OMAP_DMA_DATA_BURST_8;	/* 8x32 write */
	} else {
		dma_params.src_amode	= OMAP_DMA_AMODE_DOUBLE_IDX;
		dma_params.src_start	= (unsigned long)fifo;
		dma_params.src_ei	= 1;
		dma_params.src_fi	= -31;	/* Loop 32 byte window */

		dma_params.dst_amode	= OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_start	= (unsigned long)dma_addr;
		dma_params.dst_ei	= 0;
		dma_params.dst_fi	= 0;

		dma_params.trigger	= sync_dev;
		dma_params.sync_mode	= OMAP_DMA_SYNC_FRAME;
		dma_params.src_or_dst_synch	= 1;	/* Source sync */

		src_burst = OMAP_DMA_DATA_BURST_8;	/* 8x32 read */
		dst_burst = OMAP_DMA_DATA_BURST_16;	/* 16x32 write */
	}

	dev_dbg(musb->controller, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		(dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		dma_params.src_start, dma_params.dst_start);

	omap_set_dma_params(ch, &dma_params);
	omap_set_dma_src_burst_mode(ch, src_burst);
	omap_set_dma_dest_burst_mode(ch, dst_burst);
	omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);

	/*
	 * Prepare MUSB for DMA transfer
	 */
	if (chdat->tx) {
		musb_ep_select(mbase, chdat->epnum);
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		musb_ep_select(mbase, chdat->epnum);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/*
	 * Start DMA transfer
	 */
	omap_start_dma(ch);

	if (chdat->tx) {
		/* Send transfer_packet_sz packets at a time */
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
			chdat->transfer_packet_sz);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Receive transfer_packet_sz packets at a time */
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
			chdat->transfer_packet_sz << 16);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}
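
The "loop 32 byte window" programming above can be checked with a little address arithmetic. The sketch below assumes the double-indexed stride rule implied by this code (each element advances the address by element_size + EI - 1, each frame boundary by element_size + FI - 1); the same rule explains the src_fi = 1 - elem_bytes trick in Example no. 4. Illustration only, not a datasheet-verified model:

#include <stdio.h>

/* Sketch: replay the address sequence of a double-indexed sDMA channel
 * under the assumed stride rules described above. */
int main(void)
{
	unsigned long start = 0x1000;	/* hypothetical FIFO base */
	int es = 4, en = 8;		/* S32 elements, 8/frame = 32 bytes */
	int ei = 1, fi = -31;		/* the TX programming above */
	unsigned long addr = start;
	int frame, elem;

	for (frame = 0; frame < 2; frame++) {
		for (elem = 0; elem < en; elem++) {
			printf("frame %d elem %d -> 0x%lx\n",
			       frame, elem, addr);
			if (elem < en - 1)
				addr += es + ei - 1;	/* next element */
		}
		addr += es + fi - 1;	/* frame stride: 4 - 31 - 1 = -28 */
	}
	/* After each 32-byte frame the address lands back on start, so the
	 * DMA keeps cycling through the same 32-byte FIFO window. */
	return 0;
}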
Example no. 7
0
static int aes_dma_init(struct aes_hwa_ctx *ctx)
{
	int err = -ENOMEM;
	struct omap_dma_channel_params dma_params;

	ctx->dma_lch_out = -1;
	ctx->dma_lch_in = -1;

	ctx->buflen = PAGE_SIZE;
	ctx->buflen &= ~(AES_BLOCK_SIZE - 1);

	dprintk(KERN_INFO "aes_dma_init(%p)\n", ctx);

	/* Allocate and map cache buffers */
	ctx->buf_in = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_in,
		GFP_KERNEL);
	if (!ctx->buf_in) {
		dprintk(KERN_ERR "SMC: Unable to alloc AES in cache buffer\n");
		return -ENOMEM;
	}

	ctx->buf_out = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_out,
		GFP_KERNEL);
	if (!ctx->buf_out) {
		dprintk(KERN_ERR "SMC: Unable to alloc AES out cache buffer\n");
		dma_free_coherent(NULL, ctx->buflen, ctx->buf_in,
			ctx->dma_addr_in);
		return -ENOMEM;
	}

	/* Request DMA channels */
	err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback, ctx,
		&ctx->dma_lch_in);
	if (err) {
		dprintk(KERN_ERR "SMC: Unable to request AES RX DMA channel\n");
		goto err_dma_in;
	}

	err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback,
		ctx, &ctx->dma_lch_out);
	if (err) {
		dprintk(KERN_ERR "SMC: Unable to request AES TX DMA channel\n");
		goto err_dma_out;
	}

	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
	dma_params.src_ei = 0;
	dma_params.src_fi = 0;
	dma_params.dst_ei = 0;
	dma_params.dst_fi = 0;
	dma_params.read_prio = 0;
	dma_params.write_prio = 0;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;

	/* IN */
	dma_params.trigger = ctx->dma_in;
	dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;

	omap_set_dma_params(ctx->dma_lch_in, &dma_params);
	omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);

	/* OUT */
	dma_params.trigger = ctx->dma_out;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	omap_set_dma_params(ctx->dma_lch_out, &dma_params);
	omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);

	omap_dma_base = ioremap(0x4A056000, 0x1000);
	if (!omap_dma_base) {
		printk(KERN_ERR "SMC: Unable to ioremap DMA registers\n");
		err = -ENOMEM;
		omap_free_dma(ctx->dma_lch_out);
		goto err_dma_out;
	}

	dprintk(KERN_INFO "aes_dma_init(%p) configured DMA channels"
		"(RX = %d, TX = %d)\n", ctx, ctx->dma_lch_in, ctx->dma_lch_out);

	return 0;

err_dma_out:
	omap_free_dma(ctx->dma_lch_in);
err_dma_in:
	dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
	dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);

	return err;
}
Example no. 8
0
static int aes_dma_start(struct aes_hwa_ctx *ctx)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct omap_dma_channel_params dma_params;
	struct tf_crypto_aes_operation_state *state =
		crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
	static size_t last_count;
	unsigned long flags;

	in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
	out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));

	fast = in && out;

	if (fast) {
		count = min(ctx->total, sg_dma_len(ctx->in_sg));
		count = min(count, sg_dma_len(ctx->out_sg));

		if (count != ctx->total)
			return -EINVAL;

		/* Only call dma_map_sg if it has not yet been done */
		if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
			if (!err)
				return -EINVAL;

			err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(
					NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
		ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE;

		addr_in = sg_dma_address(ctx->in_sg);
		addr_out = sg_dma_address(ctx->out_sg);

		ctx->flags |= FLAGS_FAST;
	} else {
		count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
			ctx->buflen, ctx->total, 0);
		addr_in = ctx->dma_addr_in;
		addr_out = ctx->dma_addr_out;

		ctx->flags &= ~FLAGS_FAST;
	}

	ctx->total -= count;

	/* Configure HWA */
	tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);

	tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0);

	OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG)
		| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
		| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

	ctx->dma_size = count;
	if (!fast)
		dma_sync_single_for_device(NULL, addr_in, count,
			DMA_TO_DEVICE);

	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.frame_count = count / AES_BLOCK_SIZE;
	dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
	dma_params.src_ei = 0;
	dma_params.src_fi = 0;
	dma_params.dst_ei = 0;
	dma_params.dst_fi = 0;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
	dma_params.read_prio = 0;
	dma_params.write_prio = 0;

	/* IN */
	dma_params.trigger = ctx->dma_in;
	dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.src_start = addr_in;
	dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_in, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_in,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
	} else {
		if (last_count != count)
			omap_set_dma_transfer_params(ctx->dma_lch_in,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);

		/* Configure input start address */
		__raw_writel(dma_params.src_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c));
	}

	/* OUT */
	dma_params.trigger = ctx->dma_out;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.dst_start = addr_out;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_out, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_out,
			OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
		reconfigure_dma = false;
	} else {
		if (last_count != count) {
			omap_set_dma_transfer_params(ctx->dma_lch_out,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);
			last_count = count;
		}
		/* Configure output start address */
		__raw_writel(dma_params.dst_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0));
	}

	/* Is this really needed? */
	omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
	omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);

	wmb();

	omap_start_dma(ctx->dma_lch_in);
	omap_start_dma(ctx->dma_lch_out);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->next_req) {
		struct ablkcipher_request *req =
			ablkcipher_request_cast(ctx->next_req);

		if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE);
			if (!err) {
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE);
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE;
			ctx->next_req = NULL;
		}
	}

	if (ctx->backlog) {
		ctx->backlog->complete(ctx->backlog, -EINPROGRESS);
		ctx->backlog = NULL;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	return 0;
}
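
The __raw_writel() fast path above implies a register layout of 0x60 bytes per logical channel within the block ioremapped at 0x4A056000 in Example no. 7, with the source start address at +0x9c and the destination start address at +0xa0 of each channel's slot. A small sketch of that address computation (the layout is inferred from this code, not from a TRM):

#include <stdio.h>

/* Inferred from the code above: per-channel register stride of 0x60,
 * source start at +0x9c, destination start at +0xa0. */
#define DMA_BASE	0x4A056000UL	/* physical base of the sDMA block */
#define CH_STRIDE	0x60UL
#define REG_SRC_START	0x9cUL
#define REG_DST_START	0xa0UL

int main(void)
{
	int lch = 3;	/* hypothetical logical channel number */

	printf("ch%d src_start reg: 0x%lx\n", lch,
	       DMA_BASE + CH_STRIDE * lch + REG_SRC_START);
	printf("ch%d dst_start reg: 0x%lx\n", lch,
	       DMA_BASE + CH_STRIDE * lch + REG_DST_START);
	return 0;
}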
Example no. 9
0
/*
 * Static function, performs AES encryption/decryption using the DMA for
 * data transfer.
 *
 * inputs: src : pointer to the input data to process
 *         nb_blocks : number of blocks to process
 *         dma_use : PUBLIC_CRYPTO_DMA_USE_IRQ (use irq to monitor end of DMA)
 *                   | PUBLIC_CRYPTO_DMA_USE_POLLING (poll the end of DMA)
 * output: dest : pointer to the output data (can be eq to src)
 */
static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks,
			      u32 ctrl, bool is_kernel)
{
	/*
	 * Note: the DMA only sees physical addresses!
	 */

	int dma_ch0;
	int dma_ch1;
	struct omap_dma_channel_params ch0_parameters;
	struct omap_dma_channel_params ch1_parameters;
	u32 length = nb_blocks * AES_BLOCK_SIZE;
	u32 length_loop = 0;
	u32 nb_blocksLoop = 0;
	struct tf_device *dev = tf_get_device();

	dprintk(KERN_INFO
		"%s: In=0x%08x, Out=0x%08x, Len=%u\n",
		__func__,
		(unsigned int)src,
		(unsigned int)dest,
		(unsigned int)length);

	/*lock the DMA */
	while (!mutex_trylock(&dev->sm.dma_mutex))
		cpu_relax();

	if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
		mutex_unlock(&dev->sm.dma_mutex);
		return false;
	}
	if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
		omap_free_dma(dma_ch0);
		mutex_unlock(&dev->sm.dma_mutex);
		return false;
	}

	while (length > 0) {

		/*
		 * At this time, we are sure that the DMA channels are
		 * available and not used by another public crypto operation.
		 */

		/*DMA used for Input and Output */
		OUTREG32(&paes_reg->AES_SYSCONFIG,
			INREG32(&paes_reg->AES_SYSCONFIG)
			| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
			| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

		/*check length */
		if (length <= dev->dma_buffer_length)
			length_loop = length;
		else
			length_loop = dev->dma_buffer_length;

		/*The length is always a multiple of the block size */
		nb_blocksLoop = length_loop / AES_BLOCK_SIZE;

		/*
		 * Copy the data from the user input buffer into a preallocated
		 * buffer which has the correct properties for efficient DMA
		 * transfers.
		 */
		if (!is_kernel) {
			if (copy_from_user(
				 dev->dma_buffer, src, length_loop)) {
				omap_free_dma(dma_ch0);
				omap_free_dma(dma_ch1);
				mutex_unlock(&dev->sm.dma_mutex);
				return false;
			}
		} else {
			memcpy(dev->dma_buffer, src, length_loop);
		}

		/*DMA1: Mem -> AES */
		tf_dma_set_channel_common_params(&ch0_parameters,
			nb_blocksLoop,
			DMA_CEN_Elts_per_Frame_AES,
			AES1_REGS_HW_ADDR + 0x60,
			(u32)dev->dma_buffer_phys,
			OMAP44XX_DMA_AES1_P_DATA_IN_REQ);

		ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
		ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;

		dprintk(KERN_INFO "%s: omap_set_dma_params(ch0)\n", __func__);
		omap_set_dma_params(dma_ch0, &ch0_parameters);

		omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(dma_ch0, 1);

		/*DMA2: AES -> Mem */
		tf_dma_set_channel_common_params(&ch1_parameters,
			nb_blocksLoop,
			DMA_CEN_Elts_per_Frame_AES,
			(u32)dev->dma_buffer_phys,
			AES1_REGS_HW_ADDR + 0x60,
			OMAP44XX_DMA_AES1_P_DATA_OUT_REQ);

		ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT;
		ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC;
		ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC;

		dprintk(KERN_INFO "%s: omap_set_dma_params(ch1)\n", __func__);
		omap_set_dma_params(dma_ch1, &ch1_parameters);

		omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(dma_ch1, 1);

		wmb();

		dprintk(KERN_INFO
			"%s: Start DMA channel %d\n",
			__func__, (unsigned int)dma_ch1);
		tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ);
		dprintk(KERN_INFO
			"%s: Start DMA channel %d\n",
			__func__, (unsigned int)dma_ch0);
		tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ);

		dprintk(KERN_INFO
			"%s: Waiting for IRQ\n", __func__);
		tf_dma_wait(2);

		/*Unset DMA synchronisation requests */
		OUTREG32(&paes_reg->AES_SYSCONFIG,
				INREG32(&paes_reg->AES_SYSCONFIG)
			& (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT)
			& (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT));

		omap_clear_dma(dma_ch0);
		omap_clear_dma(dma_ch1);

		/*
		 *The dma transfer is complete
		 */

		pr_info("%s completing\n", __func__);
#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
		tf_aes_fault_injection(ctrl, dev->dma_buffer);
#endif

		/*
		 * The DMA output is in the preallocated aligned buffer
		 * and needs to be copied to the output buffer.
		 */
		if (!is_kernel) {
			if (copy_to_user(
				dest, dev->dma_buffer, length_loop)) {
				omap_free_dma(dma_ch0);
				omap_free_dma(dma_ch1);
				mutex_unlock(&dev->sm.dma_mutex);
				return false;
			}
		} else {
			memcpy(dest, dev->dma_buffer, length_loop);
		}

		src += length_loop;
		dest += length_loop;
		length -= length_loop;
	}

	/* For safety reasons, clean the whole working buffer */
	memset(dev->dma_buffer, 0, dev->dma_buffer_length);

	/*release the DMA */
	omap_free_dma(dma_ch0);
	omap_free_dma(dma_ch1);

	mutex_unlock(&dev->sm.dma_mutex);

	dprintk(KERN_INFO "%s: Success\n", __func__);

	return true;
}
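
A minimal standalone sketch of the chunking arithmetic used by the staging loop above, with hypothetical sizes; the driver guarantees length is a multiple of AES_BLOCK_SIZE, so every chunk is a whole number of blocks:

#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	unsigned int length = 100 * AES_BLOCK_SIZE;	/* hypothetical request */
	unsigned int dma_buffer_length = 1024;	/* hypothetical staging buffer */

	while (length > 0) {
		unsigned int length_loop =
			length <= dma_buffer_length ? length
						    : dma_buffer_length;

		/* Each chunk is staged, DMA'd through the AES engine, and
		 * copied back, exactly as in the loop above. */
		printf("chunk: %u bytes = %u AES blocks\n",
		       length_loop, length_loop / AES_BLOCK_SIZE);
		length -= length_loop;
	}
	return 0;
}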
Example no. 10
0
/**
 * @brief omap_request_dma_chain : Request a chain of DMA channels
 *
 * @param dev_id - Device id using the dma channel
 * @param dev_name - Device name
 * @param callback - Call back function
 * @param chain_id - Returned chain id (set to the first channel in the chain)
 * @param no_of_chans - Number of channels requested
 * @param chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
 * 					      OMAP_DMA_DYNAMIC_CHAIN
 * @param params - Channel parameters
 *
 * @return - Success : 0
 * 	     Failure: -EINVAL/-ENOMEM
 */
int omap_request_dma_chain(int dev_id, const char *dev_name,
						   void (*callback) (int lch, u16 ch_status,
											 void *data),
						   int *chain_id, int no_of_chans, int chain_mode,
						   struct omap_dma_channel_params params)
{
	int *channels;
	int i, err;
	
	/* Is the chain mode valid? */
	if (chain_mode != OMAP_DMA_STATIC_CHAIN
		&& chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
		IOLog("Invalid chain mode requested\n");
		return -EINVAL;
	}
	
	if (unlikely(no_of_chans < 1 || no_of_chans > dma_lch_count)) {
		IOLog("Invalid Number of channels requested\n");
		return -EINVAL;
	}
	
	/* Allocate a queue to maintain the status of the channels
	 * in the chain */
	channels = malloc(sizeof(*channels) * no_of_chans);
	
	if (channels == NULL) {
		IOLog("omap_dma: No memory for channel queue\n");
		return -ENOMEM;
	}
	
	/* request and reserve DMA channels for the chain */
	for (i = 0; i < no_of_chans; i++) {
		err = omap_request_dma(dev_id, dev_name,
							   callback, NULL, &channels[i]);
		if (err < 0) {
			int j;
			for (j = 0; j < i; j++)
				omap_free_dma(channels[j]);
			
			free(channels);
			
			IOLog("omap_dma: Request failed %d\n", err);
			return err;
		}
		
		dma_chan[channels[i]].prev_linked_ch = -1;
		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
		
		/*
		 * Client drivers set the common parameters now, so that
		 * later only the relevant fields (src_start, dest_start
		 * and element count) need to be updated.
		 */
		omap_set_dma_params(channels[i], &params);
	}
	
	*chain_id = channels[0];
	dma_linked_lch[*chain_id].linked_dmach_q = channels;
	dma_linked_lch[*chain_id].chain_mode = chain_mode;
	dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
	dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
	
	for (i = 0; i < no_of_chans; i++)
		dma_chan[channels[i]].chain_id = *chain_id;
	
	/* Reset the Queue pointers */
	OMAP_DMA_CHAIN_QINIT(*chain_id);
	
	/* Set up the chain */
	if (no_of_chans == 1)
		create_dma_lch_chain(channels[0], channels[0]);
	else {
		for (i = 0; i < (no_of_chans - 1); i++)
			create_dma_lch_chain(channels[i], channels[i + 1]);
	}
	
	return 0;
}
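
As a usage sketch tying Example no. 2 and Example no. 10 together: a client requests the chain once with the common parameters, then patches only the transfer-specific fields between runs. Everything here apart from the two chain functions and the callback signature documented above is hypothetical:

/*
 * Hypothetical client, kernel context assumed (the chain API, dma_addr_t
 * and u16 come from the surrounding headers). MY_DEV_ID, my_dma_done and
 * my_setup_and_retarget are made-up names for illustration.
 */
#define MY_DEV_ID	0	/* hypothetical DMA request line */

static void my_dma_done(int lch, u16 ch_status, void *data)
{
	/* transfer-complete handling would go here */
}

static int my_setup_and_retarget(dma_addr_t src, dma_addr_t dst, int len)
{
	struct omap_dma_channel_params params;
	int chain_id, err;

	memset(&params, 0, sizeof(params));
	params.data_type = OMAP_DMA_DATA_TYPE_S32;
	params.sync_mode = OMAP_DMA_SYNC_FRAME;
	params.src_amode = OMAP_DMA_AMODE_POST_INC;
	params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	err = omap_request_dma_chain(MY_DEV_ID, "my-dev", my_dma_done,
				     &chain_id, 2, OMAP_DMA_DYNAMIC_CHAIN,
				     params);
	if (err)
		return err;

	/*
	 * Between transfers, only the transfer-specific fields change,
	 * as the comment in Example no. 2 suggests.
	 */
	params.src_start = src;
	params.dst_start = dst;
	params.elem_count = len >> 2;	/* S32 elements */
	params.frame_count = 1;
	return omap_modify_dma_chain_params(chain_id, params);
}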