Example No. 1
0
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
#ifdef CONFIG_PM
	/* Disallow OCP bus idle. UART TX irqs are not seen during
	 * bus idle. An alternative is to set a kernel timer at the
	 * FIFO drain rate.
	 */
	unsigned int tmp;
	tmp = (serial_in(up, UART_OMAP_SYSC) & 0x7) | (1 << 3);
	serial_out(up, UART_OMAP_SYSC, tmp); /* no-idle */
#endif

	if (up->use_dma && !(up->port.x_char)) {
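		/* A pending x_char (flow-control byte) is sent through the
		 * FIFO/interrupt path below rather than by DMA. */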

		struct circ_buf *xmit = &up->port.state->xmit;
		unsigned int start = up->uart_dma.tx_buf_dma_phys +
				     (xmit->tail & (UART_XMIT_SIZE - 1));
		if (uart_circ_empty(xmit) || up->uart_dma.tx_dma_state)
			return;
		spin_lock(&(up->uart_dma.tx_lock));
		up->uart_dma.tx_dma_state = 1;
		spin_unlock(&(up->uart_dma.tx_lock));

		up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
		/* It is a circular buffer. See if the buffer has wrapped
		 * around. If so, it has to be transferred in two separate
		 * DMA transfers. */
		if (start + up->uart_dma.tx_buf_size >=
				up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
			up->uart_dma.tx_buf_size =
				(up->uart_dma.tx_buf_dma_phys +
				UART_XMIT_SIZE) - start;

		if (up->uart_dma.tx_dma_channel == 0xFF) {
			omap_request_dma(uart_dma_tx[up->pdev->id-1],
					 "UART Tx DMA",
					 (void *)uart_tx_dma_callback, up,
					 &(up->uart_dma.tx_dma_channel));
		}
		omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
					 OMAP_DMA_AMODE_CONSTANT,
					 UART_BASE(up->pdev->id - 1), 0, 0);
		omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
			OMAP_DMA_AMODE_POST_INC, start, 0, 0);

		omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
					     OMAP_DMA_DATA_TYPE_S8,
					     up->uart_dma.tx_buf_size, 1,
					     OMAP_DMA_SYNC_ELEMENT,
					     uart_dma_tx[(up->pdev->id)-1], 0);

		omap_start_dma(up->uart_dma.tx_dma_channel);

	} else if (!(up->ier & UART_IER_THRI)) {
		up->ier |= UART_IER_THRI;
		serial_out(up, UART_IER, up->ier);
	}

	if (up->restore_autorts) {
		serial_omap_set_autorts(up, 1);
		up->restore_autorts = 0;
	}
}
Example No. 2
0
/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	data_addr = host->phys_base + OMAP_MMC_REG_DATA;
	frame = data->blksz;
	count = sg_dma_len(sg);

	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;
	frame >>= 1;
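	/* e.g. for a typical 512-byte block on 16xx/24xx: frame is capped at
	 * 64 bytes, count becomes (bytes / 64) frames, and frame >>= 1 yields
	 * 32 16-bit elements per frame. */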

	if (!(data->flags & MMC_DATA_WRITE)) {
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}
Example No. 3
0
static int omapvout_dss_perform_vrfb_dma(struct omapvout_device *vout,
					int buf_idx, bool vrfb_cfg)
{
	int rc = 0;
	int rot = 0;
	struct omapvout_dss_vrfb *vrfb;
	u32 src_paddr;
	u32 dst_paddr;

	/* It is assumed that the caller has locked the vout mutex */

	if (vout->dss->vrfb.req_status != DMA_CHAN_ALLOTED)
		return -EINVAL;

	vrfb = &vout->dss->vrfb;

	if (vrfb_cfg) {
		enum omap_color_mode dss_fmt;
		int bytespp;
		int w, h;
		u32 fmt = vout->pix.pixelformat;

		w = vout->crop.width;
		h = vout->crop.height;

		dss_fmt = omapvout_dss_color_mode(vout->pix.pixelformat);
		omap_vrfb_setup(&vrfb->ctx[0], vrfb->phy_addr[0],
				w, h, dss_fmt, vout->rotation);
		omap_vrfb_setup(&vrfb->ctx[1], vrfb->phy_addr[1],
				w, h, dss_fmt, vout->rotation);

		bytespp = omapvout_dss_format_bytespp(vout->pix.pixelformat);
		vrfb->en = (w * bytespp) / 4; /* 32 bit ES */
		vrfb->fn = h;
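		/* Double-indexed destination: an element index of 1 keeps
		 * elements contiguous within a line, while the frame index
		 * jumps from the end of one written line to the start of the
		 * next VRFB line, i.e. (VRFB line stride in bytes - bytes
		 * written per line) + 1. */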
		vrfb->dst_ei = 1;
		if (fmt == V4L2_PIX_FMT_YUYV || fmt == V4L2_PIX_FMT_UYVY) {
			vrfb->dst_fi = (OMAP_VRFB_LINE_LEN * bytespp * 2)
							- (vrfb->en * 4) + 1;
		} else {
			vrfb->dst_fi = (OMAP_VRFB_LINE_LEN * bytespp)
							- (vrfb->en * 4) + 1;
		}
	}

	switch (vout->rotation) {
	case 1:
		rot = 3;
		break;
	case 3:
		rot = 1;
		break;
	default:
		rot = vout->rotation;
		break;
	}

	src_paddr = vout->queue.bufs[buf_idx]->baddr;
	dst_paddr = vrfb->ctx[vrfb->next].paddr[rot];

	omap_set_dma_transfer_params(vrfb->dma_ch, OMAP_DMA_DATA_TYPE_S32,
				vrfb->en, vrfb->fn, OMAP_DMA_SYNC_ELEMENT,
				vrfb->dma_id, 0x0);
	omap_set_dma_src_params(vrfb->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
				src_paddr, 0, 0);
	omap_set_dma_src_burst_mode(vrfb->dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_set_dma_dest_params(vrfb->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
				dst_paddr, vrfb->dst_ei, vrfb->dst_fi);
	omap_set_dma_dest_burst_mode(vrfb->dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);

	vrfb->dma_complete = false;
	omap_start_dma(vrfb->dma_ch);
	wait_event_interruptible_timeout(vrfb->wait, vrfb->dma_complete,
							VRFB_TX_TIMEOUT);

	if (!vrfb->dma_complete) {
		DBG("VRFB DMA timeout\n");
		omap_stop_dma(vrfb->dma_ch);
		return -EINVAL;
	}

	return rc;
}
Example No. 4
0
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
	struct circ_buf *xmit;
	unsigned int start;
	int ret = 0;

	if (!up->use_dma) {
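		/* Interrupt-driven path: wake the device via runtime PM and
		 * enable the TX holding register empty interrupt; the irq
		 * handler then drains the circular buffer through the FIFO. */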
		pm_runtime_get_sync(&up->pdev->dev);
		serial_omap_enable_ier_thri(up);
		pm_runtime_mark_last_busy(&up->pdev->dev);
		pm_runtime_put_autosuspend(&up->pdev->dev);
		return;
	}

	if (up->uart_dma.tx_dma_used)
		return;

	xmit = &up->port.state->xmit;

	if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
		pm_runtime_get_sync(&up->pdev->dev);
		ret = omap_request_dma(up->uart_dma.uart_dma_tx,
				"UART Tx DMA",
				(void *)uart_tx_dma_callback, up,
				&(up->uart_dma.tx_dma_channel));

		if (ret < 0) {
			serial_omap_enable_ier_thri(up);
			return;
		}
	}
	spin_lock(&(up->uart_dma.tx_lock));
	up->uart_dma.tx_dma_used = true;
	spin_unlock(&(up->uart_dma.tx_lock));

	start = up->uart_dma.tx_buf_dma_phys +
				(xmit->tail & (UART_XMIT_SIZE - 1));

	up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
	/*
	 * It is a circular buffer. See if the buffer has wrapped around.
	 * If so, it has to be transferred in two separate DMA transfers.
	 */
	if (start + up->uart_dma.tx_buf_size >=
			up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
		up->uart_dma.tx_buf_size =
			(up->uart_dma.tx_buf_dma_phys +
			UART_XMIT_SIZE) - start;

	omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
	omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC, start, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.tx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_tx, 0);
	/* FIXME: Cache maintenance needed here? */
	omap_start_dma(up->uart_dma.tx_dma_channel);
}
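
Both versions of serial_omap_start_tx (Examples No. 1 and No. 4) hand uart_tx_dma_callback to omap_request_dma() without showing it. The sketch below illustrates what such a completion handler could look like, assuming the legacy OMAP DMA callback signature (int lch, u16 ch_status, void *data) and reusing the field names from Example No. 4; it is an illustration of the pattern, not the driver's actual callback.

static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
{
	struct uart_omap_port *up = data;
	struct circ_buf *xmit = &up->port.state->xmit;

	/* Retire the bytes the DMA transfer just pushed out. */
	xmit->tail = (xmit->tail + up->uart_dma.tx_buf_size) &
			(UART_XMIT_SIZE - 1);
	up->port.icount.tx += up->uart_dma.tx_buf_size;

	spin_lock(&(up->uart_dma.tx_lock));
	up->uart_dma.tx_dma_used = false;
	spin_unlock(&(up->uart_dma.tx_lock));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);

	/* Start the next chunk if the circular buffer is not yet empty. */
	if (!uart_circ_empty(xmit))
		serial_omap_start_tx(&up->port);
}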
Example No. 5
0
int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
				struct videobuf_buffer *vb)
{
	dma_addr_t dmabuf;
	struct vid_vrfb_dma *tx;
	enum dss_rotation rotation;
	u32 dest_frame_index = 0, src_element_index = 0;
	u32 dest_element_index = 0, src_frame_index = 0;
	u32 elem_count = 0, frame_count = 0, pixsize = 2;

	if (!is_rotation_enabled(vout))
		return 0;

	dmabuf = vout->buf_phy_addr[vb->i];
	/* If rotation is enabled, copy the input buffer into VRFB memory
	 * space using DMA. The buffer is written into the VRFB 0 degree
	 * view, and the DSS is later pointed at the view for the desired
	 * angle (see the end of this function).
	 */
	pixsize = vout->bpp * vout->vrfb_bpp;
	/*
	 * DMA transfer in double index mode
	 */

	/* Destination frame index: jump from the end of one copied line to
	 * the start of the next VRFB line (stride of MAX_PIXELS_PER_LINE
	 * pixels), i.e. (line stride in bytes - copied bytes per line) + 1.
	 */
	dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
			(vout->pix.width * vout->bpp)) + 1;

	/* Source and destination parameters */
	src_element_index = 0;
	src_frame_index = 0;
	dest_element_index = 1;
	/* Number of elements per frame */
	elem_count = vout->pix.width * vout->bpp;
	frame_count = vout->pix.height;
	tx = &vout->vrfb_dma_tx;
	tx->tx_status = 0;
	omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
			(elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
			tx->dev_id, 0x0);
	/* src_port required only for OMAP1 */
	omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
			dmabuf, src_element_index, src_frame_index);
	/* Set DMA source burst mode for VRFB */
	omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
	rotation = calc_rotation(vout);

	/* dest_port required only for OMAP1 */
	omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
			vout->vrfb_context[vb->i].paddr[0], dest_element_index,
			dest_frame_index);
	/* Set DMA destination burst mode for VRFB */
	omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);

	omap_start_dma(tx->dma_ch);
	interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);

	if (tx->tx_status == 0) {
		omap_stop_dma(tx->dma_ch);
		return -EINVAL;
	}
	/* Store the buffer's physical address into an array. Addresses
	 * from this array will be used to configure the DSS. */
	vout->queued_buf_addr[vb->i] = (u8 *)
		vout->vrfb_context[vb->i].paddr[rotation];
	return 0;
}
Example No. 6
0
/*
 * Routine to configure and start DMA for the MMC card
 */
static int
mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
{
	int sync_dev, sync_dir = 0;
	int dma_ch = 0, ret = 0, err = 1;
	struct mmc_data *data = req->data;

	/*
	 * If for some reason the DMA transfer is still active,
	 * we wait for timeout period and free the dma
	 */
	if (host->dma_ch != -1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(100);
		if (down_trylock(&host->sem)) {
			omap_free_dma(host->dma_ch);
			host->dma_ch = -1;
			up(&host->sem);
			return err;
		}
	} else {
		if (down_trylock(&host->sem))
			return err;
	}

	if (!(data->flags & MMC_DATA_WRITE)) {
		host->dma_dir = DMA_FROM_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_RX;
		else if (host->id == OMAP_MMC2_DEVID)
			sync_dev = OMAP24XX_DMA_MMC2_RX;
		else
#ifdef CONFIG_OMAP_HS_MMC3
			sync_dev = OMAP34XX_DMA_MMC3_RX;
#else
			sync_dev = OMAP24XX_DMA_MMC2_RX;
#endif
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_TX;
		else if (host->id == OMAP_MMC2_DEVID)
			sync_dev = OMAP24XX_DMA_MMC2_TX;
		else
#ifdef CONFIG_OMAP_HS_MMC3
			sync_dev = OMAP34XX_DMA_MMC3_TX;
#else
			sync_dev = OMAP24XX_DMA_MMC2_TX;
#endif
	}

	ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb,
			host, &dma_ch);
	if (ret != 0) {
		dev_dbg(mmc_dev(host->mmc),
			"%s: omap_request_dma() failed with %d\n",
			mmc_hostname(host->mmc), ret);
		return ret;
	}

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, host->dma_dir);
	host->dma_ch = dma_ch;

	if (!(data->flags & MMC_DATA_WRITE))
		mmc_omap_config_dma_param(1, host, data);
	else
		mmc_omap_config_dma_param(0, host, data);

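	/* One DMA frame per MMC block: blksz/4 32-bit elements per frame,
	 * data->blocks frames, frame-synchronized on the controller's DMA
	 * request line. */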
	if ((data->blksz % 4) == 0)
		omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
			(data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME,
			sync_dev, sync_dir);
	else
		/* REVISIT: The MMC buffer increments only when the MSB is
		 * written. Return an error for a blksz that is not a
		 * multiple of four.
		 */
		return -EINVAL;

	omap_start_dma(dma_ch);
	return 0;
}
Example No. 7
0
static void configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct musb_dma_channel *musb_channel = channel->private_data;
	struct musb_dma_controller *controller = musb_channel->controller;
	void __iomem *mbase = controller->base;
	u8 bchannel = musb_channel->idx;
	u16 csr = 0;

	DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
			channel, packet_sz, dma_addr, len, mode);
	if (musb_channel->sysdma_channel != -1) {
		/* System DMA */
		/* RX: set src = FIFO */
		omap_set_dma_transfer_params(musb_channel->sysdma_channel,
					OMAP_DMA_DATA_TYPE_S8,
					len ? len : 1, 1, /* One frame */
					OMAP_DMA_SYNC_ELEMENT,
					OMAP24XX_DMA_NO_DEVICE,
					0); /* Src Sync */

		omap_set_dma_src_params(musb_channel->sysdma_channel, 0,
					OMAP_DMA_AMODE_CONSTANT,
					MUSB_FIFO_ADDRESS(musb_channel->epnum),
					0, 0);

		omap_set_dma_dest_params(musb_channel->sysdma_channel, 0,
					OMAP_DMA_AMODE_POST_INC, dma_addr,
					0, 0);

		omap_set_dma_dest_data_pack(musb_channel->sysdma_channel, 1);
		omap_set_dma_dest_burst_mode(musb_channel->sysdma_channel,
					OMAP_DMA_DATA_BURST_16);

		omap_start_dma(musb_channel->sysdma_channel);

	} else { /* Mentor DMA */
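		/* Program the Mentor HSDMA channel CSR. Mode 1 lets the
		 * controller move multiple packet_sz-sized USB packets
		 * without a per-packet interrupt; mode 0 moves one packet
		 * at a time. */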

		if (mode) {
			csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
			BUG_ON(len < packet_sz);

			if (packet_sz >= 64) {
				csr |= MUSB_HSDMA_BURSTMODE_INCR16
						<< MUSB_HSDMA_BURSTMODE_SHIFT;
			} else if (packet_sz >= 32) {
				csr |= MUSB_HSDMA_BURSTMODE_INCR8
						<< MUSB_HSDMA_BURSTMODE_SHIFT;
			} else if (packet_sz >= 16) {
				csr |= MUSB_HSDMA_BURSTMODE_INCR4
						<< MUSB_HSDMA_BURSTMODE_SHIFT;
			}
		}

		csr |= (musb_channel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
			| (1 << MUSB_HSDMA_ENABLE_SHIFT)
			| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
			| (musb_channel->transmit
					? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
					: 0);

		/* address/count */
		musb_write_hsdma_addr(mbase, bchannel, dma_addr);
		musb_write_hsdma_count(mbase, bchannel, len);

		/* control (this should start things) */
		musb_writew(mbase,
			MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_CONTROL),
			csr);
	}
}
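
All of the examples rely on the same legacy plat-omap DMA interface: omap_request_dma() binds a request line to a logical channel and registers a completion callback, omap_set_dma_src_params(), omap_set_dma_dest_params() and omap_set_dma_transfer_params() describe the transfer, and omap_start_dma() starts it. The condensed sketch below shows that skeleton on its own; MY_DMA_DEV_ID, my_dma_callback and the address/count parameters are placeholders, and the header path varies between kernel versions.

#include <plat/dma.h>	/* legacy OMAP DMA API; path varies by kernel version */

static int my_dma_ch = -1;	/* logical channel handle */

/* Completion callback; ch_status carries the channel status bits. */
static void my_dma_callback(int lch, u16 ch_status, void *data)
{
	/* signal completion to the waiting context here */
}

static int my_start_transfer(unsigned long src_paddr, unsigned long dst_paddr,
			     int elem_count, int frame_count)
{
	int ret;

	if (my_dma_ch == -1) {
		ret = omap_request_dma(MY_DMA_DEV_ID, "my device",
				       my_dma_callback, NULL, &my_dma_ch);
		if (ret)
			return ret;
	}

	/* Source increments through memory; destination is a fixed register. */
	omap_set_dma_src_params(my_dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
				src_paddr, 0, 0);
	omap_set_dma_dest_params(my_dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
				 dst_paddr, 0, 0);
	omap_set_dma_transfer_params(my_dma_ch, OMAP_DMA_DATA_TYPE_S32,
				     elem_count, frame_count,
				     OMAP_DMA_SYNC_ELEMENT, MY_DMA_DEV_ID, 0);
	omap_start_dma(my_dma_ch);
	return 0;
}

Teardown is symmetric: omap_stop_dma() on error or timeout, and omap_free_dma() once the channel is no longer needed, as the MMC example above does.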