Example 1
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
	struct circ_buf *xmit;
	unsigned int start;
	int ret = 0;

	if (!up->use_dma) {
		serial_omap_enable_ier_thri(up);
		return;
	}

	if (up->uart_dma.tx_dma_used)
		return;

	xmit = &up->port.state->xmit;

	if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
		ret = omap_request_dma(up->uart_dma.uart_dma_tx,
				"UART Tx DMA",
				(void *)uart_tx_dma_callback, up,
				&(up->uart_dma.tx_dma_channel));

		if (ret < 0) {
			serial_omap_enable_ier_thri(up);
			return;
		}
	}
	spin_lock(&(up->uart_dma.tx_lock));
	up->uart_dma.tx_dma_used = true;
	spin_unlock(&(up->uart_dma.tx_lock));

	start = up->uart_dma.tx_buf_dma_phys +
				(xmit->tail & (UART_XMIT_SIZE - 1));

	up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
	/*
	 * It is a circular buffer. See if the buffer has wrapped around.
	 * If yes it will have to be transferred in two separate dma
	 * transfers
	 */
	if (start + up->uart_dma.tx_buf_size >=
			up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
		up->uart_dma.tx_buf_size =
			(up->uart_dma.tx_buf_dma_phys +
			UART_XMIT_SIZE) - start;

	omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
	omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC, start, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.tx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_tx, 0);
	/* FIXME: Cache maintenance needed here? */
	omap_start_dma(up->uart_dma.tx_dma_channel);
}
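All of the examples in this section revolve around the same plat-omap DMA calls. As a reading aid, here is a minimal sketch of the shared lifecycle; the "my_" names are placeholders rather than code from any of the drivers below, and it assumes the classic API in which omap_request_dma() fills *lch with a logical channel number and returns a negative errno on failure.

/*
 * Minimal sketch of the request/configure/start/free cycle.
 * Everything prefixed "my_" is a placeholder.
 */
static void my_dma_cb(int lch, u16 ch_status, void *data)
{
	/* runs in interrupt context once the transfer completes */
}

static int my_start_transfer(int dev_id, unsigned long src_phys,
			     unsigned long dst_phys, int nbytes)
{
	int lch, ret;

	ret = omap_request_dma(dev_id, "my device", my_dma_cb, NULL, &lch);
	if (ret < 0)
		return ret;

	omap_set_dma_src_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
				src_phys, 0, 0);
	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_CONSTANT,
				 dst_phys, 0, 0);
	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S8, nbytes, 1,
				     OMAP_DMA_SYNC_ELEMENT, dev_id, 0);
	omap_start_dma(lch);

	/* on completion or teardown: omap_stop_dma(lch); omap_free_dma(lch); */
	return 0;
}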
Example 2
/* this may get called several times by oss emulation */
static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = rtd->dai->cpu_dai->dma_data;
	int err = 0;

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma_data)
		return 0;

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	if (prtd->dma_data)
		return 0;
	prtd->dma_data = dma_data;
	err = omap_request_dma(dma_data->dma_req, dma_data->name,
			       omap_pcm_dma_irq, substream, &prtd->dma_ch);
	if (!err) {
		/*
		 * Link channel with itself so DMA doesn't need any
		 * reprogramming while looping the buffer
		 */
		omap_dma_link_lch(prtd->dma_ch, prtd->dma_ch);
	}

	return err;
}
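Since hw_params above deliberately skips re-requesting an already-held channel, the matching hw_free has to undo both the self-link and the request. A sketch of that counterpart, assuming the same prtd fields (not quoted from the driver):

static int omap_pcm_hw_free_sketch(struct snd_pcm_substream *substream)
{
	struct omap_runtime_data *prtd = substream->runtime->private_data;

	if (prtd->dma_data == NULL)
		return 0;

	/* undo the self-link set up in hw_params before freeing */
	omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch);
	omap_free_dma(prtd->dma_ch);
	prtd->dma_data = NULL;

	snd_pcm_set_runtime_buffer(substream, NULL);
	return 0;
}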
Example 3
static int serial_omap_start_rxdma(struct uart_omap_port *up)
{
	int ret = 0;

	if (up->uart_dma.rx_dma_channel == -1) {
		ret = omap_request_dma(up->uart_dma.uart_dma_rx,
				"UART Rx DMA",
				(void *)uart_rx_dma_callback, up,
				&(up->uart_dma.rx_dma_channel));
		if (ret < 0)
			return ret;

		omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
		omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				up->uart_dma.rx_buf_dma_phys, 0, 0);
		omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.rx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_rx, 0);
	}
	up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
	/* FIXME: Cache maintenance needed here? */
	omap_start_dma(up->uart_dma.rx_dma_channel);
	mod_timer(&up->uart_dma.rx_timer, jiffies +
				usecs_to_jiffies(up->uart_dma.rx_timeout));
	up->uart_dma.rx_dma_used = true;
	return ret;
}
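The start path arms a polling timer alongside the channel, so the stop path must disarm both before releasing anything. A sketch under the same field names (not quoted from the driver):

static void serial_omap_stop_rxdma_sketch(struct uart_omap_port *up)
{
	if (!up->uart_dma.rx_dma_used)
		return;

	del_timer(&up->uart_dma.rx_timer);
	omap_stop_dma(up->uart_dma.rx_dma_channel);
	omap_free_dma(up->uart_dma.rx_dma_channel);
	up->uart_dma.rx_dma_channel = -1;
	up->uart_dma.rx_dma_used = false;
}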
Example 4
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
#ifdef CONFIG_OMAP3_PM
	/* Disallow OCP bus idle. UART TX irqs are not seen during
	 * bus idle. Alternative is to set kernel timer at fifo
	 * drain rate.
	 */
	unsigned int tmp;
	tmp = (serial_in(up, UART_OMAP_SYSC) & 0x7) | (1 << 3);
	serial_out(up, UART_OMAP_SYSC, tmp); /* no-idle */
#endif

	if (up->use_dma && !(up->port.x_char)) {

		struct circ_buf *xmit = &up->port.info->xmit;
		unsigned int start = up->uart_dma.tx_buf_dma_phys +
				     (xmit->tail & (UART_XMIT_SIZE - 1));
		if (uart_circ_empty(xmit) || up->uart_dma.tx_dma_state)
			return;
		spin_lock(&(up->uart_dma.tx_lock));
		up->uart_dma.tx_dma_state = 1;
		spin_unlock(&(up->uart_dma.tx_lock));

		up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
		/* It is a circular buffer. See if the buffer has wrapped around.
		 * If yes it will have to be transferred in two separate dma
		 * transfers */
		if (start + up->uart_dma.tx_buf_size >=
				up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
			up->uart_dma.tx_buf_size =
				(up->uart_dma.tx_buf_dma_phys +
				UART_XMIT_SIZE) - start;

		if (up->uart_dma.tx_dma_channel == 0xFF) {
			if (omap_request_dma(uart_dma_tx[up->pdev->id-1],
					"UART Tx DMA",
					(void *)uart_tx_dma_callback, up,
					&(up->uart_dma.tx_dma_channel)) < 0) {
				up->uart_dma.tx_dma_state = 0;
				return;
			}
		}
		omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
					 OMAP_DMA_AMODE_CONSTANT,
					 UART_BASE(up->pdev->id - 1), 0, 0);
		omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
					OMAP_DMA_AMODE_POST_INC, start, 0, 0);

		omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
					     OMAP_DMA_DATA_TYPE_S8,
					     up->uart_dma.tx_buf_size, 1,
					     OMAP_DMA_SYNC_ELEMENT,
					     uart_dma_tx[(up->pdev->id)-1], 0);

		omap_start_dma(up->uart_dma.tx_dma_channel);

	} else if (!(up->ier & UART_IER_THRI)) {
		up->ier |= UART_IER_THRI;
		serial_out(up, UART_IER, up->ier);
	}
}
Example 5
/*
 * Routine to configure and start DMA for the MMC card
 */
static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
					struct mmc_request *req)
{
	int dma_ch = 0, ret = 0, err = 1, i;
	struct mmc_data *data = req->data;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		struct scatterlist *sgl;

		sgl = data->sg + i;
		if (sgl->length % data->blksz)
			return -EINVAL;
	}
	if ((data->blksz % 4) != 0)
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		return -EINVAL;

	/*
	 * If for some reason the DMA transfer is still active,
	 * we wait for timeout period and free the dma
	 */
	if (host->dma_ch != -1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(100);
		if (down_trylock(&host->sem)) {
			omap_free_dma(host->dma_ch);
			host->dma_ch = -1;
			up(&host->sem);
			return err;
		}
	} else {
		if (down_trylock(&host->sem))
			return err;
	}

	ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
			       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
	if (ret != 0) {
		dev_err(mmc_dev(host->mmc),
			"%s: omap_request_dma() failed with %d\n",
			mmc_hostname(host->mmc), ret);
		return ret;
	}

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, omap_hsmmc_get_dma_dir(host, data));
	host->dma_ch = dma_ch;
	host->dma_sg_idx = 0;

	omap_hsmmc_config_dma_params(host, data, data->sg);

	return 0;
}
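Every callback passed to omap_request_dma() has the signature (int lch, u16 ch_status, void *data). For a block driver like this one, a hypothetical completion handler (not the real omap_hsmmc_dma_cb; the host fields are assumed) would unmap the scatterlist, release the channel, and release the semaphore taken in the start path:

static void my_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap_hsmmc_host *host = data;
	struct mmc_data *mmc_data = host->mrq->data;	/* assumed field */

	dma_unmap_sg(mmc_dev(host->mmc), mmc_data->sg, mmc_data->sg_len,
		     omap_hsmmc_get_dma_dir(host, mmc_data));
	omap_free_dma(host->dma_ch);
	host->dma_ch = -1;
	up(&host->sem);		/* pairs with down_trylock() above */
}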
Example 6
static int abe_dbg_start_dma(struct omap_abe *abe, int circular)
{
    struct omap_dma_channel_params dma_params;
    int err;

    /* Start the DMA in one of two modes:
     *
     * 1) circular buffer mode, where the DMA restarts when it gets to
     *    the end of the buffer.
     * 2) default mode, where the DMA stops at the end of the buffer.
     */

    abe->debugfs.dma_req = OMAP44XX_DMA_ABE_REQ_7;
    err = omap_request_dma(abe->debugfs.dma_req, "ABE debug",
                           abe_dbg_dma_irq, abe, &abe->debugfs.dma_ch);
    if (err < 0)
        return err;
    if (abe->debugfs.circular) {
        /*
         * Link channel with itself so DMA doesn't need any
         * reprogramming while looping the buffer
         */
        omap_dma_link_lch(abe->debugfs.dma_ch, abe->debugfs.dma_ch);
    }

    memset(&dma_params, 0, sizeof(dma_params));
    dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
    dma_params.trigger = abe->debugfs.dma_req;
    dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
    dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
    dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
    dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
    dma_params.src_start = OMAP_ABE_D_DEBUG_FIFO_ADDR + ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU;
    dma_params.dst_start = abe->debugfs.buffer_addr;
    dma_params.src_port = OMAP_DMA_PORT_MPUI;
    dma_params.src_ei = 1;
    dma_params.src_fi = 1 - abe->debugfs.elem_bytes;

    /* 128 bytes shifted into words */
    dma_params.elem_count = abe->debugfs.elem_bytes >> 2;
    dma_params.frame_count =
        abe->debugfs.buffer_bytes / abe->debugfs.elem_bytes;
    omap_set_dma_params(abe->debugfs.dma_ch, &dma_params);

    omap_enable_dma_irq(abe->debugfs.dma_ch, OMAP_DMA_FRAME_IRQ);
    omap_set_dma_src_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16);
    omap_set_dma_dest_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16);

    abe->debugfs.reader_offset = 0;

    pm_runtime_get_sync(abe->dev);
    omap_start_dma(abe->debugfs.dma_ch);
    return 0;
}
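A stop routine symmetric to this one (a sketch; the driver's actual teardown is not shown here) would halt the channel, drop the runtime-PM reference taken above, unlink the circular channel, and free it:

static void abe_dbg_stop_dma_sketch(struct omap_abe *abe)
{
    omap_stop_dma(abe->debugfs.dma_ch);
    pm_runtime_put_sync(abe->dev);

    if (abe->debugfs.circular)
        omap_dma_unlink_lch(abe->debugfs.dma_ch, abe->debugfs.dma_ch);
    omap_free_dma(abe->debugfs.dma_ch);
}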
Example 7
int omap_request_alsa_sound_dma(int device_id, const char *device_name,
			   void *data, int **channels)
{
	int i, err = 0;
	int *chan = NULL;
	FN_IN;
	if (unlikely((NULL == channels) || (NULL == device_name))) {
		BUG();
		return -EPERM;
	}
	/* Try to allocate memory for the channels */
	*channels = kmalloc(sizeof(int) * nr_linked_channels, GFP_KERNEL);
	chan = *channels;
	if (NULL == chan) {
		ERR("No Memory for channel allocs!\n");
		FN_OUT(-ENOMEM);
		return -ENOMEM;
	}
	spin_lock(&dma_list_lock);
	for (i = 0; i < nr_linked_channels; i++) {
		err = omap_request_dma(device_id,
				device_name,
				sound_dma_irq_handler,
				data,
				&chan[i]);

		/* Handle Failure condition here */
		if (err < 0) {
			int j;

			for (j = 0; j < i; j++)
				omap_free_dma(chan[j]);

			spin_unlock(&dma_list_lock);
			kfree(chan);
			*channels = NULL;
			ERR("Error in requesting channel %d=0x%x\n", i,
			    err);
			FN_OUT(err);
			return err;
		}
	}

	/* Chain the channels together */
	if (!cpu_is_omap15xx())
		omap_sound_dma_link_lch(data);

	spin_unlock(&dma_list_lock);
	FN_OUT(0);
	return 0;
}
Example 8
static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 transmit)
{
	struct musb_dma_controller *controller = container_of(c,
			struct musb_dma_controller, controller);
	struct musb_dma_channel *musb_channel = NULL;
	struct dma_channel *channel = NULL;
	u8 bit;

	for (bit = 0; bit < MUSB_HSDMA_CHANNELS; bit++) {
		if (!(controller->used_channels & (1 << bit))) {
			controller->used_channels |= (1 << bit);
			musb_channel = &(controller->channel[bit]);
			musb_channel->controller = controller;
			musb_channel->idx = bit;
			musb_channel->epnum = hw_ep->epnum;
			musb_channel->transmit = transmit;
			channel = &(musb_channel->channel);
			channel->private_data = musb_channel;
			channel->status = MUSB_DMA_STATUS_FREE;
			channel->max_len = 0x10000;
			/* Tx => mode 1; Rx => mode 0 */
			channel->desired_mode = transmit;
			channel->actual_len = 0;
			musb_channel->sysdma_channel = -1;

			if (!transmit && use_system_dma()) {
				int ret;
				ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE,
					"MUSB SysDMA", musb_sysdma_completion,
					(void *) musb_channel,
					&(musb_channel->sysdma_channel));

				if (ret) {
					printk(KERN_ERR "request_dma failed:"
							" %d\n", ret);
					controller->used_channels &=
								~(1 << bit);
					channel->status =
							MUSB_DMA_STATUS_UNKNOWN;
					musb_channel->sysdma_channel = -1;
					channel = NULL;
				}
			}
			break;
		}
	}

	return channel;
}
Example 9
static int camif_open(void)
{
	int ret;
	
	if (!this->camera)
		return 0;
	
	// Camera interrupt
	if ((ret = request_irq(INT_CAMERA, camera_interrupt, SA_INTERRUPT,
			       "camera", NULL))) {
		err("Failed to acquire camera interrupt\n");
		return ret;
	}
	
#ifndef NO_CAM_DMA	
	if ((ret = omap_request_dma(eCameraRx, "camera dma", dma_callback,
				    NULL, (dma_regs_t **)&camera_dma_regs))) {
		err("No DMA available for camera\n");
		camera_dma_regs = NULL;
		free_irq(INT_CAMERA, NULL);
		return ret;
	}

	//dbg("Camera DMA at %p\n", camera_dma_regs);
	omap_dma_setup(eCameraRx, eDmaIn);
#endif

	CameraFPGAEnable();
	camif_start(current_exclk);

	if ((ret = this->camera->open())) {
		err("Error from Camera open\n");
		return ret;
	}
		
#ifdef MEASURE_FR
	dmac_sum = dmac_delta = dmac_N = 0;
#endif
	/*
	 * wait a few frames for camera to stabilize
	 */
	camif_wait_for_vsync_edge(EN_V_DOWN);
	camif_wait_for_vsync_edge(EN_V_DOWN);
	camif_wait_for_vsync_edge(EN_V_DOWN);

	return ret;
}
Example 10
static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
{
	const char *dma_dev_name;
	int sync_dev, dma_ch, is_read, r;

	is_read = !(data->flags & MMC_DATA_WRITE);
	del_timer_sync(&host->dma_timer);
	if (host->dma_ch >= 0) {
		if (is_read == host->dma_is_read)
			return 0;
		omap_free_dma(host->dma_ch);
		host->dma_ch = -1;
	}

	if (is_read) {
		if (host->id == 0) {
			sync_dev = OMAP_DMA_MMC_RX;
			dma_dev_name = "MMC1 read";
		} else {
			sync_dev = OMAP_DMA_MMC2_RX;
			dma_dev_name = "MMC2 read";
		}
	} else {
		if (host->id == 0) {
			sync_dev = OMAP_DMA_MMC_TX;
			dma_dev_name = "MMC1 write";
		} else {
			sync_dev = OMAP_DMA_MMC2_TX;
			dma_dev_name = "MMC2 write";
		}
	}
	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
			     host, &dma_ch);
	if (r != 0) {
		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
		return r;
	}
	host->dma_ch = dma_ch;
	host->dma_is_read = is_read;

	return 0;
}
Example 11
/*
 * Requests a dma transfer
 */
int request_dma(struct dma_transfer *transfer)
{
	int ret;

	transfer->finished = 0;
	printk("\nRequesting OMAP DMA transfer\n");
	ret = omap_request_dma(transfer->device_id, "dma_test",
			       dma_callback, (void *)transfer,
			       &(transfer->transfer_id));
	if (ret) {
		printk(" Request failed\n");
		transfer->request_success = 0;
		return 1;
	}
	printk(" Request succeeded, transfer id is %d\n",
	       transfer->transfer_id);
	transfer->request_success = 1;
	return 0;
}
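A hypothetical caller for this test helper (the remaining dma_transfer fields are assumed) fills in a device id, requests the channel, then programs and starts it:

static int run_test_transfer(struct dma_transfer *transfer)
{
	transfer->device_id = OMAP_DMA_NO_DEVICE;	/* software-triggered */

	if (request_dma(transfer))
		return -EBUSY;

	/* program source/destination/count on transfer->transfer_id, then: */
	omap_start_dma(transfer->transfer_id);
	return 0;
}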
Example 12
int omap3isp_hist_init(struct isp_device *isp)
{
    struct ispstat *hist = &isp->isp_hist;
    struct omap3isp_hist_config *hist_cfg;
    int ret = -1;

    hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
    if (hist_cfg == NULL)
        return -ENOMEM;

    memset(hist, 0, sizeof(*hist));
    if (HIST_CONFIG_DMA)
        ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
                               hist_dma_cb, hist, &hist->dma_ch);
    if (ret) {
        if (HIST_CONFIG_DMA)
            dev_warn(isp->dev, "hist: DMA request channel failed. "
                     "Using PIO only.\n");
        hist->dma_ch = -1;
    } else {
        dev_dbg(isp->dev, "hist: DMA channel = %d\n", hist->dma_ch);
        hist_dma_config(hist);
        omap_enable_dma_irq(hist->dma_ch, OMAP_DMA_BLOCK_IRQ);
    }

    hist->ops = &hist_ops;
    hist->priv = hist_cfg;
    hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
    hist->isp = isp;

    ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
    if (ret) {
        kfree(hist_cfg);
        if (HIST_USING_DMA(hist))
            omap_free_dma(hist->dma_ch);
    }

    return ret;
}
Example 13
static void serial_omap_start_rxdma(struct uart_omap_port *up)
{
#ifdef CONFIG_OMAP3_PM
	/* Disallow OCP bus idle. UART TX irqs are not seen during
	 * bus idle. Alternative is to set kernel timer at fifo
	 * drain rate.
	 */
	unsigned int tmp;
	tmp = (serial_in(up, UART_OMAP_SYSC) & 0x7) | (1 << 3);
	serial_out(up, UART_OMAP_SYSC, tmp); /* no-idle */
#endif
	if (up->uart_dma.rx_dma_channel == 0xFF) {
		if (omap_request_dma(uart_dma_rx[up->pdev->id-1], "UART Rx DMA",
			(void *)uart_rx_dma_callback, up,
				&(up->uart_dma.rx_dma_channel)) < 0)
			return;
	}

	omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				UART_BASE(up->pdev->id - 1), 0, 0);
	omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				up->uart_dma.rx_buf_dma_phys, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.rx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				uart_dma_rx[up->pdev->id-1], 0);
	up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
	omap_writel(0, OMAP34XX_DMA4_BASE
			+ OMAP_DMA4_CDAC(up->uart_dma.rx_dma_channel));
	omap_start_dma(up->uart_dma.rx_dma_channel);
	mod_timer(&up->uart_dma.rx_timer, jiffies +
				usecs_to_jiffies(up->uart_dma.rx_timeout));
	up->uart_dma.rx_dma_state = 1;
}
Example 14
int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
			      bool static_vrfb_allocation)
{
	int ret = 0, i, j;
	struct omap_vout_device *vout;
	struct video_device *vfd;
	int image_width, image_height;
	int vrfb_num_bufs = VRFB_NUM_BUFS;
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct omap2video_device *vid_dev =
		container_of(v4l2_dev, struct omap2video_device, v4l2_dev);

	vout = vid_dev->vouts[vid_num];
	vfd = vout->vfd;

	for (i = 0; i < VRFB_NUM_BUFS; i++) {
		if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
			dev_info(&pdev->dev, ": VRFB allocation failed\n");
			for (j = 0; j < i; j++)
				omap_vrfb_release_ctx(&vout->vrfb_context[j]);
			ret = -ENOMEM;
			goto free_buffers;
		}
	}

	/* Calculate VRFB memory size */
	/* allocate for worst case size */
	image_width = VID_MAX_WIDTH / TILE_SIZE;
	if (VID_MAX_WIDTH % TILE_SIZE)
		image_width++;

	image_width = image_width * TILE_SIZE;
	image_height = VID_MAX_HEIGHT / TILE_SIZE;

	if (VID_MAX_HEIGHT % TILE_SIZE)
		image_height++;

	image_height = image_height * TILE_SIZE;
	vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);

	/*
	 * Request and Initialize DMA, for DMA based VRFB transfer
	 */
	vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
	vout->vrfb_dma_tx.dma_ch = -1;
	vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
	ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
			omap_vout_vrfb_dma_tx_callback,
			(void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
	if (ret < 0) {
		vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
		dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
				" video%d\n", vfd->minor);
	}
	init_waitqueue_head(&vout->vrfb_dma_tx.wait);

	/* Static allocation of the VRFB buffers is done through
	   command line arguments */
	if (static_vrfb_allocation) {
		if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
			ret =  -ENOMEM;
			goto release_vrfb_ctx;
		}
		vout->vrfb_static_allocation = 1;
	}
	return 0;

release_vrfb_ctx:
	for (j = 0; j < VRFB_NUM_BUFS; j++)
		omap_vrfb_release_ctx(&vout->vrfb_context[j]);
free_buffers:
	omap_vout_free_buffers(vout);

	return ret;
}
Example 15
/*
 * Routine to configure and start DMA for the MMC card
 */
static int
mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
{
	int sync_dev, sync_dir = 0;
	int dma_ch = 0, ret = 0, err = 1;
	struct mmc_data *data = req->data;

	/*
	 * If for some reason the DMA transfer is still active,
	 * we wait for timeout period and free the dma
	 */
	if (host->dma_ch != -1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(100);
		if (down_trylock(&host->sem)) {
			omap_free_dma(host->dma_ch);
			host->dma_ch = -1;
			up(&host->sem);
			return err;
		}
	} else {
		if (down_trylock(&host->sem))
			return err;
	}

	if (!(data->flags & MMC_DATA_WRITE)) {
		host->dma_dir = DMA_FROM_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_RX;
		else
			sync_dev = OMAP24XX_DMA_MMC2_RX;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		if (host->id == OMAP_MMC1_DEVID)
			sync_dev = OMAP24XX_DMA_MMC1_TX;
		else
			sync_dev = OMAP24XX_DMA_MMC2_TX;
	}

	ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb,
			host, &dma_ch);
	if (ret != 0) {
		dev_dbg(mmc_dev(host->mmc),
			"%s: omap_request_dma() failed with %d\n",
			mmc_hostname(host->mmc), ret);
		return ret;
	}

	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
			data->sg_len, host->dma_dir);
	host->dma_ch = dma_ch;

	if (!(data->flags & MMC_DATA_WRITE))
		mmc_omap_config_dma_param(1, host, data);
	else
		mmc_omap_config_dma_param(0, host, data);

	if ((data->blksz % 4) == 0)
		omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
			(data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME,
			sync_dev, sync_dir);
	else {
		/* REVISIT: The MMC buffer increments only when MSB is written.
		 * Return error for blksz which is non multiple of four.
		 */
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     host->dma_dir);
		omap_free_dma(dma_ch);
		host->dma_ch = -1;
		return -EINVAL;
	}

	omap_start_dma(dma_ch);
	return 0;
}
Example 16
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
	struct omap_uart_port_info *pdata = up->pdev->dev.platform_data;
	struct circ_buf *xmit;
	unsigned int start;
	int ret = 0;

	if (!up->use_dma) {
		pm_runtime_get_sync(&up->pdev->dev);
		serial_omap_enable_ier_thri(up);
		if (pdata && pdata->set_noidle)
			pdata->set_noidle(up->pdev);
		pm_runtime_mark_last_busy(&up->pdev->dev);
		pm_runtime_put_autosuspend(&up->pdev->dev);
		return;
	}

	if (up->uart_dma.tx_dma_used)
		return;

	xmit = &up->port.state->xmit;

	if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
		pm_runtime_get_sync(&up->pdev->dev);
		ret = omap_request_dma(up->uart_dma.uart_dma_tx,
				"UART Tx DMA",
				(void *)uart_tx_dma_callback, up,
				&(up->uart_dma.tx_dma_channel));

		if (ret < 0) {
			serial_omap_enable_ier_thri(up);
			return;
		}
	}
	spin_lock(&(up->uart_dma.tx_lock));
	up->uart_dma.tx_dma_used = true;
	spin_unlock(&(up->uart_dma.tx_lock));

	start = up->uart_dma.tx_buf_dma_phys +
				(xmit->tail & (UART_XMIT_SIZE - 1));

	up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
	if (start + up->uart_dma.tx_buf_size >=
			up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
		up->uart_dma.tx_buf_size =
			(up->uart_dma.tx_buf_dma_phys +
			UART_XMIT_SIZE) - start;

	omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
	omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC, start, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.tx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_tx, 0);

	omap_start_dma(up->uart_dma.tx_dma_channel);
}
Example 17
static int omapvout_dss_acquire_vrfb(struct omapvout_device *vout)
{
	int rc = 0;
	int size;
	int w, h;
	int max_pixels;
	struct omapvout_dss_vrfb *vrfb;

	/* It is assumed that the caller has locked the vout mutex */

	vrfb = &vout->dss->vrfb;
	vrfb->dma_id = OMAP_DMA_NO_DEVICE;
	vrfb->dma_ch = -1;
	vrfb->req_status = DMA_CHAN_NOT_ALLOTED;
	vrfb->next = 0;

	rc = omap_vrfb_request_ctx(&vrfb->ctx[0]);
	if (rc != 0) {
		DBG("VRFB context allocation 0 failed %d\n", rc);
		goto failed_ctx0;
	}

	rc = omap_vrfb_request_ctx(&vrfb->ctx[1]);
	if (rc != 0) {
		DBG("VRFB context allocation 1 failed %d\n", rc);
		goto failed_ctx1;
	}

	/* Determine the VRFB buffer size by oversizing for the VRFB */
	w = vout->max_video_width;
	h = vout->max_video_height;
	max_pixels = w * h;
	w += 32; /* Oversize as typical for VRFB */
	h += 32;
	size = PAGE_ALIGN(w * h * (vout->max_video_buffer_size / max_pixels));
	vrfb->size = size;

	rc = omapvout_mem_alloc(size, &vrfb->phy_addr[0], &vrfb->virt_addr[0]);
	if (rc != 0) {
		DBG("VRFB buffer alloc 0 failed %d\n", rc);
		goto failed_mem0;
	}

	rc = omapvout_mem_alloc(size, &vrfb->phy_addr[1], &vrfb->virt_addr[1]);
	if (rc != 0) {
		DBG("VRFB buffer alloc 1 failed %d\n", rc);
		goto failed_mem1;
	}

	rc = omap_request_dma(vrfb->dma_id, "VRFB DMA",
				omapvout_dss_vrfb_dma_cb,
				(void *)vrfb,
				&vrfb->dma_ch);
	if (rc != 0) {
		printk(KERN_INFO "No VRFB DMA channel for %d\n", vout->id);
		goto failed_dma;
	}

	vrfb->req_status = DMA_CHAN_ALLOTED;
	init_waitqueue_head(&vrfb->wait);

	return rc;

failed_dma:
	omapvout_mem_free(vrfb->phy_addr[1], vrfb->virt_addr[1], size);
failed_mem1:
	omapvout_mem_free(vrfb->phy_addr[0], vrfb->virt_addr[0], size);
failed_mem0:
	omap_vrfb_release_ctx(&vrfb->ctx[1]);
failed_ctx1:
	omap_vrfb_release_ctx(&vrfb->ctx[0]);
failed_ctx0:
	return rc;
}
Example 18
static int omap1610_irda_start(struct net_device *dev)
{
	struct omap1610_irda *si = dev->priv;
	int err;
	unsigned long flags = 0;

#ifdef CONFIG_MACH_OMAP_H3
	u8 ioExpanderVal = 0;
#endif

	__ECHO_IN;
	si->speed = 9600;

	err = request_irq(dev->irq, omap1610_irda_irq, 0, dev->name, dev);
	if (err)
		goto err_irq;

	/*
	 * The interrupt must remain disabled for now.
	 */

	disable_irq(dev->irq);

	/*  Request DMA channels for IrDA hardware */

	if (omap_request_dma(OMAP_DMA_UART3_RX, "IrDA Rx DMA",
			     (void *)omap1610_irda_rx_dma_callback,
			     dev, &(si->rx_dma_channel))) {
		printk(KERN_ERR "Failed to request IrDA Rx DMA \n");
		goto err_irq;
	}

	if (omap_request_dma(OMAP_DMA_UART3_TX, "IrDA Tx DMA",
			     (void *)omap1610_irda_tx_dma_callback,
			     dev, &(si->tx_dma_channel))) {
		printk(KERN_ERR "Failed to request IrDA Tx DMA\n");
		omap_free_dma(si->rx_dma_channel);
		goto err_irq;
	}

	/* Allocate TX and RX buffers for DMA channels */

	si->rx_buf_dma_virt =
	    dma_alloc_coherent(NULL, 4096, &(si->rx_buf_dma_phys), flags);

	si->tx_buf_dma_virt =
	    dma_alloc_coherent(NULL, 4096, &(si->tx_buf_dma_phys), flags);

	/*
	 * Setup the serial port for the specified config.
	 */

#ifdef CONFIG_MACH_OMAP_H3

	if ((err = read_gpio_expa(&ioExpanderVal, 0x26))) {
		printk(KERN_ERR "Error reading from I/O EXPANDER \n");
		return err;
	}

	ioExpanderVal |= 0x40;	/* 'P6' Enable IRDA_TX and IRDA_RX */

	if ((err = write_gpio_expa(ioExpanderVal, 0x26))) {
		printk(KERN_ERR "Error writing to I/O EXPANDER \n");
		return err;
	}
#endif
	err = omap1610_irda_startup(dev);

	if (err)
		goto err_startup;

	omap1610_irda_set_speed(dev, si->speed = 9600);

	/*
	 * Open a new IrLAP layer instance.
	 */

	si->irlap = irlap_open(dev, &si->qos, "omap_sir");

	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/* Now enable the interrupt and start the queue  */
	si->open = 1;

	/* Start RX DMA */

	omap1610_irda_start_rx_dma(si);

	enable_irq(dev->irq);
	netif_start_queue(dev);

	__ECHO_OUT;

	return 0;

err_irlap:
	si->open = 0;
	omap1610_irda_shutdown(si);
err_startup:
err_irq:
	free_irq(dev->irq, dev);
	return err;
}
Example 19
static int __init omap1_cam_probe(struct platform_device *pdev)
{
	struct omap1_cam_dev *pcdev;
	struct resource *res;
	struct clk *clk;
	void __iomem *base;
	unsigned int irq;
	int err = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || (int)irq <= 0) {
		err = -ENODEV;
		goto exit;
	}

	clk = clk_get(&pdev->dev, "armper_ck");
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		goto exit;
	}

	pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
	if (!pcdev) {
		dev_err(&pdev->dev, "Could not allocate pcdev\n");
		err = -ENOMEM;
		goto exit_put_clk;
	}

	pcdev->res = res;
	pcdev->clk = clk;

	pcdev->pdata = pdev->dev.platform_data;
	if (pcdev->pdata) {
		pcdev->pflags = pcdev->pdata->flags;
		pcdev->camexclk = pcdev->pdata->camexclk_khz * 1000;
	}

	switch (pcdev->camexclk) {
	case 6000000:
	case 8000000:
	case 9600000:
	case 12000000:
	case 24000000:
		break;
	default:
		dev_warn(&pdev->dev,
				"Incorrect sensor clock frequency %ld kHz, "
				"should be one of 0, 6, 8, 9.6, 12 or 24 MHz, "
				"please correct your platform data\n",
				pcdev->pdata->camexclk_khz);
		pcdev->camexclk = 0;
		/* fall through */
	case 0:
		dev_info(&pdev->dev,
				"Not providing sensor clock\n");
	}

	INIT_LIST_HEAD(&pcdev->capture);
	spin_lock_init(&pcdev->lock);

	/*
	 * Request the region.
	 */
	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
		err = -EBUSY;
		goto exit_kfree;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		err = -ENOMEM;
		goto exit_release;
	}
	pcdev->irq = irq;
	pcdev->base = base;

	sensor_reset(pcdev, true);

	err = omap_request_dma(OMAP_DMA_CAMERA_IF_RX, DRIVER_NAME,
			dma_isr, (void *)pcdev, &pcdev->dma_ch);
	if (err < 0) {
		dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
		err = -EBUSY;
		goto exit_iounmap;
	}
	dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);

	/* preconfigure DMA */
	omap_set_dma_src_params(pcdev->dma_ch, OMAP_DMA_PORT_TIPB,
			OMAP_DMA_AMODE_CONSTANT, res->start + REG_CAMDATA,
			0, 0);
	omap_set_dma_dest_burst_mode(pcdev->dma_ch, OMAP_DMA_DATA_BURST_4);
	/* setup DMA autoinitialization */
	omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);

	err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
	if (err) {
		dev_err(&pdev->dev, "Camera interrupt register failed\n");
		goto exit_free_dma;
	}

	pcdev->soc_host.drv_name	= DRIVER_NAME;
	pcdev->soc_host.ops		= &omap1_host_ops;
	pcdev->soc_host.priv		= pcdev;
	pcdev->soc_host.v4l2_dev.dev	= &pdev->dev;
	pcdev->soc_host.nr		= pdev->id;

	err = soc_camera_host_register(&pcdev->soc_host);
	if (err)
		goto exit_free_irq;

	dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");

	return 0;

exit_free_irq:
	free_irq(pcdev->irq, pcdev);
exit_free_dma:
	omap_free_dma(pcdev->dma_ch);
exit_iounmap:
	iounmap(base);
exit_release:
	release_mem_region(res->start, resource_size(res));
exit_kfree:
	kfree(pcdev);
exit_put_clk:
	clk_put(clk);
exit:
	return err;
}
Example 20
static int omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	struct onenand_chip *this;
	int r;
	struct resource *res;
	struct mtd_part_parser_data ppdata = {};

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->flags = pdata->flags;
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		r = -EINVAL;
		dev_err(&pdev->dev, "error getting memory resource\n");
		goto err_kfree;
	}

	c->phys_base = res->start;
	c->mem_size = resource_size(res);

	if (request_mem_region(c->phys_base, c->mem_size,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
						c->phys_base, c->mem_size);
		r = -EBUSY;
		goto err_kfree;
	}
	c->onenand.base = ioremap(c->phys_base, c->mem_size);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, &c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base, c->freq);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	this = &c->onenand;
	if (c->dma_channel >= 0) {
		this->wait = omap2_onenand_wait;
		if (c->flags & ONENAND_IN_OMAP34XX) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if (pdata->regulator_can_sleep) {
		c->regulator = regulator_get(&pdev->dev, "vonenand");
		if (IS_ERR(c->regulator)) {
			dev_err(&pdev->dev,  "Failed to get regulator\n");
			r = PTR_ERR(c->regulator);
			goto err_release_dma;
		}
		c->onenand.enable = omap2_onenand_enable;
		c->onenand.disable = omap2_onenand_disable;
	}

	if (pdata->skip_initial_unlocking)
		this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_regulator;

	ppdata.of_node = pdata->of_node;
	r = mtd_device_parse_register(&c->mtd, NULL, &ppdata,
				      pdata ? pdata->parts : NULL,
				      pdata ? pdata->nr_parts : 0);
	if (r)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_regulator:
	regulator_put(c->regulator);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, c->mem_size);
err_kfree:
	kfree(c);

	return r;
}
Example 21
static int musb_sdma_channel_request(void *musb_channel, int *ch_num)
{
	return omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "MUSB SysDMA",
			 musb_sysdma_completion, (void *) musb_channel, ch_num);
}
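The matching release is the one-line inverse (a sketch of the obvious counterpart, not quoted from the driver):

static void musb_sdma_channel_release(int ch_num)
{
	omap_free_dma(ch_num);
}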
Example 22
/**
 * @brief omap_request_dma_chain : Request a chain of DMA channels
 *
 * @param dev_id - Device id using the dma channel
 * @param dev_name - Device name
 * @param callback - Call back function
 * @chain_id - Output: id of the allocated chain (its first channel)
 * @no_of_chans - Number of channels requested
 * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
 * 					      OMAP_DMA_DYNAMIC_CHAIN
 * @params - Channel parameters
 *
 * @return - Success : 0
 * 	     Failure: -EINVAL/-ENOMEM
 */
int omap_request_dma_chain(int dev_id, const char *dev_name,
			   void (*callback) (int lch, u16 ch_status,
					     void *data),
			   int *chain_id, int no_of_chans, int chain_mode,
			   struct omap_dma_channel_params params)
{
	int *channels;
	int i, err;
	
	/* Is the chain mode valid? */
	if (chain_mode != OMAP_DMA_STATIC_CHAIN
		&& chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
		IOLog("Invalid chain mode requested\n");
		return -EINVAL;
	}
	
	if (unlikely((no_of_chans < 1
				  || no_of_chans > dma_lch_count))) {
		IOLog("Invalid Number of channels requested\n");
		return -EINVAL;
	}
	
	/* Allocate a queue to maintain the status of the channels
	 * in the chain */
	channels = malloc(sizeof(*channels) * no_of_chans);
	
	if (channels == NULL) {
		IOLog("omap_dma: No memory for channel queue\n");
		return -ENOMEM;
	}
	
	/* request and reserve DMA channels for the chain */
	for (i = 0; i < no_of_chans; i++) {
		err = omap_request_dma(dev_id, dev_name,
							   callback, NULL, &channels[i]);
		if (err < 0) {
			int j;
			for (j = 0; j < i; j++)
				omap_free_dma(channels[j]);
			
			free(channels);
			
			IOLog("omap_dma: Request failed %d\n", err);
			return err;
		}
		
		dma_chan[channels[i]].prev_linked_ch = -1;
		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
		
		/*
		 * Allowing client drivers to set common parameters now,
		 * so that later only relevant (src_start, dest_start
		 * and element count) can be set
		 */
		omap_set_dma_params(channels[i], &params);
	}
	
	*chain_id = channels[0];
	dma_linked_lch[*chain_id].linked_dmach_q = channels;
	dma_linked_lch[*chain_id].chain_mode = chain_mode;
	dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
	dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
	
	for (i = 0; i < no_of_chans; i++)
		dma_chan[channels[i]].chain_id = *chain_id;
	
	/* Reset the Queue pointers */
	OMAP_DMA_CHAIN_QINIT(*chain_id);
	
	/* Set up the chain */
	if (no_of_chans == 1)
		create_dma_lch_chain(channels[0], channels[0]);
	else {
		for (i = 0; i < (no_of_chans - 1); i++)
			create_dma_lch_chain(channels[i], channels[i + 1]);
	}
	
	return 0;
}
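Releasing a chain reverses this setup. Assuming the omap_stop_dma_chain_transfers() / omap_free_dma_chain() counterparts from the same plat-omap chaining API, a caller would do:

/* Sketch: tear down a chain obtained from omap_request_dma_chain(). */
static void my_release_chain(int chain_id)
{
	omap_stop_dma_chain_transfers(chain_id);
	omap_free_dma_chain(chain_id);
}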
Example 23
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n",	c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt,
				     IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}
Example 24
static struct dma_channel *
tusb_omap_dma_allocate(struct dma_controller *c,
		struct musb_hw_ep *hw_ep,
		u8 tx)
{
	int ret, i;
	const char		*dev_name;
	struct tusb_omap_dma	*tusb_dma;
	struct musb		*musb;
	void __iomem		*tbase;
	struct dma_channel	*channel = NULL;
	struct tusb_omap_dma_ch	*chdat = NULL;
	u32			reg;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	musb = tusb_dma->musb;
	tbase = musb->ctrl_base;

	reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
	if (tx)
		reg &= ~(1 << hw_ep->epnum);
	else
		reg &= ~(1 << (hw_ep->epnum + 15));
	musb_writel(tbase, TUSB_DMA_INT_MASK, reg);

	/* REVISIT: Why does dmareq5 not work? */
	if (hw_ep->epnum == 0) {
		dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
		return NULL;
	}

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
			ch->status = MUSB_DMA_STATUS_FREE;
			channel = ch;
			chdat = ch->private_data;
			break;
		}
	}

	if (!channel)
		return NULL;

	if (tx) {
		chdat->tx = 1;
		dev_name = "TUSB transmit";
	} else {
		chdat->tx = 0;
		dev_name = "TUSB receive";
	}

	chdat->musb = tusb_dma->musb;
	chdat->tbase = tusb_dma->tbase;
	chdat->hw_ep = hw_ep;
	chdat->epnum = hw_ep->epnum;
	chdat->dmareq = -1;
	chdat->completed_len = 0;
	chdat->tusb_dma = tusb_dma;

	channel->max_len = 0x7fffffff;
	channel->desired_mode = 0;
	channel->actual_len = 0;

	if (tusb_dma->multichannel) {
		ret = tusb_omap_dma_allocate_dmareq(chdat);
		if (ret != 0)
			goto free_dmareq;

		ret = omap_request_dma(chdat->sync_dev, dev_name,
				tusb_omap_dma_cb, channel, &chdat->ch);
		if (ret != 0)
			goto free_dmareq;
	} else if (tusb_dma->ch == -1) {
		tusb_dma->dmareq = 0;
		tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;

		/* Callback data gets set later in the shared dmareq case */
		ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
				tusb_omap_dma_cb, NULL, &tusb_dma->ch);
		if (ret != 0)
			goto free_dmareq;

		chdat->dmareq = -1;
		chdat->ch = -1;
	}

	dev_dbg(musb->controller, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
		chdat->epnum,
		chdat->tx ? "tx" : "rx",
		chdat->ch >= 0 ? "dedicated" : "shared",
		chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
		chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
		chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);

	return channel;

free_dmareq:
	tusb_omap_dma_free_dmareq(chdat);

	dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	return NULL;
}
Example 25
static int aes_dma_init(struct aes_hwa_ctx *ctx)
{
	int err = -ENOMEM;
	struct omap_dma_channel_params dma_params;

	ctx->dma_lch_out = -1;
	ctx->dma_lch_in = -1;

	ctx->buflen = PAGE_SIZE;
	ctx->buflen &= ~(AES_BLOCK_SIZE - 1);

	dprintk(KERN_INFO "aes_dma_init(%p)\n", ctx);

	/* Allocate and map cache buffers */
	ctx->buf_in = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_in,
		GFP_KERNEL);
	if (!ctx->buf_in) {
		dprintk(KERN_ERR "SMC: Unable to alloc AES in cache buffer\n");
		return -ENOMEM;
	}

	ctx->buf_out = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_out,
		GFP_KERNEL);
	if (!ctx->buf_out) {
		dprintk(KERN_ERR "SMC: Unable to alloc AES out cache buffer\n");
		dma_free_coherent(NULL, ctx->buflen, ctx->buf_in,
			ctx->dma_addr_in);
		return -ENOMEM;
	}

	/* Request DMA channels */
	err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback, ctx,
		&ctx->dma_lch_in);
	if (err) {
		dprintk(KERN_ERR "SMC: Unable to request AES RX DMA channel\n");
		goto err_dma_in;
	}

	err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback,
		ctx, &ctx->dma_lch_out);
	if (err) {
		dprintk(KERN_ERR "SMC: Unable to request AES TX DMA channel\n");
		goto err_dma_out;
	}

	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
	dma_params.src_ei = 0;
	dma_params.src_fi = 0;
	dma_params.dst_ei = 0;
	dma_params.dst_fi = 0;
	dma_params.read_prio = 0;
	dma_params.write_prio = 0;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;

	/* IN */
	dma_params.trigger = ctx->dma_in;
	dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;

	omap_set_dma_params(ctx->dma_lch_in, &dma_params);
	omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);

	/* OUT */
	dma_params.trigger = ctx->dma_out;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	omap_set_dma_params(ctx->dma_lch_out, &dma_params);
	omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
	omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);

	omap_dma_base = ioremap(0x4A056000, 0x1000);
	if (!omap_dma_base) {
		printk(KERN_ERR "SMC: Unable to ioremap DMA registers\n");
		err = -ENOMEM;
		omap_free_dma(ctx->dma_lch_out);
		goto err_dma_out;
	}

	dprintk(KERN_INFO "aes_dma_init(%p) configured DMA channels"
		"(RX = %d, TX = %d)\n", ctx, ctx->dma_lch_in, ctx->dma_lch_out);

	return 0;

err_dma_out:
	omap_free_dma(ctx->dma_lch_in);
err_dma_in:
	dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
	dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);

	return err;
}
Example 26
static int omap_irda_start(struct net_device *dev)
{
	struct omap_irda *omap_ir = netdev_priv(dev);
	int err;

	omap_ir->speed = 9600;

	err = request_irq(dev->irq, omap_irda_irq, 0, dev->name, dev);
	if (err)
		goto err_irq;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(dev->irq);

	/*  Request DMA channels for IrDA hardware */
	if (omap_request_dma(omap_ir->pdata->rx_channel, "IrDA Rx DMA",
			(void *)omap_irda_rx_dma_callback,
			dev, &(omap_ir->rx_dma_channel))) {
		printk(KERN_ERR "Failed to request IrDA Rx DMA\n");
		goto err_irq;
	}

	if (omap_request_dma(omap_ir->pdata->tx_channel, "IrDA Tx DMA",
			(void *)omap_irda_tx_dma_callback,
			dev, &(omap_ir->tx_dma_channel))) {
		printk(KERN_ERR "Failed to request IrDA Tx DMA\n");
		omap_free_dma(omap_ir->rx_dma_channel);
		goto err_irq;
	}

	/* Allocate TX and RX buffers for DMA channels */
	omap_ir->rx_buf_dma_virt =
		dma_alloc_coherent(NULL, IRDA_SKB_MAX_MTU,
				&(omap_ir->rx_buf_dma_phys),
				GFP_KERNEL);

	if (!omap_ir->rx_buf_dma_virt) {
		printk(KERN_ERR "Unable to allocate memory for rx_buf_dma\n");
		goto err_irq;
	}

	omap_ir->tx_buf_dma_virt =
		dma_alloc_coherent(NULL, IRDA_SIR_MAX_FRAME,
				&(omap_ir->tx_buf_dma_phys),
				GFP_KERNEL);

	if (!omap_ir->tx_buf_dma_virt) {
		printk(KERN_ERR "Unable to allocate memory for tx_buf_dma\n");
		goto err_mem1;
	}

	/*
	 * Setup the serial port for the specified config.
	 */
	if (omap_ir->pdata->select_irda)
		omap_ir->pdata->select_irda(omap_ir->dev, IR_SEL);

	err = omap_irda_startup(dev);

	if (err)
		goto err_startup;

	omap_irda_set_speed(dev, omap_ir->speed = 9600);

	/*
	 * Open a new IrLAP layer instance.
	 */
	omap_ir->irlap = irlap_open(dev, &omap_ir->qos, "omap_sir");

	err = -ENOMEM;
	if (!omap_ir->irlap)
		goto err_irlap;

	/* Now enable the interrupt and start the queue */
	omap_ir->open = 1;

	/* Start RX DMA */
	omap_irda_start_rx_dma(omap_ir);

	enable_irq(dev->irq);
	netif_start_queue(dev);

	return 0;

err_irlap:
	omap_ir->open = 0;
	omap_irda_shutdown(omap_ir);
err_startup:
	dma_free_coherent(NULL, IRDA_SIR_MAX_FRAME,
			omap_ir->tx_buf_dma_virt, omap_ir->tx_buf_dma_phys);
err_mem1:
	dma_free_coherent(NULL, IRDA_SKB_MAX_MTU,
			omap_ir->rx_buf_dma_virt, omap_ir->rx_buf_dma_phys);
err_irq:
	free_irq(dev->irq, dev);
	return err;
}