Example #1
/**
 * Initialize the BAM DMA module.
 *
 * @bam_props - properties of the BAM to register, or NULL if there are
 * no BAM DMA devices
 *
 * @return 0 on success, negative value on error
 */
int sps_dma_init(const struct sps_bam_props *bam_props)
{
	struct sps_bam_props props;
	const struct sps_bam_props *bam_reg;
	u32 h;

	/* Init local data */
	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
	num_bams = 0;
	memset(bam_handles, 0, sizeof(bam_handles));

	/* Create a mutex to control access to the BAM-DMA devices */
	mutex_init(&bam_dma_lock);

	/* Are there any BAM DMA devices? */
	if (bam_props == NULL)
		return 0;

	/*
	 * Register all BAMs from the BSP properties, but only use the
	 * first BAM-DMA device for allocations.
	 */
	if (bam_props->phys_addr) {
		/* Force multi-EE option for all BAM-DMAs */
		bam_reg = bam_props;
		if ((bam_props->options & SPS_BAM_OPT_BAMDMA) &&
		    (bam_props->manage & SPS_BAM_MGR_MULTI_EE) == 0) {
			SPS_DBG("sps:Setting multi-EE options for BAM-DMA: %x",
				bam_props->phys_addr);
			props = *bam_props;
			props.manage |= SPS_BAM_MGR_MULTI_EE;
			bam_reg = &props;
		}

		/* Register the BAM */
		if (sps_register_bam_device(bam_reg, &h)) {
			SPS_ERR("sps:Fail to register BAM-DMA BAM device: "
				"phys 0x%0x", bam_props->phys_addr);
			return SPS_ERROR;
		}

		/* Record the BAM so that it may be deregistered later */
		if (num_bams < MAX_BAM_DMA_BAMS) {
			bam_handles[num_bams] = h;
			num_bams++;
		} else {
			SPS_ERR("sps:BAM-DMA: BAM limit exceeded: %d",
					num_bams);
			/* Don't leak the registration made above */
			sps_deregister_bam_device(h);
			return SPS_ERROR;
		}
	} else {
		SPS_ERR("sps:BAM-DMA phys_addr is zero.");
		return SPS_ERROR;
	}
	return 0;
}
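For symmetry, here is a minimal teardown sketch. It is an illustration, not the driver's actual de-init routine: it assumes the same file-scope state (bam_handles, num_bams, bam_dma_lock) and uses only sps_deregister_bam_device(), the counterpart call that also appears in Example #2 below.

/* Hypothetical teardown: deregister the BAMs recorded by sps_dma_init() */
static void sps_dma_teardown_sketch(void)
{
	u32 i;

	mutex_lock(&bam_dma_lock);
	for (i = 0; i < num_bams; i++) {
		sps_deregister_bam_device(bam_handles[i]);
		bam_handles[i] = 0;
	}
	num_bams = 0;
	mutex_unlock(&bam_dma_lock);
}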
Example #2
static void bam_init(struct work_struct *work)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;

	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_alloc_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
							&dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto rx_alloc_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
							&dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	queue_rx();
	return;

/* Unwind in the reverse order of the setup above */
rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
				rx_desc_mem_buf.phys_base);
rx_mem_failed:
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
rx_alloc_failed:
	sps_disconnect(bam_tx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
				tx_desc_mem_buf.phys_base);
tx_mem_failed:
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_alloc_failed:
	sps_deregister_bam_device(h);
register_bam_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	/*return ret;*/
	return;
}
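Once bam_init() returns with bam_mux_initialized set, data moves through the connected pipes via sps_transfer_one(). The sketch below is an illustration, not code from the driver: buf_phys, buf_len and pkt are placeholder parameters, and completion is reported asynchronously through bam_mux_tx_notify() because the pipe registered SPS_O_EOT above.

/*
 * Illustrative TX submission on bam_tx_pipe (sketch only). The EOT flag
 * makes the registered callback fire once the descriptor completes.
 */
static int bam_tx_one_sketch(u32 buf_phys, u32 buf_len, void *pkt)
{
	return sps_transfer_one(bam_tx_pipe, buf_phys, buf_len, pkt,
				SPS_IOVEC_FLAG_EOT);
}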
int qpic_init_sps(struct platform_device *pdev,
				struct qpic_sps_endpt *end_point)
{
	int rc = 0;
	struct sps_pipe *pipe_handle;
	struct sps_connect *sps_config = &end_point->config;
	struct sps_register_event *sps_event = &end_point->bam_event;
	struct sps_bam_props bam = {0};
	u32 bam_handle = 0;

	if (qpic_res->sps_init)
		return 0;
	bam.phys_addr = qpic_res->qpic_phys + 0x4000;
	bam.virt_addr = qpic_res->qpic_base + 0x4000;
	bam.irq = qpic_res->irq - 4;
	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;

	rc = sps_phy2h(bam.phys_addr, &bam_handle);
	if (rc)
		rc = sps_register_bam_device(&bam, &bam_handle);
	if (rc) {
		pr_err("%s: BAM device registration failed %d\n",
				__func__, rc);
		rc = -ENOMEM;
		goto out;
	}

	pipe_handle = sps_alloc_endpoint();
	if (!pipe_handle) {
		pr_err("sps_alloc_endpoint() failed\n");
		rc = -ENOMEM;
		goto out;
	}

	rc = sps_get_config(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_get_config() failed %d\n", rc);
		goto free_endpoint;
	}

	/* WRITE CASE: source - system memory; destination - BAM */
	sps_config->source = SPS_DEV_HANDLE_MEM;
	sps_config->destination = bam_handle;
	sps_config->mode = SPS_MODE_DEST;
	sps_config->dest_pipe_index = 6;

	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
	sps_config->lock_group = 0;
	/*
	 * The descriptor FIFO is cyclic, so one slot must always stay
	 * empty: to allow N descriptors to be outstanding before any of
	 * them is acked, the FIFO needs (N + 1) slots. Here the FIFO is
	 * sized at 64 entries of sizeof(struct sps_iovec).
	 */
	sps_config->desc.size = 64 * sizeof(struct sps_iovec);
	sps_config->desc.base = dmam_alloc_coherent(&pdev->dev,
					sps_config->desc.size,
					&sps_config->desc.phys_base,
					GFP_KERNEL);
	if (!sps_config->desc.base) {
		pr_err("dmam_alloc_coherent() failed for size %x\n",
				sps_config->desc.size);
		rc = -ENOMEM;
		goto free_endpoint;
	}
	memset(sps_config->desc.base, 0x00, sps_config->desc.size);

	rc = sps_connect(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_connect() failed %d\n", rc);
		goto free_endpoint;
	}

	init_completion(&end_point->completion);
	sps_event->mode = SPS_TRIGGER_WAIT;
	sps_event->options = SPS_O_EOT;
	sps_event->xfer_done = &end_point->completion;
	sps_event->user = (void *)qpic_res;

	rc = sps_register_event(pipe_handle, sps_event);
	if (rc) {
		pr_err("sps_register_event() failed %d\n", rc);
		goto sps_disconnect;
	}

	end_point->handle = pipe_handle;
	qpic_res->sps_init = true;
	goto out;
sps_disconnect:
	sps_disconnect(pipe_handle);
free_endpoint:
	sps_free_endpoint(pipe_handle);
out:
	return rc;
}
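Because qpic_init_sps() registers its event with SPS_TRIGGER_WAIT and points xfer_done at end_point->completion, a caller submits a descriptor and then blocks on that completion. A minimal sketch under those assumptions follows; data_phys and size are placeholder parameters, and a production caller would also re-initialize the completion and use a timeout.

/*
 * Illustrative blocking write through the endpoint configured above
 * (sketch only). SPS_IOVEC_FLAG_EOT raises the EOT event, which
 * completes end_point->completion because of SPS_TRIGGER_WAIT.
 */
static int qpic_sps_write_sketch(struct qpic_sps_endpt *end_point,
				u32 data_phys, u32 size)
{
	int rc;

	rc = sps_transfer_one(end_point->handle, data_phys, size, NULL,
				SPS_IOVEC_FLAG_EOT);
	if (rc)
		return rc;
	wait_for_completion(&end_point->completion);
	return 0;
}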