Esempio n. 1
0
/*
 * Program a channel's ownership, event masks, watermark and addresses,
 * then load its context into the SDMA engine.
 *
 * Returns 0 on success or a negative error code (from sdma_load_context,
 * or -EINVAL for an out-of-range event_id0).
 */
static int sdma_config_channel(struct sdma_channel *sdmac)
{
	int ret;

	sdma_disable_channel(sdmac);

	sdmac->event_mask0 = 0;
	sdmac->event_mask1 = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0) {
		/* NOTE(review): bound looks wrong for 48-event silicon
		 * (sdma_init sets num_events = 48 on version 2) — confirm. */
		if (sdmac->event_id0 > 32)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
			if (sdmac->event_id1 > 31)
				sdmac->watermark_level |= 1u << 31;
			sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
			if (sdmac->event_id0 > 31)
				sdmac->watermark_level |= 1u << 30;
		} else {
			/*
			 * Single-event channel: only one of the two mask
			 * registers applies.  The previous code computed
			 * 1 << (event_id0 - 32) unconditionally, which is
			 * undefined behaviour (negative shift count) for
			 * any event_id0 below 32.
			 */
			if (sdmac->event_id0 > 31) {
				sdmac->event_mask0 = 0;
				sdmac->event_mask1 =
					1 << (sdmac->event_id0 % 32);
			} else {
				sdmac->event_mask0 = 1 << sdmac->event_id0;
				sdmac->event_mask1 = 0;
			}
		}
		/* Watermark level: use the value configured by the client
		 * as-is (the old "|=" of the field with itself was a no-op). */
		/* Address */
		sdmac->shp_addr = sdmac->per_address;
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}
Esempio n. 2
0
/*
 * Load a custom SDMA script for the FPGA (EIM bus) interface and prepare
 * channel 1 to run it.  On success the channel is stored in the module
 * global xillybus_sdmac for later use.
 *
 * Returns 0 on success or a negative error code.
 */
static int setup_fpga_interface(struct sdma_engine *sdma)
{
	const int channel = 1;	/* SDMA channel dedicated to the FPGA link */

	struct sdma_channel *sdmac = &sdma->channel[channel];

	/* Pre-assembled SDMA opcode stream, written verbatim to data RAM */
	const u32 sdma_code[24] = {
		0x6c20672b, 0x07647d02, 0x04007cfa, 0x612b622b, 0x662b0762, 0x7d0e6900, 0x6d0406da, 0x7d056e18,
		0x000002a6, 0x0e087cf8, 0x6a186c14, 0x6c2b7ce8, 0x7de7632b, 0x6904008f, 0x38037802, 0x6b290312,
		0x4a007d09, 0x6d0006da, 0x7d056e18, 0x000002a6, 0x0e087cf8, 0x6a180763, 0x7ce80300, 0x7de60000,
	};

	const int origin = 0xe00; /* In data space terms (32 bits/address) */

	struct sdma_context_data *context = sdma->context;

	int ret;

	ret = eim_init();
	if (ret) {
		printk(KERN_ERR THIS "Failed to initialize EIM bus\n");
		return ret;
	}

	/* Check the result — the old code ignored it here but checked the
	 * same call's result for the context write below. */
	ret = sdma_write_datamem(sdma, (void *) sdma_code, sizeof(sdma_code),
				 origin);
	if (ret) {
		printk(KERN_ERR THIS "Failed to load SDMA script\n");
		return ret;
	}

	ret = sdma_request_channel(sdmac);
	if (ret) {
		printk(KERN_ERR THIS "Failed to request channel\n");
		return ret;
	}

	sdma_disable_channel(sdmac);

	/* Don't let events run yet: */
	sdma_config_ownership(sdmac, true, true, false);

	memset(context, 0, sizeof(*context));

	context->channel_state.pc = origin * 2; /* In program space addressing */
	context->gReg[4] = MX51_CS2_BASE_ADDR + 0x80;  /* Request region */
	context->gReg[5] = MX51_CS2_BASE_ADDR + 0x8000; /* Data region */

	ret = sdma_write_datamem(sdma, (void *) context, sizeof(*context),
				 0x800 + (sizeof(*context) / 4) * channel);
	if (ret) {
		printk(KERN_ERR THIS "Failed to load context\n");
		return ret;
	}

	sdmac->desc.callback = sdma_irq_callback;
	sdmac->desc.callback_param = NULL;

	xillybus_sdmac = sdmac;

	return 0; /* Success! */
}
Esempio n. 3
0
/*
 * Register the Xillybus callback context and attach the pre-requested SDMA
 * channel (xillybus_sdmac, set up in setup_fpga_interface) to hardware
 * event 15.  Returns cs2_base for the caller's use.
 */
void *sdma_xillybus_init(void *userdata, irq_handler_t handler)
{
	/* Stash the caller's callback context in the module globals */
	xillybus_handler = handler;
	xillybus_userdata = userdata;

	/* Wire event 15 to the channel and hand it to the event side */
	sdma_event_enable(xillybus_sdmac, 15);
	sdma_config_ownership(xillybus_sdmac, true, false, false);

	return cs2_base;
}
Esempio n. 4
0
/*
 * Program a channel's ownership, event masks, watermark and addresses,
 * then load its context into the SDMA engine.
 *
 * Returns 0 on success or a negative error code from sdma_load_context.
 */
static int sdma_config_channel(struct sdma_channel *sdmac)
{
	int ret;

	sdma_disable_channel(sdmac);

	sdmac->event_mask0 = 0;
	sdmac->event_mask1 = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0)
		sdma_event_enable(sdmac, sdmac->event_id0);

	if (sdmac->event_id1)
		sdma_event_enable(sdmac, sdmac->event_id1);

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			/* Events >= 32 live in mask register 1; the
			 * watermark bits presumably select the bank —
			 * TODO confirm against the SDMA reference manual. */
			if (sdmac->event_id0 > 31) {
				sdmac->watermark_level |= 1u << 28;
				sdmac->event_mask1 |=
					1 << (sdmac->event_id0 % 32);
			} else {
				sdmac->event_mask0 |=
					1 << (sdmac->event_id0 % 32);
			}
			if (sdmac->event_id1 > 31) {
				sdmac->watermark_level |= 1u << 29;
				sdmac->event_mask1 |=
					1 << (sdmac->event_id1 % 32);
			} else {
				sdmac->event_mask0 |=
					1 << (sdmac->event_id1 % 32);
			}
			/* 1u << 31 avoids the old (unsigned int)(1<<31),
			 * which left-shifted into the sign bit (UB) before
			 * the cast took effect. */
			sdmac->watermark_level |= 3u << 11;
			sdmac->watermark_level |= 1u << 31;
			sdmac->watermark_level |= 2u << 24;
		} else {
			/* Single event: exactly one mask register is used */
			if (sdmac->event_id0 > 31) {
				sdmac->event_mask0 = 0;
				sdmac->event_mask1 =
					1 << (sdmac->event_id0 % 32);
			} else {
				sdmac->event_mask0 =
					1 << (sdmac->event_id0 % 32);
				sdmac->event_mask1 = 0;
			}
		}
		/* Watermark level: client-configured value used as-is
		 * (the old "|=" of the field with itself was a no-op) */
		/* Address */
		switch (sdmac->direction) {
		case DMA_DEV_TO_DEV:
			sdmac->per_addr = sdmac->per_address;
			sdmac->shp_addr = sdmac->per_address2;
			break;
		default:
			sdmac->shp_addr = sdmac->per_address;
			break;
		}
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}
Esempio n. 5
0
/*
 * One-time bring-up of the SDMA engine: allocate the channel control
 * block (CCB) array plus one context area, reset event/priority
 * registers, claim channel 0 (the command channel) and point the
 * hardware at the CCB array.
 *
 * Returns 0 on success or a negative error code.
 */
static int __init sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;
	/* CCBs for every channel plus one context area, in one chunk */
	const size_t ccb_size =
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
		sizeof(struct sdma_context_data);

	switch (sdma->version) {
	case 1:
		sdma->num_events = 32;
		break;
	case 2:
		sdma->num_events = 48;
		break;
	default:
		dev_err(sdma->dev, "Unknown version %d. aborting\n",
				sdma->version);
		return -ENODEV;
	}

	clk_enable(sdma->clk);

	/* Be sure SDMA has not started yet */
	__raw_writel(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(NULL, ccb_size,
			&ccb_phys, GFP_KERNEL);
	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_clk;
	}

	/* Context area lives right after the CCB array */
	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->num_events; i++)
		__raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		__raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel(&sdma->channel[0]);
	if (ret)
		goto err_free_ccb; /* old code leaked the coherent buffer here */

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	__raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	__raw_writel(0, sdma->regs + SDMA_H_CONFIG);

	__raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Set bits of CONFIG register with given context switching mode */
	__raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk);

	return 0;

err_free_ccb:
	dma_free_coherent(NULL, ccb_size, sdma->channel_control, ccb_phys);
	sdma->channel_control = NULL;
err_clk:
	clk_disable(sdma->clk);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}