Example No. 1
static int __init hsi_request_gdd_irq(struct hsi_dev *hsi_ctrl)
{
    struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
    struct resource *gdd_irq;

    if (hsi_driver_device_is_hsi(pd))
        gdd_irq = platform_get_resource(pd, IORESOURCE_IRQ, 2);
    else
        gdd_irq = platform_get_resource(pd, IORESOURCE_IRQ, 4);

    if (!gdd_irq) {
        dev_err(hsi_ctrl->dev, "HSI has no GDD IRQ resource\n");
        return -ENXIO;
    }

    hsi_ctrl->gdd_irq = gdd_irq->start;
    return hsi_gdd_init(hsi_ctrl, gdd_irq->name);
}
Example No. 2
void hsi_get_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	cfg->mode = hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_MODE_VAL_MASK;
	cfg->flow = (hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
	    >> HSI_FLOW_OFFSET;
	cfg->frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port));
	cfg->channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
	if (hsi_driver_device_is_hsi(pdev)) {
		cfg->divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
		cfg->counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
	} else {
		cfg->counters = hsi_inl(base, SSI_TIMEOUT_REG(port));
	}
}
Example No. 3
static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
{
	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
	struct hsi_platform_data *pdata = dev_get_platdata(hsi_ctrl->dev);
	struct hsi_port *hsi_p;
	unsigned int port;
	int err;

	for (port = 0; port < hsi_ctrl->max_p; port++) {
		hsi_p = &hsi_ctrl->hsi_port[port];
		hsi_p->flags = 0;
		hsi_p->port_number = pdata->ctx->pctx[port].port_number;
		hsi_p->hsi_controller = hsi_ctrl;
		hsi_p->max_ch = hsi_driver_device_is_hsi(pd) ?
		    HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
		hsi_p->irq = 0;
		hsi_p->wake_rx_3_wires_mode = 0; /* 4 wires */
		hsi_p->cawake_status = -1; /* Unknown */
		hsi_p->cawake_off_event = false;
		hsi_p->cawake_double_int = false;
		hsi_p->acwake_status = 0;
		hsi_p->in_int_tasklet = false;
		hsi_p->in_cawake_tasklet = false;
		hsi_p->counters_on = 1;
		hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
		spin_lock_init(&hsi_p->lock);
		err = hsi_port_channels_init(hsi_p);
		if (err < 0)
			goto rback;
		err = hsi_request_mpu_irq(hsi_p);
		if (err < 0)
			goto rback;
		err = hsi_request_cawake_irq(hsi_p);
		if (err < 0)
			goto rback;
		dev_info(hsi_ctrl->dev, "HSI port %d initialized\n",
			 hsi_p->port_number);
	}
	return 0;
rback:
	hsi_ports_exit(hsi_ctrl, port + 1);
	return err;
}
Example No. 4
static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
{
	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_port *hsi_p;
	unsigned int port;
	int err;

	for (port = 0; port < hsi_ctrl->max_p; port++) {
		hsi_p = &hsi_ctrl->hsi_port[port];
		hsi_p->port_number = port + 1;
		hsi_p->hsi_controller = hsi_ctrl;
		hsi_p->max_ch = hsi_driver_device_is_hsi(pd) ?
		    HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
		hsi_p->irq = 0;
		hsi_p->cawake_status = -1; /* Unknown */
		hsi_p->cawake_off_event = false;
		hsi_p->acwake_status = 0;
		hsi_p->in_int_tasklet = false;
		hsi_p->in_cawake_tasklet = false;
		hsi_p->counters_on = 1;
		hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
		spin_lock_init(&hsi_p->lock);
		err = hsi_port_channels_init(&hsi_ctrl->hsi_port[port]);
		if (err < 0)
			goto rback1;
		err = hsi_request_mpu_irq(hsi_p);
		if (err < 0)
			goto rback2;
		err = hsi_request_cawake_irq(hsi_p);
		if (err < 0)
			goto rback3;
	}
	return 0;
rback3:
	hsi_mpu_exit(hsi_p);
rback2:
	hsi_ports_exit(hsi_ctrl, port + 1);
rback1:
	return err;
}
Example No. 5
static int hsi_debug_show(struct seq_file *m, void *p)
{
	struct hsi_dev *hsi_ctrl = m->private;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	seq_printf(m, "REVISION\t: 0x%08x\n",
		   hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG));
	if (hsi_driver_device_is_hsi(pdev))
		seq_printf(m, "HWINFO\t\t: 0x%08x\n",
			   hsi_inl(hsi_ctrl->base, HSI_SYS_HWINFO_REG));
	seq_printf(m, "SYSCONFIG\t: 0x%08x\n",
		   hsi_inl(hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG));
	seq_printf(m, "SYSSTATUS\t: 0x%08x\n",
		   hsi_inl(hsi_ctrl->base, HSI_SYS_SYSSTATUS_REG));

	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;
}
Example No. 6
static int __init hsi_request_mpu_irq(struct hsi_port *hsi_p)
{
    struct hsi_dev *hsi_ctrl = hsi_p->hsi_controller;
    struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
    struct resource *mpu_irq;

    if (hsi_driver_device_is_hsi(pd))
        mpu_irq = platform_get_resource(pd, IORESOURCE_IRQ,
                                        hsi_p->port_number - 1);
    else			/* SSI supports 2 IRQs per port */
        mpu_irq = platform_get_resource(pd, IORESOURCE_IRQ,
                                        (hsi_p->port_number - 1) * 2);

    if (!mpu_irq) {
        dev_err(hsi_ctrl->dev, "HSI misses info for MPU IRQ on"
                " port %d\n", hsi_p->port_number);
        return -ENXIO;
    }
    hsi_p->n_irq = 0;	/* We only use one irq line */
    hsi_p->irq = mpu_irq->start;
    return hsi_mpu_init(hsi_p, mpu_irq->name);
}
Example No. 7
static int __init hsi_ports_init(struct hsi_dev *hsi_ctrl)
{
	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_port *hsi_p;
	unsigned int port;
	int err;

	for (port = 0; port < hsi_ctrl->max_p; port++) {
		hsi_p = &hsi_ctrl->hsi_port[port];
		hsi_p->port_number = port + 1;
		hsi_p->hsi_controller = hsi_ctrl;
		hsi_p->max_ch = hsi_driver_device_is_hsi(pd) ?
				HSI_CHANNELS_MAX : HSI_SSI_CHANNELS_MAX;
		hsi_p->max_ch = min(hsi_p->max_ch, (u8) HSI_PORT_MAX_CH);
		hsi_p->irq = 0;
		hsi_p->counters_on = 1;
		hsi_p->reg_counters = pdata->ctx.pctx[port].hsr.timeout;
		spin_lock_init(&hsi_p->lock);
		err = hsi_port_channels_init(&hsi_ctrl->hsi_port[port]);
		if (err < 0)
			goto rback1;
		err = hsi_request_mpu_irq(hsi_p);
		if (err < 0)
			goto rback2;
		err = hsi_request_cawake_irq(hsi_p);
		if (err < 0)
			goto rback3;
	}
	return 0;
rback3:
	hsi_mpu_exit(hsi_p);
rback2:
	hsi_ports_exit(hsi_ctrl, port + 1);
rback1:
	return err;
}
Example No. 8
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		pr_err(LOG_NAME "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	pm_runtime_enable(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_default(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver.(REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, 1);

	/* Set the HSI FCLK to default. */
	err = omap_device_set_rate(hsi_ctrl->dev, hsi_ctrl->dev,
					pdata->default_hsi_fclk);
	if (err)
		dev_err(&pd->dev, "Cannot set HSI FClk to default value: %ld\n",
			pdata->default_hsi_fclk);

	return err;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
Example No. 9
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = dev_get_platdata(&pd->dev);
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		dev_err(&pd->dev, "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	/* Check if mandatory board functions are populated */
	if (!pdata->device_scale) {
		dev_err(&pd->dev, "Missing platform device_scale function\n");
		return -ENOSYS;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	pm_runtime_enable(hsi_ctrl->dev);
	pm_runtime_irq_safe(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver.(REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, true);

	/* Set the HSI FCLK to default. */
	hsi_ctrl->hsi_fclk_req = pdata->default_hsi_fclk;
	err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
				  pdata->default_hsi_fclk);
	if (err == -EBUSY) {
		/* PM framework init is late_initcall, so it may not yet be */
		/* initialized, so be prepared to retry later on open. */
		dev_warn(&pd->dev, "Cannot set HSI FClk to default value: %ld. "
			 "Will retry on next open\n", pdata->default_hsi_fclk);
	} else if (err) {
		dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
				__func__, err, pdata->default_hsi_fclk);
		goto rollback3;
	} else {
		hsi_ctrl->hsi_fclk_current = pdata->default_hsi_fclk;
	}
	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
Example No. 10
/**
 * hsi_driver_read_dma - Program GDD [DMA] to write data to memory from
 * the hsi channel buffer.
 * @hsi_channel - pointer to the hsi_channel to read data from.
 * @data - pointer to the 32-bit word buffer where the incoming data is stored.
 * @count - Number of 32-bit words to be transferred to the buffer.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return 0 on success and < 0 on error.
 */
int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 *data,
			unsigned int count)
{
	struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_channel->hsi_port->port_number;
	unsigned int channel = hsi_channel->channel_number;
	unsigned int sync;
	int lch;
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	u16 tmp;
	int fifo;

	lch = hsi_get_free_lch(hsi_ctrl);
	if (lch < 0) {
		dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
		return -EBUSY;	/* No free GDD logical channels. */
	} else {
		dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for read on"
					" HSI channel %d.\n", lch,
					hsi_channel->channel_number);
	}

	/* When DMA is used for Rx, disable the Rx Interrupt.
	 * (else DATAAVAILABLE event would get triggered on first
	 * received data word)
	 * (Rx interrupt might be active for polling feature)
	 */
	hsi_driver_disable_read_interrupt(hsi_channel);

	/*
	 * NOTE: Getting a free gdd logical channel and
	 * reserving it must be done atomically.
	 */
	hsi_channel->read_data.lch = lch;

	/* Sync is required for SSI but not for HSI */
	sync = hsi_sync_table[HSI_SYNC_READ][port - 1][channel];

	dest_addr = dma_map_single(hsi_ctrl->dev, data, count * 4,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(hsi_ctrl->dev, dest_addr))) {
		dev_err(hsi_ctrl->dev, "Failed to create DMA read mapping.\n");
		return -ENOMEM;
	}

	tmp = HSI_DST_BURST_4x32_BIT |
	    HSI_DST_MEMORY_PORT |
	    HSI_SRC_BURST_4x32_BIT |
	    HSI_SRC_PERIPHERAL_PORT | HSI_DATA_TYPE_S32;
	hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));

	tmp = HSI_DST_AMODE_POSTINC | HSI_SRC_AMODE_CONST | sync;
	hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));

	hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));

	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
				"transfer from FIFO.\n");
			return -EFAULT;
		}
		/* HSI CSSA register takes a FIFO ID when copying from FIFO */
		hsi_outl(fifo, base, HSI_GDD_CSSA_REG(lch));
	} else {
		src_addr = hsi_ctrl->phy_base + HSI_HSR_BUFFER_CH_REG(port,
								channel);
		/* SSI CSSA register always takes a 32-bit address */
		hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
	}

	/* HSI CDSA register takes a 32-bit address when copying to memory */
	/* SSI CDSA register always takes a 32-bit address */
	hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
	hsi_outw(count, base, HSI_GDD_CEN_REG(lch));

	/* TODO : Need to clean interrupt status here to avoid spurious int */

	hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));

	return 0;
}
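The kernel-doc above says the hsi_controller lock must be held around hsi_driver_read_dma() and that the buffer receives `count` 32-bit words. A minimal, hedged caller sketch follows; the helper name and its parameters are hypothetical, and the lock is taken with spin_lock_bh() as the other call sites in this driver do.
/* Hypothetical caller sketch: rx_buf must be DMA-able (e.g. kmalloc'd) and
 * stay valid until the read_done() callback runs (see do_hsi_gdd_lch()). */
static int start_rx_dma(struct hsi_channel *ch, u32 *rx_buf, unsigned int words)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	int err;

	/* hsi_controller lock must be held around hsi_driver_read_dma() */
	spin_lock_bh(&hsi_ctrl->lock);
	err = hsi_driver_read_dma(ch, rx_buf, words);
	spin_unlock_bh(&hsi_ctrl->lock);

	return err;	/* 0 on success, negative errno otherwise */
}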
Example No. 11
/**
 * hsi_ioctl - HSI I/O control
 * @dev - hsi device channel reference to apply the I/O control to
 *						(or the port associated with it)
 * @command - HSI I/O control command
 * @arg - parameter associated with the control command. NULL, if no parameter.
 *
 * Return 0 on success, a negative value on failure.
 *
 */
int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg)
{
	struct hsi_channel *ch;
	struct hsi_dev *hsi_ctrl;
	struct hsi_port *pport;
	void __iomem *base;
	unsigned int port, channel;
	u32 acwake;
	int err = 0;
	int fifo = 0;
	u8 ret;
	struct hsi_platform_data *pdata;

	if (unlikely((!dev) ||
		     (!dev->ch) ||
		     (!dev->ch->hsi_port) ||
		     (!dev->ch->hsi_port->hsi_controller)) ||
	    (!(dev->ch->flags & HSI_CH_OPEN))) {
		pr_err(LOG_NAME "HSI IOCTL Invalid parameter\n");
		return -EINVAL;
	}

	ch = dev->ch;
	pport = ch->hsi_port;
	hsi_ctrl = ch->hsi_port->hsi_controller;
	port = ch->hsi_port->port_number;
	channel = ch->channel_number;
	base = hsi_ctrl->base;

	dev_dbg(hsi_ctrl->dev, "IOCTL: ch %d, command %d\n", channel, command);

	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(hsi_ctrl->dev, channel, __func__);

	switch (command) {
	case HSI_IOCTL_ACWAKE_UP:
		/* Wake up request to Modem (typically OMAP initiated) */
		/* Symmetrical disable will be done in HSI_IOCTL_ACWAKE_DOWN */
		if (ch->flags & HSI_CH_ACWAKE) {
			dev_dbg(hsi_ctrl->dev, "Duplicate ACWAKE UP\n");
			err = -EPERM;
			goto out;
		}

		ch->flags |= HSI_CH_ACWAKE;
		pport->acwake_status |= BIT(channel);

		/* We claim the wake line only once per channel */
		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (!(acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_SET_WAKE(channel), base,
				 HSI_SYS_SET_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_ACWAKE_DOWN:
		/* Low power request initiation (OMAP initiated, typically */
		/* following inactivity timeout) */
		/* ACPU HSI block shall still be capable of receiving */
		if (!(ch->flags & HSI_CH_ACWAKE)) {
			dev_dbg(hsi_ctrl->dev, "Duplicate ACWAKE DOWN\n");
			err = -EPERM;
			goto out;
		}

		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (unlikely(pport->acwake_status !=
				(acwake & HSI_WAKE_MASK))) {
			dev_warn(hsi_ctrl->dev,
				"ACWAKE shadow register mismatch"
				" acwake_status: 0x%x, HSI_SYS_WAKE_REG: 0x%x",
				pport->acwake_status, acwake);
			pport->acwake_status = acwake & HSI_WAKE_MASK;
		}
		/* SSI_TODO: add safety check for SSI also */

		ch->flags &= ~HSI_CH_ACWAKE;
		pport->acwake_status &= ~BIT(channel);

		/* Release the wake line per channel */
		if ((acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_CLEAR_WAKE(channel), base,
				 HSI_SYS_CLEAR_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_SEND_BREAK:
		hsi_outl(1, base, HSI_HST_BREAK_REG(port));
		/*HSI_TODO : need to deactivate clock after BREAK frames sent*/
		/*Use interrupt ? (if TX BREAK INT exists)*/
		break;
	case HSI_IOCTL_GET_ACWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		*(u32 *)arg = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		break;
	case HSI_IOCTL_FLUSH_RX:
		ret = hsi_hsr_fifo_flush_channel(hsi_ctrl, port, channel);
		if (arg)
			*(size_t *)arg = ret;

		/* Ack the RX Int */
		hsi_outl_and(~HSI_HSR_DATAAVAILABLE(channel), base,
			     HSI_SYS_MPU_STATUS_CH_REG(port, pport->n_irq,
						       channel));
		break;
	case HSI_IOCTL_FLUSH_TX:
		ret = hsi_hst_fifo_flush_channel(hsi_ctrl, port, channel);
		if (arg)
			*(size_t *)arg = ret;

		/* Ack the TX Int */
		hsi_outl_and(~HSI_HST_DATAACCEPT(channel), base,
			     HSI_SYS_MPU_STATUS_CH_REG(port, pport->n_irq,
						       channel));
		break;
	case HSI_IOCTL_GET_CAWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_get_cawake(dev->ch->hsi_port);
		if (err < 0) {
			err = -ENODEV;
			goto out;
		}
		*(u32 *)arg = err;
		break;
	case HSI_IOCTL_SET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_GET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_SET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_GET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_SW_RESET:
		dev_info(hsi_ctrl->dev, "SW Reset\n");
		err = hsi_softreset(hsi_ctrl);

		/* Reset HSI config to default */
		hsi_softreset_driver(hsi_ctrl);
		break;
	case HSI_IOCTL_GET_FIFO_OCCUPANCY:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
					       "channel %d.\n", channel);
			err = -EFAULT;
			goto out;
		}
		*(size_t *)arg = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
		break;
	case HSI_IOCTL_SET_WAKE_RX_3WIRES_MODE:
		dev_info(hsi_ctrl->dev,
			 "Entering RX wakeup in 3 wires mode (no CAWAKE)\n");
		pport->wake_rx_3_wires_mode = 1;

		/* HSI-C1BUG00085: ixxx: HSI wakeup issue in 3 wires mode
		 * HSI will NOT generate the Swakeup for 2nd frame if it entered
		 * IDLE after 1st received frame */
		if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_ixxx_3WIRES_NO_SWAKEUP))
			if (hsi_driver_device_is_hsi(to_platform_device
							(hsi_ctrl->dev)))
				hsi_set_pm_force_hsi_on(hsi_ctrl);

		/* When WAKE is not available, ACREADY must be set to 1 at
		 * reset else remote will never have a chance to transmit. */
		hsi_outl_or(HSI_SET_WAKE_3_WIRES | HSI_SET_WAKE_READY_LVL_1,
			    base, HSI_SYS_SET_WAKE_REG(port));
		hsi_driver_disable_interrupt(pport, HSI_CAWAKEDETECTED);
		break;
	case HSI_IOCTL_SET_WAKE_RX_4WIRES_MODE:
		dev_info(hsi_ctrl->dev, "Entering RX wakeup in 4 wires mode\n");
		pport->wake_rx_3_wires_mode = 0;

		/* HSI-C1BUG00085: ixxx: HSI wakeup issue in 3 wires mode
		 * HSI will NOT generate the Swakeup for 2nd frame if it entered
		 * IDLE after 1st received frame */
		if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_ixxx_3WIRES_NO_SWAKEUP))
			if (hsi_driver_device_is_hsi(to_platform_device
							(hsi_ctrl->dev)))
				hsi_set_pm_default(hsi_ctrl);

		hsi_driver_enable_interrupt(pport, HSI_CAWAKEDETECTED);
		hsi_outl_and(HSI_SET_WAKE_3_WIRES_MASK,	base,
			     HSI_SYS_SET_WAKE_REG(port));
		break;
	case HSI_IOCTL_SET_HI_SPEED:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_ctrl->hsi_fclk_req = *(unsigned int *)arg ?
					HSI_FCLK_HI_SPEED : HSI_FCLK_LOW_SPEED;

		if (hsi_ctrl->hsi_fclk_req == hsi_ctrl->hsi_fclk_current) {
			dev_dbg(hsi_ctrl->dev, "HSI FClk already @%ldHz\n",
				 hsi_ctrl->hsi_fclk_current);
			goto out;
		}

		if (hsi_is_controller_transfer_ongoing(hsi_ctrl)) {
			err = -EBUSY;
			goto out;
		}
		hsi_ctrl->clock_change_ongoing = true;
		spin_unlock_bh(&hsi_ctrl->lock);

		pdata = dev_get_platdata(hsi_ctrl->dev);

		/* Set the HSI FCLK to requested value. */
		err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
					  hsi_ctrl->hsi_fclk_req);
		if (err < 0) {
			dev_err(hsi_ctrl->dev, "%s: Cannot set HSI FClk to"
				" %ldHz, err %d\n", __func__,
				hsi_ctrl->hsi_fclk_req, err);
		} else {
			dev_info(hsi_ctrl->dev, "HSI FClk changed from %ldHz to"
				 " %ldHz\n", hsi_ctrl->hsi_fclk_current,
				 hsi_ctrl->hsi_fclk_req);
			hsi_ctrl->hsi_fclk_current = hsi_ctrl->hsi_fclk_req;
		}

		spin_lock_bh(&hsi_ctrl->lock);
		hsi_ctrl->clock_change_ongoing = false;

		break;
	case HSI_IOCTL_GET_SPEED:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}

		*(unsigned long *)arg = hsi_ctrl->hsi_fclk_current;
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
out:
	/* All IOCTL end by disabling the clocks, except ACWAKE high. */
	hsi_clocks_disable_channel(hsi_ctrl->dev, channel, __func__);

	spin_unlock_bh(&hsi_ctrl->lock);

	return err;
}
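The kernel-doc above lists the expected arguments for hsi_ioctl(). A hedged caller sketch of two typical commands follows; it assumes `dev` is an hsi_device whose channel is already open (otherwise the flags check at the top returns -EINVAL), and the helper name is hypothetical.
/* Sketch only: dev is an opened struct hsi_device provided by the caller. */
static void example_ioctls(struct hsi_device *dev)
{
	size_t occupancy = 0;
	int err;

	/* Assert the ACWAKE line for this channel (released with ACWAKE_DOWN). */
	err = hsi_ioctl(dev, HSI_IOCTL_ACWAKE_UP, NULL);
	if (err)
		pr_err("ACWAKE_UP failed: %d\n", err);

	/* Read how many frames are pending in this channel's RX FIFO. */
	err = hsi_ioctl(dev, HSI_IOCTL_GET_FIFO_OCCUPANCY, &occupancy);
	if (!err)
		pr_info("RX FIFO occupancy: %zu frames\n", occupancy);

	hsi_ioctl(dev, HSI_IOCTL_ACWAKE_DOWN, NULL);
}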
Example No. 12
static int hsi_debug_port_show(struct seq_file *m, void *p)
{
	struct hsi_port *hsi_port = m->private;
	struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_port->port_number;
	int ch, fifo;
	long buff_offset;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	if (hsi_port->cawake_gpio >= 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", hsi_get_cawake(hsi_port));

	seq_printf(m, "WAKE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_SYS_WAKE_REG(port)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
		   hsi_inl(base,
			   HSI_SYS_MPU_ENABLE_REG(port, hsi_port->n_irq)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
		   hsi_inl(base,
			   HSI_SYS_MPU_STATUS_REG(port, hsi_port->n_irq)));
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "MPU_U_ENABLE_IRQ%d\t: 0x%08x\n",
			   hsi_port->n_irq,
			   hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port,
							hsi_port->n_irq)));
		seq_printf(m, "MPU_U_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
			   hsi_inl(base,
				   HSI_SYS_MPU_U_STATUS_REG(port,
							    hsi_port->n_irq)));
	}
	/* HST */
	seq_printf(m, "\nHST\n===\n");
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_MODE_REG(port)));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_FRAMESIZE_REG(port)));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_DIVISOR_REG(port)));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_CHANNELS_REG(port)));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_ARBMODE_REG(port)));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_TXSTATE_REG(port)));
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(1)));
		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(2)));
	} else {
		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(port)));
	}
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_BREAK_REG(port)));
	for (ch = 0; ch < 8; ch++) {
		buff_offset = hsi_hst_buffer_reg(hsi_ctrl, port, ch);
		if (buff_offset >= 0)
			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				   hsi_inl(base, buff_offset));
	}
	if (hsi_driver_device_is_hsi(pdev)) {
		for (fifo = 0; fifo < HSI_HST_FIFO_COUNT; fifo++) {
			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
				   hsi_inl(base,
					   HSI_HST_MAPPING_FIFO_REG(fifo)));
		}
	}
	/* HSR */
	seq_printf(m, "\nHSR\n===\n");
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_MODE_REG(port)));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_CHANNELS_REG(port)));
	seq_printf(m, "COUNTERS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_COUNTERS_REG(port)));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_RXSTATE_REG(port)));
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(1)));
		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(2)));
	} else {
		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(port)));
	}
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_BREAK_REG(port)));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_ERROR_REG(port)));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_ERRORACK_REG(port)));
	for (ch = 0; ch < 8; ch++) {
		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, port, ch);
		if (buff_offset >= 0)
			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				   hsi_inl(base, buff_offset));
	}
	if (hsi_driver_device_is_hsi(pdev)) {
		for (fifo = 0; fifo < HSI_HSR_FIFO_COUNT; fifo++) {
			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
				   hsi_inl(base,
					   HSI_HSR_MAPPING_FIFO_REG(fifo)));
		}
		seq_printf(m, "DLL\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_DLL_REG));
		seq_printf(m, "DIVISOR\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_DIVISOR_REG(port)));
	}

	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;
}
Example No. 13
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = dev_get_platdata(&pd->dev);
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		dev_err(&pd->dev, "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	/* Check if mandatory board functions are populated */
	if (!pdata->device_scale) {
		dev_err(&pd->dev, "Missing platform device_scale function\n");
		return -ENOSYS;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}
//mo2haewoon.you => [START]
#if defined(HSI_SEND_ATCOMMAND_TO_CAWAKE)
	INIT_WORK(&hsi_ctrl->ifx_work, ifx_check_handle_work);
	hsi_ctrl->ifx_wq = create_singlethread_workqueue("ifx_wq");
	if (!hsi_ctrl->ifx_wq)
		printk("Failed to setup workqueue - ifx_wq\n");
#endif
//mo2haewoon.you => [END]

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	pm_runtime_enable(hsi_ctrl->dev);
	pm_runtime_irq_safe(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver.(REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

//mo2haewoon.you => [START]
#if defined(HSI_SEND_ATCOMMAND_TO_CAWAKE)
	/* Set the ts_gpio pin mux */
	err = gpio_request(OMAP_SEND, "gpio_122");
	gpio_direction_input(OMAP_SEND);
	irq_num_122 = gpio_to_irq(OMAP_SEND);

	err = request_irq(irq_num_122, ifx_check_handle_srdy_irq,
			  IRQF_TRIGGER_RISING, HSI_MODULENAME, hsi_ctrl);
	if (err < 0)
		pr_err("Modem-wait-check: couldn't request gpio interrupt 122\n");
#endif
//mo2haewoon.you => [END]

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, true);

	/* Set the HSI FCLK to default. */
	hsi_ctrl->hsi_fclk_req = pdata->default_hsi_fclk;
	err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
				  pdata->default_hsi_fclk);
	if (err == -EBUSY) {
		/* PM framework init is late_initcall, so it may not yet be */
		/* initialized, so be prepared to retry later on open. */
		dev_warn(&pd->dev, "Cannot set HSI FClk to default value: %ld. "
			 "Will retry on next open\n", pdata->default_hsi_fclk);
	} else if (err) {
		dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
				__func__, err, pdata->default_hsi_fclk);
		goto rollback3;
	} else {
		hsi_ctrl->hsi_fclk_current = pdata->default_hsi_fclk;
	}
	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

// LGE_CHANGE [MIPI-HSI] [email protected] [START]
#if defined(CONFIG_MACH_LGE_COSMOPOLITAN)
	/* Set IMC CP core dump */
	IFX_CP_CRASH_DUMP_INIT();
#endif

#if defined(CONFIG_MACH_LGE_COSMO)
	/* Notify active/sleep status of AP to CP*/
	ifx_init_modem_send();
#endif
// LGE_CHANGE [MIPI-HSI] [email protected] [END]

	return 0;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
Example No. 14
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = dev_get_platdata(&pd->dev);
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		dev_err(&pd->dev, "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	if (!pdata->device_enable || !pdata->device_idle ||
	    !pdata->device_set_rate || !pdata->wakeup_enable ||
	    !pdata->wakeup_disable || !pdata->wakeup_is_from_hsi ||
	    !pdata->board_suspend) {
		dev_err(&pd->dev, "Missing platform function pointers\n");
		return -EINVAL;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

#ifdef USE_PM_RUNTIME_FOR_HSI
	pm_runtime_enable(hsi_ctrl->dev);
#endif
	err = hsi_clocks_enable(hsi_ctrl->dev, __func__);
	if (err < 0) {
		dev_err(&pd->dev, "Could not enable clocks for hsi: %d\n", err);
		goto rollback1;
	}

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver.(REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, true);

	/* Set the HSI FCLK to default. */
	err = pdata->device_set_rate(hsi_ctrl->dev, hsi_ctrl->dev,
					pdata->default_hsi_fclk);
	if (err)
		dev_err(&pd->dev, "Cannot set HSI FClk to default value: %ld\n",
			pdata->default_hsi_fclk);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

#if defined(CONFIG_MACH_LGE_COSMOPOLITAN)
	/* Set IMC CP core dump */
	IFX_CP_CRASH_DUMP_INIT();

	/* Notify active/sleep status of AP to CP*/
	ifx_init_modem_send();
#endif

#if defined(HSI_GPIO_CAWKAE_NOTIFY_ENABLE)
	ifx_init_gpio_cawake_notify(hsi_ctrl);
#endif

	return err;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
Example No. 15
static int __init hsi_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
			hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		pr_err(LOG_NAME "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	/* Set default PM settings */
	hsi_outl((HSI_AUTOIDLE | HSI_SIDLEMODE_SMART | HSI_MIDLEMODE_SMART),
		 hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG);
	hsi_outl(HSI_CLK_AUTOGATING_ON, hsi_ctrl->base, HSI_GDD_GCR_REG);

	/* Configure HSI ports */
	set_hsi_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver.(REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
								revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
					(revision & HSI_SSI_REV_MAJOR) >> 4,
					(revision & HSI_SSI_REV_MINOR));


	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0)
		goto rollback3;

	return err;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);
rollback1:
	kfree(hsi_ctrl);
	return err;
}
Example No. 16
static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
{
	void __iomem *base = hsi_ctrl->base;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	struct hsi_channel *ch;
	unsigned int port;
	unsigned int channel;
	unsigned int is_read_path;
	u32 gdd_csr;
	dma_addr_t dma_h;
	size_t size;
	int fifo, fifo_words_avail;

	if (hsi_get_info_from_gdd_lch(hsi_ctrl, gdd_lch, &port, &channel,
				      &is_read_path) < 0) {
		dev_err(hsi_ctrl->dev, "Unable to match the DMA channel %d with"
			" an HSI channel\n", gdd_lch);
		return;
	} else {
		dev_dbg(hsi_ctrl->dev, "DMA event on gdd_lch=%d => port=%d, "
			"channel=%d, read=%d\n", gdd_lch, port, channel,
			is_read_path);
	}

	hsi_outl_and(~HSI_GDD_LCH(gdd_lch), base,
		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	/* Warning : CSR register is cleared automatically by HW after SW read */
	gdd_csr = hsi_inw(base, HSI_GDD_CSR_REG(gdd_lch));

	if (!(gdd_csr & HSI_CSR_TOUT)) {
		if (is_read_path) {	/* Read path */
			dma_h = hsi_inl(base, HSI_GDD_CDSA_REG(gdd_lch));
			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
			dma_sync_single_for_cpu(hsi_ctrl->dev, dma_h, size,
						DMA_FROM_DEVICE);
			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
					 DMA_FROM_DEVICE);
			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
			hsi_reset_ch_read(ch);

			dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback "
					"(size %d).\n", channel,  size/4);
			spin_unlock(&hsi_ctrl->lock);
			ch->read_done(ch->dev, size / 4);
			spin_lock(&hsi_ctrl->lock);

			/* Check if FIFO is correctly emptied */
			if (hsi_driver_device_is_hsi(pdev)) {
				fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
				if (unlikely(fifo < 0)) {
					dev_err(hsi_ctrl->dev, "No valid FIFO "
						"id found for channel %d.\n",
						channel);
					return;
				}
				fifo_words_avail =
					hsi_get_rx_fifo_occupancy(hsi_ctrl,
								fifo);
				if (fifo_words_avail)
					dev_dbg(hsi_ctrl->dev,
						"FIFO %d not empty "
						"after DMA copy, remaining "
						"%d/%d frames\n",
						fifo, fifo_words_avail,
						HSI_HSR_FIFO_SIZE);
			}
			/* Re-enable interrupts for polling if needed */
			if (ch->flags & HSI_CH_RX_POLL)
				hsi_driver_enable_read_interrupt(ch, NULL);
		} else {	/* Write path */
			dma_h = hsi_inl(base, HSI_GDD_CSSA_REG(gdd_lch));
			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
					 DMA_TO_DEVICE);
			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
			hsi_reset_ch_write(ch);

			dev_dbg(hsi_ctrl->dev, "Calling ch %d write callback "
					"(size %d).\n", channel, size/4);
			spin_unlock(&hsi_ctrl->lock);
			ch->write_done(ch->dev, size / 4);
			spin_lock(&hsi_ctrl->lock);
		}
	} else {
		dev_err(hsi_ctrl->dev, "Time-out overflow Error on GDD transfer"
			" on gdd channel %d\n", gdd_lch);
		spin_unlock(&hsi_ctrl->lock);
		/* TODO : need to perform a DMA soft reset */
		hsi_port_event_handler(&hsi_ctrl->hsi_port[port - 1],
				       HSI_EVENT_ERROR, NULL);
		spin_lock(&hsi_ctrl->lock);
	}
}
Example No. 17
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
    struct hsi_platform_data *pdata = pd->dev.platform_data;
    struct hsi_dev *hsi_ctrl;
    u32 revision;
    int err;

    dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

    dev_dbg(&pd->dev, "The platform device probed is an %s\n",
            hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

    if (!pdata) {
        dev_err(&pd->dev, "No platform_data found on hsi device\n");
        return -ENXIO;
    }

    hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
    if (hsi_ctrl == NULL) {
        dev_err(&pd->dev, "Could not allocate memory for"
                " struct hsi_dev\n");
        return -ENOMEM;
    }

    platform_set_drvdata(pd, hsi_ctrl);
    err = hsi_controller_init(hsi_ctrl, pd);
    if (err < 0) {
        dev_err(&pd->dev, "Could not initialize hsi controller:"
                " %d\n", err);
        goto rollback1;
    }
    /* Wakeup dependency was disabled for HSI <-> MPU PM_L3INIT_HSI_WKDEP */
#if 0
    omap_writel(0x141, 0x4A307338);
#endif
    pm_runtime_enable(hsi_ctrl->dev);
    pm_runtime_irq_safe(hsi_ctrl->dev);
    hsi_clocks_enable(hsi_ctrl->dev, __func__);

    /* Non critical SW Reset */
    err = hsi_softreset(hsi_ctrl);
    if (err < 0)
        goto rollback2;

    hsi_set_pm_force_hsi_on(hsi_ctrl);

    /* Configure HSI ports */
    hsi_set_ports_default(hsi_ctrl, pd);

    /* Gather info from registers for the driver.(REVISION) */
    revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
    if (hsi_driver_device_is_hsi(pd))
        dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
                 revision);
    else
        dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
                 (revision & HSI_SSI_REV_MAJOR) >> 4,
                 (revision & HSI_SSI_REV_MINOR));

    err = hsi_debug_add_ctrl(hsi_ctrl);
    if (err < 0) {
        dev_err(&pd->dev,
                "Could not add hsi controller to debugfs: %d\n", err);
        goto rollback2;
    }

    err = register_hsi_devices(hsi_ctrl);
    if (err < 0) {
        dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
        goto rollback3;
    }

    /* Allow HSI to wake up the platform */
    device_init_wakeup(hsi_ctrl->dev, true);

    /* Set the HSI FCLK to default. */
    if (!pdata->device_scale) {
        dev_err(&pd->dev, "%s: No platform device_scale function\n",
                __func__);
        err = -ENXIO;
        goto rollback3;
    }
    err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
                              pdata->default_hsi_fclk);
    if (err == -EBUSY) {
        dev_warn(&pd->dev, "Cannot set HSI FClk to default value: %ld. "
                 "Will retry on next open\n",
                 pdata->default_hsi_fclk);
    } else if (err) {
        dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
                __func__, err, pdata->default_hsi_fclk);
        goto rollback3;
    } else {
        hsi_ctrl->clock_rate = pdata->default_hsi_fclk;
    }

    /* From here no need for HSI HW access */
    hsi_clocks_disable(hsi_ctrl->dev, __func__);

    return 0;

rollback3:
    hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
    hsi_controller_exit(hsi_ctrl);

    /* From here no need for HSI HW access */
    hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
    kfree(hsi_ctrl);
    return err;
}
Example No. 18
/* HSR_AVAILABLE interrupt processing */
static void hsi_do_channel_rx(struct hsi_channel *ch)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	void __iomem *base = ch->hsi_port->hsi_controller->base;
	unsigned int n_ch;
	unsigned int n_p;
	unsigned int irq;
	long buff_offset;
	int rx_poll = 0;
	int data_read = 0;
	int fifo, fifo_words_avail;

	n_ch = ch->channel_number;
	n_p = ch->hsi_port->port_number;
	irq = ch->hsi_port->n_irq;

	dev_dbg(hsi_ctrl->dev,
		"Data Available interrupt for channel %d.\n", n_ch);

	/* Check if there is data in FIFO available for reading */
	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
		fifo = hsi_fifo_get_id(hsi_ctrl, n_ch, n_p);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
					       "channel %d.\n", n_ch);
			return;
		}
		fifo_words_avail = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
		if (!fifo_words_avail) {
			dev_dbg(hsi_ctrl->dev,
				"WARNING: RX FIFO %d empty before CPU copy\n",
				fifo);

			/* Do not disable interrupt because another interrupt */
			/* can still come, this time with a real frame. */
			return;
		}
	}

	/* Disable interrupts if not needed for polling */
	if (!(ch->flags & HSI_CH_RX_POLL))
		hsi_driver_disable_read_interrupt(ch);

	/*
	 * Check race condition: RX transmission initiated but DMA transmission
	 * already started - acknowledge then ignore interrupt occurrence
	 */
	if (ch->read_data.lch != -1) {
		dev_warn(hsi_ctrl->dev,
			"Race condition between RX Int ch %d and DMA %0x\n",
			n_ch, ch->read_data.lch);
		goto done;
	}

	if (ch->flags & HSI_CH_RX_POLL)
		rx_poll = 1;

	if (ch->read_data.addr) {
		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, n_p, n_ch);
		if (buff_offset >= 0) {
			data_read = 1;
			*(ch->read_data.addr) = hsi_inl(base, buff_offset);
		}
	}

	hsi_reset_ch_read(ch);

done:
	if (rx_poll) {
		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(ch->hsi_port,
				       HSI_EVENT_HSR_DATAAVAILABLE,
				       (void *)n_ch);
		spin_lock(&hsi_ctrl->lock);
	}

	if (data_read) {
		spin_unlock(&hsi_ctrl->lock);
		dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback.\n", n_ch);
		(*ch->read_done) (ch->dev, 1);
		spin_lock(&hsi_ctrl->lock);
	}
}
Example No. 19
int hsi_set_tx(struct hsi_port *sport, struct hst_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	unsigned int max_divisor = hsi_driver_device_is_hsi(pdev) ?
	    HSI_MAX_TX_DIVISOR : HSI_SSI_MAX_TX_DIVISOR;

	if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
	    (cfg->mode != NOT_SET))
		return -EINVAL;

	if (hsi_driver_device_is_hsi(pdev)) {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;
		/* HSI only supports payload size of 32bits */
		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	} else {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;

		if ((cfg->frame_size > HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	}

	if ((cfg->channels == 0) ||
	    ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
		return -EINVAL;

	if ((cfg->divisor > max_divisor) && (cfg->divisor != NOT_SET))
		return -EINVAL;

	if ((cfg->arb_mode != HSI_ARBMODE_ROUNDROBIN) &&
	    (cfg->arb_mode != HSI_ARBMODE_PRIORITY) && (cfg->mode != NOT_SET))
		return -EINVAL;

	if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
		hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK) <<
				      HSI_FLOW_OFFSET) |
			 HSI_HST_MODE_WAKE_CTRL_SW, base,
			 HSI_HST_MODE_REG(port));

	if (cfg->frame_size != NOT_SET)
		hsi_outl(cfg->frame_size, base, HSI_HST_FRAMESIZE_REG(port));

	if (cfg->channels != NOT_SET) {
		if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
			return -EINVAL;
		else
			hsi_outl(cfg->channels, base,
				 HSI_HST_CHANNELS_REG(port));
	}

	if (cfg->divisor != NOT_SET)
		hsi_outl(cfg->divisor, base, HSI_HST_DIVISOR_REG(port));

	if (cfg->arb_mode != NOT_SET)
		hsi_outl(cfg->arb_mode, base, HSI_HST_ARBMODE_REG(port));

	return 0;
}
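In this driver, hsi_set_tx() is normally reached through hsi_ioctl() with HSI_IOCTL_SET_TX (Example No. 11), which handles locking and clocks. A hedged sketch of a caller filling a struct hst_ctx follows; the field values are illustrative, the helper name is hypothetical, and NOT_SET leaves a setting unchanged, as the checks above show.
/* Sketch only: put the transmitter in frame mode with synchronized flow
 * on 2 channels; keep the current TX divisor (NOT_SET leaves it unchanged). */
static int example_configure_tx(struct hsi_device *dev)
{
	struct hst_ctx tx_cfg = {
		.mode		= HSI_MODE_FRAME,
		.flow		= HSI_FLOW_SYNCHRONIZED,
		.frame_size	= HSI_FRAMESIZE_MAX,	/* only size HSI supports */
		.channels	= 2,			/* must be a power of two */
		.divisor	= NOT_SET,		/* keep current TX divisor */
		.arb_mode	= HSI_ARBMODE_ROUNDROBIN,
	};

	return hsi_ioctl(dev, HSI_IOCTL_SET_TX, &tx_cfg);
}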
Example No. 20
/* Manage HSR divisor update
 * A special divisor value allows switching to auto-divisor mode in Rx
 * (but with error counters deactivated). This function implements
 * the transitions to/from this mode.
 */
static int hsi_set_rx_divisor(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	if (cfg->divisor == NOT_SET)
		return 0;

	if (hsi_driver_device_is_hsi(pdev)) {
		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
		    sport->counters_on) {
			/* auto mode: deactivate counters + set divisor = 0 */
			sport->reg_counters = hsi_inl(base, HSI_HSR_COUNTERS_REG
							    (port));
			sport->counters_on = 0;

			/* silicon errata fix:i646 for omap44xx
			 * HSI error counters cannot be disabled
			 */
			if (is_hsi_errata(hsi_ctrl,
				HSI_ERRATUM_i646_ERROR_COUNTERS_DISABLED))
				hsi_outl(0xFFFFF, base,
					HSI_HSR_COUNTERS_REG(port));
			else
				hsi_outl(0, base, HSI_HSR_COUNTERS_REG(port));

			hsi_outl(0, base, HSI_HSR_DIVISOR_REG(port));
			dev_dbg(hsi_ctrl->dev, "Switched to HSR auto mode\n");
		} else if (cfg->divisor != HSI_HSR_DIVISOR_AUTO) {
			/* Divisor set mode: use counters */
			/* Leave auto mode: use new counters values */
			sport->reg_counters = cfg->counters;
			sport->counters_on = 1;
			hsi_outl(cfg->counters, base,
				 HSI_HSR_COUNTERS_REG(port));
			hsi_outl(cfg->divisor, base, HSI_HSR_DIVISOR_REG(port));
			dev_dbg(hsi_ctrl->dev, "Left HSR auto mode. Counters=0x%08x, Divisor=0x%08x\n",
				cfg->counters, cfg->divisor);
		}
	} else {
		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
		    sport->counters_on) {
			/* auto mode: deactivate timeout */
			sport->reg_counters = hsi_inl(base,
						      SSI_TIMEOUT_REG(port));
			sport->counters_on = 0;
			hsi_outl(0, base, SSI_TIMEOUT_REG(port));
			dev_dbg(hsi_ctrl->dev, "Deactivated SSR timeout\n");
		} else if (cfg->divisor == HSI_SSR_DIVISOR_USE_TIMEOUT) {
			/* Leave auto mode: use new counters values */
			sport->reg_counters = cfg->counters;
			sport->counters_on = 1;
			hsi_outl(cfg->counters, base, SSI_TIMEOUT_REG(port));
			dev_dbg(hsi_ctrl->dev, "Left SSR auto mode. Timeout=0x%08x\n",
				cfg->counters);
		}
	}

	return 0;
}
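Per the comment above, writing HSI_HSR_DIVISOR_AUTO as the divisor switches the receiver to auto-divisor mode (with the error counters off), and any other divisor value restores normal operation. A hedged sketch using the HSI_IOCTL_SET_RX path from Example No. 11 follows; the helper name and the concrete divisor/counters values are illustrative only.
/* Sketch only: toggle HSR auto-divisor mode; NOT_SET fields stay untouched. */
static void example_toggle_rx_auto_divisor(struct hsi_device *dev)
{
	struct hsr_ctx rx_cfg = {
		.mode		= NOT_SET,
		.flow		= NOT_SET,
		.frame_size	= NOT_SET,
		.channels	= NOT_SET,
		.divisor	= HSI_HSR_DIVISOR_AUTO,	/* enter auto mode */
		.counters	= NOT_SET,
	};

	hsi_ioctl(dev, HSI_IOCTL_SET_RX, &rx_cfg);

	/* Later: leave auto mode with an illustrative divisor and counters. */
	rx_cfg.divisor = 1;
	rx_cfg.counters = 0xFFFFF;
	hsi_ioctl(dev, HSI_IOCTL_SET_RX, &rx_cfg);
}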
Example No. 21
int hsi_set_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) &&
	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) &&
	    ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_SLEEP) &&
	    (cfg->mode != NOT_SET))
		return -EINVAL;

	if (hsi_driver_device_is_hsi(pdev)) {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;
		/* HSI only supports payload size of 32bits */
		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	} else {
		if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED)
		    && (cfg->flow != NOT_SET))
			return -EINVAL;
		/* HSI only supports payload size of 32bits */
		if ((cfg->frame_size != HSI_FRAMESIZE_MAX) &&
		    (cfg->frame_size != NOT_SET))
			return -EINVAL;
	}

	if ((cfg->channels == 0) ||
	    ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET)))
		return -EINVAL;

	if (hsi_driver_device_is_hsi(pdev)) {
		if ((cfg->divisor > HSI_MAX_RX_DIVISOR) &&
		    (cfg->divisor != HSI_HSR_DIVISOR_AUTO) &&
		    (cfg->divisor != NOT_SET))
			return -EINVAL;
	}

	if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET))
		hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK)
				      << HSI_FLOW_OFFSET), base,
			 HSI_HSR_MODE_REG(port));

	if (cfg->frame_size != NOT_SET)
		hsi_outl(cfg->frame_size, base, HSI_HSR_FRAMESIZE_REG(port));

	if (cfg->channels != NOT_SET) {
		if ((cfg->channels & (-cfg->channels)) ^ cfg->channels)
			return -EINVAL;
		else
			hsi_outl(cfg->channels, base,
				 HSI_HSR_CHANNELS_REG(port));
	}

	return hsi_set_rx_divisor(sport, cfg);
}
Example No. 22
/**
 * hsi_driver_write_dma - Program GDD [DMA] to write data from memory to
 * the hsi channel buffer.
 * @hsi_channel - pointer to the hsi_channel to write data to.
 * @data - 32-bit word pointer to the data.
 * @size - Number of 32-bit words to be transferred.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return 0 on success and < 0 on error.
 */
int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 *data,
			 unsigned int size)
{
	struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_channel->hsi_port->port_number;
	unsigned int channel = hsi_channel->channel_number;
	unsigned int sync;
	int lch;
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	u16 tmp;
	int fifo;

	if ((size < 1) || (data == NULL))
		return -EINVAL;

	lch = hsi_get_free_lch(hsi_ctrl);
	if (lch < 0) {
		dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
		return -EBUSY;	/* No free GDD logical channels. */
	} else {
		dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for write on"
					" HSI channel %d.\n", lch,
					hsi_channel->channel_number);
	}

	/* NOTE: Getting a free gdd logical channel and
	 * reserving it must be done atomically. */
	hsi_channel->write_data.lch = lch;

	/* Sync is required for SSI but not for HSI */
	sync = hsi_sync_table[HSI_SYNC_WRITE][port - 1][channel];

	src_addr = dma_map_single(hsi_ctrl->dev, data, size * 4, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(hsi_ctrl->dev, src_addr))) {
		dev_err(hsi_ctrl->dev, "Failed to create DMA write mapping.\n");
		return -ENOMEM;
	}

	tmp = HSI_SRC_BURST_4x32_BIT|
	    HSI_SRC_MEMORY_PORT |
	    HSI_DST_BURST_4x32_BIT |
	    HSI_DST_PERIPHERAL_PORT | HSI_DATA_TYPE_S32;
	hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));

	tmp = HSI_SRC_AMODE_POSTINC | HSI_DST_AMODE_CONST | sync;
	hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));

	hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));

	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
				"transfer to FIFO.\n");
			return -EFAULT;
		}
		/* HSI CDSA register takes a FIFO ID when copying to FIFO */
		hsi_outl(fifo, base, HSI_GDD_CDSA_REG(lch));
	} else {
		dest_addr = hsi_ctrl->phy_base + HSI_HST_BUFFER_CH_REG(port,
								channel);
		/* SSI CDSA register always takes a 32-bit address */
		hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
	}

	/* HSI CSSA register takes a 32-bit address when copying from memory */
	/* SSI CSSA register always takes a 32-bit address */
	hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
	hsi_outw(size, base, HSI_GDD_CEN_REG(lch));

	/* TODO : Need to clean interrupt status here to avoid spurious int */

	hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));

	return 0;
}
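As in the read case of Example No. 10, the kernel-doc above requires the hsi_controller lock around hsi_driver_write_dma(), and the source buffer must remain valid until write_done() runs. A minimal hedged sketch with a hypothetical helper:
/* Hypothetical caller sketch: tx_buf must be DMA-able and stay valid until
 * the write_done() callback runs (see do_hsi_gdd_lch()). */
static int start_tx_dma(struct hsi_channel *ch, u32 *tx_buf, unsigned int words)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	int err;

	/* hsi_controller lock must be held around hsi_driver_write_dma() */
	spin_lock_bh(&hsi_ctrl->lock);
	err = hsi_driver_write_dma(ch, tx_buf, words);
	spin_unlock_bh(&hsi_ctrl->lock);

	return err;	/* 0 on success, negative errno otherwise */
}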