Example No. 1
/**
 * hsi_driver_int_proc - check all channels / ports for interrupt events
 * @pport: HSI port to process
 * @status_offset: interrupt status register offset
 * @enable_offset: interrupt enable register offset
 * @start: interrupt index to start on
 * @stop: interrupt index to stop on
 *
 * This function calls the related processing functions and triggers events.
 */
static void hsi_driver_int_proc(struct hsi_port *pport,
		unsigned long status_offset, unsigned long enable_offset,
		unsigned int start, unsigned int stop)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = pport->port_number;
	unsigned int channel;
	u32 status_reg;
	u32 hsr_err_reg;
	u32 channels_served = 0;

	status_reg = hsi_inl(base, status_offset);
	status_reg &= hsi_inl(base, enable_offset);

	for (channel = start; channel < stop; channel++) {
		if (status_reg & HSI_HST_DATAACCEPT(channel)) {
			do_channel_tx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HST_DATAACCEPT(channel);
		}

		if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) {
			do_channel_rx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HSR_DATAAVAILABLE(channel);
		}
	}

	if (status_reg & HSI_BREAKDETECTED) {
		dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port);
		hsi_outl(0, base, HSI_HSR_BREAK_REG(port));
		hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL);
		channels_served |= HSI_BREAKDETECTED;
	}

	if (status_reg & HSI_ERROROCCURED) {
		hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port));
		dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x\n",
							port, hsr_err_reg);
		hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port));
		if (hsr_err_reg) /* ignore spurious errors */
			hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL);
		else
			dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n");

		channels_served |= HSI_ERROROCCURED;
	}

	hsi_outl(channels_served, base, status_offset);
}
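The function above follows the usual mask-and-ack scheme: only events that are both raised in the status register and unmasked in the enable register are dispatched, and only the bits that were actually served are written back, which the closing write suggests acts as a write-1-to-clear acknowledge. A minimal, self-contained model of that scheme in plain C (the register variables and names are stand-ins, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the memory-mapped status/enable registers. */
static uint32_t status_reg_model = 0x0000000Au;
static uint32_t enable_reg_model = 0x00000002u;

static uint32_t int_proc_model(void)
{
	/* Only look at events that are both raised and enabled. */
	uint32_t pending = status_reg_model & enable_reg_model;
	uint32_t served = 0;
	unsigned int bit;

	for (bit = 0; bit < 32; bit++) {
		if (pending & (1u << bit)) {
			/* ... dispatch to the per-event handler here ... */
			served |= 1u << bit;
		}
	}

	/* Writing back only the served bits models the write-1-to-clear ack:
	 * unhandled (or newly raised) bits stay pending. */
	status_reg_model &= ~served;
	return served;
}

int main(void)
{
	printf("served=0x%08x\n", (unsigned int)int_proc_model()); /* 0x00000002 */
	return 0;
}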
Example No. 2
static void do_channel_rx(struct hsi_channel *ch)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	void __iomem *base = ch->hsi_port->hsi_controller->base;
	unsigned int n_ch;
	unsigned int n_p;
	unsigned int irq;
	long buff_offset;
	int rx_poll = 0;
	int data_read = 0;

	n_ch = ch->channel_number;
	n_p = ch->hsi_port->port_number;
	irq = ch->hsi_port->n_irq;

	spin_lock(&hsi_ctrl->lock);

	if (ch->flags & HSI_CH_RX_POLL)
		rx_poll = 1;

	if (ch->read_data.addr) {
		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, n_p, n_ch);
		if (buff_offset >= 0) {
			data_read = 1;
			*(ch->read_data.addr) = hsi_inl(base, buff_offset);
		}
	}

	hsi_outl_and(~HSI_HSR_DATAAVAILABLE(n_ch), base,
				HSI_SYS_MPU_ENABLE_CH_REG(n_p, irq, n_ch));
	hsi_reset_ch_read(ch);

	spin_unlock(&hsi_ctrl->lock);

	if (rx_poll)
		hsi_port_event_handler(ch->hsi_port,
				HSI_EVENT_HSR_DATAAVAILABLE, (void *)n_ch);

	if (data_read)
		(*ch->read_done)(ch->dev, 1);
}
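do_channel_rx() masks the channel's DATAAVAILABLE bit with hsi_outl_and() before resetting the read state. As a hedged illustration only (the driver's real accessor may differ in detail), such a helper is typically a read-modify-write with an AND mask on an ioremapped register:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: not the driver's actual hsi_outl_and() implementation. */
static inline void outl_and_sketch(u32 mask, void __iomem *base,
				   unsigned long offset)
{
	u32 val = readl(base + offset);

	/* Clear the bits that are zero in the mask, keep the others. */
	writel(val & mask, base + offset);
}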
/**
 * hsi_driver_int_proc - check all channels / ports for interrupt events
 * @pport: HSI port to process
 * @status_offset: interrupt status register offset
 * @enable_offset: interrupt enable register offset
 * @start: interrupt index to start on
 * @stop: interrupt index to stop on
 * @cawake_double_int: true if an additional CAWAKE interrupt must be processed
 *
 * Returns the bitmap of processed events.
 *
 * This function calls the related processing functions and triggers events.
 * Events are cleared after the corresponding function has been called.
 */
static u32 hsi_driver_int_proc(struct hsi_port *pport,
				unsigned long status_offset,
				unsigned long enable_offset, unsigned int start,
				unsigned int stop,
				bool cawake_double_int)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = pport->port_number;
	unsigned int channel;
	u32 status_reg;
	u32 hsr_err_reg;
	u32 channels_served = 0;

	/* Get events status */
	status_reg = hsi_inl(base, status_offset);
	status_reg &= hsi_inl(base, enable_offset);

	/* Check if we need to process an additional CAWAKE interrupt */
	if (cawake_double_int)
		status_reg |= HSI_CAWAKEDETECTED;

	if (pport->cawake_off_event) {
		dev_dbg(hsi_ctrl->dev, "CAWAKE detected from OFF mode.\n");
	} else if (!status_reg) {
		dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : no event, exit.\n",
			start, stop);
		return 0;
	} else {
		dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : Events 0x%08x\n",
			start, stop, status_reg);
	}

	if (status_reg & HSI_BREAKDETECTED) {
		dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port);
		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL);
		spin_lock(&hsi_ctrl->lock);

		channels_served |= HSI_BREAKDETECTED;
	}

	if (status_reg & HSI_ERROROCCURED) {
		hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port));
		if (hsr_err_reg & HSI_HSR_ERROR_SIG)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Signal Error");
		if (hsr_err_reg & HSI_HSR_ERROR_FTE)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Frame Timeout Error");
		if (hsr_err_reg & HSI_HSR_ERROR_TBE)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Tailing Bit Error");
		if (hsr_err_reg & HSI_HSR_ERROR_RME)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "RX Mapping Error");
		if (hsr_err_reg & HSI_HSR_ERROR_TME)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "TX Mapping Error");
		/* Clear error event bit */
		hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port));
		if (hsr_err_reg) {	/* ignore spurious errors */
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL);
			spin_lock(&hsi_ctrl->lock);
		} else
			dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n");

		channels_served |= HSI_ERROROCCURED;
	}

	for (channel = start; channel <= stop; channel++) {
		if (status_reg & HSI_HST_DATAACCEPT(channel)) {
			hsi_do_channel_tx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HST_DATAACCEPT(channel);
		}

		if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) {
			hsi_do_channel_rx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HSR_DATAAVAILABLE(channel);
		}

		if (status_reg & HSI_HSR_DATAOVERRUN(channel)) {
			/* HSI_TODO: Data overrun handling */
			dev_err(hsi_ctrl->dev,
				"Data overrun in real time mode !\n");
		}
	}

	/* CAWAKE falling or rising edge detected */
	if ((status_reg & HSI_CAWAKEDETECTED) || pport->cawake_off_event) {
		if (hsi_do_cawake_process(pport) == -EAGAIN)
			goto proc_done;

		channels_served |= HSI_CAWAKEDETECTED;
		pport->cawake_off_event = false;
	}
proc_done:
	/* Reset status bits */
	hsi_outl(channels_served, base, status_offset);

	return channels_served;
}
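The five dev_err() calls in the error branch above differ only in the bit tested and the message text. A table-driven variant is sketched below; the HSI_HSR_ERROR_* macros and dev_err() are the ones used above, but the table and helper are illustrative only and assume the driver's own headers and types:

static const struct {
	u32 bit;
	const char *name;
} hsr_errors_sketch[] = {
	{ HSI_HSR_ERROR_SIG, "Signal Error" },
	{ HSI_HSR_ERROR_FTE, "Frame Timeout Error" },
	{ HSI_HSR_ERROR_TBE, "Tailing Bit Error" },
	{ HSI_HSR_ERROR_RME, "RX Mapping Error" },
	{ HSI_HSR_ERROR_TME, "TX Mapping Error" },
};

/* Log every error bit set in hsr_err_reg, one line per error. */
static void log_hsr_errors_sketch(struct hsi_dev *hsi_ctrl, unsigned int port,
				  u32 hsr_err_reg)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hsr_errors_sketch); i++)
		if (hsr_err_reg & hsr_errors_sketch[i].bit)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, hsr_errors_sketch[i].name);
}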
/**
 * hsi_do_cawake_process - CAWAKE line management
 * @pport: HSI port to process
 *
 * This function handles the CAWAKE L/H transitions and calls the event
 * callback accordingly.
 *
 * Returns 0 if the CAWAKE event was processed, -EAGAIN if CAWAKE event
 * processing is delayed due to a pending DMA interrupt.
 * If -EAGAIN is returned, pport->hsi_tasklet has to be re-scheduled once the
 * DMA tasklet has been executed. This should be done automatically by the
 * driver.
 */
int hsi_do_cawake_process(struct hsi_port *pport)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	bool cawake_status = hsi_get_cawake(pport);

	if (pport->wake_rx_3_wires_mode) {
		dev_warn(hsi_ctrl->dev, "CAWAKE edge in RX 3 wires, exiting\n");
		return 0;
	}

	/* Deal with init condition */
	if (unlikely(pport->cawake_status < 0))
		pport->cawake_status = !cawake_status;
	dev_dbg(hsi_ctrl->dev, "%s: Interrupts are not enabled but CAWAKE came."
		"hsi: port[%d] irq[%d] irq_en=0x%08x dma_irq_en=0x%08x\n",
		__func__, pport->port_number, pport->n_irq,
		hsi_inl(pport->hsi_controller->base,
			HSI_SYS_MPU_ENABLE_REG(pport->port_number,
					pport->n_irq)),
		hsi_inl(pport->hsi_controller->base,
			HSI_SYS_GDD_MPU_IRQ_ENABLE_REG));

	/* Check CAWAKE line status */
	if (cawake_status) {
		dev_dbg(hsi_ctrl->dev, "CAWAKE rising edge detected\n");

		/* Check for possible mismatch (race condition) */
		if (unlikely(pport->cawake_status)) {
			dev_warn(hsi_ctrl->dev,
				"Missed previous CAWAKE falling edge...\n");
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN,
						NULL);
			spin_lock(&hsi_ctrl->lock);

			/* In case another CAWAKE interrupt occurred and
			 * caused a race condition, clear the CAWAKE backup
			 * interrupt to avoid handling the race condition
			 * twice. */
			hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED,
						 true);
		}
		pport->cawake_status = 1;

		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP, NULL);
		spin_lock(&hsi_ctrl->lock);

		/*
		 * HSI - OMAP4430-2.2BUG00055: i702
		 * HSI: DSP Swakeup generated is the same as MPU Swakeup.
		 * The system cannot enter off mode due to the DSP.
		 */
		if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_i702_PM_HSI_SWAKEUP))
			omap_pm_clear_dsp_wake_up();

	} else {
		dev_dbg(hsi_ctrl->dev, "CAWAKE falling edge detected\n");

		/* Check for pending DMA interrupt */
		if (hsi_is_dma_read_int_pending(hsi_ctrl)) {
			dev_dbg(hsi_ctrl->dev, "Pending DMA Read interrupt "
					       "before CAWAKE->L, exiting "
					       "Interrupt tasklet.\n");
			return -EAGAIN;
		}
		if (unlikely(!pport->cawake_status)) {
			dev_warn(hsi_ctrl->dev,
				"Missed previous CAWAKE rising edge...\n");
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP,
						NULL);
			spin_lock(&hsi_ctrl->lock);

			/* In case another CAWAKE interrupt occurred and
			 * caused a race condition, clear the CAWAKE backup
			 * interrupt to avoid handling the race condition
			 * twice. */
			hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED,
						 true);
		}
		pport->cawake_status = 0;

		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN, NULL);
		spin_lock(&hsi_ctrl->lock);
	}

	/* If another CAWAKE event occurred while the previous one is still
	 * being processed, do not clear the status bit */
	cawake_status = hsi_get_cawake(pport);
	if (cawake_status != pport->cawake_status) {
		dev_warn(hsi_ctrl->dev, "CAWAKE line changed to %d while CAWAKE"
					"event is still being processed\n",
					cawake_status);
		return -EAGAIN;
	}

	return 0;
}
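The kernel-doc above states that when -EAGAIN is returned, pport->hsi_tasklet has to be re-scheduled once the DMA tasklet has run, and that the driver does this automatically. A hedged sketch of how the DMA (GDD) tasklet could honour that contract; the function name and the port index are placeholders, not driver code:

static void hsi_gdd_tasklet_sketch(unsigned long data)
{
	struct hsi_dev *hsi_ctrl = (struct hsi_dev *)data;

	/* ... process the pending GDD (DMA) events here ... */

	/* CAWAKE processing was postponed with -EAGAIN, so run the port
	 * interrupt tasklet again now that the DMA work is done. */
	tasklet_schedule(&hsi_ctrl->hsi_port[0].hsi_tasklet);
}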
/* HSR_AVAILABLE interrupt processing */
static void hsi_do_channel_rx(struct hsi_channel *ch)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	void __iomem *base = ch->hsi_port->hsi_controller->base;
	unsigned int n_ch;
	unsigned int n_p;
	unsigned int irq;
	long buff_offset;
	int rx_poll = 0;
	int data_read = 0;
	int fifo, fifo_words_avail;

	n_ch = ch->channel_number;
	n_p = ch->hsi_port->port_number;
	irq = ch->hsi_port->n_irq;

	dev_dbg(hsi_ctrl->dev,
		"Data Available interrupt for channel %d.\n", n_ch);

	/* Check if there is data in FIFO available for reading */
	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
		fifo = hsi_fifo_get_id(hsi_ctrl, n_ch, n_p);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
					       "channel %d.\n", n_ch);
			return;
		}
		fifo_words_avail = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
		if (!fifo_words_avail) {
			dev_dbg(hsi_ctrl->dev,
				"WARNING: RX FIFO %d empty before CPU copy\n",
				fifo);

			/* Do not disable the interrupt because another
			 * interrupt can still come, this time with a real
			 * frame. */
			return;
		}
	}

	/* Disable interrupts if not needed for polling */
	if (!(ch->flags & HSI_CH_RX_POLL))
		hsi_driver_disable_read_interrupt(ch);

	/*
	 * Check race condition: RX transmission initiated but DMA transmission
	 * already started - acknowledge then ignore interrupt occurrence
	 */
	if (ch->read_data.lch != -1) {
		dev_warn(hsi_ctrl->dev,
			"Race condition between RX Int ch %d and DMA %0x\n",
			n_ch, ch->read_data.lch);
		goto done;
	}

	if (ch->flags & HSI_CH_RX_POLL)
		rx_poll = 1;

	if (ch->read_data.addr) {
		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, n_p, n_ch);
		if (buff_offset >= 0) {
			data_read = 1;
			*(ch->read_data.addr) = hsi_inl(base, buff_offset);
		}
	}

	hsi_reset_ch_read(ch);

done:
	if (rx_poll) {
		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(ch->hsi_port,
				       HSI_EVENT_HSR_DATAAVAILABLE,
				       (void *)n_ch);
		spin_lock(&hsi_ctrl->lock);
	}

	if (data_read) {
		spin_unlock(&hsi_ctrl->lock);
		dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback.\n", n_ch);
		(*ch->read_done) (ch->dev, 1);
		spin_lock(&hsi_ctrl->lock);
	}
}
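The unlock / notify / relock idiom around hsi_port_event_handler() recurs in most of these examples: client callbacks may re-enter the driver and take hsi_ctrl->lock themselves, so the lock is dropped around them. A small helper capturing the pattern is sketched here under an invented name, assuming the driver's existing types:

static void hsi_port_event_unlocked(struct hsi_port *pport,
				    unsigned int event, void *arg)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;

	/* Drop the controller lock so the callback can call back into the
	 * driver without deadlocking, then retake it before returning. */
	spin_unlock(&hsi_ctrl->lock);
	hsi_port_event_handler(pport, event, arg);
	spin_lock(&hsi_ctrl->lock);
}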
Example No. 6
/**
 * hsi_do_cawake_process - CAWAKE line management
 * @pport: HSI port to process
 *
 * This function handles the CAWAKE L/H transitions and calls the event
 * callback accordingly.
 *
 * Returns 0 if the CAWAKE event was processed, -EAGAIN if CAWAKE event
 * processing is delayed due to a pending DMA interrupt.
 * If -EAGAIN is returned, pport->hsi_tasklet has to be re-scheduled once the
 * DMA tasklet has been executed. This should be done automatically by the
 * driver.
 */
int hsi_do_cawake_process(struct hsi_port *pport)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	bool cawake_status = hsi_get_cawake(pport);

	if (pport->wake_rx_3_wires_mode) {
		dev_warn(hsi_ctrl->dev, "CAWAKE edge in RX 3 wires, exiting\n");
		return 0;
	}

	/* Deal with init condition */
	if (unlikely(pport->cawake_status < 0))
		pport->cawake_status = !cawake_status;

	/* Check CAWAKE line status */
	if (cawake_status) {
		dev_dbg(hsi_ctrl->dev, "CAWAKE rising edge detected\n");

		/* Check for possible mismatch (race condition) */
		if (unlikely(pport->cawake_status)) {
			dev_warn(hsi_ctrl->dev,
				"Missed previous CAWAKE falling edge...\n");
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN,
						NULL);
			spin_lock(&hsi_ctrl->lock);

			/* In case another CAWAKE interrupt occurred and
			 * caused a race condition, clear the CAWAKE backup
			 * interrupt to avoid handling the race condition
			 * twice. */
			hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED,
						 true);
		}
		pport->cawake_status = 1;

		/* Allow data reception */
		hsi_hsr_resume(hsi_ctrl);

		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP, NULL);
		spin_lock(&hsi_ctrl->lock);

		/*
		 * HSI - OMAP4430-2.2BUG00055: i702
		 * HSI: DSP Swakeup generated is the same as MPU Swakeup.
		 * The system cannot enter off mode due to the DSP.
		 */
		if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_i702_PM_HSI_SWAKEUP))
			omap_pm_clear_dsp_wake_up();

	} else {
		dev_dbg(hsi_ctrl->dev, "CAWAKE falling edge detected\n");

		/* Check for pending DMA interrupt */
		if (hsi_is_dma_read_int_pending(hsi_ctrl)) {
			dev_dbg(hsi_ctrl->dev, "Pending DMA Read interrupt "
					       "before CAWAKE->L, exiting "
					       "Interrupt tasklet.\n");
			return -EAGAIN;
		}
		if (unlikely(!pport->cawake_status)) {
			dev_warn(hsi_ctrl->dev,
				"Missed previous CAWAKE rising edge...\n");
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP,
						NULL);
			spin_lock(&hsi_ctrl->lock);

			/* In case another CAWAKE interrupt occurred and
			 * caused a race condition, clear the CAWAKE backup
			 * interrupt to avoid handling the race condition
			 * twice. */
			hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED,
						 true);
		}
		pport->cawake_status = 0;

		/* Forbid data reception */
		hsi_hsr_suspend(hsi_ctrl);

		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN, NULL);
		spin_lock(&hsi_ctrl->lock);
	}

	return 0;
}
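Both versions of hsi_do_cawake_process() keep pport->cawake_status as a signed value where a negative value means "level not known yet": on the first interrupt it is set to the opposite of the current line level so the code always sees a genuine transition, and a mismatch on later interrupts is reported as a missed edge. A self-contained plain-C model of that bookkeeping (names and messages are illustrative only):

#include <stdbool.h>
#include <stdio.h>

static int cawake_status = -1;	/* <0: unknown, else last seen level */

static void cawake_edge(bool line_is_high)
{
	/* First interrupt: pretend the previous level was the opposite of the
	 * current one, so the code below sees a real transition. */
	if (cawake_status < 0)
		cawake_status = !line_is_high;

	if (line_is_high) {
		if (cawake_status)	/* already high: a falling edge was lost */
			printf("missed CAWAKE falling edge\n");
		printf("CAWAKE rising edge\n");
	} else {
		if (!cawake_status)	/* already low: a rising edge was lost */
			printf("missed CAWAKE rising edge\n");
		printf("CAWAKE falling edge\n");
	}
	cawake_status = line_is_high;
}

int main(void)
{
	cawake_edge(true);	/* reported as a rising edge, no warning */
	cawake_edge(false);	/* falling edge */
	cawake_edge(false);	/* falling edge again: rising edge was missed */
	return 0;
}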
Example No. 7
static void do_hsi_gdd_lch(struct hsi_dev *hsi_ctrl, unsigned int gdd_lch)
{
	void __iomem *base = hsi_ctrl->base;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	struct hsi_channel *ch;
	unsigned int port;
	unsigned int channel;
	unsigned int is_read_path;
	u32 gdd_csr;
	dma_addr_t dma_h;
	size_t size;
	int fifo, fifo_words_avail;

	if (hsi_get_info_from_gdd_lch(hsi_ctrl, gdd_lch, &port, &channel,
				      &is_read_path) < 0) {
		dev_err(hsi_ctrl->dev, "Unable to match the DMA channel %d with"
			" an HSI channel\n", gdd_lch);
		return;
	} else {
		dev_dbg(hsi_ctrl->dev, "DMA event on gdd_lch=%d => port=%d, "
			"channel=%d, read=%d\n", gdd_lch, port, channel,
			is_read_path);
	}

	hsi_outl_and(~HSI_GDD_LCH(gdd_lch), base,
		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	/* Warning: CSR register is cleared automatically by HW after SW read */
	gdd_csr = hsi_inw(base, HSI_GDD_CSR_REG(gdd_lch));

	if (!(gdd_csr & HSI_CSR_TOUT)) {
		if (is_read_path) {	/* Read path */
			dma_h = hsi_inl(base, HSI_GDD_CDSA_REG(gdd_lch));
			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
			dma_sync_single_for_cpu(hsi_ctrl->dev, dma_h, size,
						DMA_FROM_DEVICE);
			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
					 DMA_FROM_DEVICE);
			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
			hsi_reset_ch_read(ch);

			dev_dbg(hsi_ctrl->dev, "Calling ch %d read callback "
					"(size %d).\n", channel,  size/4);
			spin_unlock(&hsi_ctrl->lock);
			ch->read_done(ch->dev, size / 4);
			spin_lock(&hsi_ctrl->lock);

			/* Check if FIFO is correctly emptied */
			if (hsi_driver_device_is_hsi(pdev)) {
				fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
				if (unlikely(fifo < 0)) {
					dev_err(hsi_ctrl->dev, "No valid FIFO "
						"id found for channel %d.\n",
						channel);
					return;
				}
				fifo_words_avail =
					hsi_get_rx_fifo_occupancy(hsi_ctrl,
								fifo);
				if (fifo_words_avail)
					dev_dbg(hsi_ctrl->dev,
						"FIFO %d not empty "
						"after DMA copy, remaining "
						"%d/%d frames\n",
						fifo, fifo_words_avail,
						HSI_HSR_FIFO_SIZE);
			}
			/* Re-enable interrupts for polling if needed */
			if (ch->flags & HSI_CH_RX_POLL)
				hsi_driver_enable_read_interrupt(ch, NULL);
		} else {	/* Write path */
			dma_h = hsi_inl(base, HSI_GDD_CSSA_REG(gdd_lch));
			size = hsi_inw(base, HSI_GDD_CEN_REG(gdd_lch)) * 4;
			dma_unmap_single(hsi_ctrl->dev, dma_h, size,
					 DMA_TO_DEVICE);
			ch = hsi_ctrl_get_ch(hsi_ctrl, port, channel);
			hsi_reset_ch_write(ch);

			dev_dbg(hsi_ctrl->dev, "Calling ch %d write callback "
					"(size %d).\n", channel, size/4);
			spin_unlock(&hsi_ctrl->lock);
			ch->write_done(ch->dev, size / 4);
			spin_lock(&hsi_ctrl->lock);
		}
	} else {
		dev_err(hsi_ctrl->dev, "Time-out overflow Error on GDD transfer"
			" on gdd channel %d\n", gdd_lch);
		spin_unlock(&hsi_ctrl->lock);
		/* TODO : need to perform a DMA soft reset */
		hsi_port_event_handler(&hsi_ctrl->hsi_port[port - 1],
				       HSI_EVENT_ERROR, NULL);
		spin_lock(&hsi_ctrl->lock);
	}
}
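One detail of the size handling above: the GDD element counter read from HSI_GDD_CEN_REG counts 32-bit frames, while the DMA mapping calls take bytes, so the code multiplies by 4 for dma_unmap_single() and divides by 4 again for the read_done()/write_done() callbacks. A trivial self-contained check of that round trip (purely illustrative):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	unsigned int cen = 128;	/* frames (32-bit words) reported by the GDD */
	size_t size = cen * 4;	/* bytes, as passed to dma_unmap_single() */

	assert(size / 4 == cen);	/* frames reported to the callback */
	return 0;
}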