/** * hsi_driver_cancel_write_interrupt - Cancel pending write interrupt. * @dev - hsi device channel where to cancel the pending interrupt. * * Return: -ECANCELED : write cancel success, data not transfered to TX FIFO * 0 : transfer is already over, data already transfered to TX FIFO * * Note: whatever returned value, write callback will not be called after * write cancel. */ int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch) { struct hsi_port *p = ch->hsi_port; unsigned int port = p->port_number; unsigned int channel = ch->channel_number; void __iomem *base = p->hsi_controller->base; u32 status_reg; long buff_offset; status_reg = hsi_inl(base, HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel)); if (!(status_reg & HSI_HST_DATAACCEPT(channel))) { dev_dbg(&ch->dev->device, "Write cancel on not " "enabled channel %d ENABLE REG 0x%08X", channel, status_reg); } status_reg &= hsi_inl(base, HSI_SYS_MPU_STATUS_CH_REG(port, p->n_irq, channel)); hsi_outl_and(~HSI_HST_DATAACCEPT(channel), base, HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel)); buff_offset = hsi_hst_bufstate_f_reg(p->hsi_controller, port, channel); if (buff_offset >= 0) hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), base, buff_offset); hsi_reset_ch_write(ch); return status_reg & HSI_HST_DATAACCEPT(channel) ? 0 : -ECANCELED; }
/** * hsi_driver_int_proc - check all channels / ports for interrupts events * @hsi_ctrl - HSI controler data * @status_offset: interrupt status register offset * @enable_offset: interrupt enable regiser offset * @start: interrupt index to start on * @stop: interrupt index to stop on * * This function calls the related processing functions and triggered events */ static void hsi_driver_int_proc(struct hsi_port *pport, unsigned long status_offset, unsigned long enable_offset, unsigned int start, unsigned int stop) { struct hsi_dev *hsi_ctrl = pport->hsi_controller; void __iomem *base = hsi_ctrl->base; unsigned int port = pport->port_number; unsigned int channel; u32 status_reg; u32 hsr_err_reg; u32 channels_served = 0; status_reg = hsi_inl(base, status_offset); status_reg &= hsi_inl(base, enable_offset); for (channel = start; channel < stop; channel++) { if (status_reg & HSI_HST_DATAACCEPT(channel)) { do_channel_tx(&pport->hsi_channel[channel]); channels_served |= HSI_HST_DATAACCEPT(channel); } if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) { do_channel_rx(&pport->hsi_channel[channel]); channels_served |= HSI_HSR_DATAAVAILABLE(channel); } } if (status_reg & HSI_BREAKDETECTED) { dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port); hsi_outl(0, base, HSI_HSR_BREAK_REG(port)); hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL); channels_served |= HSI_BREAKDETECTED; } if (status_reg & HSI_ERROROCCURED) { hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port)); dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x\n", port, hsr_err_reg); hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port)); if (hsr_err_reg) /* ignore spurious errors */ hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL); else dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n"); channels_served |= HSI_ERROROCCURED; } hsi_outl(channels_served, base, status_offset); }
/* Enables the Data Accepted Interrupt of HST for the given channel */
int hsi_driver_enable_write_interrupt(struct hsi_channel *ch, u32 *data)
{
	struct hsi_port *pport = ch->hsi_port;
	unsigned int port = pport->port_number;
	unsigned int channel = ch->channel_number;
	void __iomem *base = pport->hsi_controller->base;

	/* @data is not used here; the parameter is kept for API symmetry */
	hsi_outl_or(HSI_HST_DATAACCEPT(channel), base,
		    HSI_SYS_MPU_ENABLE_CH_REG(port, pport->n_irq, channel));

	return 0;
}
/**
 * hsi_driver_int_proc - check all channels / ports for interrupts events
 * @pport: HSI port to process
 * @status_offset: interrupt status register offset
 * @enable_offset: interrupt enable register offset
 * @start: interrupt index to start on
 * @stop: interrupt index to stop on
 * @cawake_double_int: true to force processing of an extra CAWAKE event
 *
 * returns the bitmap of processed events
 *
 * This function calls the related processing functions and triggered events.
 * Events are cleared after corresponding function has been called.
 */
static u32 hsi_driver_int_proc(struct hsi_port *pport,
			       unsigned long status_offset,
			       unsigned long enable_offset,
			       unsigned int start, unsigned int stop,
			       bool cawake_double_int)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = pport->port_number;
	unsigned int channel;
	u32 status_reg;
	u32 hsr_err_reg;
	u32 channels_served = 0;

	/* Get events status: only enabled events are considered */
	status_reg = hsi_inl(base, status_offset);
	status_reg &= hsi_inl(base, enable_offset);

	/* Check if we need to process an additional CAWAKE interrupt */
	if (cawake_double_int)
		status_reg |= HSI_CAWAKEDETECTED;

	if (pport->cawake_off_event) {
		dev_dbg(hsi_ctrl->dev, "CAWAKE detected from OFF mode.\n");
	} else if (!status_reg) {
		dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : no event, exit.\n",
			start, stop);
		return 0;
	} else {
		dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : Events 0x%08x\n",
			start, stop, status_reg);
	}

	if (status_reg & HSI_BREAKDETECTED) {
		dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port);
		/* Drop the controller lock around the event callback */
		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL);
		spin_lock(&hsi_ctrl->lock);

		channels_served |= HSI_BREAKDETECTED;
	}

	if (status_reg & HSI_ERROROCCURED) {
		hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port));
		if (hsr_err_reg & HSI_HSR_ERROR_SIG)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Signal Error");
		if (hsr_err_reg & HSI_HSR_ERROR_FTE)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Frame Timeout Error");
		if (hsr_err_reg & HSI_HSR_ERROR_TBE)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Tailing Bit Error");
		if (hsr_err_reg & HSI_HSR_ERROR_RME)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "RX Mapping Error");
		if (hsr_err_reg & HSI_HSR_ERROR_TME)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "TX Mapping Error");

		/* Clear error event bit */
		hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port));
		if (hsr_err_reg) {	/* ignore spurious errors */
			/* Drop the controller lock around the callback */
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL);
			spin_lock(&hsi_ctrl->lock);
		} else
			dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n");

		channels_served |= HSI_ERROROCCURED;
	}

	/* Note: stop is inclusive in this variant */
	for (channel = start; channel <= stop; channel++) {
		if (status_reg & HSI_HST_DATAACCEPT(channel)) {
			hsi_do_channel_tx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HST_DATAACCEPT(channel);
		}
		if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) {
			hsi_do_channel_rx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HSR_DATAAVAILABLE(channel);
		}
		if (status_reg & HSI_HSR_DATAOVERRUN(channel)) {
			/* HSI_TODO : Data overrun handling */
			dev_err(hsi_ctrl->dev,
				"Data overrun in real time mode !\n");
		}
	}

	/* CAWAKE falling or rising edge detected */
	if ((status_reg & HSI_CAWAKEDETECTED) || pport->cawake_off_event) {
		/* -EAGAIN: leave CAWAKE unacked so it is reprocessed later */
		if (hsi_do_cawake_process(pport) == -EAGAIN)
			goto proc_done;

		channels_served |= HSI_CAWAKEDETECTED;
		pport->cawake_off_event = false;
	}
proc_done:
	/* Reset status bits of every event we handled */
	hsi_outl(channels_served, base, status_offset);

	return channels_served;
}