/**
 * do_hsi_cawake_tasklet - tasklet deferring CAWAKE line processing
 * @hsi_p: the struct hsi_port pointer, cast to unsigned long (tasklet data)
 *
 * Runs CAWAKE processing outside hard-IRQ context: takes the controller
 * lock, enables the functional clocks, refreshes the cached CAWAKE status
 * via hsi_get_cawake() and runs hsi_do_cawake_process(), then releases
 * the clocks and the lock in reverse order.
 */
static void do_hsi_cawake_tasklet(unsigned long hsi_p)
{
	struct hsi_port *port = (struct hsi_port *)hsi_p;
	struct hsi_dev *hsi_ctrl = port->hsi_controller;

	spin_lock(&hsi_ctrl->lock);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Flag that CAWAKE handling is currently running in tasklet context */
	port->in_cawake_tasklet = true;

	/* Re-sample the CAWAKE line level before processing the transition */
	port->cawake_status = hsi_get_cawake(port);
	hsi_do_cawake_process(port);

	port->in_cawake_tasklet = false;

	hsi_clocks_disable(hsi_ctrl->dev, __func__);
	spin_unlock(&hsi_ctrl->lock);
}
/**
 * hsi_driver_int_proc - check all channels / ports for interrupts events
 * @pport: HSI port to service
 * @status_offset: interrupt status register offset
 * @enable_offset: interrupt enable register offset
 * @start: interrupt index to start on
 * @stop: interrupt index to stop on (inclusive)
 * @cawake_double_int: force processing of an additional CAWAKE event even
 *		       when the status register does not report one
 *
 * Returns the bitmap of processed events.
 *
 * This function calls the related processing functions and triggered events.
 * Events are cleared (acknowledged in the status register) after the
 * corresponding function has been called.
 *
 * Must be called with hsi_ctrl->lock held; the lock is temporarily dropped
 * around the upward notifications (hsi_port_event_handler).
 */
static u32 hsi_driver_int_proc(struct hsi_port *pport,
			       unsigned long status_offset,
			       unsigned long enable_offset,
			       unsigned int start, unsigned int stop,
			       bool cawake_double_int)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = pport->port_number;
	unsigned int channel;
	u32 status_reg;
	u32 hsr_err_reg;
	u32 channels_served = 0;

	/* Get events status: only consider events that are enabled */
	status_reg = hsi_inl(base, status_offset);
	status_reg &= hsi_inl(base, enable_offset);

	/* Check if we need to process an additional CAWAKE interrupt */
	if (cawake_double_int)
		status_reg |= HSI_CAWAKEDETECTED;

	if (pport->cawake_off_event) {
		dev_dbg(hsi_ctrl->dev, "CAWAKE detected from OFF mode.\n");
	} else if (!status_reg) {
		/* Nothing pending for this channel range */
		dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : no event, exit.\n",
			start, stop);
		return 0;
	} else {
		dev_dbg(hsi_ctrl->dev, "Channels [%d,%d] : Events 0x%08x\n",
			start, stop, status_reg);
	}

	if (status_reg & HSI_BREAKDETECTED) {
		dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port);
		/* Drop the controller lock while notifying upper layers */
		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL);
		spin_lock(&hsi_ctrl->lock);

		channels_served |= HSI_BREAKDETECTED;
	}

	if (status_reg & HSI_ERROROCCURED) {
		hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port));
		if (hsr_err_reg & HSI_HSR_ERROR_SIG)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Signal Error");
		if (hsr_err_reg & HSI_HSR_ERROR_FTE)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Frame Timeout Error");
		if (hsr_err_reg & HSI_HSR_ERROR_TBE)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "Tailing Bit Error");
		if (hsr_err_reg & HSI_HSR_ERROR_RME)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "RX Mapping Error");
		if (hsr_err_reg & HSI_HSR_ERROR_TME)
			dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x: %s\n",
				port, hsr_err_reg, "TX Mapping Error");

		/* Clear error event bit */
		hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port));
		if (hsr_err_reg) {	/* ignore spurious errors */
			/* Drop the controller lock while notifying upward */
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL);
			spin_lock(&hsi_ctrl->lock);
		} else
			dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n");

		channels_served |= HSI_ERROROCCURED;
	}

	/* Per-channel TX/RX servicing over the [start, stop] range */
	for (channel = start; channel <= stop; channel++) {
		if (status_reg & HSI_HST_DATAACCEPT(channel)) {
			hsi_do_channel_tx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HST_DATAACCEPT(channel);
		}

		if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) {
			hsi_do_channel_rx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HSR_DATAAVAILABLE(channel);
		}

		if (status_reg & HSI_HSR_DATAOVERRUN(channel)) {
			/* HSI_TODO : Data overrun handling */
			dev_err(hsi_ctrl->dev,
				"Data overrun in real time mode !\n");
		}
	}

	/* CAWAKE falling or rising edge detected */
	if ((status_reg & HSI_CAWAKEDETECTED) || pport->cawake_off_event) {
		/*
		 * -EAGAIN: processing is deferred; leave the CAWAKE event
		 * unacknowledged so it can be handled again later.
		 */
		if (hsi_do_cawake_process(pport) == -EAGAIN)
			goto proc_done;

		channels_served |= HSI_CAWAKEDETECTED;
		pport->cawake_off_event = false;
	}
proc_done:
	/* Reset (acknowledge) only the status bits actually served */
	hsi_outl(channels_served, base, status_offset);

	return channels_served;
}