static void do_hsi_tasklet(unsigned long hsi_port)
{
	struct hsi_port *pport = (struct hsi_port *)hsi_port;
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = pport->port_number;
	unsigned int irq = pport->n_irq;
	u32 status_reg;
	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);

	hsi_driver_int_proc(pport, HSI_SYS_MPU_STATUS_REG(port, irq),
			    HSI_SYS_MPU_ENABLE_REG(port, irq), 0,
			    min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX));

	if (pport->max_ch > HSI_SSI_CHANNELS_MAX)
		hsi_driver_int_proc(pport,
				    HSI_SYS_MPU_U_STATUS_REG(port, irq),
				    HSI_SYS_MPU_U_ENABLE_REG(port, irq),
				    HSI_SSI_CHANNELS_MAX, pport->max_ch);

	status_reg = hsi_inl(base, HSI_SYS_MPU_STATUS_REG(port, irq)) &
		     hsi_inl(base, HSI_SYS_MPU_ENABLE_REG(port, irq));

	if (hsi_driver_device_is_hsi(pd))
		status_reg |=
		    (hsi_inl(base, HSI_SYS_MPU_U_STATUS_REG(port, irq)) &
		     hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port, irq)));

	if (status_reg)
		tasklet_hi_schedule(&pport->hsi_tasklet);
	else
		enable_irq(pport->irq);
}
static u32 hsi_process_dma_event(struct hsi_dev *hsi_ctrl)
{
	void __iomem *base = hsi_ctrl->base;
	unsigned int gdd_lch = 0;
	u32 status_reg = 0;
	u32 lch_served = 0;
	unsigned int gdd_max_count = hsi_ctrl->gdd_chan_count;

	status_reg = hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	if (!status_reg) {
		dev_dbg(hsi_ctrl->dev, "DMA : no event, exit.\n");
		return 0;
	}

	for (gdd_lch = 0; gdd_lch < gdd_max_count; gdd_lch++) {
		if (status_reg & HSI_GDD_LCH(gdd_lch)) {
			do_hsi_gdd_lch(hsi_ctrl, gdd_lch);
			lch_served |= HSI_GDD_LCH(gdd_lch);
		}
	}

	/* Acknowledge interrupt for DMA channel */
	hsi_outl(lch_served, base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);

	return status_reg;
}
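/*
 * A minimal caller sketch (hypothetical, not part of the original driver):
 * since hsi_process_dma_event() returns the enabled status bits it served,
 * a bottom half can loop until a call finds no pending event, then re-enable
 * the GDD interrupt line. The gdd_irq field name is an assumption made for
 * this example.
 */
static void __maybe_unused hsi_example_gdd_bottom_half(struct hsi_dev *hsi_ctrl)
{
	/* Keep serving DMA logical channels while events are pending */
	while (hsi_process_dma_event(hsi_ctrl) != 0)
		;

	enable_irq(hsi_ctrl->gdd_irq);	/* assumed field name */
}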
/**
 * hsi_driver_cancel_write_interrupt - Cancel pending write interrupt.
 * @ch - hsi device channel where to cancel the pending interrupt.
 *
 * Return: -ECANCELED : write cancel success, data not transferred to TX FIFO
 *	   0 : transfer is already over, data already transferred to TX FIFO
 *
 * Note: whatever the returned value, the write callback will not be called
 * after a write cancel.
 */
int hsi_driver_cancel_write_interrupt(struct hsi_channel *ch)
{
	struct hsi_port *p = ch->hsi_port;
	unsigned int port = p->port_number;
	unsigned int channel = ch->channel_number;
	void __iomem *base = p->hsi_controller->base;
	u32 status_reg;
	long buff_offset;

	status_reg = hsi_inl(base,
			HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
	if (!(status_reg & HSI_HST_DATAACCEPT(channel))) {
		dev_dbg(&ch->dev->device, "Write cancel on not "
			"enabled channel %d ENABLE REG 0x%08X", channel,
			status_reg);
	}
	status_reg &= hsi_inl(base,
			HSI_SYS_MPU_STATUS_CH_REG(port, p->n_irq, channel));

	hsi_outl_and(~HSI_HST_DATAACCEPT(channel), base,
		     HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));

	buff_offset = hsi_hst_bufstate_f_reg(p->hsi_controller, port, channel);
	if (buff_offset >= 0)
		hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), base, buff_offset);

	hsi_reset_ch_write(ch);

	return status_reg & HSI_HST_DATAACCEPT(channel) ? 0 : -ECANCELED;
}
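/*
 * Usage sketch (hypothetical caller, not from the original source): whatever
 * the outcome, no write callback will follow, so the caller only needs the
 * return code to know whether the data reached the TX FIFO.
 */
static void __maybe_unused hsi_example_abort_write(struct hsi_channel *ch)
{
	if (hsi_driver_cancel_write_interrupt(ch) == -ECANCELED)
		dev_dbg(&ch->dev->device,
			"write cancelled, data not in TX FIFO\n");
	else
		dev_dbg(&ch->dev->device, "write already completed\n");
}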
/**
 * hsi_softreset - Force a SW RESET of HSI (core + DMA)
 *
 * @hsi_ctrl - reference to the hsi controller to be reset.
 *
 */
int hsi_softreset(struct hsi_dev *hsi_ctrl)
{
	unsigned int ind = 0;
	unsigned int port;
	void __iomem *base = hsi_ctrl->base;
	u32 status;

	/* HSI-C1BUG00088: i696: HSI: Issue with SW reset
	 * No recovery from SW reset under specific circumstances.
	 * If a SW RESET is done while some HSI errors are still not
	 * acknowledged, the HSR FSM gets stuck. */
	if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_i696_SW_RESET_FSM_STUCK)) {
		for (port = 1; port <= hsi_ctrl->max_p; port++) {
			hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, base,
				     HSI_HSR_MODE_REG(port));
			hsi_outl(HSI_HSR_ERROR_ALL, base,
				 HSI_HSR_ERRORACK_REG(port));
		}
	}

	/* Resetting HSI Block */
	hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
	do {
		status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
		ind++;
	} while ((!(status & HSI_RESETDONE)) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI SW_RESET failed to complete within"
			" %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	} else if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	ind = 0;

	/* Resetting DMA Engine */
	hsi_outl_or(HSI_GDD_GRST_SWRESET, base, HSI_GDD_GRST_REG);
	do {
		status = hsi_inl(base, HSI_GDD_GRST_REG);
		ind++;
	} while ((status & HSI_GDD_GRST_SWRESET) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI DMA SW_RESET failed to complete"
			" within %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	}

	if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI DMA SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	return 0;
}
/**
 * hsi_softreset - Force a SW RESET of HSI (core + DMA)
 *
 * @hsi_ctrl - reference to the hsi controller to be reset.
 *
 */
int hsi_softreset(struct hsi_dev *hsi_ctrl)
{
	unsigned int ind = 0;
	unsigned int port;
	void __iomem *base = hsi_ctrl->base;
	u32 status;

	/* SW WA for HSI-C1BUG00088 OMAP4430 HSI: no recovery from SW reset
	 * under specific circumstances */
	for (port = 1; port <= hsi_ctrl->max_p; port++) {
		hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, base,
			     HSI_HSR_MODE_REG(port));
		hsi_outl(HSI_HSR_ERROR_ALL, base, HSI_HSR_ERRORACK_REG(port));
	}

	/* Resetting HSI Block */
	hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
	do {
		status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
		ind++;
	} while ((!(status & HSI_RESETDONE)) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI SW_RESET failed to complete within"
			" %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	} else if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	ind = 0;

	/* Resetting DMA Engine */
	hsi_outl_or(HSI_GDD_GRST_SWRESET, base, HSI_GDD_GRST_REG);
	do {
		status = hsi_inl(base, HSI_GDD_GRST_REG);
		ind++;
	} while ((status & HSI_GDD_GRST_SWRESET) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI DMA SW_RESET failed to complete"
			" within %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	}

	if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI DMA SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	return 0;
}
static void hsi_get_tx(struct hsi_port *sport, struct hst_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;

	cfg->mode = hsi_inl(base, HSI_HST_MODE_REG(port)) & HSI_MODE_VAL_MASK;
	cfg->frame_size = hsi_inl(base, HSI_HST_FRAMESIZE_REG(port));
	cfg->channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
	cfg->divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
	cfg->arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));
}
/* Manage HSR divisor update
 * A special divisor value allows switching to auto-divisor mode in Rx
 * (but with error counters deactivated). This function implements the
 * transitions to/from this mode.
 */
int hsi_set_rx_divisor(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	if (cfg->divisor == NOT_SET)
		return 0;

	if (hsi_driver_device_is_hsi(pdev)) {
		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
		    sport->counters_on) {
			/* auto mode: deactivate counters + set divisor = 0 */
			sport->reg_counters = hsi_inl(base,
						HSI_HSR_COUNTERS_REG(port));
			sport->counters_on = 0;
			hsi_outl(0, base, HSI_HSR_COUNTERS_REG(port));
			hsi_outl(0, base, HSI_HSR_DIVISOR_REG(port));
			dev_dbg(hsi_ctrl->dev, "Switched to HSR auto mode\n");
		} else if (cfg->divisor != HSI_HSR_DIVISOR_AUTO) {
			/* Divisor-set mode: leave auto mode and use the new
			 * counter values */
			sport->reg_counters = cfg->counters;
			sport->counters_on = 1;
			hsi_outl(cfg->counters, base,
				 HSI_HSR_COUNTERS_REG(port));
			hsi_outl(cfg->divisor, base,
				 HSI_HSR_DIVISOR_REG(port));
			dev_dbg(hsi_ctrl->dev, "Left HSR auto mode. "
				"Counters=0x%08x, Divisor=0x%08x\n",
				cfg->counters, cfg->divisor);
		}
	} else {
		if (cfg->divisor == HSI_HSR_DIVISOR_AUTO &&
		    sport->counters_on) {
			/* auto mode: deactivate timeout */
			sport->reg_counters = hsi_inl(base,
						      SSI_TIMEOUT_REG(port));
			sport->counters_on = 0;
			hsi_outl(0, base, SSI_TIMEOUT_REG(port));
			dev_dbg(hsi_ctrl->dev, "Deactivated SSR timeout\n");
		} else if (cfg->divisor == HSI_SSR_DIVISOR_USE_TIMEOUT) {
			/* Leave auto mode: use the new timeout value */
			sport->reg_counters = cfg->counters;
			sport->counters_on = 1;
			hsi_outl(cfg->counters, base, SSI_TIMEOUT_REG(port));
			dev_dbg(hsi_ctrl->dev, "Left SSR auto mode. "
				"Timeout=0x%08x\n", cfg->counters);
		}
	}

	return 0;
}
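/*
 * A minimal sketch (not part of the original code) of how a port could be
 * switched into HSR auto-divisor mode through hsi_set_rx_divisor(): only the
 * divisor field matters when entering auto mode; counters are only consumed
 * again when a real divisor is programmed.
 */
static void __maybe_unused hsi_example_enter_rx_auto_mode(struct hsi_port *sport)
{
	struct hsr_ctx cfg = {
		.divisor = HSI_HSR_DIVISOR_AUTO,	/* request auto mode */
	};

	hsi_set_rx_divisor(sport, &cfg);
}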
static int hsi_debug_gdd_show(struct seq_file *m, void *p)
{
	struct hsi_dev *hsi_ctrl = m->private;
	void __iomem *base = hsi_ctrl->base;
	int lch;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n",
		   hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG));
	seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n",
		   hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG));

	if (!hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "HW_ID\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_HW_ID_REG));
		seq_printf(m, "PPORT_ID\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_PPORT_ID_REG));
		seq_printf(m, "MPORT_ID\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_MPORT_ID_REG));
		seq_printf(m, "TEST\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_SSI_GDD_TEST_REG));
	}
	seq_printf(m, "GCR\t\t: 0x%08x\n", hsi_inl(base, HSI_GDD_GCR_REG));

	for (lch = 0; lch < hsi_ctrl->gdd_chan_count; lch++) {
		seq_printf(m, "\nGDD LCH %d\n=========\n", lch);
		seq_printf(m, "CSDP\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CSDP_REG(lch)));
		seq_printf(m, "CCR\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CCR_REG(lch)));
		seq_printf(m, "CICR\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CCIR_REG(lch)));
		seq_printf(m, "CSR\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CSR_REG(lch)));
		seq_printf(m, "CSSA\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_GDD_CSSA_REG(lch)));
		seq_printf(m, "CDSA\t\t: 0x%08x\n",
			   hsi_inl(base, HSI_GDD_CDSA_REG(lch)));
		seq_printf(m, "CEN\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CEN_REG(lch)));
		seq_printf(m, "CSAC\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CSAC_REG(lch)));
		seq_printf(m, "CDAC\t\t: 0x%04x\n",
			   hsi_inw(base, HSI_GDD_CDAC_REG(lch)));
		if (!hsi_driver_device_is_hsi(pdev))
			seq_printf(m, "CLNK_CTRL\t: 0x%04x\n",
				   hsi_inw(base,
					   HSI_SSI_GDD_CLNK_CTRL_REG(lch)));
	}

	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;
}
static void hsi_save_mode(struct platform_device *pdev)
{
	struct hsi_platform_data *pdata = pdev->dev.platform_data;
	void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start);
	struct port_ctx *p;
	int port;

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx.pctx[port - 1];
		p->hst.mode = hsi_inl(base + HSI_HST_MODE_REG(port));
		p->hsr.mode = hsi_inl(base + HSI_HSR_MODE_REG(port));
	}
}
/**
 * hsi_driver_int_proc - check all channels / ports for interrupt events
 * @pport - HSI port to check for events
 * @status_offset: interrupt status register offset
 * @enable_offset: interrupt enable register offset
 * @start: interrupt index to start on
 * @stop: interrupt index to stop on
 *
 * This function calls the processing functions related to the triggered
 * events.
 */
static void hsi_driver_int_proc(struct hsi_port *pport,
				unsigned long status_offset,
				unsigned long enable_offset,
				unsigned int start, unsigned int stop)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = pport->port_number;
	unsigned int channel;
	u32 status_reg;
	u32 hsr_err_reg;
	u32 channels_served = 0;

	status_reg = hsi_inl(base, status_offset);
	status_reg &= hsi_inl(base, enable_offset);

	for (channel = start; channel < stop; channel++) {
		if (status_reg & HSI_HST_DATAACCEPT(channel)) {
			do_channel_tx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HST_DATAACCEPT(channel);
		}

		if (status_reg & HSI_HSR_DATAAVAILABLE(channel)) {
			do_channel_rx(&pport->hsi_channel[channel]);
			channels_served |= HSI_HSR_DATAAVAILABLE(channel);
		}
	}

	if (status_reg & HSI_BREAKDETECTED) {
		dev_info(hsi_ctrl->dev, "Hardware BREAK on port %d\n", port);
		hsi_outl(0, base, HSI_HSR_BREAK_REG(port));
		hsi_port_event_handler(pport, HSI_EVENT_BREAK_DETECTED, NULL);
		channels_served |= HSI_BREAKDETECTED;
	}

	if (status_reg & HSI_ERROROCCURED) {
		hsr_err_reg = hsi_inl(base, HSI_HSR_ERROR_REG(port));
		dev_err(hsi_ctrl->dev, "HSI ERROR Port %d: 0x%x\n", port,
			hsr_err_reg);
		hsi_outl(hsr_err_reg, base, HSI_HSR_ERRORACK_REG(port));
		if (hsr_err_reg)	/* ignore spurious errors */
			hsi_port_event_handler(pport, HSI_EVENT_ERROR, NULL);
		else
			dev_dbg(hsi_ctrl->dev, "Spurious HSI error!\n");

		channels_served |= HSI_ERROROCCURED;
	}

	hsi_outl(channels_served, base, status_offset);
}
/**
 * hsi_driver_cancel_write_dma - Cancel an ongoing GDD [DMA] write for the
 *				 specified hsi channel.
 * @hsi_ch - pointer to the hsi_channel to cancel DMA write.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return: -ENXIO : No DMA channel found for specified HSI channel
 *	   -ECANCELED : DMA cancel success, data not transferred to TX FIFO
 *	   0 : DMA transfer is already over, data already transferred to
 *	       TX FIFO
 *
 * Note: whatever the returned value, the write callback will not be called
 * after a write cancel.
 */
int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
{
	int lch = hsi_ch->write_data.lch;
	unsigned int port = hsi_ch->hsi_port->port_number;
	unsigned int channel = hsi_ch->channel_number;
	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
	u16 ccr, gdd_csr;
	long buff_offset;
	u32 status_reg;
	dma_addr_t dma_h;
	size_t size;

	if (lch < 0) {
		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
			"channel %d\n", hsi_ch->channel_number);
		return -ENXIO;
	}

	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
	if (!(ccr & HSI_CCR_ENABLE)) {
		dev_dbg(&hsi_ch->dev->device, "Write cancel on not "
			"enabled logical channel %d CCR REG 0x%04X\n",
			lch, ccr);
	}

	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));

	/* Clear CSR register by reading it, as it is cleared automatically
	 * by HW after SW read. */
	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
		 HSI_SYS_GDD_MPU_IRQ_STATUS_REG);

	/* Unmap DMA region */
	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CSSA_REG(lch));
	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_TO_DEVICE);

	buff_offset = hsi_hst_bufstate_f_reg(hsi_ctrl, port, channel);
	if (buff_offset >= 0)
		hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), hsi_ctrl->base,
			     buff_offset);

	hsi_reset_ch_write(hsi_ch);

	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
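/*
 * Caller sketch (hypothetical, hsi_controller lock assumed held): -ENXIO
 * means no DMA was in flight for this channel, so falling back to the
 * interrupt-mode cancel is one possible strategy.
 */
static void __maybe_unused hsi_example_cancel_tx(struct hsi_channel *hsi_ch)
{
	int err = hsi_driver_cancel_write_dma(hsi_ch);

	if (err == -ENXIO)
		hsi_driver_cancel_write_interrupt(hsi_ch);
	else if (err == -ECANCELED)
		dev_dbg(&hsi_ch->dev->device, "DMA write cancelled\n");
	/* err == 0: the transfer had already completed */
}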
/**
 * hsi_softreset_driver - Must be called following HSI SW RESET, to re-align
 *			  variable states with the new HW state.
 *
 * @hsi_ctrl - reference to the hsi controller to be re-aligned.
 *
 */
void hsi_softreset_driver(struct hsi_dev *hsi_ctrl)
{
	struct platform_device *pd = to_platform_device(hsi_ctrl->dev);
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_port *hsi_p;
	unsigned int port;
	u32 revision;

	/* HSI port reset */
	for (port = 0; port < hsi_ctrl->max_p; port++) {
		hsi_p = &hsi_ctrl->hsi_port[port];
		hsi_p->counters_on = 1;
		hsi_p->reg_counters = pdata->ctx->pctx[port].hsr.counters;
		hsi_port_channels_reset(&hsi_ctrl->hsi_port[port]);
	}

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Re-configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver (REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));
}
static ssize_t hsi_port_counters_read(struct file *filep, char __user *buff,
				      size_t count, loff_t *offp)
{
	ssize_t ret;
	struct hsi_port *hsi_port = filep->private_data;
	struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_port->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	char str[50];
	unsigned int reg;

	if (*offp > 0) {
		ret = 0;
		goto hsi_cnt_rd_bk;
	}

	hsi_clocks_enable(hsi_ctrl->dev, __func__);
	reg = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	if (hsi_driver_device_is_hsi(pdev)) {
		sprintf(str, "FT:%d, TB:%d, FB:%d\n",
			(unsigned int)(reg & HSI_COUNTERS_FT_MASK) >>
			HSI_COUNTERS_FT_OFFSET,
			(unsigned int)(reg & HSI_COUNTERS_TB_MASK) >>
			HSI_COUNTERS_TB_OFFSET,
			(unsigned int)(reg & HSI_COUNTERS_FB_MASK) >>
			HSI_COUNTERS_FB_OFFSET);
	} else {
		/* The original tail of this function was truncated here; the
		 * SSI branch and the copy-out below are a reconstruction based
		 * on the HSI branch above (on SSI, the same register offset
		 * holds the timeout value). */
		sprintf(str, "timeout:%d\n", reg);
	}

	ret = simple_read_from_buffer(buff, count, offp, str, strlen(str));

hsi_cnt_rd_bk:
	return ret;
}
/**
 * hsi_driver_cancel_read_dma - Cancel an ongoing GDD [DMA] read for the
 *				specified hsi channel.
 * @hsi_ch - pointer to the hsi_channel to cancel DMA read.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return: -ENXIO : No DMA channel found for specified HSI channel
 *	   -ECANCELED : DMA cancel success, data not available at expected
 *			address.
 *	   0 : DMA transfer is already over, data already available at
 *	       expected address.
 *
 * Note: whatever the returned value, the read callback will not be called
 * after cancel.
 */
int hsi_driver_cancel_read_dma(struct hsi_channel *hsi_ch)
{
	int lch = hsi_ch->read_data.lch;
	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
	u16 ccr, gdd_csr;
	u32 status_reg;
	dma_addr_t dma_h;
	size_t size;

	/* Re-enable interrupts for polling if needed */
	if (hsi_ch->flags & HSI_CH_RX_POLL)
		hsi_driver_enable_read_interrupt(hsi_ch, NULL);

	if (lch < 0) {
		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
			"channel %d\n", hsi_ch->channel_number);
		return -ENXIO;
	}

	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
	if (!(ccr & HSI_CCR_ENABLE)) {
		dev_dbg(&hsi_ch->dev->device, "Read cancel on not "
			"enabled logical channel %d CCR REG 0x%04X\n",
			lch, ccr);
	}

	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));

	/* Clear CSR register by reading it, as it is cleared automatically
	 * by HW after SW read. */
	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));
	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
		 HSI_SYS_GDD_MPU_IRQ_STATUS_REG);

	/* Unmap DMA region - Access to the buffer is now safe */
	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CDSA_REG(lch));
	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_FROM_DEVICE);

	hsi_reset_ch_read(hsi_ch);

	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
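/*
 * Read-cancel sketch (hypothetical caller, lock assumed held): once the call
 * returns, the DMA region has been unmapped, so the destination buffer may
 * be reused safely whatever the outcome.
 */
static void __maybe_unused hsi_example_cancel_rx(struct hsi_channel *hsi_ch)
{
	if (hsi_driver_cancel_read_dma(hsi_ch) == 0)
		dev_dbg(&hsi_ch->dev->device,
			"data already landed at the expected address\n");
	/* -ECANCELED: buffer content is undefined; -ENXIO: no DMA in flight */
}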
int hsi_softreset(struct hsi_dev *hsi_ctrl)
{
	unsigned int ind = 0;
	void __iomem *base = hsi_ctrl->base;
	u32 status;

	/* Resetting HSI Block */
	hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
	do {
		status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
		ind++;
	} while ((!(status & HSI_RESETDONE)) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI SW_RESET failed to complete within"
			" %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	} else if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	ind = 0;

	/* Resetting DMA Engine */
	hsi_outl_or(HSI_GDD_GRST_SWRESET, base, HSI_GDD_GRST_REG);
	do {
		status = hsi_inl(base, HSI_GDD_GRST_REG);
		ind++;
	} while ((status & HSI_GDD_GRST_SWRESET) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI DMA SW_RESET failed to complete"
			" within %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	}

	if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI DMA SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	return 0;
}
static int hsi_debug_show(struct seq_file *m, void *p)
{
	struct hsi_dev *hsi_ctrl = m->private;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	seq_printf(m, "REVISION\t: 0x%08x\n",
		   hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG));
	if (hsi_driver_device_is_hsi(pdev))
		seq_printf(m, "HWINFO\t\t: 0x%08x\n",
			   hsi_inl(hsi_ctrl->base, HSI_SYS_HWINFO_REG));
	seq_printf(m, "SYSCONFIG\t: 0x%08x\n",
		   hsi_inl(hsi_ctrl->base, HSI_SYS_SYSCONFIG_REG));
	seq_printf(m, "SYSSTATUS\t: 0x%08x\n",
		   hsi_inl(hsi_ctrl->base, HSI_SYS_SYSSTATUS_REG));

	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;
}
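/*
 * A sketch (not from the original file) of how such a seq_file show routine
 * is typically wired into debugfs with the standard single_open() idiom.
 * The driver's actual registration lives in hsi_debug_add_ctrl(); the names
 * below are illustrative only.
 */
static int __maybe_unused hsi_example_debug_open(struct inode *inode,
						 struct file *file)
{
	/* inode->i_private is assumed to carry the struct hsi_dev pointer */
	return single_open(file, hsi_debug_show, inode->i_private);
}

static const struct file_operations hsi_example_debug_fops __maybe_unused = {
	.open		= hsi_example_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};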
/**
 * hsi_driver_cancel_read_interrupt - Cancel pending read interrupt.
 * @ch - hsi device channel where to cancel the pending interrupt.
 *
 * Return: -ECANCELED : read cancel success, data not available at expected
 *			address.
 *	   0 : transfer is already over, data already available at expected
 *	       address.
 *
 * Note: whatever the returned value, the read callback will not be called
 * after cancel.
 */
int hsi_driver_cancel_read_interrupt(struct hsi_channel *ch)
{
	struct hsi_port *p = ch->hsi_port;
	unsigned int port = p->port_number;
	unsigned int channel = ch->channel_number;
	void __iomem *base = p->hsi_controller->base;
	u32 status_reg;

	status_reg = hsi_inl(base,
			HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
	if (!(status_reg & HSI_HSR_DATAAVAILABLE(channel))) {
		dev_dbg(&ch->dev->device, "Read cancel on not "
			"enabled channel %d ENABLE REG 0x%08X", channel,
			status_reg);
	}
	status_reg &= hsi_inl(base,
			HSI_SYS_MPU_STATUS_CH_REG(port, p->n_irq, channel));
	hsi_outl_and(~HSI_HSR_DATAAVAILABLE(channel), base,
		     HSI_SYS_MPU_ENABLE_CH_REG(port, p->n_irq, channel));
	hsi_reset_ch_read(ch);

	return status_reg & HSI_HSR_DATAAVAILABLE(channel) ? 0 : -ECANCELED;
}
bool hsi_driver_is_interrupt_pending(struct hsi_port *pport, u32 flag,
				     bool backup)
{
	u32 val;

	val = hsi_inl(pport->hsi_controller->base,
		      HSI_SYS_MPU_STATUS_CH_REG(pport->port_number,
						pport->n_irq,
						backup ? HSI_SSI_CHANNELS_MAX
						       : 0));

	return val & flag;
}
/**
 * hsi_is_dma_read_int_pending - Indicates if a DMA read interrupt is pending
 * @hsi_ctrl - HSI controller of the GDD.
 *
 * Needs to be called holding the hsi_controller lock
 *
 * Returns true if a DMA read interrupt is pending, else false
 */
bool hsi_is_dma_read_int_pending(struct hsi_dev *hsi_ctrl)
{
	void __iomem *base = hsi_ctrl->base;
	unsigned int gdd_lch = 0;
	u32 status_reg = 0;
	int i, j;

	status_reg = hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	if (!status_reg)
		return false;

	/* Scan all enabled DMA channels */
	for (gdd_lch = 0; gdd_lch < hsi_ctrl->gdd_chan_count; gdd_lch++) {
		if (!(status_reg & HSI_GDD_LCH(gdd_lch)))
			continue;
		for (i = 0; i < hsi_ctrl->max_p; i++)
			for (j = 0; j < hsi_ctrl->hsi_port[i].max_ch; j++)
				if (hsi_ctrl->hsi_port[i].hsi_channel[j].
				    read_data.lch == gdd_lch)
					return true;
	}

	return false;
}
bool hsi_is_hst_port_busy(struct hsi_port *pport)
{
	unsigned int port = pport->port_number;
	void __iomem *base = pport->hsi_controller->base;
	u32 txstateval;

	txstateval = hsi_inl(base, HSI_HST_TXSTATE_REG(port)) &
		     HSI_HST_TXSTATE_VAL_MASK;

	if (txstateval != HSI_HST_TXSTATE_IDLE) {
		dev_dbg(pport->hsi_controller->dev, "HST port %d busy, "
			"TXSTATE=%d\n", port, txstateval);
		return true;
	}

	return false;
}
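/*
 * Busy-wait sketch (illustrative only; assumes <linux/delay.h> and an
 * arbitrary retry bound chosen for this example) for callers that must not
 * reconfigure the transmitter while frames are still being shifted out.
 */
static int __maybe_unused hsi_example_wait_tx_idle(struct hsi_port *pport)
{
	int retries = 100;	/* arbitrary bound for this sketch */

	while (hsi_is_hst_port_busy(pport) && --retries)
		udelay(10);

	return retries ? 0 : -EBUSY;
}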
/**
 * hsi_get_free_lch - Get a free GDD (DMA) logical channel
 * @hsi_ctrl - HSI controller of the GDD.
 *
 * Needs to be called holding the hsi_controller lock
 *
 * Returns the logical channel number, or -EBUSY if none available
 */
static int hsi_get_free_lch(struct hsi_dev *hsi_ctrl)
{
	unsigned int enable_reg;
	int i, lch;

	enable_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	lch = hsi_ctrl->last_gdd_lch;
	for (i = 0; i < hsi_ctrl->gdd_chan_count; i++) {
		if (++lch >= hsi_ctrl->gdd_chan_count)
			lch = 0;
		if ((enable_reg & HSI_GDD_LCH(lch)) == 0) {
			hsi_ctrl->last_gdd_lch = lch;
			return lch;
		}
	}

	return -EBUSY;
}
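/*
 * Allocation sketch (hypothetical, hsi_controller lock assumed held): since
 * hsi_get_free_lch() treats a channel as free when its MPU IRQ enable bit is
 * clear, one plausible way for a caller to reserve the channel is to enable
 * that bit before dropping the lock, mirroring what the search checks.
 */
static int __maybe_unused hsi_example_claim_lch(struct hsi_dev *hsi_ctrl)
{
	int lch = hsi_get_free_lch(hsi_ctrl);

	if (lch < 0)
		return lch;	/* -EBUSY: all GDD channels in use */

	/* Enable the MPU IRQ for this channel so later searches skip it */
	hsi_outl_or(HSI_GDD_LCH(lch), hsi_ctrl->base,
		    HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);

	return lch;
}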
static void do_channel_rx(struct hsi_channel *ch)
{
	struct hsi_dev *hsi_ctrl = ch->hsi_port->hsi_controller;
	void __iomem *base = ch->hsi_port->hsi_controller->base;
	unsigned int n_ch;
	unsigned int n_p;
	unsigned int irq;
	long buff_offset;
	int rx_poll = 0;
	int data_read = 0;

	n_ch = ch->channel_number;
	n_p = ch->hsi_port->port_number;
	irq = ch->hsi_port->n_irq;

	spin_lock(&hsi_ctrl->lock);

	if (ch->flags & HSI_CH_RX_POLL)
		rx_poll = 1;

	if (ch->read_data.addr) {
		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, n_p, n_ch);
		if (buff_offset >= 0) {
			data_read = 1;
			*(ch->read_data.addr) = hsi_inl(base, buff_offset);
		}
	}

	hsi_outl_and(~HSI_HSR_DATAAVAILABLE(n_ch), base,
		     HSI_SYS_MPU_ENABLE_CH_REG(n_p, irq, n_ch));
	hsi_reset_ch_read(ch);

	spin_unlock(&hsi_ctrl->lock);

	if (rx_poll)
		hsi_port_event_handler(ch->hsi_port,
				       HSI_EVENT_HSR_DATAAVAILABLE,
				       (void *)n_ch);

	if (data_read)
		(*ch->read_done)(ch->dev, 1);
}
void hsi_get_rx(struct hsi_port *sport, struct hsr_ctx *cfg)
{
	struct hsi_dev *hsi_ctrl = sport->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	int port = sport->port_number;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	cfg->mode = hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_MODE_VAL_MASK;
	cfg->flow = (hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_FLOW_VAL_MASK)
		    >> HSI_FLOW_OFFSET;
	cfg->frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port));
	cfg->channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
	if (hsi_driver_device_is_hsi(pdev)) {
		cfg->divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
		cfg->counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
	} else {
		cfg->counters = hsi_inl(base, SSI_TIMEOUT_REG(port));
	}
}
static int __init hsi_softreset(struct hsi_dev *hsi_ctrl)
{
	int ind = 0;
	void __iomem *base = hsi_ctrl->base;
	u32 status;

	hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(HSI_RESETDONE_TIMEOUT));
		status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
		ind++;
	} while ((!(status & HSI_RESETDONE)) && (ind < HSI_RESETDONE_RETRIES));

	if (ind >= HSI_RESETDONE_RETRIES)
		return -EIO;

	/* Resetting GDD */
	hsi_outl_or(HSI_SWRESET, base, HSI_GDD_GRST_REG);

	return 0;
}
/**
 * hsi_ioctl - HSI I/O control
 * @dev - hsi device channel reference to apply the I/O control
 *	  (or port associated to it)
 * @command - HSI I/O control command
 * @arg - parameter associated to the control command. NULL, if no parameter.
 *
 * Return 0 on success, a negative value on failure.
 *
 */
int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg)
{
	struct hsi_channel *ch;
	struct hsi_dev *hsi_ctrl;
	struct hsi_port *pport;
	void __iomem *base;
	unsigned int port, channel;
	u32 acwake;
	int err = 0;
	int fifo = 0;
	u8 ret;
	struct hsi_platform_data *pdata;

	if (unlikely((!dev) ||
		     (!dev->ch) ||
		     (!dev->ch->hsi_port) ||
		     (!dev->ch->hsi_port->hsi_controller)) ||
	    (!(dev->ch->flags & HSI_CH_OPEN))) {
		pr_err(LOG_NAME "HSI IOCTL Invalid parameter\n");
		return -EINVAL;
	}

	ch = dev->ch;
	pport = ch->hsi_port;
	hsi_ctrl = ch->hsi_port->hsi_controller;
	port = ch->hsi_port->port_number;
	channel = ch->channel_number;
	base = hsi_ctrl->base;

	dev_dbg(hsi_ctrl->dev, "IOCTL: ch %d, command %d\n", channel, command);

	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(hsi_ctrl->dev, channel, __func__);

	switch (command) {
	case HSI_IOCTL_ACWAKE_UP:
		/* Wake up request to Modem (typically OMAP initiated) */
		/* Symmetrical disable will be done in HSI_IOCTL_ACWAKE_DOWN */
		if (ch->flags & HSI_CH_ACWAKE) {
			dev_dbg(hsi_ctrl->dev, "Duplicate ACWAKE UP\n");
			err = -EPERM;
			goto out;
		}

		ch->flags |= HSI_CH_ACWAKE;
		pport->acwake_status |= BIT(channel);

		/* We claim the wake line only once per channel */
		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (!(acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_SET_WAKE(channel), base,
				 HSI_SYS_SET_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_ACWAKE_DOWN:
		/* Low power request initiation (OMAP initiated, typically */
		/* following inactivity timeout) */
		/* ACPU HSI block shall still be capable of receiving */
		if (!(ch->flags & HSI_CH_ACWAKE)) {
			dev_dbg(hsi_ctrl->dev, "Duplicate ACWAKE DOWN\n");
			err = -EPERM;
			goto out;
		}

		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (unlikely(pport->acwake_status !=
			     (acwake & HSI_WAKE_MASK))) {
			dev_warn(hsi_ctrl->dev,
				 "ACWAKE shadow register mismatch"
				 " acwake_status: 0x%x, HSI_SYS_WAKE_REG: 0x%x",
				 pport->acwake_status, acwake);
			pport->acwake_status = acwake & HSI_WAKE_MASK;
		}
		/* SSI_TODO: add safety check for SSI also */

		ch->flags &= ~HSI_CH_ACWAKE;
		pport->acwake_status &= ~BIT(channel);

		/* Release the wake line per channel */
		if ((acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_CLEAR_WAKE(channel), base,
				 HSI_SYS_CLEAR_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_SEND_BREAK:
		hsi_outl(1, base, HSI_HST_BREAK_REG(port));
		/* HSI_TODO: need to deactivate clock after BREAK frames sent.
		 * Use an interrupt? (if a TX BREAK INT exists) */
		break;
	case HSI_IOCTL_GET_ACWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		*(u32 *)arg = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		break;
	case HSI_IOCTL_FLUSH_RX:
		ret = hsi_hsr_fifo_flush_channel(hsi_ctrl, port, channel);
		if (arg)
			*(size_t *)arg = ret;

		/* Ack the RX Int */
		hsi_outl_and(~HSI_HSR_DATAAVAILABLE(channel), base,
			     HSI_SYS_MPU_STATUS_CH_REG(port, pport->n_irq,
						       channel));
		break;
	case HSI_IOCTL_FLUSH_TX:
		ret = hsi_hst_fifo_flush_channel(hsi_ctrl, port, channel);
		if (arg)
			*(size_t *)arg = ret;

		/* Ack the TX Int */
		hsi_outl_and(~HSI_HST_DATAACCEPT(channel), base,
			     HSI_SYS_MPU_STATUS_CH_REG(port, pport->n_irq,
						       channel));
		break;
	case HSI_IOCTL_GET_CAWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_get_cawake(dev->ch->hsi_port);
		if (err < 0) {
			err = -ENODEV;
			goto out;
		}
		*(u32 *)arg = err;
		break;
	case HSI_IOCTL_SET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_GET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_SET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_GET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_SW_RESET:
		dev_info(hsi_ctrl->dev, "SW Reset\n");
		err = hsi_softreset(hsi_ctrl);

		/* Reset HSI config to default */
		hsi_softreset_driver(hsi_ctrl);
		break;
	case HSI_IOCTL_GET_FIFO_OCCUPANCY:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
				"channel %d.\n", channel);
			err = -EFAULT;
			goto out;
		}
		*(size_t *)arg = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
		break;
	case HSI_IOCTL_SET_WAKE_RX_3WIRES_MODE:
		dev_info(hsi_ctrl->dev,
			 "Entering RX wakeup in 3 wires mode (no CAWAKE)\n");
		pport->wake_rx_3_wires_mode = 1;

		/* HSI-C1BUG00085: ixxx: HSI wakeup issue in 3 wires mode
		 * HSI will NOT generate the Swakeup for the 2nd frame if it
		 * entered IDLE after the 1st received frame */
		if (is_hsi_errata(hsi_ctrl,
				  HSI_ERRATUM_ixxx_3WIRES_NO_SWAKEUP))
			if (hsi_driver_device_is_hsi(to_platform_device
						     (hsi_ctrl->dev)))
				hsi_set_pm_force_hsi_on(hsi_ctrl);

		/* When WAKE is not available, ACREADY must be set to 1 at
		 * reset else remote will never have a chance to transmit. */
		hsi_outl_or(HSI_SET_WAKE_3_WIRES | HSI_SET_WAKE_READY_LVL_1,
			    base, HSI_SYS_SET_WAKE_REG(port));
		hsi_driver_disable_interrupt(pport, HSI_CAWAKEDETECTED);
		break;
	case HSI_IOCTL_SET_WAKE_RX_4WIRES_MODE:
		dev_info(hsi_ctrl->dev,
			 "Entering RX wakeup in 4 wires mode\n");
		pport->wake_rx_3_wires_mode = 0;

		/* HSI-C1BUG00085: ixxx: HSI wakeup issue in 3 wires mode
		 * HSI will NOT generate the Swakeup for the 2nd frame if it
		 * entered IDLE after the 1st received frame */
		if (is_hsi_errata(hsi_ctrl,
				  HSI_ERRATUM_ixxx_3WIRES_NO_SWAKEUP))
			if (hsi_driver_device_is_hsi(to_platform_device
						     (hsi_ctrl->dev)))
				hsi_set_pm_default(hsi_ctrl);

		hsi_driver_enable_interrupt(pport, HSI_CAWAKEDETECTED);
		hsi_outl_and(HSI_SET_WAKE_3_WIRES_MASK, base,
			     HSI_SYS_SET_WAKE_REG(port));
		break;
	case HSI_IOCTL_SET_HI_SPEED:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_ctrl->hsi_fclk_req = *(unsigned int *)arg ?
					 HSI_FCLK_HI_SPEED :
					 HSI_FCLK_LOW_SPEED;

		if (hsi_ctrl->hsi_fclk_req == hsi_ctrl->hsi_fclk_current) {
			dev_dbg(hsi_ctrl->dev, "HSI FClk already @%ldHz\n",
				hsi_ctrl->hsi_fclk_current);
			goto out;
		}

		if (hsi_is_controller_transfer_ongoing(hsi_ctrl)) {
			err = -EBUSY;
			goto out;
		}
		hsi_ctrl->clock_change_ongoing = true;
		spin_unlock_bh(&hsi_ctrl->lock);

		pdata = dev_get_platdata(hsi_ctrl->dev);

		/* Set the HSI FCLK to requested value. */
		err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
					  hsi_ctrl->hsi_fclk_req);
		if (err < 0) {
			dev_err(hsi_ctrl->dev, "%s: Cannot set HSI FClk to"
				" %ldHz, err %d\n", __func__,
				hsi_ctrl->hsi_fclk_req, err);
		} else {
			dev_info(hsi_ctrl->dev, "HSI FClk changed from %ldHz"
				 " to %ldHz\n", hsi_ctrl->hsi_fclk_current,
				 hsi_ctrl->hsi_fclk_req);
			hsi_ctrl->hsi_fclk_current = hsi_ctrl->hsi_fclk_req;
		}

		spin_lock_bh(&hsi_ctrl->lock);
		hsi_ctrl->clock_change_ongoing = false;
		break;
	case HSI_IOCTL_GET_SPEED:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		*(unsigned long *)arg = hsi_ctrl->hsi_fclk_current;
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
out:
	/* All IOCTLs end by disabling the clocks, except ACWAKE high. */
	hsi_clocks_disable_channel(hsi_ctrl->dev, channel, __func__);
	spin_unlock_bh(&hsi_ctrl->lock);

	return err;
}
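/*
 * Client-side sketch (hypothetical, not from the original source): ACWAKE
 * requests are tracked per channel and must be balanced; the wake line
 * itself is only toggled on the first UP / last DOWN across channels.
 */
static void __maybe_unused hsi_example_wake_cycle(struct hsi_device *dev)
{
	if (!hsi_ioctl(dev, HSI_IOCTL_ACWAKE_UP, NULL)) {
		/* ... transmit while the wake line is held ... */
		hsi_ioctl(dev, HSI_IOCTL_ACWAKE_DOWN, NULL);
	}
}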
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");
	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		dev_err(&pd->dev, "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	/* Wakeup dependency was disabled for HSI <-> MPU PM_L3INIT_HSI_WKDEP */
#if 0
	omap_writel(0x141, 0x4A307338);
#endif

	pm_runtime_enable(hsi_ctrl->dev);
	pm_runtime_irq_safe(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver (REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, true);

	/* Set the HSI FCLK to default. */
	if (!pdata->device_scale) {
		dev_err(&pd->dev, "%s: No platform device_scale function\n",
			__func__);
		err = -ENXIO;
		goto rollback3;
	}
	err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
				  pdata->default_hsi_fclk);
	if (err == -EBUSY) {
		dev_warn(&pd->dev, "Cannot set HSI FClk to default value:"
			 " %ld. Will retry on next open\n",
			 pdata->default_hsi_fclk);
	} else if (err) {
		dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
			__func__, err, pdata->default_hsi_fclk);
		goto rollback3;
	} else {
		hsi_ctrl->clock_rate = pdata->default_hsi_fclk;
	}

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
void hsi_save_ctx(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	void __iomem *base = hsi_ctrl->base;
	struct hsi_port_ctx *p;
	int port;

	pdata->ctx->sysconfig = hsi_inl(base, HSI_SYS_SYSCONFIG_REG);
	pdata->ctx->gdd_gcr = hsi_inl(base, HSI_GDD_GCR_REG);
	if (hsi_driver_device_is_hsi(pdev))
		pdata->ctx->dll = hsi_inl(base, HSI_HSR_DLL_REG);

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx->pctx[port - 1];

		/* HSI TOP */
		p->sys_mpu_enable[0] = hsi_inl(base,
					HSI_SYS_MPU_ENABLE_REG(port, 0));
		p->sys_mpu_enable[1] = hsi_inl(base,
					HSI_SYS_MPU_U_ENABLE_REG(port, 0));

		/* HST */
		p->hst.mode = hsi_inl(base, HSI_HST_MODE_REG(port));
		if (!hsi_driver_device_is_hsi(pdev))
			p->hst.frame_size = hsi_inl(base,
						HSI_HST_FRAMESIZE_REG(port));
		p->hst.divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
		p->hst.channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
		p->hst.arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));

		/* HSR */
		p->hsr.mode = hsi_inl(base, HSI_HSR_MODE_REG(port));
		if (!hsi_driver_device_is_hsi(pdev))
			p->hsr.frame_size = hsi_inl(base,
						HSI_HSR_FRAMESIZE_REG(port));
		p->hsr.divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
		p->hsr.channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
		p->hsr.counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
	}
}
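/*
 * Context-save sketch (hypothetical suspend path, not from the original
 * source): the register context is captured into platform data before
 * clocks are cut, so a later restore can replay it. The drvdata is the
 * struct hsi_dev set in probe via platform_set_drvdata().
 */
static int __maybe_unused hsi_example_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hsi_dev *hsi_ctrl = platform_get_drvdata(pdev);

	hsi_save_ctx(hsi_ctrl);

	return 0;
}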
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = dev_get_platdata(&pd->dev);
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");
	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		dev_err(&pd->dev, "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	/* Check if mandatory board functions are populated */
	if (!pdata->device_scale) {
		dev_err(&pd->dev, "Missing platform device_scale function\n");
		return -ENOSYS;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	pm_runtime_enable(hsi_ctrl->dev);
	pm_runtime_irq_safe(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver (REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, true);

	/* Set the HSI FCLK to default. */
	hsi_ctrl->hsi_fclk_req = pdata->default_hsi_fclk;
	err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
				  pdata->default_hsi_fclk);
	if (err == -EBUSY) {
		/* PM framework init is late_initcall, so it may not yet be
		 * initialized; be prepared to retry later on open. */
		dev_warn(&pd->dev, "Cannot set HSI FClk to default value:"
			 " %ld. Will retry on next open\n",
			 pdata->default_hsi_fclk);
	} else if (err) {
		dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
			__func__, err, pdata->default_hsi_fclk);
		goto rollback3;
	} else {
		hsi_ctrl->hsi_fclk_current = pdata->default_hsi_fclk;
	}

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");
	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		pr_err(LOG_NAME "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	pm_runtime_enable(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_default(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver (REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, 1);

	/* Set the HSI FCLK to default. */
	err = omap_device_set_rate(hsi_ctrl->dev, hsi_ctrl->dev,
				   pdata->default_hsi_fclk);
	if (err)
		dev_err(&pd->dev,
			"Cannot set HSI FClk to default value: %ld\n",
			pdata->default_hsi_fclk);

	return err;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
/**
 * hsi_ioctl - HSI I/O control
 * @dev - hsi device channel reference to apply the I/O control
 *	  (or port associated to it)
 * @command - HSI I/O control command
 * @arg - parameter associated to the control command. NULL, if no parameter.
 *
 * Return 0 on success, a negative value on failure.
 *
 */
int hsi_ioctl(struct hsi_device *dev, unsigned int command, void *arg)
{
	struct hsi_channel *ch;
	struct hsi_dev *hsi_ctrl;
	struct hsi_port *pport;
	void __iomem *base;
	unsigned int port, channel;
	u32 acwake;
	int err = 0;
	int fifo = 0;

	if (unlikely((!dev) ||
		     (!dev->ch) ||
		     (!dev->ch->hsi_port) ||
		     (!dev->ch->hsi_port->hsi_controller)) ||
	    (!(dev->ch->flags & HSI_CH_OPEN))) {
		pr_err(LOG_NAME "HSI IOCTL Invalid parameter\n");
		return -EINVAL;
	}

	ch = dev->ch;
	pport = ch->hsi_port;
	hsi_ctrl = ch->hsi_port->hsi_controller;
	port = ch->hsi_port->port_number;
	channel = ch->channel_number;
	base = hsi_ctrl->base;

	dev_dbg(dev->device.parent, "IOCTL: ch %d, command %d\n", channel,
		command);

	spin_lock_bh(&hsi_ctrl->lock);
	hsi_clocks_enable_channel(dev->device.parent, channel, __func__);

	switch (command) {
	case HSI_IOCTL_ACWAKE_UP:
		if (ch->flags & HSI_CH_ACWAKE) {
			dev_dbg(dev->device.parent, "Duplicate ACWAKE UP\n");
			err = -EPERM;
			goto out;
		}

		/* Wake up request to Modem (typically OMAP initiated) */
		/* Symmetrical disable will be done in HSI_IOCTL_ACWAKE_DOWN */
		ch->flags |= HSI_CH_ACWAKE;
		pport->acwake_status |= BIT(channel);

		/* We claim the wake line only once per channel */
		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (!(acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_SET_WAKE(channel), base,
				 HSI_SYS_SET_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_ACWAKE_DOWN:
		/* Low power request initiation (OMAP initiated, typically */
		/* following inactivity timeout) */
		/* ACPU HSI block shall still be capable of receiving */
		if (!(ch->flags & HSI_CH_ACWAKE)) {
			dev_dbg(dev->device.parent, "Duplicate ACWAKE DOWN\n");
			err = -EPERM;
			goto out;
		}

		acwake = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		if (unlikely(pport->acwake_status !=
			     (acwake & HSI_WAKE_MASK))) {
			dev_warn(dev->device.parent,
				 "ACWAKE shadow register mismatch"
				 " acwake_status: 0x%x, HSI_SYS_WAKE_REG: 0x%x",
				 pport->acwake_status, acwake);
			pport->acwake_status = acwake & HSI_WAKE_MASK;
		}
		/* SSI_TODO: add safety check for SSI also */

		ch->flags &= ~HSI_CH_ACWAKE;
		pport->acwake_status &= ~BIT(channel);

		/* Release the wake line per channel */
		if ((acwake & HSI_WAKE(channel))) {
			hsi_outl(HSI_CLEAR_WAKE(channel), base,
				 HSI_SYS_CLEAR_WAKE_REG(port));
		}

		goto out;
		break;
	case HSI_IOCTL_SEND_BREAK:
		hsi_outl(1, base, HSI_HST_BREAK_REG(port));
		/* HSI_TODO: need to deactivate clock after BREAK frames sent.
		 * Use an interrupt? (if a TX BREAK INT exists) */
		break;
	case HSI_IOCTL_GET_ACWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		*(u32 *)arg = hsi_inl(base, HSI_SYS_WAKE_REG(port));
		break;
	case HSI_IOCTL_FLUSH_RX:
		hsi_outl(0, base, HSI_HSR_RXSTATE_REG(port));
		break;
	case HSI_IOCTL_FLUSH_TX:
		hsi_outl(0, base, HSI_HST_TXSTATE_REG(port));
		break;
	case HSI_IOCTL_GET_CAWAKE:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_get_cawake(dev->ch->hsi_port);
		if (err < 0) {
			err = -ENODEV;
			goto out;
		}
		*(u32 *)arg = err;
		break;
	case HSI_IOCTL_SET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_GET_RX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_rx(dev->ch->hsi_port, (struct hsr_ctx *)arg);
		break;
	case HSI_IOCTL_SET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		err = hsi_set_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_GET_TX:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		hsi_get_tx(dev->ch->hsi_port, (struct hst_ctx *)arg);
		break;
	case HSI_IOCTL_SW_RESET:
		dev_info(dev->device.parent, "SW Reset\n");
		err = hsi_softreset(hsi_ctrl);

		/* Reset HSI config to default */
		hsi_softreset_driver(hsi_ctrl);
		break;
	case HSI_IOCTL_GET_FIFO_OCCUPANCY:
		if (!arg) {
			err = -EINVAL;
			goto out;
		}
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id found for "
				"channel %d.\n", channel);
			err = -EFAULT;
			goto out;
		}
		*(size_t *)arg = hsi_get_rx_fifo_occupancy(hsi_ctrl, fifo);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
out:
	/* All IOCTLs end by disabling the clocks, except ACWAKE high. */
	hsi_clocks_disable_channel(dev->device.parent, channel, __func__);
	spin_unlock_bh(&hsi_ctrl->lock);

	return err;
}