/*
 * ssi_restore_ctx - reprogram the SSI controller from the context saved
 * before a power transition (counterpart of hsi_save_ctx()).
 * @pdev: platform device for the SSI controller
 *
 * Writes back the SYSCONFIG and GDD registers, then the per-port MPU
 * interrupt-enable, HST and HSR registers. Caller is expected to have the
 * module clocks enabled; the register write order is preserved from the
 * original bring-up sequence.
 */
static void ssi_restore_ctx(struct platform_device *pdev)
{
	struct hsi_platform_data *pdata = pdev->dev.platform_data;
	void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start);
	struct port_ctx *p;
	int port;

	/* TODO: update support for omap_pm_get_dev_context_loss_count
	int loss_count;
	loss_count = omap_pm_get_dev_context_loss_count(&pdev->dev);
	if (loss_count == pdata->ctx.loss_count)
		return;
	*/

	hsi_outl(pdata->ctx.sysconfig, base + HSI_SYS_SYSCONFIG_REG);
	hsi_outl(pdata->ctx.gdd_gcr, base + HSI_GDD_GCR_REG);

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx.pctx[port - 1];
		/*
		 * NOTE(review): sys_mpu_enable[1] is restored to
		 * HSI_SYS_MPU_ENABLE_REG(port, 1) here, but the save path
		 * (hsi_save_ctx) reads it from
		 * HSI_SYS_MPU_U_ENABLE_REG(port, 0). One of the two is
		 * using the wrong register -- confirm against the TRM.
		 */
		hsi_outl(p->sys_mpu_enable[0],
			 base + HSI_SYS_MPU_ENABLE_REG(port, 0));
		hsi_outl(p->sys_mpu_enable[1],
			 base + HSI_SYS_MPU_ENABLE_REG(port, 1));
		/* Transmitter (HST) configuration */
		hsi_outl(p->hst.frame_size, base + HSI_HST_FRAMESIZE_REG(port));
		hsi_outl(p->hst.divisor, base + HSI_HST_DIVISOR_REG(port));
		hsi_outl(p->hst.channels, base + HSI_HST_CHANNELS_REG(port));
		hsi_outl(p->hst.arb_mode, base + HSI_HST_ARBMODE_REG(port));
		/* Receiver (HSR) configuration */
		hsi_outl(p->hsr.frame_size, base + HSI_HSR_FRAMESIZE_REG(port));
		hsi_outl(p->hsr.timeout, base + HSI_HSR_COUNTERS_REG(port));
		hsi_outl(p->hsr.channels, base + HSI_HSR_CHANNELS_REG(port));
	}
}
static void do_hsi_tasklet(unsigned long hsi_port) { struct hsi_port *pport = (struct hsi_port *)hsi_port; struct hsi_dev *hsi_ctrl = pport->hsi_controller; void __iomem *base = hsi_ctrl->base; unsigned int port = pport->port_number; unsigned int irq = pport->n_irq; u32 status_reg; struct platform_device *pd = to_platform_device(hsi_ctrl->dev); hsi_driver_int_proc(pport, HSI_SYS_MPU_STATUS_REG(port, irq), HSI_SYS_MPU_ENABLE_REG(port, irq), 0, min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX)); if (pport->max_ch > HSI_SSI_CHANNELS_MAX) hsi_driver_int_proc(pport, HSI_SYS_MPU_U_STATUS_REG(port, irq), HSI_SYS_MPU_U_ENABLE_REG(port, irq), HSI_SSI_CHANNELS_MAX, pport->max_ch); status_reg = hsi_inl(base, HSI_SYS_MPU_STATUS_REG(port, irq)) & hsi_inl(base, HSI_SYS_MPU_ENABLE_REG(port, irq)); if (hsi_driver_device_is_hsi(pd)) status_reg |= (hsi_inl(base, HSI_SYS_MPU_U_STATUS_REG(port, irq)) & hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port, irq))); if (status_reg) tasklet_hi_schedule(&pport->hsi_tasklet); else enable_irq(pport->irq); }
/*
 * hsi_driver_disable_interrupt - mask interrupt source(s) on a port.
 * @pport: HSI port whose MPU enable register is updated
 * @flag: bit mask of the interrupt(s) to disable (CAWAKE, BREAK, ERROR)
 *
 * Clears @flag in the port's MPU interrupt-enable register.
 * Always returns 0.
 */
int hsi_driver_disable_interrupt(struct hsi_port *pport, u32 flag)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;

	hsi_outl_and(~flag, hsi_ctrl->base,
		     HSI_SYS_MPU_ENABLE_REG(pport->port_number, pport->n_irq));

	return 0;
}
/*
 * hsi_save_ctx - snapshot the controller registers before a power
 * transition so ssi_restore_ctx() can reprogram them after context loss.
 * @pdev: platform device for the HSI/SSI controller
 *
 * Records the context-loss counter, then reads back SYSCONFIG, the GDD
 * global config and the per-port interrupt-enable, HST and HSR registers
 * into pdata->ctx. Caller must ensure the module clocks are enabled.
 */
static void hsi_save_ctx(struct platform_device *pdev)
{
	struct hsi_platform_data *pdata = pdev->dev.platform_data;
	void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start);
	struct port_ctx *p;
	int port;

	/* Remember the loss count so restore can skip if nothing was lost. */
	pdata->ctx.loss_count = omap_pm_get_dev_context_loss_count(&pdev->dev);
	pdata->ctx.sysconfig = hsi_inl(base + HSI_SYS_SYSCONFIG_REG);
	pdata->ctx.gdd_gcr = hsi_inl(base + HSI_GDD_GCR_REG);

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx.pctx[port - 1];
		p->sys_mpu_enable[0] = hsi_inl(base +
					HSI_SYS_MPU_ENABLE_REG(port, 0));
		/*
		 * NOTE(review): saved from the "U" enable bank here, but
		 * ssi_restore_ctx() writes this value back to
		 * HSI_SYS_MPU_ENABLE_REG(port, 1). The two paths disagree;
		 * confirm the intended register against the TRM.
		 */
		p->sys_mpu_enable[1] = hsi_inl(base +
					HSI_SYS_MPU_U_ENABLE_REG(port, 0));
		/* Transmitter (HST) configuration */
		p->hst.frame_size = hsi_inl(base + HSI_HST_FRAMESIZE_REG(port));
		p->hst.divisor = hsi_inl(base + HSI_HST_DIVISOR_REG(port));
		p->hst.channels = hsi_inl(base + HSI_HST_CHANNELS_REG(port));
		p->hst.arb_mode = hsi_inl(base + HSI_HST_ARBMODE_REG(port));
		/* Receiver (HSR) configuration */
		p->hsr.frame_size = hsi_inl(base + HSI_HSR_FRAMESIZE_REG(port));
		/*FIXME - check this register*/
		p->hsr.timeout = hsi_inl(base + HSI_HSR_COUNTERS_REG(port));
		p->hsr.channels = hsi_inl(base + HSI_HSR_CHANNELS_REG(port));
	}
}
void hsi_restore_ctx(struct hsi_dev *hsi_ctrl) { struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); void __iomem *base = hsi_ctrl->base; struct hsi_port_ctx *p; int port; hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG); hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG); if (hsi_driver_device_is_hsi(pdev)) hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG); for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx->pctx[port - 1]; /* HSI TOP */ hsi_outl(p->sys_mpu_enable[0], base, HSI_SYS_MPU_ENABLE_REG(port, 0)); hsi_outl(p->sys_mpu_enable[1], base, HSI_SYS_MPU_U_ENABLE_REG(port, 0)); /* HST */ hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) hsi_outl(p->hst.frame_size, base, HSI_HST_FRAMESIZE_REG(port)); hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port)); hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port)); hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port)); /* HSR */ if (!hsi_driver_device_is_hsi(pdev)) hsi_outl(p->hsr.frame_size, base, HSI_HSR_FRAMESIZE_REG(port)); hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port)); hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port)); hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port)); } if (hsi_driver_device_is_hsi(pdev)) { /* SW strategy for HSI fifo management can be changed here */ hsi_fifo_mapping(hsi_ctrl, hsi_ctrl->fifo_mapping_strategy); } /* As a last step move HSR from MODE_VAL.SLEEP to the relevant mode. */ /* This will enable the ACREADY flow control mechanism. */ for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx->pctx[port - 1]; hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port)); } }
void hsi_restore_ctx(struct hsi_dev *hsi_ctrl) { struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); void __iomem *base = hsi_ctrl->base; struct port_ctx *p; int port; hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG); hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG); if (hsi_driver_device_is_hsi(pdev)) hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG); for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx->pctx[port - 1]; /* HSI TOP */ hsi_outl(p->sys_mpu_enable[0], base, HSI_SYS_MPU_ENABLE_REG(port, 0)); hsi_outl(p->sys_mpu_enable[1], base, HSI_SYS_MPU_U_ENABLE_REG(port, 0)); /* HST */ hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) hsi_outl(p->hst.frame_size, base, HSI_HST_FRAMESIZE_REG(port)); hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port)); hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port)); hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port)); /* HSR */ hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) hsi_outl(p->hsr.frame_size, base, HSI_HSR_FRAMESIZE_REG(port)); hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port)); hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port)); hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port)); } if (hsi_driver_device_is_hsi(pdev)) { /* SW strategy for HSI fifo management can be changed here */ hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT); } }
void hsi_save_ctx(struct hsi_dev *hsi_ctrl) { struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); void __iomem *base = hsi_ctrl->base; struct hsi_port_ctx *p; int port; pdata->ctx->sysconfig = hsi_inl(base, HSI_SYS_SYSCONFIG_REG); pdata->ctx->gdd_gcr = hsi_inl(base, HSI_GDD_GCR_REG); if (hsi_driver_device_is_hsi(pdev)) pdata->ctx->dll = hsi_inl(base, HSI_HSR_DLL_REG); for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx->pctx[port - 1]; /* HSI TOP */ p->sys_mpu_enable[0] = hsi_inl(base, HSI_SYS_MPU_ENABLE_REG(port, 0)); p->sys_mpu_enable[1] = hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port, 0)); /* HST */ p->hst.mode = hsi_inl(base, HSI_HST_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) p->hst.frame_size = hsi_inl(base, HSI_HST_FRAMESIZE_REG(port)); p->hst.divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port)); p->hst.channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port)); p->hst.arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port)); /* HSR */ p->hsr.mode = hsi_inl(base, HSI_HSR_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) p->hsr.frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)); p->hsr.divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port)); p->hsr.channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port)); p->hsr.counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port)); } }
static u32 hsi_process_int_event(struct hsi_port *pport) { unsigned int port = pport->port_number; unsigned int irq = pport->n_irq; u32 status_reg; bool cawake_double_int = false; /* Clear CAWAKE backup interrupt */ hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED, true); /* Process events for channels 0..7 */ status_reg = hsi_driver_int_proc(pport, HSI_SYS_MPU_STATUS_REG(port, irq), HSI_SYS_MPU_ENABLE_REG(port, irq), 0, min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX) - 1, cawake_double_int); /* If another CAWAKE interrupt occured while previous is still being * processed, mark it for extra processing */ if (hsi_driver_is_interrupt_pending(pport, HSI_CAWAKEDETECTED, true) && (status_reg & HSI_CAWAKEDETECTED)) { dev_warn(pport->hsi_controller->dev, "New CAWAKE interrupt " "detected during interrupt processing\n"); /* Force processing of backup CAWAKE interrupt */ cawake_double_int = true; } /* Process events for channels 8..15 or backup interrupt if needed */ if ((pport->max_ch > HSI_SSI_CHANNELS_MAX) || cawake_double_int) status_reg |= hsi_driver_int_proc(pport, HSI_SYS_MPU_U_STATUS_REG(port, irq), HSI_SYS_MPU_U_ENABLE_REG(port, irq), HSI_SSI_CHANNELS_MAX, pport->max_ch - 1, cawake_double_int); return status_reg; }
/**
 * hsi_do_cawake_process - CAWAKE line management
 * @pport - HSI port to process
 *
 * This function handles the CAWAKE L/H transitions and call the event callback
 * accordingly.
 *
 * Returns 0 if CAWAKE event process, -EAGAIN if CAWAKE event processing is
 * delayed due to a pending DMA interrupt.
 * If -EAGAIN is returned, pport->hsi_tasklet has to be re-scheduled once
 * DMA tasklet has be executed. This should be done automatically by driver.
 *
 * NOTE(review): the spin_unlock/spin_lock pairs around the event-handler
 * calls imply this is entered with hsi_ctrl->lock held -- confirm with
 * callers. The lock is dropped while user callbacks run, which is why the
 * line state is re-sampled at the end.
 */
int hsi_do_cawake_process(struct hsi_port *pport)
{
	struct hsi_dev *hsi_ctrl = pport->hsi_controller;
	bool cawake_status = hsi_get_cawake(pport);

	/* In 3-wire RX mode the CAWAKE line carries no wake semantics. */
	if (pport->wake_rx_3_wires_mode) {
		dev_warn(hsi_ctrl->dev, "CAWAKE edge in RX 3 wires, exiting\n");
		return 0;
	}

	/* Deal with init condition */
	if (unlikely(pport->cawake_status < 0))
		pport->cawake_status = !cawake_status;
	dev_dbg(hsi_ctrl->dev, "%s: Interrupts are not enabled but CAWAKE came."
		"hsi: port[%d] irq[%d] irq_en=0x%08x dma_irq_en=0x%08x\n",
		__func__, pport->port_number, pport->n_irq,
		hsi_inl(pport->hsi_controller->base,
			HSI_SYS_MPU_ENABLE_REG(pport->port_number,
					       pport->n_irq)),
		hsi_inl(pport->hsi_controller->base,
			HSI_SYS_GDD_MPU_IRQ_ENABLE_REG));

	/* Check CAWAKE line status */
	if (cawake_status) {
		dev_dbg(hsi_ctrl->dev, "CAWAKE rising edge detected\n");

		/* Check for possible mismatch (race condition) */
		if (unlikely(pport->cawake_status)) {
			dev_warn(hsi_ctrl->dev,
				 "Missed previous CAWAKE falling edge...\n");
			/* Deliver the missed DOWN event without the lock. */
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN,
					       NULL);
			spin_lock(&hsi_ctrl->lock);
			/* In case another CAWAKE interrupt occured and caused
			 * a race condition, clear CAWAKE backup interrupt to
			 * avoid handling twice the race condition */
			hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED,
						 true);
		}

		pport->cawake_status = 1;

		/* Callback may sleep or re-enter the driver: drop the lock. */
		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP, NULL);
		spin_lock(&hsi_ctrl->lock);

		/*
		 * HSI - OMAP4430-2.2BUG00055: i702
		 * HSI: DSP Swakeup generated is the same as the MPU Swakeup.
		 * System cannot enter off mode due to the DSP.
		 */
		if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_i702_PM_HSI_SWAKEUP))
			omap_pm_clear_dsp_wake_up();
	} else {
		dev_dbg(hsi_ctrl->dev, "CAWAKE falling edge detected\n");

		/* Check for pending DMA interrupt */
		if (hsi_is_dma_read_int_pending(hsi_ctrl)) {
			dev_dbg(hsi_ctrl->dev, "Pending DMA Read interrupt "
				"before CAWAKE->L, exiting "
				"Interrupt tasklet.\n");
			return -EAGAIN;
		}

		/* Check for possible mismatch (race condition) */
		if (unlikely(!pport->cawake_status)) {
			dev_warn(hsi_ctrl->dev,
				 "Missed previous CAWAKE rising edge...\n");
			/* Deliver the missed UP event without the lock. */
			spin_unlock(&hsi_ctrl->lock);
			hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_UP,
					       NULL);
			spin_lock(&hsi_ctrl->lock);
			/* In case another CAWAKE interrupt occured and caused
			 * a race condition, clear CAWAKE backup interrupt to
			 * avoid handling twice the race condition */
			hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED,
						 true);
		}

		pport->cawake_status = 0;

		/* Callback may sleep or re-enter the driver: drop the lock. */
		spin_unlock(&hsi_ctrl->lock);
		hsi_port_event_handler(pport, HSI_EVENT_CAWAKE_DOWN, NULL);
		spin_lock(&hsi_ctrl->lock);
	}

	/* If another CAWAKE event occured while previous is still processed */
	/* do not clear the status bit */
	cawake_status = hsi_get_cawake(pport);
	if (cawake_status != pport->cawake_status) {
		dev_warn(hsi_ctrl->dev, "CAWAKE line changed to %d while CAWAKE"
			 "event is still being processed\n", cawake_status);
		return -EAGAIN;
	}

	return 0;
}
/*
 * hsi_debug_port_show - debugfs dump of one HSI/SSI port's registers.
 * @m: seq_file to print into
 * @p: unused seq_file iterator argument
 *
 * Enables the module clocks, prints the CAWAKE line, MPU interrupt
 * status/enable registers, the full HST and HSR register sets (including
 * per-channel buffers and, on HSI devices, the FIFO mappings, DLL and RX
 * divisor), then disables the clocks again. Always returns 0.
 *
 * NOTE(review): reading some of these registers may have side effects on
 * live traffic (e.g. buffer reads) -- intended for debug use only.
 */
static int hsi_debug_port_show(struct seq_file *m, void *p)
{
	struct hsi_port *hsi_port = m->private;
	struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_port->port_number;
	int ch, fifo;
	long buff_offset;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	/* Registers are unreadable with the interface clock off. */
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* CAWAKE is only printed when the port has a GPIO wired for it. */
	if (hsi_port->cawake_gpio >= 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", hsi_get_cawake(hsi_port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_SYS_WAKE_REG(port)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
		   hsi_inl(base, HSI_SYS_MPU_ENABLE_REG(port,
							hsi_port->n_irq)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
		   hsi_inl(base, HSI_SYS_MPU_STATUS_REG(port,
							hsi_port->n_irq)));
	/* The "U" interrupt bank (channels 8..) only exists on HSI. */
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "MPU_U_ENABLE_IRQ%d\t: 0x%08x\n",
			   hsi_port->n_irq,
			   hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port,
							hsi_port->n_irq)));
		seq_printf(m, "MPU_U_STATUS_IRQ%d\t: 0x%08x\n",
			   hsi_port->n_irq,
			   hsi_inl(base, HSI_SYS_MPU_U_STATUS_REG(port,
							hsi_port->n_irq)));
	}

	/* HST */
	seq_printf(m, "\nHST\n===\n");
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_MODE_REG(port)));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_FRAMESIZE_REG(port)));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_DIVISOR_REG(port)));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_CHANNELS_REG(port)));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_ARBMODE_REG(port)));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_TXSTATE_REG(port)));
	/* On HSI, BUFSTATE is indexed per port (1 and 2), not by @port. */
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(1)));
		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(2)));
	} else {
		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(port)));
	}
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_BREAK_REG(port)));
	/* Per-channel TX buffers; negative offset means no such buffer. */
	for (ch = 0; ch < 8; ch++) {
		buff_offset = hsi_hst_buffer_reg(hsi_ctrl, port, ch);
		if (buff_offset >= 0)
			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				   hsi_inl(base, buff_offset));
	}
	if (hsi_driver_device_is_hsi(pdev)) {
		for (fifo = 0; fifo < HSI_HST_FIFO_COUNT; fifo++) {
			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
				   hsi_inl(base,
					   HSI_HST_MAPPING_FIFO_REG(fifo)));
		}
	}

	/* HSR */
	seq_printf(m, "\nHSR\n===\n");
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_MODE_REG(port)));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_CHANNELS_REG(port)));
	seq_printf(m, "COUNTERS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_COUNTERS_REG(port)));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_RXSTATE_REG(port)));
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(1)));
		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(2)));
	} else {
		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(port)));
	}
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_BREAK_REG(port)));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_ERROR_REG(port)));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_ERRORACK_REG(port)));
	/* Per-channel RX buffers; negative offset means no such buffer. */
	for (ch = 0; ch < 8; ch++) {
		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, port, ch);
		if (buff_offset >= 0)
			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				   hsi_inl(base, buff_offset));
	}
	/* FIFO mapping, DLL and RX divisor exist only on HSI devices. */
	if (hsi_driver_device_is_hsi(pdev)) {
		for (fifo = 0; fifo < HSI_HSR_FIFO_COUNT; fifo++) {
			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
				   hsi_inl(base,
					   HSI_HSR_MAPPING_FIFO_REG(fifo)));
		}
		seq_printf(m, "DLL\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_DLL_REG));
		seq_printf(m, "DIVISOR\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_DIVISOR_REG(port)));
	}

	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;
}