static void do_hsi_tasklet(unsigned long hsi_port) { struct hsi_port *pport = (struct hsi_port *)hsi_port; struct hsi_dev *hsi_ctrl = pport->hsi_controller; void __iomem *base = hsi_ctrl->base; unsigned int port = pport->port_number; unsigned int irq = pport->n_irq; u32 status_reg; struct platform_device *pd = to_platform_device(hsi_ctrl->dev); hsi_driver_int_proc(pport, HSI_SYS_MPU_STATUS_REG(port, irq), HSI_SYS_MPU_ENABLE_REG(port, irq), 0, min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX)); if (pport->max_ch > HSI_SSI_CHANNELS_MAX) hsi_driver_int_proc(pport, HSI_SYS_MPU_U_STATUS_REG(port, irq), HSI_SYS_MPU_U_ENABLE_REG(port, irq), HSI_SSI_CHANNELS_MAX, pport->max_ch); status_reg = hsi_inl(base, HSI_SYS_MPU_STATUS_REG(port, irq)) & hsi_inl(base, HSI_SYS_MPU_ENABLE_REG(port, irq)); if (hsi_driver_device_is_hsi(pd)) status_reg |= (hsi_inl(base, HSI_SYS_MPU_U_STATUS_REG(port, irq)) & hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port, irq))); if (status_reg) tasklet_hi_schedule(&pport->hsi_tasklet); else enable_irq(pport->irq); }
static void hsi_restore_ctx(struct platform_device *pdev) { struct hsi_platform_data *pdata = pdev->dev.platform_data; void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start); struct port_ctx *p; int port; int loss_count; loss_count = omap_pm_get_dev_context_loss_count(&pdev->dev); if (loss_count == pdata->ctx.loss_count) return; hsi_outl(pdata->ctx.sysconfig, base + HSI_SYS_SYSCONFIG_REG); hsi_outl(pdata->ctx.gdd_gcr, base + HSI_GDD_GCR_REG); for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx.pctx[port - 1]; hsi_outl(p->sys_mpu_enable[0], base + HSI_SYS_MPU_ENABLE_REG(port, 0)); hsi_outl(p->sys_mpu_enable[1], base + HSI_SYS_MPU_U_ENABLE_REG(port, 0)); hsi_outl(p->hst.frame_size, base + HSI_HST_FRAMESIZE_REG(port)); hsi_outl(p->hst.divisor, base + HSI_HST_DIVISOR_REG(port)); hsi_outl(p->hst.channels, base + HSI_HST_CHANNELS_REG(port)); hsi_outl(p->hst.arb_mode, base + HSI_HST_ARBMODE_REG(port)); hsi_outl(p->hsr.frame_size, base + HSI_HSR_FRAMESIZE_REG(port)); /* FIXME - check this register */ hsi_outl(p->hsr.timeout, base + HSI_HSR_COUNTERS_REG(port)); hsi_outl(p->hsr.channels, base + HSI_HSR_CHANNELS_REG(port)); } }
/*
 * hsi_restore_ctx - restore the full HSI register context from the
 * software copy kept in the platform data (after a context loss).
 * @hsi_ctrl: HSI controller descriptor.
 *
 * Restore order matters here: the HSR (receiver) MODE register is
 * written only after every other port register and the FIFO mapping
 * have been restored, because moving HSR out of SLEEP enables the
 * ACREADY flow-control mechanism towards the remote side.
 */
void hsi_restore_ctx(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	void __iomem *base = hsi_ctrl->base;
	struct hsi_port_ctx *p;
	int port;

	hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG);
	hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG);
	/* The DLL register is only written on HSI devices (not SSI) */
	if (hsi_driver_device_is_hsi(pdev))
		hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx->pctx[port - 1];
		/* HSI TOP */
		hsi_outl(p->sys_mpu_enable[0], base,
			 HSI_SYS_MPU_ENABLE_REG(port, 0));
		hsi_outl(p->sys_mpu_enable[1], base,
			 HSI_SYS_MPU_U_ENABLE_REG(port, 0));
		/* HST */
		hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port));
		/* FRAMESIZE is only restored on non-HSI (SSI) devices */
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(p->hst.frame_size, base,
				 HSI_HST_FRAMESIZE_REG(port));
		hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
		hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port));
		hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));
		/* HSR (mode is intentionally restored last, see below) */
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(p->hsr.frame_size, base,
				 HSI_HSR_FRAMESIZE_REG(port));
		hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port));
		hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
		hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
	}

	if (hsi_driver_device_is_hsi(pdev)) {
		/* SW strategy for HSI fifo management can be changed here */
		hsi_fifo_mapping(hsi_ctrl, hsi_ctrl->fifo_mapping_strategy);
	}

	/* As a last step move HSR from MODE_VAL.SLEEP to the relevant mode. */
	/* This will enable the ACREADY flow control mechanism. */
	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx->pctx[port - 1];
		hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port));
	}
}
void hsi_restore_ctx(struct hsi_dev *hsi_ctrl) { struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); void __iomem *base = hsi_ctrl->base; struct port_ctx *p; int port; hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG); hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG); if (hsi_driver_device_is_hsi(pdev)) hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG); for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx->pctx[port - 1]; /* HSI TOP */ hsi_outl(p->sys_mpu_enable[0], base, HSI_SYS_MPU_ENABLE_REG(port, 0)); hsi_outl(p->sys_mpu_enable[1], base, HSI_SYS_MPU_U_ENABLE_REG(port, 0)); /* HST */ hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) hsi_outl(p->hst.frame_size, base, HSI_HST_FRAMESIZE_REG(port)); hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port)); hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port)); hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port)); /* HSR */ hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) hsi_outl(p->hsr.frame_size, base, HSI_HSR_FRAMESIZE_REG(port)); hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port)); hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port)); hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port)); } if (hsi_driver_device_is_hsi(pdev)) { /* SW strategy for HSI fifo management can be changed here */ hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT); } }
void hsi_save_ctx(struct hsi_dev *hsi_ctrl) { struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); void __iomem *base = hsi_ctrl->base; struct hsi_port_ctx *p; int port; pdata->ctx->sysconfig = hsi_inl(base, HSI_SYS_SYSCONFIG_REG); pdata->ctx->gdd_gcr = hsi_inl(base, HSI_GDD_GCR_REG); if (hsi_driver_device_is_hsi(pdev)) pdata->ctx->dll = hsi_inl(base, HSI_HSR_DLL_REG); for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx->pctx[port - 1]; /* HSI TOP */ p->sys_mpu_enable[0] = hsi_inl(base, HSI_SYS_MPU_ENABLE_REG(port, 0)); p->sys_mpu_enable[1] = hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port, 0)); /* HST */ p->hst.mode = hsi_inl(base, HSI_HST_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) p->hst.frame_size = hsi_inl(base, HSI_HST_FRAMESIZE_REG(port)); p->hst.divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port)); p->hst.channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port)); p->hst.arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port)); /* HSR */ p->hsr.mode = hsi_inl(base, HSI_HSR_MODE_REG(port)); if (!hsi_driver_device_is_hsi(pdev)) p->hsr.frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)); p->hsr.divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port)); p->hsr.channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port)); p->hsr.counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port)); } }
/*
 * hsi_process_int_event - process pending MPU interrupt events on a port.
 * @pport: HSI port to service.
 *
 * Returns the set of interrupt status bits that were handled, so the
 * caller can decide whether more processing is required.
 *
 * The CAWAKE line may toggle again while its first interrupt is still
 * being processed; a "backup" copy of the interrupt is used to detect
 * and handle that race (cawake_double_int).
 */
static u32 hsi_process_int_event(struct hsi_port *pport)
{
	unsigned int port = pport->port_number;
	unsigned int irq = pport->n_irq;
	u32 status_reg;
	bool cawake_double_int = false;

	/* Clear CAWAKE backup interrupt */
	hsi_driver_ack_interrupt(pport, HSI_CAWAKEDETECTED, true);

	/* Process events for channels 0..7 */
	/* NOTE: stop-channel argument is an inclusive index, hence the -1 */
	status_reg = hsi_driver_int_proc(pport,
			HSI_SYS_MPU_STATUS_REG(port, irq),
			HSI_SYS_MPU_ENABLE_REG(port, irq),
			0,
			min(pport->max_ch, (u8) HSI_SSI_CHANNELS_MAX) - 1,
			cawake_double_int);

	/* If another CAWAKE interrupt occured while previous is still being
	 * processed, mark it for extra processing */
	if (hsi_driver_is_interrupt_pending(pport, HSI_CAWAKEDETECTED, true) &&
	    (status_reg & HSI_CAWAKEDETECTED)) {
		dev_warn(pport->hsi_controller->dev, "New CAWAKE interrupt "
			 "detected during interrupt processing\n");
		/* Force processing of backup CAWAKE interrupt */
		cawake_double_int = true;
	}

	/* Process events for channels 8..15 or backup interrupt if needed */
	if ((pport->max_ch > HSI_SSI_CHANNELS_MAX) || cawake_double_int)
		status_reg |= hsi_driver_int_proc(pport,
				HSI_SYS_MPU_U_STATUS_REG(port, irq),
				HSI_SYS_MPU_U_ENABLE_REG(port, irq),
				HSI_SSI_CHANNELS_MAX,
				pport->max_ch - 1,
				cawake_double_int);

	return status_reg;
}
static int hsi_debug_port_show(struct seq_file *m, void *p) { struct hsi_port *hsi_port = m->private; struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller; void __iomem *base = hsi_ctrl->base; unsigned int port = hsi_port->port_number; int ch, fifo; long buff_offset; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); hsi_clocks_enable(hsi_ctrl->dev, __func__); if (hsi_port->cawake_gpio >= 0) seq_printf(m, "CAWAKE\t\t: %d\n", hsi_get_cawake(hsi_port)); seq_printf(m, "WAKE\t\t: 0x%08x\n", hsi_inl(base, HSI_SYS_WAKE_REG(port))); seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", hsi_port->n_irq, hsi_inl(base, HSI_SYS_MPU_ENABLE_REG(port, hsi_port->n_irq))); seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq, hsi_inl(base, HSI_SYS_MPU_STATUS_REG(port, hsi_port->n_irq))); if (hsi_driver_device_is_hsi(pdev)) { seq_printf(m, "MPU_U_ENABLE_IRQ%d\t: 0x%08x\n", hsi_port->n_irq, hsi_inl(base, HSI_SYS_MPU_U_ENABLE_REG(port, hsi_port->n_irq))); seq_printf(m, "MPU_U_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq, hsi_inl(base, HSI_SYS_MPU_U_STATUS_REG(port, hsi_port->n_irq))); } /* HST */ seq_printf(m, "\nHST\n===\n"); seq_printf(m, "MODE\t\t: 0x%08x\n", hsi_inl(base, HSI_HST_MODE_REG(port))); seq_printf(m, "FRAMESIZE\t: 0x%08x\n", hsi_inl(base, HSI_HST_FRAMESIZE_REG(port))); seq_printf(m, "DIVISOR\t\t: 0x%08x\n", hsi_inl(base, HSI_HST_DIVISOR_REG(port))); seq_printf(m, "CHANNELS\t: 0x%08x\n", hsi_inl(base, HSI_HST_CHANNELS_REG(port))); seq_printf(m, "ARBMODE\t\t: 0x%08x\n", hsi_inl(base, HSI_HST_ARBMODE_REG(port))); seq_printf(m, "TXSTATE\t\t: 0x%08x\n", hsi_inl(base, HSI_HST_TXSTATE_REG(port))); if (hsi_driver_device_is_hsi(pdev)) { seq_printf(m, "BUFSTATE P1\t: 0x%08x\n", hsi_inl(base, HSI_HST_BUFSTATE_REG(1))); seq_printf(m, "BUFSTATE P2\t: 0x%08x\n", hsi_inl(base, HSI_HST_BUFSTATE_REG(2))); } else { seq_printf(m, "BUFSTATE\t: 0x%08x\n", hsi_inl(base, HSI_HST_BUFSTATE_REG(port))); } seq_printf(m, "BREAK\t\t: 0x%08x\n", hsi_inl(base, HSI_HST_BREAK_REG(port))); 
for (ch = 0; ch < 8; ch++) { buff_offset = hsi_hst_buffer_reg(hsi_ctrl, port, ch); if (buff_offset >= 0) seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, hsi_inl(base, buff_offset)); } if (hsi_driver_device_is_hsi(pdev)) { for (fifo = 0; fifo < HSI_HST_FIFO_COUNT; fifo++) { seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo, hsi_inl(base, HSI_HST_MAPPING_FIFO_REG(fifo))); } } /* HSR */ seq_printf(m, "\nHSR\n===\n"); seq_printf(m, "MODE\t\t: 0x%08x\n", hsi_inl(base, HSI_HSR_MODE_REG(port))); seq_printf(m, "FRAMESIZE\t: 0x%08x\n", hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port))); seq_printf(m, "CHANNELS\t: 0x%08x\n", hsi_inl(base, HSI_HSR_CHANNELS_REG(port))); seq_printf(m, "COUNTERS\t: 0x%08x\n", hsi_inl(base, HSI_HSR_COUNTERS_REG(port))); seq_printf(m, "RXSTATE\t\t: 0x%08x\n", hsi_inl(base, HSI_HSR_RXSTATE_REG(port))); if (hsi_driver_device_is_hsi(pdev)) { seq_printf(m, "BUFSTATE P1\t: 0x%08x\n", hsi_inl(base, HSI_HSR_BUFSTATE_REG(1))); seq_printf(m, "BUFSTATE P2\t: 0x%08x\n", hsi_inl(base, HSI_HSR_BUFSTATE_REG(2))); } else { seq_printf(m, "BUFSTATE\t: 0x%08x\n", hsi_inl(base, HSI_HSR_BUFSTATE_REG(port))); } seq_printf(m, "BREAK\t\t: 0x%08x\n", hsi_inl(base, HSI_HSR_BREAK_REG(port))); seq_printf(m, "ERROR\t\t: 0x%08x\n", hsi_inl(base, HSI_HSR_ERROR_REG(port))); seq_printf(m, "ERRORACK\t: 0x%08x\n", hsi_inl(base, HSI_HSR_ERRORACK_REG(port))); for (ch = 0; ch < 8; ch++) { buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, port, ch); if (buff_offset >= 0) seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, hsi_inl(base, buff_offset)); } if (hsi_driver_device_is_hsi(pdev)) { for (fifo = 0; fifo < HSI_HSR_FIFO_COUNT; fifo++) { seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo, hsi_inl(base, HSI_HSR_MAPPING_FIFO_REG(fifo))); } seq_printf(m, "DLL\t: 0x%08x\n", hsi_inl(base, HSI_HSR_DLL_REG)); seq_printf(m, "DIVISOR\t: 0x%08x\n", hsi_inl(base, HSI_HSR_DIVISOR_REG(port))); } hsi_clocks_disable(hsi_ctrl->dev, __func__); return 0; }