/**
 * hsi_runtime_resume - Runtime PM resume callback: mark clocks enabled and
 *                      restore the full HSI register context.
 * @dev: reference to the HSI platform device.
 *
 * Returns 0; on return the device must be fully operational and able to
 * complete I/O.
 */
int hsi_runtime_resume(struct device *dev)
{
	struct platform_device *pd = to_platform_device(dev);
	struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	unsigned int i;

	dev_dbg(dev, "%s\n", __func__);

	/* Bookkeeping only: flag drift between our shadow clock flag and
	 * what runtime PM believes the clock state to be. */
	if (hsi_ctrl->clock_enabled)
		dev_warn(dev, "Warning: clock status mismatch vs runtime PM\n");

	hsi_ctrl->clock_enabled = true;

	/* Restore context */
	hsi_restore_ctx(hsi_ctrl);

	/* Restore HSR_MODE register value */
	/* WARNING: works only in this configuration: */
	/* - Flow = Synchronized */
	/* - Mode = frame */
	/* NOTE(review): only PORT1's HSR mode is forced here, while the
	 * wakeup loop below walks every port — confirm single-RX-port use. */
	hsi_outl(HSI_HSR_MODE_FRAME, hsi_ctrl->base,
		 HSI_HSR_MODE_REG(HSI_PORT1));

	/* When HSI is ON, no need for IO wakeup mechanism on any HSI port */
	for (i = 0; i < hsi_ctrl->max_p; i++)
		pdata->wakeup_disable(hsi_ctrl->hsi_port[i].port_number);

	/* HSI device is now fully operational and _must_ be able to */
	/* complete I/O operations */
	return 0;
}
/**
 * hsi_runtime_suspend - Prepare HSI for low power: device will not process
 *                       data and will not communicate with the CPU.
 * @dev: reference to the hsi device.
 *
 * Return value : -EBUSY or -EAGAIN if device is busy and still operational
 */
int hsi_runtime_suspend(struct device *dev)
{
	struct platform_device *pd = to_platform_device(dev);
	struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	int port;

	dev_dbg(dev, "%s\n", __func__);

	/* Bookkeeping only: flag drift between our shadow clock flag and
	 * what runtime PM believes the clock state to be. */
	if (!hsi_ctrl->clock_enabled)
		dev_warn(dev, "Warning: clock status mismatch vs runtime PM\n");

	/* Save context before forcing SLEEP below, so the saved HSR mode is
	 * the operational one and can be restored on resume. */
	hsi_save_ctx(hsi_ctrl);

	hsi_ctrl->clock_enabled = false;

	/* Put HSR into SLEEP mode to force ACREADY to low while HSI is idle */
	for (port = 1; port <= pdata->num_ports; port++) {
		hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, hsi_ctrl->base,
			     HSI_HSR_MODE_REG(port));
	}

	/* HSI is going to INA/RET/OFF, it needs IO wakeup mechanism enabled */
	/* NOTE(review): only index 0 is passed to the wakeup hooks, whereas
	 * resume disables wakeup on every port — confirm whether multi-port
	 * wakeup handling is required here. */
	if (device_may_wakeup(dev))
		pdata->wakeup_enable(0);
	else
		pdata->wakeup_disable(0);

	/* HSI is now ready to be put in low power state */
	return 0;
}
/**
 * hsi_softreset - Force a SW RESET of HSI (core + DMA)
 * @hsi_ctrl: reference to the hsi controller to be reset.
 *
 * Returns 0 on success, -EIO if either the core or the DMA reset fails to
 * complete within HSI_RESETDONE_MAX_RETRIES polls.
 */
int hsi_softreset(struct hsi_dev *hsi_ctrl)
{
	unsigned int ind = 0;
	unsigned int port;
	void __iomem *base = hsi_ctrl->base;
	u32 status;

	/* HSI-C1BUG00088: i696 : HSI: Issue with SW reset
	 * No recovery from SW reset under specific circumstances
	 * If a SW RESET is done while some HSI errors are still not
	 * acknowledged, the HSR FSM is stucked. */
	if (is_hsi_errata(hsi_ctrl, HSI_ERRATUM_i696_SW_RESET_FSM_STUCK)) {
		/* Workaround: quiesce each receiver (SLEEP) and acknowledge
		 * all pending HSR errors before asserting the reset. */
		for (port = 1; port <= hsi_ctrl->max_p; port++) {
			hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, base,
				     HSI_HSR_MODE_REG(port));
			hsi_outl(HSI_HSR_ERROR_ALL, base,
				 HSI_HSR_ERRORACK_REG(port));
		}
	}

	/* Reseting HSI Block */
	hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
	/* Busy-poll SYSSTATUS until RESETDONE is signalled or we give up. */
	do {
		status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
		ind++;
	} while ((!(status & HSI_RESETDONE)) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI SW_RESET failed to complete within"
			" %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	} else if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	ind = 0;

	/* Reseting DMA Engine */
	hsi_outl_or(HSI_GDD_GRST_SWRESET, base, HSI_GDD_GRST_REG);
	/* GRST reads back with SWRESET set while the DMA reset is ongoing. */
	do {
		status = hsi_inl(base, HSI_GDD_GRST_REG);
		ind++;
	} while ((status & HSI_GDD_GRST_SWRESET) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI DMA SW_RESET failed to complete"
			" within %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	}

	if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI DMA SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	return 0;
}
void hsi_get_rx(struct hsi_port *sport, struct hsr_ctx *cfg) { struct hsi_dev *hsi_ctrl = sport->hsi_controller; void __iomem *base = hsi_ctrl->base; int port = sport->port_number; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); cfg->mode = hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_MODE_VAL_MASK; cfg->flow = (hsi_inl(base, HSI_HSR_MODE_REG(port)) & HSI_FLOW_VAL_MASK) >> HSI_FLOW_OFFSET; cfg->frame_size = hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)); cfg->channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port)); if (hsi_driver_device_is_hsi(pdev)) { cfg->divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port)); cfg->counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port)); } else { cfg->counters = hsi_inl(base, SSI_TIMEOUT_REG(port)); } }
/**
 * hsi_softreset - Force a SW RESET of HSI (core + DMA)
 * @hsi_ctrl: reference to the hsi controller to be reset.
 *
 * Returns 0 on success, -EIO if either the core or the DMA reset fails to
 * complete within HSI_RESETDONE_MAX_RETRIES polls.
 */
int hsi_softreset(struct hsi_dev *hsi_ctrl)
{
	unsigned int ind = 0;
	unsigned int port;
	void __iomem *base = hsi_ctrl->base;
	u32 status;

	/* SW WA for HSI-C1BUG00088 OMAP4430 HSI : No recovery from SW reset */
	/* under specific circumstances */
	/* Quiesce each receiver (SLEEP) and acknowledge all pending HSR
	 * errors before asserting the reset. */
	for (port = 1; port <= hsi_ctrl->max_p; port++) {
		hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, base,
			     HSI_HSR_MODE_REG(port));
		hsi_outl(HSI_HSR_ERROR_ALL, base, HSI_HSR_ERRORACK_REG(port));
	}

	/* Reseting HSI Block */
	hsi_outl_or(HSI_SOFTRESET, base, HSI_SYS_SYSCONFIG_REG);
	/* Busy-poll SYSSTATUS until RESETDONE is signalled or we give up. */
	do {
		status = hsi_inl(base, HSI_SYS_SYSSTATUS_REG);
		ind++;
	} while ((!(status & HSI_RESETDONE)) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI SW_RESET failed to complete within"
			" %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	} else if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	ind = 0;

	/* Reseting DMA Engine */
	hsi_outl_or(HSI_GDD_GRST_SWRESET, base, HSI_GDD_GRST_REG);
	/* GRST reads back with SWRESET set while the DMA reset is ongoing. */
	do {
		status = hsi_inl(base, HSI_GDD_GRST_REG);
		ind++;
	} while ((status & HSI_GDD_GRST_SWRESET) &&
		 (ind < HSI_RESETDONE_MAX_RETRIES));

	if (ind >= HSI_RESETDONE_MAX_RETRIES) {
		dev_err(hsi_ctrl->dev, "HSI DMA SW_RESET failed to complete"
			" within %d retries.\n", HSI_RESETDONE_MAX_RETRIES);
		return -EIO;
	}

	if (ind > HSI_RESETDONE_NORMAL_RETRIES) {
		dev_warn(hsi_ctrl->dev, "HSI DMA SW_RESET abnormally long:"
			 " %d retries to complete.\n", ind);
	}

	return 0;
}
/**
 * ssi_set_mode - Program the same mode value into the HST and HSR MODE
 *                registers of every SSI port.
 * @pdev: SSI platform device (resource[0] holds the module base address).
 * @mode: raw value written to both MODE registers.
 */
static void ssi_set_mode(struct platform_device *pdev, u32 mode)
{
	struct hsi_platform_data *pdata = pdev->dev.platform_data;
	void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start);
	int port;

	for (port = 1; port <= pdata->num_ports; port++) {
		/* Bug fix: these are memory-mapped registers, so use the
		 * driver's hsi_outl accessor (as hsi_set_mode does) instead
		 * of x86-style port I/O outl() with the __iomem pointer
		 * truncated through an (unsigned int) cast. */
		hsi_outl(mode, base + HSI_HST_MODE_REG(port));
		hsi_outl(mode, base + HSI_HSR_MODE_REG(port));
	}
}
/**
 * hsi_hsr_suspend - Force every HSR port into SLEEP mode.
 * @hsi_ctrl: HSI controller whose receivers are being quiesced.
 *
 * Masking the mode bits down to SLEEP on each port prepares the receivers
 * for the controller going idle.
 */
void hsi_hsr_suspend(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	int i;

	dev_dbg(hsi_ctrl->dev, "%s\n", __func__);

	for (i = 1; i <= pdata->num_ports; i++)
		hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, hsi_ctrl->base,
			     HSI_HSR_MODE_REG(i));
}
/**
 * hsi_restore_ctx - Restore the full HSI register context previously saved
 *                   by hsi_save_ctx().
 * @hsi_ctrl: HSI controller to restore.
 *
 * Ordering matters: all common, HST and HSR configuration registers are
 * restored first, and the HSR MODE registers are written last, since that
 * write re-enables the ACREADY flow control mechanism.
 */
void hsi_restore_ctx(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	void __iomem *base = hsi_ctrl->base;
	struct hsi_port_ctx *p;
	int port;

	/* Controller-wide registers. */
	hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG);
	hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG);
	/* The DLL register is only present on full HSI (not SSI) devices. */
	if (hsi_driver_device_is_hsi(pdev))
		hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx->pctx[port - 1];
		/* HSI TOP */
		hsi_outl(p->sys_mpu_enable[0], base,
			 HSI_SYS_MPU_ENABLE_REG(port, 0));
		hsi_outl(p->sys_mpu_enable[1], base,
			 HSI_SYS_MPU_U_ENABLE_REG(port, 0));

		/* HST */
		hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port));
		/* FRAMESIZE is only programmable on SSI devices. */
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(p->hst.frame_size, base,
				 HSI_HST_FRAMESIZE_REG(port));
		hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
		hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port));
		hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));

		/* HSR (MODE is deliberately NOT restored here, see below) */
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(p->hsr.frame_size, base,
				 HSI_HSR_FRAMESIZE_REG(port));
		hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port));
		hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
		hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
	}

	if (hsi_driver_device_is_hsi(pdev)) {
		/* SW strategy for HSI fifo management can be changed here */
		hsi_fifo_mapping(hsi_ctrl, hsi_ctrl->fifo_mapping_strategy);
	}

	/* As a last step move HSR from MODE_VAL.SLEEP to the relevant mode. */
	/* This will enable the ACREADY flow control mechanism. */
	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx->pctx[port - 1];
		hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port));
	}
}
/**
 * hsi_restore_mode - Re-program the saved HST/HSR MODE registers of every
 *                    port from the platform-data context.
 * @pdev: HSI platform device (resource[0] holds the module base address).
 */
static void hsi_restore_mode(struct platform_device *pdev)
{
	struct hsi_platform_data *pdata = pdev->dev.platform_data;
	void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start);
	int i;

	for (i = 1; i <= pdata->num_ports; i++) {
		struct port_ctx *ctx = &pdata->ctx.pctx[i - 1];

		hsi_outl(ctx->hst.mode, base + HSI_HST_MODE_REG(i));
		hsi_outl(ctx->hsr.mode, base + HSI_HSR_MODE_REG(i));
	}
}
void hsi_hsr_suspend(struct hsi_dev *hsi_ctrl) { struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data; int port; dev_dbg(hsi_ctrl->dev, "%s\n", __func__); /* Put HSR into SLEEP mode to force ACREADY to low while HSI is idle */ for (port = 1; port <= pdata->num_ports; port++) { hsi_outl_and(HSI_HSR_MODE_MODE_VAL_SLEEP, hsi_ctrl->base, HSI_HSR_MODE_REG(port)); } }
/**
 * hsi_set_mode - Write the same mode word into the HST and HSR MODE
 *                registers of every port.
 * @pdev: HSI platform device (resource[0] holds the module base address).
 * @mode: raw MODE register value.
 */
static void hsi_set_mode(struct platform_device *pdev, u32 mode)
{
	struct hsi_platform_data *pdata = pdev->dev.platform_data;
	void __iomem *base = OMAP2_IO_ADDRESS(pdev->resource[0].start);
	int i;

	/* FIXME - to update: need read/modify/write or something else:
	 * this register now also contains flow and wake ctrl */
	for (i = 1; i <= pdata->num_ports; i++) {
		hsi_outl(mode, base + HSI_HST_MODE_REG(i));
		hsi_outl(mode, base + HSI_HSR_MODE_REG(i));
	}
}
/**
 * hsi_hsr_resume - Restore each port's saved HSR MODE register.
 * @hsi_ctrl: HSI controller being resumed.
 */
void hsi_hsr_resume(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	void __iomem *base = hsi_ctrl->base;
	int i;

	dev_dbg(hsi_ctrl->dev, "%s\n", __func__);

	for (i = 1; i <= pdata->num_ports; i++) {
		struct hsi_port_ctx *ctx = &pdata->ctx->pctx[i - 1];

		hsi_outl(ctx->hsr.mode, base, HSI_HSR_MODE_REG(i));
	}
}
void hsi_hsr_resume(struct hsi_dev *hsi_ctrl) { struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data; void __iomem *base = hsi_ctrl->base; struct hsi_port_ctx *p; int port; dev_dbg(hsi_ctrl->dev, "%s\n", __func__); /* Move HSR from MODE_VAL.SLEEP to the relevant mode. */ /* This will enable the ACREADY flow control mechanism. */ for (port = 1; port <= pdata->num_ports; port++) { p = &pdata->ctx->pctx[port - 1]; hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port)); } }
/**
 * hsi_restore_ctx - Restore the HSI register context previously saved by
 *                   hsi_save_ctx().
 * @hsi_ctrl: HSI controller to restore.
 *
 * NOTE(review): this variant restores each port's HSR MODE in the middle of
 * the per-port sequence, before the remaining HSR registers — confirm this
 * ordering is safe with respect to the ACREADY handshake.
 */
void hsi_restore_ctx(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	void __iomem *base = hsi_ctrl->base;
	struct port_ctx *p;
	int port;

	/* Controller-wide registers. */
	hsi_outl(pdata->ctx->sysconfig, base, HSI_SYS_SYSCONFIG_REG);
	hsi_outl(pdata->ctx->gdd_gcr, base, HSI_GDD_GCR_REG);
	/* The DLL register is only present on full HSI (not SSI) devices. */
	if (hsi_driver_device_is_hsi(pdev))
		hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx->pctx[port - 1];
		/* HSI TOP */
		hsi_outl(p->sys_mpu_enable[0], base,
			 HSI_SYS_MPU_ENABLE_REG(port, 0));
		hsi_outl(p->sys_mpu_enable[1], base,
			 HSI_SYS_MPU_U_ENABLE_REG(port, 0));

		/* HST */
		hsi_outl(p->hst.mode, base, HSI_HST_MODE_REG(port));
		/* FRAMESIZE is only programmable on SSI devices. */
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(p->hst.frame_size, base,
				 HSI_HST_FRAMESIZE_REG(port));
		hsi_outl(p->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
		hsi_outl(p->hst.channels, base, HSI_HST_CHANNELS_REG(port));
		hsi_outl(p->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));

		/* HSR */
		hsi_outl(p->hsr.mode, base, HSI_HSR_MODE_REG(port));
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(p->hsr.frame_size, base,
				 HSI_HSR_FRAMESIZE_REG(port));
		hsi_outl(p->hsr.divisor, base, HSI_HSR_DIVISOR_REG(port));
		hsi_outl(p->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
		hsi_outl(p->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
	}

	if (hsi_driver_device_is_hsi(pdev)) {
		/* SW strategy for HSI fifo management can be changed here */
		hsi_fifo_mapping(hsi_ctrl, HSI_FIFO_MAPPING_DEFAULT);
	}
}
/**
 * hsi_set_ports_default - Program every port with the default configuration
 *                         taken from platform data.
 * @hsi_ctrl: HSI controller being initialized.
 * @pd: platform device carrying the hsi_platform_data with the defaults.
 *
 * Register layout differs between SSI and full HSI devices: FRAMESIZE only
 * exists on SSI, while HSR DIVISOR, the FIFO mapping and the DLL register
 * only exist on full HSI.
 */
static void hsi_set_ports_default(struct hsi_dev *hsi_ctrl,
				  struct platform_device *pd)
{
	struct hsi_port_ctx *cfg;
	struct hsi_platform_data *pdata = pd->dev.platform_data;
	unsigned int port = 0;
	void __iomem *base = hsi_ctrl->base;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	for (port = 1; port <= pdata->num_ports; port++) {
		cfg = &pdata->ctx->pctx[port - 1];
		/* HST: mode and flow combined with SW wake control. */
		hsi_outl(cfg->hst.mode | cfg->hst.flow |
			 HSI_HST_MODE_WAKE_CTRL_SW, base,
			 HSI_HST_MODE_REG(port));
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(cfg->hst.frame_size, base,
				 HSI_HST_FRAMESIZE_REG(port));
		hsi_outl(cfg->hst.divisor, base, HSI_HST_DIVISOR_REG(port));
		hsi_outl(cfg->hst.channels, base, HSI_HST_CHANNELS_REG(port));
		hsi_outl(cfg->hst.arb_mode, base, HSI_HST_ARBMODE_REG(port));

		/* HSR */
		hsi_outl(cfg->hsr.mode | cfg->hsr.flow, base,
			 HSI_HSR_MODE_REG(port));
		if (!hsi_driver_device_is_hsi(pdev))
			hsi_outl(cfg->hsr.frame_size, base,
				 HSI_HSR_FRAMESIZE_REG(port));
		hsi_outl(cfg->hsr.channels, base, HSI_HSR_CHANNELS_REG(port));
		if (hsi_driver_device_is_hsi(pdev))
			hsi_outl(cfg->hsr.divisor, base,
				 HSI_HSR_DIVISOR_REG(port));
		hsi_outl(cfg->hsr.counters, base, HSI_HSR_COUNTERS_REG(port));
	}

	if (hsi_driver_device_is_hsi(pdev)) {
		/* SW strategy for HSI fifo management can be changed here */
		hsi_fifo_mapping(hsi_ctrl, hsi_ctrl->fifo_mapping_strategy);
		hsi_outl(pdata->ctx->dll, base, HSI_HSR_DLL_REG);
	}
}
/**
 * hsi_save_ctx - Snapshot the HSI register context into platform data so it
 *                can be restored by hsi_restore_ctx() after low power.
 * @hsi_ctrl: HSI controller whose state is being saved.
 *
 * Read-only with respect to the hardware; only the pdata->ctx shadow
 * structure is written.
 */
void hsi_save_ctx(struct hsi_dev *hsi_ctrl)
{
	struct hsi_platform_data *pdata = hsi_ctrl->dev->platform_data;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);
	void __iomem *base = hsi_ctrl->base;
	struct hsi_port_ctx *p;
	int port;

	/* Controller-wide registers. */
	pdata->ctx->sysconfig = hsi_inl(base, HSI_SYS_SYSCONFIG_REG);
	pdata->ctx->gdd_gcr = hsi_inl(base, HSI_GDD_GCR_REG);
	/* The DLL register is only present on full HSI (not SSI) devices. */
	if (hsi_driver_device_is_hsi(pdev))
		pdata->ctx->dll = hsi_inl(base, HSI_HSR_DLL_REG);

	for (port = 1; port <= pdata->num_ports; port++) {
		p = &pdata->ctx->pctx[port - 1];
		/* HSI TOP */
		p->sys_mpu_enable[0] = hsi_inl(base,
					       HSI_SYS_MPU_ENABLE_REG(port, 0));
		p->sys_mpu_enable[1] = hsi_inl(base,
					       HSI_SYS_MPU_U_ENABLE_REG(port,
									0));

		/* HST */
		p->hst.mode = hsi_inl(base, HSI_HST_MODE_REG(port));
		/* FRAMESIZE only exists on SSI devices. */
		if (!hsi_driver_device_is_hsi(pdev))
			p->hst.frame_size = hsi_inl(base,
						HSI_HST_FRAMESIZE_REG(port));
		p->hst.divisor = hsi_inl(base, HSI_HST_DIVISOR_REG(port));
		p->hst.channels = hsi_inl(base, HSI_HST_CHANNELS_REG(port));
		p->hst.arb_mode = hsi_inl(base, HSI_HST_ARBMODE_REG(port));

		/* HSR */
		p->hsr.mode = hsi_inl(base, HSI_HSR_MODE_REG(port));
		if (!hsi_driver_device_is_hsi(pdev))
			p->hsr.frame_size = hsi_inl(base,
						HSI_HSR_FRAMESIZE_REG(port));
		p->hsr.divisor = hsi_inl(base, HSI_HSR_DIVISOR_REG(port));
		p->hsr.channels = hsi_inl(base, HSI_HSR_CHANNELS_REG(port));
		p->hsr.counters = hsi_inl(base, HSI_HSR_COUNTERS_REG(port));
	}
}
int hsi_set_rx(struct hsi_port *sport, struct hsr_ctx *cfg) { struct hsi_dev *hsi_ctrl = sport->hsi_controller; void __iomem *base = hsi_ctrl->base; int port = sport->port_number; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); if (((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_STREAM) && ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_FRAME) && ((cfg->mode & HSI_MODE_VAL_MASK) != HSI_MODE_SLEEP) && (cfg->mode != NOT_SET)) return -EINVAL; if (hsi_driver_device_is_hsi(pdev)) { if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED) && ((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_PIPELINED) && (cfg->flow != NOT_SET)) return -EINVAL; /* HSI only supports payload size of 32bits */ if ((cfg->frame_size != HSI_FRAMESIZE_MAX) && (cfg->frame_size != NOT_SET)) return -EINVAL; } else { if (((cfg->flow & HSI_FLOW_VAL_MASK) != HSI_FLOW_SYNCHRONIZED) && (cfg->flow != NOT_SET)) return -EINVAL; /* HSI only supports payload size of 32bits */ if ((cfg->frame_size != HSI_FRAMESIZE_MAX) && (cfg->frame_size != NOT_SET)) return -EINVAL; } if ((cfg->channels == 0) || ((cfg->channels > sport->max_ch) && (cfg->channels != NOT_SET))) return -EINVAL; if (hsi_driver_device_is_hsi(pdev)) { if ((cfg->divisor > HSI_MAX_RX_DIVISOR) && (cfg->divisor != HSI_HSR_DIVISOR_AUTO) && (cfg->divisor != NOT_SET)) return -EINVAL; } if ((cfg->mode != NOT_SET) && (cfg->flow != NOT_SET)) hsi_outl(cfg->mode | ((cfg->flow & HSI_FLOW_VAL_MASK) << HSI_FLOW_OFFSET), base, HSI_HSR_MODE_REG(port)); if (cfg->frame_size != NOT_SET) hsi_outl(cfg->frame_size, base, HSI_HSR_FRAMESIZE_REG(port)); if (cfg->channels != NOT_SET) { if ((cfg->channels & (-cfg->channels)) ^ cfg->channels) return -EINVAL; else hsi_outl(cfg->channels, base, HSI_HSR_CHANNELS_REG(port)); } return hsi_set_rx_divisor(sport, cfg); }
/**
 * hsi_debug_port_show - debugfs dump of every register of one HSI port.
 * @m: seq_file to print into (m->private holds the struct hsi_port).
 * @p: unused seq_file iterator argument.
 *
 * Enables the HSI clocks for the duration of the register reads and always
 * returns 0.
 */
static int hsi_debug_port_show(struct seq_file *m, void *p)
{
	struct hsi_port *hsi_port = m->private;
	struct hsi_dev *hsi_ctrl = hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_port->port_number;
	int ch, fifo;
	long buff_offset;
	struct platform_device *pdev = to_platform_device(hsi_ctrl->dev);

	/* Registers can only be read with the module clocked. */
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* CAWAKE is only reported when a GPIO is wired for it. */
	if (hsi_port->cawake_gpio >= 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", hsi_get_cawake(hsi_port));

	seq_printf(m, "WAKE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_SYS_WAKE_REG(port)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
		   hsi_inl(base,
			   HSI_SYS_MPU_ENABLE_REG(port, hsi_port->n_irq)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", hsi_port->n_irq,
		   hsi_inl(base,
			   HSI_SYS_MPU_STATUS_REG(port, hsi_port->n_irq)));
	/* The "U" (upper) interrupt banks only exist on full HSI devices. */
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "MPU_U_ENABLE_IRQ%d\t: 0x%08x\n",
			   hsi_port->n_irq,
			   hsi_inl(base,
				   HSI_SYS_MPU_U_ENABLE_REG(port,
							    hsi_port->n_irq)));
		seq_printf(m, "MPU_U_STATUS_IRQ%d\t: 0x%08x\n",
			   hsi_port->n_irq,
			   hsi_inl(base,
				   HSI_SYS_MPU_U_STATUS_REG(port,
							    hsi_port->n_irq)));
	}

	/* HST (transmitter) register bank. */
	seq_printf(m, "\nHST\n===\n");
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_MODE_REG(port)));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_FRAMESIZE_REG(port)));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_DIVISOR_REG(port)));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_CHANNELS_REG(port)));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_ARBMODE_REG(port)));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_TXSTATE_REG(port)));
	/* On full HSI, BUFSTATE is per-port 1 and 2; on SSI it is indexed by
	 * this port. */
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(1)));
		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(2)));
	} else {
		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			   hsi_inl(base, HSI_HST_BUFSTATE_REG(port)));
	}
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HST_BREAK_REG(port)));
	/* Per-channel TX buffers; a negative offset means the channel has no
	 * buffer register mapped. */
	for (ch = 0; ch < 8; ch++) {
		buff_offset = hsi_hst_buffer_reg(hsi_ctrl, port, ch);
		if (buff_offset >= 0)
			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				   hsi_inl(base, buff_offset));
	}
	if (hsi_driver_device_is_hsi(pdev)) {
		for (fifo = 0; fifo < HSI_HST_FIFO_COUNT; fifo++) {
			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
				   hsi_inl(base,
					   HSI_HST_MAPPING_FIFO_REG(fifo)));
		}
	}

	/* HSR (receiver) register bank. */
	seq_printf(m, "\nHSR\n===\n");
	seq_printf(m, "MODE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_MODE_REG(port)));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_FRAMESIZE_REG(port)));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_CHANNELS_REG(port)));
	seq_printf(m, "COUNTERS\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_COUNTERS_REG(port)));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_RXSTATE_REG(port)));
	if (hsi_driver_device_is_hsi(pdev)) {
		seq_printf(m, "BUFSTATE P1\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(1)));
		seq_printf(m, "BUFSTATE P2\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(2)));
	} else {
		seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_BUFSTATE_REG(port)));
	}
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_BREAK_REG(port)));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_ERROR_REG(port)));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
		   hsi_inl(base, HSI_HSR_ERRORACK_REG(port)));
	/* Per-channel RX buffers; same negative-offset convention as HST. */
	for (ch = 0; ch < 8; ch++) {
		buff_offset = hsi_hsr_buffer_reg(hsi_ctrl, port, ch);
		if (buff_offset >= 0)
			seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				   hsi_inl(base, buff_offset));
	}
	if (hsi_driver_device_is_hsi(pdev)) {
		for (fifo = 0; fifo < HSI_HSR_FIFO_COUNT; fifo++) {
			seq_printf(m, "FIFO MAPPING%d\t: 0x%08x\n", fifo,
				   hsi_inl(base,
					   HSI_HSR_MAPPING_FIFO_REG(fifo)));
		}
		seq_printf(m, "DLL\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_DLL_REG));
		seq_printf(m, "DIVISOR\t: 0x%08x\n",
			   hsi_inl(base, HSI_HSR_DIVISOR_REG(port)));
	}

	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;
}