/**
 * hsi_driver_cancel_write_dma - Cancel an ongoing GDD [DMA] write for the
 *				 specified hsi channel.
 * @hsi_ch - pointer to the hsi_channel to cancel DMA write.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return: -ENXIO : No DMA channel found for specified HSI channel
 *	   -ECANCELED : DMA cancel success, data not transfered to TX FIFO
 *	   0 : DMA transfer is already over, data already transfered to TX FIFO
 *
 * Note: whatever returned value, write callback will not be called after
 * write cancel.
 */
int hsi_driver_cancel_write_dma(struct hsi_channel *hsi_ch)
{
	int lch = hsi_ch->write_data.lch;
	unsigned int port = hsi_ch->hsi_port->port_number;
	unsigned int channel = hsi_ch->channel_number;
	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
	u16 ccr, gdd_csr;
	long buff_offset;
	u32 status_reg;
	dma_addr_t dma_h;
	size_t size;

	/* A negative lch means no GDD logical channel was assigned to this
	 * HSI channel; there is no DMA transfer to cancel. */
	if (lch < 0) {
		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
			"channel %d\n", hsi_ch->channel_number);
		return -ENXIO;
	}

	/* CCR enable bit clear is not an error: the transfer may simply have
	 * completed already. Log it for debugging only. */
	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
	if (!(ccr & HSI_CCR_ENABLE)) {
		dev_dbg(&hsi_ch->dev->device, "Write cancel on not "
			"enabled logical channel %d CCR REG 0x%04X\n",
			lch, ccr);
	}

	/* Snapshot the pending+enabled GDD interrupt status BEFORE tearing
	 * the channel down; it is used at the end to tell a completed
	 * transfer (bit set -> return 0) from a real cancellation. */
	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);

	/* Stop the GDD logical channel */
	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));

	/* Clear CSR register by reading it, as it is cleared automaticaly */
	/* by HW after SW read. */
	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));

	/* Mask this logical channel's GDD interrupt, then acknowledge any
	 * pending instance of it (STATUS register is write-to-clear). */
	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
		 HSI_SYS_GDD_MPU_IRQ_STATUS_REG);

	/* Unmap DMA region: source address and element count (in 32-bit
	 * words, hence * 4 for bytes) are read back from the GDD registers */
	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CSSA_REG(lch));
	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_TO_DEVICE);

	/* Clear the TX buffer-state bit for this channel; a negative offset
	 * presumably means the register is absent on this port/variant —
	 * TODO confirm against hsi_hst_bufstate_f_reg() */
	buff_offset = hsi_hst_bufstate_f_reg(hsi_ctrl, port, channel);
	if (buff_offset >= 0)
		hsi_outl_and(~HSI_BUFSTATE_CHANNEL(channel), hsi_ctrl->base,
			     buff_offset);

	/* Release the write-transfer bookkeeping for this channel */
	hsi_reset_ch_write(hsi_ch);

	/* Interrupt was already pending => DMA had finished => 0;
	 * otherwise the transfer was truly cancelled => -ECANCELED. */
	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
/**
 * hsi_driver_cancel_read_dma - Cancel an ongoing GDD [DMA] read for the
 *				specified hsi channel.
 * @hsi_ch - pointer to the hsi_channel to cancel DMA read.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return: -ENXIO : No DMA channel found for specified HSI channel
 *	   -ECANCELED : DMA cancel success, data not available at expected
 *			address.
 *	   0 : DMA transfer is already over, data already available at
 *	       expected address.
 *
 * Note: whatever returned value, read callback will not be called after cancel.
 */
int hsi_driver_cancel_read_dma(struct hsi_channel *hsi_ch)
{
	int lch = hsi_ch->read_data.lch;
	struct hsi_dev *hsi_ctrl = hsi_ch->hsi_port->hsi_controller;
	u16 ccr, gdd_csr;
	u32 status_reg;
	dma_addr_t dma_h;
	size_t size;

	/* Re-enable interrupts for polling if needed (the Rx interrupt is
	 * disabled while DMA is in charge of the channel) */
	if (hsi_ch->flags & HSI_CH_RX_POLL)
		hsi_driver_enable_read_interrupt(hsi_ch, NULL);

	/* A negative lch means no GDD logical channel was assigned to this
	 * HSI channel; there is no DMA transfer to cancel. */
	if (lch < 0) {
		dev_err(&hsi_ch->dev->device, "No DMA channel found for HSI "
			"channel %d\n", hsi_ch->channel_number);
		return -ENXIO;
	}

	/* CCR enable bit clear is not an error: the transfer may simply have
	 * completed already. Log it for debugging only. */
	ccr = hsi_inw(hsi_ctrl->base, HSI_GDD_CCR_REG(lch));
	if (!(ccr & HSI_CCR_ENABLE)) {
		dev_dbg(&hsi_ch->dev->device, "Read cancel on not "
			"enabled logical channel %d CCR REG 0x%04X\n",
			lch, ccr);
	}

	/* Snapshot the pending+enabled GDD interrupt status BEFORE tearing
	 * the channel down; it is used at the end to tell a completed
	 * transfer (bit set -> return 0) from a real cancellation. */
	status_reg = hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG);
	status_reg &= hsi_inl(hsi_ctrl->base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);

	/* Stop the GDD logical channel */
	hsi_outw_and(~HSI_CCR_ENABLE, hsi_ctrl->base, HSI_GDD_CCR_REG(lch));

	/* Clear CSR register by reading it, as it is cleared automaticaly */
	/* by HW after SW read */
	gdd_csr = hsi_inw(hsi_ctrl->base, HSI_GDD_CSR_REG(lch));

	/* Mask this logical channel's GDD interrupt, then acknowledge any
	 * pending instance of it (STATUS register is write-to-clear). */
	hsi_outl_and(~HSI_GDD_LCH(lch), hsi_ctrl->base,
		     HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outl(HSI_GDD_LCH(lch), hsi_ctrl->base,
		 HSI_SYS_GDD_MPU_IRQ_STATUS_REG);

	/* Unmap DMA region - Access to the buffer is now safe */
	/* Destination address and element count (32-bit words, * 4 for
	 * bytes) are read back from the GDD registers */
	dma_h = hsi_inl(hsi_ctrl->base, HSI_GDD_CDSA_REG(lch));
	size = hsi_inw(hsi_ctrl->base, HSI_GDD_CEN_REG(lch)) * 4;
	dma_unmap_single(hsi_ctrl->dev, dma_h, size, DMA_FROM_DEVICE);

	/* Release the read-transfer bookkeeping for this channel */
	hsi_reset_ch_read(hsi_ch);

	/* Interrupt was already pending => DMA had finished => 0;
	 * otherwise the transfer was truly cancelled => -ECANCELED. */
	return status_reg & HSI_GDD_LCH(lch) ? 0 : -ECANCELED;
}
static int hsi_debug_gdd_show(struct seq_file *m, void *p) { struct hsi_dev *hsi_ctrl = m->private; void __iomem *base = hsi_ctrl->base; int lch; struct platform_device *pdev = to_platform_device(hsi_ctrl->dev); hsi_clocks_enable(hsi_ctrl->dev, __func__); seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_STATUS_REG)); seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", hsi_inl(base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG)); if (!hsi_driver_device_is_hsi(pdev)) { seq_printf(m, "HW_ID\t\t: 0x%08x\n", hsi_inl(base, HSI_SSI_GDD_HW_ID_REG)); seq_printf(m, "PPORT_ID\t: 0x%08x\n", hsi_inl(base, HSI_SSI_GDD_PPORT_ID_REG)); seq_printf(m, "MPORT_ID\t: 0x%08x\n", hsi_inl(base, HSI_SSI_GDD_MPORT_ID_REG)); seq_printf(m, "TEST\t\t: 0x%08x\n", hsi_inl(base, HSI_SSI_GDD_TEST_REG)); } seq_printf(m, "GCR\t\t: 0x%08x\n", hsi_inl(base, HSI_GDD_GCR_REG)); for (lch = 0; lch < hsi_ctrl->gdd_chan_count; lch++) { seq_printf(m, "\nGDD LCH %d\n=========\n", lch); seq_printf(m, "CSDP\t\t: 0x%04x\n", hsi_inw(base, HSI_GDD_CSDP_REG(lch))); seq_printf(m, "CCR\t\t: 0x%04x\n", hsi_inw(base, HSI_GDD_CCR_REG(lch))); seq_printf(m, "CICR\t\t: 0x%04x\n", hsi_inw(base, HSI_GDD_CCIR_REG(lch))); seq_printf(m, "CSR\t\t: 0x%04x\n", hsi_inw(base, HSI_GDD_CSR_REG(lch))); seq_printf(m, "CSSA\t\t: 0x%08x\n", hsi_inl(base, HSI_GDD_CSSA_REG(lch))); seq_printf(m, "CDSA\t\t: 0x%08x\n", hsi_inl(base, HSI_GDD_CDSA_REG(lch))); seq_printf(m, "CEN\t\t: 0x%04x\n", hsi_inw(base, HSI_GDD_CEN_REG(lch))); seq_printf(m, "CSAC\t\t: 0x%04x\n", hsi_inw(base, HSI_GDD_CSAC_REG(lch))); seq_printf(m, "CDAC\t\t: 0x%04x\n", hsi_inw(base, HSI_GDD_CDAC_REG(lch))); if (!hsi_driver_device_is_hsi(pdev)) seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", hsi_inw(base, HSI_SSI_GDD_CLNK_CTRL_REG(lch))); } hsi_clocks_disable(hsi_ctrl->dev, __func__); return 0; }
/**
 * hsi_driver_read_dma - Program GDD [DMA] to write data to memory from
 * the hsi channel buffer.
 * @hsi_channel - pointer to the hsi_channel to read data from.
 * @data - 32-bit word pointer where to store the incoming data.
 * @count - Number of 32bit words to be transfered to the buffer.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return 0 on success and < 0 on error.
 */
int hsi_driver_read_dma(struct hsi_channel *hsi_channel, u32 *data,
			unsigned int count)
{
	struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_channel->hsi_port->port_number;
	unsigned int channel = hsi_channel->channel_number;
	unsigned int sync;
	int lch;
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	u16 tmp;
	int fifo;

	/* Reject empty/NULL requests up front, mirroring the checks done in
	 * hsi_driver_write_dma() */
	if ((count < 1) || (data == NULL))
		return -EINVAL;

	lch = hsi_get_free_lch(hsi_ctrl);
	if (lch < 0) {
		dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
		return -EBUSY;	/* No free GDD logical channels. */
	} else {
		dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for read on"
			" HSI channel %d.\n", lch,
			hsi_channel->channel_number);
	}

	/* When DMA is used for Rx, disable the Rx Interrupt.
	 * (else DATAAVAILLABLE event would get triggered on first
	 * received data word)
	 * (Rx interrupt might be active for polling feature)
	 */
	hsi_driver_disable_read_interrupt(hsi_channel);

	/*
	 * NOTE: Gettting a free gdd logical channel and
	 * reserve it must be done atomicaly.
	 */
	hsi_channel->read_data.lch = lch;

	/* Sync is required for SSI but not for HSI */
	sync = hsi_sync_table[HSI_SYNC_READ][port - 1][channel];

	dest_addr = dma_map_single(hsi_ctrl->dev, data, count * 4,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(hsi_ctrl->dev, dest_addr))) {
		dev_err(hsi_ctrl->dev, "Failed to create DMA read mapping.\n");
		/* Undo the reservation above so the logical channel is not
		 * leaked, and restore the Rx interrupt for pollers as the
		 * cancel path does */
		hsi_channel->read_data.lch = -1;
		if (hsi_channel->flags & HSI_CH_RX_POLL)
			hsi_driver_enable_read_interrupt(hsi_channel, NULL);
		return -ENOMEM;
	}

	/* Burst 4x32-bit transfers, peripheral (FIFO) -> memory */
	tmp = HSI_DST_BURST_4x32_BIT | HSI_DST_MEMORY_PORT |
	      HSI_SRC_BURST_4x32_BIT | HSI_SRC_PERIPHERAL_PORT |
	      HSI_DATA_TYPE_S32;
	hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));

	/* Destination auto-increments through memory; source is the fixed
	 * FIFO/buffer address */
	tmp = HSI_DST_AMODE_POSTINC | HSI_SRC_AMODE_CONST | sync;
	hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));

	/* Interrupt on block completion and on timeout */
	hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));

	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
				"transfer from FIFO.\n");
			/* Undo the mapping and channel reservation made
			 * above; restore the Rx interrupt for pollers */
			dma_unmap_single(hsi_ctrl->dev, dest_addr, count * 4,
					 DMA_FROM_DEVICE);
			hsi_channel->read_data.lch = -1;
			if (hsi_channel->flags & HSI_CH_RX_POLL)
				hsi_driver_enable_read_interrupt(hsi_channel,
								 NULL);
			return -EFAULT;
		}
		/* HSI CSSA register takes a FIFO ID when copying from FIFO */
		hsi_outl(fifo, base, HSI_GDD_CSSA_REG(lch));
	} else {
		src_addr = hsi_ctrl->phy_base +
			   HSI_HSR_BUFFER_CH_REG(port, channel);
		/* SSI CSSA register always takes a 32-bit address */
		hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
	}

	/* HSI CDSA register takes a 32-bit address when copying to memory */
	/* SSI CDSA register always takes a 32-bit address */
	hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
	hsi_outw(count, base, HSI_GDD_CEN_REG(lch));

	/* TODO : Need to clean interrupt status here to avoid spurious int */

	/* Unmask the GDD interrupt for this channel, then kick off the
	 * transfer */
	hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));

	return 0;
}
/**
 * hsi_driver_write_dma - Program GDD [DMA] to write data from memory to
 * the hsi channel buffer.
 * @hsi_channel - pointer to the hsi_channel to write data to.
 * @data - 32-bit word pointer to the data.
 * @size - Number of 32bit words to be transfered.
 *
 * hsi_controller lock must be held before calling this function.
 *
 * Return 0 on success and < 0 on error.
 */
int hsi_driver_write_dma(struct hsi_channel *hsi_channel, u32 *data,
			 unsigned int size)
{
	struct hsi_dev *hsi_ctrl = hsi_channel->hsi_port->hsi_controller;
	void __iomem *base = hsi_ctrl->base;
	unsigned int port = hsi_channel->hsi_port->port_number;
	unsigned int channel = hsi_channel->channel_number;
	unsigned int sync;
	int lch;
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	u16 tmp;
	int fifo;

	if ((size < 1) || (data == NULL))
		return -EINVAL;

	lch = hsi_get_free_lch(hsi_ctrl);
	if (lch < 0) {
		dev_err(hsi_ctrl->dev, "No free DMA channels.\n");
		return -EBUSY;	/* No free GDD logical channels. */
	} else {
		dev_dbg(hsi_ctrl->dev, "Allocated DMA channel %d for write on"
			" HSI channel %d.\n", lch,
			hsi_channel->channel_number);
	}

	/* NOTE: Getting a free gdd logical channel and
	 * reserve it must be done atomicaly.
	 */
	hsi_channel->write_data.lch = lch;

	/* Sync is required for SSI but not for HSI */
	sync = hsi_sync_table[HSI_SYNC_WRITE][port - 1][channel];

	src_addr = dma_map_single(hsi_ctrl->dev, data, size * 4,
				  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(hsi_ctrl->dev, src_addr))) {
		dev_err(hsi_ctrl->dev,
			"Failed to create DMA write mapping.\n");
		/* Undo the reservation above so the logical channel is not
		 * leaked */
		hsi_channel->write_data.lch = -1;
		return -ENOMEM;
	}

	/* Burst 4x32-bit transfers, memory -> peripheral (FIFO) */
	tmp = HSI_SRC_BURST_4x32_BIT | HSI_SRC_MEMORY_PORT |
	      HSI_DST_BURST_4x32_BIT | HSI_DST_PERIPHERAL_PORT |
	      HSI_DATA_TYPE_S32;
	hsi_outw(tmp, base, HSI_GDD_CSDP_REG(lch));

	/* Source auto-increments through memory; destination is the fixed
	 * FIFO/buffer address */
	tmp = HSI_SRC_AMODE_POSTINC | HSI_DST_AMODE_CONST | sync;
	hsi_outw(tmp, base, HSI_GDD_CCR_REG(lch));

	/* Interrupt on block completion and on timeout */
	hsi_outw((HSI_BLOCK_IE | HSI_TOUT_IE), base, HSI_GDD_CCIR_REG(lch));

	if (hsi_driver_device_is_hsi(to_platform_device(hsi_ctrl->dev))) {
		fifo = hsi_fifo_get_id(hsi_ctrl, channel, port);
		if (unlikely(fifo < 0)) {
			dev_err(hsi_ctrl->dev, "No valid FIFO id for DMA "
				"transfer to FIFO.\n");
			/* Undo the mapping and channel reservation made
			 * above */
			dma_unmap_single(hsi_ctrl->dev, src_addr, size * 4,
					 DMA_TO_DEVICE);
			hsi_channel->write_data.lch = -1;
			return -EFAULT;
		}
		/* HSI CDSA register takes a FIFO ID when copying to FIFO */
		hsi_outl(fifo, base, HSI_GDD_CDSA_REG(lch));
	} else {
		dest_addr = hsi_ctrl->phy_base +
			    HSI_HST_BUFFER_CH_REG(port, channel);
		/* SSI CDSA register always takes a 32-bit address */
		hsi_outl(dest_addr, base, HSI_GDD_CDSA_REG(lch));
	}

	/* HSI CSSA register takes a 32-bit address when copying from memory */
	/* SSI CSSA register always takes a 32-bit address */
	hsi_outl(src_addr, base, HSI_GDD_CSSA_REG(lch));
	hsi_outw(size, base, HSI_GDD_CEN_REG(lch));

	/* TODO : Need to clean interrupt status here to avoid spurious int */

	/* Unmask the GDD interrupt for this channel, then kick off the
	 * transfer */
	hsi_outl_or(HSI_GDD_LCH(lch), base, HSI_SYS_GDD_MPU_IRQ_ENABLE_REG);
	hsi_outw_or(HSI_CCR_ENABLE, base, HSI_GDD_CCR_REG(lch));

	return 0;
}