int spi_adc_read(struct busyboard *bb)
{
	int i, val;

	spi_set_cs(bb, 0);

	/* issue three clock cycles before the data phase */
	for (i = 0; i < 3; ++i) {
		do_delay();
		spi_set_clk(bb);
		do_delay();
		spi_clear_clk(bb);
	}

	/* shift in the 10-bit conversion result, MSB first */
	for (i = val = 0; i < 10; ++i) {
		do_delay();
		spi_set_clk(bb);
		do_delay();
		busyboard_in(bb);
		val = (val << 1) | (bb->in_state[1] & 1);
		spi_clear_clk(bb);
	}

	spi_clear_cs(bb);

	return val;
}
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;
	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET);

	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
		rs->speed = MAX_SCLK_OUT;

	/* the minimum divisor is 2 */
	if (rs->max_freq < 2 * rs->speed) {
		clk_set_rate(rs->spiclk, 2 * rs->speed);
		rs->max_freq = clk_get_rate(rs->spiclk);
	}

	/* div doesn't support odd numbers */
	div = max_t(u32, rs->max_freq / rs->speed, 1);
	div = (div + 1) & 0xfffe;

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}
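The `(div + 1) & 0xfffe` step exists because, per the driver's own comment, the controller's clock divider does not accept odd values: an odd divisor is bumped up to the next even one, and an even divisor passes through unchanged. A minimal stand-alone sketch of that rounding (the helper name and the test values are hypothetical, not part of the driver):

#include <stdio.h>

/* Hypothetical helper mirroring the driver's even-divisor rounding:
 * odd values round up to the next even value, even values are unchanged. */
static unsigned int round_div_to_even(unsigned int div)
{
	return (div + 1) & 0xfffe;
}

int main(void)
{
	printf("%u\n", round_div_to_even(4)); /* 4: already even, unchanged */
	printf("%u\n", round_div_to_even(5)); /* 6: odd, rounded up */
	return 0;
}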
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;
	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET);

	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	/* div doesn't support odd number */
	div = rs->max_freq / rs->speed;
	div = (div + 1) & 0xfffe;

	spi_enable_chip(rs, 0);

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);

	spi_enable_chip(rs, 1);
}
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;
	int rsd = 0;
	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET)
		| (CR0_EM_BIG << CR0_EM_OFFSET);

	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
		rs->speed = MAX_SCLK_OUT;

	/* the minimum divisor is 2 */
	if (rs->max_freq < 2 * rs->speed) {
		clk_set_rate(rs->spiclk, 2 * rs->speed);
		rs->max_freq = clk_get_rate(rs->spiclk);
	}

	/* div doesn't support odd numbers */
	div = DIV_ROUND_UP(rs->max_freq, rs->speed);
	div = (div + 1) & 0xfffe;

	/* Rx sample delay is expressed in parent clock cycles (max 3) */
	rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
				1000000000 >> 8);
	if (!rsd && rs->rsd_nsecs) {
		pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
			     rs->max_freq, rs->rsd_nsecs);
	} else if (rsd > 3) {
		rsd = 3;
		pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
			     rs->max_freq, rs->rsd_nsecs,
			     rsd * 1000000000U / rs->max_freq);
	}
	cr0 |= rsd << CR0_RSD_OFFSET;

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}
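The Rx sample-delay term converts a delay given in nanoseconds into whole parent clock cycles. Pre-shifting both the clock rate and the 10^9 ns-per-second constant by 8 keeps the 32-bit multiplication in range while preserving the ratio; that reading of the shift is an inference, not something the driver states. A small stand-alone sketch of the arithmetic with hypothetical example values (100 MHz parent clock, 20 ns requested delay), using a local stand-in for DIV_ROUND_CLOSEST:

#include <stdio.h>

/* Round-to-nearest unsigned division, standing in for the kernel's
 * DIV_ROUND_CLOSEST for this sketch. */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* Hypothetical values: 100 MHz parent clock, 20 ns requested delay */
	unsigned int max_freq = 100000000;
	unsigned int rsd_nsecs = 20;

	/* Both operands are pre-shifted by 8 so the product stays within 32 bits */
	unsigned int rsd = div_round_closest(rsd_nsecs * (max_freq >> 8),
					     1000000000u >> 8);

	printf("rsd = %u parent clock cycles\n", rsd); /* prints 2 */
	return 0;
}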
static int dw_spi_transfer_one(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct chip_data *chip = spi_get_ctldata(spi);
	u8 imask = 0;
	u16 txlevel = 0;
	u32 cr0;
	int ret;

	dws->dma_mapped = 0;
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = transfer->len;

	spi_enable_chip(dws, 0);

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz != dws->current_freq) {
		if (transfer->speed_hz != chip->speed_hz) {
			/* clk_div doesn't support odd numbers */
			chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
			chip->speed_hz = transfer->speed_hz;
		}
		dws->current_freq = transfer->speed_hz;
		spi_set_clk(dws, chip->clk_div);
	}

	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
	dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);

	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = (transfer->bits_per_word - 1)
		| (chip->type << SPI_FRF_OFFSET)
		| ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
		   (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
		| (chip->tmode << SPI_TMOD_OFFSET);

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (chip->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	dw_writel(dws, DW_SPI_CTRL0, cr0);

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	/*
	 * Interrupt mode:
	 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			return ret;
		}
	} else if (!chip->poll_mode) {
		txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
		dw_writel(dws, DW_SPI_TXFLTR, txlevel);

		/* Set the interrupt mask */
		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
			 SPI_INT_RXUI | SPI_INT_RXOI;
		spi_umask_intr(dws, imask);

		dws->transfer_handler = interrupt_transfer;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_transfer(dws, transfer);
		if (ret < 0)
			return ret;
	}

	if (chip->poll_mode)
		return poll_transfer(dws);

	return 1;
}