/*
 * mmc_host set_ios callback for the PL180 MMCI controller.
 *
 * Reads the current clock-control register, patches in a new clock
 * divider (if a rate is requested) and bus-width field (if one is
 * requested), then writes the register back followed by a settle delay.
 * ios->clock is updated in place to the rate actually achieved.
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 sdi_clkcr;

	sdi_clkcr = vmm_readl(&host->base->clock);

	/* Ramp up the clock rate */
	if (ios->clock) {
		u32 clkdiv = 0;
		u32 tmp_clock;

		/*
		 * Output clock = clock_in / (clkdiv + 2), so the nominal
		 * divider for the requested rate is clock_in/rate - 2.
		 * NOTE(review): this subtraction underflows if clock_in is
		 * less than 2*ios->clock — presumably the f_min/f_max
		 * bounds set at probe time prevent that; confirm.
		 */
		if (ios->clock >= mmc->f_max) {
			clkdiv = 0;
			ios->clock = mmc->f_max;
		} else {
			clkdiv = udiv32(host->clock_in, ios->clock) - 2;
		}
		tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		/* Integer division may round down; bump the divider until
		 * the generated clock no longer overshoots the request. */
		while (tmp_clock > ios->clock) {
			clkdiv++;
			tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		}
		/* Clamp to the width of the CLKDIV register field */
		if (clkdiv > SDI_CLKCR_CLKDIV_MASK)
			clkdiv = SDI_CLKCR_CLKDIV_MASK;
		/* Report the rate actually programmed back to the core */
		tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		ios->clock = tmp_clock;
		sdi_clkcr &= ~(SDI_CLKCR_CLKDIV_MASK);
		sdi_clkcr |= clkdiv;
	}

	/* Set the bus width */
	if (ios->bus_width) {
		u32 buswidth = 0;

		switch (ios->bus_width) {
		case 1:
			buswidth |= SDI_CLKCR_WIDBUS_1;
			break;
		case 4:
			buswidth |= SDI_CLKCR_WIDBUS_4;
			break;
		case 8:
			buswidth |= SDI_CLKCR_WIDBUS_8;
			break;
		default:
			/* Unsupported width: leave WIDBUS field cleared */
			vmm_printf("%s: Invalid bus width: %d\n",
				   __func__, ios->bus_width);
			break;
		}
		sdi_clkcr &= ~(SDI_CLKCR_WIDBUS_MASK);
		sdi_clkcr |= buswidth;
	}

	vmm_writel(sdi_clkcr, &host->base->clock);
	/* Allow the clock change to take effect before further access */
	vmm_udelay(CLK_CHANGE_DELAY);
}
static int mmci_command(struct mmc_host *mmc, struct mmc_cmd *cmd) { int result; u32 sdi_cmd, sdi_pwr; struct mmci_host *host = mmc_priv(mmc); sdi_cmd = ((cmd->cmdidx & SDI_CMD_CMDINDEX_MASK) | SDI_CMD_CPSMEN); if (cmd->resp_type) { sdi_cmd |= SDI_CMD_WAITRESP; if (cmd->resp_type & MMC_RSP_136) { sdi_cmd |= SDI_CMD_LONGRESP; } } vmm_writel((u32)cmd->cmdarg, &host->base->argument); vmm_udelay(COMMAND_REG_DELAY); vmm_writel(sdi_cmd, &host->base->command); result = mmci_wait_for_command_end(mmc, cmd); /* After CMD2 set RCA to a none zero value. */ if ((result == 0) && (cmd->cmdidx == MMC_CMD_ALL_SEND_CID)) { mmc->card->rca = 10; } /* After CMD3 open drain is switched off and push pull is used. */ if ((result == 0) && (cmd->cmdidx == MMC_CMD_SET_RELATIVE_ADDR)) { sdi_pwr = vmm_readl(&host->base->power) & ~SDI_PWR_OPD; vmm_writel(sdi_pwr, &host->base->power); } return result; }
/*
 * Program the SDHCI clock divider for the requested rate.
 *
 * The clock is first gated; a rate of 0 leaves it off.  The divisor is
 * chosen per the controller spec version (v3.00 allows any multiple of
 * two, v2.00 only powers of two), the internal clock is enabled and
 * polled until stable (max ~20 ms), and finally the card clock is
 * ungated.  Returns VMM_OK or VMM_EFAIL if the clock never stabilises.
 */
static int sdhci_set_clock(struct mmc_host *mmc, u32 clock)
{
	struct sdhci_host *host = (struct sdhci_host *)mmc->priv;
	u32 divisor, ctrl, wait_ms;

	/* Gate the clock before touching the divider */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
	if (!clock) {
		return VMM_OK;
	}

	if ((host->sdhci_version & SDHCI_SPEC_VER_MASK) >= SDHCI_SPEC_300) {
		/* Version 3.00 divisors must be a multiple of 2. */
		if (clock >= mmc->f_max) {
			divisor = 1;
		} else {
			divisor = 2;
			while ((divisor < SDHCI_MAX_DIV_SPEC_300) &&
			       (udiv32(mmc->f_max, divisor) > clock)) {
				divisor += 2;
			}
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		divisor = 1;
		while ((divisor < SDHCI_MAX_DIV_SPEC_200) &&
		       (udiv32(mmc->f_max, divisor) > clock)) {
			divisor *= 2;
		}
	}
	/* The register encodes N where divisor == 2*N */
	divisor >>= 1;

	if (host->ops.set_clock) {
		host->ops.set_clock(host, divisor);
	}

	ctrl = (divisor & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	ctrl |= ((divisor & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	ctrl |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	wait_ms = 20;
	for (;;) {
		ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (ctrl & SDHCI_CLOCK_INT_STABLE) {
			break;
		}
		if (wait_ms == 0) {
			vmm_printf("%s: Internal clock never stabilised.\n",
				   __func__);
			return VMM_EFAIL;
		}
		wait_ms--;
		vmm_udelay(1000);
	}

	ctrl |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);

	return VMM_OK;
}
/*
 * Drive an in-flight data transfer (PIO and/or SDMA) to completion.
 *
 * Polls the interrupt status register until DATA_END, moving one block
 * per buffer-ready interrupt via PIO and re-arming the SDMA address at
 * each boundary interrupt.
 *
 * @start_addr: the SDMA buffer address previously written to
 *              SDHCI_DMA_ADDRESS (used only when SDMA is available).
 *
 * Returns VMM_OK, VMM_EFAIL on a controller-reported error, or
 * VMM_ETIMEDOUT if the transfer never completes.
 */
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data,
			       u32 start_addr)
{
	u32 ctrl, stat, rdy, mask, timeout, block = 0;

	/* Select SDMA in the host control register when supported */
	if (host->sdhci_caps & SDHCI_CAN_DO_SDMA) {
		ctrl = sdhci_readl(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writel(host, ctrl, SDHCI_HOST_CONTROL);
	}

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			vmm_printf("%s: Error detected in status(0x%X)!\n",
				   __func__, stat);
			return VMM_EFAIL;
		}
		/* PIO path: a buffer-ready interrupt is pending */
		if (stat & rdy) {
			/*
			 * Interrupt raised but the FIFO is not actually
			 * ready yet: re-poll without acknowledging it.
			 * Note the continue skips the timeout decrement
			 * and udelay at the bottom of the loop.
			 */
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)) {
				continue;
			}
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			/*
			 * NOTE(review): dest is advanced for both read and
			 * write transfers — presumably dest/src overlay the
			 * same storage in struct mmc_data; confirm against
			 * the struct definition.
			 */
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				break;
			}
		}
		/* SDMA path: restart DMA at the next boundary address */
		if (host->sdhci_caps & SDHCI_CAN_DO_SDMA) {
			if (stat & SDHCI_INT_DMA_END) {
				sdhci_writel(host, SDHCI_INT_DMA_END,
					     SDHCI_INT_STATUS);
				start_addr &= ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
				sdhci_writel(host, start_addr,
					     SDHCI_DMA_ADDRESS);
			}
		}
		/* Total budget: up to 1000000 polls x 10 us (~10 s) */
		if (timeout-- > 0) {
			vmm_udelay(10);
		} else {
			vmm_printf("%s: Transfer data timeout\n", __func__);
			return VMM_ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

	return VMM_OK;
}
static int mmci_data_transfer(struct mmc_host *mmc, struct mmc_cmd *cmd, struct mmc_data *data) { int error = VMM_ETIMEDOUT; struct mmci_host *host = mmc_priv(mmc); u32 blksz = 0; u32 data_ctrl = 0; u32 data_len = (u32)(data->blocks * data->blocksize); if (!host->version2) { blksz = (ffs(data->blocksize) - 1); data_ctrl |= ((blksz << 4) & SDI_DCTRL_DBLKSIZE_MASK); } else { blksz = data->blocksize; data_ctrl |= (blksz << SDI_DCTRL_DBLOCKSIZE_V2_SHIFT); } data_ctrl |= SDI_DCTRL_DTEN | SDI_DCTRL_BUSYMODE; vmm_writel(SDI_DTIMER_DEFAULT, &host->base->datatimer); vmm_writel(data_len, &host->base->datalength); vmm_udelay(DATA_REG_DELAY); if (data->flags & MMC_DATA_READ) { data_ctrl |= SDI_DCTRL_DTDIR_IN; vmm_writel(data_ctrl, &host->base->datactrl); error = mmci_command(mmc, cmd); if (error) { return error; } error = mmci_read_bytes(mmc, (u32 *)data->dest, (u32)data->blocks, (u32)data->blocksize); } else if (data->flags & MMC_DATA_WRITE) { error = mmci_command(mmc, cmd); if (error) { return error; } vmm_writel(data_ctrl, &host->base->datactrl); error = mmci_write_bytes(mmc, (u32 *)data->src, (u32)data->blocks, (u32)data->blocksize); } return error; }
/*
 * DWC2 IP interface
 */

/*
 * Poll a register until the given bits are all set (set == true) or all
 * clear (set == false), sleeping 1 us between reads.  Gives up after
 * ~1,000,000 polls and returns VMM_ETIMEDOUT; returns 0 on success.
 */
static int wait_for_bit(void *reg, const u32 mask, bool set)
{
	unsigned int spin;
	u32 snapshot;

	for (spin = 1000000; --spin; vmm_udelay(1)) {
		snapshot = vmm_readl(reg);
		/* Invert when waiting for the bits to clear */
		if (((set ? snapshot : ~snapshot) & mask) == mask) {
			return 0;
		}
	}

	return VMM_ETIMEDOUT;
}
/*
 * Request a software reset of the given SDHCI blocks and poll until the
 * controller clears the reset bits (max ~100 ms).  Honors the quirk for
 * controllers that wedge on reset when no card is present.  Failure is
 * reported via vmm_printf only; the function returns void.
 */
static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	u32 retries = 100;	/* 100 x 1 ms poll budget */

	/* Some hosts cannot complete a reset without a card inserted */
	if ((host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) &&
	    !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		return;
	}

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (retries == 0) {
			vmm_printf("%s: Reset 0x%x never completed.\n",
				   __func__, mask);
			return;
		}
		retries--;
		vmm_udelay(1000);
	}
}
/*
 * Device-tree probe for the PL180 MMCI controller.
 *
 * Allocates the mmc host, maps registers, registers the command IRQ and
 * (optionally) a second PIO IRQ, pulls per-variant configuration from the
 * nodeid match data, programs initial power/clock state, masks all
 * controller interrupts, and registers the host with the MMC core.
 * Errors unwind via goto-cleanup labels in reverse acquisition order.
 */
static int mmci_driver_probe(struct vmm_device *dev,
			     const struct vmm_devtree_nodeid *devid)
{
	int rc;
	u32 sdi;
	virtual_addr_t base;
	physical_addr_t basepa;
	struct mmc_host *mmc;
	struct mmci_host *host;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), dev);
	if (!mmc) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}
	host = mmc_priv(mmc);

	/* Map the controller's register window */
	rc = vmm_devtree_regmap(dev->node, &base, 0);
	if (rc) {
		goto free_host;
	}
	host->base = (struct sdi_registers *)base;

	/* First IRQ (command) is mandatory */
	rc = vmm_devtree_irq_get(dev->node, &host->irq0, 0);
	if (rc) {
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(host->irq0, dev->name,
					mmci_cmd_irq_handler, mmc))) {
		goto free_reg;
	}

	/* Second IRQ (PIO) is optional; fall back to single-IRQ mode */
	rc = vmm_devtree_irq_get(dev->node, &host->irq1, 1);
	if (!rc) {
		if ((rc = vmm_host_irq_register(host->irq1, dev->name,
						mmci_pio_irq_handler, mmc))) {
			goto free_irq0;
		}
		host->singleirq = 0;
	} else {
		host->singleirq = 1;
	}

	/* Retrieve per-variant configuration from the match data array */
	host->pwr_init = ((const u32 *)devid->data)[0];
	host->clkdiv_init = ((const u32 *)devid->data)[1];
	host->voltages = ((const u32 *)devid->data)[2];
	host->caps = ((const u32 *)devid->data)[3];
	host->clock_in = ((const u32 *)devid->data)[4];
	host->clock_min = ((const u32 *)devid->data)[5];
	host->clock_max = ((const u32 *)devid->data)[6];
	host->b_max = ((const u32 *)devid->data)[7];
	host->version2 = ((const u32 *)devid->data)[8];

	/* Initialize power and clock divider */
	vmm_writel(host->pwr_init, &host->base->power);
	vmm_writel(host->clkdiv_init, &host->base->clock);
	vmm_udelay(CLK_CHANGE_DELAY);

	/* Disable interrupts */
	sdi = vmm_readl(&host->base->mask0) & ~SDI_MASK0_MASK;
	vmm_writel(sdi, &host->base->mask0);

	/* Setup mmc host configuration */
	mmc->caps = host->caps;
	mmc->voltages = host->voltages;
	mmc->f_min = host->clock_min;
	mmc->f_max = host->clock_max;
	mmc->b_max = host->b_max;

	/* Setup mmc host operations */
	mmc->ops.send_cmd = mmci_request;
	mmc->ops.set_ios = mmci_set_ios;
	mmc->ops.init_card = mmci_init_card;
	mmc->ops.get_cd = NULL;
	mmc->ops.get_wp = NULL;

	rc = mmc_add_host(mmc);
	if (rc) {
		goto free_irq1;
	}

	dev->priv = mmc;

	/* Log the physical base for diagnostics (ignore lookup failure) */
	vmm_devtree_regaddr(dev->node, &basepa, 0);
	vmm_printf("%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		   dev->name, amba_part(dev), amba_manf(dev), amba_rev(dev),
		   (unsigned long long)basepa, host->irq0, host->irq1);

	return VMM_OK;

	/* Error unwind: release resources in reverse acquisition order */
free_irq1:
	if (!host->singleirq) {
		vmm_host_irq_unregister(host->irq1, mmc);
	}
free_irq0:
	vmm_host_irq_unregister(host->irq0, mmc);
free_reg:
	vmm_devtree_regunmap(dev->node, (virtual_addr_t)host->base, 0);
free_host:
	mmc_free_host(mmc);
free_nothing:
	return rc;
}
/*
 * Issue a single MMC/SD command, optionally with a data transfer
 * (PIO or SDMA), on an SDHCI controller.
 *
 * Flow: check card presence, wait for the inhibit bits to release,
 * translate the response type into command flags and completion mask,
 * program transfer mode/block registers when data is attached, write
 * argument + command, busy-poll the interrupt status for completion,
 * then hand data movement to sdhci_transfer_data().  On error the
 * command and data lines are software-reset before returning.
 *
 * Returns VMM_OK, or VMM_EIO / VMM_ETIMEDOUT / VMM_EFAIL on failure.
 */
int sdhci_send_command(struct mmc_host *mmc, struct mmc_cmd *cmd,
		       struct mmc_data *data)
{
	bool present;
	u32 mask, flags, mode;
	int ret = 0, trans_bytes = 0, is_aligned = 1;
	u32 timeout, retry = 10000, stat = 0, start_addr = 0;
	struct sdhci_host *host = mmc_priv(mmc);

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		present = TRUE;
	} else {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
						SDHCI_CARD_PRESENT;
	}

	/* If card not present then return error */
	if (!present) {
		return VMM_EIO;
	}

	/* Wait max 10 ms */
	timeout = 10;

	/* Ack any stale interrupt status before starting */
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands,
	 * even though they might use busy signaling */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION) {
		mask &= ~SDHCI_DATA_INHIBIT;
	}

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			vmm_printf("%s: Controller never released "
				   "inhibit bit(s).\n", __func__);
			return VMM_EIO;
		}
		timeout--;
		vmm_udelay(1000);
	}

	/* Translate the response type into command flags plus the
	 * interrupt bits that signal completion */
	mask = SDHCI_INT_RESPONSE;
	if (!(cmd->resp_type & MMC_RSP_PRESENT)) {
		flags = SDHCI_CMD_RESP_NONE;
	} else if (cmd->resp_type & MMC_RSP_136) {
		flags = SDHCI_CMD_RESP_LONG;
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		/* Busy responses complete with a DATA_END interrupt */
		mask |= SDHCI_INT_DATA_END;
	} else {
		flags = SDHCI_CMD_RESP_SHORT;
	}

	if (cmd->resp_type & MMC_RSP_CRC) {
		flags |= SDHCI_CMD_CRC;
	}
	if (cmd->resp_type & MMC_RSP_OPCODE) {
		flags |= SDHCI_CMD_INDEX;
	}
	if (data) {
		flags |= SDHCI_CMD_DATA;
	}

	/* Set Transfer mode regarding to data flag */
	if (data != 0) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1) {
			mode |= SDHCI_TRNS_MULTI;
		}
		/* NOTE(review): flags is compared with ==, not &, so a
		 * combined flag value would not match — presumably the
		 * core only ever sets exactly one direction bit; confirm. */
		if (data->flags == MMC_DATA_READ) {
			mode |= SDHCI_TRNS_READ;
		}
		if (host->sdhci_caps & SDHCI_CAN_DO_SDMA) {
			if (data->flags == MMC_DATA_READ) {
				start_addr = (u32)data->dest;
			} else {
				start_addr = (u32)data->src;
			}
			/* NOTE(review): casting the buffer pointer to u32
			 * assumes a 32-bit address space (or identity
			 * mapping into the low 4 GiB) — verify on 64-bit
			 * targets. */
			if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
			    (start_addr & 0x7) != 0x0) {
				/* Misaligned buffer: bounce through the
				 * 8-byte aligned scratch buffer */
				is_aligned = 0;
				start_addr = (u32)host->aligned_buffer;
				if (data->flags != MMC_DATA_READ) {
					memcpy(host->aligned_buffer,
					       data->src, trans_bytes);
				}
			}
			sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
			mode |= SDHCI_TRNS_DMA;
			/* Make the buffer visible to the DMA engine */
			vmm_flush_cache_range(start_addr,
					      start_addr + trans_bytes);
		}
		sdhci_writew(host,
			     SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
					      data->blocksize),
			     SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);

	/* Busy-poll for command completion (or error), max 10000 reads */
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			break;
		}
		if (--retry == 0) {
			break;
		}
	} while ((stat & mask) != mask);

	if (retry == 0) {
		/* Broken-R1B controllers never signal busy completion;
		 * treat the timeout as success */
		if (host->quirks & SDHCI_QUIRK_BROKEN_R1B)
			return VMM_OK;
		else {
			vmm_printf("%s: Status update timeout!\n", __func__);
			return VMM_ETIMEDOUT;
		}
	}

	/* Completed cleanly (all mask bits set, no error bit) */
	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else {
		ret = VMM_EFAIL;
	}

	if (!ret && data) {
		ret = sdhci_transfer_data(host, data, start_addr);
	}

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD) {
		vmm_udelay(1000);
	}

	/* Capture final status, then ack everything */
	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	if (!ret) {
		/* Copy read data back out of the bounce buffer */
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		    !is_aligned && (data->flags == MMC_DATA_READ)) {
			memcpy(data->dest, host->aligned_buffer, trans_bytes);
		}
		return VMM_OK;
	}

	/* Error path: reset command and data circuits before reporting */
	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT) {
		return VMM_ETIMEDOUT;
	} else {
		return VMM_EIO;
	}
}