/*
 * Write a single data block to the card by PIO.
 * Sequence: unmask the buffer-write-enable interrupt, sleep until the ISR
 * sets wait_int (or sd_error), copy one block into CE_DATA one 32-bit word
 * at a time, then unmask and wait for the data-transfer-end interrupt.
 * Returns 0 on success or the code from sh_mmcif_error_manage().
 */
static int sh_mmcif_single_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 blocksize, i, *p = sg_virt(data->sg);

	host->wait_int = 0;
	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
	time = wait_event_interruptible_timeout(host->intr_wait,
						host->wait_int == 1 ||
						host->sd_error == 1,
						host->timeout);
	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
		return sh_mmcif_error_manage(host);

	host->wait_int = 0;
	/* + 3 makes blocksize / 4 below round UP to whole 32-bit words */
	blocksize = (BLOCK_SIZE_MASK &
		     sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	time = wait_event_interruptible_timeout(host->intr_wait,
						host->wait_int == 1 ||
						host->sd_error == 1,
						host->timeout);
	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
		return sh_mmcif_error_manage(host);

	host->wait_int = 0;
	return 0;
}
/*
 * Write a single data block to the card by PIO (polling variant).
 * Returns 0 on success, -EIO on an unaligned source buffer, or the code
 * from sh_mmcif_error_manage() on a controller error/timeout.
 */
static int sh_mmcif_single_write(struct sh_mmcif_host *host,
				 struct mmc_data *data)
{
	long time;
	u32 blocksize, i;
	const unsigned long *p = (unsigned long *)data->dest;

	/*
	 * NOTE(review): only byte alignment is tested (mask 0x1), yet the
	 * copy loop below dereferences p as unsigned long — presumably the
	 * buffer needs full word alignment; confirm the mask is intended.
	 */
	if ((unsigned long)p & 0x00000001) {
		printf("%s: The data pointer is unaligned.", __func__);
		return -EIO;
	}

	host->wait_int = 0;

	/* buf write enable */
	sh_mmcif_bitset(MASK_MBUFWEN, &host->regs->ce_int_mask);
	/* mmcif_wait_interrupt_flag() appears to return 0 on timeout */
	time = mmcif_wait_interrupt_flag(host);
	if (time == 0 || host->sd_error != 0)
		return sh_mmcif_error_manage(host);

	host->wait_int = 0;
	/* + 3 makes blocksize / 4 below round UP to whole 32-bit words */
	blocksize = (BLOCK_SIZE_MASK &
		     sh_mmcif_read(&host->regs->ce_block_set)) + 3;
	for (i = 0; i < blocksize / 4; i++)
		sh_mmcif_write(*p++, &host->regs->ce_data);

	/* buffer write end */
	sh_mmcif_bitset(MASK_MDTRANE, &host->regs->ce_int_mask);
	time = mmcif_wait_interrupt_flag(host);
	if (time == 0 || host->sd_error != 0)
		return sh_mmcif_error_manage(host);

	host->wait_int = 0;
	return 0;
}
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, struct mmc_request *mrq, struct mmc_command *cmd) { long time; if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); else { pr_err(DRIVER_NAME": not support stop cmd\n"); cmd->error = sh_mmcif_error_manage(host); return; } time = wait_event_interruptible_timeout(host->intr_wait, host->wait_int == 1 || host->sd_error == 1, host->timeout); if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) { cmd->error = sh_mmcif_error_manage(host); return; } sh_mmcif_get_cmd12response(host, cmd); host->wait_int = 0; cmd->error = 0; }
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) { u32 tmp; tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL); sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); /* byte swap on */ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); }
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) { u32 tmp; tmp = sh_mmcif_read(&host->regs->ce_clk_ctrl) & (CLK_ENABLE | CLK_CLEAR); sh_mmcif_write(SOFT_RST_ON, &host->regs->ce_version); sh_mmcif_write(SOFT_RST_OFF, &host->regs->ce_version); sh_mmcif_bitset(tmp | SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29, &host->regs->ce_clk_ctrl); /* byte swap on */ sh_mmcif_bitset(BUF_ACC_ATYP, &host->regs->ce_buf_acc); }
/*
 * Write data->blocks data blocks to the card by PIO (polling variant).
 * For each block: wait for buffer-write-enable, then copy one block of
 * 32-bit words into CE_DATA. Returns 0 on success, -EIO on an unaligned
 * buffer, or the code from sh_mmcif_error_manage().
 */
static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_data *data)
{
	long time;
	u32 i, j, blocksize;
	const unsigned long *p = (unsigned long *)data->dest;

	/*
	 * NOTE(review): only byte alignment is tested (mask 0x1), yet the
	 * copy loop dereferences p as unsigned long — confirm the mask.
	 */
	if ((unsigned long)p & 0x00000001) {
		printf("%s: The data pointer is unaligned.", __func__);
		return -EIO;
	}

	host->wait_int = 0;
	blocksize = BLOCK_SIZE_MASK &
		    sh_mmcif_read(&host->regs->ce_block_set);

	for (j = 0; j < data->blocks; j++) {
		sh_mmcif_bitset(MASK_MBUFWEN, &host->regs->ce_int_mask);
		time = mmcif_wait_interrupt_flag(host);
		if (time == 0 || host->sd_error != 0)
			return sh_mmcif_error_manage(host);

		host->wait_int = 0;
		for (i = 0; i < blocksize / 4; i++)
			sh_mmcif_write(*p++, &host->regs->ce_data);

		/* keep the board watchdog alive during long transfers */
		WATCHDOG_RESET();
	}
	return 0;
}
/*
 * Write a multi-block request by PIO, walking the scatter-gather list.
 * For every sg entry, each contained block is transferred after waiting
 * for the buffer-write-enable interrupt. Returns 0 on success or the
 * code from sh_mmcif_error_manage().
 */
static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	long time;
	u32 i, sec, j, blocksize, *p;

	blocksize = BLOCK_SIZE_MASK &
		    sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET);

	for (j = 0; j < data->sg_len; j++) {
		p = sg_virt(data->sg);
		host->wait_int = 0;
		/* one inner iteration per full block in this sg entry */
		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
			/* buf write enable*/
			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK,
					MASK_MBUFWEN);
			time = wait_event_interruptible_timeout(host->intr_wait,
					host->wait_int == 1 ||
					host->sd_error == 1,
					host->timeout);
			if (host->wait_int != 1 &&
			    (time == 0 || host->sd_error != 0))
				return sh_mmcif_error_manage(host);

			host->wait_int = 0;
			for (i = 0; i < blocksize / 4; i++)
				sh_mmcif_writel(host->addr,
						MMCIF_CE_DATA, *p++);
		}
		/* NOTE(review): advances the caller's sg pointer in place */
		if (j < data->sg_len - 1)
			data->sg++;
	}
	return 0;
}
/*
 * Program the MMCIF card-clock divisor and (re-)enable the clock.
 * clk == 0 leaves the clock stopped.
 *
 * Fix: the previous ilog2(__rounddown_pow_of_two(host->clk / clk))
 * picked one divider step too many whenever host->clk / clk was an
 * exact power of two, running the card at half the achievable rate.
 * Use fls(DIV_ROUND_UP(host->clk, clk) - 1) - 1, the same formula as
 * the other sh_mmcif_clock_control() revision in this file, which
 * yields the smallest divider that keeps the output at or below clk.
 */
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

	/* stop the clock and clear the divisor field first */
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (p->sup_pclk && clk == host->clk)
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
				((fls(DIV_ROUND_UP(host->clk,
						   clk) - 1) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
/*
 * Recover from a controller-reported error.
 * If a command sequence is still in flight (STS1_CMDSEQ), force it to
 * end via CMD_CTRL_BREAK, wait for completion, and soft-reset the block.
 * Otherwise classify the error from HOST_STS2.
 * Returns -EIO, -ETIMEDOUT, or -EIO for end/index errors.
 */
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		/* pulse the BREAK bit to abort the command sequence */
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		/* poll up to ~10000 s (10M x 1 ms) for CMDSEQ to drop */
		for (timeout = 10000000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(&host->pd->dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}
static int sh_mmcif_error_manage(struct sh_mmcif_host *host) { u32 state1, state2; int ret, timeout = 10000000; host->sd_error = 0; host->wait_int = 0; state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1); state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); pr_debug("%s: ERR HOST_STS1 = %08x\n", DRIVER_NAME, state1); pr_debug("%s: ERR HOST_STS2 = %08x\n", DRIVER_NAME, state2); if (state1 & STS1_CMDSEQ) { sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); while (1) { timeout--; if (timeout < 0) { pr_err(DRIVER_NAME": Forceed end of " \ "command sequence timeout err\n"); return -EIO; } if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) & STS1_CMDSEQ)) break; mdelay(1); } sh_mmcif_sync_reset(host); pr_debug(DRIVER_NAME": Forced end of command sequence\n"); return -EIO; } if (state2 & STS2_CRC_ERR) { pr_debug(DRIVER_NAME": Happened CRC error\n"); ret = -EIO; } else if (state2 & STS2_TIMEOUT_ERR) { pr_debug(DRIVER_NAME": Happened Timeout error\n"); ret = -ETIMEDOUT; } else { pr_debug(DRIVER_NAME": Happened End/Index error\n"); ret = -EIO; } return ret; }
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, struct mmc_request *mrq) { switch (mrq->cmd->opcode) { case MMC_READ_MULTIPLE_BLOCK: sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); break; case MMC_WRITE_MULTIPLE_BLOCK: sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); break; default: dev_err(&host->pd->dev, "unsupported stop cmd\n"); mrq->stop->error = sh_mmcif_error_manage(host); return; } host->wait_for = MMCIF_WAIT_FOR_STOP; }
/*
 * Program the MMCIF card-clock divisor and (re-)enable the clock.
 * clk == 0 leaves the clock stopped. CLKDEV_EMMC_DATA selects the
 * undivided peripheral clock (CLK_PCLK); otherwise the smallest i with
 * host->clk / 2^i < clk is found and (i - 1) is written to the divisor
 * field at bits [19:16].
 */
static void sh_mmcif_clock_control(struct sh_mmcif_host *host,
				   unsigned int clk)
{
	int i;

	/* stop the clock and clear the divisor field first */
	sh_mmcif_bitclr(CLK_ENABLE, &host->regs->ce_clk_ctrl);
	sh_mmcif_bitclr(CLK_CLEAR, &host->regs->ce_clk_ctrl);

	if (!clk)
		return;

	if (clk == CLKDEV_EMMC_DATA) {
		sh_mmcif_bitset(CLK_PCLK, &host->regs->ce_clk_ctrl);
	} else {
		/* empty-body loop: i only advances while 2^i still too fast */
		for (i = 1;
		     (unsigned int)host->clk / (1 << i) >= clk; i++)
			;
		sh_mmcif_bitset((i - 1) << 16, &host->regs->ce_clk_ctrl);
	}

	sh_mmcif_bitset(CLK_ENABLE, &host->regs->ce_clk_ctrl);
}
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) { struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; bool sup_pclk = p ? p->sup_pclk : false; sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); if (!clk) return; if (sup_pclk && clk == host->clk) sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); else sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & ((fls(DIV_ROUND_UP(host->clk, clk) - 1) - 1) << 16)); sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); }
static int sh_mmcif_error_manage(struct sh_mmcif_host *host) { u32 state1, state2; int ret, timeout = 10000000; host->sd_error = 0; host->wait_int = 0; state1 = sh_mmcif_read(&host->regs->ce_host_sts1); state2 = sh_mmcif_read(&host->regs->ce_host_sts2); debug("%s: ERR HOST_STS1 = %08x\n", \ DRIVER_NAME, sh_mmcif_read(&host->regs->ce_host_sts1)); debug("%s: ERR HOST_STS2 = %08x\n", \ DRIVER_NAME, sh_mmcif_read(&host->regs->ce_host_sts2)); if (state1 & STS1_CMDSEQ) { debug("%s: Forced end of command sequence\n", DRIVER_NAME); sh_mmcif_bitset(CMD_CTRL_BREAK, &host->regs->ce_cmd_ctrl); sh_mmcif_bitset(~CMD_CTRL_BREAK, &host->regs->ce_cmd_ctrl); while (1) { timeout--; if (timeout < 0) { printf(DRIVER_NAME": Forceed end of " \ "command sequence timeout err\n"); return -EILSEQ; } if (!(sh_mmcif_read(&host->regs->ce_host_sts1) & STS1_CMDSEQ)) break; } sh_mmcif_sync_reset(host); return -EILSEQ; } if (state2 & STS2_CRC_ERR) ret = -EILSEQ; else if (state2 & STS2_TIMEOUT_ERR) ret = TIMEOUT; else ret = -EILSEQ; return ret; }
static void sh_mmcif_single_write(struct sh_mmcif_host *host, struct mmc_request *mrq) { host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & BLOCK_SIZE_MASK) + 3; host->wait_for = MMCIF_WAIT_FOR_WRITE; /* buf write enable */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); }
static void sh_mmcif_single_read(struct sh_mmcif_host *host, struct mmc_request *mrq) { host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & BLOCK_SIZE_MASK) + 3; host->wait_for = MMCIF_WAIT_FOR_READ; schedule_delayed_work(&host->timeout_work, host->timeout); /* buf read enable */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); }
/*
 * Start a DMA transmit for the current request.
 * Maps the sg list, prepares and submits a slave-sg descriptor, and
 * enables DMA-write in BUF_ACC. If mapping or descriptor preparation
 * fails, both DMA channels are released and the driver falls back to
 * PIO for this and future requests.
 */
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		/* mark DMA active before submission */
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		/* let the controller raise DMA write requests */
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pd->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
				BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
static void sh_mmcif_multi_read(struct sh_mmcif_host *host, struct mmc_request *mrq) { struct mmc_data *data = mrq->data; if (!data->sg_len || !data->sg->length) return; host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & BLOCK_SIZE_MASK; host->wait_for = MMCIF_WAIT_FOR_MREAD; host->sg_idx = 0; host->sg_blkidx = 0; host->pio_ptr = sg_virt(data->sg); schedule_delayed_work(&host->timeout_work, host->timeout); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); }
static void sh_mmcif_multi_write(struct sh_mmcif_host *host, struct mmc_request *mrq) { struct mmc_data *data = mrq->data; if (!data->sg_len || !data->sg->length) return; host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & BLOCK_SIZE_MASK; host->wait_for = MMCIF_WAIT_FOR_MWRITE; host->sg_idx = 0; host->sg_blkidx = 0; host->pio_ptr = sg_virt(data->sg); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); }
static bool sh_mmcif_write_block(struct sh_mmcif_host *host) { struct mmc_data *data = host->mrq->data; u32 *p = sg_virt(data->sg); int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); return false; } for (i = 0; i < host->blocksize / 4; i++) sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); /* buffer write end */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); host->wait_for = MMCIF_WAIT_FOR_WRITE_END; return true; }
static bool sh_mmcif_read_block(struct sh_mmcif_host *host) { struct mmc_data *data = host->mrq->data; u32 *p = sg_virt(data->sg); int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); return false; } for (i = 0; i < host->blocksize / 4; i++) *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); /* buffer read end */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); host->wait_for = MMCIF_WAIT_FOR_READ_END; return true; }
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) { struct mmc_data *data = host->mrq->data; u32 *p = host->pio_ptr; int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); return false; } BUG_ON(!data->sg->length); for (i = 0; i < host->blocksize / 4; i++) sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); if (!sh_mmcif_next_block(host, p)) return false; schedule_delayed_work(&host->timeout_work, host->timeout); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); return true; }
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) { struct mmc_data *data = host->mrq->data; u32 *p = host->pio_ptr; int i; if (host->sd_error) { data->error = sh_mmcif_error_manage(host); dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); return false; } BUG_ON(!data->sg->length); for (i = 0; i < host->blocksize / 4; i++) sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); if (!sh_mmcif_next_block(host, p)) return false; sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); return true; }
/*
 * Assemble the CE_CMD_SET register value for a command: response type,
 * busy-wait (RBSY), data direction/width, multi-block and auto-CMD12
 * flags, and index/CRC7 check bits. The opcode lands in bits [31:24].
 * Returns the composed register value.
 */
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq,
			    struct mmc_command *cmd, u32 opc)
{
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		pr_err(DRIVER_NAME": Not support type response.\n");
		break;
	}

	switch (opc) {
	/* RBSY: commands that hold the card busy after the response */
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
	case MMC_GEN_CMD:
		tmp |= CMD_SET_RBSY;
		break;
	}

	/* WDAT / DATW: data phase present, select bus width */
	if (host->data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			pr_err(DRIVER_NAME": Not support bus width.\n");
			break;
		}
	}

	/* DWEN: write direction */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;

	/* CMLTE/CMD12EN: multi-block with automatic CMD12 */
	if (opc == MMC_READ_MULTIPLE_BLOCK ||
	    opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		/* block count lives in the upper half of CE_BLOCK_SET */
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				mrq->data->blocks << 16);
	}

	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;

	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;

	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return opc = ((opc << 24) | tmp);
}
/*
 * Assemble the CE_CMD_SET register value for a command (U-Boot
 * variant): response type, busy-wait (RBSY), data direction/width,
 * multi-block and auto-CMD12 flags, and index/CRC7 check bits. The
 * command index lands in bits [31:24]. Returns the composed value.
 */
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_data *data, struct mmc_cmd *cmd)
{
	u32 tmp = 0;
	u32 opc = cmd->cmdidx;

	/* Response Type check */
	switch (cmd->resp_type) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1b:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		printf(DRIVER_NAME": Not support type response.\n");
		break;
	}

	/* RBSY: SWITCH holds the card busy after the response */
	if (opc == MMC_CMD_SWITCH)
		tmp |= CMD_SET_RBSY;

	/* WDAT / DATW: data phase present, select bus width */
	if (host->data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			printf(DRIVER_NAME": Not support bus width.\n");
			break;
		}
	}

	/* DWEN: write direction */
	if (opc == MMC_CMD_WRITE_SINGLE_BLOCK ||
	    opc == MMC_CMD_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;

	/* CMLTE/CMD12EN: multi-block with automatic CMD12 */
	if (opc == MMC_CMD_READ_MULTIPLE_BLOCK ||
	    opc == MMC_CMD_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		/* block count lives in the upper half of CE_BLOCK_SET */
		sh_mmcif_bitset(data->blocks << 16,
				&host->regs->ce_block_set);
	}

	/* RIDXC[1:0] check bits */
	if (opc == MMC_CMD_SEND_OP_COND || opc == MMC_CMD_ALL_SEND_CID ||
	    opc == MMC_CMD_SEND_CSD || opc == MMC_CMD_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;

	/* RCRC7C[1:0] check bits */
	if (opc == MMC_CMD_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;

	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_CMD_ALL_SEND_CID ||
	    opc == MMC_CMD_SEND_CSD || opc == MMC_CMD_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return opc = ((opc << 24) | tmp);
}
/*
 * Issue one MMC command and, if present, run its data phase.
 * STOP_TRANSMISSION is special-cased: the controller issues CMD12
 * automatically, so only its completion interrupt is awaited.
 * Returns 0 on success, TIMEOUT for commands expected to time out
 * during probing, or an sh_mmcif_error_manage() code.
 */
static int sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			      struct mmc_data *data, struct mmc_cmd *cmd)
{
	long time;
	int ret = 0, mask = 0;
	u32 opc = cmd->cmdidx;

	if (opc == MMC_CMD_STOP_TRANSMISSION) {
		/* MMCIF sends the STOP command automatically */
		if (host->last_cmd == MMC_CMD_READ_MULTIPLE_BLOCK)
			sh_mmcif_bitset(MASK_MCMD12DRE,
					&host->regs->ce_int_mask);
		else
			sh_mmcif_bitset(MASK_MCMD12RBE,
					&host->regs->ce_int_mask);

		time = mmcif_wait_interrupt_flag(host);
		if (time == 0 || host->sd_error != 0)
			return sh_mmcif_error_manage(host);

		sh_mmcif_get_cmd12response(host, cmd);
		return 0;
	}

	/* choose the completion interrupt, then add every error source */
	if (opc == MMC_CMD_SWITCH)
		mask = MASK_MRBSYE;
	else
		mask = MASK_MCRSPE;

	mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;

	if (host->data) {
		/* clear, then program the block size for the data phase */
		sh_mmcif_write(0, &host->regs->ce_block_set);
		sh_mmcif_write(data->blocksize, &host->regs->ce_block_set);
	}
	opc = sh_mmcif_set_cmd(host, data, cmd);

	sh_mmcif_write(INT_START_MAGIC, &host->regs->ce_int);
	sh_mmcif_write(mask, &host->regs->ce_int_mask);

	debug("CMD%d ARG:%08x\n", cmd->cmdidx, cmd->cmdarg);
	/* set arg */
	sh_mmcif_write(cmd->cmdarg, &host->regs->ce_arg);
	host->wait_int = 0;
	/* set cmd — this write starts the command */
	sh_mmcif_write(opc, &host->regs->ce_cmd_set);

	time = mmcif_wait_interrupt_flag(host);
	if (time == 0)
		return sh_mmcif_error_manage(host);

	if (host->sd_error) {
		switch (cmd->cmdidx) {
		case MMC_CMD_ALL_SEND_CID:
		case MMC_CMD_SELECT_CARD:
		case MMC_CMD_APP_CMD:
			/* expected to fail for some cards during probing */
			ret = TIMEOUT;
			break;
		default:
			printf(DRIVER_NAME": Cmd(d'%d) err\n", cmd->cmdidx);
			ret = sh_mmcif_error_manage(host);
			break;
		}
		host->sd_error = 0;
		host->wait_int = 0;
		return ret;
	}

	/* if no response */
	if (!(opc & 0x00C00000))
		return 0;

	if (host->wait_int == 1) {
		sh_mmcif_get_response(host, cmd);
		host->wait_int = 0;
	}
	if (host->data)
		ret = sh_mmcif_data_trans(host, data, cmd->cmdidx);
	/* remembered so a later STOP knows the transfer direction */
	host->last_cmd = cmd->cmdidx;

	return ret;
}
/*
 * Assemble the CE_CMD_SET register value for a command: response type,
 * busy-wait (RBSY), data direction/width/timing, multi-block and
 * auto-CMD12 flags, and index/CRC7 check bits. The opcode lands in
 * bits [31:24]. Returns the composed register value.
 */
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(&host->pd->dev, "Unsupported response type.\n");
		break;
	}

	switch (opc) {
	/* RBSY: commands that hold the card busy after the response */
	case MMC_SLEEP_AWAKE:
	case MMC_SWITCH:
	case MMC_STOP_TRANSMISSION:
	case MMC_SET_WRITE_PROT:
	case MMC_CLR_WRITE_PROT:
	case MMC_ERASE:
		tmp |= CMD_SET_RBSY;
		break;
	}

	/* WDAT / DATW: data phase present, select bus width */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(&host->pd->dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_UHS_DDR50:
			/*
			 * MMC core will only set this timing, if the host
			 * advertises the MMC_CAP_UHS_DDR50 capability. MMCIF
			 * implementations with this capability, e.g. sh73a0,
			 * will have to set it in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}

	/* DWEN: write direction */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;

	/* CMLTE/CMD12EN: multi-block with automatic CMD12 */
	if (opc == MMC_READ_MULTIPLE_BLOCK ||
	    opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		/* block count lives in the upper half of CE_BLOCK_SET */
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}

	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;

	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;

	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}