/*
 * Send CMD1 (SEND_OP_COND), polling until the card leaves its busy
 * state or the 100-attempt budget runs out.  A probe-only call
 * (ocr == 0) makes a single pass.  On native (non-SPI) hosts the raw
 * OCR response word is handed back through @rocr.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {0};
	int tries, ret = 0;

	BUG_ON(!host);

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (tries = 100; tries; tries--) {
		ret = mmc_wait_for_cmd(host, &cmd, 0);
		if (ret)
			break;

		/* probe-only call: one pass is enough */
		if (ocr == 0)
			break;

		/* otherwise keep polling until the reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		ret = -ETIMEDOUT;
		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return ret;
}
int mmc_bkops(struct mmc_card *card, int start) { int err; int retry = 3; struct mmc_command cmd = {0}; BUG_ON(!card); BUG_ON(!card->host); if (start) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START, 1, 0); if (err) printk(KERN_ERR "%s : %s start bkops fail err = %d\n", mmc_hostname(card->host), __func__, err); else printk(KERN_DEBUG "%s : %s start bkops!!\n", mmc_hostname(card->host), __func__); } else { do { cmd.opcode = card->ext_csd.hpi_cmd; if (cmd.opcode == MMC_SEND_STATUS) { cmd.arg = (card->rca << 16 | 0x1); cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; } else { cmd.arg = (card->rca << 16 | 0x1); cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; } err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); } while (err && retry--); if (err || !retry) { printk(KERN_DEBUG "%s : %s stop bkops fail retry %d\n", mmc_hostname(card->host), __func__, retry); } else { printk(KERN_DEBUG "%s : %s stop bkops\n", mmc_hostname(card->host), __func__); } } return err; }
int mmc_go_idle(struct mmc_host *host) { int err; struct mmc_command cmd; DBG("[%s] s\n",__func__); /* * Non-SPI hosts need to prevent chipselect going active during * GO_IDLE; that would put chips into SPI mode. Remind them of * that in case of hardware that won't pull up DAT3/nCS otherwise. * * SPI hosts ignore ios.chip_select; it's managed according to * rules that must accomodate non-MMC slaves which this layer * won't even know about. */ if (!mmc_host_is_spi(host)) { mmc_set_chip_select(host, MMC_CS_HIGH); mmc_delay(1); } memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_GO_IDLE_STATE; cmd.arg = 0; // cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; //zhf: mark SPI mode temporarily by James Tian err = mmc_wait_for_cmd(host, &cmd, 0); mmc_delay(1); if (!mmc_host_is_spi(host)) { mmc_set_chip_select(host, MMC_CS_DONTCARE); mmc_delay(1); } host->use_spi_crc = 0; DBG("[%s] e\n",__func__); return err; }
int mmc_card_sleepawake(struct mmc_host *host, int sleep) { struct mmc_command cmd; struct mmc_card *card = host->card; int err; if (sleep) mmc_deselect_cards(host); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SLEEP_AWAKE; cmd.arg = card->rca << 16; if (sleep) cmd.arg |= 1 << 15; cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; err = mmc_wait_for_cmd(host, &cmd, 0); if (err) return err; /* * If the host does not wait while the card signals busy, then we will * will have to wait the sleep/awake timeout. Note, we cannot use the * SEND_STATUS command to poll the status because that command (and most * others) is invalid while the card sleeps. */ if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) { /* JEDEC MMCA 4.41 specifies the timeout value is in 200ns..838.86ms range. Round it up to 1us and use an appropriate delay method. */ unsigned long us = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10); if (us < 10) udelay(us); else usleep_range(us, us + 100); } if (!sleep) err = mmc_select_card(card); return err; }
/*
 * CMD6 (SWITCH): write a single EXT_CSD byte.
 *
 * @set:   command-set field of the argument
 * @index: EXT_CSD byte index to modify
 * @value: value to write into that byte
 *
 * Returns 0 on success or the mmc_wait_for_cmd() error code.
 */
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
{
	struct mmc_command cmd;
	int ret;

	BUG_ON(!card);
	BUG_ON(!card->host);

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (ret)
		return ret;

	return 0;
}
static int mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) { struct mmc_command cmd; int err; mmc_card_claim_host(card); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = 1 << card->csd.read_blkbits; cmd.flags = MMC_RSP_R1; err = mmc_wait_for_cmd(card->host, &cmd, 5); mmc_card_release_host(card); if (err) { printk(KERN_ERR "%s: unable to set block size to %d: %d\n", md->disk->disk_name, cmd.arg, err); return -EINVAL; } return 0; }
/*
 * Fetch a 128-bit CID or CSD register in native (non-SPI) mode.
 * @opcode selects which register (e.g. SEND_CID / SEND_CSD); the four
 * R2 response words are copied into @cxd.
 */
static int mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd,
	int opcode)
{
	struct mmc_command cmd;
	int ret;

	BUG_ON(!host);
	BUG_ON(!cxd);

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (ret)
		return ret;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
/*
 * Thin wrapper around mmc_wait_for_cmd(): issue one command and hand
 * back resp[0] through @resp.  *resp is cleared first so callers see
 * zero on failure.
 */
static int mmc_send_cmd(struct mmc_host *host, u32 opcode, u32 arg,
			unsigned int flags, u32 *resp)
{
	struct mmc_command cmd = {
		.opcode = opcode,
		.arg = arg,
		.flags = flags,
	};
	int ret;

	*resp = 0;
	ret = mmc_wait_for_cmd(host, &cmd, 0);
	if (ret)
		printk(KERN_ERR "[CMD%d] FAILED!!\n", cmd.opcode);
	else
		*resp = cmd.resp[0];

	return ret;
}
/*
 * Reset/abort an SDIO card by setting the reset bit in the CCCR
 * abort register via CMD52 (IO_RW_DIRECT).
 */
int mmc_reset_sdio(struct mmc_host *host)
{
	struct mmc_command cmd;

	BUG_ON(!host);

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = SD_IO_RW_DIRECT;
	/* write flag | register address (bits 25:9) | reset bit */
	cmd.arg = 0x80000000 | ((SDIO_CCCR_ABORT) << 9) | (1 << 3);
	cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
/*
 * CMD52 (IO_RW_DIRECT): read or write one byte of an SDIO function's
 * register space.
 *
 * @write: non-zero for a write, zero for a read
 * @fn:    function number (0-7)
 * @addr:  register address (placed in bits 25:9 of the argument)
 * @in:    byte to write (ignored for reads)
 * @out:   when non-NULL, receives the byte read back; for writes this
 *         also requests read-after-write (RAW bit)
 *
 * NOTE(review): this definition appears truncated in the visible
 * chunk — the final "return 0;" and closing brace are not shown.
 */
static int fsl_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
	unsigned addr, u8 in, u8 *out)
{
	struct mmc_command cmd;
	int err;

	BUG_ON(!card);
	BUG_ON(fn > 7);

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_IO_RW_DIRECT;
	/* pack R/W flag, function, RAW bit, address and data */
	cmd.arg = write ? 0x80000000 : 0x00000000;
	cmd.arg |= fn << 28;
	cmd.arg |= (write && out) ? 0x08000000 : 0x00000000;
	cmd.arg |= addr << 9;
	cmd.arg |= in;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(card->host)) {
		/* host driver already reported errors */
	} else {
		/* native mode: inspect the R5 status bits ourselves */
		if (cmd.resp[0] & R5_ERROR)
			return -EIO;
		if (cmd.resp[0] & R5_FUNCTION_NUMBER)
			return -EINVAL;
		if (cmd.resp[0] & R5_OUT_OF_RANGE)
			return -ERANGE;
	}

	if (out) {
		/* SPI R5 carries the data byte in bits 15:8, native in 7:0 */
		if (mmc_host_is_spi(card->host))
			*out = (cmd.resp[0] >> 8) & 0xFF;
		else
			*out = cmd.resp[0] & 0xFF;
	}
/*
 * CMD13 (SEND_STATUS): query the card's status register.  Native mode
 * supplies the RCA in the argument; SPI mode sends no argument.  The
 * raw response word is stored via @status when requested.
 */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	int ret;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (ret)
		return ret;

	/*
	 * Callers must know whether they are looking at a native-format
	 * or an SPI-format status word.
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
/*
 * Issue a raw SD/MMC command on the host that owns this HIF device,
 * with up to three retries.  On success, resp[0] is stored through
 * @resp when the caller provides it.
 */
static s32 IssueSDCommand(struct hif_device *device, u32 opcode, u32 arg,
			  u32 flags, u32 *resp)
{
	struct mmc_command cmd;
	struct mmc_host *mmc;
	s32 status;

	mmc = device->func->card->host;

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = flags;

	status = mmc_wait_for_cmd(mmc, &cmd, 3);
	if (!status && resp)
		*resp = cmd.resp[0];

	return status;
}
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) { int err; struct mmc_command cmd; BUG_ON(!host); BUG_ON(!rca); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err != MMC_ERR_NONE) return err; *rca = cmd.resp[0] >> 16; return MMC_ERR_NONE; }
int mmc_card_sleepawake(struct mmc_host *host, int sleep) { struct mmc_command cmd; struct mmc_card *card = host->card; int err; if (sleep){ mmc_deselect_cards(host); } memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SLEEP_AWAKE; cmd.arg = card->rca << 16; if (sleep) cmd.arg |= 1 << 15; cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; err = mmc_wait_for_cmd(host, &cmd, 0); if (err) return err; /* * If the host does not wait while the card signals busy, then we will * will have to wait the sleep/awake timeout. Note, we cannot use the * SEND_STATUS command to poll the status because that command (and most * others) is invalid while the card sleeps. */ if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)){ mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000)); } if (!sleep) err = mmc_select_card(card); return err; }
/*
 * CMD13 (SEND_STATUS): read the card status register.  The raw
 * response word (native or SPI format, depending on the host) is
 * stored through @status when the caller asks for it.
 */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd;
	int ret;

	BUG_ON(!card);
	BUG_ON(!card->host);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (ret)
		return ret;

	if (status)
		*status = cmd.resp[0];

	return 0;
}
/*
 * CMD2 (ALL_SEND_CID), broadcast with response: every card in the
 * identification state sends its CID.  The four R2 response words are
 * copied into @cid.
 */
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
	struct mmc_command cmd = {
		.opcode = MMC_ALL_SEND_CID,
		.arg = 0,
		.flags = MMC_RSP_R2 | MMC_CMD_BCR,
	};
	int ret;

	BUG_ON(!host);
	BUG_ON(!cid);

	ret = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (ret)
		return ret;

	memcpy(cid, cmd.resp, sizeof(u32) * 4);

	return 0;
}
/*
 * CMD1 (SEND_OP_COND): negotiate the operating voltage, polling up to
 * 100 times (10ms apart) until the card reports it is no longer busy.
 * A probe-only call (ocr == 0) makes a single pass.  Native hosts get
 * the last OCR response back through @rocr.
 */
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd;
	int attempts = 100;
	int ret = 0;

	BUG_ON(!host);

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	while (attempts--) {
		ret = mmc_wait_for_cmd(host, &cmd, 0);
		if (ret)
			break;

		/* probing only: a single pass suffices */
		if (!ocr)
			break;

		/* stop once the reset has completed */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		ret = -ETIMEDOUT;
		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return ret;
}
/*
 * Poll CMD13 (SEND_STATUS) while the card is busy.
 *
 * Issues SEND_STATUS in a loop; the first time swrm_mmc_busy()
 * reports the card busy, a one-shot warning is logged if the host
 * claims MMC_CAP_WAIT_WHILE_BUSY (i.e. it should have absorbed the
 * busy phase itself).
 *
 * NOTE(review): this definition appears truncated in the visible
 * chunk — the do-while terminating condition and the function's
 * return statement are not shown.
 */
static int swrm_mmc_wait_busy(struct mmc_card *card)
{
	int ret, busy = 0;
	struct mmc_command cmd = {0};

	do {
		/* re-zero each pass so stale response data is not reused */
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && swrm_mmc_busy(&cmd)) {
			busy = 1;	/* warn only once per call */
			if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) {
				printk(KERN_DEBUG "%s: Warning: Host did not \
wait for busy state to end.\n", mmc_hostname(card->host));
			}
		}
/*
 * Abort a tuning command with CMD12 (STOP_TRANSMISSION).  Only the
 * eMMC specification defines this; the SD specification does not, so
 * anything other than the HS200 tuning opcode is a silent no-op.
 */
int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;	/* not eMMC tuning: nothing to abort */

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	/*
	 * Drivers that promote R1 to R1b need a busy timeout; 150ms
	 * mirrors the tuning timeout.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
static int assd_set_blksize(struct mmc_host *host, int blksize) { struct mmc_command cmd; int ret; BUG_ON(!host); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = blksize; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; mmc_claim_host(host); if (host->card == NULL) { ret = -ENODEV; goto out; } ret = mmc_wait_for_cmd(host, &cmd, 5); out: mmc_release_host(host); return ret; }
/*
 * Read the 512-byte EXT_CSD register via CMD8 (SEND_EXT_CSD), an
 * ADTC (data transfer) command.  @rawextcsd must point at a 512-byte
 * buffer, which is cleared before the transfer.
 */
static int mmc_send_ext_csd(struct mmc_softc *sc, uint8_t *rawextcsd)
{
	struct mmc_command cmd;
	struct mmc_data data;

	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(rawextcsd, 0, 512);

	cmd.opcode = MMC_SEND_EXT_CSD;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.arg = 0;
	cmd.data = &data;

	data.data = rawextcsd;
	data.len = 512;
	data.flags = MMC_DATA_READ;

	return (mmc_wait_for_cmd(sc, &cmd, CMD_RETRIES));
}
/*
 * CMD13 (SEND_STATUS): read the card status register (native mode,
 * RCA-addressed).  The response word goes through @status when the
 * caller supplies a pointer.
 */
int mmc_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd;
	int ret;

	BUG_ON(!card);
	BUG_ON(!card->host);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (ret != MMC_ERR_NONE)
		return ret;

	if (status)
		*status = cmd.resp[0];

	return MMC_ERR_NONE;
}
/*
 * CMD9 (SEND_CSD): fetch the card's 128-bit CSD register.  The four
 * R2 response words are copied into @csd.
 */
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	struct mmc_command cmd;
	int ret;

	BUG_ON(!card);
	BUG_ON(!card->host);
	BUG_ON(!csd);

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_SEND_CSD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	ret = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
	if (ret != MMC_ERR_NONE)
		return ret;

	memcpy(csd, cmd.resp, sizeof(u32) * 4);

	return MMC_ERR_NONE;
}
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { int err; struct mmc_command cmd = {0}; BUG_ON(!host); cmd.opcode = MMC_SELECT_CARD; if (card) { cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; } else { cmd.arg = 0; cmd.flags = MMC_RSP_NONE | MMC_CMD_AC; } err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) return err; return 0; }
static void mmc_idle_cards(struct mmc_softc *sc) { device_t dev; struct mmc_command cmd; dev = sc->dev; mmcbr_set_chip_select(dev, cs_high); mmcbr_update_ios(dev); mmc_ms_delay(1); memset(&cmd, 0, sizeof(cmd)); cmd.opcode = MMC_GO_IDLE_STATE; cmd.arg = 0; cmd.flags = MMC_RSP_NONE | MMC_CMD_BC; cmd.data = NULL; mmc_wait_for_cmd(sc, &cmd, 0); mmc_ms_delay(1); mmcbr_set_chip_select(dev, cs_dontcare); mmcbr_update_ios(dev); mmc_ms_delay(1); }
/* * Wait for the card to finish the busy state */ static int mmc_test_wait_busy(struct mmc_test_card *test) { int ret, busy; struct mmc_command cmd; busy = 0; do { memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; cmd.arg = test->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); if (ret){ printk(KERN_ERR "%s: error %d requesting status\n", mmc_hostname(test->card->host), ret); break; } if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) { busy = 1; printk(KERN_INFO "%s: Warning: Host did not " "wait for busy state to end.\n", mmc_hostname(test->card->host)); } /* * Some cards mishandle the status bits, * so make sure to check both the busy * indication and the card state. */ } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == 7)); return ret; }
/*
 * CMD8 (SEND_IF_COND): mandatory probe before SD_APP_OP_COND so SD
 * 2.0 cards can confirm the supply voltage; SD 1.0 cards simply fail
 * this command, which is harmless.  The card must echo the test
 * pattern back in the response.
 *
 * Returns MMC_ERR_NONE on success, MMC_ERR_FAILED on a bad echo, or
 * the transport error.
 */
int mmc_send_if_cond(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd;
	int err;
	static const u8 test_pattern = 0xAA;

	/* Fix: the command struct was used without being zeroed, leaving
	 * resp/retries/data as stack garbage; every other command in
	 * this file zero-initialises first. */
	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_SEND_IF_COND;
	/* bit 8: host supports 2.7-3.6V; low byte: echo-back pattern */
	cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
	cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err != MMC_ERR_NONE)
		return err;

	if ((cmd.resp[0] & 0xFF) != test_pattern)
		return MMC_ERR_FAILED;

	return MMC_ERR_NONE;
}
int mmc_set_relative_addr(struct mmc_card *card) { int err; struct mmc_command cmd; DBG("[%s] s\n",__func__); BUG_ON(!card); BUG_ON(!card->host); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SET_RELATIVE_ADDR; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); if (err) { DBG("[%s] e1\n",__func__); return err; } DBG("[%s] e2\n",__func__); return 0; }
int mmc_send_speed_class_ctrl(struct mmc_host *host, unsigned int speed_class_ctrl_arg) { int err = 0; struct mmc_command cmd = { .opcode = SD_SPEED_CLASS_CONTROL, .arg = (speed_class_ctrl_arg << 28), .flags = MMC_RSP_R1B | MMC_CMD_AC | MMC_RSP_BUSY, }; BUG_ON(!host); BUG_ON(speed_class_ctrl_arg > 3); err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) return err; /* * If the host does not wait while the card signals busy, then we will * will have to wait the max busy indication timeout. */ if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) mmc_delay(1000); return err; }
static int mmc_queue_thread(void *d) { struct mmc_queue *mq = d; struct request_queue *q = mq->queue; current->flags |= PF_MEMALLOC; down(&mq->thread_sem); do { struct request *req = NULL; spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); if (!blk_queue_plugged(q)) req = elv_next_request(q); mq->req = req; spin_unlock_irq(q->queue_lock); if (!req) { if (kthread_should_stop()) { set_current_state(TASK_RUNNING); break; } up(&mq->thread_sem); schedule(); down(&mq->thread_sem); continue; } set_current_state(TASK_RUNNING); #ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME if (mq->check_status) { struct mmc_command cmd; do { int err; cmd.opcode = MMC_SEND_STATUS; cmd.arg = mq->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; mmc_claim_host(mq->card->host); err = mmc_wait_for_cmd(mq->card->host, &cmd, 5); mmc_release_host(mq->card->host); if (err) { printk(KERN_ERR "%s: failed to get status (%d)\n", __func__, err); msleep(5); continue; } printk(KERN_DEBUG "%s: status 0x%.8x\n", __func__, cmd.resp[0]); } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == 7)); mq->check_status = 0; } #endif mq->issue_fn(mq, req); } while (1); up(&mq->thread_sem); return 0; }