/*
 * mmc_erase_blks - erase `size` bytes starting at byte offset `addr`.
 * @dev_id:   host/slot index, resolved via mmc_get_card()
 * @addr:     start offset in bytes; must be MMC_BLOCK_SIZE aligned
 * @size:     number of bytes to erase (rounded up to whole blocks)
 * @bootarea: nonzero to erase in boot partition 1 (eMMC only)
 *
 * Returns 0 on success, MMC_ERR_FAILED for an unaligned address, or a
 * negative -__LINE__ code identifying the failing step.
 *
 * Fix vs. original: blknr/total_blks are now computed before the first
 * failure path, so the final status printf never reads them
 * uninitialized when the partition-config switch fails early.
 */
int mmc_erase_blks(int dev_id, u32 addr, u32 size, int bootarea)
{
	int result = 0;
	u8 val;
	u8 *ext_csd;
	u32 blknr;
	u32 total_blks;
	struct mmc_card *card;

	if (!size)
		return 0;
	if (addr % MMC_BLOCK_SIZE)
		return MMC_ERR_FAILED;

	/* Compute the block range up front so both printfs below always
	 * see initialized values, even on an early goto done. */
	blknr = addr / MMC_BLOCK_SIZE;
	total_blks = (size + MMC_BLOCK_SIZE - 1) / MMC_BLOCK_SIZE;

	card = mmc_get_card(dev_id);
	ext_csd = &card->raw_ext_csd[0];

	if (bootarea && !mmc_card_sd(card) && card->ext_csd.part_en) {
		/* configure to specified partition */
		val = (ext_csd[EXT_CSD_PART_CFG] & ~0x7) |
		      EXT_CSD_PART_CFG_BOOT_PART_1;
		if (mmc_set_part_config(card, val) != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
	}

	/* CMD35/CMD36/CMD38 sequence: set range, then execute erase. */
	if (mmc_erase_start(card, blknr * MMC_BLOCK_SIZE) != MMC_ERR_NONE) {
		result = -__LINE__;
		goto done;
	}
	if (mmc_erase_end(card, (blknr + total_blks) * MMC_BLOCK_SIZE) != MMC_ERR_NONE) {
		result = -__LINE__;
		goto done;
	}
	if (mmc_erase(card, MMC_ERASE_NORMAL) != MMC_ERR_NONE) {
		result = -__LINE__;
		goto done;
	}

done:
	if (bootarea && !mmc_card_sd(card) && card->ext_csd.part_en) {
		/* configure to user partition */
		val = (ext_csd[EXT_CSD_PART_CFG] & ~0x7) |
		      EXT_CSD_PART_CFG_DEFT_PART;
		if (mmc_set_part_config(card, val) != MMC_ERR_NONE)
			result = -__LINE__;
	}

	if (!result) {
		printf("[SD%d] Erase %d blocks (%d bytes) from 0x%x successfully\n",
		       dev_id, total_blks, total_blks * MMC_BLOCK_SIZE,
		       blknr * MMC_BLOCK_SIZE);
	} else {
		printf("[SD%d] Erase %d blocks (%d bytes) from 0x%x failed %d\n",
		       dev_id, total_blks, total_blks * MMC_BLOCK_SIZE,
		       blknr * MMC_BLOCK_SIZE, result);
	}
	return result;
}
/*
 * mmc_readback_blks - hex-dump `blks` blocks starting at byte offset `addr`.
 * @dev_id:   host/slot index
 * @addr:     start offset in bytes; converted to a block number below
 * @blks:     number of MMC_BLOCK_SIZE blocks to read and print
 * @bootarea: nonzero to read from boot partition 1 (eMMC only)
 *
 * Returns 0 on success or a negative -__LINE__ code for the failing step.
 * On any path the partition config is restored to the user area before
 * returning.
 */
int mmc_readback_blks(int dev_id, unsigned long addr, int blks, int bootarea)
{
	int i, j, result = 0;
	u8 val;
	u8 *ext_csd;
	unsigned long blknr = addr / MMC_BLOCK_SIZE;
	/* Scratch area for read-back; advances one block per iteration.
	 * NOTE(review): assumes MMC_BUF_ADDR has room for `blks` blocks —
	 * confirm against the platform buffer size. */
	unsigned char *buf = (unsigned char*)MMC_BUF_ADDR;
	struct mmc_card *card;
	struct mmc_host *host;

	host = mmc_get_host(dev_id);
	card = mmc_get_card(dev_id);
	ext_csd = &card->raw_ext_csd[0];

	if (bootarea && !mmc_card_sd(card)) {
		/* configure to specified partition */
		val = (ext_csd[EXT_CSD_PART_CFG] & ~0x7) | EXT_CSD_PART_CFG_BOOT_PART_1;
		if (mmc_set_part_config(card, val) != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
		/* Re-read EXT_CSD so the cached copy matches the new config. */
		if (mmc_read_ext_csd(host, card) != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
	}

	printf("[SD%d] Dump %d blks from 0x%x (FLASH)\n",
	       dev_id, blks, blknr * MMC_BLOCK_SIZE);

	for (i = 0; i < blks; i++) {
		memset(buf, 0, MMC_BLOCK_SIZE);
		/* Single-block read; abort the dump on the first failure. */
		if (MMC_ERR_NONE != mmc_block_read(dev_id, blknr + i, 1,
						   (unsigned long*)buf)) {
			printf("\n[SD%d] Read from %dth block error\n",
			       dev_id, blknr + i);
			break;
		}
		/* 16 bytes per output row, prefixed with the flash offset. */
		for (j = 0; j < MMC_BLOCK_SIZE; j++) {
			if (j % 16 == 0)
				printf("\n%xh: ",
				       (blknr + i) * MMC_BLOCK_SIZE + j);
			printf("%x ", buf[j]);
		}
		printf("\n");
		buf += MMC_BLOCK_SIZE;
	}

done:
	if (bootarea && !mmc_card_sd(card)) {
		/* configure to user partition */
		val = (ext_csd[EXT_CSD_PART_CFG] & ~0x7) | EXT_CSD_PART_CFG_DEFT_PART;
		if (mmc_set_part_config(card, val) != MMC_ERR_NONE)
			result = -__LINE__;
		if (mmc_read_ext_csd(host, card) != MMC_ERR_NONE) {
			result = -__LINE__;
		}
	}
	return result;
}
void mmc_add_card_debugfs(struct mmc_card *card) { struct mmc_host *host = card->host; struct dentry *root = NULL; struct dentry *sdxc_root = NULL; if (!host->debugfs_root) return; sdxc_root = debugfs_create_dir("sdxc_root", host->debugfs_root); if (IS_ERR(sdxc_root)) return; if (!sdxc_root) goto err; root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root); if (IS_ERR(root)) /* Don't complain -- debugfs just isn't enabled */ return; if (!root) /* Complain -- debugfs is enabled, but it failed to * create the directory. */ goto err; card->debugfs_sdxc = sdxc_root; card->debugfs_root = root; if (!debugfs_create_x32("state", S_IRUSR, root, &card->state)) goto err; if (mmc_card_mmc(card) || mmc_card_sd(card)) if (!debugfs_create_file("status", S_IRUSR, root, card, &mmc_dbg_card_status_fops)) goto err; if (mmc_card_mmc(card)) if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, &mmc_dbg_ext_csd_fops)) goto err; if (mmc_card_sd(card)) if (!debugfs_create_file("sdxc", S_IRUSR, sdxc_root, card, &mmc_sdxc_fops)) goto err; if (mmc_card_mmc(card)) if (!debugfs_create_file("wr_prot", S_IFREG|S_IRWXU|S_IRGRP|S_IROTH, root, card, &mmc_dbg_wr_prot_fops)) goto err; return; err: debugfs_remove_recursive(root); debugfs_remove_recursive(sdxc_root); card->debugfs_root = NULL; card->debugfs_sdxc = NULL; dev_err(&card->dev, "failed to initialize debugfs\n"); }
/** * mmc_set_data_timeout - set the timeout for a data command * @data: data phase for command * @card: the MMC card associated with the data transfer * * Computes the data timeout parameters according to the * correct algorithm given the card type. */ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) { unsigned int mult; /* * SDIO cards only define an upper 1 s limit on access. */ if (mmc_card_sdio(card)) { data->timeout_ns = 1000000000; data->timeout_clks = 0; return; } /* * SD cards use a 100 multiplier rather than 10 */ mult = mmc_card_sd(card) ? 100 : 10; /* * Scale up the multiplier (and therefore the timeout) by * the r2w factor for writes. */ if (data->flags & MMC_DATA_WRITE) mult <<= card->csd.r2w_factor; data->timeout_ns = card->csd.tacc_ns * mult; data->timeout_clks = card->csd.tacc_clks * mult; /* * SD cards also have an upper limit on the timeout. */ if (mmc_card_sd(card)) { unsigned int timeout_us, limit_us; timeout_us = data->timeout_ns / 1000; timeout_us += data->timeout_clks * 1000 / (card->host->ios.clock / 1000); if (data->flags & MMC_DATA_WRITE) /* * The limit is really 250 ms, but that is * insufficient for some crappy cards. */ limit_us = 300000; else limit_us = 100000; /* * SDHC cards always use these fixed values. */ if (timeout_us > limit_us || mmc_card_blockaddr(card)) { data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } } }
static int mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) { struct mmc_command cmd; int err; /* Block-addressed cards ignore MMC_SET_BLOCKLEN. */ /* only in SD mode. by scsuh */ if (mmc_card_blockaddr(card) && mmc_card_sd(card)) return 0; mmc_card_claim_host(card); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = 1 << md->block_bits; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 5); mmc_card_release_host(card); if (err) { printk(KERN_ERR "%s: unable to set block size to %d: %d\n", md->disk->disk_name, cmd.arg, err); return -EINVAL; } return 0; }
/*
 * If idle time bkops is running on the card, let's not get
 * into suspend.
 */
/* NOTE(review): this line is the tail of a runtime-suspend handler whose
 * beginning is outside this view — confirm against the full file. */
if (mmc_card_doing_bkops(card) && mmc_card_is_prog_state(card))
	return -EBUSY;
else
	return 0;
} else {
	return mmc_power_save_host(card->host);
}
}

/* Runtime-PM resume hook: a no-op under core runtime PM, otherwise
 * restores host power. */
static int mmc_runtime_resume(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	if (mmc_use_core_runtime_pm(card->host))
		return 0;
	else
		return mmc_power_restore_host(card->host);
}

#ifdef CONFIG_MMC_SD_PENDING_RESUME_CUST_SH
extern bool sh_mmc_pending_resume;
#endif /* CONFIG_MMC_SD_PENDING_RESUME_CUST_SH */

/* Runtime-PM idle hook: schedules a delayed suspend after the card's
 * idle timeout.  The vendor #ifdef blocks gate the scheduling: eMMC is
 * excluded under CONFIG_PM_EMMC_CUST_SH, and SD only schedules when no
 * resume is pending under CONFIG_MMC_SD_PENDING_RESUME_CUST_SH. */
static int mmc_runtime_idle(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);
	struct mmc_host *host = card->host;
	int ret = 0;

	if (mmc_use_core_runtime_pm(card->host)) {
#ifdef CONFIG_PM_EMMC_CUST_SH
		if( !(host->card && mmc_card_mmc( host->card )) ) {
#endif /* CONFIG_PM_EMMC_CUST_SH */
#ifdef CONFIG_MMC_SD_PENDING_RESUME_CUST_SH
		if( (host->card && mmc_card_sd( host->card )) && sh_mmc_pending_resume == false ) {
#endif /* CONFIG_MMC_SD_PENDING_RESUME_CUST_SH */
		ret = pm_schedule_suspend(dev, card->idle_timeout);
		if (ret) {
			pr_err("%s: %s: pm_schedule_suspend failed: err: %d\n",
				mmc_hostname(host), __func__, ret);
			return ret;
		}
#ifdef CONFIG_MMC_SD_PENDING_RESUME_CUST_SH
		}
#endif /* CONFIG_MMC_SD_PENDING_RESUME_CUST_SH */
#ifdef CONFIG_PM_EMMC_CUST_SH
		}
#endif /* CONFIG_PM_EMMC_CUST_SH */
	}
	return ret;
}
static int mmc_select_card(struct mmc_host *host, struct mmc_card *card) { int err; struct mmc_command cmd; BUG_ON(host->card_busy == NULL); if (host->card_selected == card) return MMC_ERR_NONE; host->card_selected = card; cmd.opcode = MMC_SELECT_CARD; cmd.arg = card->rca << 16; // cmd.flags = MMC_RSP_R1; cmd.flags = MMCSD_RSP1; err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) return err; /* * Default bus width is 1 bit. */ host->ios.bus_width = MMC_BUS_WIDTH_1; /* * We can only change the bus width of the selected * card so therefore we have to put the handling * here. */ if (host->caps & MMC_CAP_4_BIT_DATA) { /* * The card is in 1 bit mode by default so * we only need to change if it supports the * wider version. */ if (mmc_card_sd(card) && (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { struct mmc_command cmd; cmd.opcode = SD_APP_SET_BUS_WIDTH; cmd.arg = SD_BUS_WIDTH_4; // cmd.flags = MMC_RSP_R1; cmd.flags = MMCSD_RSP1; err = mmc_wait_for_app_cmd(host, card->rca, &cmd, CMD_RETRIES); if (err != MMC_ERR_NONE) return err; host->ios.bus_width = MMC_BUS_WIDTH_4; } } host->ops->set_ios(host, &host->ios); return MMC_ERR_NONE; }
void mmc_add_card_debugfs(struct mmc_card *card) { struct mmc_host *host = card->host; struct dentry *root; if (!host->debugfs_root) return; root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root); if (IS_ERR(root)) /* Don't complain -- debugfs just isn't enabled */ return; if (!root) /* Complain -- debugfs is enabled, but it failed to * create the directory. */ goto err; card->debugfs_root = root; if (!debugfs_create_x32("state", S_IRUSR, root, &card->state)) goto err; if (mmc_card_mmc(card) || mmc_card_sd(card)) if (!debugfs_create_file("status", S_IRUSR, root, card, &mmc_dbg_card_status_fops)) goto err; if (mmc_card_mmc(card)) if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, &mmc_dbg_ext_csd_fops)) goto err; if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) && (card->host->caps2 & MMC_CAP2_PACKED_WR)) if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card, &mmc_dbg_wr_pack_stats_fops)) goto err; if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) && card->ext_csd.bkops_en) if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card, &mmc_dbg_bkops_stats_fops)) goto err; //ASUS_BSP +++ Gavin_Chang "mmc cmd statistics" if (mmc_card_mmc(card)) if (!debugfs_create_file("cmd_stats", S_IRUSR, root, card, &mmc_dbg_cmd_stats_fops)) goto err; //ASUS_BSP --- Gavin_Chang "mmc cmd statistics" return; err: debugfs_remove_recursive(root); card->debugfs_root = NULL; dev_err(&card->dev, "failed to initialize debugfs\n"); }
/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 * @write: flag to differentiate reads from writes
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
	int write)
{
	/* SD cards use a 100x multiplier where MMC uses 10x. */
	unsigned int factor = mmc_card_sd(card) ? 100 : 10;

	/* Writes additionally scale by the card's R2W factor. */
	if (write)
		factor <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * factor;
	data->timeout_clks = card->csd.tacc_clks * factor;

	/* SD cards also carry an upper bound; SDHC always uses it. */
	if (mmc_card_sd(card)) {
		unsigned int total_us, cap_us;

		total_us = data->timeout_ns / 1000;
		total_us += data->timeout_clks * 1000 /
			(card->host->ios.clock / 1000);

		cap_us = write ? 250000 : 100000;

		if (total_us > cap_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = cap_us * 1000;
			data->timeout_clks = 0;
		}
	}
}
void mmc_cleanup_queue(struct mmc_queue *mq) { struct request_queue *q = mq->queue; unsigned long flags; struct mmc_queue_req *mqrq_cur = mq->mqrq_cur; struct mmc_queue_req *mqrq_prev = mq->mqrq_prev; mmc_queue_resume(mq); kthread_stop(mq->thread); spin_lock_irqsave(q->queue_lock, flags); q->queuedata = NULL; blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); kfree(mqrq_cur->bounce_sg); mqrq_cur->bounce_sg = NULL; kfree(mqrq_cur->sg); mqrq_cur->sg = NULL; if(!mmc_card_sd(mq->card)) { kfree(mqrq_cur->bounce_buf); } mqrq_cur->bounce_buf = NULL; kfree(mqrq_prev->bounce_sg); mqrq_prev->bounce_sg = NULL; kfree(mqrq_prev->sg); mqrq_prev->sg = NULL; if(!mmc_card_sd(mq->card)) { kfree(mqrq_prev->bounce_buf); } mqrq_prev->bounce_buf = NULL; mq->card = NULL; }
void mmc_cleanup_queue(struct mmc_queue *mq) { struct request_queue *q = mq->queue; unsigned long flags; struct mmc_queue_req *mqrq_cur = mq->mqrq_cur; struct mmc_queue_req *mqrq_prev = mq->mqrq_prev; /* Make sure the queue isn't suspended, as that will deadlock */ mmc_queue_resume(mq); /* Then terminate our worker thread */ kthread_stop(mq->thread); /* Empty the queue */ spin_lock_irqsave(q->queue_lock, flags); q->queuedata = NULL; blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); kfree(mqrq_cur->bounce_sg); mqrq_cur->bounce_sg = NULL; kfree(mqrq_cur->sg); mqrq_cur->sg = NULL; if(!mmc_card_sd(mq->card)) kfree(mqrq_cur->bounce_buf); mqrq_cur->bounce_buf = NULL; kfree(mqrq_prev->bounce_sg); mqrq_prev->bounce_sg = NULL; kfree(mqrq_prev->sg); mqrq_prev->sg = NULL; if(!mmc_card_sd(mq->card)) kfree(mqrq_prev->bounce_buf); mqrq_prev->bounce_buf = NULL; mq->card = NULL; }
void mmc_add_card_debugfs(struct mmc_card *card) { struct mmc_host *host = card->host; struct dentry *root; if (!host->debugfs_root) return; root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root); if (IS_ERR(root)) return; if (!root) goto err; card->debugfs_root = root; if (!debugfs_create_x32("state", S_IRUSR, root, &card->state)) goto err; if (mmc_card_mmc(card) || mmc_card_sd(card)) if (!debugfs_create_file("status", S_IRUSR, root, card, &mmc_dbg_card_status_fops)) goto err; if (mmc_card_mmc(card)) if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, &mmc_dbg_ext_csd_fops)) goto err; if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) && (card->host->caps2 & MMC_CAP2_PACKED_WR)) if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card, &mmc_dbg_wr_pack_stats_fops)) goto err; if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) && card->ext_csd.bkops_en) if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card, &mmc_dbg_bkops_stats_fops)) goto err; return; err: debugfs_remove_recursive(root); card->debugfs_root = NULL; dev_err(&card->dev, "failed to initialize debugfs\n"); }
void mmc_add_card_debugfs(struct mmc_card *card) { struct mmc_host *host = card->host; struct dentry *root; DBG("[%s] s\n",__func__); if (!host->debugfs_root) { DBG("[%s] e\n",__func__); return; } root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root); if (IS_ERR(root)) { DBG("[%s] e1\n",__func__); /* Don't complain -- debugfs just isn't enabled */ return; } if (!root) /* Complain -- debugfs is enabled, but it failed to * create the directory. */ goto err; card->debugfs_root = root; if (!debugfs_create_x32("state", S_IRUSR, root, &card->state)) goto err; if (mmc_card_mmc(card) || mmc_card_sd(card)) if (!debugfs_create_file("status", S_IRUSR, root, card, &mmc_dbg_card_status_fops)) goto err; if (mmc_card_mmc(card)) if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, &mmc_dbg_ext_csd_fops)) goto err; DBG("[%s] e2\n",__func__); return; err: debugfs_remove_recursive(root); card->debugfs_root = NULL; dev_err(&card->dev, "failed to initialize debugfs\n"); DBG("[%s] e3\n",__func__); }
/* * Unregister a new MMC card with the driver model, and * (eventually) free it. */ void mmc_remove_card(struct mmc_card *card) { if (mmc_card_sd(card)) card->removed = 1; #ifdef CONFIG_DEBUG_FS mmc_remove_card_debugfs(card); #endif if (mmc_card_present(card)) { if (mmc_host_is_spi(card->host)) { printk(KERN_INFO "%s: SPI card removed\n", mmc_hostname(card->host)); } else { printk(KERN_INFO "%s: card %04x removed\n", mmc_hostname(card->host), card->rca); } device_del(&card->dev); } put_device(&card->dev); }
/* Re-initialise the card on @host via the bus resume callback,
 * power-cycling SD cards first.  Returns the resume result. */
int mmc_reinit_card(struct mmc_host *host)
{
	int err = 0;
	int can_resume;

	printk(KERN_INFO "%s: %s\n", mmc_hostname(host), __func__);

	mmc_bus_get(host);
	can_resume = host->bus_ops && !host->bus_dead &&
		     host->bus_ops->resume;
	if (can_resume) {
		/* SD cards get a brief full power cycle before re-init. */
		if (host->card && mmc_card_sd(host->card)) {
			mmc_power_off(host);
			mdelay(5);
		}
		mmc_power_up(host);
		err = host->bus_ops->resume(host);
	}
	mmc_bus_put(host);

	printk(KERN_INFO "%s: %s return %d\n", mmc_hostname(host),
		__func__, err);
	return err;
}
/*
 * mmc_download - write a RAM image to flash in 128-block chunks with
 * read-back verification of every byte.
 * @dev_id:   host/slot index
 * @imgaddr:  source address in RAM
 * @size:     image size in bytes (rounded up to whole blocks)
 * @addr:     destination offset in bytes; must be block aligned
 * @bootarea: nonzero to write into boot partition 1 (eMMC only)
 *
 * Returns 0 on success, MMC_ERR_FAILED for an unaligned address, or a
 * negative -__LINE__ code identifying the failing step.
 *
 * Fixes vs. original:
 *  - blknr/total_blks are computed before the first failure path, so
 *    the final status printf never reads them uninitialized when the
 *    partition-config switch fails early.
 *  - the chunk verify loop now prints the first mismatching byte, the
 *    same diagnostic the tail (left_blks) loop already printed.
 */
int mmc_download(int dev_id, u32 imgaddr, u32 size, u32 addr, int bootarea)
{
	int ret;
	int i, j, result = 0;
	u8 val;
	u8 *ext_csd;
	uchar *buf, *chkbuf;
	u32 chunks, chunk_blks = 128, left_blks, blknr;
	u32 total_blks;
	struct mmc_card *card;

	if (!size)
		return 0;
	if (addr % MMC_BLOCK_SIZE)
		return MMC_ERR_FAILED;

	/* Compute the block range up front so the status printfs below
	 * always see initialized values, even on an early goto done. */
	blknr = addr / MMC_BLOCK_SIZE;
	total_blks = (size + MMC_BLOCK_SIZE - 1) / MMC_BLOCK_SIZE;

	card = mmc_get_card(dev_id);
	ext_csd = &card->raw_ext_csd[0];

	if (bootarea && !mmc_card_sd(card) && card->ext_csd.part_en) {
		/* configure to specified partition */
		val = (ext_csd[EXT_CSD_PART_CFG] & ~0x7) |
		      EXT_CSD_PART_CFG_BOOT_PART_1;
		if (mmc_set_part_config(card, val) != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
	}

	/* multiple block write */
	chunks = total_blks / chunk_blks;
	left_blks = total_blks % chunk_blks;

	buf = (uchar*)imgaddr;
	chkbuf = (uchar*)MMC_BUF_ADDR;

	for (i = 0; i < chunks; i++) {
		ret = mmc_block_write(dev_id, blknr + i * chunk_blks,
				      chunk_blks, (unsigned long*)buf);
		if (ret != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
		ret = mmc_block_read(dev_id, blknr + i * chunk_blks,
				     chunk_blks, (unsigned long*)chkbuf);
		if (ret != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
		/* Byte-wise verify of the chunk just written. */
		for (j = 0; j < chunk_blks * MMC_BLOCK_SIZE; j++) {
			if (buf[j] == chkbuf[j])
				continue;
			printf("[SD%d] chkbuf[%d] = %xh (!= %xh) \n",
			       dev_id, j, chkbuf[j], buf[j]);
			result = -__LINE__;
			goto done;
		}
		printf("[SD%d] Write %3d blocks from 0x%.8x(RAM) to 0x%.8x(FLASH).\n",
		       dev_id, chunk_blks, (unsigned int)buf,
		       (blknr + i * chunk_blks) * MMC_BLOCK_SIZE);
		buf += (chunk_blks * MMC_BLOCK_SIZE);
	}

	if (left_blks) {
		ret = mmc_block_write(dev_id, blknr + chunks * chunk_blks,
				      left_blks, (unsigned long*)buf);
		if (ret != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
		ret = mmc_block_read(dev_id, blknr + chunks * chunk_blks,
				     left_blks, (unsigned long*)chkbuf);
		if (ret != MMC_ERR_NONE) {
			result = -__LINE__;
			goto done;
		}
		/* Byte-wise verify of the final partial chunk. */
		for (j = 0; j < left_blks * MMC_BLOCK_SIZE; j++) {
			if (buf[j] == chkbuf[j])
				continue;
			printf("[SD%d] chkbuf[%d] = %xh (!= %xh) \n",
			       dev_id, j, chkbuf[j], buf[j]);
			result = -__LINE__;
			goto done;
		}
		printf("[SD%d] Write %3d blocks from 0x%.8x(RAM) to 0x%.8x(FLASH).\n",
		       dev_id, left_blks, (unsigned int)buf,
		       (blknr + chunks * chunk_blks) * MMC_BLOCK_SIZE);
	}

done:
	if (bootarea && !mmc_card_sd(card) && card->ext_csd.part_en) {
		/* configure to user partition */
		val = (ext_csd[EXT_CSD_PART_CFG] & ~0x7) |
		      EXT_CSD_PART_CFG_DEFT_PART;
		if (mmc_set_part_config(card, val) != MMC_ERR_NONE)
			result = -__LINE__;
	}

	if (!result) {
		printf("[SD%d] Download %d blocks (%d bytes) to 0x%.8x successfully\n",
		       dev_id, total_blks, total_blks * MMC_BLOCK_SIZE,
		       blknr * MMC_BLOCK_SIZE);
	} else {
		printf("[SD%d] Download %d blocks (%d bytes) to 0x%.8x failed %d\n",
		       dev_id, total_blks, total_blks * MMC_BLOCK_SIZE,
		       blknr * MMC_BLOCK_SIZE, result);
	}
	return result;
}
/*
 * sd_reset_sequence - put the EXI-attached card into SPI mode and run
 * the SD/SDHC/MMC initialization handshake.
 *
 * On success the card type and "present" state are recorded in
 * host->card; returns the last R1 response (0x00 on success) or
 * -ENODEV on a transport failure.
 */
static int sd_reset_sequence(struct sd_host *host)
{
	struct sd_command *cmd = &host->cmd;
	u8 d;
	int i;
	int retval = 0;
	u32 cmd_ret = 0;

	host->card.state = 0;

	/*
	 * Wait at least 80 dummy clock cycles with the card deselected
	 * and with the MOSI line continuously high.
	 */
	exi_dev_take(host->exi_device);
	exi_dev_deselect(host->exi_device);
	for (i = 0; i < SD_IDLE_CYCLES; i++) {
		d = 0xff;
		exi_dev_write(host->exi_device, &d, sizeof(d));
	}
	exi_dev_give(host->exi_device);

	/*
	 * Send a CMD0, card must ack with "idle state" (0x01).
	 * This puts the card into SPI mode and soft resets it.
	 * CRC checking is disabled by default.
	 */
	for (i = 0; i < 255; i++) {
		/* CMD0 */
		sd_cmd_go_idle_state(cmd);
		retval = sd_run_no_data_command(host, cmd);
		if (retval < 0) {
			retval = -ENODEV;
			goto out;
		}
		if (retval == R1_SPI_IDLE) {
			break;
		}
	}
	if (retval != R1_SPI_IDLE) {
		/* card never reached idle state */
		retval = -ENODEV;
		goto out;
	}

	/*
	 * Send CMD8 and CMD58 for SDHC support
	 */
	for (i = 0; i < 8; i++) {
		sd_cmd_crc(cmd, SD_SEND_IF_COND, 0x01AA, 0x87); //CMD8 + Check and CRC
		retval = sd_start_command(host, cmd);
		if(retval==0x01) {
			/* read the 4-byte R7 echo of the check pattern */
			memset(&cmd_ret, 0, sizeof(cmd_ret));
			spi_read(host, &cmd_ret, sizeof(cmd_ret));
			sd_end_command(host);
			if (cmd_ret == 0x01AA) { //Check if CMD8 is alright
				sd_cmd(cmd, MMC_SPI_READ_OCR, 0);//CMD58
				retval = sd_start_command(host, cmd);
				memset(&cmd_ret, 0, sizeof(cmd_ret));
				spi_read(host, &cmd_ret, sizeof(cmd_ret));
				sd_end_command(host);
				if(retval==0x01) {
					//Everything is alright
					break;
				}
			}
			break;
		} else if(retval==0x05) {
			//NO SDHC-Card
			break;
		}
	}

	/*
	 * Send a ACMD41 to activate card initialization process.
	 * SD card must ack with "ok" (0x00).
	 * MMC card will report "invalid command" (0x04).
	 */
	for (i = 0; i < 65535; i++) {
		/* ACMD41 = CMD55 + CMD41 */
		sd_cmd(cmd, MMC_APP_CMD, 0);
		retval = sd_run_no_data_command(host, cmd);
		if (retval < 0) {
			retval = -ENODEV;
			goto out;
		}
		/* bit 30 = HCS (high capacity), 0x100000 = 3.2-3.3 V window */
		sd_cmd(cmd, SD_APP_OP_COND, (1<<30)|0x100000);
		retval = sd_run_no_data_command(host, cmd);
		if (retval < 0) {
			retval = -ENODEV;
			goto out;
		}
		if (retval == 0x00) {
			/* we found a SD card */
			mmc_card_set_present(&host->card);
			host->card.type = MMC_TYPE_SD;
			break;
		} else if(retval != 0x01) {
			DBG("ACMD41 return: %d\n", retval);
		}
		if ((retval & R1_SPI_ILLEGAL_COMMAND)) {
			/* this looks like a MMC card */
			break;
		}
	}

	/*
	 * MMC cards require CMD1 to activate card initialization process.
	 * MMC card must ack with "ok" (0x00)
	 */
	if (!mmc_card_sd(&host->card)) {
		for (i = 0; i < 65535; i++) {
			sd_cmd(cmd, MMC_SEND_OP_COND, 0);
			retval = sd_run_no_data_command(host, cmd);
			if (retval < 0) {
				retval = -ENODEV;
				goto out;
			}
			if (retval == 0x00) {
				/* we found a MMC card */
				mmc_card_set_present(&host->card);
				break;
			}
		}
		if (retval != 0x00) {
			/* card answered neither ACMD41 nor CMD1: unusable */
			DBG("MMC card, bad, retval=%02x\n", retval);
			sd_card_set_bad(host);
		}
	}

out:
	return retval;
}
/** * mmc_set_data_timeout - set the timeout for a data command * @data: data phase for command * @card: the MMC card associated with the data transfer * * Computes the data timeout parameters according to the * correct algorithm given the card type. */ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) { unsigned int mult; /* * SDIO cards only define an upper 1 s limit on access. */ if (mmc_card_sdio(card)) { data->timeout_ns = 1000000000; data->timeout_clks = 0; return; } /* * SD cards use a 100 multiplier rather than 10 */ mult = mmc_card_sd(card) ? 100 : 10; /* * Scale up the multiplier (and therefore the timeout) by * the r2w factor for writes. */ if (data->flags & MMC_DATA_WRITE) mult <<= card->csd.r2w_factor; data->timeout_ns = card->csd.tacc_ns * mult; data->timeout_clks = card->csd.tacc_clks * mult; /* * SD cards also have an upper limit on the timeout. */ if (mmc_card_sd(card)) { unsigned int timeout_us, limit_us; timeout_us = data->timeout_ns / 1000; timeout_us += data->timeout_clks * 1000 / (card->host->ios.clock / 1000); if (data->flags & MMC_DATA_WRITE) /* * The limit is really 250 ms, but that is * insufficient for some crappy cards. */ limit_us = 300000; else limit_us = 100000; /* * SDHC cards always use these fixed values. */ if (timeout_us > limit_us || mmc_card_blockaddr(card)) { data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } } /* * Some cards need very high timeouts if driven in SPI mode. * The worst observed timeout was 900ms after writing a * continuous stream of data until the internal logic * overflowed. */ if (mmc_host_is_spi(card->host)) { if (data->flags & MMC_DATA_WRITE) { if (data->timeout_ns < 1000000000) data->timeout_ns = 1000000000; /* 1s */ } else { if (data->timeout_ns < 100000000) data->timeout_ns = 100000000; /* 100ms */ } } }
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is released via the goto-cleanup chain at the end.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
	spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	/* Use the device's DMA mask as bounce limit when it has one. */
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	/* NOTE(review): these memsets zero the mqrq_cur/mqrq_prev POINTER
	 * fields (sizeof of a pointer), which are then immediately
	 * reassigned below — the memsets look redundant; confirm intent. */
	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	/* Single-segment hosts get bounce buffering. */
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		/* SD uses statically provided buffers; MMC allocates. */
		if(!mmc_card_sd(card))
			bouncesz = MMC_QUEUE_BOUNCESZ;
		else
			bouncesz = MMC_QUEUE_SD_BOUNCESZ;

		/* Clamp the bounce size to every host limit. */
		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			if(!mmc_card_sd(card))
				mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_cur->bounce_buf = mmc_queue_cur_bounce_buf;
			if (!mqrq_cur->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			if(!mmc_card_sd(card))
				mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_prev->bounce_buf = mmc_queue_prev_bounce_buf;
			if (!mqrq_prev->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				/* fall back to the non-bounce path below */
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		/* Configure the queue for bouncing only if BOTH buffers exist. */
		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	/* Non-bounce path: use the host's native limits. */
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	/* SD bounce buffers are not heap-allocated; never kfree them. */
	if(!mmc_card_sd(card))
		kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	if(!mmc_card_sd(card))
		kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
/** * * added this from linux 3.10 to get better performance for 64 gb sd cards cheers Jukkaman * mmc_set_data_timeout - set the timeout for a data command * @data: data phase for command * @card: the MMC card associated with the data transfer * * Computes the data timeout parameters according to the * correct algorithm given the card type. */ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) { unsigned int mult; /* * SDIO cards only define an upper 1 s limit on access. */ if (mmc_card_sdio(card)) { data->timeout_ns = 1000000000; data->timeout_clks = 0; return; } /* * SD cards use a 100 multiplier rather than 10 */ mult = mmc_card_sd(card) ? 100 : 10; /* * Scale up the multiplier (and therefore the timeout) by * the r2w factor for writes. */ if (data->flags & MMC_DATA_WRITE) mult <<= card->csd.r2w_factor; data->timeout_ns = card->csd.tacc_ns * mult; data->timeout_clks = card->csd.tacc_clks * mult; /* * SD cards also have an upper limit on the timeout. */ if (mmc_card_sd(card)) { unsigned int timeout_us, limit_us; timeout_us = data->timeout_ns / 1000; if (mmc_host_clk_rate(card->host)) timeout_us += data->timeout_clks * 1000 / (mmc_host_clk_rate(card->host) / 1000); if (data->flags & MMC_DATA_WRITE) /* * The MMC spec "It is strongly recommended * for hosts to implement more than 500ms * timeout value even if the card indicates * the 250ms maximum busy length." Even the * previous value of 300ms is known to be * insufficient for some cards. */ limit_us = 3000000; else limit_us = 100000; /* * SDHC cards always use these fixed values. */ if (timeout_us > limit_us || mmc_card_blockaddr(card)) { data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } } /* * Some cards require longer data read timeout than indicated in CSD. * Address this by setting the read timeout to a "reasonably high" * value. For the cards tested, 300ms has proven enough. If necessary, * this value can be increased if other problematic cards require this. 
*/ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { data->timeout_ns = 300000000; data->timeout_clks = 0; } /* * Some cards need very high timeouts if driven in SPI mode. * The worst observed timeout was 900ms after writing a * continuous stream of data until the internal logic * overflowed. */ if (mmc_host_is_spi(card->host)) { if (data->flags & MMC_DATA_WRITE) { if (data->timeout_ns < 1000000000) data->timeout_ns = 1000000000; /* 1s */ } else { if (data->timeout_ns < 100000000) data->timeout_ns = 100000000; /* 100ms */ } } }
//merged from 8960_ICS,20130116,yeganlin void mmc_card_host_inserted(struct mmc_card *card,int i) { if((card->host->inserted == 0)&&(mmc_card_sd(card))) card->host->inserted = i; }
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_command msdc_stop; #ifdef MTK_MSDC_USE_CMD23 struct mmc_command msdc_sbc; #endif struct mmc_request msdc_mrq; struct msdc_host *host_ctl; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL printk("user want access %d partition\n",msdc_ctl->partition); #endif mmc_send_ext_csd(host_ctl->mmc->card, l_buf); switch (msdc_ctl->partition){ case BOOT_PARTITION_1: if (0x1 != (l_buf[179] & 0x7)){ /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case BOOT_PARTITION_2: if (0x2 != (l_buf[179] & 0x7)){ /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)){ /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } if(msdc_ctl->total_size > 64*1024){ msdc_ctl->result = -1; return msdc_ctl->result; } memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); memset(&msdc_stop, 0, sizeof(struct mmc_command)); #ifdef MTK_MSDC_USE_CMD23 memset(&msdc_sbc, 0, sizeof(struct mmc_command)); #endif msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if(msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if (msdc_ctl->iswrite){ msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { 
if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size); } #ifdef MTK_MSDC_USE_CMD23 if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)){ msdc_mrq.sbc = &msdc_sbc; msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT; msdc_mrq.sbc->arg = msdc_data.blocks; msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; } #endif msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)){ printk("this device use byte address!!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_stop.opcode = MMC_STOP_TRANSMISSION; msdc_stop.arg = 0; msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; msdc_data.stop = &msdc_stop; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL printk("total size is %d\n",msdc_ctl->total_size); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite){ if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size); } } if (msdc_ctl->partition){ mmc_send_ext_csd(host_ctl->mmc->card,l_buf); if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; 
l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } mmc_release_host(host_ctl->mmc); if (msdc_cmd.error) msdc_ctl->result = msdc_cmd.error; if (msdc_data.error){ msdc_ctl->result = msdc_data.error; } else { msdc_ctl->result = 0; } dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_blk_request brq; int ret = 1; if (mmc_card_claim_host(card)) goto flush_queue; do { struct mmc_command cmd; u32 readcmd, writecmd; memset(&brq, 0, sizeof(struct mmc_blk_request)); brq.mrq.cmd = &brq.cmd; brq.mrq.data = &brq.data; brq.cmd.arg = req->sector; if (!mmc_card_blockaddr(card)) brq.cmd.arg <<= 9; brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; brq.data.blksz = 1 << md->block_bits; brq.stop.opcode = MMC_STOP_TRANSMISSION; brq.stop.arg = 0; brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC; brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); if (brq.data.blocks > card->host->max_blk_count) brq.data.blocks = card->host->max_blk_count; mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ); #ifdef CONFIG_MMC_SUPPORT_MOVINAND if (mmc_card_movinand(card)) { if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) { cmd.opcode = MMC_SET_BLOCK_COUNT; cmd.arg = req->nr_sectors; cmd.flags = MMC_RSP_R1; ret = mmc_wait_for_cmd(card->host, &cmd, 2); } if (rq_data_dir(req) == READ) { if (brq.data.blocks > 1) { brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK; brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI); // brq.mrq.stop = &brq.stop; } else { brq.cmd.opcode = MMC_READ_SINGLE_BLOCK; brq.data.flags |= MMC_DATA_READ; brq.mrq.stop = NULL; } } else { brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI; // brq.mrq.stop = &brq.stop; } } else { #endif /* * If the host doesn't support multiple block writes, force * block writes to single block. SD cards are excepted from * this rule as they support querying the number of * successfully written sectors. 
*/ if (rq_data_dir(req) != READ && !(card->host->caps & MMC_CAP_MULTIWRITE) && !mmc_card_sd(card)) brq.data.blocks = 1; if (brq.data.blocks > 1) { brq.data.flags |= MMC_DATA_MULTI; brq.mrq.stop = &brq.stop; readcmd = MMC_READ_MULTIPLE_BLOCK; writecmd = MMC_WRITE_MULTIPLE_BLOCK; } else { brq.mrq.stop = NULL; readcmd = MMC_READ_SINGLE_BLOCK; writecmd = MMC_WRITE_BLOCK; } if (rq_data_dir(req) == READ) { brq.cmd.opcode = readcmd; brq.data.flags |= MMC_DATA_READ; } else { brq.cmd.opcode = writecmd; brq.data.flags |= MMC_DATA_WRITE; } #ifdef CONFIG_MMC_SUPPORT_MOVINAND } #endif brq.data.sg = mq->sg; brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg); mmc_wait_for_req(card->host, &brq.mrq); if (brq.cmd.error) { printk(KERN_ERR "%s: error %d sending read/write command\n", req->rq_disk->disk_name, brq.cmd.error); goto cmd_err; } if (brq.data.error) { printk(KERN_ERR "%s: error %d transferring data\n", req->rq_disk->disk_name, brq.data.error); goto cmd_err; } if (brq.stop.error) { printk(KERN_ERR "%s: error %d sending stop command\n", req->rq_disk->disk_name, brq.stop.error); goto cmd_err; } if (rq_data_dir(req) != READ) { do { int err; cmd.opcode = MMC_SEND_STATUS; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 5); if (err) { printk(KERN_ERR "%s: error %d requesting status\n", req->rq_disk->disk_name, err); goto cmd_err; } #ifdef CONFIG_MMC_SUPPORT_MOVINAND /* Work-around for broken cards setting READY_FOR_DATA * when not actually ready. */ if (mmc_card_movinand(card)) { if (R1_CURRENT_STATE(cmd.resp[0]) == 7) cmd.resp[0] &= ~R1_READY_FOR_DATA; } #endif } while (!(cmd.resp[0] & R1_READY_FOR_DATA)); #if 0 if (cmd.resp[0] & ~0x00000900) printk(KERN_ERR "%s: status = %08x\n", req->rq_disk->disk_name, cmd.resp[0]); if (mmc_decode_status(cmd.resp)) goto cmd_err; #endif } /* * A block was successfully transferred. 
*/ spin_lock_irq(&md->lock); ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered); if (!ret) { /* * The whole request completed successfully. */ add_disk_randomness(req->rq_disk); blkdev_dequeue_request(req); end_that_request_last(req, 1); } spin_unlock_irq(&md->lock); } while (ret); mmc_card_release_host(card); return 1; cmd_err: /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors * if the controller can do proper error reporting. * * For reads we just fail the entire chunk as that should * be safe in all cases. */ if (rq_data_dir(req) != READ && mmc_card_sd(card)) { u32 blocks; unsigned int bytes; blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) { if (card->csd.write_partial) bytes = blocks << md->block_bits; else bytes = blocks << 9; spin_lock_irq(&md->lock); ret = end_that_request_chunk(req, 1, bytes); spin_unlock_irq(&md->lock); } } else if (rq_data_dir(req) != READ && (card->host->caps & MMC_CAP_MULTIWRITE)) { spin_lock_irq(&md->lock); ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered); spin_unlock_irq(&md->lock); } flush_queue: mmc_card_release_host(card); spin_lock_irq(&md->lock); while (ret) { ret = end_that_request_chunk(req, 0, req->current_nr_sectors << 9); } add_disk_randomness(req->rq_disk); blkdev_dequeue_request(req); end_that_request_last(req, 0); spin_unlock_irq(&md->lock); return 0; }
/* * Given the decoded CSD structure, decode the raw CID to our CID structure. */ static void mmc_decode_cid(struct mmc_card *card) { u32 *resp = card->raw_cid; memset(&card->cid, 0, sizeof(struct mmc_cid)); if (mmc_card_sd(card)) { card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4); card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4); card->cid.serial = UNSTUFF_BITS(resp, 24, 32); card->cid.year = UNSTUFF_BITS(resp, 12, 8); card->cid.month = UNSTUFF_BITS(resp, 8, 4); card->cid.year += 2000; } else { /* * The selection of the format here is guesswork based upon * information people have sent to date. */ switch (card->csd.mmca_vsn) { case 0: /* MMC v1.0 - v1.2 */ case 1: /* MMC v1.4 */ card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); card->cid.serial = UNSTUFF_BITS(resp, 16, 24); card->cid.month = UNSTUFF_BITS(resp, 12, 4); card->cid.year = UNSTUFF_BITS(resp, 8, 4); card->cid.year += 1997; break; case 2: /* MMC v2.0 - v2.2 */ case 3: /* MMC v3.1 - v3.3 */ card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); card->cid.prod_name[2] = 
UNSTUFF_BITS(resp, 80, 8); card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); card->cid.serial = UNSTUFF_BITS(resp, 16, 32); card->cid.month = UNSTUFF_BITS(resp, 12, 4); card->cid.year = UNSTUFF_BITS(resp, 8, 4); card->cid.year += 1997; break; default: sd_printk(KERN_ERR, "card has unknown MMCA" " version %d\n", card->csd.mmca_vsn); break; } } }
void mmc_add_card_debugfs(struct mmc_card *card) { struct mmc_host *host = card->host; struct dentry *root; if (!host->debugfs_root) return; root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root); if (IS_ERR(root)) /* Don't complain -- debugfs just isn't enabled */ return; if (!root) /* Complain -- debugfs is enabled, but it failed to * create the directory. */ goto err; card->debugfs_root = root; if (!debugfs_create_x32("state", S_IRUSR, root, &card->state)) goto err; if (mmc_card_mmc(card) || mmc_card_sd(card)) if (!debugfs_create_file("status", S_IRUSR, root, card, &mmc_dbg_card_status_fops)) goto err; if (mmc_card_mmc(card)) { if (!debugfs_create_file("ext_csd", S_IRUSR, root, card, &mmc_dbg_ext_csd_fops)) goto err; if (!debugfs_create_file("eol_status", S_IRUSR, root, card, &mmc_dbg_ext_csd_eol_fops)) goto err; if (!debugfs_create_file("dhs_type_a", S_IRUSR, root, card, &mmc_dbg_ext_csd_life_time_type_a_fops)) goto err; if (!debugfs_create_file("dhs_type_b", S_IRUSR, root, card, &mmc_dbg_ext_csd_life_time_type_b_fops)) goto err; if (!debugfs_create_file("emmc_device_type", S_IRUSR, root, card, &mmc_dbg_ext_csd_device_type_fops)) goto err; if (!debugfs_create_file("firmware_version", S_IRUSR, root, card, &mmc_dbg_ext_csd_fw_v_fops)) goto err; if (!debugfs_create_file("bkops_status", S_IRUSR, root, card, &mmc_dbg_ext_csd_bkops_status_fops)) goto err; if (!debugfs_create_file("bkops_support", S_IRUSR, root, card, &mmc_dbg_ext_csd_bkops_support_fops)) goto err; if (!debugfs_create_file("poweron_notify", S_IRUSR, root, card, &mmc_dbg_ext_csd_pon_notify_fops)) goto err; if (!debugfs_create_file("hpi_support", S_IRUSR, root, card, &mmc_dbg_ext_csd_hpi_support_fops)) goto err; } if (mmc_card_sd(card)) if (!debugfs_create_file("speed_class", S_IROTH, root, card, &mmc_dbg_card_speed_class_fops)) goto err; return; err: debugfs_remove_recursive(root); card->debugfs_root = NULL; dev_err(&card->dev, "failed to initialize debugfs\n"); }
/*
 * Exercise an MMC/SD card with erase + single-block, multi-block, and
 * interleaved write/read test passes, cfg->count times (-1 = forever).
 *
 * Each iteration re-initialises host and card, optionally reprograms
 * clock/bus-width/DMA, erases the test window, then runs the passes that
 * are enabled in cfg.  Returns 0 on success or a negative __LINE__-coded
 * failure location.
 *
 * NOTE(review): mmc_send_status() is called with &status, which is not a
 * local — presumably a file-scope variable; verify.  Also, for an SD card
 * with an 8-bit buswidth request the code prints "(SKIP)" but still calls
 * mmc_set_bus_width() with the 8-bit value — confirm that is intended.
 */
int mmc_test_mem_card(struct mmc_test_config *cfg)
{
	int id, count, forever;
	int ret, chk_result, tid = 0, result = 0;
	unsigned int chunks, chunk_blks, left_blks, pass = 0, fail = 0;
	unsigned int total_blks;
	unsigned int i, j;
	unsigned int blksz;
	unsigned int clkhz;
	char pattern = 0;
	char *buf;
	unsigned long blknr;
	struct mmc_host *host;
	struct mmc_card *card;

	id = cfg->id;
	count = cfg->count;
	buf = cfg->buf;
	blknr = cfg->blknr;
	blksz = cfg->blksz;
	chk_result = cfg->chk_result;
	chunk_blks = cfg->chunk_blks;
	/* round the byte size up to whole blocks */
	total_blks = (cfg->total_size + blksz - 1) / blksz;
	forever = (count == -1) ? 1 : 0;

	host = mmc_get_host(id);
	card = mmc_get_card(id);

	while (forever || count--) {
		/* Banner with the effective test parameters. */
		printf("[TST] ==============================================\n");
		printf("[TST] BEGIN: %d/%d, No Stop(%d)\n",
			(cfg->count != -1) ? cfg->count - count : 0,
			(cfg->count != -1) ? cfg->count : 0, forever);
		printf("[TST] ----------------------------------------------\n");
		printf("[TST] Mode : %d\n", cfg->mode);
		printf("[TST] Clock : %d kHz\n", cfg->clock / 1000);
		printf("[TST] BusWidth: %d bits\n", cfg->buswidth);
		printf("[TST] BurstSz : %d bytes\n", 0x1 << cfg->burstsz);
		printf("[TST] BlkAddr : %xh\n", blknr);
		printf("[TST] BlkSize : %dbytes\n", blksz);
		printf("[TST] TstBlks : %d\n", total_blks);
#if defined(BB_MT6575)
		printf("[TST] AutoCMD : 12(%d), 23(%d)\n",
			(cfg->autocmd & MSDC_AUTOCMD12) ? 1 : 0,
			(cfg->autocmd & MSDC_AUTOCMD23) ? 1 : 0);
#endif
		printf("[TST] ----------------------------------------------\n");

		/* Fresh host/card init every iteration. */
		if (mmc_init_host(host, id) != 0) {
			result = -__LINE__;
			goto failure;
		}
		if (mmc_init_card(host, card) != 0) {
			result = -__LINE__;
			goto failure;
		}
#if defined(BB_MT6575)
		msdc_set_dma(host, (u8)cfg->burstsz, (u32)cfg->flags);
		msdc_set_autocmd(host, cfg->autocmd, 1);
#endif
		/* change uhs-1 mode */
#if 0
		if (mmc_card_uhs1(card)) {
			if (mmc_switch_uhs1(host, card, cfg->uhsmode) != 0) {
				result = -__LINE__;
				goto failure;
			}
		}
#endif
		/* change clock (never above the card's advertised maximum) */
		if (cfg->clock) {
			clkhz = card->maxhz < cfg->clock ?
				card->maxhz : cfg->clock;
			mmc_set_clock(host, mmc_card_ddr(card), clkhz);
		}
		if (mmc_card_sd(card) && cfg->buswidth == HOST_BUS_WIDTH_8) {
			printf("[TST] SD card doesn't support 8-bit bus width (SKIP)\n");
			result = MMC_ERR_NONE;
		}
		if (mmc_set_bus_width(host, card, cfg->buswidth) != 0) {
			result = -__LINE__;
			goto failure;
		}
		/* cmd16 is illegal while card is in ddr mode */
		if (!(mmc_card_mmc(card) && mmc_card_ddr(card))) {
			if (mmc_set_blk_length(host, blksz) != 0) {
				result = -__LINE__;
				goto failure;
			}
		}
#if defined(BB_MT6575)
		if (cfg->piobits) {
			printf("[TST] PIO bits: %d\n", cfg->piobits);
			msdc_set_pio_bits(host, cfg->piobits);
		}
#endif
		tid = result = 0;

		/* Erase the whole test window before writing patterns. */
		if (mmc_erase_start(card, blknr * blksz) != MMC_ERR_NONE) {
			result = -__LINE__;
			goto failure;
		}
		if (mmc_erase_end(card, (blknr + total_blks) * blksz) != MMC_ERR_NONE) {
			result = -__LINE__;
			goto failure;
		}
		if (mmc_erase(card, MMC_ERASE_NORMAL) != MMC_ERR_NONE) {
			result = -__LINE__;
			goto failure;
		}
		printf("[TST] 0x%x - 0x%x Erased\n", blknr * blksz,
			(blknr + total_blks) * blksz);

		mmc_send_status(host, card, &status);

		if (cfg->tst_single) {
			/* single block write: pattern varies per block+iteration */
			for (i = 0; i < total_blks; i++) {
				pattern = (i + count) % 256;
				memset(buf, pattern, blksz);
				ret = mmc_block_write(id, blknr + i, 1, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					printf("test single block write failed (%d)\n", i);
					result = -__LINE__;
					goto failure;
				}
			}
			printf(TC_MSG, host->id, result == 0 ?
				"PASS" : "FAIL", tid++, "test single block write\n");
			if (result)
				break;
			/* single block read; verify pattern when chk_result set */
			for (i = 0; i < total_blks && !result; i++) {
				pattern = (i + count) % 256;
				/* populate buffer with different pattern */
				memset(buf, pattern + 1, blksz);
				ret = mmc_block_read(id, blknr + i, 1, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					result = -__LINE__;
					goto failure;
				}
				if (chk_result) {
					for (j = 0; j < blksz; j++) {
						if (buf[j] != pattern) {
							result = -__LINE__;
							goto failure;
						}
					}
				}
			}
			printf(TC_MSG, host->id, result == 0 ?
				"PASS" : "FAIL", tid++, "test single block read\n");
			if (result) {
				printf("[SD%d]\t\tread back pattern(0x%.2x) failed\n",
					id, pattern);
				goto failure;
			}
		}

		mmc_send_status(host, card, &status);

		if (cfg->tst_multiple) {
			/* multiple block write: full chunks then the remainder */
			chunks = total_blks / chunk_blks;
			left_blks = total_blks % chunk_blks;
			for (i = 0; i < chunks; i++) {
				pattern = (i + count) % 256;
				memset(buf, pattern, blksz * chunk_blks);
				ret = mmc_block_write(id, blknr + i * chunk_blks,
					chunk_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					result = -__LINE__;
					goto failure;
				}
			}
			if (!result && left_blks) {
				pattern = (i + count) % 256;
				memset(buf, pattern, blksz * left_blks);
				ret = mmc_block_write(id, blknr + chunks * chunk_blks,
					left_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					result = -__LINE__;
					goto failure;
				}
			}
			printf(TC_MSG, host->id, result == 0 ?
				"PASS" : "FAIL", tid++, "test multiple block write\n");
			if (result)
				goto failure;
			/* multiple block read */
			for (i = 0; i < chunks; i++) {
				pattern = (i + count) % 256;
				/* populate buffer with different pattern */
				memset(buf, pattern + 1, blksz);
				ret = mmc_block_read(id, blknr + i * chunk_blks,
					chunk_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					printf("[SD%d]\t\tread %d blks failed(ret = %d blks)\n",
						host->id, chunk_blks, ret);
					result = -__LINE__;
					goto failure;
				}
				if (chk_result) {
					for (j = 0; j < chunk_blks * blksz; j++) {
						if (buf[j] == pattern)
							continue;
						result = -__LINE__;
						printf("[SD%d]\t\t%xh = %x (!= %x)\n",
							host->id, blknr + i * chunk_blks + j,
							buf[j], pattern);
						goto failure;
					}
				}
			}
			if (!result && left_blks) {
				/* NOTE(review): remainder read uses i % 256 while the
				 * remainder write used (i + count) % 256 — confirm. */
				pattern = i % 256;
				/* populate buffer with different pattern */
				memset(buf, pattern + 1, blksz);
				ret = mmc_block_read(id, blknr + chunks * chunk_blks,
					left_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					printf("[SD%d]\t\tread %d blks failed(ret = %d blks)\n",
						host->id, left_blks, ret);
					result = -__LINE__;
					goto failure;
				}
				if (chk_result) {
					for (j = 0; j < left_blks * blksz; j++) {
						if (buf[j] == pattern)
							continue;
						printf("[SD%d]\t\t%xh = %x (!= %x)\n",
							host->id, blknr + chunks * chunk_blks + j,
							buf[j], pattern);
						result = -__LINE__;
						goto failure;
					}
				}
			}
			printf(TC_MSG, host->id, result == 0 ?
				"PASS" : "FAIL", tid++, "test multiple block read\n");
			if (result)
				goto failure;
		}

		mmc_send_status(host, card, &status);

		if (cfg->tst_interleave) {
			/* interleaved write-then-read per chunk */
			chunks = total_blks / chunk_blks;
			left_blks = total_blks % chunk_blks;
			for (i = 0; i < chunks; i++) {
				pattern = (i + count) % 256;
				memset(buf, pattern, blksz * chunk_blks);
				ret = mmc_block_write(id, blknr + i * chunk_blks,
					chunk_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					result = -__LINE__;
					goto failure;
				}
				/* populate buffer with different pattern */
				memset(buf, pattern + 1, blksz * chunk_blks);
				ret = mmc_block_read(id, blknr + i * chunk_blks,
					chunk_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					result = -__LINE__;
					goto failure;
				}
				if (chk_result) {
					for (j = 0; j < chunk_blks * blksz; j++) {
						if (buf[j] == pattern)
							continue;
						result = -__LINE__;
						goto failure;
					}
				}
			}
			if (!result && left_blks) {
				pattern = (i + count) % 256;
				memset(buf, pattern, blksz * left_blks);
				ret = mmc_block_write(id, blknr + chunks * chunk_blks,
					left_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					result = -__LINE__;
					goto failure;
				}
				/* populate buffer with different pattern */
				memset(buf, pattern + 1, blksz * left_blks);
				ret = mmc_block_read(id, blknr + chunks * chunk_blks,
					left_blks, (unsigned long*)buf);
				if (ret != MMC_ERR_NONE) {
					result = -__LINE__;
					break;
				}
				if (chk_result) {
					for (j = 0; j < left_blks * blksz; j++) {
						if (buf[j] == pattern)
							continue;
						result = -__LINE__;
						goto failure;
					}
				}
			}
			printf(TC_MSG, host->id, result == 0 ?
				"PASS" : "FAIL", tid++,
				"test multiple block interleave write-read\n");
			if (result)
				goto failure;
		}

		if (cfg->desc) {
			printf("[TST] ----------------------------------------------\n");
			printf("[TST] Report - %s \n", cfg->desc);
			printf("[TST] ----------------------------------------------\n");
		}
		mmc_prof_dump(id);

failure:
		/* Per-iteration tally; the loop continues even after a failure. */
		if (result) {
			printf("[SD%d] mmc test failed (%d)\n", host->id, result);
			fail++;
		} else {
			pass++;
		}
		printf("[TST] ----------------------------------------------\n");
		printf("[TST] Test Result: TOTAL(%d/%d), PASS(%d), FAIL(%d) \n",
			cfg->count - count, cfg->count, pass, fail);
		printf("[TST] ----------------------------------------------\n");
		//mdelay(1000);
	}
	return result;
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue: creates the block-layer queue,
 * optionally sets up a bounce buffer for single-segment hosts, allocates
 * the scatterlist(s), and starts the per-card queue thread.  Returns 0 on
 * success or a negative errno; on failure everything allocated here is
 * torn down again (labels fall through intentionally).
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	/* Honour the host's DMA mask for the bounce limit when set. */
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	/*
	 * Hosts limited to a single hardware segment get a bounce buffer
	 * so larger requests can still be serviced in one go.
	 */
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		/* Clamp to all relevant host limits. */
		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				/* Not fatal: fall back to the no-bounce path. */
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			/* One segment for the host side ... */
			mq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			/* ... and one per 512-byte block for the request side. */
			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	/*
	 * No bounce buffer in use (or CONFIG_MMC_BLOCK_BOUNCE disabled):
	 * program the queue straight from the host limits.
	 * NOTE(review): this relies on mq->bounce_buf being zeroed before
	 * this function runs when the bounce config is off — verify the
	 * allocator of *mq.
	 */
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	/* Name the queue thread by card flavour for easier debugging. */
	if (is_svlte_type_mmc_card(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "svlte-qd");
	else if (mmc_card_sd(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "sd-qd");
	else if (mmc_card_mmc(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "emmc-qd");
	else if (mmc_card_sdio(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "sdio-qd");
	else
		mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;

	/* Error unwind: labels intentionally fall through top to bottom. */
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}