/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise; on first-time probe it is NULL.
 * Returns 0 on success or a negative errno; on success (first probe)
 * the new card is published in host->card.  The caller must have
 * claimed the host (checked by the WARN_ON below).
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit (OCR bit 30) indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.  SPI hosts read it directly; native hosts
	 * broadcast CMD2.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* On resume the CID must match the card we suspended with. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}
		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;
		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;
	}

	/*
	 * Activate high speed (if supported).  A -EBADMSG from the switch
	 * is treated as "card refused" and only logged; any other error
	 * aborts initialisation.
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed: clamp to the high-speed max when in HS mode,
	 * otherwise to the CSD-advertised max.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported by both card type and host caps).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& (host->caps & (MMC_CAP_1_8V_DDR)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& (host->caps & (MMC_CAP_1_2V_DDR)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).  8-bit is preferred
	 * when the host advertises it; otherwise fall back to 4-bit.
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		unsigned ext_csd_bit, bus_width;

		if (host->caps & MMC_CAP_8_BIT_DATA) {
			if (ddr)
				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
			else
				ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
			bus_width = MMC_BUS_WIDTH_8;
		} else {
			if (ddr)
				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
			else
				ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
			bus_width = MMC_BUS_WIDTH_4;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bit);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			/* Card refused the width switch: stay at 1-bit. */
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
			       "failed\n", mmc_hostname(card->host),
			       1 << bus_width, ddr);
			err = 0;
		} else {
			if (ddr)
				mmc_card_set_ddr_mode(card);
			else
				ddr = MMC_SDR_MODE;

			mmc_set_bus_width_ddr(card->host, bus_width, ddr);
		}
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise; on first-time probe it is NULL.
 * Returns 0 on success or a negative errno; on success (first probe)
 * the new card is published in host->card.  The caller must have
 * claimed the host (checked by the WARN_ON below).
 *
 * This variant carries vendor quirks: SanDisk power-class forcing,
 * per-part write-performance hints, and write-protection detection
 * workarounds (CONFIG_MMC_DISABLE_WP_RFG_5 / CONFIG_ARCH_MSM7230).
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;
#if defined(CONFIG_MMC_DISABLE_WP_RFG_5)
	/* 2012 March detect write protection status for SHR/SHR#K workaround */
	/* mfg partition start sector = LBA 65536 */
	unsigned char WP_STATUS[8] = {0};
#endif

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit (OCR bit 30) indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* On resume the CID must match the card we suspended with. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}
		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;
		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.  ext_csd is freed on
		 * every exit path via mmc_free_ext_csd().
		 */
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);

		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
			mmc_card_set_blockaddr(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable the
	 * ERASE_GRP_DEF bit.  This bit will be lost every time after a
	 * reset or power off.  For 2GB eMMC, there will no HC_ERASE_GROUP
	 * define (hence the sector-count gate used here instead).
	 */
	if (card->ext_csd.sectors > 4194304) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1, 0);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * For SanDisk X3 (manfid 0x45), we have to enable power class 4.
	 * The original code duplicated the identical switch sequence for
	 * the >16GB parts and the 31105024-sector 16GB part; both cases
	 * are merged into one condition here (behavior unchanged).
	 */
	if (card->cid.manfid == 0x45 &&
	    (card->ext_csd.sectors > 33554432 ||
	     card->ext_csd.sectors == 31105024)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_ZERO,
				 EXT_CSD_POWER_CLASS, 4, 0);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			printk(KERN_WARNING "%s: switch to power class 4 failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			printk(KERN_WARNING "%s: switch to power class 4 successfully\n",
			       mmc_hostname(card->host));
		}
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1, 0);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Per-part write performance hints, keyed on manfid + sector
	 * count + product name.  The wr_perf values are vendor-tuned;
	 * their unit is not visible here -- presumably MB/s, verify
	 * against the consumer of card->wr_perf.
	 */
	if (card->cid.manfid == 0x45) {
		/* Sandisk 24nm extreme 16G */
		if ((card->ext_csd.sectors == 31105024) &&
				!strcmp(card->cid.prod_name, "SEM16G"))
			card->wr_perf = 12;
		/* Sandisk 24nm extreme 32G */
		else if ((card->ext_csd.sectors == 62324736) &&
				!strcmp(card->cid.prod_name, "SEM32G"))
			card->wr_perf = 12;
	} else if (card->cid.manfid == 0x15) {
		pr_info("%s: sectors %u\n", mmc_hostname(card->host),
			card->ext_csd.sectors);
		/* Samsung 27nm 16G */
		if ((card->ext_csd.sectors == 30777344) &&
				!strcmp(card->cid.prod_name, "KYL00M"))
			card->wr_perf = 11;
		else if ((card->ext_csd.sectors == 62521344) &&
				!strcmp(card->cid.prod_name, "MBG8FA"))
			card->wr_perf = 11;
		/* Samsung 21nm 16G */
		else if ((card->ext_csd.sectors == 30535680) &&
				!strcmp(card->cid.prod_name, "MAG2GA"))
			card->wr_perf = 14;
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).  Both the voltage-specific
	 * DDR cap and MMC_CAP_UHS_DDR50 must be set on the host.
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).  Try 8-bit first
	 * (when capable), then 4-bit, then stay at 1-bit; each width is
	 * validated before being accepted.
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0], 0);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);
				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					err = mmc_compare_ext_csds(card,
						bus_width);
				else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BUS_WIDTH,
					ext_csd_bits[idx][1], 0);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120, 0);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

#if defined(CONFIG_MMC_DISABLE_WP_RFG_5)
	/* 2012 March detect write protection status for SHR/SHR#K workaround */
	/* mfg partition start sector = LBA 65536 */
	err = mmc_set_block_length(card, 8);
	if (err && err != -EBADMSG)
		goto free_card;
	if (err) {
		pr_err("%s: set block length to 8 fail\n",
			mmc_hostname(card->host));
		err = 0;
	}

	err = mmc_send_write_prot_type(card, WP_STATUS, 65536);
	if (err && err != -EBADMSG)
		goto free_card;
	if (err) {
		pr_err("%s: send write protection type at address 65536 failed\n",
			mmc_hostname(card->host));
		err = 0;
	}

	if (WP_STATUS[0] & 0xAA) {
		pr_info("%s: trigger software write protection\n",
			mmc_hostname(card->host));
		card->write_prot_type = 1;
	} else {
		pr_info("%s: disable software write protection\n",
			mmc_hostname(card->host));
		card->write_prot_type = 0;
	}

	/* Restore the normal 512-byte block length after the WP probe. */
	err = mmc_set_block_length(card, 512);
	if (err && err != -EBADMSG)
		goto free_card;
	if (err) {
		pr_err("%s: set block length to 512 fail\n",
			mmc_hostname(card->host));
		err = 0;
	}
#endif

#if defined(CONFIG_ARCH_MSM7230)
	/* 2012 March detect write protection status for Kingston workaround
	   System partition start sector = LBA 200704 */
	if (card->cid.manfid == 0x70) {
		unsigned char WP_STATUS[8] = {0};

		err = mmc_set_block_length(card, 8);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_err("%s: set block length to 8 fail\n",
				mmc_hostname(card->host));
			err = 0;
		}

		err = mmc_send_write_prot_type(card, WP_STATUS, 200704);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_err("%s: send write protection type at address 200704 failed\n",
				mmc_hostname(card->host));
			err = 0;
		}

		if (WP_STATUS[0] & 0xAA) {
			pr_info("%s: trigger Kingston write protection\n",
				mmc_hostname(card->host));
			card->write_prot_type = 1;
		} else {
			pr_info("%s: disable Kingston write protection\n",
				mmc_hostname(card->host));
			card->write_prot_type = 0;
		}

		/* Restore the normal 512-byte block length. */
		err = mmc_set_block_length(card, 512);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_err("%s: set block length to 512 fail\n",
				mmc_hostname(card->host));
			err = 0;
		}
	}
#endif

	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise; on first-time probe it is NULL.
 * Returns 0 on success or a negative errno.  host->card may be
 * published early (card_is_null tracks that) so the host's
 * execute_tuning callback can run; it is rolled back on failure.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	int card_is_null = 0;	/* set when we published host->card early */
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit (OCR bit 30) indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* On resume the CID must match the card we suspended with. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}
		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;
		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.  ext_csd is freed on
		 * every exit path via mmc_free_ext_csd().
		 */
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1, 0);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1, 0);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Publish the card early so the host driver's tuning callback
	 * can see it; remember that we did, so failure paths below can
	 * undo it before freeing the card.
	 */
	if (!host->card) {
		host->card = card;
		card_is_null = 1;
	}

	if (card->host->ops->execute_tuning)
		card->host->ops->execute_tuning(card->host);

	/*
	 * Indicate DDR mode (if supported).  Both the voltage-specific
	 * DDR cap and MMC_CAP_UHS_DDR50 must be set on the host.
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0], 0);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);
				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 * (comparison deliberately disabled here: the
				 * first switched width is accepted as-is).
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) {
					/* err = mmc_compare_ext_csds(card, ext_csd, bus_width); */
				} else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BUS_WIDTH,
					ext_csd_bits[idx][1], 0);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			if (card_is_null)
				host->card = NULL;
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			/*
			 * BUGFIX: ddr holds an MMC_*_DDR_MODE value (set in
			 * the DDR-detection block above), never an
			 * EXT_CSD_CARD_TYPE_* bit, so compare against
			 * MMC_1_2V_DDR_MODE.  The previous comparison with
			 * EXT_CSD_CARD_TYPE_DDR_1_2V could never match, so
			 * 1.2V signaling was never requested; the sibling
			 * implementations in this file already compare
			 * MMC_1_2V_DDR_MODE.
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120, 0);
				if (err) {
					if (card_is_null)
						host->card = NULL;
					goto err;
				}
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

	if (card->host->ops->execute_tuning)
		card->host->ops->execute_tuning(card->host);

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}
/*
 * Wake a sleeping eMMC card (CMD5 awake via mmc_card_sleepawake) and
 * re-establish its operating mode: default partition, high-speed timing,
 * clock, bus width and DDR.  Only cards with EXT_CSD revision >= 3 are
 * handled; otherwise returns -ENOSYS.  Returns 0 on success or a
 * negative errno.
 */
static int mmc_awake(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;	/* returned unchanged if the card can't sleep/awake */
	int ddr = 0;
	unsigned int max_dtr;

	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 0);
		if (err < 0) {
			pr_debug("%s: Error %d while awaking sleeping card",
				 mmc_hostname(host), err);
			return err;
		}

		/*
		 * Ensure eMMC user default partition is enabled
		 */
		if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
			card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_PART_CONFIG,
					 card->ext_csd.part_config,
					 card->ext_csd.part_time);
			if (err && err != -EBADMSG)
				goto err;
		}

		/*
		 * Activate high speed (if supported).  Unlike init, no
		 * HS_TIMING switch command is sent here -- presumably the
		 * card retained the setting across sleep; confirm against
		 * the matching suspend path.
		 */
		if ((card->ext_csd.hs_max_dtr != 0) &&
			(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}

		/*
		 * Compute bus speed.
		 */
		max_dtr = (unsigned int)-1;
		if (mmc_card_highspeed(card)) {
			if (max_dtr > card->ext_csd.hs_max_dtr)
				max_dtr = card->ext_csd.hs_max_dtr;
		} else if (max_dtr > card->csd.max_dtr) {
			max_dtr = card->csd.max_dtr;
		}
		mmc_set_clock(host, max_dtr);

		/*
		 * Indicate DDR mode (if supported).  Both the
		 * voltage-specific DDR cap and MMC_CAP_UHS_DDR50 must be
		 * set on the host.
		 */
		if (mmc_card_highspeed(card)) {
			if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
				&& ((host->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))
					== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
					ddr = MMC_1_8V_DDR_MODE;
			else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
				&& ((host->caps & (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))
					== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
					ddr = MMC_1_2V_DDR_MODE;
		}

		/*
		 * Activate wide bus and DDR (if supported).  The first
		 * width the card accepts is used (no bus-width test on
		 * the awake path).
		 */
		if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
		    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
			static unsigned ext_csd_bits[][2] = {
				{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
				{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
				{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
			};
			static unsigned bus_widths[] = {
				MMC_BUS_WIDTH_8,
				MMC_BUS_WIDTH_4,
				MMC_BUS_WIDTH_1
			};
			unsigned idx, bus_width = 0;

			if (host->caps & MMC_CAP_8_BIT_DATA)
				idx = 0;
			else
				idx = 1;
			for (; idx < ARRAY_SIZE(bus_widths); idx++) {
				bus_width = bus_widths[idx];
				if (bus_width == MMC_BUS_WIDTH_1)
					ddr = 0; /* no DDR for 1-bit width */
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
						 EXT_CSD_BUS_WIDTH,
						 ext_csd_bits[idx][0],
						 0);
				if (!err) {
					mmc_set_bus_width(card->host, bus_width);
					break;
				}
			}

			if (!err && ddr) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
						EXT_CSD_BUS_WIDTH,
						ext_csd_bits[idx][1],
						0);
			}
			if (err) {
				printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
					"failed\n", mmc_hostname(card->host),
					1 << bus_width, ddr);
				goto err;
			} else if (ddr) {
				/*
				 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
				 * signaling.
				 *
				 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
				 *
				 * 1.8V vccq at 3.3V core voltage (vcc) is not required
				 * in the JEDEC spec for DDR.
				 *
				 * Do not force change in vccq since we are obviously
				 * working and no change to vccq is needed.
				 *
				 * WARNING: eMMC rules are NOT the same as SD DDR
				 */
				if (ddr == MMC_1_2V_DDR_MODE) {
					err = mmc_set_signal_voltage(host,
						MMC_SIGNAL_VOLTAGE_120, 0);
					if (err)
						goto err;
				}
				mmc_card_set_ddr_mode(card);
				mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
				mmc_set_bus_width(card->host, bus_width);
			}
		}
	}

err:
	return err;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise; on first-time probe it is NULL.
 * Returns 0 on success or a negative errno; on success (first probe)
 * the new card is published in host->card.  The caller must have
 * claimed the host (checked by the WARN_ON below).
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit (OCR bit 30) indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* On resume the CID must match the card we suspended with. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}
		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}
		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;
		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& (host->caps & (MMC_CAP_1_8V_DDR)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& (host->caps & (MMC_CAP_1_2V_DDR)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).  Widths are tried in
	 * decreasing order (8, 4, 1); ext_csd_bits pairs the SDR and DDR
	 * EXT_CSD values for each width.
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0]);
			if (!err) {
				mmc_set_bus_width_ddr(card->host, bus_width,
						      MMC_SDR_MODE);
				/*
				 * If controller can't handle bus width test,
				 * use the highest bus width to maintain
				 * compatibility with previous MMC behavior.
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					break;
				err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BUS_WIDTH,
					ext_csd_bits[idx][1]);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			mmc_card_set_ddr_mode(card);
			mmc_set_bus_width_ddr(card->host, bus_width, ddr);
		}
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
{
	/*
	 * Preference-ordered table of UHS-I bus speed modes.  An entry
	 * matches when the host advertises at least one capability in
	 * ->host_caps AND the card advertises ->card_mode in its SD3.0
	 * bus-mode mask.  The first match wins.
	 */
	static const struct {
		u32 host_caps;		/* host caps that permit this mode */
		u32 card_mode;		/* SD_MODE_UHS_* bit the card needs */
		unsigned int speed;	/* UHS_*_BUS_SPEED switch argument */
		unsigned int timing;	/* MMC_TIMING_UHS_* host timing */
		unsigned int max_dtr;	/* max clock for this mode */
	} uhs_modes[] = {
		{ MMC_CAP_UHS_SDR104, SD_MODE_UHS_SDR104,
		  UHS_SDR104_BUS_SPEED, MMC_TIMING_UHS_SDR104,
		  UHS_SDR104_MAX_DTR },
		{ MMC_CAP_UHS_DDR50, SD_MODE_UHS_DDR50,
		  UHS_DDR50_BUS_SPEED, MMC_TIMING_UHS_DDR50,
		  UHS_DDR50_MAX_DTR },
		/*
		 * A host capable of SDR104 can also run the slower SDR
		 * modes, hence the widening capability masks below.
		 */
		{ MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50,
		  SD_MODE_UHS_SDR50,
		  UHS_SDR50_BUS_SPEED, MMC_TIMING_UHS_SDR50,
		  UHS_SDR50_MAX_DTR },
		{ MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25,
		  SD_MODE_UHS_SDR25,
		  UHS_SDR25_BUS_SPEED, MMC_TIMING_UHS_SDR25,
		  UHS_SDR25_MAX_DTR },
		{ MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
		  MMC_CAP_UHS_SDR12,
		  SD_MODE_UHS_SDR12,
		  UHS_SDR12_BUS_SPEED, MMC_TIMING_UHS_SDR12,
		  UHS_SDR12_MAX_DTR },
	};
	unsigned int bus_speed = 0, timing = 0;
	unsigned int i;
	int err;

	/*
	 * If the host doesn't support any of the UHS-I modes, fall back
	 * on default speed.
	 */
	if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
		return 0;

	/* Pick the fastest mode both sides support. */
	for (i = 0; i < ARRAY_SIZE(uhs_modes); i++) {
		if ((card->host->caps & uhs_modes[i].host_caps) &&
		    (card->sw_caps.sd3_bus_mode & uhs_modes[i].card_mode)) {
			bus_speed = uhs_modes[i].speed;
			timing = uhs_modes[i].timing;
			card->sw_caps.uhs_max_dtr = uhs_modes[i].max_dtr;
			break;
		}
	}

	/* Ask the card to switch (function group 1). */
	card->sd_bus_speed = bus_speed;
	err = mmc_sd_switch(card, 1, 0, bus_speed, status);
	if (err)
		return err;

	/*
	 * Byte 16 of the switch status echoes the selected function;
	 * only reprogram the host if the card actually switched.
	 */
	if ((status[16] & 0xF) != bus_speed) {
		printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
			mmc_hostname(card->host));
	} else {
		mmc_set_timing(card->host, timing);
		if (timing == MMC_TIMING_UHS_DDR50)
			mmc_card_set_ddr_mode(card);
		mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
	}

	return 0;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 *
 * Returns 0 on success or a negative errno.  On the success path for a
 * fresh probe, host->card is set to the newly allocated card.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* Resume: the card present must be the one we suspended. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

#if defined(CONFIG_ARCH_ACER_T20) || defined(CONFIG_ARCH_ACER_T30)
	/*
	 * Board quirk: force SanDisk X3 parts to power class 4.
	 * Failure is logged but deliberately non-fatal.
	 */
	if (card->cid.manfid == SANDISK_X3_CID_MID) {
		err = mmc_switch(card, 0x0, EXT_CSD_POWER_CLASS, 4);
		if (err)
			printk(KERN_ERR "%s: switch power class fail \n",
				mmc_hostname(card->host));
	}
#endif

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		/* -EBADMSG (CRC error on the switch) is tolerated below. */
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 *
	 * NOTE(review): HPI enablement is gated on MMC_CAP_BKOPS rather
	 * than a dedicated HPI capability — presumably intentional in this
	 * vendor tree since HPI is used to interrupt BKOPS; confirm.
	 */
	if (card->ext_csd.hpi && (card->host->caps & MMC_CAP_BKOPS)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HPI_MGMT, 1);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			card->ext_csd.hpi_en = 1;
		}
	}

	/*
	 * Enable Background ops feature (if supported)
	 */
	if (card->ext_csd.bk_ops && (card->host->caps & MMC_CAP_BKOPS)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_EN, 1);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling BK ops failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			card->ext_csd.bk_ops_en = 1;
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).  Both the plain DDR cap and
	 * MMC_CAP_UHS_DDR50 must be set on the host for either mode.
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		/* Try the widest bus the host supports first. */
		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0]);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);
				/*
				 * If controller can't handle bus width test,
				 * use the highest bus width to maintain
				 * compatibility with previous MMC behavior.
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					break;
				err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		/* SDR width accepted; now switch the card to the DDR width. */
		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][1]);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			/*
			 * BUGFIX: "ddr" only ever holds MMC_*_DDR_MODE values
			 * (assigned above), never EXT_CSD_CARD_TYPE_* bits, so
			 * the old comparison against EXT_CSD_CARD_TYPE_DDR_1_2V
			 * could never select 1.2V signalling.
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120);
				if (err)
					/*
					 * BUGFIX: take the free_card path so a
					 * freshly allocated card isn't leaked
					 * on this error path (host->card is
					 * not set yet).
					 */
					goto free_card;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

#if defined(CONFIG_ARCH_ACER_T20) || defined(CONFIG_ARCH_ACER_T30)
	/* Export card identity via a board-specific sysfs group. */
	switch (card->type) {
	case MMC_TYPE_MMC:
		sprintf(emmc_type, "MMC");
		break;
	case MMC_TYPE_SD:
		sprintf(emmc_type, "SD");
		break;
	case MMC_TYPE_SDIO:
		sprintf(emmc_type, "SDIO");
		break;
	case MMC_TYPE_SD_COMBO:
		sprintf(emmc_type, "SDcombo");
		break;
	default:
		/* "unknow" [sic] kept as-is: userspace may match this string */
		sprintf(emmc_type, "unknow");
	}

	sprintf(emmc_date, "%02d/%04d", card->cid.month, card->cid.year);
	/* ext_csd.sectors is a 512-byte sector count; >>11 converts to MiB */
	emmc_size = card->ext_csd.sectors >> 11;
	emmc_name = card->cid.prod_name;

	if (device_info_kobj == NULL) {
		device_info_kobj = kobject_create_and_add("dev-info_rom", NULL);
		if (device_info_kobj == NULL) {
			pr_warning("%s: subsystem_register failed\n",
				   mmc_hostname(card->host));
		} else {
			/* sysfs failure is logged but not fatal to card init */
			err = sysfs_create_group(device_info_kobj, &attr_group);
			if (err) {
				pr_warning("%s: sysfs_create_group failed\n",
					   mmc_hostname(card->host));
			}
		}
	}
#endif

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
/*
 * Switch an eMMC card (and then the host) into either DDR50 or HS200
 * bus speed mode.
 *
 * @card:  the card to reconfigure (must already be initialised/selected)
 * @speed: UHS_DDR50_BUS_SPEED selects the DDR path; any other value
 *         selects the HS200 path.
 *
 * Returns 0 on success or a negative errno; on failure the card may be
 * left with a partially applied EXT_CSD configuration (no rollback here).
 *
 * NOTE(review): this function reads card->host->ios.timing to decide
 * which state bits to set but never writes it itself — presumably the
 * caller/host driver sets ios.timing before or during these switches;
 * confirm against the caller.
 */
static int mmc_set_bus_speed_mode(struct mmc_card *card, u32 speed)
{
	int err = 0;
	u32 clock = 0;
	u32 bus_width = 0;
	/* Card's advertised speed-mode bits from EXT_CSD (masked). */
	u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
	u32 caps = card->host->caps, caps2 = card->host->caps2;

	/* HS_TIMING is set to 2 in HS200 and all other modes needs to be 1 */
	if (speed == UHS_DDR50_BUS_SPEED) {
		/* check card and host capability for DDR50 to proceed */
		if (!(((caps & MMC_CAP_1_8V_DDR) &&
			(card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)) ||
			((caps & MMC_CAP_1_2V_DDR) &&
			(card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)))) {
			err = -EINVAL;
			goto err_node;
		}
		/* DDR uses HS_TIMING = 1 (high speed), set before bus width. */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1, 0);
		if (err) {
			pr_err("%s: switch to HS_TIMING failed with error %d\n",
				mmc_hostname(card->host), err);
			goto err_node;
		} else {
			/* DDR requires 4- or 8-bit bus; pick the widest. */
			if (card->host->caps & MMC_CAP_8_BIT_DATA)
				bus_width = EXT_CSD_DDR_BUS_WIDTH_8;
			else
				bus_width = EXT_CSD_DDR_BUS_WIDTH_4;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_BUS_WIDTH, bus_width,
				card->ext_csd.generic_cmd6_time);
			if (err) {
				/*
				 * NOTE(review): these two pr_err calls emit
				 * separate log lines and concatenate as
				 * "failedwith" (missing space) — message-level
				 * cosmetic bug, left as-is in this doc pass.
				 */
				pr_err("%s: switch to bus width failed",
					mmc_hostname(card->host));
				pr_err("with error %d\n", err);
				goto err_node;
			}
			clock = MMC_HIGH_DDR_MAX_DTR;
		}
	} else {
		/* check card and host capability for HS200 to proceed */
		if (!(((caps2 & MMC_CAP2_HS200_1_8V_SDR) &&
			(card_type & EXT_CSD_CARD_TYPE_SDR_1_8V)) ||
			((caps2 & MMC_CAP2_HS200_1_2V_SDR) &&
			(card_type & EXT_CSD_CARD_TYPE_SDR_1_2V)))) {
			err = -EINVAL;
			goto err_node;
		}
		/* Based on host capability, set card side bus width */
		if (card->host->caps & MMC_CAP_8_BIT_DATA)
			bus_width = EXT_CSD_BUS_WIDTH_8;
		else if (card->host->caps & MMC_CAP_4_BIT_DATA)
			bus_width = EXT_CSD_BUS_WIDTH_4;
		else
			bus_width = EXT_CSD_BUS_WIDTH_1;
		/* For HS200 the bus width is switched before HS_TIMING = 2. */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BUS_WIDTH, bus_width,
			card->ext_csd.generic_cmd6_time);
		if (err) {
			pr_err("%s: switch to bus width failed with error %d\n",
				mmc_hostname(card->host), err);
			goto err_node;
		}
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 2, 0);
		if (err) {
			pr_err("%s: switch to HS_TIMING failed with error %d\n",
				mmc_hostname(card->host), err);
			goto err_node;
		}
		clock = MMC_HS200_MAX_DTR;
	}

	/*
	 * Re-apply the host's current timing, then update the card state
	 * bits so exactly one speed-mode flag remains set.
	 */
	mmc_set_timing(card->host, card->host->ios.timing);
	if (card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
		mmc_card_set_ddr_mode(card);
		card->state &= ~(MMC_STATE_HIGHSPEED_200 |
			MMC_STATE_HIGHSPEED_400 | MMC_STATE_HIGHSPEED);
	} else if (card->host->ios.timing == MMC_TIMING_MMC_HS200) {
		mmc_card_set_hs200(card);
		card->state &= ~(MMC_STATE_HIGHSPEED_DDR |
			MMC_STATE_HIGHSPEED_400 | MMC_STATE_HIGHSPEED);
	}

	/* Based on bus width selected for card, set host side bus width */
	switch (bus_width) {
	case EXT_CSD_BUS_WIDTH_8:
	case EXT_CSD_DDR_BUS_WIDTH_8:
		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
		break;
	case EXT_CSD_BUS_WIDTH_4:
	case EXT_CSD_DDR_BUS_WIDTH_4:
		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
		break;
	default:
		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);
	}
	/* Finally raise the clock to the new mode's maximum. */
	mmc_set_clock(card->host, clock);
err_node:
	return err;
}