static int sdio_irq_thread(void *_host) { struct mmc_host *host = _host; struct sched_param param = { .sched_priority = 1 }; unsigned long period, idle_period; int ret; sched_setscheduler(current, SCHED_FIFO, ¶m); /* * We want to allow for SDIO cards to work even on non SDIO * aware hosts. One thing that non SDIO host cannot do is * asynchronous notification of pending SDIO card interrupts * hence we poll for them in that case. */ idle_period = msecs_to_jiffies(10); period = (host->caps & MMC_CAP_SDIO_IRQ) ? MAX_SCHEDULE_TIMEOUT : idle_period; pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n", mmc_hostname(host), period); do { /* * We claim the host here on drivers behalf for a couple * reasons: * * 1) it is already needed to retrieve the CCCR_INTx; * 2) we want the driver(s) to clear the IRQ condition ASAP; * 3) we need to control the abort condition locally. * * Just like traditional hard IRQ handlers, we expect SDIO * IRQ handlers to be quick and to the point, so that the * holding of the host lock does not cover too much work * that doesn't require that lock to be held. */ ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); if (ret) break; ret = process_sdio_pending_irqs(host->card); mmc_release_host(host); /* * Give other threads a chance to run in the presence of * errors. */ if (ret < 0) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule_timeout(HZ); set_current_state(TASK_RUNNING); } /* * Adaptive polling frequency based on the assumption * that an interrupt will be closely followed by more. * This has a substantial benefit for network devices. 
*/ if (!(host->caps & MMC_CAP_SDIO_IRQ)) { if (ret > 0) period /= 2; else { period++; if (period > idle_period) period = idle_period; } } set_current_state(TASK_INTERRUPTIBLE); if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 1); if (!kthread_should_stop()) schedule_timeout(period); set_current_state(TASK_RUNNING); } while (!kthread_should_stop()); if (host->caps & MMC_CAP_SDIO_IRQ) host->ops->enable_sdio_irq(host, 0); pr_debug("%s: IRQ thread exiting with code %d\n", mmc_hostname(host), ret); return ret; } static int sdio_card_irq_get(struct mmc_card *card) { struct mmc_host *host = card->host; WARN_ON(!host->claimed); if (!host->sdio_irqs++) { atomic_set(&host->sdio_irq_thread_abort, 0); host->sdio_irq_thread = kthread_run(sdio_irq_thread, host, "ksdioirqd/%s", mmc_hostname(host)); if (IS_ERR(host->sdio_irq_thread)) { int err = PTR_ERR(host->sdio_irq_thread); host->sdio_irqs--; return err; } } return 0; } static int sdio_card_irq_put(struct mmc_card *card) { struct mmc_host *host = card->host; WARN_ON(!host->claimed); BUG_ON(host->sdio_irqs < 1); if (!--host->sdio_irqs) { atomic_set(&host->sdio_irq_thread_abort, 1); kthread_stop(host->sdio_irq_thread); } return 0; } /* If there is only 1 function registered set sdio_single_irq */ static void sdio_single_irq_set(struct mmc_card *card) { struct sdio_func *func; int i; card->sdio_single_irq = NULL; if ((card->host->caps & MMC_CAP_SDIO_IRQ) && card->host->sdio_irqs == 1) for (i = 0; i < card->sdio_funcs; i++) { func = card->sdio_func[i]; if (func && func->irq_handler) { card->sdio_single_irq = func; break; } } } /** * sdio_claim_irq - claim the IRQ for a SDIO function * @func: SDIO function * @handler: IRQ handler callback * * Claim and activate the IRQ for the given SDIO function. The provided * handler will be called when that IRQ is asserted. The host is always * claimed already when the handler is called so the handler must not * call sdio_claim_host() nor sdio_release_host(). 
*/ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler) { int ret; unsigned char reg; BUG_ON(!func); BUG_ON(!func->card); pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func)); if (func->irq_handler) { pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func)); return -EBUSY; } ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, ®); if (ret) return ret; reg |= 1 << func->num; reg |= 1; /* Master interrupt enable */ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL); if (ret) return ret; func->irq_handler = handler; ret = sdio_card_irq_get(func->card); if (ret) func->irq_handler = NULL; sdio_single_irq_set(func->card); return ret; }
/*
 * mmc_rescan - delayed-work handler that (re)detects cards on a host.
 *
 * First asks the current bus handler whether an attached card is still
 * present; if the bus died, extends the wakelock so userspace can react.
 * If no bus is attached, probes for a new card in the order SDIO -> SD ->
 * MMC.  With CONFIG_MMC_PARANOID_SD_INIT, a failed probe is retried.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err = 0;
	int extend_wakelock = 0;	/* 1 => hold wakelock 5s after rescan */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries = 2;
#endif

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	/* Re-take the bus reference: detect() above may have torn it down. */
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	/* card-detect line says no card -> nothing to do */
	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

retry:
	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 *
	 * NOTE(review): on each attach path below the host claim is
	 * presumably released by the attach routine or kept by the new bus
	 * handler; also extend_wakelock is set even when attach fails and
	 * power is cut -- confirm both against this tree's attach functions.
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: release the claim and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* -ENOMEDIUM means "no card"; anything else is worth a retry. */
	if (err && (err != -ENOMEDIUM) && retries) {
		printk(KERN_INFO "%s: Re-scan card rc = %d (retries = %d)\n",
		       mmc_hostname(host), err, retries);
		retries--;
		goto retry;
	}
#endif
	if (extend_wakelock)
		wake_lock_timeout(&host->wakelock, 5 * HZ);
	else
		wake_unlock(&host->wakelock);
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * Read extended CSD.
 *
 * Allocates a 512-byte buffer, reads EXT_CSD into it and hands ownership
 * to the caller via @new_ext_csd (caller frees).  Pre-v4 cards have no
 * EXT_CSD; that is reported as success with *new_ext_csd == NULL.
 * Unreadable EXT_CSD on a non-high-capacity card is tolerated (err = 0).
 */
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);
	BUG_ON(!new_ext_csd);

	*new_ext_csd = NULL;

	/* EXT_CSD exists only from MMCA spec v4 onwards; not an error. */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		kfree(ext_csd);
		*new_ext_csd = NULL;

		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL) && (err != -ENOSYS) && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}
	} else {
		*new_ext_csd = ext_csd;
#if defined(MMC_CHECK_EXT_CSD)
		/*
		 * For debugging about ext_csd register value.
		 * BUGFIX: this dump used to execute unconditionally after
		 * the if/else above, i.e. also on the graceful-failure path
		 * where ext_csd had already been kfree()d -- a use-after-
		 * free.  Only dump the buffer when the read succeeded and
		 * the buffer is still live.
		 */
		mmc_error_ext_csd(card, ext_csd, 1, 0);
#endif
	}

	return err;
}
/*
 * Negotiate the UHS-I output driver strength (Driver Type) for an SD card.
 *
 * Driver Type B is the mandatory default; Types A/C/D are optional.  When
 * both the host and a board-specific select_drive_strength() hook exist,
 * the intersection of host and card capabilities is offered to the hook,
 * and the chosen type is programmed via the SWITCH function (group 3) and
 * mirrored into the host.  Returns 0 on success or a switch error.
 */
static int sd_select_driver_type(struct mmc_card *card, u8 *status)
{
	int host_types = SD_DRIVER_TYPE_B;
	int card_types = SD_DRIVER_TYPE_B;
	int strength;
	int err;

	/*
	 * Stay on default Type B when the host advertises none of the
	 * optional types, or when there is no board hook to choose one.
	 */
	if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
	    | MMC_CAP_DRIVER_TYPE_D)))
		return 0;

	if (!card->host->ops->select_drive_strength)
		return 0;

	/* Gather the optional types the host controller supports... */
	if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_types |= SD_DRIVER_TYPE_A;
	if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_types |= SD_DRIVER_TYPE_C;
	if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_types |= SD_DRIVER_TYPE_D;

	/* ...and the ones the card reported in its switch capabilities. */
	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
		card_types |= SD_DRIVER_TYPE_A;
	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
		card_types |= SD_DRIVER_TYPE_C;
	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
		card_types |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support depends on the
	 * board design, so let the board-specific hook pick from the
	 * options we just collected.
	 */
	strength = card->host->ops->select_drive_strength(
		card->sw_caps.uhs_max_dtr, host_types, card_types);

	/* Program the selection: mode 1 (set), function group 2. */
	err = mmc_sd_switch(card, 1, 2, strength, status);
	if (err)
		return err;

	/* The card echoes the accepted type in status[15]; warn on mismatch
	 * but treat it as non-fatal (default Type B keeps working). */
	if ((status[15] & 0xF) != strength) {
		printk(KERN_WARNING "%s: Problem setting drive strength!\n",
			mmc_hostname(card->host));
		return 0;
	}

	mmc_set_driver_type(card->host, strength);

	return 0;
}
/*
 * mmc_sd_setup_card - read the SD card's configuration registers and apply
 * host-side settings.
 *
 * On first init (!reinit) fetches SCR, SSR and switch information in that
 * order (erase setup depends on CSD and SSR).  For SPI hosts, CRC is
 * enabled only after those reads, and the write-protect switch is sampled
 * once.  Returns 0 on success or a negative error from any register read.
 */
int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
	bool reinit)
{
	int err;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries;
#endif

	if (!reinit) {
		/*
		 * Fetch SCR from card.
		 */
		err = mmc_app_send_scr(card, card->raw_scr);
		if (err)
			return err;

		err = mmc_decode_scr(card);
		if (err)
			return err;

		/*
		 * Fetch and process SD Status register.
		 */
		err = mmc_read_ssr(card);
		if (err)
			return err;

		/* Erase init depends on CSD and SSR */
		mmc_init_erase(card);

		/*
		 * Fetch switch information from card.
		 */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
		/* Some marginal cards need a few attempts to answer CMD6. */
		for (retries = 1; retries <= 3; retries++) {
			err = mmc_read_switch(card);
			if (!err) {
				if (retries > 1) {
					printk(KERN_WARNING
					       "%s: recovered\n",
					       mmc_hostname(host));
				}
				break;
			} else {
				printk(KERN_WARNING
				       "%s: read switch failed (attempt %d)\n",
				       mmc_hostname(host), retries);
			}
		}
#else
		err = mmc_read_switch(card);
#endif
		if (err)
			return err;
	}

	/*
	 * For SPI, enable CRC as appropriate.
	 * This CRC enable is located AFTER the reading of the
	 * card registers because some SDHC cards are not able
	 * to provide valid CRCs for non-512-byte blocks.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			return err;
	}

	/*
	 * Check if read-only switch is active.
	 */
	if (!reinit) {
		int ro = -1;	/* -1 => host cannot sense the switch */

		if (host->ops->get_ro)
			ro = host->ops->get_ro(host);

		if (ro < 0) {
			printk(KERN_WARNING "%s: host does not "
				"support reading read-only "
				"switch. assuming write-enable.\n",
				mmc_hostname(host));
		} else if (ro > 0) {
			mmc_card_set_readonly(card);
		}
	}

	return 0;
}
/*
 * Decode extended CSD.
 *
 * Parses the raw 512-byte EXT_CSD buffer into card->ext_csd, gated by the
 * EXT_CSD revision: rev>=2 sector count, rev>=3 partition/erase/boot info,
 * rev>=4 enhanced area and GP partitions, rev>=5 BKOPS/HPI/RPMB, rev>=6
 * (eMMC 4.5) cache/discard/data-tag.  A NULL buffer (pre-v4 card) is a
 * successful no-op.  Returns 0 or -EINVAL for unrecognised structure/rev.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 7) {
		pr_err("%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	/* Keep the raw sector-count bytes for later re-validation. */
	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		/* SEC_CNT is little-endian across four bytes. */
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled.  If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		/*
		 * General purpose partition feature support --
		 * If ext_csd has the size of general purpose partitions,
		 * set size, part_cfg, partition name in mmc_part.
		 */
		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			/* Reuse the erase/WP group sizes read above if the
			 * enhanced-area branch did not populate them. */
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				/* A GP partition with all-zero size bytes is
				 * not provisioned -- skip it. */
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		/* Adjust production date as per JEDEC JESD84-B451 */
		if (card->cid.year < 2010)
			card->cid.year += 16;

		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en)
				pr_info("%s: BKOPS_EN bit is not set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
#if 0 //noted by xbw,2014-03-11
		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
#endif
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	/* ERASED_MEM_CONT tells us what an erased region reads back as. */
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		/* CACHE_SIZE is little-endian across four bytes. */
		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}
		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	} else {
		card->ext_csd.data_sector_size = 512;
	}

out:
	return err;
}
/*
 * Starting point for SD card init.
 *
 * Caller holds the host claim.  Ensures 3.3V signalling, probes with
 * ACMD41, attaches the SD bus ops, validates the OCR voltage window,
 * initialises the card (optionally with retries) and registers it with
 * the driver model.  Returns 0 or a negative error after detaching the
 * bus.
 */
int mmc_attach_sd(struct mmc_host *host)
{
	int err;
	u32 ocr;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries;
#endif

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Make sure we are at 3.3V signalling voltage */
	err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
	if (err)
		return err;

	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_sd_attach_bus_ops(host);
	if (host->ocr_avail_sd)
		host->ocr_avail = host->ocr_avail_sd;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		mmc_go_idle(host);

		err = mmc_spi_read_ocr(host, 0, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	if ((ocr & MMC_VDD_165_195) &&
	    !(host->ocr_avail_sd & MMC_VDD_165_195)) {
		printk(KERN_WARNING "%s: SD card claims to support the "
		       "incompletely defined 'low voltage range'. This "
		       "will be ignored.\n", mmc_hostname(host));
		ocr &= ~MMC_VDD_165_195;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage(s) of the card(s)?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* Marginal cards sometimes need several init attempts. */
	retries = 5;
	while (retries) {
		err = mmc_sd_init_card(host, host->ocr, NULL);
		if (err) {
			retries--;
			continue;
		}
		break;
	}

	if (!retries) {
		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
		       mmc_hostname(host), err);
		goto err;
	}
#else
	err = mmc_sd_init_card(host, host->ocr, NULL);
	if (err)
		goto err;
#endif

	/* device_add() may sleep/probe: drop the claim around registration. */
	mmc_release_host(host);

	err = mmc_add_card(host->card);

	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Registration failed: undo, keeping claim state as on entry. */
	mmc_release_host(host);
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);

	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
		mmc_hostname(host), err);

	return err;
}
/*
 * Starting point for MMC card init.
 *
 * Caller holds the host claim.  Attaches the MMC bus ops, validates the
 * OCR voltage window, initialises the card, applies a vendor lock/unlock
 * workaround for certain 32nm iNAND parts, then registers the card.
 * Returns 0 or a negative error after detaching the bus.
 */
int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
{
	int err;
	int i = 0;	/* loop counter for the lock/unlock workaround */

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	mmc_attach_bus_ops(host);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	/* WA : Lock/Unlock CMD in case of 32nm iNAND */
	/*check iNAND*/
	/* Manufacturer IDs 0x45/0x02; applied only when HPI is absent. */
	if (host->card->cid.manfid == 0x45 || host->card->cid.manfid == 0x02)
		/*check 32nm*/
		if (!(host->card->ext_csd.hpi & 0x1)) {
			printk(KERN_DEBUG "%s: Lock-unlock started, MID=0x%x, HPI=0x%x\n",
					__func__, host->card->cid.manfid,
					host->card->ext_csd.hpi);
			/* Cycle lock/unlock 50 times as the vendor WA. */
			for (i = 0 ; i < 50 ; i++) {
				if (mmc_send_lock_cmd(host, 1)) {
					printk(KERN_ERR "%s: eMMC lock CMD is failed.\n",
							mmc_hostname(host));
					goto remove_card;
				}
				if (mmc_send_lock_cmd(host, 0)) {
					printk(KERN_ERR "%s: eMMC unlock CMD is failed.\n",
							mmc_hostname(host));
					goto remove_card;
				}
			}
			printk(KERN_DEBUG "%s:COMPLETED\n",__func__);
		}

	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* NOTE(review): reached both with the host claimed (workaround
	 * failures) and released (mmc_add_card failure); the claim below
	 * presumably rebalances for the err path -- confirm against the
	 * claim/release pairing in this tree. */
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}
static void mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) { #ifdef CONFIG_MMC_DEBUG unsigned int i, sz; struct scatterlist *sg; #endif pr_debug("%s: starting CMD%u arg %08x flags %08x\n", mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); if (mrq->data) { pr_debug("%s: blksz %d blocks %d flags %08x " "tsac %d ms nsac %d\n", mmc_hostname(host), mrq->data->blksz, mrq->data->blocks, mrq->data->flags, mrq->data->timeout_ns / 1000000, mrq->data->timeout_clks); } if (mrq->stop) { pr_debug("%s: CMD%u arg %08x flags %08x\n", mmc_hostname(host), mrq->stop->opcode, mrq->stop->arg, mrq->stop->flags); } WARN_ON(!host->claimed); led_trigger_event(host->led, LED_FULL); mrq->cmd->error = 0; mrq->cmd->mrq = mrq; if (mrq->data) { BUG_ON(mrq->data->blksz > host->max_blk_size); BUG_ON(mrq->data->blocks > host->max_blk_count); BUG_ON(mrq->data->blocks * mrq->data->blksz > host->max_req_size); #ifdef CONFIG_MMC_DEBUG sz = 0; for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) sz += sg->length; BUG_ON(sz != mrq->data->blocks * mrq->data->blksz); #endif mrq->cmd->data = mrq->data; mrq->data->error = 0; mrq->data->mrq = mrq; if (mrq->stop) { mrq->data->stop = mrq->stop; mrq->stop->error = 0; mrq->stop->mrq = mrq; } #ifdef CONFIG_MMC_PERF_PROFILING host->perf.start = ktime_get(); #endif } host->ops->request(host, mrq); }
/*
 * mmc_send_bus_test - run the CMD19/CMD14 bus-test handshake.
 *
 * @opcode selects direction (MMC_BUS_TEST_W writes the pattern,
 * MMC_BUS_TEST_R reads back the card's inverted echo); @len is the bus
 * width in bytes (8 or 4).  Returns 0 when the read-back matches the
 * expected inverted pattern, -EIO on mismatch, or a command/data error.
 */
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	/* Patterns per the eMMC bus-test protocol for 8- and 4-bit buses. */
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		printk(KERN_ERR "%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
	if (opcode == MMC_BUS_TEST_R) {
		/* The card echoes the pattern inverted: XOR must be 0xff.
		 * NOTE(review): only the first len/4 bytes are compared
		 * (2 bytes for 8-bit, 1 for 4-bit); the remaining pattern
		 * bytes are zero padding -- matches mainline behavior. */
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return err;
}
/*
 * Read and decode extended CSD.
 *
 * Older single-step variant: allocates a temporary 512-byte buffer, reads
 * EXT_CSD, decodes the fields this tree consumes (sector count, supported
 * speed/DDR card types, sleep timeout, erase/trim parameters under
 * CONFIG_MMC_DISCARD, HPI) and frees the buffer.  Unreadable EXT_CSD on a
 * non-high-capacity card is tolerated (returns 0).
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	/* Pre-v4 cards have no EXT_CSD; not an error. */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
			"version %d\n", mmc_hostname(card->host),
			card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	if (card->ext_csd.rev >= 2) {
		/* SEC_CNT is little-endian across four bytes. */
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		/* A non-zero sector count marks a sector-addressed card. */
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
	}

	/* Case labels are OR-combinations of CARD_TYPE bits; each exact
	 * combination selects the max transfer rate and DDR mode. */
	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
#ifdef CONFIG_MMC_DISCARD
	case EXT_CSD_CARD_TYPE_TEMP |EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
//		printk("[MMC] %s : MAX CSD TYPE\n",__func__);
		card->ext_csd.hs_max_dtr = 52000000;
		break;
#endif
	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
		break;
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}

	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
#ifdef CONFIG_MMC_DISCARD
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
#endif /* CONFIG_MMC_DISCARD */
	}
#ifdef CONFIG_MMC_DISCARD
	if (card->ext_csd.rev >= 4) {
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}
#endif /* CONFIG_MMC_DISCARD */
	card->ext_csd.hpi = ext_csd[EXT_CSD_HPI];
out:
	kfree(ext_csd);

	return err;
}
/*
 * Probe for the device
 *
 * Platform-probe for the AT91 MCI controller: allocates the mmc_host,
 * fills in capabilities from platform data, allocates a coherent DMA
 * bounce buffer, claims the optional detect/WP/VCC GPIOs, clock, MMIO
 * region and IRQ, then registers the host.  Resources are released in
 * reverse order through the fail* label chain -- keep label order in
 * sync with acquisition order.
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = 0;

	/* Geometry limits derived from the single bounce buffer below. */
	mmc->max_blk_size  = MCI_MAXBLKSIZE;
	mmc->max_blk_count = MCI_BLKATONCE;
	mmc->max_req_size  = MCI_BUFSIZE;
	mmc->max_segs      = MCI_BLKATONCE;
	mmc->max_seg_size  = MCI_BUFSIZE;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		/* 4-bit mode only works on MCI1 rev2xx silicon. */
		if (at91mci_is_mci1rev2xx())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE,
					&host->physical_address, GFP_KERNEL);
	if (!host->buffer) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Can't allocate transmit buffer\n");
		goto fail5;
	}

	/* Add SDIO capability when available */
	if (at91mci_is_mci1rev2xx()) {
		/* at91mci MCI1 rev2xx sdio interrupt erratum */
		if (host->board->wire4 || !host->board->slot_b)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (gpio_is_valid(host->board->det_pin)) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail4b;
		}
	}
	if (gpio_is_valid(host->board->wp_pin)) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (gpio_is_valid(host->board->vcc_pin)) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, resource_size(res));
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);	/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (gpio_is_valid(host->board->det_pin)) {
		/* detect line is active-low */
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;	/* no detect line: state unknown */

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (gpio_is_valid(host->board->det_pin)) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (gpio_is_valid(host->board->vcc_pin))
		gpio_free(host->board->vcc_pin);
fail3:
	if (gpio_is_valid(host->board->wp_pin))
		gpio_free(host->board->wp_pin);
fail4:
	if (gpio_is_valid(host->board->det_pin))
		gpio_free(host->board->det_pin);
fail4b:
	if (host->buffer)
		dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
				host->buffer, host->physical_address);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, resource_size(res));
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}
/* * Register a new MMC card with the driver model. */ int mmc_add_card(struct mmc_card *card) { int ret; const char *type; const char *uhs_bus_speed_mode = ""; static const char *const uhs_speeds[] = { [UHS_SDR12_BUS_SPEED] = "SDR12 ", [UHS_SDR25_BUS_SPEED] = "SDR25 ", [UHS_SDR50_BUS_SPEED] = "SDR50 ", [UHS_SDR104_BUS_SPEED] = "SDR104 ", [UHS_DDR50_BUS_SPEED] = "DDR50 ", }; dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); switch (card->type) { case MMC_TYPE_MMC: type = "MMC"; break; case MMC_TYPE_SD: type = "SD"; if (mmc_card_blockaddr(card)) { if (mmc_card_ext_capacity(card)) type = "SDXC"; else type = "SDHC"; } break; case MMC_TYPE_SDIO: type = "SDIO"; break; case MMC_TYPE_SD_COMBO: type = "SD-combo"; if (mmc_card_blockaddr(card)) type = "SDHC-combo"; break; default: type = "?"; break; } if (mmc_sd_card_uhs(card) && (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds))) uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed]; if (mmc_host_is_spi(card->host)) { pr_info("%s: new %s%s%s card on SPI\n", mmc_hostname(card->host), mmc_card_highspeed(card) ? "high speed " : "", mmc_card_ddr_mode(card) ? "DDR " : "", type); } else { pr_info("%s: new %s%s%s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_card_uhs(card) ? "ultra high speed " : (mmc_card_highspeed(card) ? "high speed " : ""), (mmc_card_hs200(card) ? "HS200 " : ""), mmc_card_ddr_mode(card) ? "DDR " : "", uhs_bus_speed_mode, type, card->rca); } #ifdef CONFIG_DEBUG_FS mmc_add_card_debugfs(card); #endif ret = device_add(&card->dev); if (ret) return ret; mmc_card_set_present(card); //merged from 8960_ICS,20121221,yeganlin mmc_card_host_inserted(card,1); return 0; }
/*
 * Probe an sdhci platform device: pick up platform data (from the
 * platform_device_id table or from dev.platform_data), map the register
 * window, and hand the controller to the sdhci core.  Unwinds every
 * acquired resource on failure via the goto chain at the bottom.
 */
static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
{
	const struct platform_device_id *platid = platform_get_device_id(pdev);
	struct sdhci_pltfm_data *pdata;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct resource *iomem;
	int ret;

	/* Id-table data takes precedence over board-supplied platform data. */
	if (platid && platid->driver_data)
		pdata = (void *)platid->driver_data;
	else
		pdata = pdev->dev.platform_data;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		/* NOTE(review): -ENOMEM for a missing MEM resource is
		 * misleading (-ENXIO would describe it better) — kept as-is
		 * since callers only test for non-zero. */
		ret = -ENOMEM;
		goto err;
	}

	/* SDHCI register file is 256 bytes; a smaller window is suspicious
	 * but not fatal, so only warn. */
	if (resource_size(iomem) < 0x100)
		dev_err(&pdev->dev, "Invalid iomem size. You may "
			"experience problems.\n");

	/* Some PCI-based MFD need the parent here */
	if (pdev->dev.parent != &platform_bus)
		host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
	else
		host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));

	if (IS_ERR(host)) {
		ret = PTR_ERR(host);
		goto err;
	}

	pltfm_host = sdhci_priv(host);

	host->hw_name = "platform";
	if (pdata && pdata->ops)
		host->ops = pdata->ops;
	else
		host->ops = &sdhci_pltfm_ops;
	if (pdata)
		host->quirks = pdata->quirks;
	/* NOTE(review): platform_get_irq() can return a negative error;
	 * presumably sdhci_add_host() rejects an invalid irq — TODO confirm. */
	host->irq = platform_get_irq(pdev, 0);

	if (!request_mem_region(iomem->start, resource_size(iomem),
		mmc_hostname(host->mmc))) {
		dev_err(&pdev->dev, "cannot request region\n");
		ret = -EBUSY;
		goto err_request;
	}

	host->ioaddr = ioremap(iomem->start, resource_size(iomem));
	if (!host->ioaddr) {
		dev_err(&pdev->dev, "failed to remap registers\n");
		ret = -ENOMEM;
		goto err_remap;
	}

	/* Optional board hook: runs before the core registers the host. */
	if (pdata && pdata->init) {
		ret = pdata->init(host, pdata);
		if (ret)
			goto err_plat_init;
	}

	ret = sdhci_add_host(host);
	if (ret)
		goto err_add_host;

	platform_set_drvdata(pdev, host);

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
err_add_host:
	if (pdata && pdata->exit)
		pdata->exit(host);
err_plat_init:
	iounmap(host->ioaddr);
err_remap:
	release_mem_region(iomem->start, resource_size(iomem));
err_request:
	sdhci_free_host(host);
err:
	printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret);
	return ret;
}
/*
 * Decode a raw EXT_CSD register block (512 bytes, already fetched by the
 * caller) into card->ext_csd, registering boot/GP/RPMB partitions as they
 * are discovered.  A NULL buffer is treated as "nothing to decode" and
 * succeeds.  Returns 0 on success or -EINVAL for an unrecognised
 * EXT_CSD structure/revision.
 *
 * Fix vs. original: dropped the redundant NULL check before kfree() in the
 * debug-dump tail — kfree(NULL) is a documented no-op.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 7) {
		pr_err("%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	/* Apply card-specific quirks before interpreting the fields. */
	mmc_fixup_device(card, mmc_fixups);

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] &&
		    mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		card->ext_csd.raw_partition_support =
			ext_csd[EXT_CSD_PARTITION_SUPPORT];
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled.  If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		/*
		 * General purpose partition feature support --
		 * If ext_csd has the size of general purpose partitions,
		 * set size, part_cfg, partition name in mmc_part.
		 */
		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				/* A zero size multiplier means "no partition". */
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports HPI */
		if ((ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) &&
			!(card->quirks & MMC_QUIRK_BROKEN_HPI)) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		/* check whether the eMMC card supports BKOPS */
		if ((ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
				card->ext_csd.hpi) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en &&
				card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BKOPS_EN, 1, 0);
				if (err)
					pr_warn("%s: Enabling BKOPS failed\n",
						mmc_hostname(card->host));
				else
					card->ext_csd.bkops_en = 1;
			}
		}

		pr_info("%s: BKOPS_EN bit = %d\n",
			mmc_hostname(card->host), card->ext_csd.bkops_en);

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT]) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	}

	if (mmc_card_mmc(card)) {
		char *buf;
		int i, j;
		ssize_t n = 0;

		/* Dump CID/CSD and the whole EXT_CSD for diagnostics. */
		pr_info("%s: cid %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_cid[0], card->raw_cid[1],
			card->raw_cid[2], card->raw_cid[3]);
		pr_info("%s: csd %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_csd[0], card->raw_csd[1],
			card->raw_csd[2], card->raw_csd[3]);

		buf = kmalloc(512, GFP_KERNEL);
		if (buf) {
			/* 32 rows of 16 bytes, highest offset first. */
			for (i = 0; i < 32; i++) {
				for (j = 511 - (16 * i); j >= 496 - (16 * i);
					j--)
					n += sprintf(buf + n, "%02x",
						ext_csd[j]);
				n += sprintf(buf + n, "\n");
				pr_info("%s: ext_csd %s",
					mmc_hostname(card->host), buf);
				n = 0;
			}
		}
		/* kfree(NULL) is a no-op, no need to guard. */
		kfree(buf);
	}

out:
	return err;
}
/* * Read and decode extended CSD. */ static int mmc_read_ext_csd(struct mmc_card *card) { int err; u8 *ext_csd; BUG_ON(!card); if (card->csd.mmca_vsn < CSD_SPEC_VER_4) return 0; /* * As the ext_csd is so large and mostly unused, we don't store the * raw block in mmc_card. */ ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) { printk(KERN_ERR "%s: could not allocate a buffer to " "receive the ext_csd.\n", mmc_hostname(card->host)); return -ENOMEM; } err = mmc_send_ext_csd(card, ext_csd); if (err) { /* If the host or the card can't do the switch, * fail more gracefully. */ if ((err != -EINVAL) && (err != -ENOSYS) && (err != -EFAULT)) goto out; /* * High capacity cards should have this "magic" size * stored in their CSD. */ if (card->csd.capacity == (4096 * 512)) { printk(KERN_ERR "%s: unable to read EXT_CSD " "on a possible high capacity card. " "Card will be ignored.\n", mmc_hostname(card->host)); } else { printk(KERN_WARNING "%s: unable to read " "EXT_CSD, performance might " "suffer.\n", mmc_hostname(card->host)); err = 0; } goto out; } card->ext_csd.rev = ext_csd[EXT_CSD_REV]; if (card->ext_csd.rev > 5) { printk(KERN_ERR "%s: unrecognised EXT_CSD structure " "version %d\n", mmc_hostname(card->host), card->ext_csd.rev); err = -EINVAL; goto out; } if (card->ext_csd.rev >= 2) { card->ext_csd.sectors = ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24; } switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: card->ext_csd.hs_max_dtr = 52000000; break; case EXT_CSD_CARD_TYPE_26: card->ext_csd.hs_max_dtr = 26000000; break; default: /* MMC v4 spec says this cannot happen */ printk(KERN_WARNING "%s: card is mmc v4 but doesn't " "support any high-speed modes.\n", mmc_hostname(card->host)); } if (card->ext_csd.rev >= 3) { u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; /* Sleep / awake timeout in 100ns units */ if (sa_shift > 
0 && sa_shift <= 0x17) card->ext_csd.sa_timeout = 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; } out: kfree(ext_csd); return err; }
static int mmc_select_powerclass(struct mmc_card *card, unsigned int bus_width, u8 *ext_csd) { int err = 0; unsigned int pwrclass_val; unsigned int index = 0; struct mmc_host *host; BUG_ON(!card); host = card->host; BUG_ON(!host); if (ext_csd == NULL) return 0; if (card->csd.mmca_vsn < CSD_SPEC_VER_4) return 0; if (bus_width == EXT_CSD_BUS_WIDTH_1) return 0; switch (1 << host->ios.vdd) { case MMC_VDD_165_195: if (host->ios.clock <= 26000000) index = EXT_CSD_PWR_CL_26_195; else if (host->ios.clock <= 52000000) index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? EXT_CSD_PWR_CL_52_195 : EXT_CSD_PWR_CL_DDR_52_195; else if (host->ios.clock <= 200000000) index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? EXT_CSD_PWR_CL_200_195 : EXT_CSD_PWR_CL_DDR_200_195; break; case MMC_VDD_27_28: case MMC_VDD_28_29: case MMC_VDD_29_30: case MMC_VDD_30_31: case MMC_VDD_31_32: case MMC_VDD_32_33: case MMC_VDD_33_34: case MMC_VDD_34_35: case MMC_VDD_35_36: if (host->ios.clock <= 26000000) index = EXT_CSD_PWR_CL_26_360; else if (host->ios.clock <= 52000000) index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? EXT_CSD_PWR_CL_52_360 : EXT_CSD_PWR_CL_DDR_52_360; else if (host->ios.clock <= 200000000) index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? EXT_CSD_PWR_CL_200_360 : EXT_CSD_PWR_CL_DDR_200_360; break; default: pr_warning("%s: Voltage range not supported " "for power class.\n", mmc_hostname(host)); return -EINVAL; } pwrclass_val = ext_csd[index]; if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8)) pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >> EXT_CSD_PWR_CL_8BIT_SHIFT; else
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 *
 * The sequence follows the MMC state machine: idle -> op-cond -> CID ->
 * RCA -> CSD -> select -> EXT_CSD -> high-speed -> clock -> bus width.
 * Statement order matters; each step depends on the card state left by
 * the previous one.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | MMC_CARD_SECTOR_ADDR, &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* On resume the same physical card must still be present. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/* Sector addressing only if the card acknowledged it in OCR. */
		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
			mmc_card_set_blockaddr(card);
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			/* -EBADMSG is non-fatal: stay at legacy timing. */
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
				mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Activate wide bus (if supported).
	 * Try 8-bit first (when the host allows it), then 4-bit, falling
	 * back to 1-bit if the bus test fails at each width.
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		unsigned ext_csd_bit, bus_width;

		if ((host->caps & MMC_CAP_8_BIT_DATA) &&
		    !(mmc_bustest(host, card, MMC_BUS_WIDTH_8))) {
			pr_info("Setting the bus width to 8 bit\n");
			ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
			bus_width = MMC_BUS_WIDTH_8;
		} else if (!(mmc_bustest(host, card, MMC_BUS_WIDTH_4))) {
			pr_info("Setting the bus width to 4 bit\n");
			ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
			bus_width = MMC_BUS_WIDTH_4;
		} else {
			pr_info("Setting the bus width to 1 bit\n");
			ext_csd_bit = EXT_CSD_BUS_WIDTH_1;
			bus_width = MMC_BUS_WIDTH_1;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BUS_WIDTH, ext_csd_bit);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			/* Non-fatal: keep the current (narrower) width. */
			printk(KERN_WARNING "%s: switch to bus width %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width);
			err = 0;
		} else {
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	/* Only free the card we allocated ourselves, never the resume card. */
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 *
 * SD init sequence: idle -> SEND_IF_COND (SD 2.0 probe) -> ACMD41 ->
 * CID -> RCA -> CSD -> select -> SCR -> switch info -> high-speed ->
 * clock -> 4-bit bus -> write-protect query.  Order is mandated by the
 * SD protocol state machine.
 */
static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	unsigned int max_dtr;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries;
#endif

	BUG_ON(!host);
	WARN_ON(!host->claimed);

#ifdef CONFIG_ARCH_EMXX
	/* initialize select state */
	host->select = 0xffffffff;
#endif

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/*
	 * If SD_SEND_IF_COND indicates an SD 2.0
	 * compliant card and we should set bit 30
	 * of the ocr to indicate that we can handle
	 * block-addressed SDHC cards.
	 */
	err = mmc_send_if_cond(host, ocr);
	if (!err)
		ocr |= 1 << 30;

	err = mmc_send_app_op_cond(host, ocr, NULL);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* On resume, the same physical card must still be present. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &sd_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_SD;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses: get card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_send_relative_addr(host, &card->rca);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;

		mmc_decode_cid(card);
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch SCR from card.
		 */
		err = mmc_app_send_scr(card, card->raw_scr);
		if (err)
			goto free_card;

		err = mmc_decode_scr(card);
		if (err < 0)
			goto free_card;

		/*
		 * Fetch switch information from card.
		 * With paranoid init, retry up to three times — some cards
		 * fail the first switch query after power-up.
		 */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
		for (retries = 1; retries <= 3; retries++) {
			err = mmc_read_switch(card);
			if (!err) {
				if (retries > 1) {
					printk(KERN_WARNING
						"%s: recovered\n",
						mmc_hostname(host));
				}
				break;
			} else {
				printk(KERN_WARNING
					"%s: read switch failed (attempt %d)\n",
					mmc_hostname(host), retries);
			}
		}
#else
		err = mmc_read_switch(card);
#endif
		if (err)
			goto free_card;
	}

	/*
	 * Attempt to change to high-speed (if supported)
	 */
	err = mmc_switch_hs(card);
	if (err)
		goto free_card;

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->sw_caps.hs_max_dtr)
			max_dtr = card->sw_caps.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Switch to wider bus (if supported).
	 */
	if ((host->caps & MMC_CAP_4_BIT_DATA) &&
		(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
		err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4);
		if (err)
			goto free_card;

		mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
	}

	/*
	 * Check if read-only switch is active.
	 */
	if (!oldcard) {
		if (!host->ops->get_ro || host->ops->get_ro(host) < 0) {
			/* No way to query the switch: default to writable. */
			printk(KERN_WARNING "%s: host does not "
				"support reading read-only "
				"switch. assuming write-enable.\n",
				mmc_hostname(host));
		} else {
			if (host->ops->get_ro(host) > 0)
				mmc_card_set_readonly(card);
		}
	}

#ifdef CONFIG_ARCH_EMXX
	if (!oldcard)
		host->card[0] = card;
#else
	if (!oldcard)
		host->card = card;
#endif

	return 0;

free_card:
	/* Only free a card we allocated here, never the resume card. */
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
/*
 * Read and decode extended CSD.
 *
 * Fetches the 512-byte EXT_CSD from a v4+ MMC card and extracts the
 * sector count and maximum high-speed data rate.  Pre-v4 cards have no
 * EXT_CSD and succeed trivially.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;
	unsigned int ext_csd_struct;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/*
		 * We allow hosts that cannot perform the command
		 * to fail more gracefully.
		 */
		if (err != -EINVAL)
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	ext_csd_struct = ext_csd[EXT_CSD_REV];
	/*
	 * HACK: Hynix MMC (manfid 0x90) reports "1" in the revision field,
	 * which would make the driver ignore the EXT_CSD sector count and
	 * fall back to the 1GB CSD size.  Ignoring ext_csd_struct for that
	 * vendor is a temporary workaround until the correct fix (or a
	 * fixed Hynix part) is available.
	 */
	if (ext_csd_struct >= 2 || card->cid.manfid == 0x90) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
	}

	switch (ext_csd[EXT_CSD_CARD_TYPE]) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
		goto out;
	}

out:
	kfree(ext_csd);

	return err;
}
/* * Fetches and decodes switch information */ static int mmc_read_switch(struct mmc_card *card) { int err; u8 *status; if (card->scr.sda_vsn < SCR_SPEC_VER_1) return 0; if (!(card->csd.cmdclass & CCC_SWITCH)) { printk(KERN_WARNING "%s: card lacks mandatory switch " "function, performance might suffer.\n", mmc_hostname(card->host)); return 0; } err = -EIO; status = kmalloc(64, GFP_KERNEL); if (!status) { printk(KERN_ERR "%s: could not allocate a buffer for " "switch capabilities.\n", mmc_hostname(card->host)); return -ENOMEM; } /* Find out the supported Bus Speed Modes. */ err = mmc_sd_switch(card, 0, 0, 1, status); if (err) { /* * If the host or the card can't do the switch, * fail more gracefully. */ if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) goto out; printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n", mmc_hostname(card->host)); err = 0; goto out; } if (card->scr.sda_spec3) { card->sw_caps.sd3_bus_mode = status[13]; /* Find out Driver Strengths supported by the card */ err = mmc_sd_switch(card, 0, 2, 1, status); if (err) { /* * If the host or the card can't do the switch, * fail more gracefully. */ if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) goto out; printk(KERN_WARNING "%s: problem reading " "Driver Strength.\n", mmc_hostname(card->host)); err = 0; goto out; } card->sw_caps.sd3_drv_type = status[9]; /* Find out Current Limits supported by the card */ err = mmc_sd_switch(card, 0, 3, 1, status); if (err) { /* * If the host or the card can't do the switch, * fail more gracefully. */ if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) goto out; printk(KERN_WARNING "%s: problem reading " "Current Limit.\n", mmc_hostname(card->host)); err = 0; goto out; } card->sw_caps.sd3_curr_limit = status[7]; } if (status[13] & 0x02) card->sw_caps.hs_max_dtr = 50000000; out: kfree(status); return err; }
/*
 * Starting point for MMC card init.
 *
 * Probes for an MMC card on the (already claimed) host, negotiates the
 * operating voltage, initialises the card and registers it with the
 * driver model.  On any failure the bus is detached and the error
 * logged.  The host must be claimed by the caller.
 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Query the card's supported voltage range (OCR). */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus_ops(host);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
			"below the defined range. These will be ignored.\n",
			mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	/*
	 * Release the host around device registration: mmc_add_card()
	 * may sleep in the driver core and must not hold the host claim.
	 */
	mmc_release_host(host);

	err = mmc_add_card(host->card);
	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Removal also must run without the host claimed. */
	mmc_release_host(host);
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}
/*
 * Pick the fastest UHS-I bus speed mode supported by both host and card,
 * switch the card into it, and program the host timing and clock.
 */
static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
{
	/*
	 * UHS-I modes in decreasing order of preference.  A mode is
	 * selected when the host advertises at least one bit of
	 * host_caps AND the card advertises card_mode.  The host cap
	 * masks for the slower SDR modes deliberately include the
	 * faster SDR caps: a host able to do SDR104 can also drive
	 * SDR50/SDR25/SDR12 if that is all the card offers.
	 */
	static const struct {
		u32 host_caps;		/* acceptable MMC_CAP_UHS_* bits */
		u32 card_mode;		/* required SD_MODE_UHS_* bit */
		unsigned int bus_speed;	/* UHS_*_BUS_SPEED switch argument */
		unsigned int timing;	/* MMC_TIMING_UHS_* host timing */
		unsigned int max_dtr;	/* max data rate in this mode */
	} modes[] = {
		{ MMC_CAP_UHS_SDR104, SD_MODE_UHS_SDR104,
		  UHS_SDR104_BUS_SPEED, MMC_TIMING_UHS_SDR104,
		  UHS_SDR104_MAX_DTR },
		{ MMC_CAP_UHS_DDR50, SD_MODE_UHS_DDR50,
		  UHS_DDR50_BUS_SPEED, MMC_TIMING_UHS_DDR50,
		  UHS_DDR50_MAX_DTR },
		{ MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50, SD_MODE_UHS_SDR50,
		  UHS_SDR50_BUS_SPEED, MMC_TIMING_UHS_SDR50,
		  UHS_SDR50_MAX_DTR },
		{ MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25,
		  SD_MODE_UHS_SDR25,
		  UHS_SDR25_BUS_SPEED, MMC_TIMING_UHS_SDR25,
		  UHS_SDR25_MAX_DTR },
		{ MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
		  MMC_CAP_UHS_SDR12, SD_MODE_UHS_SDR12,
		  UHS_SDR12_BUS_SPEED, MMC_TIMING_UHS_SDR12,
		  UHS_SDR12_MAX_DTR },
	};
	unsigned int selected_speed = 0, selected_timing = 0;
	unsigned int i;
	int err;

	/*
	 * If the host doesn't support any of the UHS-I modes, fallback on
	 * default speed.
	 */
	if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
		return 0;

	/* First match wins: the table is ordered fastest-first. */
	for (i = 0; i < ARRAY_SIZE(modes); i++) {
		if ((card->host->caps & modes[i].host_caps) &&
		    (card->sw_caps.sd3_bus_mode & modes[i].card_mode)) {
			selected_speed = modes[i].bus_speed;
			selected_timing = modes[i].timing;
			card->sw_caps.uhs_max_dtr = modes[i].max_dtr;
			break;
		}
	}

	card->sd_bus_speed = selected_speed;
	err = mmc_sd_switch(card, 1, 0, selected_speed, status);
	if (err)
		return err;

	/*
	 * The card echoes the selected function group 1 setting back in
	 * the low nibble of status byte 16; a mismatch means the switch
	 * did not take effect.
	 */
	if ((status[16] & 0xF) != selected_speed) {
		printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
			mmc_hostname(card->host));
	} else {
		mmc_set_timing(card->host, selected_timing);
		if (selected_timing == MMC_TIMING_UHS_DDR50)
			mmc_card_set_ddr_mode(card);
		mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
	}

	return 0;
}
/*
 * Decode extended CSD.
 *
 * Parses the 512-byte EXT_CSD register image into card->ext_csd,
 * gated on the EXT_CSD revision reported by the card.  A NULL
 * ext_csd (pre-4.0 card without an EXT_CSD) is not an error.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
				card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	/* This code understands EXT_CSD revisions up to 5 (eMMC 4.41). */
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		/* SEC_CNT is a 32-bit little-endian sector count. */
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	/*
	 * CARD_TYPE combinations: the DDR cases below only match when the
	 * card also advertises both 26MHz and 52MHz SDR support, as the
	 * spec requires.
	 */
	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
		break;
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time =
			10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		/* Timeouts scaled to ms; erase group size to sectors. */
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled. If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			u8 hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			u8 hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 * (bytes 136-139 hold ENH_START_ADDR, little-endian;
			 * sector-addressed cards report it in sectors)
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 * (bytes 140-142 hold ENH_SIZE_MULT)
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}

	if (card->ext_csd.rev >= 5)
		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];

	/* Remember what an erased region reads back as (0xFF or 0x00). */
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

out:
	return err;
}
/*
 * Given a 128-bit response, decode to our card CSD structure.
 *
 * CSD structure version 0 is byte-addressed (standard capacity);
 * version 1 is the block-addressed SDHC layout where most timing
 * fields are fixed by the spec and we hard-code them.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, csd_struct;
	u32 *resp = card->raw_csd;

	csd_struct = UNSTUFF_BITS(resp, 126, 2);

	switch (csd_struct) {
	case 0:
		/* TAAC: mantissa/exponent encoded access time. */
		m = UNSTUFF_BITS(resp, 115, 4);
		e = UNSTUFF_BITS(resp, 112, 3);
		csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
		csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;

		/* TRAN_SPEED: maximum data transfer rate. */
		m = UNSTUFF_BITS(resp, 99, 4);
		e = UNSTUFF_BITS(resp, 96, 3);
		csd->max_dtr = tran_exp[e] * tran_mant[m];
		csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);

		/* Capacity = (C_SIZE + 1) << (C_SIZE_MULT + 2) blocks. */
		e = UNSTUFF_BITS(resp, 47, 3);
		m = UNSTUFF_BITS(resp, 62, 12);
		csd->capacity = (1 + m) << (e + 2);

		csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
		csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
		csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
		csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
		csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
		csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
		csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

		/*
		 * ERASE_BLK_EN set means single-block erase granularity;
		 * otherwise the erase unit is SECTOR_SIZE + 1 write blocks,
		 * converted here to 512-byte units.
		 */
		if (UNSTUFF_BITS(resp, 46, 1)) {
			csd->erase_size = 1;
		} else if (csd->write_blkbits >= 9) {
			csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
			csd->erase_size <<= csd->write_blkbits - 9;
		}
		break;
	case 1:
		/*
		 * This is a block-addressed SDHC card. Most
		 * interesting fields are unused and have fixed
		 * values. To avoid getting tripped by buggy cards,
		 * we assume those fixed values ourselves.
		 */
		mmc_card_set_blockaddr(card);

		csd->tacc_ns = 0; /* Unused */
		csd->tacc_clks = 0; /* Unused */

		m = UNSTUFF_BITS(resp, 99, 4);
		e = UNSTUFF_BITS(resp, 96, 3);
		csd->max_dtr = tran_exp[e] * tran_mant[m];
		csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);

		/* Capacity = (C_SIZE + 1) * 1024 blocks of 512 bytes. */
		m = UNSTUFF_BITS(resp, 48, 22);
		csd->capacity = (1 + m) << 10;

		csd->read_blkbits = 9;
		csd->read_partial = 0;
		csd->write_misalign = 0;
		csd->read_misalign = 0;
		csd->r2w_factor = 4; /* Unused */
		csd->write_blkbits = 9;
		csd->write_partial = 0;
		csd->erase_size = 1;
		break;
	default:
		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd_struct);
		return -EINVAL;
	}

	card->erase_size = csd->erase_size;

	return 0;
}
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 *
 * Called with the host claimed.  On success the card is selected,
 * running at its negotiated speed/width, and (for a fresh probe)
 * installed as host->card.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state. We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		/* Resume path: the card we find must be the one we had. */
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);

		/*
		 * NOTE(review): this looks redundant with the
		 * (rocr & (1<<30)) check above if MMC_CARD_SECTOR_ADDR
		 * is also bit 30 -- confirm the constant's value before
		 * removing either check.
		 */
		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
			mmc_card_set_blockaddr(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit. This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1, 0);

		/* -EBADMSG (CRC trouble on the response) is tolerated. */
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1, 0);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			/* Non-fatal: continue at legacy speed. */
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).  The host must advertise
	 * both the voltage-specific DDR cap and MMC_CAP_UHS_DDR50.
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
			ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		/* SDR and DDR bus width arguments, widest first. */
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		/* Try each width, widest first, until one verifies. */
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0],
					 0);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);
				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					err = mmc_compare_ext_csds(card,
						bus_width);
				else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		/* SDR width is up; now switch that width to DDR timing. */
		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BUS_WIDTH,
					ext_csd_bits[idx][1],
					0);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120, 0);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	/* Only free the card if we allocated it here (not on resume). */
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}
/*
 * Read and parse the CIS (Card Information Structure) tuple chain for
 * the common CIS (func == NULL) or a function's CIS.  Tuples the core
 * cannot parse are queued on the card's/function's tuple list for the
 * function driver; ownership of queued tuples transfers to that list.
 */
static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
{
	int ret;
	struct sdio_func_tuple *this, **prev;
	unsigned i, ptr = 0;

	/*
	 * Note that this works for the common CIS (function number 0) as
	 * well as a function's CIS * since SDIO_CCCR_CIS and SDIO_FBR_CIS
	 * have the same offset.
	 */
	for (i = 0; i < 3; i++) {
		unsigned char x, fn;

		if (func)
			fn = func->num;
		else
			fn = 0;

		/* The CIS pointer is a 24-bit little-endian address. */
		ret = mmc_io_rw_direct(card, 0, 0,
			SDIO_FBR_BASE(fn) + SDIO_FBR_CIS + i, 0, &x);
		if (ret)
			return ret;
		ptr |= x << (i * 8);
	}

	if (func)
		prev = &func->tuples;
	else
		prev = &card->tuples;

	BUG_ON(*prev);	/* list must be empty: we overwrite *prev below */

	do {
		unsigned char tpl_code, tpl_link;

		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
		if (ret)
			break;

		/* 0xff means we're done */
		if (tpl_code == 0xff)
			break;

		/* null entries have no link field or data */
		if (tpl_code == 0x00) {
			/*
			 * Vendor quirk: stop scanning on a null tuple for
			 * these specific vendor/device IDs -- presumably
			 * their CIS is broken past this point.  TODO:
			 * confirm which chips these IDs belong to.
			 */
			if (card->cis.vendor == 0x70 &&
				(card->cis.device == 0x2460 ||
				 card->cis.device == 0x0460 ||
				 card->cis.device == 0x23F1 ||
				 card->cis.device == 0x23F0))
				break;
			else
				continue;
		}

		ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
		if (ret)
			break;

		/* a size of 0xff also means we're done */
		if (tpl_link == 0xff)
			break;

		/* Tuple body follows the header; read it byte by byte. */
		this = kmalloc(sizeof(*this) + tpl_link, GFP_KERNEL);
		if (!this)
			return -ENOMEM;

		for (i = 0; i < tpl_link; i++) {
			ret = mmc_io_rw_direct(card, 0, 0,
					       ptr + i, 0, &this->data[i]);
			if (ret)
				break;
		}
		if (ret) {
			kfree(this);
			break;
		}

		/* Try to parse the CIS tuple */
		ret = cis_tpl_parse(card, func, "CIS",
				    cis_tpl_list, ARRAY_SIZE(cis_tpl_list),
				    tpl_code, this->data, tpl_link);
		if (ret == -EILSEQ || ret == -ENOENT) {
			/*
			 * The tuple is unknown or known but not parsed.
			 * Queue the tuple for the function driver.
			 */
			this->next = NULL;
			this->code = tpl_code;
			this->size = tpl_link;
			*prev = this;
			prev = &this->next;

			if (ret == -ENOENT) {
				/* warn about unknown tuples */
				pr_warning("%s: queuing unknown"
				       " CIS tuple 0x%02x (%u bytes)\n",
				       mmc_hostname(card->host),
				       tpl_code, tpl_link);
			}

			/* keep on analyzing tuples */
			ret = 0;
		} else {
			/*
			 * We don't need the tuple anymore if it was
			 * successfully parsed by the SDIO core or if it is
			 * not going to be queued for a driver.
			 */
			kfree(this);
		}

		ptr += tpl_link;
	} while (!ret);

	/*
	 * Link in all unknown tuples found in the common CIS so that
	 * drivers don't have to go digging in two places.
	 */
	if (func)
		*prev = card->tuples;

	return ret;
}
/*
 * Starting point for SD card init.
 *
 * Locking contract (older API variant, takes the OCR as a parameter):
 * the caller enters with the host claimed, and every exit path --
 * success and failure alike -- returns with the host RELEASED.  Note
 * this differs from mmc_attach_mmc() in this file, which returns with
 * the host still claimed.
 */
int mmc_attach_sd(struct mmc_host *host, u32 ocr)
{
	int err;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	mmc_sd_attach_bus_ops(host);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		mmc_go_idle(host);

		err = mmc_spi_read_ocr(host, 0, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.  Bits 0-6 are below the range defined by the spec.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	if (ocr & MMC_VDD_165_195) {
		printk(KERN_WARNING "%s: SD card claims to support the "
		       "incompletely defined 'low voltage range'. This "
		       "will be ignored.\n", mmc_hostname(host));
		ocr &= ~MMC_VDD_165_195;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage(s) of the card(s)?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_sd_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	/* Release the host claim before registering with the driver core. */
	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Host is already released here; re-claim so the shared error
	 * path below can release exactly once. */
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
		mmc_hostname(host), err);

	return err;
}
/*
 * Decode extended CSD.
 *
 * Vendor-extended variant: also understands EXT_CSD revisions 6/7
 * (eMMC 4.5/5.0), HS200 card types, HPI, discard, cache size and the
 * optional MMC_CHECK_EXT_CSD debug hooks.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
				card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
#if defined(MMC_CHECK_EXT_CSD)
			/* For debugging about ext_csd register value */
			mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_STRUCTURE);
#endif
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	/* eMMC 4.5 : ext_csd rev. is 6
	 * eMMC 5.0 : ext_csd rev. is 7
	 * It's temporary change.
	 */
	if (card->ext_csd.rev > 7) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
#if defined(MMC_CHECK_EXT_CSD)
		/* For debugging about ext_csd register value */
		mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_REV);
#endif
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		/* SEC_CNT is a 32-bit little-endian sector count. */
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	if (card->host->caps2 & MMC_CAP2_HS200) {
		/* Host can do HS200: match against the full type mask. */
		switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
		case EXT_CSD_CARD_TYPE_SDR_ALL:
		case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
		case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
		case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
			card->ext_csd.hs_max_dtr = 200000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
			break;
		case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
		case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
		case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
		case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
			card->ext_csd.hs_max_dtr = 200000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
			break;
		case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
		case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
		case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
		case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
			card->ext_csd.hs_max_dtr = 200000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
			break;
		case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
		     EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
			break;
		case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
		     EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
			break;
		case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
		     EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
			break;
		case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			break;
		case EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 26000000;
			break;
		default:
#if defined(MMC_CHECK_EXT_CSD)
			/* For debugging about ext_csd register value */
			mmc_error_ext_csd(card, ext_csd, 0,
				EXT_CSD_CARD_TYPE);
#endif
			/* MMC v4 spec says this cannot happen */
			printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
				"support any high-speed modes.\n",
				mmc_hostname(card->host));
#if defined(MMC_RETRY_READ_EXT_CSD)
			err = -EINVAL;
			goto out;
#endif
		}
	} else {
		/* Host can't do HS200: mask the HS200 bits out first. */
		pr_debug("%s: Ignore device type HS200.\n",
			mmc_hostname(card->host));
		switch (ext_csd[EXT_CSD_CARD_TYPE] &
			EXT_CSD_CARD_TYPE_NO_HS200_MASK) {
		case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
		     EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
			break;
		case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
		     EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
			break;
		case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
		     EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
			break;
		case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 52000000;
			break;
		case EXT_CSD_CARD_TYPE_26:
			card->ext_csd.hs_max_dtr = 26000000;
			break;
		default:
#if defined(MMC_CHECK_EXT_CSD)
			/* For debugging about ext_csd register value */
			mmc_error_ext_csd(card, ext_csd, 0,
				EXT_CSD_CARD_TYPE);
#endif
			/* MMC v4 spec says this cannot happen */
			printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
				"support any high-speed modes.\n",
				mmc_hostname(card->host));
#if defined(MMC_RETRY_READ_EXT_CSD)
			err = -EINVAL;
			goto out;
#endif
		}
	}

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
		card->ext_csd.boot_part_prot =
			ext_csd[EXT_CSD_BOOT_CONFIG_PROT];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time =
			10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
				1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		/* Timeouts scaled to ms; erase group size to sectors. */
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support =
		ext_csd[EXT_CSD_PARTITION_SUPPORT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled. If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			u8 hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			u8 hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 * (bytes 136-139 hold ENH_START_ADDR, little-endian;
			 * sector-addressed cards report it in sectors)
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 * (bytes 140-142 hold ENH_SIZE_MULT)
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}

	if (card->ext_csd.rev >= 5) {
		/*
		 * enable discard feature if emmc is 4.41+
		 * NOTE(review): manfid 0x15 gates this quirk -- presumably
		 * a specific manufacturer; confirm before extending.
		 */
		if ((ext_csd[EXT_CSD_VENDOR_SPECIFIC_FIELD + 0] & 0x1) &&
		    (card->cid.manfid == 0x15))
			card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	/* Remember what an erased region reads back as (0xFF or 0x00). */
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		/* Generic CMD6 time is in units of 10ms; store in ms. */
		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		/* CACHE_SIZE is a 32-bit little-endian value. */
		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	}

out:
	return err;
}
/*
 * Probe an SDHCI platform device: map its register window, wire up the
 * platform-supplied ops/quirks, and register the host with the SDHCI
 * core.  Error paths unwind in strict reverse order of acquisition.
 */
static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
{
//HTC_CSP_START
#if defined(CONFIG_MACH_ENDEAVORU) || defined(CONFIG_MACH_ENDEAVORTD)
	int addr = 0;	/* MMIO base, used to spot the WLAN SDIO slot below */
#endif
//HTC_CSP_END
	const struct platform_device_id *platid = platform_get_device_id(pdev);
	struct sdhci_pltfm_data *pdata;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct resource *iomem;
	int ret;

	/* Platform data may come via the id table or the board file. */
	if (platid && platid->driver_data)
		pdata = (void *)platid->driver_data;
	else
		pdata = pdev->dev.platform_data;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem) {
		ret = -ENOMEM;
		goto err;
	}

//HTC_CSP_START
#if defined(CONFIG_MACH_ENDEAVORU) || defined(CONFIG_MACH_ENDEAVORTD)
	addr = iomem->start;
	//printk(KERN_INFO "start addr = 0x%x\n", addr);
#endif
//HTC_CSP_END

	/* The SDHCI register file is 256 bytes; warn but carry on. */
	if (resource_size(iomem) < 0x100)
		dev_err(&pdev->dev, "Invalid iomem size. You may "
			"experience problems.\n");

	/* Some PCI-based MFD need the parent here */
	if (pdev->dev.parent != &platform_bus)
		host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
	else
		host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));

	if (IS_ERR(host)) {
		ret = PTR_ERR(host);
		goto err;
	}

	pltfm_host = sdhci_priv(host);

	host->hw_name = "platform";
	/* Board-specific ops/quirks override the generic platform ops. */
	if (pdata && pdata->ops)
		host->ops = pdata->ops;
	else
		host->ops = &sdhci_pltfm_ops;
	if (pdata)
		host->quirks = pdata->quirks;
	host->irq = platform_get_irq(pdev, 0);

	if (!request_mem_region(iomem->start, resource_size(iomem),
		mmc_hostname(host->mmc))) {
		dev_err(&pdev->dev, "cannot request region\n");
		printk(KERN_INFO "%s: cannot request region\n", __func__);
		ret = -EBUSY;
		goto err_request;
	}

	host->ioaddr = ioremap(iomem->start, resource_size(iomem));
	if (!host->ioaddr) {
		dev_err(&pdev->dev, "failed to remap registers\n");
		ret = -ENOMEM;
		goto err_remap;
	}

	/* Give the board code a chance to do late setup (clocks, etc.). */
	if (pdata && pdata->init) {
		ret = pdata->init(host, pdata);
		if (ret)
			goto err_plat_init;
	}

	ret = sdhci_add_host(host);
	if (ret)
		goto err_add_host;

	platform_set_drvdata(pdev, host);

//HTC_CSP_START
#if defined(CONFIG_MACH_ENDEAVORU) || defined(CONFIG_MACH_ENDEAVORTD)
	//printk(KERN_INFO "[SD] SdioDrv_probe pdev:0x%x mmc:0x%x mmc->index=%d pdev->resource[1].start=%x, addr=%x\n",
	//	(int)pdev, host->mmc, host->mmc->index, pdev->resource[1].start, addr);
	/*
	 * Stash the controller backing the WLAN SDIO slot so the WiFi
	 * driver can find it later (matched by MMIO base address).
	 */
	if (addr == TEGRA_SDMMC3_BASE) {
		// printk(KERN_INFO "[SD] Save sdmmc3 dev, mmc\n");
		// printk(KERN_INFO "[SD] SdioDrv_probe pdev:0x%x mmc:0x%x mmc->index=%d pdev->resource[1].start=%x, addr=%x\n",
		//	(int)pdev, host->mmc, host->mmc->index, pdev->resource[1].start, addr);
		g_wlan_sdioDrv.pdev = pdev;
		g_wlan_sdioDrv.mmc = host->mmc;
		//TODO wlan_perf_lock = 0;
	}
#endif
//HTC_CSP_END

	return 0;

err_add_host:
	if (pdata && pdata->exit)
		pdata->exit(host);
err_plat_init:
	iounmap(host->ioaddr);
err_remap:
	release_mem_region(iomem->start, resource_size(iomem));
err_request:
	sdhci_free_host(host);
err:
	printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret);
	return ret;
}