Example #1
/*
 * Suspend callback from host.
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err = 0;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	if (host->card->cid.manfid == 0x15) {
		if (!mmc_host_is_spi(host))
			mmc_deselect_cards(host);
	} else {
		/* needed for successful resume of iNAND (manfid 0x45) */
		if (!strcmp(mmc_hostname(host), "mmc0") &&
		    host->card->cid.manfid == 0x45)
			mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
				   EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1, 0);

		if (mmc_card_can_sleep(host))
			err = mmc_card_sleep(host);
		else if (!mmc_host_is_spi(host))
			mmc_deselect_cards(host);
	}
	host->card->state &= ~MMC_STATE_HIGHSPEED;
	mmc_release_host(host);

	return err;
}
Example #2
static int
mmc_set_timing(struct mmc_softc *sc, int timing)
{
	int err;
	uint8_t	value;
	u_char switch_res[64];

	switch (timing) {
	case bus_timing_normal:
		value = 0;
		break;
	case bus_timing_hs:
		value = 1;
		break;
	default:
		return (MMC_ERR_INVALID);
	}
	if (mmcbr_get_mode(sc->dev) == mode_sd)
		err = mmc_sd_switch(sc, SD_SWITCH_MODE_SET, SD_SWITCH_GROUP1,
		    value, switch_res);
	else
		err = mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL,
		    EXT_CSD_HS_TIMING, value);
	return (err);
}
Example #3
/*
 * Modify EXT_CSD[177] which is BOOT_BUS_WIDTH
 * based on the passed in values for BOOT_BUS_WIDTH, RESET_BOOT_BUS_WIDTH
 * and BOOT_MODE.
 *
 * Returns 0 on success.
 */
int mmc_set_boot_bus_width(struct mmc *mmc, u8 width, u8 reset, u8 mode)
{
	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_BUS_WIDTH,
			  EXT_CSD_BOOT_BUS_WIDTH_MODE(mode) |
			  EXT_CSD_BOOT_BUS_WIDTH_RESET(reset) |
			  EXT_CSD_BOOT_BUS_WIDTH_WIDTH(width));
}
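
A worked value may help here (illustrative only, not taken from the sources above): assuming the usual U-Boot layout of EXT_CSD[177], with BOOT_MODE in bits 4:3, RESET_BOOT_BUS_WIDTH in bit 2 and BOOT_BUS_WIDTH in bits 1:0, the hypothetical helper below shows what a typical call ends up writing.

/*
 * Hypothetical usage sketch: select x4 boot bus width, keep it after
 * boot, and boot in backwards-compatible SDR mode.  Under the bit
 * layout assumed above this writes (1 << 2) | 1 = 0x05 to EXT_CSD[177].
 */
static int example_set_x4_boot_bus(struct mmc *mmc)
{
	return mmc_set_boot_bus_width(mmc, 1, 1, 0);
}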
Example #4
/*
 * Suspend callback from host.
 */
static int mmc_suspend(struct mmc_host *host)
{
	int err = 0;
#ifdef CONFIG_HUAWEI_KERNEL
    struct mmc_cid card_cid;
#endif
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	/*
	 * Set the bus width to 1 bit on SanDisk eMMC before suspend.
	 * This is a workaround to avoid CMD2 CRC errors when the card
	 * wakes from suspend; it should be revisited once confirmed
	 * with SanDisk.
	 */
#ifdef CONFIG_HUAWEI_KERNEL
	card_cid = host->card->cid;
	if (card_cid.manfid == HUAWEI_SANDISK_MID) {
		printk("Setting the bus width to 1 bit before sleep\n");
		mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
	}
#endif
	if (mmc_card_can_sleep(host))
		err = mmc_card_sleep(host);
	else if (!mmc_host_is_spi(host))
		mmc_deselect_cards(host);
	host->card->state &= ~MMC_STATE_HIGHSPEED;
	mmc_release_host(host);

	return err;
}
Example #5
static int mmc_poweroff_notify(struct mmc_host *host, int notify)
{
	struct mmc_card *card;
	unsigned int timeout;
	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
	int err;

	card = host->card;

	if (notify == MMC_PW_OFF_NOTIFY_SHORT) {
		notify_type = EXT_CSD_POWER_OFF_SHORT;
		timeout = card->ext_csd.generic_cmd6_time;
	} else if (notify == MMC_PW_OFF_NOTIFY_LONG) {
		notify_type = EXT_CSD_POWER_OFF_LONG;
		timeout = card->ext_csd.power_off_longtime;
	} else {
		pr_info("%s: mmc_poweroff_notify called "
			"with notify type %d\n", mmc_hostname(host), notify);
		return -EINVAL;
	}

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_POWER_OFF_NOTIFICATION,
			 notify_type, timeout, 1);

	if (err)
		pr_err("%s: Device failed to respond within %d "
		       "poweroff timeout.\n", mmc_hostname(host), timeout);
	else
		card->poweroff_notify_state =
					MMC_NO_POWER_NOTIFICATION;

	return err;
}
Example #6
int mmc_switch_part(int dev_num, unsigned int part_num)
{
	struct mmc *mmc = find_mmc_device(dev_num);

	if (!mmc)
		return -1;

	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			  (mmc->part_config & ~PART_ACCESS_MASK)
			  | (part_num & PART_ACCESS_MASK));
}
Example #7
static int mmc_switch_part(struct mmc_card *card, u8 part)
{
	int ret;

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_PART_CONFIG, part,
			card->ext_csd.part_time);
	if (ret)
		pr_err("%s: switch failed with %d, part %d\n",
				__func__, ret, part);

	return ret;
}
Example #8
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
	u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
	int err;

	if (!card->ext_csd.cmdq_support)
		return -EOPNOTSUPP;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
			 val, card->ext_csd.generic_cmd6_time);
	if (!err)
		card->ext_csd.cmdq_en = enable;

	return err;
}
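
Note that mmc_cmdq_switch() above only caches cmdq_en when the CMD6 succeeds. A caller-side sketch (hypothetical, not from the original driver) would typically bracket a non-queueable operation by disabling and re-enabling command queueing:

/*
 * Hypothetical caller, for illustration only: turn CMDQ off around an
 * operation that cannot be queued, then restore it.  A card without
 * CMDQ support returns -EOPNOTSUPP, which is treated as "nothing to do".
 */
static int example_run_without_cmdq(struct mmc_card *card)
{
	int err = mmc_cmdq_switch(card, false);

	if (err && err != -EOPNOTSUPP)
		return err;

	/* ... issue the non-queueable command(s) here ... */

	return card->ext_csd.cmdq_support ? mmc_cmdq_switch(card, true) : 0;
}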
Example #9
/*
 * Modify EXT_CSD[179] which is PARTITION_CONFIG (formerly BOOT_CONFIG)
 * based on the passed in values for BOOT_ACK, BOOT_PARTITION_ENABLE and
 * PARTITION_ACCESS.
 *
 * Returns 0 on success.
 */
int mmc_set_part_conf(struct mmc *mmc, u8 ack, u8 part_num, u8 access)
{
	int ret;
	u8 part_conf;

	part_conf = EXT_CSD_BOOT_ACK(ack) |
		    EXT_CSD_BOOT_PART_NUM(part_num) |
		    EXT_CSD_PARTITION_ACCESS(access);

	ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONF,
			 part_conf);
	if (!ret)
		mmc->part_config = part_conf;

	return ret;
}
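
For reference, a worked value (illustrative only, and assuming the conventional PARTITION_CONFIG layout of BOOT_ACK in bit 6, BOOT_PARTITION_ENABLE in bits 5:3 and PARTITION_ACCESS in bits 2:0):

/*
 * Hypothetical usage sketch: request a boot acknowledge, boot from boot
 * partition 1, and keep the user area selected for access.  Under the
 * layout assumed above this writes (1 << 6) | (1 << 3) | 0 = 0x48 into
 * EXT_CSD[179].
 */
static int example_boot_from_part1(struct mmc *mmc)
{
	return mmc_set_part_conf(mmc, 1, 1, 0);
}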
Example #10
int mmc_change_freq(struct mmc *mmc)
{
	char ext_csd[512];
	char cardtype;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	cardtype = ext_csd[196] & 0xf;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);

	if (err)
		return err;

	/* Now check to see that it worked */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* No high-speed support */
	if (!ext_csd[185])
		return 0;

	/* High Speed is set, there are two types: 52MHz and 26MHz */
	if (cardtype & MMC_HS_52MHZ)
		mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
	else
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
Example #11
/*
 * Suspend callback from host.
 */
static int mmc_suspend(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);
	if (!mmc_host_is_spi(host)) {
#if defined(CONFIG_PHONE_ARIES_CTC) /* iNAND 24n patch */
		if (!strcmp(mmc_hostname(host), "mmc0"))
			mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1, 0);
#endif
		mmc_deselect_cards(host);
	}
	host->card->state &= ~MMC_STATE_HIGHSPEED;
	mmc_release_host(host);

	return 0;
}
Example #12
static int
mmc_set_card_bus_width(struct mmc_softc *sc, uint16_t rca, int width)
{
	struct mmc_command cmd;
	int err;
	uint8_t	value;

	if (mmcbr_get_mode(sc->dev) == mode_sd) {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = ACMD_SET_BUS_WIDTH;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		switch (width) {
		case bus_width_1:
			cmd.arg = SD_BUS_WIDTH_1;
			break;
		case bus_width_4:
			cmd.arg = SD_BUS_WIDTH_4;
			break;
		default:
			return (MMC_ERR_INVALID);
		}
		err = mmc_wait_for_app_cmd(sc, rca, &cmd, CMD_RETRIES);
	} else {
		switch (width) {
		case bus_width_1:
			value = EXT_CSD_BUS_WIDTH_1;
			break;
		case bus_width_4:
			value = EXT_CSD_BUS_WIDTH_4;
			break;
		case bus_width_8:
			value = EXT_CSD_BUS_WIDTH_8;
			break;
		default:
			return (MMC_ERR_INVALID);
		}
		err = mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
		    value);
	}
	return (err);
}
Example #13
int mmc_bkops(struct mmc_card *card, int start)
{
	int err;
	int retry = 3;
	struct mmc_command cmd = {0};
	BUG_ON(!card);
	BUG_ON(!card->host);

	if (start) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_BKOPS_START, 1, 0);
		if (err)
			printk(KERN_ERR "%s : %s start bkops fail err = %d\n",
				mmc_hostname(card->host), __func__, err);
		else
			printk(KERN_DEBUG "%s : %s start bkops!!\n",
				mmc_hostname(card->host), __func__);
	} else {

		do {
			cmd.opcode = card->ext_csd.hpi_cmd;
			if (cmd.opcode == MMC_SEND_STATUS) {
				cmd.arg = (card->rca << 16 | 0x1);
				cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
			} else {
				cmd.arg =  (card->rca << 16 | 0x1);
				cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			}
			err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
		} while (err && retry--);

		if (err || !retry) {
			printk(KERN_DEBUG "%s : %s stop bkops fail retry %d\n",
			mmc_hostname(card->host), __func__, retry);
		} else {
			printk(KERN_DEBUG "%s : %s stop bkops\n",
			mmc_hostname(card->host), __func__);
		}
	}
	return err;
}
Example #14
/*
 * Execute switch cmd and wait for the card to exit the busy state
 */
static int mmc_execute_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
{
	int err;

	err = mmc_switch(card, set, index, value);

	if (err)
		goto err;

	/*
	 * Workaround for a SanDisk card issue: a 1 ms delay is needed
	 * between CMD6 and the following commands.
	 */
	if (card->cid.manfid == 0x02)
		mdelay(1);

	/* TODO: replace the magic numbers with defines */
	err = mmc_wait_not_busy(card, 30, 5);
err:

	return err;
}
Example #15
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
#if defined(CONFIG_INAND_VERSION_PATCH)
	u32 rocr[1];
#endif
	unsigned int max_dtr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
#if defined(CONFIG_INAND_VERSION_PATCH)		
	err = mmc_send_op_cond(host, ocr | (1 << 30), rocr);
#else
	err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
#endif
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
		host->card = card;
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;
#if defined(CONFIG_INAND_VERSION_PATCH)		
		if (rocr[0] & 0x40000000)
			mmc_card_set_blockaddr(card);
#endif
//[NAGSM_Android_HDLNC_SDcard_SEOJW_2011_01_12 : eMMC Trim add 
#if defined (CONFIG_MMC_DISCARD) && defined (CONFIG_S5PC110_DEMPSEY_BOARD)
		mmc_set_erase_size(card);
#endif /* CONFIG_MMC_DISCARD */
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Activate wide bus (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		unsigned ext_csd_bit, bus_width;

		if (host->caps & MMC_CAP_8_BIT_DATA) {
			ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
			bus_width = MMC_BUS_WIDTH_8;
		} else {
			ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
			bus_width = MMC_BUS_WIDTH_4;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bit);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d "
			       "failed\n", mmc_hostname(card->host),
			       1 << bus_width);
			err = 0;
		} else {
			mmc_set_bus_width(card->host, bus_width);
			printk(KERN_WARNING "%s: switch to bus width %d\n",
			       mmc_hostname(card->host), 1 << bus_width);
		}
	}

	return 0;

free_card:
	if (!oldcard) {
		mmc_remove_card(card);
		host->card = NULL;
	}
err:

	return err;
}
Example #16
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	int card_is_null = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */

		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1, 0);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1, 0);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	if (!host->card) {
		host->card = card;
		card_is_null = 1;
	}

	if (card->host->ops->execute_tuning)
		card->host->ops->execute_tuning(card->host);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0],
					 0);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);

				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) {
					/*
					err = mmc_compare_ext_csds(card,
						ext_csd,
						bus_width);
					*/
				} else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][1],
					 0);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			if (card_is_null)
				host->card = NULL;

			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120, 0);
				if (err) {
					if (card_is_null)
						host->card = NULL;
					goto err;
				}
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

	if (card->host->ops->execute_tuning)
		card->host->ops->execute_tuning(card->host);

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}
Example #17
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl)
{
    char l_buf[512];
    struct scatterlist msdc_sg;
    struct mmc_data  msdc_data;
    struct mmc_command msdc_cmd;
    struct mmc_command msdc_stop;

#ifdef MTK_MSDC_USE_CMD23
	struct mmc_command msdc_sbc;
#endif
    
    struct mmc_request  msdc_mrq;
    struct msdc_host *host_ctl;
   
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];
    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user wants to access partition %d\n", msdc_ctl->partition);
#endif

    mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
    switch (msdc_ctl->partition){
        case BOOT_PARTITION_1:
            if (0x1 != (l_buf[179] & 0x7)){
                /* change to access boot partition 1 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x1;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        case BOOT_PARTITION_2:
            if (0x2 != (l_buf[179] & 0x7)){
                /* change to access boot partition 2 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x2;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        default:
            /* make sure access partition is user data area */
            if (0 != (l_buf[179] & 0x7)){
                /* set back to access user area */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x0;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
    }

    if(msdc_ctl->total_size > 64*1024){
        msdc_ctl->result = -1;
        return  msdc_ctl->result;
    }

    memset(&msdc_data, 0, sizeof(struct mmc_data));
    memset(&msdc_mrq, 0, sizeof(struct mmc_request));
    memset(&msdc_cmd, 0, sizeof(struct mmc_command));
    memset(&msdc_stop, 0, sizeof(struct mmc_command));

#ifdef MTK_MSDC_USE_CMD23
    memset(&msdc_sbc, 0, sizeof(struct mmc_command));
#endif

    msdc_mrq.cmd = &msdc_cmd;
    msdc_mrq.data = &msdc_data;

    if(msdc_ctl->trans_type)
        dma_force[host_ctl->id] = FORCE_IN_DMA;
    else
        dma_force[host_ctl->id] = FORCE_IN_PIO;

    if (msdc_ctl->iswrite){
        msdc_data.flags = MMC_DATA_WRITE;
        msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                return -EFAULT; 
            }
        } else {
            /* called from other kernel module */
            memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size);
        }
    } else {
        msdc_data.flags = MMC_DATA_READ;
        msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size);
    }

#ifdef MTK_MSDC_USE_CMD23
    if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && 
            !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)){
        msdc_mrq.sbc = &msdc_sbc;
        msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT;
        msdc_mrq.sbc->arg = msdc_data.blocks;
        msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
    }
#endif

    msdc_cmd.arg = msdc_ctl->address;

    if (!mmc_card_blockaddr(host_ctl->mmc->card)){
        printk("this device uses byte addressing\n");
        msdc_cmd.arg <<= 9;
    }
    msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    msdc_stop.opcode = MMC_STOP_TRANSMISSION;
    msdc_stop.arg = 0;
    msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

    msdc_data.stop = &msdc_stop;
    msdc_data.blksz = 512;
    msdc_data.sg = &msdc_sg;
    msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
    printk("total size is %d\n",msdc_ctl->total_size);
#endif
    sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
    mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);
    mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

    if (!msdc_ctl->iswrite){
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                return -EFAULT;
                }
        } else {
            /* called from other kernel module */
            memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size);
        }
    }

    if (msdc_ctl->partition){
        mmc_send_ext_csd(host_ctl->mmc->card,l_buf);

        if (l_buf[179] & 0x7) {
            /* set back to access user area */
            l_buf[179] &= ~0x7;
            l_buf[179] |= 0x0;
            mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
        }
    }

    mmc_release_host(host_ctl->mmc);

    if (msdc_cmd.error)
        msdc_ctl->result = msdc_cmd.error;

    if (msdc_data.error){
        msdc_ctl->result = msdc_data.error;
    } else {
        msdc_ctl->result = 0;
    }
    dma_force[host_ctl->id] = FORCE_NOTHING;
    return msdc_ctl->result;

}
Example #18
static int simple_sd_ioctl_single_rw(struct msdc_ioctl* msdc_ctl)
{
    char l_buf[512];
    struct scatterlist msdc_sg;
    struct mmc_data    msdc_data;
    struct mmc_command msdc_cmd;
    struct mmc_request msdc_mrq;
    struct msdc_host *host_ctl;
   
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];
    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user wants to access partition %d\n", msdc_ctl->partition);
#endif

    mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
    switch (msdc_ctl->partition){
        case BOOT_PARTITION_1:
            if (0x1 != (l_buf[179] & 0x7)){
                /* change to access boot partition 1 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x1;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        case BOOT_PARTITION_2:
            if (0x2 != (l_buf[179] & 0x7)){
                /* change to access boot partition 2 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x2;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        default:
            /* make sure access partition is user data area */
            if (0 != (l_buf[179] & 0x7)){
                /* set back to access user area */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x0;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
    }
    
    if(msdc_ctl->total_size > 512){
        msdc_ctl->result = -1;
        return  msdc_ctl->result;
    }

#if DEBUG_MMC_IOCTL
    printk("start MSDC_SINGLE_READ_WRITE !!\n");
#endif    
    memset(&msdc_data, 0, sizeof(struct mmc_data));
    memset(&msdc_mrq, 0, sizeof(struct mmc_request));
    memset(&msdc_cmd, 0, sizeof(struct mmc_command));

    msdc_mrq.cmd = &msdc_cmd;
    msdc_mrq.data = &msdc_data;

    if(msdc_ctl->trans_type)
        dma_force[host_ctl->id] = FORCE_IN_DMA;
    else
        dma_force[host_ctl->id] = FORCE_IN_PIO;

    if (msdc_ctl->iswrite){
        msdc_data.flags = MMC_DATA_WRITE;
        msdc_cmd.opcode = MMC_WRITE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, 512)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, 512);
        }
    } else {
        msdc_data.flags = MMC_DATA_READ;
        msdc_cmd.opcode = MMC_READ_SINGLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;

        memset(sg_msdc_multi_buffer, 0 , 512);
    }

    msdc_cmd.arg = msdc_ctl->address;

    if (!mmc_card_blockaddr(host_ctl->mmc->card)){
        printk("this device uses byte addressing\n");
        msdc_cmd.arg <<= 9;
    }

    msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    msdc_data.stop = NULL;
    msdc_data.blksz = 512;
    msdc_data.sg = &msdc_sg;
    msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
    printk("single block: user buf address is 0x%p\n", msdc_ctl->buffer);
#endif    
    sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
    mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);

    mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

    if (!msdc_ctl->iswrite){
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_to_user(msdc_ctl->buffer,sg_msdc_multi_buffer,512)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                return -EFAULT;
                }
        } else {
            /* called from other kernel module */
            memcpy(msdc_ctl->buffer,sg_msdc_multi_buffer,512);
        }
    }

    if (msdc_ctl->partition){
        mmc_send_ext_csd(host_ctl->mmc->card,l_buf);

        if (l_buf[179] & 0x7) {
            /* set back to access user area */
            l_buf[179] &= ~0x7;
            l_buf[179] |= 0x0;
            mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
        }
    }

    mmc_release_host(host_ctl->mmc);

    if (msdc_cmd.error)
        msdc_ctl->result= msdc_cmd.error;

    if (msdc_data.error)
        msdc_ctl->result= msdc_data.error;
    else
        msdc_ctl->result= 0;

    dma_force[host_ctl->id] = FORCE_NOTHING;
    return msdc_ctl->result;
}
Example #19
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

#if defined(CONFIG_ARCH_ACER_T20) || defined(CONFIG_ARCH_ACER_T30)
	if (card->cid.manfid == SANDISK_X3_CID_MID) {
		err = mmc_switch(card, 0x0, EXT_CSD_POWER_CLASS, 4);
		if (err)
			printk(KERN_ERR "%s: switch to power class failed\n", mmc_hostname(card->host));
	}
#endif

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi && (card->host->caps & MMC_CAP_BKOPS)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HPI_MGMT, 1);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling HPI failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			card->ext_csd.hpi_en = 1;
		}
	}

	/*
	 * Enable Background ops feature (if supported)
	 */
	if (card->ext_csd.bk_ops && (card->host->caps & MMC_CAP_BKOPS)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_EN, 1);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warning("%s: Enabling BK ops failed\n",
				   mmc_hostname(card->host));
			err = 0;
		} else {
			card->ext_csd.bk_ops_en = 1;
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0]);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);
				/*
				 * If controller can't handle bus width test,
				 * use the highest bus width to maintain
				 * compatibility with previous MMC behavior.
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					break;
				err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BUS_WIDTH,
					ext_csd_bits[idx][1]);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

	if (!oldcard)
		host->card = card;

#if defined(CONFIG_ARCH_ACER_T20) || defined(CONFIG_ARCH_ACER_T30)
	switch (card->type) {
		case MMC_TYPE_MMC:
			sprintf(emmc_type, "MMC");
			break;
		case MMC_TYPE_SD:
			sprintf(emmc_type, "SD");
			break;
		case MMC_TYPE_SDIO:
			sprintf(emmc_type, "SDIO");
			break;
		case MMC_TYPE_SD_COMBO:
			sprintf(emmc_type, "SDcombo");
			break;
		default:
			sprintf(emmc_type, "unknown");
	}

	sprintf(emmc_date, "%02d/%04d", card->cid.month, card->cid.year);
	emmc_size = card->ext_csd.sectors >> 11;
	emmc_name = card->cid.prod_name;

	if (device_info_kobj == NULL) {
		device_info_kobj = kobject_create_and_add("dev-info_rom", NULL);
		if (device_info_kobj == NULL) {
			pr_warning("%s: subsystem_register failed\n",
					mmc_hostname(card->host));
		} else {
			err = sysfs_create_group(device_info_kobj, &attr_group);
			if(err) {
				pr_warning("%s: sysfs_create_group failed\n",
						mmc_hostname(card->host));
			}
		}
	}
#endif

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
Example #20
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 6) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			pr_info("[HTC_KER_ADD]%s: ext_csd[EXT_CSD_BKOPS_EN] = %d\n",
			mmc_hostname(card->host),
			ext_csd[EXT_CSD_BKOPS_EN]);
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en &&
				card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BKOPS_EN, 1, 0);
				if (err)
					pr_warning("%s: Enabling BKOPS failed\n",
						mmc_hostname(card->host));
				else {
					card->ext_csd.bkops_en = 1;
					pr_info("[HTC_KER_ADD]%s: Enabled BKOPs\n",
					mmc_hostname(card->host));
				}
			}
		}

		
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}
		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	}

	if (card->cid.manfid == SANDISK_MMC) {
		char buf[7] = {0};
		sprintf(buf, "%c%c%c%c%c%c", ext_csd[73], ext_csd[74], ext_csd[75],
										ext_csd[76], ext_csd[77], ext_csd[78]);
		strncpy(card->ext_csd.fwrev, buf, strlen(buf));
	}

	if (mmc_card_mmc(card)) {
		char *buf;
		int i, j;
		ssize_t n = 0;
		pr_info("%s: cid %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_cid[0], card->raw_cid[1],
			card->raw_cid[2], card->raw_cid[3]);
		pr_info("%s: csd %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_csd[0], card->raw_csd[1],
			card->raw_csd[2], card->raw_csd[3]);

		buf = kmalloc(512, GFP_KERNEL);
		if (buf) {
			for (i = 0; i < 32; i++) {
				for (j = 511 - (16 * i); j >= 496 - (16 * i); j--)
					n += sprintf(buf + n, "%02x", ext_csd[j]);
				n += sprintf(buf + n, "\n");
				pr_info("%s: ext_csd %s", mmc_hostname(card->host), buf);
				n = 0;
			}
		}
		if (buf)
			kfree(buf);
	}

out:
	return err;
}
Example #21
/*
 * Read and decode extended CSD. Switch to high-speed and wide bus
 * if supported.
 */
static int mmc_process_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	err = MMC_ERR_FAILED;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return MMC_ERR_NONE;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd. mmc v4 cards will be "
			"treated as v3.\n", mmc_hostname(card->host));
		return MMC_ERR_FAILED;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err != MMC_ERR_NONE) {
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = MMC_ERR_NONE;
		}
		goto out;
	}

	card->ext_csd.sectors =
		ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
		ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
		ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
		ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
	if (card->ext_csd.sectors)
		mmc_card_set_blockaddr(card);

	switch (ext_csd[EXT_CSD_CARD_TYPE]) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
		goto out;
	}

	if (card->host->caps & MMC_CAP_MMC_HIGHSPEED) {
		/* Activate highspeed support. */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err != MMC_ERR_NONE) {
			printk(KERN_WARNING "%s: failed to switch "
				"card to mmc v4 high-speed mode.\n",
			       mmc_hostname(card->host));
			err = MMC_ERR_NONE;
			goto out;
		}

		mmc_card_set_highspeed(card);

		mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
	}

	/* Check for host support for wide-bus modes. */
	if (card->host->caps & MMC_CAP_4_BIT_DATA) {
		/* Activate 4-bit support. */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4);
		if (err != MMC_ERR_NONE) {
			printk(KERN_WARNING "%s: failed to switch "
				"card to mmc v4 4-bit bus mode.\n",
			       mmc_hostname(card->host));
			err = MMC_ERR_NONE;
			goto out;
		}

		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
	}

out:
	kfree(ext_csd);

	return err;
}
Example #22
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;
	u8 *ext_csd = NULL;
#if defined(CONFIG_MMC_DISABLE_WP_RFG_5)
	/* 2012 March detect write protection status for SHR/SHR#K workaround */
	/* mfg partition start sector = LBA 65536                             */
	unsigned char WP_STATUS[8] = {0};
#endif
	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */

		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);

		if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
			mmc_card_set_blockaddr(card);
	}

	/*
	 * If enhanced_area_en is TRUE, the host needs to enable the
	 * ERASE_GRP_DEF bit.  This bit is lost after every reset or power
	 * off.  For 2GB eMMC there is no HC_ERASE_GROUP defined.
	 */

	if (card->ext_csd.sectors > 4194304) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1, 0);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/* For SanDisk X3, we have to enable power class 4 */
	if (card->cid.manfid == 0x45) {
		if (card->ext_csd.sectors > 33554432) { /* storage size is larger than 16GB */
				err = mmc_switch(card, EXT_CSD_CMD_SET_ZERO, EXT_CSD_POWER_CLASS, 4, 0);
				if (err && err != -EBADMSG)
					goto free_card;

				if (err) {
					printk(KERN_WARNING "%s: switch to power class 4 failed\n",
						mmc_hostname(card->host));
					err = 0;
				} else {
					printk(KERN_WARNING "%s: switch to power class 4 successfully\n",
						mmc_hostname(card->host));
				}
		} else if (card->ext_csd.sectors == 31105024) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_ZERO, EXT_CSD_POWER_CLASS, 4, 0);
				if (err && err != -EBADMSG)
					goto free_card;

				if (err) {
					printk(KERN_WARNING "%s: switch to power class 4 failed\n",
						mmc_hostname(card->host));
					err = 0;
				} else {
					printk(KERN_WARNING "%s: switch to power class 4 successfully\n",
						mmc_hostname(card->host));
				}
		}
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_HS_TIMING, 1, 0);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}
	if (card->cid.manfid == 0x45) {
		/* Sandisk 24nm extreme 16G */
		if ((card->ext_csd.sectors == 31105024) && !strcmp(card->cid.prod_name, "SEM16G"))
			card->wr_perf = 12;
		/* Sandisk 24nm extreme 32G */
		else if ((card->ext_csd.sectors == 62324736) && !strcmp(card->cid.prod_name, "SEM32G"))
			card->wr_perf = 12;
	} else if (card->cid.manfid == 0x15) {
		pr_info("%s: sectors %u\n", mmc_hostname(card->host), card->ext_csd.sectors);
		/* Samsung 27nm 16G */
		if ((card->ext_csd.sectors == 30777344) && !strcmp(card->cid.prod_name, "KYL00M"))
			card->wr_perf = 11;
		else if ((card->ext_csd.sectors == 62521344) && !strcmp(card->cid.prod_name, "MBG8FA"))
			card->wr_perf = 11;
		/* Samsung 21nm 16G */
		else if ((card->ext_csd.sectors == 30535680) && !strcmp(card->cid.prod_name, "MAG2GA"))
			card->wr_perf = 14;
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& ((host->caps & (MMC_CAP_1_8V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& ((host->caps & (MMC_CAP_1_2V_DDR |
			     MMC_CAP_UHS_DDR50))
				== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0],
					 0);
			if (!err) {
				mmc_set_bus_width(card->host, bus_width);

				/*
				 * If controller can't handle bus width test,
				 * compare ext_csd previously read in 1 bit mode
				 * against ext_csd at new bus width
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					err = mmc_compare_ext_csds(card,
						bus_width);
				else
					err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][1],
					 0);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			/*
			 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
			 * signaling.
			 *
			 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
			 *
			 * 1.8V vccq at 3.3V core voltage (vcc) is not required
			 * in the JEDEC spec for DDR.
			 *
			 * Do not force change in vccq since we are obviously
			 * working and no change to vccq is needed.
			 *
			 * WARNING: eMMC rules are NOT the same as SD DDR
			 */
			if (ddr == MMC_1_2V_DDR_MODE) {
				err = mmc_set_signal_voltage(host,
					MMC_SIGNAL_VOLTAGE_120, 0);
				if (err)
					goto err;
			}
			mmc_card_set_ddr_mode(card);
			mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
			mmc_set_bus_width(card->host, bus_width);
		}
	}

#if defined(CONFIG_MMC_DISABLE_WP_RFG_5)
	/*
	 * March 2012: detect write-protection status for the SHR/SHR#K
	 * workaround.  The mfg partition starts at LBA 65536.
	 */
	err = mmc_set_block_length(card, 8);

	if (err && err != -EBADMSG)
		goto free_card;

	if (err) {
		pr_err("%s: set block length to 8 fail\n", mmc_hostname(card->host));
		err = 0;
	}

	err = mmc_send_write_prot_type(card, WP_STATUS, 65536);

	if (err && err != -EBADMSG)
		goto free_card;

	if (err) {
		pr_err("%s: send write protection type at address 65536 failed\n", mmc_hostname(card->host));
		err = 0;
	}

	if (WP_STATUS[0] & 0xAA) {
		pr_info("%s: trigger software write protection\n", mmc_hostname(card->host));
		card->write_prot_type = 1;
	} else {
		pr_info("%s: disable software write protection\n", mmc_hostname(card->host));
		card->write_prot_type = 0;
	}

	err = mmc_set_block_length(card, 512);

	if (err && err != -EBADMSG)
		goto free_card;

	if (err) {
		pr_err("%s: set block length to 512 fail\n", mmc_hostname(card->host));
		err = 0;
	}
#endif
#if defined(CONFIG_ARCH_MSM7230)
	/*
	 * March 2012: detect write-protection status for the Kingston
	 * workaround.  The system partition starts at LBA 200704.
	 */
	if (card->cid.manfid == 0x70) {
		unsigned char WP_STATUS[8] = {0};
		err = mmc_set_block_length(card, 8);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			pr_err("%s: set block length to 8 fail\n", mmc_hostname(card->host));
			err = 0;
		}

		err = mmc_send_write_prot_type(card, WP_STATUS, 200704);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			pr_err("%s: send write protection type at address 200704 failed\n", mmc_hostname(card->host));
			err = 0;
		}

		if (WP_STATUS[0] & 0xAA) {
			pr_info("%s: trigger Kingston write protection\n", mmc_hostname(card->host));
			card->write_prot_type = 1;
		} else {
			pr_info("%s: disable Kingston write protection\n", mmc_hostname(card->host));
			card->write_prot_type = 0;
		}

		err = mmc_set_block_length(card, 512);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			pr_err("%s: set block length to 512 fail\n", mmc_hostname(card->host));
			err = 0;
		}
	}
#endif
	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}
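
/*
 * Editor's sketch, not part of the driver above: mmc_init_card() repeats one
 * error-handling idiom for every optional CMD6 switch -- -EBADMSG (typically
 * the card answered but rejected the switch) is tolerated, while any other
 * error aborts initialisation.  The helper below captures that idiom,
 * assuming the five-argument mmc_switch(card, set, index, value, timeout)
 * used in this example; the name mmc_switch_optional() is hypothetical.
 */
static int mmc_switch_optional(struct mmc_card *card, u8 index, u8 value,
			       unsigned int timeout, const char *what)
{
	int err;

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, index, value, timeout);
	if (err == -EBADMSG) {
		/* Card refused the optional feature: warn and carry on. */
		printk(KERN_WARNING "%s: switch to %s failed\n",
		       mmc_hostname(card->host), what);
		return 0;
	}
	return err;	/* 0 on success, fatal error otherwise */
}
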
/*
 * Decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}
	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
		break;
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled.  If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			u8 hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			u8 hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}

	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (mmc_card_support_bkops(card)) {
				if (!card->ext_csd.bkops_en &&
					card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
					err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
						EXT_CSD_BKOPS_EN, 1, 0);
					if (err)
						pr_warning("%s: Enabling BKOPS failed\n",
							mmc_hostname(card->host));
					else {
						card->ext_csd.bkops_en = 1;
						pr_warning("%s: BKOPS enabled\n",
							mmc_hostname(card->host));
					}
				} else {
					pr_warning("%s: BKOPS enabled\n",
						mmc_hostname(card->host));
				}
			} else {
				pr_warning("%s: BKOPS not enabled\n",
					mmc_hostname(card->host));
			}
		} else {
			pr_warning("%s: BKOPS not support\n", mmc_hostname(card->host));
		}
		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			if (!(ext_csd[EXT_CSD_HPI_MGMT] & 1)) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_HPI_MGMT, 1, 0);
				if (err)
					pr_warning("%s: Enabling HPI failed\n",
						mmc_hostname(card->host));
				else {
					pr_warning("%s: HPI enabled\n",
						mmc_hostname(card->host));
					card->ext_csd.hpi_en = 1;
				}
			}
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}
		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

out:
	return err;
}
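
/*
 * Editor's sketch, not part of the driver above: how the four EXT_CSD_SEC_CNT
 * bytes read in mmc_read_ext_csd() turn into a capacity, and why the 2 GiB
 * threshold decides byte vs. sector addressing.  The stand-alone helper and
 * its name are illustrative assumptions only.
 */
static unsigned long long ext_csd_capacity_bytes(const u8 *ext_csd)
{
	u32 sectors = ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
		      ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
		      ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
		      ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

	/*
	 * Each sector is 512 bytes.  Densities above 2 GiB (more than
	 * 4194304 sectors) must be sector addressed; smaller devices stay
	 * byte addressed.
	 */
	return (unsigned long long)sectors * 512;
}
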
static int mmc_awake(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = -ENOSYS;
	int ddr = 0;
	unsigned int max_dtr;
	if (card && card->ext_csd.rev >= 3) {
		err = mmc_card_sleepawake(host, 0);
		if (err < 0) {
			pr_debug("%s: Error %d while awaking sleeping card",
				 mmc_hostname(host), err);
			return err;
		}
		/*
		* Ensure eMMC user default partition is enabled
		*/
		if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
			card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
					card->ext_csd.part_config,
					card->ext_csd.part_time);
			if (err && err != -EBADMSG)
				goto err;
		}

		/*
		* Activate high speed (if supported)
		*/
		if ((card->ext_csd.hs_max_dtr != 0) &&
			(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
				mmc_card_set_highspeed(card);
				mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
		/*
		* Compute bus speed.
		*/
		max_dtr = (unsigned int)-1;

		if (mmc_card_highspeed(card)) {
			if (max_dtr > card->ext_csd.hs_max_dtr)
				max_dtr = card->ext_csd.hs_max_dtr;
		} else if (max_dtr > card->csd.max_dtr) {
			max_dtr = card->csd.max_dtr;
		}

		mmc_set_clock(host, max_dtr);
		/*
		* Indicate DDR mode (if supported).
		*/
		if (mmc_card_highspeed(card)) {
			if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
				&& ((host->caps & (MMC_CAP_1_8V_DDR |
					MMC_CAP_UHS_DDR50))
					== (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
					ddr = MMC_1_8V_DDR_MODE;
			else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
				&& ((host->caps & (MMC_CAP_1_2V_DDR |
					MMC_CAP_UHS_DDR50))
					== (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
					ddr = MMC_1_2V_DDR_MODE;
		}
		/*
		* Activate wide bus and DDR (if supported).
		*/
		if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
			(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
			static unsigned ext_csd_bits[][2] = {
				{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
				{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
				{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
			};

			static unsigned bus_widths[] = {
				MMC_BUS_WIDTH_8,
				MMC_BUS_WIDTH_4,
				MMC_BUS_WIDTH_1
			};

			unsigned idx, bus_width = 0;
			if (host->caps & MMC_CAP_8_BIT_DATA)
				idx = 0;
			else
				idx = 1;
			for (; idx < ARRAY_SIZE(bus_widths); idx++) {
				bus_width = bus_widths[idx];
				if (bus_width == MMC_BUS_WIDTH_1)
					ddr = 0; /* no DDR for 1-bit width */
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
						EXT_CSD_BUS_WIDTH,
						ext_csd_bits[idx][0],
						0);
				if (!err) {
					mmc_set_bus_width(card->host, bus_width);
					break;

				}
			}

			if (!err && ddr) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
						EXT_CSD_BUS_WIDTH,
						ext_csd_bits[idx][1],
						0);
			}
			if (err) {
				printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
					"failed\n", mmc_hostname(card->host),
					1 << bus_width, ddr);
				goto err;
			} else if (ddr) {
				/*
				* eMMC cards can support 3.3V to 1.2V i/o (vccq)
				* signaling.
				*
				* EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
				*
				* 1.8V vccq at 3.3V core voltage (vcc) is not required
				* in the JEDEC spec for DDR.
				*
				* Do not force change in vccq since we are obviously
				* working and no change to vccq is needed.
				*
				* WARNING: eMMC rules are NOT the same as SD DDR
				*/
				if (ddr == MMC_1_2V_DDR_MODE) {
					err = mmc_set_signal_voltage(host,
						MMC_SIGNAL_VOLTAGE_120, 0);
					if (err)
						goto err;
				}
				mmc_card_set_ddr_mode(card);
				mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
				mmc_set_bus_width(card->host, bus_width);
			}
		}
	}
err:
	return err;
}
static void
mmc_discover_cards(struct mmc_softc *sc)
{
	struct mmc_ivars *ivar = NULL;
	device_t *devlist;
	int err, i, devcount, newcard;
	uint32_t raw_cid[4];
	uint32_t resp, sec_count;
	device_t child;
	uint16_t rca = 2;
	u_char switch_res[64];

	if (bootverbose || mmc_debug)
		device_printf(sc->dev, "Probing cards\n");
	while (1) {
		err = mmc_all_send_cid(sc, raw_cid);
		if (err == MMC_ERR_TIMEOUT)
			break;
		if (err != MMC_ERR_NONE) {
			device_printf(sc->dev, "Error reading CID %d\n", err);
			break;
		}
		newcard = 1;
		if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0)
			return;
		for (i = 0; i < devcount; i++) {
			ivar = device_get_ivars(devlist[i]);
			if (memcmp(ivar->raw_cid, raw_cid, sizeof(raw_cid)) == 0) {
				newcard = 0;
				break;
			}
		}
		free(devlist, M_TEMP);
		if (bootverbose || mmc_debug) {
			device_printf(sc->dev, "%sard detected (CID %08x%08x%08x%08x)\n",
			    newcard ? "New c" : "C",
			    raw_cid[0], raw_cid[1], raw_cid[2], raw_cid[3]);
		}
		if (newcard) {
			ivar = malloc(sizeof(struct mmc_ivars), M_DEVBUF,
			    M_WAITOK | M_ZERO);
			if (!ivar)
				return;
			memcpy(ivar->raw_cid, raw_cid, sizeof(raw_cid));
		}
		if (mmcbr_get_ro(sc->dev))
			ivar->read_only = 1;
		ivar->bus_width = bus_width_1;
		ivar->timing = bus_timing_normal;
		ivar->mode = mmcbr_get_mode(sc->dev);
		if (ivar->mode == mode_sd) {
			mmc_decode_cid_sd(ivar->raw_cid, &ivar->cid);
			mmc_send_relative_addr(sc, &resp);
			ivar->rca = resp >> 16;
			/* Get card CSD. */
			mmc_send_csd(sc, ivar->rca, ivar->raw_csd);
			mmc_decode_csd_sd(ivar->raw_csd, &ivar->csd);
			ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE;
			if (ivar->csd.csd_structure > 0)
				ivar->high_cap = 1;
			ivar->tran_speed = ivar->csd.tran_speed;
			ivar->erase_sector = ivar->csd.erase_sector * 
			    ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
			/* Get card SCR. Card must be selected to fetch it. */
			mmc_select_card(sc, ivar->rca);
			mmc_app_send_scr(sc, ivar->rca, ivar->raw_scr);
			mmc_app_decode_scr(ivar->raw_scr, &ivar->scr);
			/* Get card switch capabilities (command class 10). */
			if ((ivar->scr.sda_vsn >= 1) &&
			    (ivar->csd.ccc & (1<<10))) {
				mmc_sd_switch(sc, SD_SWITCH_MODE_CHECK,
				    SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE,
				    switch_res);
				if (switch_res[13] & 2) {
					ivar->timing = bus_timing_hs;
					ivar->hs_tran_speed = SD_MAX_HS;
				}
			}
			mmc_app_sd_status(sc, ivar->rca, ivar->raw_sd_status);
			mmc_app_decode_sd_status(ivar->raw_sd_status,
			    &ivar->sd_status);
			if (ivar->sd_status.au_size != 0) {
				ivar->erase_sector =
				    16 << ivar->sd_status.au_size;
			}
			mmc_select_card(sc, 0);
			/* Find max supported bus width. */
			if ((mmcbr_get_caps(sc->dev) & MMC_CAP_4_BIT_DATA) &&
			    (ivar->scr.bus_widths & SD_SCR_BUS_WIDTH_4))
				ivar->bus_width = bus_width_4;
			if (bootverbose || mmc_debug)
				mmc_log_card(sc->dev, ivar, newcard);
			if (newcard) {
				/* Add device. */
				child = device_add_child(sc->dev, NULL, -1);
				device_set_ivars(child, ivar);
			}
			return;
		}
		mmc_decode_cid_mmc(ivar->raw_cid, &ivar->cid);
		ivar->rca = rca++;
		mmc_set_relative_addr(sc, ivar->rca);
		/* Get card CSD. */
		mmc_send_csd(sc, ivar->rca, ivar->raw_csd);
		mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd);
		ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE;
		ivar->tran_speed = ivar->csd.tran_speed;
		ivar->erase_sector = ivar->csd.erase_sector * 
		    ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
		/* Only MMC >= 4.x cards support EXT_CSD. */
		if (ivar->csd.spec_vers >= 4) {
			/* Card must be selected to fetch EXT_CSD. */
			mmc_select_card(sc, ivar->rca);
			mmc_send_ext_csd(sc, ivar->raw_ext_csd);
			/* Handle extended capacity from EXT_CSD */
			sec_count = ivar->raw_ext_csd[EXT_CSD_SEC_CNT] +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 1] << 8) +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 2] << 16) +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			if (sec_count != 0) {
				ivar->sec_count = sec_count;
				ivar->high_cap = 1;
			}
			/* Get card speed in high speed mode. */
			ivar->timing = bus_timing_hs;
			if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE]
			    & EXT_CSD_CARD_TYPE_52)
				ivar->hs_tran_speed = MMC_TYPE_52_MAX_HS;
			else if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE]
			    & EXT_CSD_CARD_TYPE_26)
				ivar->hs_tran_speed = MMC_TYPE_26_MAX_HS;
			else
				ivar->hs_tran_speed = ivar->tran_speed;
			/* Find max supported bus width. */
			ivar->bus_width = mmc_test_bus_width(sc);
			mmc_select_card(sc, 0);
			/* Handle HC erase sector size. */
			if (ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE] != 0) {
				ivar->erase_sector = 1024 *
				    ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE];
				mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_ERASE_GRP_DEF, 1);
			}
		} else {
			ivar->bus_width = bus_width_1;
			ivar->timing = bus_timing_normal;
		}
		if (bootverbose || mmc_debug)
			mmc_log_card(sc->dev, ivar, newcard);
		if (newcard) {
			/* Add device. */
			child = device_add_child(sc->dev, NULL, -1);
			device_set_ivars(child, ivar);
		}
	}
Exemple #26
0
int mmc_change_freq(struct mmc *mmc)
{
	char ext_csd[512];
	char cardtype;
	int err;
	int retry = 5;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err) {
		mmcinfo("mmc %d get ext csd failed\n", mmc->control_num);
		return err;
	}

	cardtype = ext_csd[196] & 0xf;

	/*
	 * Retry for Toshiba eMMC: the first time the device is switched to
	 * high speed it may return a response CRC error, so retry CMD6.
	 */
	do {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);
		if (!err)
			break;
		mmcinfo("retry mmc switch (CMD6)\n");
	} while (retry--);

	if (err) {
		mmcinfo("mmc %d change to HS failed\n", mmc->control_num);
		return err;
	}

	/* Now check to see that it worked */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err) {
		mmcinfo("mmc %d send ext csd failed\n", mmc->control_num);
		return err;
	}

	/* No high-speed support */
	if (!ext_csd[185])
		return 0;

	/* High Speed is set, there are two types: 52MHz and 26MHz */
	if (cardtype & MMC_HS_52MHZ)
		mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
	else
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
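
/*
 * Editor's sketch, not part of the code above: the Toshiba workaround in
 * mmc_change_freq() generalised into a bounded-retry wrapper, assuming the
 * four-argument mmc_switch(mmc, set, index, value) and the mmcinfo() logger
 * used in this example.  The wrapper name is hypothetical.
 */
static int mmc_switch_retry(struct mmc *mmc, u8 set, u8 index, u8 value,
			    int retries)
{
	int err;

	do {
		err = mmc_switch(mmc, set, index, value);
		if (!err)
			return 0;
		mmcinfo("retry mmc switch (CMD6)\n");
	} while (retries--);

	return err;	/* last error once every attempt has failed */
}
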
Exemple #27
0
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
                         struct mmc_card *oldcard)
{
    struct mmc_card *card;
    int err, ddr = 0;
    u32 cid[4];
    unsigned int max_dtr;

    BUG_ON(!host);
    WARN_ON(!host->claimed);

    /*
     * Since we're changing the OCR value, we seem to
     * need to tell some cards to go back to the idle
     * state.  We wait 1ms to give cards time to
     * respond.
     */
    mmc_go_idle(host);

    /* The extra bit indicates that we support high capacity */
    err = mmc_send_op_cond(host, ocr | (1 << 30), NULL);
    if (err)
        goto err;

    /*
     * For SPI, enable CRC as appropriate.
     */
    if (mmc_host_is_spi(host)) {
        err = mmc_spi_set_crc(host, use_spi_crc);
        if (err)
            goto err;
    }

    /*
     * Fetch CID from card.
     */
    if (mmc_host_is_spi(host))
        err = mmc_send_cid(host, cid);
    else
        err = mmc_all_send_cid(host, cid);
    if (err)
        goto err;

    if (oldcard) {
        if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
            err = -ENOENT;
            goto err;
        }

        card = oldcard;
    } else {
        /*
         * Allocate card structure.
         */
        card = mmc_alloc_card(host, &mmc_type);
        if (IS_ERR(card)) {
            err = PTR_ERR(card);
            goto err;
        }

        card->type = MMC_TYPE_MMC;
        card->rca = 1;
        memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
    }

    /*
     * For native busses:  set card RCA and quit open drain mode.
     */
    if (!mmc_host_is_spi(host)) {
        err = mmc_set_relative_addr(card);
        if (err)
            goto free_card;

        mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
    }

    if (!oldcard) {
        /*
         * Fetch CSD from card.
         */
        err = mmc_send_csd(card, card->raw_csd);
        if (err)
            goto free_card;

        err = mmc_decode_csd(card);
        if (err)
            goto free_card;
        err = mmc_decode_cid(card);
        if (err)
            goto free_card;
    }

    /*
     * Select card, as all following commands rely on that.
     */
    if (!mmc_host_is_spi(host)) {
        err = mmc_select_card(card);
        if (err)
            goto free_card;
    }

    if (!oldcard) {
        /*
         * Fetch and process extended CSD.
         */
        err = mmc_read_ext_csd(card);
        if (err)
            goto free_card;
    }

    /*
     * Activate high speed (if supported)
     */
    if ((card->ext_csd.hs_max_dtr != 0) &&
            (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_HS_TIMING, 1);
        if (err && err != -EBADMSG)
            goto free_card;

        if (err) {
            printk(KERN_WARNING "%s: switch to highspeed failed\n",
                   mmc_hostname(card->host));
            err = 0;
        } else {
            mmc_card_set_highspeed(card);
            mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
        }
    }

    /*
     * Compute bus speed.
     */
    max_dtr = (unsigned int)-1;

    if (mmc_card_highspeed(card)) {
        if (max_dtr > card->ext_csd.hs_max_dtr)
            max_dtr = card->ext_csd.hs_max_dtr;
    } else if (max_dtr > card->csd.max_dtr) {
        max_dtr = card->csd.max_dtr;
    }

    mmc_set_clock(host, max_dtr);

    /*
     * Indicate DDR mode (if supported).
     */
    if (mmc_card_highspeed(card)) {
        if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
                && (host->caps & (MMC_CAP_1_8V_DDR)))
            ddr = MMC_1_8V_DDR_MODE;
        else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
                 && (host->caps & (MMC_CAP_1_2V_DDR)))
            ddr = MMC_1_2V_DDR_MODE;
    }

    /*
     * Activate wide bus and DDR (if supported).
     */
    if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
            (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
        unsigned ext_csd_bit, bus_width;

        if (host->caps & MMC_CAP_8_BIT_DATA) {
            if (ddr)
                ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
            else
                ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
            bus_width = MMC_BUS_WIDTH_8;
        } else {
            if (ddr)
                ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
            else
                ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
            bus_width = MMC_BUS_WIDTH_4;
        }

        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                         EXT_CSD_BUS_WIDTH, ext_csd_bit);

        if (err && err != -EBADMSG)
            goto free_card;

        if (err) {
            printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
                   "failed\n", mmc_hostname(card->host),
                   1 << bus_width, ddr);
            err = 0;
        } else {
            if (ddr)
                mmc_card_set_ddr_mode(card);
            else
                ddr = MMC_SDR_MODE;

            mmc_set_bus_width_ddr(card->host, bus_width, ddr);
        }
    }

    if (!oldcard)
        host->card = card;

    return 0;

free_card:
    if (!oldcard)
        mmc_remove_card(card);
err:

    return err;
}
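
/*
 * Editor's sketch, not part of the code above: the ext_csd_bit selection in
 * the wide-bus block maps a host bus width onto the EXT_CSD BUS_WIDTH byte.
 * The encodings in the comments follow the eMMC 4.4 specification and the
 * usual kernel header values -- verify them against your own tree.  The
 * helper name is hypothetical.
 */
static unsigned mmc_ext_csd_bus_width(unsigned bus_width, int ddr)
{
	switch (bus_width) {
	case MMC_BUS_WIDTH_8:
		return ddr ? EXT_CSD_DDR_BUS_WIDTH_8	/* 6 */
			   : EXT_CSD_BUS_WIDTH_8;	/* 2 */
	case MMC_BUS_WIDTH_4:
		return ddr ? EXT_CSD_DDR_BUS_WIDTH_4	/* 5 */
			   : EXT_CSD_BUS_WIDTH_4;	/* 1 */
	default:
		return EXT_CSD_BUS_WIDTH_1;		/* 0, no DDR at 1 bit */
	}
}
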
Exemple #28
0
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, csd_struct;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 */
	csd_struct = UNSTUFF_BITS(resp, 126, 2);
#if defined(CONFIG_MACH_OMAP3630_EDP1) || defined(CONFIG_MACH_OMAP3621_EDP1) || defined(CONFIG_MACH_OMAP3621_BOXER) || defined(CONFIG_MACH_OMAP3621_EVT1A) || defined(CONFIG_MACH_OMAP3621_GOSSAMER)
	/* To recognize Boxer board eMMC */
	if (csd_struct != 1 && csd_struct != 2 && csd_struct != 3) {
#else
	if (csd_struct != 1 && csd_struct != 2) {
#endif
		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd_struct);
		return -EINVAL;
	}

	csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	return 0;
}
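
/*
 * Editor's sketch, not part of the code above: UNSTUFF_BITS() is defined
 * elsewhere in the kernel sources; an equivalent helper is shown here for
 * reference.  It assumes the 128-bit response is stored as four u32 words
 * with resp[0] holding bits 127..96, which is what the field offsets used in
 * mmc_decode_csd() imply.
 */
static u32 csd_bits(const u32 *resp, int start, int size)
{
	const u32 mask = (size < 32 ? 1u << size : 0) - 1;
	const int off = 3 - (start / 32);	/* word holding the field LSB */
	const int shft = start & 31;		/* bit offset inside that word */
	u32 res = resp[off] >> shft;

	if (size + shft > 32)			/* field straddles two words */
		res |= resp[off - 1] << ((32 - shft) % 32);
	return res & mask;
}
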

/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/*
		 * Allow hosts that cannot perform the command
		 * to fail more gracefully.
		 */
		if (err != -EINVAL)
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
#if defined(CONFIG_MACH_OMAP3630_EDP1) || defined(CONFIG_MACH_OMAP3621_EDP1) || defined(CONFIG_MACH_OMAP3621_BOXER) || defined(CONFIG_MACH_OMAP3621_EVT1A) || defined(CONFIG_MACH_OMAP3621_GOSSAMER)
	/* To recognize Boxer board eMMC */
	if (card->ext_csd.rev > 5) {
#else
	if (card->ext_csd.rev > 2) {
#endif
		printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
			"version %d\n", mmc_hostname(card->host),
			card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

#ifdef CONFIG_HC_Broken_eMMC_ZOOM2
	/*
	 * Hack: eMMC on Zoom2 seems to have a lower EXT_CSD Rev.
	 * This is incorrect as it is an HC card. The card becomes
	 * unusable if not set to blockaddr mode.
	 * The low level driver sets up the unused bit for MMC2 on Zoom2.
	 * Revert this hack once it is fixed in the card.
	 */
	if (card->host->unused) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
	} else
#endif
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
	}

	/* disable DDR detection */
	ext_csd[EXT_CSD_CARD_TYPE] &= EXT_CSD_CARD_TYPE_MASK;

	switch (ext_csd[EXT_CSD_CARD_TYPE]) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
		goto out;
	}

	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		else {
			card->ext_csd.sa_timeout = MMC_SLEEP_TIMEOUT_DEFAULT;
			printk(KERN_WARNING "%s: card's S_A_TIMEOUT is out of range.\n",
				mmc_hostname(card->host));
		}
	}

out:
	kfree(ext_csd);

	return err;
}

MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_serial.attr,
	NULL,
};

static struct attribute_group mmc_std_attr_group = {
	.attrs = mmc_std_attrs,
};

static struct attribute_group *mmc_attr_groups[] = {
	&mmc_std_attr_group,
	NULL,
};

static struct device_type mmc_type = {
	.groups = mmc_attr_groups,
};

/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	u32 rocr;
	unsigned int max_dtr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));

		/*
		 * If the OCR response to OP_COND from the card
		 * acknowledges block addressing, then enable it.
		 */
		if (rocr & MMC_CARD_ACCESS_MODE_MASK)
			mmc_card_set_blockaddr(card);

	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err)
			goto free_card;

		mmc_card_set_highspeed(card);

		mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Activate wide bus (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		unsigned ext_csd_bit, bus_width;

		if (host->caps & MMC_CAP_8_BIT_DATA) {
			ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
			bus_width = MMC_BUS_WIDTH_8;
		} else {
			ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
			bus_width = MMC_BUS_WIDTH_4;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bit);

		if (err)
			goto free_card;

		mmc_set_bus_width(card->host, bus_width);
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}

/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_remove_card(host->card);
	host->card = NULL;
}
/*
 * Decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 6) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled.  If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		/*
		 * General purpose partition feature support --
		 * If ext_csd has the size of general purpose partitions,
		 * set size, part_cfg, partition name in mmc_part.
		 */
		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en &&
				card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BKOPS_EN, 1, 0);
				if (err)
					pr_warning("%s: Enabling BKOPS failed\n",
						mmc_hostname(card->host));
				else
					card->ext_csd.bkops_en = 1;
			}
		}

		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}
		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	}

	/* [LGE_CHANGE_S] [email protected]
	 * Samsung parts support the discard option from version 4.41 onwards.
	 * Enable discard only for V3, and check the manufacturer ID because
	 * V3 ships with both Hynix and Samsung memory.
	 * DO NOT add this feature for v7/u0/m4/etc. without a firm reason in
	 * the eMMC spec or vendor confirmation. */
#if defined (CONFIG_MACH_MSM7X25A_V3) && defined (CONFIG_MACH_FEATURE_DISCARD)
#define MANUFACTURE_ID_SAMSUNG	0x15
	if (card->cid.manfid == MANUFACTURE_ID_SAMSUNG)
	{
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
		pr_info("%s: !!! card->cid.manfid:%d, prod_name:%s !!! \n", 
			mmc_hostname(card->host), card->cid.manfid, card->cid.prod_name);
	}
#endif

out:
	return err;
}
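
/*
 * Editor's sketch, not part of the driver above: the general purpose
 * partition size computed in the loop above, written out as one formula.
 * The unit of GP_SIZE_MULT is HC_ERASE_GRP_SIZE * HC_WP_GRP_SIZE * 512 KiB,
 * which is what the "part_size << 19" expresses.  The helper name is
 * hypothetical.
 */
static unsigned long long gp_part_size_bytes(const u8 *ext_csd, int idx)
{
	unsigned long long mult =
		(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2] << 16) +
		(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] << 8) +
		 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];

	return mult * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] *
		      ext_csd[EXT_CSD_HC_WP_GRP_SIZE] * 512 * 1024;
}
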
Exemple #30
0
/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err, ddr = 0;
	u32 cid[4];
	unsigned int max_dtr;
	u32 rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JESD84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.enhanced_area_en) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable the enhanced area offset & size;
			 * we will try to enable ERASE_GROUP_DEF again
			 * on the next reinit.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * ERASE_GRP_DEF was enabled successfully.
			 * This affects the erase size, so the erase
			 * size needs to be recalculated here.
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			printk(KERN_WARNING "%s: switch to highspeed failed\n",
			       mmc_hostname(card->host));
			err = 0;
		} else {
			mmc_card_set_highspeed(card);
			mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
		}
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Indicate DDR mode (if supported).
	 */
	if (mmc_card_highspeed(card)) {
		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
			&& (host->caps & (MMC_CAP_1_8V_DDR)))
				ddr = MMC_1_8V_DDR_MODE;
		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
			&& (host->caps & (MMC_CAP_1_2V_DDR)))
				ddr = MMC_1_2V_DDR_MODE;
	}

	/*
	 * Activate wide bus and DDR (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		static unsigned ext_csd_bits[][2] = {
			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
		};
		static unsigned bus_widths[] = {
			MMC_BUS_WIDTH_8,
			MMC_BUS_WIDTH_4,
			MMC_BUS_WIDTH_1
		};
		unsigned idx, bus_width = 0;

		if (host->caps & MMC_CAP_8_BIT_DATA)
			idx = 0;
		else
			idx = 1;
		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
			bus_width = bus_widths[idx];
			if (bus_width == MMC_BUS_WIDTH_1)
				ddr = 0; /* no DDR for 1-bit width */
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_BUS_WIDTH,
					 ext_csd_bits[idx][0]);
			if (!err) {
				mmc_set_bus_width_ddr(card->host,
						      bus_width, MMC_SDR_MODE);
				/*
				 * If controller can't handle bus width test,
				 * use the highest bus width to maintain
				 * compatibility with previous MMC behavior.
				 */
				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
					break;
				err = mmc_bus_test(card, bus_width);
				if (!err)
					break;
			}
		}

		if (!err && ddr) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BUS_WIDTH,
					ext_csd_bits[idx][1]);
		}
		if (err) {
			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
				"failed\n", mmc_hostname(card->host),
				1 << bus_width, ddr);
			goto free_card;
		} else if (ddr) {
			mmc_card_set_ddr_mode(card);
			mmc_set_bus_width_ddr(card->host, bus_width, ddr);
		}
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}