int mmc_change_freq(struct mmc *mmc)
{
	char ext_csd[512];
	char cardtype;
	int err;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	cardtype = ext_csd[196] & 0xf;

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);

	if (err)
		return err;

	/* Now check to see that it worked */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err)
		return err;

	/* No high-speed support */
	if (!ext_csd[185])
		return 0;

	/* High Speed is set, there are two types: 52MHz and 26MHz */
	if (cardtype & MMC_HS_52MHZ)
		mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
	else
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
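The U-Boot snippet above indexes EXT_CSD with raw byte offsets (196 for the device type, 185 for HS_TIMING). For reference, here is a minimal sketch of the symbolic names those offsets correspond to; the numeric values are cross-checked against the annotated EXT_CSD dump in the debugfs example further down this page, but macro names vary slightly between trees, so treat it as illustrative rather than a header to copy.

/* EXT_CSD byte offsets used by the snippets on this page; values match the
 * annotated EXT_CSD dump in the debugfs example below. Real code takes
 * these from the MMC headers instead of redefining them. */
#define EXT_CSD_PARTITION_CONFIG	179	/* low 3 bits select the active partition */
#define EXT_CSD_HS_TIMING		185	/* 0 = backwards compatible, 1 = high speed */
#define EXT_CSD_REV			192	/* EXT_CSD revision */
#define EXT_CSD_STRUCTURE		194	/* EXT_CSD structure version */
#define EXT_CSD_CARD_TYPE		196	/* 26 MHz / 52 MHz (and DDR) capability bits */
#define EXT_CSD_SEC_CNT			212	/* sector count, 4 bytes, little-endian */
#define EXT_CSD_S_A_TIMEOUT		217	/* sleep/awake timeout exponent */
#define EXT_CSD_BOOT_SIZE_MULTI		226	/* boot partition size multiplier */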
Example #2
/*
 * Read extended CSD.
 */
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);
	BUG_ON(!new_ext_csd);

	*new_ext_csd = NULL;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		kfree(ext_csd);
		*new_ext_csd = NULL;

		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			return err;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			pr_warning("%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}
	} else
		*new_ext_csd = ext_csd;

	return err;
}
Example #3
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);
	BUG_ON(!new_ext_csd);

	*new_ext_csd = NULL;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		kfree(ext_csd);
		*new_ext_csd = NULL;

		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			return err;

		if (card->csd.capacity == (4096 * 512)) {
			pr_err("%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			pr_warning("%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}
	} else
		*new_ext_csd = ext_csd;

	return err;
}
Example #4
static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
{
	struct mmc_card *card = inode->i_private;
	char *buf;
	ssize_t n = 0;
	u8 *ext_csd;
	int err, i;

	DBG("[%s] s\n",__func__);
	
	buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
	if (!buf) {
		DBG("[%s] e1\n",__func__);
		return -ENOMEM;
	}

	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		err = -ENOMEM;
		goto out_free;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out_free;

	for (i = 511; i >= 0; i--)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");
	BUG_ON(n != EXT_CSD_STR_LEN);

	filp->private_data = buf;
	kfree(ext_csd);

	DBG("[%s] e2\n",__func__);
	return 0;

out_free:
	kfree(buf);
	kfree(ext_csd);
	
	DBG("[%s] e3\n",__func__);
	return err;
}
Example #5
static int simple_sd_ioctl_get_excsd(struct msdc_ioctl *msdc_ctl)
{
	char l_buf[512];
	struct msdc_host *host_ctl;

#if DEBUG_MMC_IOCTL
	int i;
#endif

	if (!msdc_ctl)
		return -EINVAL;

	host_ctl = mtk_msdc_host[msdc_ctl->host_num];

	BUG_ON(!host_ctl);
	BUG_ON(!host_ctl->mmc);
	BUG_ON(!host_ctl->mmc->card);

	mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
	pr_debug("user want the extend csd in msdc slot%d\n", msdc_ctl->host_num);
#endif
	mmc_send_ext_csd(host_ctl->mmc->card, l_buf);

#if DEBUG_MMC_IOCTL
	for (i = 0; i < 512; i++) {
		pr_debug("%x", l_buf[i]);
		if (0 == ((i + 1) % 16)) {
			pr_debug("\n");
		}
	}
#endif

	mmc_release_host(host_ctl->mmc);

	if (copy_to_user(msdc_ctl->buffer, l_buf, 512))
		return -EFAULT;

	return 0;

}
Example #6
static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
{
	struct mmc_card *card = inode->i_private;
	char *buf;
	ssize_t n = 0;
	u8 *ext_csd;
	int err, i;

	buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		err = -ENOMEM;
		goto out_free;
	}

	mmc_rpm_hold(card->host, &card->dev);
	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	mmc_rpm_release(card->host, &card->dev);
	if (err)
		goto out_free;

	for (i = 0; i < 512; i++)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");
	BUG_ON(n != EXT_CSD_STR_LEN);

	filp->private_data = buf;
	kfree(ext_csd);
	return 0;

out_free:
	kfree(buf);
	kfree(ext_csd);
	return err;
}
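For context, the hex dump that the open() above leaves in filp->private_data is normally handed back to user space by a matching read() and freed by release(). The pair below is a hedged companion sketch in the usual debugfs style; the function names are ours and the exact implementation differs between trees.

/* Companion sketch (names are ours): return the buffer built by the open()
 * above via simple_read_from_buffer(), and free it when the file is closed. */
static ssize_t mmc_ext_csd_read_sketch(struct file *filp, char __user *ubuf,
				       size_t cnt, loff_t *ppos)
{
	char *buf = filp->private_data;

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, EXT_CSD_STR_LEN);
}

static int mmc_ext_csd_release_sketch(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}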
Example #7
int __simple_sd_ioctl_get_excsd(struct msdc_ioctl* msdc_ctl,u8 *excsd)
{
    char l_buf[512];
    struct msdc_host *host_ctl;
 
#if DEBUG_MMC_IOCTL
    int i;
#endif
  
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];

    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user want the extend csd in msdc slot%d \n",msdc_ctl->host_num);
#endif
    mmc_send_ext_csd(host_ctl->mmc->card,l_buf);
    
    memcpy(excsd, l_buf, 512);

#if DEBUG_MMC_IOCTL

    for (i = 0; i < 512; i++)
    {
        printk("%x", l_buf[i]);
        if (0 == ((i + 1) % 16)){
            printk("\n");
        }
    }
#endif

    mmc_release_host(host_ctl->mmc);

    return 0;
}
static int mmc_get_ext_csd_byte_val(struct mmc_card *card, u64 *val,
		unsigned int ext_csd_byte)
{
	u8 *ext_csd;
	int err = 0;

	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		err = -ENOMEM;
		return err;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);

	if (!err)
		*val = ext_csd[ext_csd_byte];

	kfree(ext_csd);
	return err;
}
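A hypothetical caller of the helper above, reading one EXT_CSD byte (the revision byte at offset 192) and logging it; the wrapper name and the message are illustrative only.

/* Hypothetical usage sketch: fetch the EXT_CSD revision byte (offset 192)
 * through mmc_get_ext_csd_byte_val() and print it. */
static int example_show_ext_csd_rev(struct mmc_card *card)
{
	u64 rev;
	int err;

	err = mmc_get_ext_csd_byte_val(card, &rev, 192 /* EXT_CSD_REV */);
	if (err)
		return err;

	pr_info("%s: EXT_CSD revision 1.%u\n",
		mmc_hostname(card->host), (unsigned int)rev);
	return 0;
}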
/* Here the index starts with zero */
static char *mmc_ext_csd_read_by_index(int start, int end,
		int strlen, struct inode *inode)
{
	struct mmc_card *card = inode->i_private;
	char *buf;
	ssize_t n = 0;
	u8 *ext_csd;
	int err, i;

	buf = kmalloc(strlen + 1, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}
	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out_free;

	for (i = start; i <= end; i++)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");

	kfree(ext_csd);
	return buf;

out_free:
	kfree(ext_csd);
	kfree(buf);
	return NULL;
}
Example #10
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	if (card->csd.structure == 3) {
		int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
		if (ext_csd_struct > 2) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					ext_csd_struct);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) {
			// LGE_CHANGE [[email protected]] 2010-12-31, 
			// [LGE_AP20] from Star K32
#ifdef CONFIG_EMBEDDED_MMC_START_OFFSET
			unsigned offs;
			offs = card->host->ops->get_host_offset(card->host);
			offs >>= 9;
			BUG_ON(offs >= card->ext_csd.sectors);
			card->ext_csd.sectors -= offs;

			// LGE_CHANGE [[email protected]] 2011-01-04,
			// [LGE_AP20] fix to report correct disk space in case of
			// emmc 4.3+ cards (from Star K32)
			offs = ext_csd[EXT_CSD_BOOT_SIZE_MULTI] * 512;
			card->ext_csd.sectors -= offs;
#else /* original codes */
#ifndef CONFIG_MACH_BSSQ
			unsigned boot_sectors;
			/* size is in 256K chunks, i.e. 512 sectors each */
			boot_sectors = ext_csd[EXT_CSD_BOOT_SIZE_MULTI] * 512;
			card->ext_csd.sectors -= boot_sectors;
#endif //#ifndef CONFIG_MACH_BSSQ
#endif
			mmc_card_set_blockaddr(card);
		}
Example #11
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, csd_struct;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 */
	csd_struct = UNSTUFF_BITS(resp, 126, 2);
#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60)
	/* for eMMC spec v4.4, csd_struct value will be 3		*/
	/* for eMMC spec v4.1-v4.3 csd struct value will be 2	*/
	/* currently we don't support csd_struct version No. 1.0	*/
	if (csd_struct == 0) {
		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd_struct);
#else
	/* For SanDisk iNAND, after comparing 1.3 with 1.2, I think	*/
	/* 1.3 is compatible with 1.2.					*/
	if (csd_struct != 1 && csd_struct != 2 && csd_struct != 3) {
		printk(KERN_ERR "%s: unrecognised CSD structure version 1.%d\n",
			mmc_hostname(card->host), csd_struct);
#endif
		return -EINVAL;
	}

	csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	return 0;
}
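The "magic" 4096 * 512 capacity that the EXT_CSD readers on this page test for follows directly from the decode above: a sector-addressed (larger than 2 GiB) device saturates the C_SIZE and C_SIZE_MULT fields, so the formula tops out at exactly that value and the real size must be taken from EXT_CSD SEC_CNT. A small illustrative check, assuming the saturated field values:

/* Illustrative only: evaluate the mmc_decode_csd() capacity formula at the
 * saturated (all-ones) field values a sector-addressed eMMC reports. The
 * result is the 4096 * 512 "magic" capacity the EXT_CSD readers test for. */
static unsigned int csd_magic_capacity(void)
{
	unsigned int m = 0xFFF;	/* C_SIZE, 12 bits, all ones */
	unsigned int e = 0x7;	/* C_SIZE_MULT, 3 bits, all ones */

	return (1 + m) << (e + 2);	/* 4096 << 9 == 4096 * 512 == 2097152 */
}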

/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	if (card->csd.structure == 3) {
		int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
		if (ext_csd_struct > 2) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					ext_csd_struct);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		/* for HTC config, we don't exec following code */
		/* original code: if (card->ext_csd.sectors) { */
		if (false) {
			unsigned boot_sectors;
			/* size is in 256K chunks, i.e. 512 sectors each */
			boot_sectors = ext_csd[EXT_CSD_BOOT_SIZE_MULTI] * 512;
			card->ext_csd.sectors -= boot_sectors;
		}
	}

	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60)
	/* for eMMC v4.4 there are two additional card type defined */
	/* they are : 	High-Speed Dual Data Rate Multimedia Card @ 52Mhz - 1.8v or 3v IO */
	/* High-Speed Dual Data Rate Multimedia Card @ 52Mhz - 1.2v IO 	*/
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26 | EXT_CSD_CARD_TYPE_DDR_HV | EXT_CSD_CARD_TYPE_DDR_LV:
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26 | EXT_CSD_CARD_TYPE_DDR_HV:
#endif
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}

	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
	}

out:
	kfree(ext_csd);

	return err;
}
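The four SEC_CNT bytes at offsets 212..215 are assembled little-endian, exactly as the shifts above do, and the count is defined in fixed 512-byte sectors. A minimal helper sketch (not a kernel API) folding that into a byte count:

/* Minimal sketch, not the kernel's helper: assemble the little-endian 32-bit
 * SEC_CNT field starting at EXT_CSD byte 212 and convert it to bytes. */
static unsigned long long ext_csd_capacity_bytes(const u8 *ext_csd)
{
	u32 sectors = ext_csd[212] |
		      ext_csd[213] << 8 |
		      ext_csd[214] << 16 |
		      (u32)ext_csd[215] << 24;

	return (unsigned long long)sectors * 512;
}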
Example #12
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;
	unsigned int ext_csd_struct;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
		       "receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/*
		 * We allow hosts that cannot perform the command
		 * to fail more gracefully.
		 */
		if (err != -EINVAL)
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
			       "on a possible high capacity card. "
			       "Card will be ignored.\n",
			       mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
			       "EXT_CSD, performance might "
			       "suffer.\n", mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	ext_csd_struct = ext_csd[EXT_CSD_REV];
	if (ext_csd_struct > 2) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
		       "version %d\n", mmc_hostname(card->host),
		       ext_csd_struct);
		err = -EINVAL;
		goto out;
	}

	if (ext_csd_struct >= 2) {
		card->ext_csd.sectors =
		    ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
		    ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
		    ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
		    ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
	}

	switch (ext_csd[EXT_CSD_CARD_TYPE]) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
		       "support any high-speed modes.\n",
		       mmc_hostname(card->host));
		goto out;
	}

out:
	kfree(ext_csd);

	return err;
}
Example #13
/*
 * Read and decode extended CSD. Switch to high-speed and wide bus
 * if supported.
 */
static int mmc_process_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	err = MMC_ERR_FAILED;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return MMC_ERR_NONE;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd. mmc v4 cards will be "
			"treated as v3.\n", mmc_hostname(card->host));
		return MMC_ERR_FAILED;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err != MMC_ERR_NONE) {
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = MMC_ERR_NONE;
		}
		goto out;
	}

	card->ext_csd.sectors =
		ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
		ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
		ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
		ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
	if (card->ext_csd.sectors)
		mmc_card_set_blockaddr(card);

	switch (ext_csd[EXT_CSD_CARD_TYPE]) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
		goto out;
	}

	if (card->host->caps & MMC_CAP_MMC_HIGHSPEED) {
		/* Activate highspeed support. */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err != MMC_ERR_NONE) {
			printk(KERN_WARNING "%s: failed to switch "
				"card to mmc v4 high-speed mode.\n",
			       mmc_hostname(card->host));
			err = MMC_ERR_NONE;
			goto out;
		}

		mmc_card_set_highspeed(card);

		mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
	}

	/* Check for host support for wide-bus modes. */
	if (card->host->caps & MMC_CAP_4_BIT_DATA) {
		/* Activate 4-bit support. */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4);
		if (err != MMC_ERR_NONE) {
			printk(KERN_WARNING "%s: failed to switch "
				"card to mmc v4 4-bit bus mode.\n",
			       mmc_hostname(card->host));
			err = MMC_ERR_NONE;
			goto out;
		}

		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
	}

out:
	kfree(ext_csd);

	return err;
}
Example #14
File: mmc.c Project: cilynx/dd-wrt
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	err = MMC_ERR_FAILED;

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return MMC_ERR_NONE;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd. mmc v4 cards will be "
			"treated as v3.\n", mmc_hostname(card->host));
		return MMC_ERR_FAILED;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err != MMC_ERR_NONE) {
		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = MMC_ERR_NONE;
		}
		goto out;
	}

	card->ext_csd.sectors =
		ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
		ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
		ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
		ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
	if (card->ext_csd.sectors)
		mmc_card_set_blockaddr(card);

	switch (ext_csd[EXT_CSD_CARD_TYPE]) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
		goto out;
	}

out:
	kfree(ext_csd);

	return err;
}
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl)
{
    char l_buf[512];
    struct scatterlist msdc_sg;
    struct mmc_data  msdc_data;
    struct mmc_command msdc_cmd;
    struct mmc_command msdc_stop;

#ifdef MTK_MSDC_USE_CMD23
	struct mmc_command msdc_sbc;
#endif
    
    struct mmc_request  msdc_mrq;
    struct msdc_host *host_ctl;
   
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];
    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user want access %d partition\n",msdc_ctl->partition);
#endif

    mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
    switch (msdc_ctl->partition){
        case BOOT_PARTITION_1:
            if (0x1 != (l_buf[179] & 0x7)){
                /* change to access boot partition 1 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x1;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        case BOOT_PARTITION_2:
            if (0x2 != (l_buf[179] & 0x7)){
                /* change to access boot partition 2 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x2;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        default:
            /* make sure access partition is user data area */
            if (0 != (l_buf[179] & 0x7)){
                /* set back to access user area */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x0;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
    }

    if (msdc_ctl->total_size > 64*1024) {
        msdc_ctl->result = -1;
        mmc_release_host(host_ctl->mmc);
        return msdc_ctl->result;
    }

    memset(&msdc_data, 0, sizeof(struct mmc_data));
    memset(&msdc_mrq, 0, sizeof(struct mmc_request));
    memset(&msdc_cmd, 0, sizeof(struct mmc_command));
    memset(&msdc_stop, 0, sizeof(struct mmc_command));

#ifdef MTK_MSDC_USE_CMD23
    memset(&msdc_sbc, 0, sizeof(struct mmc_command));
#endif

    msdc_mrq.cmd = &msdc_cmd;
    msdc_mrq.data = &msdc_data;

    if(msdc_ctl->trans_type)
        dma_force[host_ctl->id] = FORCE_IN_DMA;
    else
        dma_force[host_ctl->id] = FORCE_IN_PIO;

    if (msdc_ctl->iswrite){
        msdc_data.flags = MMC_DATA_WRITE;
        msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)) {
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size);
        }
    } else {
        msdc_data.flags = MMC_DATA_READ;
        msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size);
    }

#ifdef MTK_MSDC_USE_CMD23
    if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && 
            !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)){
        msdc_mrq.sbc = &msdc_sbc;
        msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT;
        msdc_mrq.sbc->arg = msdc_data.blocks;
        msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
    }
#endif

    msdc_cmd.arg = msdc_ctl->address;

    if (!mmc_card_blockaddr(host_ctl->mmc->card)){
        printk("this device use byte address!!\n");
        msdc_cmd.arg <<= 9;
    }
    msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    msdc_stop.opcode = MMC_STOP_TRANSMISSION;
    msdc_stop.arg = 0;
    msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

    msdc_data.stop = &msdc_stop;
    msdc_data.blksz = 512;
    msdc_data.sg = &msdc_sg;
    msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
    printk("total size is %d\n",msdc_ctl->total_size);
#endif
    sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
    mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);
    mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

    if (!msdc_ctl->iswrite){
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)) {
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size);
        }
    }

    if (msdc_ctl->partition){
        mmc_send_ext_csd(host_ctl->mmc->card,l_buf);

        if (l_buf[179] & 0x7) {
            /* set back to access user area */
            l_buf[179] &= ~0x7;
            l_buf[179] |= 0x0;
            mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
        }
    }

    mmc_release_host(host_ctl->mmc);

    if (msdc_cmd.error)
        msdc_ctl->result = msdc_cmd.error;

    if (msdc_data.error) {
        msdc_ctl->result = msdc_data.error;
    } else {
        msdc_ctl->result = 0;
    }
    dma_force[host_ctl->id] = FORCE_NOTHING;
    return msdc_ctl->result;

}
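The ioctl above switches between the boot partitions and the user area by rewriting only the low three bits of EXT_CSD byte 179 (PARTITION_CONFIG) and then issuing mmc_switch(card, 0, 179, value, 1000). A small sketch of that bit manipulation with named values; the constant names are ours, the numbers mirror what the code writes into l_buf[179].

/* Sketch of the PARTITION_ACCESS handling done inline above; constant names
 * are ours, the values match what the ioctl writes. */
#define PARTITION_CONFIG_BYTE	179
#define PARTITION_ACCESS_MASK	0x7
#define PARTITION_ACCESS_USER	0x0
#define PARTITION_ACCESS_BOOT1	0x1
#define PARTITION_ACCESS_BOOT2	0x2

static u8 partition_config_for_access(u8 current_cfg, u8 access)
{
	/* Preserve the upper bits (boot enable / boot ack) and replace only
	 * the 3-bit PARTITION_ACCESS field, as the switch cases above do. */
	return (current_cfg & ~PARTITION_ACCESS_MASK) |
	       (access & PARTITION_ACCESS_MASK);
}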
Example #16
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
//[NAGSM_Android_HDLNC_SDcard_SEOJW_2011_01_12 : eMMC Trim add 	
#if defined (CONFIG_MMC_DISCARD) && defined (CONFIG_S5PC110_DEMPSEY_BOARD)
	unsigned int e, m, csd_struct, a, b;
#else /* CONFIG_MMC_DISCARD */
	unsigned int e, m, csd_struct;
#endif /* CONFIG_MMC_DISCARD */
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 */
	csd_struct = UNSTUFF_BITS(resp, 126, 2);
#if defined(CONFIG_INAND_VERSION_PATCH)
	if (csd_struct != 1 && csd_struct != 2 && csd_struct != 3) {
#else
	if (csd_struct != 1 && csd_struct != 2) {
#endif
		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd_struct);
		return -EINVAL;
	}

	csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
//[NAGSM_Android_HDLNC_SDcard_SEOJW_2011_01_12 : eMMC Trim add 
#if defined (CONFIG_MMC_DISCARD) && defined (CONFIG_S5PC110_DEMPSEY_BOARD)

	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}
#endif /* CONFIG_MMC_DISCARD */
	return 0;
}

/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
			"version %d\n", mmc_hostname(card->host),
			card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
#if !defined(CONFIG_INAND_VERSION_PATCH)
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
#endif
	}

	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
//[NAGSM_Android_HDLNC_SDcard_SEOJW_2011_01_12 : eMMC Trim add 	
#if defined (CONFIG_MMC_DISCARD) && defined (CONFIG_S5PC110_DEMPSEY_BOARD)
	case EXT_CSD_CARD_TYPE_TEMP |EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
#endif
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}

	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
//[NAGSM_Android_HDLNC_SDcard_SEOJW_2011_01_12 : eMMC Trim add 					
#if defined (CONFIG_MMC_DISCARD) && defined (CONFIG_S5PC110_DEMPSEY_BOARD)
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
#endif /* CONFIG_MMC_DISCARD */
	}
//[NAGSM_Android_HDLNC_SDcard_SEOJW_2011_01_12 : eMMC Trim add 	
#if defined (CONFIG_MMC_DISCARD) && defined (CONFIG_S5PC110_DEMPSEY_BOARD)
	if (card->ext_csd.rev >= 4) {
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}
#endif /* CONFIG_MMC_DISCARD */

out:
	kfree(ext_csd);

	return err;
}
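Both mmc_read_ext_csd() variants above treat S_A_TIMEOUT as an exponent: the sleep/awake timeout is (1 << S_A_TIMEOUT) * 100 ns and is only honoured for exponents 0x01 through 0x17. A tiny illustrative conversion (the helper name is ours):

/* Illustrative helper, not a kernel API: convert the S_A_TIMEOUT exponent to
 * nanoseconds. At the maximum legal value 0x17 this is 2^23 * 100 ns, about
 * 0.84 s. */
static u64 s_a_timeout_ns(u8 sa_exp)
{
	if (sa_exp == 0 || sa_exp > 0x17)
		return 0;	/* outside the range the code above accepts */

	return ((u64)1 << sa_exp) * 100;
}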
Example #17
#ifdef CONFIG_MACH_LGE
/* LGE builds render the EXT_CSD dump through seq_file; this alternate
 * signature is reconstructed from the seq_printf() calls below and the
 * function name is an assumption. */
static int mmc_ext_csd_read(struct seq_file *s, void *data)
#else
static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
#endif
{
#ifdef CONFIG_MACH_LGE
	struct mmc_card *card = s->private;
#else
	struct mmc_card *card = inode->i_private;
	char *buf;
	ssize_t n = 0;
#endif
	u8 *ext_csd;
#ifdef CONFIG_MACH_LGE
	u8 ext_csd_rev;
	int err;
	const char *str;
	char *buf_for_health_report;
	char *buf_for_firmwware_version;
	ssize_t output = 0;
	int cnt;
#else
	int err, i;

	buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
#endif
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		err = -ENOMEM;
		goto out_free;
	}

	mmc_rpm_hold(card->host, &card->dev);
	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	mmc_rpm_release(card->host, &card->dev);
	if (err)
		goto out_free;
#ifdef CONFIG_MACH_LGE

	ext_csd_rev = ext_csd[192];
#else
	for (i = 0; i < 512; i++)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");
	BUG_ON(n != EXT_CSD_STR_LEN);

	filp->private_data = buf;
	kfree(ext_csd);
	return 0;
#endif

#ifdef CONFIG_MACH_LGE
	switch (ext_csd_rev) {
	case 7:
		str = "5.0";
		break;
	case 6:
		str = "4.5";
		break;
	case 5:
		str = "4.41";
		break;
	case 3:
		str = "4.3";
		break;
	case 2:
		str = "4.2";
		break;
	case 1:
		str = "4.1";
		break;
	case 0:
		str = "4.0";
		break;
	default:
		goto out_free;
	}
	seq_printf(s, "Extended CSD rev 1.%d (MMC %s)\n", ext_csd_rev, str);
	if (ext_csd_rev < 3)
		goto out_free; /* No ext_csd */
	if (ext_csd_rev >= 7) {
		seq_printf(s, "[505] Extended Security Commands Error, ext_security_err: 0x%02x\n", ext_csd[505]);
	}
       seq_printf(s, "[504] Supported Command Sets, s_cmd_set: 0x%02x\n", ext_csd[504]);
       seq_printf(s, "[503] HPI features, hpi_features: 0x%02x\n", ext_csd[503]);
       seq_printf(s, "[502] Background operations support, bkops_support: 0x%02x\n", ext_csd[502]);
       if (ext_csd_rev >= 6) {
		seq_printf(s, "[501] Max packed read commands, max_packed_reads: 0x%02x\n", ext_csd[501]);
		seq_printf(s, "[500] Max packed write commands, max_packed_writes: 0x%02x\n", ext_csd[500]);
		seq_printf(s, "[499] Data Tag Support, data_tag_support: 0x%02x\n", ext_csd[499]);
		seq_printf(s, "[498] Tag Unit Size, tag_unit_size: 0x%02x\n", ext_csd[498]);
		seq_printf(s, "[497] Tag Resources Size, tag_res_size: 0x%02x\n", ext_csd[497]);
		seq_printf(s, "[496] Context management capabilities, context_capabilities: 0x%02x\n", ext_csd[496]);
		seq_printf(s, "[495] Large Unit size, large_unit_size_m1: 0x%02x\n", ext_csd[495]);
		seq_printf(s, "[494] Extended partitions attribute support, ext_support: 0x%02x\n", ext_csd[494]);
		if (ext_csd_rev >= 7) {
			buf_for_health_report = kmalloc(66, GFP_KERNEL);
			if (!buf_for_health_report) {
				err = -ENOMEM;
				goto out_free;
			}
			buf_for_firmwware_version = kmalloc(18, GFP_KERNEL);
			if (!buf_for_firmwware_version) {
				kfree(buf_for_health_report);
				err = -ENOMEM;
				goto out_free;
			}
			seq_printf(s, "[493] Supported modes, supported_modes: 0x%02x\n", ext_csd[493]);
			seq_printf(s, "[492] Ffu features, ffu_features: 0x%02x\n", ext_csd[492]);
			seq_printf(s, "[491] Operation codes timeout, operation_code_timeout: 0x%02x\n", ext_csd[491]);
			seq_printf(s, "[490:487] Ffu features, ffu_features: 0x%08x\n", (ext_csd[487] << 0) | (ext_csd[488] << 8) | (ext_csd[489] << 16) | (ext_csd[490] << 24));
			seq_printf(s, "[305:302] Number of FW sectors correctly programmed, number_of_fw_sectors_correctly_programmed: 0x%08x\n", (ext_csd[302] << 0) | (ext_csd[303] << 8) | (ext_csd[304] << 16) | (ext_csd[305] << 24));
			output = 0;
			for (cnt = 301 ; cnt >= 270 ; cnt--)
				output += sprintf(buf_for_health_report + output, "%02x", ext_csd[cnt]);
			output += sprintf(buf_for_health_report + output, "\n");
			seq_printf(s, "[301:270] Vendor proprietary health report, vendor_proprietary_health_report(raw data): %s", buf_for_health_report); //mina.park;
			kfree(buf_for_health_report);
			seq_printf(s, "[269] Device life time estimation type B, device_life_time_est_typ_b: 0x%02x\n", ext_csd[269]);
			seq_printf(s, "[268] Device life time estimation type A, device_life_time_est_typ_a: 0x%02x\n", ext_csd[268]);
			seq_printf(s, "[267] Pre EOL information, pre_eol_info: 0x%02x\n", ext_csd[267]);
			seq_printf(s, "[266] Optimal read size, optimal_read_size: 0x%02x\n", ext_csd[266]);
			seq_printf(s, "[265] Optimal write size, optimal_write_size: 0x%02x\n", ext_csd[265]);
			seq_printf(s, "[264] Optimal trim unit size, optimal_trim_unit_size: 0x%02x\n", ext_csd[264]);
			seq_printf(s, "[263:262] Device version, device_version: 0x%02x\n", (ext_csd[262] << 0) | (ext_csd[263] << 8));
			output=0;
			for (cnt = 261 ; cnt >= 254 ; cnt--)
				output += sprintf(buf_for_firmwware_version + output, "%02x", ext_csd[cnt]);
			output += sprintf(buf_for_firmwware_version + output, "\n");
			seq_printf(s, "[261:254] Firmware version, firmwware_version(raw data): %s", buf_for_firmwware_version); //mina.park;
			kfree(buf_for_firmwware_version);
			seq_printf(s, "[253] Power class for 200MHz, DDR at VCC=3.6V, pwr_cl_ddr_200_360: 0x%02x\n", ext_csd[253]);
		}
		seq_printf(s, "[252:249] Cache size, cache_size %d KiB\n", (ext_csd[249] << 0) |
                          (ext_csd[250] << 8) | (ext_csd[251] << 16) |
                          (ext_csd[252] << 24));
		seq_printf(s, "[248] Generic CMD6 timeout, generic_cmd6_time: 0x%02x\n", ext_csd[248]);
		seq_printf(s, "[247] Power off notification timeout, power_off_long_time: 0x%02x\n", ext_csd[247]);
		seq_printf(s, "[246] Background operations status, bkops_status: 0x%02x\n", ext_csd[246]);
		seq_printf(s, "[245:242] Number of correctly programmed sectors, correctly_prg_sectors_num %d KiB\n", (ext_csd[242] << 0) | (ext_csd[243] << 8) | (ext_csd[244] << 16) | (ext_csd[245] << 24));
       }
       if (ext_csd_rev >= 5) {
               seq_printf(s, "[241] 1st initialization time after partitioning, ini_timeout_ap: 0x%02x\n", ext_csd[241]);
               seq_printf(s, "[239] Power class for 52MHz, DDR at 3.6V, pwr_cl_ddr_52_360: 0x%02x\n", ext_csd[239]);
               seq_printf(s, "[238] POwer class for 52MHz, DDR at 1.95V, pwr_cl_ddr_52_195: 0x%02x\n", ext_csd[238]);
               if (ext_csd_rev >= 6) {
			seq_printf(s, "[237] Power class for 200MHz, SDR at 3.6V, pwr_cl_200_360: 0x%02x\n", ext_csd[237]);
			seq_printf(s, "[236] Power class for 200MHz, SDR at 1.95V, pwr_cl_200_195: 0x%02x\n", ext_csd[236]);
               }
               seq_printf(s, "[235] Minimun Write Performance for 8bit at 52MHz in DDR mode, min_perf_ddr_w_8_52: 0x%02x\n", ext_csd[235]);
               seq_printf(s, "[234] Minimun Read Performance for 8bit at 52MHz in DDR modemin_perf_ddr_r_8_52: 0x%02x\n", ext_csd[234]);
               seq_printf(s, "[232] TRIM Multiplier, trim_mult: 0x%02x\n", ext_csd[232]);
               seq_printf(s, "[231] Secure Feature support, sec_feature_support: 0x%02x\n", ext_csd[231]);
       }
	if (ext_csd_rev == 5 || ext_csd_rev == 7) { /* Obsolete in 4.5, revived in 5.0 */
               seq_printf(s, "[230] Secure Erase Multiplier, sec_erase_mult: 0x%02x\n", ext_csd[230]);
               seq_printf(s, "[229] Secure TRIM Multiplier, sec_trim_mult:  0x%02x\n", ext_csd[229]);
       }
       seq_printf(s, "[228] Boot information, boot_info: 0x%02x\n", ext_csd[228]);
       seq_printf(s, "[226] Boot partition size, boot_size_mult : 0x%02x\n", ext_csd[226]);
       seq_printf(s, "[225] Access size, acc_size: 0x%02x\n", ext_csd[225]);
       seq_printf(s, "[224] High-capacity erase unit size, hc_erase_grp_size: 0x%02x\n", ext_csd[224]);
       seq_printf(s, "[223] High-capacity erase timeout, erase_timeout_mult: 0x%02x\n", ext_csd[223]);
       seq_printf(s, "[222] Reliable write sector count, rel_wr_sec_c: 0x%02x\n", ext_csd[222]);
       seq_printf(s, "[221] High-capacity write protect group size, hc_wp_grp_size: 0x%02x\n", ext_csd[221]);
       seq_printf(s, "[220] Sleep current(VCC), s_c_vcc: 0x%02x\n", ext_csd[220]);
       seq_printf(s, "[219] Sleep current(VCCQ), s_c_vccq: 0x%02x\n", ext_csd[219]);
	if (ext_csd_rev == 7) {
		seq_printf(s, "[218] Production state awareness timeout, production_state_awareness_timeout: 0x%02x\n", ext_csd[218]);
	}
       seq_printf(s, "[217] Sleep/awake timeout, s_a_timeout: 0x%02x\n", ext_csd[217]);
		if (ext_csd_rev == 7) {
		seq_printf(s, "[216] Sleep notification timeout, sleep_notification_time: 0x%02x\n", ext_csd[216]);
	}
       seq_printf(s, "[215:212] Sector Count, sec_count: 0x%08x\n", (ext_csd[215] << 24) |(ext_csd[214] << 16) | (ext_csd[213] << 8)  | ext_csd[212]);
       seq_printf(s, "[210] Minimum Write Performance for 8bit at 52MHz, min_perf_w_8_52: 0x%02x\n", ext_csd[210]);
       seq_printf(s, "[209] Minimum Read Performance for 8bit at 52MHz, min_perf_r_8_52: 0x%02x\n", ext_csd[209]);
       seq_printf(s, "[208] Minimum Write Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_w_8_26_4_52: 0x%02x\n", ext_csd[208]);
       seq_printf(s, "[207] Minimum Read Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_r_8_26_4_52: 0x%02x\n", ext_csd[207]);
       seq_printf(s, "[206] Minimum Write Performance for 4bit at 26MHz, min_perf_w_4_26: 0x%02x\n", ext_csd[206]);
       seq_printf(s, "[205] Minimum Read Performance for 4bit at 26MHz, min_perf_r_4_26: 0x%02x\n", ext_csd[205]);
       seq_printf(s, "[203] Power class for 26MHz at 3.6V, pwr_cl_26_360: 0x%02x\n", ext_csd[203]);
       seq_printf(s, "[202] Power class for 52MHz at 3.6V, pwr_cl_52_360: 0x%02x\n", ext_csd[202]);
       seq_printf(s, "[201] Power class for 26MHz at 1.95V, pwr_cl_26_195: 0x%02x\n", ext_csd[201]);
       seq_printf(s, "[200] Power class for 52MHz at 1.95V, pwr_cl_52_195: 0x%02x\n", ext_csd[200]);
       if (ext_csd_rev >= 5) {
               seq_printf(s, "[199] Partition switching timing, partition_switch_time: 0x%02x\n", ext_csd[199]);
               seq_printf(s, "[198] Out-of-interrupt busy timing, out_of_interrupt_time: 0x%02x\n", ext_csd[198]);
       }
       if (ext_csd_rev >= 6)
		seq_printf(s, "[197] IO Driver Strength, driver_strength: 0x%02x\n", ext_csd[197]);
	seq_printf(s, "[196] Device type, device_type: 0x%02x\n", ext_csd[196]);
       seq_printf(s, "[194] CSD structure version, csd_structure: 0x%02x\n", ext_csd[194]);
       seq_printf(s, "[192] Extended CSD revision, ext_csd_rev: 0x%02x\n", ext_csd[192]);
       seq_printf(s, "[191] Command set, cmd_set: 0x%02x\n", ext_csd[191]);
       seq_printf(s, "[189] Command set revision, cmd_set_rev: 0x%02x\n", ext_csd[189]);
       seq_printf(s, "[187] Power class, power_class: 0x%02x\n", ext_csd[187]);
       seq_printf(s, "[185] High-speed interface timing, hs_timing: 0x%02x\n", ext_csd[185]);
       seq_printf(s, "[181] Erased memory content, erased_mem_cont: 0x%02x\n", ext_csd[181]);
       seq_printf(s, "[179] Partition configuration, partition_config: 0x%02x\n", ext_csd[179]);
       seq_printf(s, "[178] Boot config protection, boot_config_prot: 0x%02x\n", ext_csd[178]);
	seq_printf(s, "[177] Boot bus Conditions, boot_bus_conditions: 0x%02x\n", ext_csd[177]);
       seq_printf(s, "[175] High-density erase group definition, erase_group_def: 0x%02x\n", ext_csd[175]);
       if (ext_csd_rev >= 5) {
		seq_printf(s, "[174] Boot write protection status registers, boot_wp_status: 0x%02x\n", ext_csd[174]);
               seq_printf(s, "[173] Boot area write protection register, boot_wp: 0x%02x\n", ext_csd[173]);
               seq_printf(s, "[171] User area write protection register, user_wp: 0x%02x\n", ext_csd[171]);
               seq_printf(s, "[169] FW configuration, fw_config: 0x%02x\n", ext_csd[169]);
               seq_printf(s, "[168] RPMB Size, rpmb_size_mult: 0x%02x\n", ext_csd[168]);
               seq_printf(s, "[167] Write reliability setting register, wr_rel_set: 0x%02x\n", ext_csd[167]);
               seq_printf(s, "[166] Write reliability parameter register, wr_rel_param: 0x%02x\n", ext_csd[166]);
               seq_printf(s, "[163] Enable background operations handshake, bkops_en: 0x%02x\n", ext_csd[163]);
               seq_printf(s, "[162] H/W reset function, rst_n_function: 0x%02x\n", ext_csd[162]);
		seq_printf(s, "[161] HPI management, hpi_mgmt: 0x%02x\n", ext_csd[161]);
		seq_printf(s, "[160] Partitioning Support, partitioning_support: 0x%02x\n", ext_csd[160]);
               seq_printf(s, "[159:157] Max Enhanced Area Size, max_enh_size_mult: 0x%06x\n", (ext_csd[159] << 16) | (ext_csd[158] << 8) |ext_csd[157]);
               seq_printf(s, "[156] Partitions attribute, partitions_attribute: 0x%02x\n", ext_csd[156]);
               seq_printf(s, "[155] Partitioning Setting, partition_setting_completed: 0x%02x\n", ext_csd[155]);
               seq_printf(s, "[154:152] General Purpose Partition Size, gp_size_mult_4: 0x%06x\n", (ext_csd[154] << 16) |(ext_csd[153] << 8) | ext_csd[152]);
               seq_printf(s, "[151:149] General Purpose Partition Size, gp_size_mult_3: 0x%06x\n", (ext_csd[151] << 16) |(ext_csd[150] << 8) | ext_csd[149]);
               seq_printf(s, "[148:146] General Purpose Partition Size, gp_size_mult_2: 0x%06x\n", (ext_csd[148] << 16) |(ext_csd[147] << 8) | ext_csd[146]);
               seq_printf(s, "[145:143] General Purpose Partition Size, gp_size_mult_1: 0x%06x\n", (ext_csd[145] << 16) |(ext_csd[144] << 8) | ext_csd[143]);
               seq_printf(s, "[142:140] Enhanced User Data Area Size, enh_size_mult: 0x%06x\n", (ext_csd[142] << 16) |(ext_csd[141] << 8) | ext_csd[140]);
		seq_printf(s, "[139:136] Enhanced User Data Start Address, enh_start_addr: 0x%06x\n", (ext_csd[139] << 24) | (ext_csd[138] << 16) | (ext_csd[137] << 8) | ext_csd[136]);
               seq_printf(s, "[134] Bad Block Management mode, sec_bad_blk_mgmnt: 0x%02x\n", ext_csd[134]);
       }
       if (ext_csd_rev >= 6) {
               int j;
		seq_printf(s, "[131] Periodic Wake-up, periodic_wakeup: 0x%02x\n", ext_csd[131]);
		seq_printf(s, "[130] Program CID CSD in DDR mode support, program_cid_csd_ddr_support: 0x%02x\n",
                          ext_csd[130]);
               for (j = 127; j >= 64; j--)
			seq_printf(s, "[127:64] Vendor Specific Fields, vendor_specific_field[%d]: 0x%02x\n",
                                  j, ext_csd[j]);
		seq_printf(s, "[63] Native sector size, native_sector_size: 0x%02x\n", ext_csd[63]);
		seq_printf(s, "[62] Sector size emulation, use_native_sector: 0x%02x\n", ext_csd[62]);
		seq_printf(s, "[61] Sector size, data_sector_size: 0x%02x\n", ext_csd[61]);
		seq_printf(s, "[60] 1st initialization after disabling sector size emulation, ini_timeout_emu: 0x%02x\n", ext_csd[60]);
		seq_printf(s, "[59] Class 6 commands control, class_6_ctrl: 0x%02x\n", ext_csd[59]);
		seq_printf(s, "[58] Number of addressed group to be Released, dyncap_needed: 0x%02x\n", ext_csd[58]);
		seq_printf(s, "[57:56] Exception events control, exception_events_ctrl: 0x%04x\n",
                          (ext_csd[57] << 8) | ext_csd[56]);
		seq_printf(s, "[55:54] Exception events status, exception_events_status: 0x%04x\n",
                          (ext_csd[55] << 8) | ext_csd[54]);
		seq_printf(s, "[53:52] Extended Partitions Attribute, ext_partitions_attribute: 0x%04x\n",
                          (ext_csd[53] << 8) | ext_csd[52]);
               for (j = 51; j >= 37; j--)
			seq_printf(s, "[51:37]Context configuration, context_conf[%d]: 0x%02x\n", j,
                                  ext_csd[j]);
		seq_printf(s, "[36] Packed command status, packed_command_status: 0x%02x\n", ext_csd[36]);
		seq_printf(s, "[35] Packed command failure index, packed_failure_index: 0x%02x\n", ext_csd[35]);
		seq_printf(s, "[34] Power Off Notification, power_off_notification: 0x%02x\n", ext_csd[34]);
		seq_printf(s, "[33] Control to turn the Cache On Off, cache_ctrl: 0x%02x\n", ext_csd[33]);
       }
#endif
out_free:
#ifndef CONFIG_MACH_LGE
	kfree(buf);
#endif
	kfree(ext_csd);
	return err;
}
Example #18
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, csd_struct;
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 */
	csd_struct = UNSTUFF_BITS(resp, 126, 2);
#if defined(CONFIG_MACH_OMAP3630_EDP1) || defined(CONFIG_MACH_OMAP3621_EDP1) || defined(CONFIG_MACH_OMAP3621_BOXER) || defined(CONFIG_MACH_OMAP3621_EVT1A) || defined(CONFIG_MACH_OMAP3621_GOSSAMER)
	/* To recognize Boxer board eMMC */
	if (csd_struct != 1 && csd_struct != 2 && csd_struct != 3) {
#else
	if (csd_struct != 1 && csd_struct != 2) {
#endif
		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd_struct);
		return -EINVAL;
	}

	csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	return 0;
}

/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/*
		 * We allow hosts that cannot perform the command
		 * to fail more gracefully.
		 */
		if (err != -EINVAL)
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
#if defined(CONFIG_MACH_OMAP3630_EDP1) || defined(CONFIG_MACH_OMAP3621_EDP1) || defined(CONFIG_MACH_OMAP3621_BOXER) || defined(CONFIG_MACH_OMAP3621_EVT1A) || defined(CONFIG_MACH_OMAP3621_GOSSAMER)
	/* To recognize Boxer board eMMC */
	if (card->ext_csd.rev > 5) {
#else
	if (card->ext_csd.rev > 2) {
#endif
		printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
			"version %d\n", mmc_hostname(card->host),
			card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

#ifdef CONFIG_HC_Broken_eMMC_ZOOM2
	/*
	 * Hack: eMMC on Zoom2 seems to have a lower EXT_CSD Rev.
	 * This is incorrect as it is an HC card. The card becomes
	 * unusable if not set to blockaddr mode.
	 * The low level driver sets up the unused bit for MMC2 on Zoom2.
	 * Revert this hack once it is fixed in the card.
	 */
	if (card->host->unused) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
	} else
#endif
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (mmc_card_blockaddr(card))
			mmc_card_set_blockaddr(card);
	}

	/* disable DDR detection */
	ext_csd[EXT_CSD_CARD_TYPE] &= EXT_CSD_CARD_TYPE_MASK;

	switch (ext_csd[EXT_CSD_CARD_TYPE]) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
		goto out;
	}

	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		else {
			card->ext_csd.sa_timeout = MMC_SLEEP_TIMEOUT_DEFAULT;
			printk(KERN_WARNING "%s: card's S_A_TIMEOUT is out of range.\n",
				mmc_hostname(card->host));
		}
	}

out:
	kfree(ext_csd);

	return err;
}

MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_serial.attr,
	NULL,
};

static struct attribute_group mmc_std_attr_group = {
	.attrs = mmc_std_attrs,
};

static struct attribute_group *mmc_attr_groups[] = {
	&mmc_std_attr_group,
	NULL,
};

static struct device_type mmc_type = {
	.groups = mmc_attr_groups,
};

/*
 * Handle the detection and initialisation of a card.
 *
 * In the case of a resume, "oldcard" will contain the card
 * we're trying to reinitialise.
 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	u32 rocr;
	unsigned int max_dtr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->type = MMC_TYPE_MMC;
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));

		/*
		 * If the OCR response to OP_COND from the card
		 * acknowledges block addressing, enable it.
		 */
		if (rocr & MMC_CARD_ACCESS_MODE_MASK)
			mmc_card_set_blockaddr(card);

	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */
		err = mmc_read_ext_csd(card);
		if (err)
			goto free_card;
	}

	/*
	 * Activate high speed (if supported)
	 */
	if ((card->ext_csd.hs_max_dtr != 0) &&
		(host->caps & MMC_CAP_MMC_HIGHSPEED)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_HS_TIMING, 1);
		if (err)
			goto free_card;

		mmc_card_set_highspeed(card);

		mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
	}

	/*
	 * Compute bus speed.
	 */
	max_dtr = (unsigned int)-1;

	if (mmc_card_highspeed(card)) {
		if (max_dtr > card->ext_csd.hs_max_dtr)
			max_dtr = card->ext_csd.hs_max_dtr;
	} else if (max_dtr > card->csd.max_dtr) {
		max_dtr = card->csd.max_dtr;
	}

	mmc_set_clock(host, max_dtr);

	/*
	 * Activate wide bus (if supported).
	 */
	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
		unsigned ext_csd_bit, bus_width;

		if (host->caps & MMC_CAP_8_BIT_DATA) {
			ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
			bus_width = MMC_BUS_WIDTH_8;
		} else {
			ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
			bus_width = MMC_BUS_WIDTH_4;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH, ext_csd_bit);

		if (err)
			goto free_card;

		mmc_set_bus_width(card->host, bus_width);
	}

	if (!oldcard)
		host->card = card;

	return 0;

free_card:
	if (!oldcard)
		mmc_remove_card(card);
err:

	return err;
}
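
A note on the MMC_CARD_ACCESS_MODE_MASK check above: in the eMMC OCR, bits [30:29] carry the access mode, with 0b10 meaning sector addressing. The sketch below uses illustrative constants (the driver's own mask may be defined differently) and is only a hedged reference, not the driver's code.

#define EXAMPLE_OCR_ACCESS_MODE_MASK	(0x3u << 29)
#define EXAMPLE_OCR_SECTOR_MODE		(0x2u << 29)

/* Non-zero when the OP_COND response advertises sector (block) addressing. */
static inline int example_ocr_is_sector_mode(u32 rocr)
{
	return (rocr & EXAMPLE_OCR_ACCESS_MODE_MASK) == EXAMPLE_OCR_SECTOR_MODE;
}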

/*
 * Host is being removed. Free up the current card.
 */
static void mmc_remove(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_remove_card(host->card);
	host->card = NULL;
}
Exemplo n.º 19
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;
	unsigned offs;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	if (card->csd.structure == 3) {
		int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
		if (ext_csd_struct > 2) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					ext_csd_struct);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (card->ext_csd.sectors) {
#ifdef CONFIG_EMBEDDED_MMC_START_OFFSET
			offs = card->host->ops->get_host_offset(card->host);
			offs >>= 9;
			BUG_ON(offs > card->ext_csd.sectors);
			card->ext_csd.sectors -= offs;
#endif
			offs = ext_csd[EXT_CSD_BOOT_SIZE_MULTI] * SZ_256K / 512;
			card->ext_csd.sectors -= offs;

		}
Exemplo n.º 20
static void
mmc_discover_cards(struct mmc_softc *sc)
{
	struct mmc_ivars *ivar = NULL;
	device_t *devlist;
	int err, i, devcount, newcard;
	uint32_t raw_cid[4];
	uint32_t resp, sec_count;
	device_t child;
	uint16_t rca = 2;
	u_char switch_res[64];

	if (bootverbose || mmc_debug)
		device_printf(sc->dev, "Probing cards\n");
	while (1) {
		err = mmc_all_send_cid(sc, raw_cid);
		if (err == MMC_ERR_TIMEOUT)
			break;
		if (err != MMC_ERR_NONE) {
			device_printf(sc->dev, "Error reading CID %d\n", err);
			break;
		}
		newcard = 1;
		if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0)
			return;
		for (i = 0; i < devcount; i++) {
			ivar = device_get_ivars(devlist[i]);
			if (memcmp(ivar->raw_cid, raw_cid, sizeof(raw_cid)) == 0) {
				newcard = 0;
				break;
			}
		}
		free(devlist, M_TEMP);
		if (bootverbose || mmc_debug) {
			device_printf(sc->dev, "%sard detected (CID %08x%08x%08x%08x)\n",
			    newcard ? "New c" : "C",
			    raw_cid[0], raw_cid[1], raw_cid[2], raw_cid[3]);
		}
		if (newcard) {
			ivar = malloc(sizeof(struct mmc_ivars), M_DEVBUF,
			    M_WAITOK | M_ZERO);
			if (!ivar)
				return;
			memcpy(ivar->raw_cid, raw_cid, sizeof(raw_cid));
		}
		if (mmcbr_get_ro(sc->dev))
			ivar->read_only = 1;
		ivar->bus_width = bus_width_1;
		ivar->timing = bus_timing_normal;
		ivar->mode = mmcbr_get_mode(sc->dev);
		if (ivar->mode == mode_sd) {
			mmc_decode_cid_sd(ivar->raw_cid, &ivar->cid);
			mmc_send_relative_addr(sc, &resp);
			ivar->rca = resp >> 16;
			/* Get card CSD. */
			mmc_send_csd(sc, ivar->rca, ivar->raw_csd);
			mmc_decode_csd_sd(ivar->raw_csd, &ivar->csd);
			ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE;
			if (ivar->csd.csd_structure > 0)
				ivar->high_cap = 1;
			ivar->tran_speed = ivar->csd.tran_speed;
			ivar->erase_sector = ivar->csd.erase_sector * 
			    ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
			/* Get card SCR. Card must be selected to fetch it. */
			mmc_select_card(sc, ivar->rca);
			mmc_app_send_scr(sc, ivar->rca, ivar->raw_scr);
			mmc_app_decode_scr(ivar->raw_scr, &ivar->scr);
			/* Get card switch capabilities (command class 10). */
			if ((ivar->scr.sda_vsn >= 1) &&
			    (ivar->csd.ccc & (1<<10))) {
				mmc_sd_switch(sc, SD_SWITCH_MODE_CHECK,
				    SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE,
				    switch_res);
				if (switch_res[13] & 2) {
					ivar->timing = bus_timing_hs;
					ivar->hs_tran_speed = SD_MAX_HS;
				}
			}
			mmc_app_sd_status(sc, ivar->rca, ivar->raw_sd_status);
			mmc_app_decode_sd_status(ivar->raw_sd_status,
			    &ivar->sd_status);
			if (ivar->sd_status.au_size != 0) {
				ivar->erase_sector =
				    16 << ivar->sd_status.au_size;
			}
			mmc_select_card(sc, 0);
			/* Find max supported bus width. */
			if ((mmcbr_get_caps(sc->dev) & MMC_CAP_4_BIT_DATA) &&
			    (ivar->scr.bus_widths & SD_SCR_BUS_WIDTH_4))
				ivar->bus_width = bus_width_4;
			if (bootverbose || mmc_debug)
				mmc_log_card(sc->dev, ivar, newcard);
			if (newcard) {
				/* Add device. */
				child = device_add_child(sc->dev, NULL, -1);
				device_set_ivars(child, ivar);
			}
			return;
		}
		mmc_decode_cid_mmc(ivar->raw_cid, &ivar->cid);
		ivar->rca = rca++;
		mmc_set_relative_addr(sc, ivar->rca);
		/* Get card CSD. */
		mmc_send_csd(sc, ivar->rca, ivar->raw_csd);
		mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd);
		ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE;
		ivar->tran_speed = ivar->csd.tran_speed;
		ivar->erase_sector = ivar->csd.erase_sector * 
		    ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
		/* Only MMC >= 4.x cards support EXT_CSD. */
		if (ivar->csd.spec_vers >= 4) {
			/* Card must be selected to fetch EXT_CSD. */
			mmc_select_card(sc, ivar->rca);
			mmc_send_ext_csd(sc, ivar->raw_ext_csd);
			/* Handle extended capacity from EXT_CSD */
			sec_count = ivar->raw_ext_csd[EXT_CSD_SEC_CNT] +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 1] << 8) +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 2] << 16) +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			if (sec_count != 0) {
				ivar->sec_count = sec_count;
				ivar->high_cap = 1;
			}
			/* Get card speed in high speed mode. */
			ivar->timing = bus_timing_hs;
			if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE]
			    & EXT_CSD_CARD_TYPE_52)
				ivar->hs_tran_speed = MMC_TYPE_52_MAX_HS;
			else if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE]
			    & EXT_CSD_CARD_TYPE_26)
				ivar->hs_tran_speed = MMC_TYPE_26_MAX_HS;
			else
				ivar->hs_tran_speed = ivar->tran_speed;
			/* Find max supported bus width. */
			ivar->bus_width = mmc_test_bus_width(sc);
			mmc_select_card(sc, 0);
			/* Handle HC erase sector size. */
			if (ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE] != 0) {
				ivar->erase_sector = 1024 *
				    ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE];
				mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_ERASE_GRP_DEF, 1);
			}
		} else {
			ivar->bus_width = bus_width_1;
			ivar->timing = bus_timing_normal;
		}
		if (bootverbose || mmc_debug)
			mmc_log_card(sc->dev, ivar, newcard);
		if (newcard) {
			/* Add device. */
			child = device_add_child(sc->dev, NULL, -1);
			device_set_ivars(child, ivar);
		}
	}
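
A quick unit check for the high-capacity erase-group handling above (hedged, based on the JEDEC field definition): EXT_CSD_ERASE_GRP_SIZE is specified in units of 512 KiB, so expressing the erase group in 512-byte sectors gives erase_sector = ERASE_GRP_SIZE * 512 KiB / 512 B = ERASE_GRP_SIZE * 1024, which is where the factor of 1024 comes from.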
Exemplo n.º 21
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	if (card->csd.structure == 3) {
		int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
		if (ext_csd_struct > 5) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					ext_csd_struct);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
		if (card->ext_csd.sectors) {
			unsigned boot_sectors;
			/* size is in 256K chunks, i.e. 512 sectors each */
			boot_sectors = ext_csd[EXT_CSD_BOOT_SIZE_MULTI] * 512;
			card->ext_csd.sectors -= boot_sectors;
			mmc_card_set_blockaddr(card);
		}
	}

	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
#ifdef CONFIG_MMC_DISCARD
	case EXT_CSD_CARD_TYPE_TEMP | EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		/* printk("[MMC] %s : MAX CSD TYPE\n", __func__); */
		card->ext_csd.hs_max_dtr = 52000000;
		break;
#endif
	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_DDR_1_8V
		 | EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
	case EXT_CSD_CARD_TYPE_DDR_1_8V
		 | EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}

	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
#ifdef CONFIG_MMC_DISCARD
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
#endif /* CONFIG_MMC_DISCARD */
	}

#ifdef CONFIG_MMC_DISCARD
	if (card->ext_csd.rev >= 4) {
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}
#endif /* CONFIG_MMC_DISCARD */

out:
	kfree(ext_csd);

	return err;
}
Exemplo n.º 22
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;
	unsigned int ext_csd_struct;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/*
		 * Allow hosts that cannot perform the command
		 * to fail more gracefully.
		 */
		if (err != -EINVAL)
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	ext_csd_struct = ext_csd[EXT_CSD_VER];
	if (ext_csd_struct > 3) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
			"version %d\n", mmc_hostname(card->host),
			ext_csd_struct);
		err = -EINVAL;
		goto out;
	}

	if (ext_csd_struct >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
#if !defined(CONFIG_PLAT_BCM476X)   // @KP: 090306
		if (card->ext_csd.sectors)
			mmc_card_set_blockaddr(card);
#else
		/*
		 * Enabling block addressing means the address argument of a
		 * multi-block read is no longer converted to a byte address,
		 * so only enable it when the card reports sector access mode
		 * via bit 30 of the OCR returned by SEND_OP_COND (CMD1).
		 */
		if ((card->ext_csd.sectors) && (card->host->ocr & 0x40000000))
			mmc_card_set_blockaddr(card);
#endif
	}

	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
		goto out;
	}

out:
	kfree(ext_csd);

	return err;
}
static int simple_sd_ioctl_single_rw(struct msdc_ioctl* msdc_ctl)
{
    char l_buf[512];
    struct scatterlist msdc_sg;
    struct mmc_data    msdc_data;
    struct mmc_command msdc_cmd;
    struct mmc_request msdc_mrq;
    struct msdc_host *host_ctl;
   
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];
    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user want access %d partition\n",msdc_ctl->partition);
#endif

    mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
    switch (msdc_ctl->partition){
        case BOOT_PARTITION_1:
            if (0x1 != (l_buf[179] & 0x7)){
                /* change to access boot partition 1 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x1;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        case BOOT_PARTITION_2:
            if (0x2 != (l_buf[179] & 0x7)){
                /* change to access boot partition 2 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x2;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        default:
            /* make sure access partition is user data area */
            if (0 != (l_buf[179] & 0x7)){
                /* set back to access user area */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x0;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
    }
    
    if(msdc_ctl->total_size > 512){
        msdc_ctl->result = -1;
        return  msdc_ctl->result;
    }

#if DEBUG_MMC_IOCTL
    printk("start MSDC_SINGLE_READ_WRITE !!\n");
#endif    
    memset(&msdc_data, 0, sizeof(struct mmc_data));
    memset(&msdc_mrq, 0, sizeof(struct mmc_request));
    memset(&msdc_cmd, 0, sizeof(struct mmc_command));

    msdc_mrq.cmd = &msdc_cmd;
    msdc_mrq.data = &msdc_data;

    if(msdc_ctl->trans_type)
        dma_force[host_ctl->id] = FORCE_IN_DMA;
    else
        dma_force[host_ctl->id] = FORCE_IN_PIO;

    if (msdc_ctl->iswrite){
        msdc_data.flags = MMC_DATA_WRITE;
        msdc_cmd.opcode = MMC_WRITE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, 512)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, 512);
        }
    } else {
        msdc_data.flags = MMC_DATA_READ;
        msdc_cmd.opcode = MMC_READ_SINGLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;

        memset(sg_msdc_multi_buffer, 0 , 512);
    }

    msdc_cmd.arg = msdc_ctl->address;

    if (!mmc_card_blockaddr(host_ctl->mmc->card)){
        printk("the device is used byte address!\n");
        msdc_cmd.arg <<= 9;
    }

    msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    msdc_data.stop = NULL;
    msdc_data.blksz = 512;
    msdc_data.sg = &msdc_sg;
    msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
    printk("single block: ueser buf address is 0x%p!\n",msdc_ctl->buffer);
#endif    
    sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
    mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);

    mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

    if (!msdc_ctl->iswrite){
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_to_user(msdc_ctl->buffer,sg_msdc_multi_buffer,512)){
                dma_force[host_ctl->id] = FORCE_NOTHING;
                return -EFAULT;
                }
        } else {
            /* called from other kernel module */
            memcpy(msdc_ctl->buffer,sg_msdc_multi_buffer,512);
        }
    }

    if (msdc_ctl->partition){
        mmc_send_ext_csd(host_ctl->mmc->card,l_buf);

        if (l_buf[179] & 0x7) {
            /* set back to access user area */
            l_buf[179] &= ~0x7;
            l_buf[179] |= 0x0;
            mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
        }
    }

    mmc_release_host(host_ctl->mmc);

    if (msdc_cmd.error)
        msdc_ctl->result = msdc_cmd.error;
    else if (msdc_data.error)
        msdc_ctl->result = msdc_data.error;
    else
        msdc_ctl->result = 0;

    dma_force[host_ctl->id] = FORCE_NOTHING;
    return msdc_ctl->result;
}
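
Below is a hedged usage sketch for the routine above (not from the driver; the helper name and field values are assumptions). A kernel-side caller reads one 512-byte sector of the user data area on host 0; passing MSDC_CARD_DUNM_FUNC as the opcode makes the routine treat the buffer as a kernel pointer (memcpy) rather than a user pointer (copy_to_user).

static int example_msdc_read_sector(u8 *buf, u32 sector)
{
    struct msdc_ioctl ctl = {
        .host_num   = 0,                   /* first MSDC controller */
        .partition  = 0,                   /* user data area */
        .iswrite    = 0,                   /* read */
        .total_size = 512,                 /* one sector; must be <= 512 here */
        .address    = sector,              /* block address; shifted for byte-addressed cards */
        .buffer     = buf,
        .trans_type = 0,                   /* PIO path */
        .opcode     = MSDC_CARD_DUNM_FUNC, /* kernel-internal caller */
    };

    return simple_sd_ioctl_single_rw(&ctl);
}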
Exemplo n.º 24
static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
#endif
{
#ifdef CONFIG_MACH_LGE
	/* LGE_CHANGE
	* http://www.mail-archive.com/[email protected]/msg10669.html
	* 2012-03-09, [email protected]
	*/
	struct mmc_card *card = s->private;
#else
	struct mmc_card *card = inode->i_private;
	char *buf;
	ssize_t n = 0;
#endif
	u8 *ext_csd;
#ifdef CONFIG_MACH_LGE
	/* LGE_CHANGE
	* http://www.mail-archive.com/[email protected]/msg10669.html
	* 2012-03-09, [email protected]
	*/
	u8 ext_csd_rev;
	int err;
	const char *str;
#else
	int err, i;

	buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
#endif
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		err = -ENOMEM;
		goto out_free;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out_free;
#ifdef CONFIG_MACH_LGE
	/* LGE_CHANGE
	* http://www.mail-archive.com/[email protected]/msg10669.html
	* 2012-03-09, [email protected]
	*/
	ext_csd_rev = ext_csd[192];
#else
	for (i = 511; i >= 0; i--)
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");
	BUG_ON(n != EXT_CSD_STR_LEN);

	filp->private_data = buf;
	kfree(ext_csd);
	return 0;
#endif

#ifdef CONFIG_MACH_LGE
	/* LGE_CHANGE
	* http://www.mail-archive.com/[email protected]/msg10669.html
	* 2012-03-09, [email protected]
	*/

	switch (ext_csd_rev) {
       case 6:
               str = "4.5";
               break;
       case 5:
               str = "4.41";
               break;
       case 3:
               str = "4.3";
               break;
       case 2:
               str = "4.2";
               break;
       case 1:
               str = "4.1";
               break;
       case 0:
               str = "4.0";
               break;
       default:
               goto out_free;
       }
       seq_printf(s, "Extended CSD rev 1.%d (MMC %s)\n", ext_csd_rev, str);

       if (ext_csd_rev < 3)
               goto out_free; /* No ext_csd */

       /* Parse the Extended CSD registers.
        * Reserved bit should be read as "0" in case of spec older
        * than A441.
        */
       seq_printf(s, "[504] Supported Command Sets, s_cmd_set: 0x%02x\n", ext_csd[504]);
       seq_printf(s, "[503] HPI features, hpi_features: 0x%02x\n", ext_csd[503]);
       seq_printf(s, "[502] Background operations support, bkops_support: 0x%02x\n", ext_csd[502]);

       if (ext_csd_rev >= 6) {
               seq_printf(s, "max_packed_reads: 0x%02x\n", ext_csd[501]);
               seq_printf(s, "max_packed_writes: 0x%02x\n", ext_csd[500]);
               seq_printf(s, "data_tag_support: 0x%02x\n", ext_csd[499]);
               seq_printf(s, "tag_unit_size: 0x%02x\n", ext_csd[498]);
               seq_printf(s, "tag_res_size: 0x%02x\n", ext_csd[497]);
               seq_printf(s, "context_capabilities: 0x%02x\n", ext_csd[496]);
               seq_printf(s, "large_unit_size_m1: 0x%02x\n", ext_csd[495]);
               seq_printf(s, "ext_support: 0x%02x\n", ext_csd[494]);
               seq_printf(s, "generic_cmd6_time: 0x%02x\n", ext_csd[248]);
               seq_printf(s, "power_off_long_time: 0x%02x\n", ext_csd[247]);
               seq_printf(s, "cache_size %d KiB\n", ext_csd[249] << 0 |
                          (ext_csd[250] << 8) | (ext_csd[251] << 16) |
                          (ext_csd[252] << 24));
       }

       /* A441: Reserved [501:247]
           A43: reserved [246:229] */
       if (ext_csd_rev >= 5) {
               seq_printf(s, "[241] 1st initialization time after partitioning, ini_timeout_ap: 0x%02x\n", ext_csd[241]);
               /* A441: reserved [240] */
               seq_printf(s, "[239] Power class for 52MHz, DDR at 3.6V, pwr_cl_ddr_52_360: 0x%02x\n", ext_csd[239]);
               seq_printf(s, "[238] POwer class for 52MHz, DDR at 1.95V, pwr_cl_ddr_52_195: 0x%02x\n", ext_csd[238]);

               /* A441: reserved [237-236] */

               if (ext_csd_rev >= 6) {
                       seq_printf(s, "pwr_cl_200_360: 0x%02x\n", ext_csd[237]);
                       seq_printf(s, "pwr_cl_200_195: 0x%02x\n", ext_csd[236]);
               }

               seq_printf(s, "[235] Minimun Write Performance for 8bit at 52MHz in DDR mode, min_perf_ddr_w_8_52: 0x%02x\n", ext_csd[235]);
               seq_printf(s, "[234] Minimun Read Performance for 8bit at 52MHz in DDR modemin_perf_ddr_r_8_52: 0x%02x\n", ext_csd[234]);
               /* A441: reserved [233] */
               seq_printf(s, "[232] TRIM Multiplier, trim_mult: 0x%02x\n", ext_csd[232]);
               seq_printf(s, "[231] Secure Feature support, sec_feature_support: 0x%02x\n", ext_csd[231]);
       }
       if (ext_csd_rev == 5) { /* Obsolete in 4.5 */
               seq_printf(s, "[230] Secure Erase Multiplier, sec_erase_mult: 0x%02x\n", ext_csd[230]);
               seq_printf(s, "[229] Secure TRIM Multiplier, sec_trim_mult:  0x%02x\n", ext_csd[229]);
       }
       seq_printf(s, "[228] Boot information, boot_info: 0x%02x\n", ext_csd[228]);
       /* A441/A43: reserved [227] */
       seq_printf(s, "[226] Boot partition size, boot_size_mult : 0x%02x\n", ext_csd[226]);
       seq_printf(s, "[225] Access size, acc_size: 0x%02x\n", ext_csd[225]);
       seq_printf(s, "[224] High-capacity erase unit size, hc_erase_grp_size: 0x%02x\n", ext_csd[224]);
       seq_printf(s, "[223] High-capacity erase timeout, erase_timeout_mult: 0x%02x\n", ext_csd[223]);
       seq_printf(s, "[222] Reliable write sector count, rel_wr_sec_c: 0x%02x\n", ext_csd[222]);
       seq_printf(s, "[221] High-capacity write protect group size, hc_wp_grp_size: 0x%02x\n", ext_csd[221]);
       seq_printf(s, "[220] Sleep current(VCC), s_c_vcc: 0x%02x\n", ext_csd[220]);
       seq_printf(s, "[219] Sleep current(VCCQ), s_c_vccq: 0x%02x\n", ext_csd[219]);
       /* A441/A43: reserved [218] */
       seq_printf(s, "[217] Sleep/awake timeout, s_a_timeout: 0x%02x\n", ext_csd[217]);
       /* A441/A43: reserved [216] */
       seq_printf(s, "[215:212] Sector Count, sec_count: 0x%08x\n", (ext_csd[215] << 24) |(ext_csd[214] << 16) | (ext_csd[213] << 8)  | ext_csd[212]);
       /* A441/A43: reserved [211] */
       seq_printf(s, "[210] Minimum Write Performance for 8bit at 52MHz, min_perf_w_8_52: 0x%02x\n", ext_csd[210]);
       seq_printf(s, "[209] Minimum Read Performance for 8bit at 52MHz, min_perf_r_8_52: 0x%02x\n", ext_csd[209]);
       seq_printf(s, "[208] Minimum Write Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_w_8_26_4_52: 0x%02x\n", ext_csd[208]);
       seq_printf(s, "[207] Minimum Read Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_r_8_26_4_52: 0x%02x\n", ext_csd[207]);
       seq_printf(s, "[206] Minimum Write Performance for 4bit at 26MHz, min_perf_w_4_26: 0x%02x\n", ext_csd[206]);
       seq_printf(s, "[205] Minimum Read Performance for 4bit at 26MHz, min_perf_r_4_26: 0x%02x\n", ext_csd[205]);
       /* A441/A43: reserved [204] */
       seq_printf(s, "[203] Power class for 26MHz at 3.6V, pwr_cl_26_360: 0x%02x\n", ext_csd[203]);
       seq_printf(s, "[202] Power class for 52MHz at 3.6V, pwr_cl_52_360: 0x%02x\n", ext_csd[202]);
       seq_printf(s, "[201] Power class for 26MHz at 1.95V, pwr_cl_26_195: 0x%02x\n", ext_csd[201]);
       seq_printf(s, "[200] Power class for 52MHz at 1.95V, pwr_cl_52_195: 0x%02x\n", ext_csd[200]);

       /* A43: reserved [199:198] */
       if (ext_csd_rev >= 5) {
               seq_printf(s, "[199] Partition switching timing, partition_switch_time: 0x%02x\n", ext_csd[199]);
               seq_printf(s, "[198] Out-of-interrupt busy timing, out_of_interrupt_time: 0x%02x\n", ext_csd[198]);
       }

       /* A441/A43: reserved   [197] [195] [193] [190] [188]
        * [186] [184] [182] [180] [176] */

       if (ext_csd_rev >= 6)
               seq_printf(s, "driver_strength: 0x%02x\n", ext_csd[197]);

       seq_printf(s, "[196] Card type, card_type: 0x%02x\n", ext_csd[196]);
       seq_printf(s, "[194] CSD structure version, csd_structure: 0x%02x\n", ext_csd[194]);
       seq_printf(s, "[192] Extended CSD revision, ext_csd_rev: 0x%02x\n", ext_csd[192]);
       seq_printf(s, "[191] Command set, cmd_set: 0x%02x\n", ext_csd[191]);
       seq_printf(s, "[189] Command set revision, cmd_set_rev: 0x%02x\n", ext_csd[189]);
       seq_printf(s, "[187] Power class, power_class: 0x%02x\n", ext_csd[187]);
       seq_printf(s, "[185] High-speed interface timing, hs_timing: 0x%02x\n", ext_csd[185]);
       /* bus_width: ext_csd[183] not readable */
       seq_printf(s, "[181] Erased memory content, erased_mem_cont: 0x%02x\n", ext_csd[181]);
       seq_printf(s, "[179] Partition configuration, partition_config: 0x%02x\n", ext_csd[179]);
       seq_printf(s, "[178] Boot config protection, boot_config_prot: 0x%02x\n", ext_csd[178]);
       seq_printf(s, "[177] Boot bus width1, boot_bus_conditions: 0x%02x\n", ext_csd[177]);
       seq_printf(s, "[175] High-density erase group definition, erase_group_def: 0x%02x\n", ext_csd[175]);

       /* A43: reserved [174:0] */
       if (ext_csd_rev >= 5) {
               seq_printf(s, "[174] boot_wp_status: 0x%02x\n", ext_csd[174]);
               seq_printf(s, "[173] Boot area write protection register, boot_wp: 0x%02x\n", ext_csd[173]);
               /* A441: reserved [172] */
               seq_printf(s, "[171] User area write protection register, user_wp: 0x%02x\n", ext_csd[171]);
               /* A441: reserved [170] */
               seq_printf(s, "[169] FW configuration, fw_config: 0x%02x\n", ext_csd[169]);
               seq_printf(s, "[168] RPMB Size, rpmb_size_mult: 0x%02x\n", ext_csd[168]);
               seq_printf(s, "[167] Write reliability setting register, wr_rel_set: 0x%02x\n", ext_csd[167]);
               seq_printf(s, "[166] Write reliability parameter register, wr_rel_param: 0x%02x\n", ext_csd[166]);
               /* sanitize_start ext_csd[165]: not readable
                * bkops_start ext_csd[164]: only writable */
               seq_printf(s, "[163] Enable background operations handshake, bkops_en: 0x%02x\n", ext_csd[163]);
               seq_printf(s, "[162] H/W reset function, rst_n_function: 0x%02x\n", ext_csd[162]);
               seq_printf(s, "[160] HPI management, hpi_mgmt: 0x%02x\n", ext_csd[161]);
               seq_printf(s, "[169] Partitioning Support, partitioning_support: 0x%02x\n", ext_csd[160]);
               seq_printf(s, "[159:157] Max Enhanced Area Size, max_enh_size_mult: 0x%06x\n", (ext_csd[159] << 16) | (ext_csd[158] << 8) |ext_csd[157]);
               seq_printf(s, "[156] Partitions attribute, partitions_attribute: 0x%02x\n", ext_csd[156]);
               seq_printf(s, "[155] Partitioning Setting, partition_setting_completed: 0x%02x\n", ext_csd[155]);
               seq_printf(s, "[154:152] General Purpose Partition Size, gp_size_mult_4: 0x%06x\n", (ext_csd[154] << 16) |(ext_csd[153] << 8) | ext_csd[152]);
               seq_printf(s, "[151:149] General Purpose Partition Size, gp_size_mult_3: 0x%06x\n", (ext_csd[151] << 16) |(ext_csd[150] << 8) | ext_csd[149]);
               seq_printf(s, "[148:146] General Purpose Partition Size, gp_size_mult_2: 0x%06x\n", (ext_csd[148] << 16) |(ext_csd[147] << 8) | ext_csd[146]);
               seq_printf(s, "[145:143] General Purpose Partition Size, gp_size_mult_1: 0x%06x\n", (ext_csd[145] << 16) |(ext_csd[144] << 8) | ext_csd[143]);
               seq_printf(s, "[142:140] Enhanced User Data Area Size, enh_size_mult: 0x%06x\n", (ext_csd[142] << 16) |(ext_csd[141] << 8) | ext_csd[140]);
               seq_printf(s, "[139:137] Enhanced User Data Start Address, enh_start_addr: 0x%06x\n", (ext_csd[139] << 16) |(ext_csd[138] << 8) | ext_csd[137]);

               /* A441: reserved [135] */
               seq_printf(s, "[134] Bad Block Management mode, sec_bad_blk_mgmnt: 0x%02x\n", ext_csd[134]);
               /* A441: reserved [133:0] */
       }
       /* B45 */
       if (ext_csd_rev >= 6) {
               int j;
               /* tcase_support ext_csd[132] not readable */
               seq_printf(s, "periodic_wakeup: 0x%02x\n", ext_csd[131]);
               seq_printf(s, "program_cid_csd_ddr_support: 0x%02x\n",
                          ext_csd[130]);

               for (j = 127; j >= 64; j--)
                       seq_printf(s, "vendor_specific_field[%d]: 0x%02x\n",
                                  j, ext_csd[j]);

               seq_printf(s, "native_sector_size: 0x%02x\n", ext_csd[63]);
               seq_printf(s, "use_native_sector: 0x%02x\n", ext_csd[62]);
               seq_printf(s, "data_sector_size: 0x%02x\n", ext_csd[61]);
               seq_printf(s, "ini_timeout_emu: 0x%02x\n", ext_csd[60]);
               seq_printf(s, "class_6_ctrl: 0x%02x\n", ext_csd[59]);
               seq_printf(s, "dyncap_needed: 0x%02x\n", ext_csd[58]);
               seq_printf(s, "exception_events_ctrl: 0x%04x\n",
                          (ext_csd[57] << 8) | ext_csd[56]);
               seq_printf(s, "exception_events_status: 0x%04x\n",
                          (ext_csd[55] << 8) | ext_csd[54]);
               seq_printf(s, "ext_partitions_attribute: 0x%04x\n",
                          (ext_csd[53] << 8) | ext_csd[52]);
               for (j = 51; j >= 37; j--)
                       seq_printf(s, "context_conf[%d]: 0x%02x\n", j,
                                  ext_csd[j]);

               seq_printf(s, "packed_command_status: 0x%02x\n", ext_csd[36]);
               seq_printf(s, "packed_failure_index: 0x%02x\n", ext_csd[35]);
               seq_printf(s, "power_off_notification: 0x%02x\n", ext_csd[34]);
               seq_printf(s, "cache_ctrl: 0x%02x\n", ext_csd[33]);
               /* flush_cache ext_csd[32] not readable */
               /*Reserved [31:0] */
       }
#endif
out_free:
#ifndef CONFIG_MACH_LGE
	/* LGE_CHANGE
	* http://www.mail-archive.com/[email protected]/msg10669.html
	* 2012-03-09, [email protected]
	*/
	kfree(buf);
#endif
	kfree(ext_csd);
	return err;
}
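
The non-LGE path above dumps all 512 EXT_CSD bytes as two hex characters each plus a trailing newline, which fixes the size of the buffer it allocates; EXT_CSD_STR_LEN is presumably defined along these lines:

/* 512 bytes x 2 hex digits each, plus the trailing '\n' (presumed definition) */
#define EXT_CSD_STR_LEN	1025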
Exemplo n.º 25
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	BUG_ON(!card);

	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		printk(KERN_ERR "%s: could not allocate a buffer to "
			"receive the ext_csd.\n", mmc_hostname(card->host));
		return -ENOMEM;
	}

	err = mmc_send_ext_csd(card, ext_csd);
	if (err) {
		/* If the host or the card can't do the switch,
		 * fail more gracefully. */
		if ((err != -EINVAL)
		 && (err != -ENOSYS)
		 && (err != -EFAULT))
			goto out;

		/*
		 * High capacity cards should have this "magic" size
		 * stored in their CSD.
		 */
		if (card->csd.capacity == (4096 * 512)) {
			printk(KERN_ERR "%s: unable to read EXT_CSD "
				"on a possible high capacity card. "
				"Card will be ignored.\n",
				mmc_hostname(card->host));
		} else {
			printk(KERN_WARNING "%s: unable to read "
				"EXT_CSD, performance might "
				"suffer.\n",
				mmc_hostname(card->host));
			err = 0;
		}

		goto out;
	}

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	if (card->csd.structure == 3) {
		int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
		if (ext_csd_struct > 2) {
			printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					ext_csd_struct);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 5) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
		break;
	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
	     EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
		break;
	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 52000000;
		break;
	case EXT_CSD_CARD_TYPE_26:
		card->ext_csd.hs_max_dtr = 26000000;
		break;
	default:
		/* MMC v4 spec says this cannot happen */
		printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
			"support any high-speed modes.\n",
			mmc_hostname(card->host));
	}

	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
	}

	if (card->ext_csd.rev >= 4) {
		/*
		 * Enhanced area feature support -- check whether the eMMC
		 * card has the Enhanced area enabled.  If so, export enhanced
		 * area offset and size to user by adding sysfs interface.
		 */
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
				(ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			u8 hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			u8 hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			/*
			 * calculate the enhanced data area offset, in bytes
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			/*
			 * If the enhanced area is not enabled, disable these
			 * device attributes.
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];
	}

	if (card->ext_csd.rev >= 5) {
		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		/* Check whether the eMMC card supports background ops */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)
			card->ext_csd.bk_ops = 1;
	}

	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

out:
	kfree(ext_csd);

	return err;
}
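
A unit check for the enhanced-area figures above (hedged, following the JEDEC field units): MAX_ENH_SIZE_MULT counts units of HC_ERASE_GRP_SIZE x HC_WP_GRP_SIZE x 512 KiB, so multiplying by the two group sizes and shifting left by 9 (x512) leaves enhanced_area_size in KiB, as the comment states. Likewise, ENH_START_ADDR is a sector count on block-addressed cards, hence the extra << 9 that turns the offset into bytes.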
Exemplo n.º 26
/*
 * Given a 128-bit response, decode to our card CSD structure.
 */
static int mmc_decode_csd(struct mmc_card *card)
{
    struct mmc_csd *csd = &card->csd;
    unsigned int e, m;
    u32 *resp = card->raw_csd;

    /*
     * We only understand CSD structure v1.1 and v1.2.
     * v1.2 has extra information in bits 15, 11 and 10.
     * We also support eMMC v4.4 & v4.41.
     */
    csd->structure = UNSTUFF_BITS(resp, 126, 2);
    //if (csd->structure == 0) {
#if defined(CONFIG_INAND_VERSION_PATCH)
    if (csd->structure != 1 && csd->structure != 2 && csd->structure != 3) {
#else
    if (csd->structure != 1 && csd->structure != 2) {
#endif
        printk(KERN_ERR "%s: unrecognised CSD structure version %d\n",
               mmc_hostname(card->host), csd->structure);
        return -EINVAL;
    }

    csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
    m = UNSTUFF_BITS(resp, 115, 4);
    e = UNSTUFF_BITS(resp, 112, 3);
    csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
    csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

    m = UNSTUFF_BITS(resp, 99, 4);
    e = UNSTUFF_BITS(resp, 96, 3);
    csd->max_dtr	  = tran_exp[e] * tran_mant[m];
    csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

    e = UNSTUFF_BITS(resp, 47, 3);
    m = UNSTUFF_BITS(resp, 62, 12);
    csd->capacity	  = (1 + m) << (e + 2);

    csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
    csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
    csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
    csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
    csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
    csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
    csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

    return 0;
}
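
For readers tracing the bit positions above: UNSTUFF_BITS pulls an arbitrary bit field out of the 128-bit CSD response stored as four u32 words (resp[0] holding the most significant bits). The kernel's MMC core typically defines it roughly as follows; treat this as a reference sketch rather than the exact macro used by this file.

#define UNSTUFF_BITS(resp, start, size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off + 1] << ((32 - __shft) % 32);	\
		__res &= __mask;					\
	})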

/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
    int err;
    u8 *ext_csd;

    BUG_ON(!card);

    if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
        return 0;

    /*
     * As the ext_csd is so large and mostly unused, we don't store the
     * raw block in mmc_card.
     */
    ext_csd = kmalloc(512, GFP_KERNEL);
    if (!ext_csd) {
        printk(KERN_ERR "%s: could not allocate a buffer to "
               "receive the ext_csd.\n", mmc_hostname(card->host));
        return -ENOMEM;
    }

    err = mmc_send_ext_csd(card, ext_csd);
    if (err) {
        /* If the host or the card can't do the switch,
         * fail more gracefully. */
        if ((err != -EINVAL)
                && (err != -ENOSYS)
                && (err != -EFAULT))
            goto out;

        /*
         * High capacity cards should have this "magic" size
         * stored in their CSD.
         */
        if (card->csd.capacity == (4096 * 512)) {
            printk(KERN_ERR "%s: unable to read EXT_CSD "
                   "on a possible high capacity card. "
                   "Card will be ignored.\n",
                   mmc_hostname(card->host));
        } else {
            printk(KERN_WARNING "%s: unable to read "
                   "EXT_CSD, performance might "
                   "suffer.\n",
                   mmc_hostname(card->host));
            err = 0;
        }

        goto out;
    }

    /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
    if (card->csd.structure == 3) {
        int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
        if (ext_csd_struct > 2) {
            printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
                   "version %d\n", mmc_hostname(card->host),
                   ext_csd_struct);
            err = -EINVAL;
            goto out;
        }
    }

    card->ext_csd.rev = ext_csd[EXT_CSD_REV];
    if (card->ext_csd.rev > 5) {
        printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
               mmc_hostname(card->host), card->ext_csd.rev);
        err = -EINVAL;
        goto out;
    }

    if (card->ext_csd.rev >= 2) {
        card->ext_csd.sectors =
            ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
            ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
            ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
            ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
#if !defined(CONFIG_INAND_VERSION_PATCH)
        if (card->ext_csd.sectors)
            mmc_card_set_blockaddr(card);
#endif
    }

    switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
    case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
        card->ext_csd.hs_max_dtr = 52000000;
        break;
    case EXT_CSD_CARD_TYPE_26:
        card->ext_csd.hs_max_dtr = 26000000;
        break;
    default:
        /* MMC v4 spec says this cannot happen */
        printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
               "support any high-speed modes.\n",
               mmc_hostname(card->host));
    }

    if (card->ext_csd.rev >= 3) {
        u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

        /* Sleep / awake timeout in 100ns units */
        if (sa_shift > 0 && sa_shift <= 0x17)
            card->ext_csd.sa_timeout =
                1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
    }

out:
    kfree(ext_csd);

    return err;
}
Exemplo n.º 27
/*
 * Read and decode extended CSD.
 */
static int mmc_read_ext_csd(struct mmc_card *card)
{
    int err;
    u8 *ext_csd;

    BUG_ON(!card);

    if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
        return 0;

    /*
     * As the ext_csd is so large and mostly unused, we don't store the
     * raw block in mmc_card.
     */
    ext_csd = kmalloc(512, GFP_KERNEL);
    if (!ext_csd) {
        printk(KERN_ERR "%s: could not allocate a buffer to "
               "receive the ext_csd.\n", mmc_hostname(card->host));
        return -ENOMEM;
    }

    err = mmc_send_ext_csd(card, ext_csd);
    if (err) {
        /* If the host or the card can't do the switch,
         * fail more gracefully. */
        if ((err != -EINVAL)
                && (err != -ENOSYS)
                && (err != -EFAULT))
            goto out;

        /*
         * High capacity cards should have this "magic" size
         * stored in their CSD.
         */
        if (card->csd.capacity == (4096 * 512)) {
            printk(KERN_ERR "%s: unable to read EXT_CSD "
                   "on a possible high capacity card. "
                   "Card will be ignored.\n",
                   mmc_hostname(card->host));
        } else {
            printk(KERN_WARNING "%s: unable to read "
                   "EXT_CSD, performance might "
                   "suffer.\n",
                   mmc_hostname(card->host));
            err = 0;
        }

        goto out;
    }

    /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
    if (card->csd.structure == 3) {
        int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
        if (ext_csd_struct > 2) {
            printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
                   "version %d\n", mmc_hostname(card->host),
                   ext_csd_struct);
            err = -EINVAL;
            goto out;
        }
    }

    card->ext_csd.rev = ext_csd[EXT_CSD_REV];
    if (card->ext_csd.rev > 5) {
        printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
               mmc_hostname(card->host), card->ext_csd.rev);
        err = -EINVAL;
        goto out;
    }

    if (card->ext_csd.rev >= 2) {
        card->ext_csd.sectors =
            ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
            ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
            ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
            ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

        /* Cards with density > 2GiB are sector addressed */
        if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
            mmc_card_set_blockaddr(card);
    }

    switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
    case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
            EXT_CSD_CARD_TYPE_26:
        card->ext_csd.hs_max_dtr = 52000000;
        card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
        break;
    case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
            EXT_CSD_CARD_TYPE_26:
        card->ext_csd.hs_max_dtr = 52000000;
        card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
        break;
    case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
            EXT_CSD_CARD_TYPE_26:
        card->ext_csd.hs_max_dtr = 52000000;
        card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
        break;
    case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
        card->ext_csd.hs_max_dtr = 52000000;
        break;
    case EXT_CSD_CARD_TYPE_26:
        card->ext_csd.hs_max_dtr = 26000000;
        break;
    default:
        /* MMC v4 spec says this cannot happen */
        printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
               "support any high-speed modes.\n",
               mmc_hostname(card->host));
    }

    if (card->ext_csd.rev >= 3) {
        u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];

        /* Sleep / awake timeout in 100ns units */
        if (sa_shift > 0 && sa_shift <= 0x17)
            card->ext_csd.sa_timeout =
                1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
    }

out:
    kfree(ext_csd);

    return err;
}
Exemplo n.º 28
int mmc_change_freq(struct mmc *mmc)
{
	char ext_csd[512];
	char cardtype;
	int err;
	int retry = 5;

	mmc->card_caps = 0;

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	mmc->card_caps |= MMC_MODE_4BIT;

	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err) {
		mmcinfo("mmc %d get ext csd failed\n", mmc->control_num);
		return err;
	}

	cardtype = ext_csd[196] & 0xf;

	/*
	 * Retry for Toshiba eMMC: the first switch to high speed can return a
	 * response CRC error, so retry the switch command (CMD6) a few times.
	 */
	do {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1);
		if (!err)
			break;
		mmcinfo("retry mmc switch(cmd6)\n");
	} while (retry--);

	if (err) {
		mmcinfo("mmc %d change to hs failed\n", mmc->control_num);
		return err;
	}

	/* Now check to see that it worked */
	err = mmc_send_ext_csd(mmc, ext_csd);

	if (err) {
		mmcinfo("mmc %d send ext csd failed\n", mmc->control_num);
		return err;
	}

	/* No high-speed support */
	if (!ext_csd[185])
		return 0;

	/* High Speed is set, there are two types: 52MHz and 26MHz */
	if (cardtype & MMC_HS_52MHZ)
		mmc->card_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;
	else
		mmc->card_caps |= MMC_MODE_HS;

	return 0;
}
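
A note on the cardtype test above (hedged; the exact constants live in the port's mmc header): in EXT_CSD[196], bit 0 advertises 26 MHz high-speed and bit 1 advertises 52 MHz operation, so MMC_HS_52MHZ is normally the bit-1 mask:

/* Presumed values matching EXT_CSD[196] (CARD_TYPE) bits 0 and 1 */
#define MMC_HS_26MHZ	0x1
#define MMC_HS_52MHZ	0x2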
Exemplo n.º 29
static void mmc_error_ext_csd(struct mmc_card *card, u8 *ext_csd,
		int backup, unsigned int slice)
{
	int i = 0;
	int err = 0;
	unsigned int available_new = 0;
	u8 *ext_csd_new;

	if (backup) {
		kfree(ext_csd_backup);

		ext_csd_backup = kmalloc(512, GFP_KERNEL);
		if (!ext_csd_backup) {
			pr_err("%s: kmalloc is failed(512B).\n", __func__);
			return;
		}

		memcpy(ext_csd_backup, ext_csd, 512);
	} else {
		ext_csd_new = kmalloc(512, GFP_KERNEL);
		if (!ext_csd_new) {
			pr_err("%s: ext_csd_new kmalloc is failed(512B).\n",
					__func__);
		} else {
			err = mmc_send_ext_csd(card, ext_csd_new);
			if (err)
				pr_err("Fail to get new EXT_CSD.\n");
			else
				available_new = 1;
		}
		pr_err("%s: starting diff ext_csd.\n", __func__);
		pr_err("%s: error on slice %d: backup=%d, now=%d,"
				"new=%d.\n",
				__func__, slice,
				ext_csd_backup[slice], ext_csd[slice],
				available_new ? ext_csd_new[slice] : 0);
		for (i = 0 ; i < 512 ; i++) {
			if (available_new) {
				if (ext_csd_backup[i] != ext_csd[i] ||
						ext_csd_new[i] != ext_csd[i])
					pr_err("%d : ext_csd_backup=%d,"
							"ext_csd=%d,"
							"ext_csd_new=%d.\n",
							i,
							ext_csd_backup[i],
							ext_csd[i],
							ext_csd_new[i]);
			} else {
				if (ext_csd_backup[i] != ext_csd[i])
					pr_err("%d : ext_csd_backup=%d,"
							"ext_csd=%d.\n",
							i,
							ext_csd_backup[i],
							ext_csd[i]);
			}
		}
		panic("eMMC's EXT_CSD error.\n");
	}
	return;

}
Exemplo n.º 30
static void mmc_error_ext_csd(struct mmc_card *card, u8 *ext_csd,
		int backup, unsigned int slice)
{
	int i = 0;
	int err = 0;
	unsigned int available_new = 0;
	u8 *ext_csd_new;

	if (backup) {
		kfree(ext_csd_backup);

		ext_csd_backup = kmalloc(512, GFP_KERNEL);
		if (!ext_csd_backup) {
			pr_err("%s: kmalloc is failed(512B).\n", __func__);
			return;
		}

		memcpy(ext_csd_backup, ext_csd, 512);
#if 0	/* Just checking */
#define EXT_CSD_REV			192	/* RO */
#define EXT_CSD_STRUCTURE		194	/* RO */
#define EXT_CSD_CARD_TYPE		196	/* RO */
#endif
		pr_err("[TEST] eMMC check : %d, %d, %d.\n",
				ext_csd_backup[EXT_CSD_REV],
				ext_csd_backup[EXT_CSD_STRUCTURE],
				ext_csd_backup[EXT_CSD_CARD_TYPE]);
	} else {
		ext_csd_new = kmalloc(512, GFP_KERNEL);
		if (!ext_csd_new) {
			pr_err("%s: ext_csd_new kmalloc is failed(512B).\n",
					__func__);
		} else {
			err = mmc_send_ext_csd(card, ext_csd_new);
			if (err)
				pr_err("[TEST] Fail to get new EXT_CSD.\n");
			else
				available_new = 1;
		}
		pr_err("[TEST] %s: starting diff ext_csd.\n", __func__);
		pr_err("[TEST] %s: error on slice %d: backup=%d, now=%d,"
				"new=%d.\n",
				__func__, slice,
				ext_csd_backup[slice], ext_csd[slice],
				available_new ? ext_csd_new[slice] : 0);
		for (i = 0 ; i < 512 ; i++) {
			if (available_new) {
				if (ext_csd_backup[i] != ext_csd[i] ||
						ext_csd_new[i] != ext_csd[i])
					pr_err("%d : ext_csd_backup=%d,"
							"ext_csd=%d,"
							"ext_csd_new=%d.\n",
							i,
							ext_csd_backup[i],
							ext_csd[i],
							ext_csd_new[i]);
			} else {
				if (ext_csd_backup[i] != ext_csd[i])
					pr_err("%d : ext_csd_backup=%d,"
							"ext_csd=%d.\n",
							i,
							ext_csd_backup[i],
							ext_csd[i]);
			}
		}
		panic("eMMC's EXT_CSD error.\n");
	}
	return;

}