void mmc_add_card_debugfs(struct mmc_card *card)
{
	struct mmc_host	*host = card->host;
	struct dentry	*root;

	if (!host->debugfs_root)
		return;

	root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
	if (IS_ERR(root))
		/* Don't complain -- debugfs just isn't enabled */
		return;
	if (!root)
		/* Complain -- debugfs is enabled, but it failed to
		 * create the directory. */
		goto err;

	card->debugfs_root = root;

	if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
		goto err;

	if (mmc_card_mmc(card) || mmc_card_sd(card))
		if (!debugfs_create_file("status", S_IRUSR, root, card,
					&mmc_dbg_card_status_fops))
			goto err;

	if (mmc_card_mmc(card))
		if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_fops))
			goto err;

	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
	    (card->host->caps2 & MMC_CAP2_PACKED_WR))
		if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card,
					 &mmc_dbg_wr_pack_stats_fops))
			goto err;

	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) &&
	    card->ext_csd.bkops_en)
		if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card,
					 &mmc_dbg_bkops_stats_fops))
			goto err;

//ASUS_BSP +++ Gavin_Chang "mmc cmd statistics"
	if (mmc_card_mmc(card))
		if (!debugfs_create_file("cmd_stats", S_IRUSR, root, card,
					 &mmc_dbg_cmd_stats_fops))
			goto err;
//ASUS_BSP --- Gavin_Chang "mmc cmd statistics"

	return;

err:
	debugfs_remove_recursive(root);
	card->debugfs_root = NULL;
	dev_err(&card->dev, "failed to initialize debugfs\n");
}
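For reference, the teardown that pairs with the function above removes everything created under card->debugfs_root in one call. A minimal sketch of that matching helper (in mainline it lives alongside as mmc_remove_card_debugfs), assuming only the debugfs_root field already used above:

void mmc_remove_card_debugfs(struct mmc_card *card)
{
	/* Recursively drops "state", "status", "ext_csd" and the other entries */
	debugfs_remove_recursive(card->debugfs_root);
	card->debugfs_root = NULL;
}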
Example No. 2
void mmc_add_card_debugfs(struct mmc_card *card)
{
	struct mmc_host	*host = card->host;
	struct dentry	*root = NULL;
	struct dentry   *sdxc_root = NULL;

	if (!host->debugfs_root)
		return;

	sdxc_root = debugfs_create_dir("sdxc_root", host->debugfs_root);
	if (IS_ERR(sdxc_root))
		return;
	if (!sdxc_root)
		goto err;

	root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
	if (IS_ERR(root)) {
		/* Don't complain -- debugfs just isn't enabled */
		debugfs_remove_recursive(sdxc_root);
		return;
	}
	if (!root)
		/* Complain -- debugfs is enabled, but it failed to
		 * create the directory. */
		goto err;

	card->debugfs_sdxc = sdxc_root;
	card->debugfs_root = root;

	if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
		goto err;

	if (mmc_card_mmc(card) || mmc_card_sd(card))
		if (!debugfs_create_file("status", S_IRUSR, root, card,
					&mmc_dbg_card_status_fops))
			goto err;

	if (mmc_card_mmc(card))
		if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_fops))
			goto err;

	if (mmc_card_sd(card))
		if (!debugfs_create_file("sdxc", S_IRUSR, sdxc_root, card,
					&mmc_sdxc_fops))
			goto err;

	if (mmc_card_mmc(card))
		if (!debugfs_create_file("wr_prot",
					S_IFREG | S_IRWXU | S_IRGRP | S_IROTH,
					root, card, &mmc_dbg_wr_prot_fops))
			goto err;

	return;

err:
	debugfs_remove_recursive(root);
	debugfs_remove_recursive(sdxc_root);
	card->debugfs_root = NULL;
	card->debugfs_sdxc = NULL;
	dev_err(&card->dev, "failed to initialize debugfs\n");
}
Example No. 3
void mmc_add_card_debugfs(struct mmc_card *card)
{
	struct mmc_host	*host = card->host;
	struct dentry	*root;

	if (!host->debugfs_root)
		return;

	root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
	if (IS_ERR(root))
		/* Don't complain -- debugfs just isn't enabled */
		return;
	if (!root)
		goto err;

	card->debugfs_root = root;

	if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
		goto err;

	if (mmc_card_mmc(card) || mmc_card_sd(card))
		if (!debugfs_create_file("status", S_IRUSR, root, card,
					&mmc_dbg_card_status_fops))
			goto err;

	if (mmc_card_mmc(card))
		if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_fops))
			goto err;

	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
	    (card->host->caps2 & MMC_CAP2_PACKED_WR))
		if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card,
					 &mmc_dbg_wr_pack_stats_fops))
			goto err;

	if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) &&
	    card->ext_csd.bkops_en)
		if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card,
					 &mmc_dbg_bkops_stats_fops))
			goto err;

	return;

err:
	debugfs_remove_recursive(root);
	card->debugfs_root = NULL;
	dev_err(&card->dev, "failed to initialize debugfs\n");
}
Example No. 4
        /*
         * If idle time bkops is running on the card, let's not get
         * into suspend.
         */
        if (mmc_card_doing_bkops(card) && mmc_card_is_prog_state(card))
            return -EBUSY;
        else
            return 0;
    } else {
        return mmc_power_save_host(card->host);
    }
}

static int mmc_runtime_resume(struct device *dev)
{
    struct mmc_card *card = mmc_dev_to_card(dev);

    if (mmc_use_core_runtime_pm(card->host))
        return 0;
    else
        return mmc_power_restore_host(card->host);
}

#ifdef CONFIG_MMC_SD_PENDING_RESUME_CUST_SH
extern bool sh_mmc_pending_resume;
#endif /* CONFIG_MMC_SD_PENDING_RESUME_CUST_SH */
static int mmc_runtime_idle(struct device *dev)
{
    struct mmc_card *card = mmc_dev_to_card(dev);
    struct mmc_host *host = card->host;
    int ret = 0;

    if (mmc_use_core_runtime_pm(card->host)) {
#ifdef CONFIG_PM_EMMC_CUST_SH
        if( !(host->card && mmc_card_mmc( host->card )) ) {
#endif /* CONFIG_PM_EMMC_CUST_SH */
#ifdef CONFIG_MMC_SD_PENDING_RESUME_CUST_SH
            if( (host->card && mmc_card_sd( host->card )) &&
                    sh_mmc_pending_resume == false ) {
#endif /* CONFIG_MMC_SD_PENDING_RESUME_CUST_SH */
                ret = pm_schedule_suspend(dev, card->idle_timeout);
                if (ret) {
                    pr_err("%s: %s: pm_schedule_suspend failed: err: %d\n",
                           mmc_hostname(host), __func__, ret);
                    return ret;
                }
#ifdef CONFIG_MMC_SD_PENDING_RESUME_CUST_SH
            }
#endif /* CONFIG_MMC_SD_PENDING_RESUME_CUST_SH */
#ifdef CONFIG_PM_EMMC_CUST_SH
        }
#endif /* CONFIG_PM_EMMC_CUST_SH */
    }

    return ret;
}
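Callbacks such as mmc_runtime_resume() and mmc_runtime_idle() above (and the truncated suspend handler at the top of this example) are normally wired into the bus's dev_pm_ops table. A hedged sketch of that hookup; the table name and the mmc_runtime_suspend symbol are assumed here rather than shown in the snippet:

static const struct dev_pm_ops mmc_bus_pm_ops = {
	/* Runtime PM entry points for the MMC bus (names assumed, see above) */
	SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume,
			   mmc_runtime_idle)
};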
Example No. 5
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
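A hedged caller sketch for the helper above: a host suspend path of the same era could gate the eMMC SLEEP (CMD5) sequence on it. mmc_card_sleep() is assumed to be the companion core helper of that period:

static int example_try_card_sleep(struct mmc_host *host)
{
	/* Only eMMC devices with EXT_CSD rev >= 3 implement SLEEP/AWAKE */
	if (mmc_card_can_sleep(host))
		return mmc_card_sleep(host);
	return 0;
}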
Example No. 6
void mmc_add_card_debugfs(struct mmc_card *card)
{
	struct mmc_host	*host = card->host;
	struct dentry	*root;
	DBG("[%s] s\n",__func__);
	if (!host->debugfs_root) {
		DBG("[%s] e\n",__func__);
		return;
	}

	root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
	if (IS_ERR(root)) {
		DBG("[%s] e1\n",__func__);
		/* Don't complain -- debugfs just isn't enabled */
		return;
	}
	if (!root)
		/* Complain -- debugfs is enabled, but it failed to
		 * create the directory. */
		goto err;

	card->debugfs_root = root;

	if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
		goto err;

	if (mmc_card_mmc(card) || mmc_card_sd(card))
		if (!debugfs_create_file("status", S_IRUSR, root, card,
					&mmc_dbg_card_status_fops))
			goto err;

	if (mmc_card_mmc(card))
		if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_fops))
			goto err;

	DBG("[%s] e2\n",__func__);
	return;

err:
	debugfs_remove_recursive(root);
	card->debugfs_root = NULL;
	dev_err(&card->dev, "failed to initialize debugfs\n");
	DBG("[%s] e3\n",__func__);
}
Example No. 7
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			/* Abort any current BKOPS on the eMMC card by issuing HPI */
			if (mmc_card_mmc(mq->card) && mmc_card_doing_bkops(mq->card)) {
				mmc_interrupt_hpi(mq->card);
			}
			mq->issue_fn(mq, req);
		} else {
			/*
			 * Since the queue is empty, start synchronous
			 * background ops if there is a request for it.
			 */
			if (mmc_card_need_bkops(mq->card))
				mmc_bkops_start(mq->card, false, true);
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}

		/* Current request becomes previous request and vice versa. */
		mq->mqrq_prev->brq.mrq.data = NULL;
		mq->mqrq_prev->req = NULL;
		tmp = mq->mqrq_prev;
		mq->mqrq_prev = mq->mqrq_cur;
		mq->mqrq_cur = tmp;
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl)
{
    char l_buf[512];
    struct scatterlist msdc_sg;
    struct mmc_data  msdc_data;
    struct mmc_command msdc_cmd;
    struct mmc_command msdc_stop;

#ifdef MTK_MSDC_USE_CMD23
	struct mmc_command msdc_sbc;
#endif
    
    struct mmc_request  msdc_mrq;
    struct msdc_host *host_ctl;
   
    host_ctl = mtk_msdc_host[msdc_ctl->host_num];
    BUG_ON(!host_ctl);
    BUG_ON(!host_ctl->mmc);
    BUG_ON(!host_ctl->mmc->card);

    mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
    printk("user want access %d partition\n",msdc_ctl->partition);
#endif

    mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
    switch (msdc_ctl->partition){
        case BOOT_PARTITION_1:
            if (0x1 != (l_buf[179] & 0x7)){
                /* change to access boot partition 1 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x1;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        case BOOT_PARTITION_2:
            if (0x2 != (l_buf[179] & 0x7)){
                /* change to access boot partition 2 */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x2;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
        default:
            /* make sure access partition is user data area */
            if (0 != (l_buf[179] & 0x7)){
                /* set back to access user area */
                l_buf[179] &= ~0x7;
                l_buf[179] |= 0x0;
                mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
            }
            break;
    }

    if (msdc_ctl->total_size > 64 * 1024) {
        msdc_ctl->result = -1;
        mmc_release_host(host_ctl->mmc);
        return msdc_ctl->result;
    }

    memset(&msdc_data, 0, sizeof(struct mmc_data));
    memset(&msdc_mrq, 0, sizeof(struct mmc_request));
    memset(&msdc_cmd, 0, sizeof(struct mmc_command));
    memset(&msdc_stop, 0, sizeof(struct mmc_command));

#ifdef MTK_MSDC_USE_CMD23
    memset(&msdc_sbc, 0, sizeof(struct mmc_command));
#endif

    msdc_mrq.cmd = &msdc_cmd;
    msdc_mrq.data = &msdc_data;

    if(msdc_ctl->trans_type)
        dma_force[host_ctl->id] = FORCE_IN_DMA;
    else
        dma_force[host_ctl->id] = FORCE_IN_PIO;

    if (msdc_ctl->iswrite){
        msdc_data.flags = MMC_DATA_WRITE;
        msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)) {
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size);
        }
    } else {
        msdc_data.flags = MMC_DATA_READ;
        msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
        msdc_data.blocks = msdc_ctl->total_size / 512;
        memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size);
    }

#ifdef MTK_MSDC_USE_CMD23
    if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && 
            !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)){
        msdc_mrq.sbc = &msdc_sbc;
        msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT;
        msdc_mrq.sbc->arg = msdc_data.blocks;
        msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
    }
#endif

    msdc_cmd.arg = msdc_ctl->address;

    if (!mmc_card_blockaddr(host_ctl->mmc->card)){
        printk("this device use byte address!!\n");
        msdc_cmd.arg <<= 9;
    }
    msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    msdc_stop.opcode = MMC_STOP_TRANSMISSION;
    msdc_stop.arg = 0;
    msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

    msdc_data.stop = &msdc_stop;
    msdc_data.blksz = 512;
    msdc_data.sg = &msdc_sg;
    msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
    printk("total size is %d\n",msdc_ctl->total_size);
#endif
    sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
    mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);
    mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

    if (!msdc_ctl->iswrite){
        if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
            if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)) {
                dma_force[host_ctl->id] = FORCE_NOTHING;
                mmc_release_host(host_ctl->mmc);
                return -EFAULT;
            }
        } else {
            /* called from other kernel module */
            memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size);
        }
    }

    if (msdc_ctl->partition){
        mmc_send_ext_csd(host_ctl->mmc->card,l_buf);

        if (l_buf[179] & 0x7) {
            /* set back to access user area */
            l_buf[179] &= ~0x7;
            l_buf[179] |= 0x0;
            mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
        }
    }

    mmc_release_host(host_ctl->mmc);

    if (msdc_cmd.error)
        msdc_ctl->result = msdc_cmd.error;
    else if (msdc_data.error)
        msdc_ctl->result = msdc_data.error;
    else
        msdc_ctl->result = 0;

    dma_force[host_ctl->id] = FORCE_NOTHING;
    return msdc_ctl->result;

}
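The partition-access switching in the function above rewrites bits [2:0] of EXT_CSD byte 179 (PARTITION_CONFIG) in three nearly identical branches. A hedged sketch of that pattern factored into a local helper; the helper name is illustrative and not part of the driver:

static void example_set_part_access(struct mmc_card *card, u8 *ext_csd, u8 part)
{
    /* Only issue the SWITCH if the access bits actually change */
    if (part != (ext_csd[179] & 0x7)) {
        ext_csd[179] &= ~0x7;
        ext_csd[179] |= part;
        mmc_switch(card, 0, 179, ext_csd[179], 1000);
    }
}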
Example No. 9
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;
	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	card->ext_csd.rev = ext_csd[EXT_CSD_REV];
	if (card->ext_csd.rev > 6) {
		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
			mmc_hostname(card->host), card->ext_csd.rev);
		err = -EINVAL;
		goto out;
	}

	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	if (card->ext_csd.rev >= 4) {
		card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			card->ext_csd.enhanced_area_en = 1;
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		}

		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
			EXT_CSD_PART_SUPPORT_PART_EN) {
			if (card->ext_csd.enhanced_area_en != 1) {
				hc_erase_grp_sz =
					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
				hc_wp_grp_sz =
					ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

				card->ext_csd.enhanced_area_en = 1;
			}

			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
					continue;
				part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
					<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
					<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
				part_size *= (size_t)(hc_erase_grp_sz *
					hc_wp_grp_sz);
				mmc_part_add(card, part_size << 19,
					EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
					"gp%d", idx, false,
					MMC_BLK_DATA_AREA_GP);
			}
		}
		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;
	}

	if (card->ext_csd.rev >= 5) {
		
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			pr_info("[HTC_KER_ADD]%s: ext_csd[EXT_CSD_BKOPS_EN] = %d\n",
			mmc_hostname(card->host),
			ext_csd[EXT_CSD_BKOPS_EN]);
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en &&
				card->host->caps2 & MMC_CAP2_INIT_BKOPS) {
				err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_BKOPS_EN, 1, 0);
				if (err)
					pr_warning("%s: Enabling BKOPS failed\n",
						mmc_hostname(card->host));
				else {
					card->ext_csd.bkops_en = 1;
					pr_info("[HTC_KER_ADD]%s: Enabled BKOPs\n",
					mmc_hostname(card->host));
				}
			}
		}

		
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
	}

	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}
		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	}

	if (card->cid.manfid == SANDISK_MMC) {
		char buf[7] = {0};
		sprintf(buf, "%c%c%c%c%c%c", ext_csd[73], ext_csd[74], ext_csd[75],
										ext_csd[76], ext_csd[77], ext_csd[78]);
		strncpy(card->ext_csd.fwrev, buf, strlen(buf));
	}

	if (mmc_card_mmc(card)) {
		char *buf;
		int i, j;
		ssize_t n = 0;
		pr_info("%s: cid %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_cid[0], card->raw_cid[1],
			card->raw_cid[2], card->raw_cid[3]);
		pr_info("%s: csd %08x%08x%08x%08x\n",
			mmc_hostname(card->host),
			card->raw_csd[0], card->raw_csd[1],
			card->raw_csd[2], card->raw_csd[3]);

		buf = kmalloc(512, GFP_KERNEL);
		if (buf) {
			for (i = 0; i < 32; i++) {
				for (j = 511 - (16 * i); j >= 496 - (16 * i); j--)
					n += sprintf(buf + n, "%02x", ext_csd[j]);
				n += sprintf(buf + n, "\n");
				pr_info("%s: ext_csd %s", mmc_hostname(card->host), buf);
				n = 0;
			}
		}
		if (buf)
			kfree(buf);
	}

out:
	return err;
}
Example No. 10
static int simple_sd_ioctl_single_rw(struct msdc_ioctl *msdc_ctl)
{
	char l_buf[512];
	struct scatterlist msdc_sg;
	struct mmc_data msdc_data;
	struct mmc_command msdc_cmd;
	struct mmc_request msdc_mrq;
	struct msdc_host *host_ctl;
	int  ret = 0;
	if (!msdc_ctl)
		return -EINVAL;
	if (msdc_ctl->total_size <= 0)
		return -EINVAL;

	host_ctl = mtk_msdc_host[msdc_ctl->host_num];
	BUG_ON(!host_ctl);
	BUG_ON(!host_ctl->mmc);
	BUG_ON(!host_ctl->mmc->card);

#ifdef MTK_MSDC_USE_CACHE
	if (msdc_ctl->iswrite && mmc_card_mmc(host_ctl->mmc->card)
		&& (host_ctl->mmc->card->ext_csd.cache_ctrl & 0x1))
		return simple_sd_ioctl_multi_rw(msdc_ctl);
#endif

	mmc_claim_host(host_ctl->mmc);

#if DEBUG_MMC_IOCTL
	pr_debug("user want access %d partition\n", msdc_ctl->partition);
#endif

	ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
	if (ret) {
		pr_debug("mmc_send_ext_csd error, single rw\n");
		goto single_end;
	}
#ifdef CONFIG_MTK_EMMC_SUPPORT
	switch (msdc_ctl->partition) {
	case EMMC_PART_BOOT1:
		if (0x1 != (l_buf[179] & 0x7)) {
			/* change to access boot partition 1 */
			l_buf[179] &= ~0x7;
			l_buf[179] |= 0x1;
			mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
		}
		break;
	case EMMC_PART_BOOT2:
		if (0x2 != (l_buf[179] & 0x7)) {
			/* change to access boot partition 2 */
			l_buf[179] &= ~0x7;
			l_buf[179] |= 0x2;
			mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
		}
		break;
	default:
		/* make sure access partition is user data area */
		if (0 != (l_buf[179] & 0x7)) {
			/* set back to access user area */
			l_buf[179] &= ~0x7;
			l_buf[179] |= 0x0;
			mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
		}
		break;
	}
#endif
	if (msdc_ctl->total_size > 512) {
		msdc_ctl->result = -1;
		goto single_end;
	}
#if DEBUG_MMC_IOCTL
	pr_debug("start MSDC_SINGLE_READ_WRITE !!\n");
#endif
	memset(&msdc_data, 0, sizeof(struct mmc_data));
	memset(&msdc_mrq, 0, sizeof(struct mmc_request));
	memset(&msdc_cmd, 0, sizeof(struct mmc_command));

	msdc_mrq.cmd = &msdc_cmd;
	msdc_mrq.data = &msdc_data;

	if (msdc_ctl->trans_type)
		dma_force[host_ctl->id] = FORCE_IN_DMA;
	else
		dma_force[host_ctl->id] = FORCE_IN_PIO;

	if (msdc_ctl->iswrite) {
		msdc_data.flags = MMC_DATA_WRITE;
		msdc_cmd.opcode = MMC_WRITE_BLOCK;
		msdc_data.blocks = msdc_ctl->total_size / 512;
		if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
			if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, 512)) {
				dma_force[host_ctl->id] = FORCE_NOTHING;
				ret = -EFAULT;
				goto single_end;
			}
		} else {
			/* called from other kernel module */
			memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, 512);
		}
	} else {
		msdc_data.flags = MMC_DATA_READ;
		msdc_cmd.opcode = MMC_READ_SINGLE_BLOCK;
		msdc_data.blocks = msdc_ctl->total_size / 512;

		memset(sg_msdc_multi_buffer, 0, 512);
	}

	msdc_cmd.arg = msdc_ctl->address;

	if (!mmc_card_blockaddr(host_ctl->mmc->card)) {
		pr_debug("the device is used byte address!\n");
		msdc_cmd.arg <<= 9;
	}

	msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	msdc_data.stop = NULL;
	msdc_data.blksz = 512;
	msdc_data.sg = &msdc_sg;
	msdc_data.sg_len = 1;

#if DEBUG_MMC_IOCTL
	pr_debug("single block: ueser buf address is 0x%p!\n", msdc_ctl->buffer);
#endif
	sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size);
	mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card);

	mmc_wait_for_req(host_ctl->mmc, &msdc_mrq);

	if (!msdc_ctl->iswrite) {
		if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) {
			if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, 512)) {
				dma_force[host_ctl->id] = FORCE_NOTHING;
				ret = -EFAULT;
				goto single_end;
			}
		} else {
			/* called from other kernel module */
			memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, 512);
		}
	}

	/* clear the global buffer of the R/W IOCTL */
	memset(sg_msdc_multi_buffer, 0, 512);

	if (msdc_ctl->partition) {
		ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf);
		if (ret) {
			pr_debug("mmc_send_ext_csd error, single rw2\n");
			goto single_end;
		}

		if (l_buf[179] & 0x7) {
			/* set back to access user area */
			l_buf[179] &= ~0x7;
			l_buf[179] |= 0x0;
			mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000);
		}
	}

single_end:
	mmc_release_host(host_ctl->mmc);
	if (ret)
		msdc_ctl->result = ret;
	else if (msdc_cmd.error)
		msdc_ctl->result = msdc_cmd.error;
	else if (msdc_data.error)
		msdc_ctl->result = msdc_data.error;
	else
		msdc_ctl->result = 0;

	dma_force[host_ctl->id] = FORCE_NOTHING;
	return msdc_ctl->result;
}
Example No. 11
int mmc_test_mem_card(struct mmc_test_config *cfg)
{
    int id, count, forever;
    int ret, chk_result, tid = 0, result = 0;
    unsigned int chunks, chunk_blks, left_blks, pass = 0, fail = 0;
    unsigned int total_blks;
    unsigned int i, j;
    unsigned int blksz;
    unsigned int clkhz;    
    char pattern = 0;
    char *buf;
    unsigned long blknr;
    struct mmc_host *host;
    struct mmc_card *card;

    id    = cfg->id;
    count = cfg->count;
    buf   = cfg->buf;
    blknr = cfg->blknr;
    blksz = cfg->blksz;

    chk_result = cfg->chk_result;
    chunk_blks = cfg->chunk_blks;
    total_blks = (cfg->total_size + blksz - 1) / blksz;
    forever    = (count == -1) ? 1 : 0;

    host = mmc_get_host(id);
    card = mmc_get_card(id);

    while (forever || count--) {
        printf("[TST] ==============================================\n");
        printf("[TST] BEGIN: %d/%d, No Stop(%d)\n", 
            (cfg->count != -1) ? cfg->count - count : 0, 
            (cfg->count != -1) ? cfg->count : 0, forever);
        printf("[TST] ----------------------------------------------\n");
        printf("[TST] Mode    : %d\n", cfg->mode);
        printf("[TST] Clock   : %d kHz\n", cfg->clock / 1000);
        printf("[TST] BusWidth: %d bits\n", cfg->buswidth);
        printf("[TST] BurstSz : %d bytes\n", 0x1 << cfg->burstsz);
        printf("[TST] BlkAddr : %xh\n", blknr);
        printf("[TST] BlkSize : %dbytes\n", blksz);
        printf("[TST] TstBlks : %d\n", total_blks);
#if defined(BB_MT6575)
        printf("[TST] AutoCMD : 12(%d), 23(%d)\n", 
            (cfg->autocmd & MSDC_AUTOCMD12) ? 1 : 0, 
            (cfg->autocmd & MSDC_AUTOCMD23) ? 1 : 0);
#endif
        printf("[TST] ----------------------------------------------\n");


        if (mmc_init_host(host, id) != 0) {
            result = -__LINE__;
            goto failure;
        }
        if (mmc_init_card(host, card) != 0) {
            result = -__LINE__;
            goto failure;
        }
#if defined(BB_MT6575)
        msdc_set_dma(host, (u8)cfg->burstsz, (u32)cfg->flags);
        msdc_set_autocmd(host, cfg->autocmd, 1);
#endif

        /* change uhs-1 mode */
#if 0        
        if (mmc_card_uhs1(card)) {
            if (mmc_switch_uhs1(host, card, cfg->uhsmode) != 0) {
                result = -__LINE__;
                goto failure;
            }
        }
#endif

        /* change clock */
        if (cfg->clock) {
            clkhz = card->maxhz < cfg->clock ? card->maxhz : cfg->clock;
            mmc_set_clock(host, mmc_card_ddr(card), clkhz); 
        }
        if (mmc_card_sd(card) && cfg->buswidth == HOST_BUS_WIDTH_8) {
            printf("[TST] SD card doesn't support 8-bit bus width (SKIP)\n");
            result = MMC_ERR_NONE;
        }
        if (mmc_set_bus_width(host, card, cfg->buswidth) != 0) {
            result = -__LINE__;
            goto failure;
        }

        /* cmd16 is illegal while card is in ddr mode */
        if (!(mmc_card_mmc(card) && mmc_card_ddr(card))) {
            if (mmc_set_blk_length(host, blksz) != 0) {
                result = -__LINE__;
                goto failure;            
            }
        }

#if defined(BB_MT6575)
        if (cfg->piobits) {
            printf("[TST] PIO bits: %d\n", cfg->piobits);
            msdc_set_pio_bits(host, cfg->piobits);
        }
#endif
        tid = result = 0;        

        if (mmc_erase_start(card, blknr * blksz) != MMC_ERR_NONE) {
            result = -__LINE__;
            goto failure;
        }
        if (mmc_erase_end(card, (blknr + total_blks) * blksz) != MMC_ERR_NONE) {
            result = -__LINE__;
            goto failure;
        }
        if (mmc_erase(card, MMC_ERASE_NORMAL) != MMC_ERR_NONE) {
            result = -__LINE__;
            goto failure;        
        }
        printf("[TST] 0x%x - 0x%x Erased\n", blknr * blksz, 
            (blknr + total_blks) * blksz);

        mmc_send_status(host, card, &status);

        if (cfg->tst_single) {
            /* single block write */
            for (i = 0; i < total_blks; i++) {
                pattern = (i + count) % 256;
                memset(buf, pattern, blksz);
                ret = mmc_block_write(id, blknr + i, 1, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    printf("test single block write failed (%d)\n", i);
                    result = -__LINE__;
                    goto failure;
                }
            }

            printf(TC_MSG, host->id, result == 0 ? "PASS" : "FAIL", tid++, 
                "test single block write\n");

            if (result)
                break;
            
            /* single block read */
            for (i = 0; i < total_blks && !result; i++) {
                pattern = (i + count) % 256;
                /* populate buffer with different pattern */
                memset(buf, pattern + 1, blksz);
                ret = mmc_block_read(id, blknr + i, 1, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    result = -__LINE__;
                    goto failure;
                }
                if (chk_result) {
                    for (j = 0; j < blksz; j++) {
                        if (buf[j] != pattern) {
                            result = -__LINE__;
                            goto failure;
                        }
                    }
                }
            }
            printf(TC_MSG, host->id, result == 0 ? "PASS" : "FAIL", tid++, 
                "test single block read\n");

            if (result) {
                printf("[SD%d]\t\tread back pattern(0x%.2x) failed\n", 
                    id, pattern);
                goto failure;
            }
        }

        mmc_send_status(host, card, &status);
        
        if (cfg->tst_multiple) {
            /* multiple block write */
            chunks = total_blks / chunk_blks;
            left_blks = total_blks % chunk_blks;   
            for (i = 0; i < chunks; i++) {
                pattern = (i + count) % 256;
                memset(buf, pattern, blksz * chunk_blks);
                ret = mmc_block_write(id, blknr + i * chunk_blks, 
                    chunk_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    result = -__LINE__;
                    goto failure;
                }
            }
            
            if (!result && left_blks) {
                pattern = (i + count) % 256;
                memset(buf, pattern, blksz * left_blks);
                ret = mmc_block_write(id, blknr + chunks * chunk_blks, 
                    left_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    result = -__LINE__;
                    goto failure;
                }
            }

            printf(TC_MSG, host->id, result == 0 ? "PASS" : "FAIL", tid++, 
                "test multiple block write\n");

            if (result)
                goto failure;

            /* multiple block read */
            for (i = 0; i < chunks; i++) {
                pattern = (i + count) % 256;
                /* populate buffer with different pattern */
                memset(buf, pattern + 1, blksz);
                ret = mmc_block_read(id, blknr + i * chunk_blks, 
                    chunk_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    printf("[SD%d]\t\tread %d blks failed(ret = %d blks)\n",
                        host->id, chunk_blks, ret);
                    result = -__LINE__;
                    goto failure;
                }
                if (chk_result) {
                    for (j = 0; j < chunk_blks * blksz; j++) {
                        if (buf[j] == pattern)
                            continue;
                        result = -__LINE__;
                        printf("[SD%d]\t\t%xh = %x (!= %x)\n",
                            host->id, blknr + i * chunk_blks + j, buf[j], pattern);
                        goto failure;
                    }
                }
            }

            if (!result && left_blks) {
                pattern = i % 256;
                /* populate buffer with different pattern */
                memset(buf, pattern + 1, blksz);
                ret = mmc_block_read(id, blknr + chunks * chunk_blks, 
                    left_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    printf("[SD%d]\t\tread %d blks failed(ret = %d blks)\n",
                        host->id, left_blks, ret);
                    result = -__LINE__;
                    goto failure;
                }
                if (chk_result) {
                    for (j = 0; j < left_blks * blksz; j++) {
                        if (buf[j] == pattern)
                            continue;
                        printf("[SD%d]\t\t%xh = %x (!= %x)\n",
                            host->id, blknr + chunks * chunk_blks + j, buf[j], pattern);
                        result = -__LINE__;
                        goto failure;
                    }
                }
            }

            printf(TC_MSG, host->id, result == 0 ? "PASS" : "FAIL", tid++, 
                "test multiple block read\n");

            if (result)
                goto failure;
        }

        mmc_send_status(host, card, &status);

        if (cfg->tst_interleave) {
            /* multiple block write */
            chunks = total_blks / chunk_blks;
            left_blks = total_blks % chunk_blks;   
            for (i = 0; i < chunks; i++) {
                pattern = (i + count) % 256;
                memset(buf, pattern, blksz * chunk_blks);
                ret = mmc_block_write(id, blknr + i * chunk_blks, 
                    chunk_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    result = -__LINE__;
                    goto failure;
                }

                /* populate buffer with different pattern */
                memset(buf, pattern + 1, blksz * chunk_blks);
                ret = mmc_block_read(id, blknr + i * chunk_blks, 
                    chunk_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    result = -__LINE__;
                    goto failure;
                }
                if (chk_result) {
                    for (j = 0; j < chunk_blks * blksz; j++) {
                        if (buf[j] == pattern) 
                            continue;
                        result = -__LINE__;
                        goto failure;
                    }
                }                
            }           

            if (!result && left_blks) {
                pattern = (i + count) % 256;
                memset(buf, pattern, blksz * left_blks);
                ret = mmc_block_write(id, blknr + chunks * chunk_blks, 
                    left_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    result = -__LINE__;
                    goto failure;
                }

                /* populate buffer with different pattern */
                memset(buf, pattern + 1, blksz * left_blks);
                ret = mmc_block_read(id, blknr + chunks * chunk_blks, 
                    left_blks, (unsigned long*)buf);
                if (ret != MMC_ERR_NONE) {
                    result = -__LINE__;
                    break;
                }
                if (chk_result) {
                    for (j = 0; j < left_blks * blksz; j++) {
                        if (buf[j] == pattern)
                            continue;
                        result = -__LINE__;
                        goto failure;
                    }
                }
            }

            printf(TC_MSG, host->id, result == 0 ? "PASS" : "FAIL", tid++, 
                "test multiple block interleave write-read\n");

            if (result)
                goto failure;
        }
        if (cfg->desc) {
            printf("[TST] ----------------------------------------------\n");
            printf("[TST] Report - %s \n", cfg->desc);
            printf("[TST] ----------------------------------------------\n");
        }
        mmc_prof_dump(id);

failure:
        if (result) {
            printf("[SD%d] mmc test failed (%d)\n", host->id, result);
            fail++;
        } else {
            pass++;
        }
        printf("[TST] ----------------------------------------------\n");
        printf("[TST] Test Result: TOTAL(%d/%d), PASS(%d), FAIL(%d) \n", 
            cfg->count - count, cfg->count, pass, fail);
        printf("[TST] ----------------------------------------------\n");    
    	//mdelay(1000);
    }

    return result;
}
Example No. 12
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
#ifdef CONFIG_MMC_DISCARD_MERGE
	int ret;
	int state = DCS_NO_DISCARD_REQ;
	int flag;
#endif

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
#ifdef CONFIG_MMC_DISCARD_MERGE
			if (mmc_card_mmc(mq->card)) {
				flag = mmc_read_idle(mq->card);
				if (flag == DCS_IDLE_OPS_TURNED_ON) {
					mmc_claim_host(mq->card->host);
					ret = mmc_do_idle_ops(mq->card);
					mmc_release_host(mq->card->host);
					if (ret) {
						if (mq->flags & MMC_QUEUE_SUSPENDED)
							goto sched;
					} else {
						state = DCS_NO_DISCARD_REQ;
						mmc_clear_idle(mq->card);
					}
					continue;
				} else if (flag == DCS_MMC_DEVICE_REMOVED) {
					/* do nothing */
				} else if (state == DCS_DISCARD_REQ) {
					state = DCS_IDLE_TIMER_TRIGGERED;
					mmc_trigger_idle_timer(mq->card);
				}
			}
sched:
#endif
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
#ifdef CONFIG_MMC_DISCARD_MERGE
		else if (mmc_card_mmc(mq->card)) {
			if (state == DCS_NO_DISCARD_REQ && req->cmd_flags & REQ_DISCARD)
				state = DCS_DISCARD_REQ;
		}
#endif
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
void mmc_add_card_debugfs(struct mmc_card *card)
{
	struct mmc_host	*host = card->host;
	struct dentry	*root;

	if (!host->debugfs_root)
		return;

	root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
	if (IS_ERR(root))
		/* Don't complain -- debugfs just isn't enabled */
		return;
	if (!root)
		/* Complain -- debugfs is enabled, but it failed to
		 * create the directory. */
		goto err;

	card->debugfs_root = root;

	if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
		goto err;

	if (mmc_card_mmc(card) || mmc_card_sd(card))
		if (!debugfs_create_file("status", S_IRUSR, root, card,
					&mmc_dbg_card_status_fops))
			goto err;

	if (mmc_card_mmc(card)) {
		if (!debugfs_create_file("ext_csd", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_fops))
			goto err;
		if (!debugfs_create_file("eol_status", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_eol_fops))
			goto err;
		if (!debugfs_create_file("dhs_type_a", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_life_time_type_a_fops))
			goto err;
		if (!debugfs_create_file("dhs_type_b", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_life_time_type_b_fops))
			goto err;
		if (!debugfs_create_file("emmc_device_type", S_IRUSR, root,
					card, &mmc_dbg_ext_csd_device_type_fops))
			goto err;
		if (!debugfs_create_file("firmware_version", S_IRUSR, root,
					card, &mmc_dbg_ext_csd_fw_v_fops))
			goto err;
		if (!debugfs_create_file("bkops_status", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_bkops_status_fops))
			goto err;
		if (!debugfs_create_file("bkops_support", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_bkops_support_fops))
			goto err;
		if (!debugfs_create_file("poweron_notify", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_pon_notify_fops))
			goto err;
		if (!debugfs_create_file("hpi_support", S_IRUSR, root, card,
					&mmc_dbg_ext_csd_hpi_support_fops))
			goto err;
	}

	if (mmc_card_sd(card))
		if (!debugfs_create_file("speed_class", S_IROTH, root, card,
					&mmc_dbg_card_speed_class_fops))
			goto err;

	return;

err:
	debugfs_remove_recursive(root);
	card->debugfs_root = NULL;
	dev_err(&card->dev, "failed to initialize debugfs\n");
}
Example No. 14
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	if (is_svlte_type_mmc_card(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "svlte-qd");
	else if (mmc_card_sd(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "sd-qd");
	else if (mmc_card_mmc(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "emmc-qd");
	else if (mmc_card_sdio(card))
		mq->thread = kthread_run(mmc_queue_thread, mq, "sdio-qd");
	else
		mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example No. 15
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_ANY;
    int ret;
    struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
    struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

#ifdef CONFIG_ZRAM
    if (mmc_card_mmc(card) &&
            (totalram_pages << (PAGE_SHIFT - 10)) <= (256 * 1024))
        mq->queue->backing_dev_info.ra_pages =
            (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
#endif // CONFIG_ZRAM


    memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
    memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
    mq->mqrq_cur = mqrq_cur;
    mq->mqrq_prev = mqrq_prev;
    mq->queue->queuedata = mq;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    if (mmc_can_erase(card))
        mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
    if (host->max_segs == 1) {
        unsigned int bouncesz;

        bouncesz = MMC_QUEUE_BOUNCESZ;

        if (bouncesz > host->max_req_size)
            bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
            bouncesz = host->max_seg_size;
        if (bouncesz > (host->max_blk_count * 512))
            bouncesz = host->max_blk_count * 512;

        if (bouncesz > 512) {
            mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            if (!mqrq_cur->bounce_buf) {
                pr_warning("%s: unable to "
                           "allocate bounce cur buffer\n",
                           mmc_card_name(card));
            }
            mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            if (!mqrq_prev->bounce_buf) {
                pr_warning("%s: unable to "
                           "allocate bounce prev buffer\n",
                           mmc_card_name(card));
                kfree(mqrq_cur->bounce_buf);
                mqrq_cur->bounce_buf = NULL;
            }
        }

        if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);

            mqrq_cur->sg = mmc_alloc_sg(1, &ret);
            if (ret)
                goto cleanup_queue;

            mqrq_cur->bounce_sg =
                mmc_alloc_sg(bouncesz / 512, &ret);
            if (ret)
                goto cleanup_queue;

            mqrq_prev->sg = mmc_alloc_sg(1, &ret);
            if (ret)
                goto cleanup_queue;

            mqrq_prev->bounce_sg =
                mmc_alloc_sg(bouncesz / 512, &ret);
            if (ret)
                goto cleanup_queue;
        }
    }
#endif

    if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_hw_sectors(mq->queue,
                                 min(host->max_blk_count, host->max_req_size / 512));
        blk_queue_max_segments(mq->queue, host->max_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
        if (ret)
            goto cleanup_queue;


        mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
        if (ret)
            goto cleanup_queue;
    }

    sema_init(&mq->thread_sem, 1);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                             host->index, subname ? subname : "");

    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_bounce_sg;
    }

    return 0;
free_bounce_sg:
    kfree(mqrq_cur->bounce_sg);
    mqrq_cur->bounce_sg = NULL;
    kfree(mqrq_prev->bounce_sg);
    mqrq_prev->bounce_sg = NULL;

cleanup_queue:
    kfree(mqrq_cur->sg);
    mqrq_cur->sg = NULL;
    kfree(mqrq_cur->bounce_buf);
    mqrq_cur->bounce_buf = NULL;

    kfree(mqrq_prev->sg);
    mqrq_prev->sg = NULL;
    kfree(mqrq_prev->bounce_buf);
    mqrq_prev->bounce_buf = NULL;

    blk_cleanup_queue(mq->queue);
    return ret;
}
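A matching teardown for a queue initialised this way mirrors the error path above: stop the worker thread, release the per-request scatterlists and bounce buffers, then clean up the block queue. A minimal sketch under those assumptions (not the core's own mmc_cleanup_queue, which additionally flushes and suspend-protects the queue):

static void example_teardown_queue(struct mmc_queue *mq)
{
    struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
    struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

    /* Stop the mmcqd worker started by mmc_init_queue() */
    kthread_stop(mq->thread);

    /* Release resources in the same order as the error labels above */
    kfree(mqrq_cur->bounce_sg);
    mqrq_cur->bounce_sg = NULL;
    kfree(mqrq_prev->bounce_sg);
    mqrq_prev->bounce_sg = NULL;

    kfree(mqrq_cur->sg);
    mqrq_cur->sg = NULL;
    kfree(mqrq_cur->bounce_buf);
    mqrq_cur->bounce_buf = NULL;

    kfree(mqrq_prev->sg);
    mqrq_prev->sg = NULL;
    kfree(mqrq_prev->bounce_buf);
    mqrq_prev->bounce_buf = NULL;

    blk_cleanup_queue(mq->queue);
}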