Example #1
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}
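
Both setup variants in these examples delegate discard configuration to mmc_queue_setup_discard(), which is not shown. The following is a minimal sketch of that helper for the blk_queue_flag_set() era above, reconstructed from the inline discard setup in Example #5; flag names such as QUEUE_FLAG_SECERASE changed across kernel versions, so treat this as an approximation rather than verbatim upstream code.

/*
 * Approximate sketch of mmc_queue_setup_discard(), reconstructed
 * from the inline discard setup in Example #5; not verbatim upstream.
 */
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned int max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;

	/* The discard granularity must not exceed the maximum discard size */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;

	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}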
/* Call the MMC block layer so userspace can perform an erase operation */
static int simple_mmc_erase_func(unsigned int start, unsigned int size)
{
    struct msdc_host *host;
    unsigned int arg;

    /* eMMC is always in slot 0 */
    host = msdc_get_host(MSDC_EMMC, MSDC_BOOT_EN, 0);
    BUG_ON(!host);
    BUG_ON(!host->mmc);
    BUG_ON(!host->mmc->card);
                        
    mmc_claim_host(host->mmc);

    if (mmc_can_discard(host->mmc->card)) {
        arg = __MMC_DISCARD_ARG;
    } else if (host->mmc->card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) {
        /* For Hynix eMMC chips, do trim even if MMC_QUIRK_TRIM_UNSTABLE is set */
        arg = __MMC_TRIM_ARG;
    } else if (mmc_can_erase(host->mmc->card)) {
        /*
         * mmc_erase() strips the erase-group-unaligned part;
         * msdc_command_start() does a trim for the old combo-erase
         * unaligned issue.
         */
        arg = __MMC_ERASE_ARG;
    } else {
        printk(KERN_ERR "[%s]: eMMC card does not support trim / discard / erase\n",
               __func__);
        goto end;
    }

    printk(KERN_INFO "[%s]: start=0x%x, size=%d, arg=0x%x, sec_feature_support=0x%x, EXT_CSD_SEC_GB_CL_EN=0x%x\n",
           __func__, start, size, arg, host->mmc->card->ext_csd.sec_feature_support, EXT_CSD_SEC_GB_CL_EN);
    mmc_erase(host->mmc->card, start, size, arg);

#if DEBUG_MMC_IOCTL
    printk("[%s]: erase done....arg=0x%x\n", __func__, arg);
#endif
end:
    mmc_release_host(host->mmc);
    
    return 0;
}
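
A hypothetical caller of the helper above. The sector-unit assumption is illustrative: mmc_erase() takes its range in 512-byte sectors for block-addressed cards, but the units this vendor helper actually expects are not stated in the source.

/*
 * Hypothetical usage: erase 1 MiB starting at sector 0x2000,
 * assuming start/size are in 512-byte sectors.
 */
static int simple_mmc_erase_example(void)
{
	unsigned int start = 0x2000;
	unsigned int size = (1024 * 1024) / 512;	/* 1 MiB in sectors */

	return simple_mmc_erase_func(start, size);
}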
Example #3
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	/* Initialize thread_sem even if it is not used */
	sema_init(&mq->thread_sem, 1);
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		if (!mmc_card_sd(card))
			bouncesz = MMC_QUEUE_BOUNCESZ;
		else
			bouncesz = MMC_QUEUE_SD_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			if (!mmc_card_sd(card))
				mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_cur->bounce_buf = mmc_queue_cur_bounce_buf;
			if (!mqrq_cur->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			if (!mmc_card_sd(card))
				mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_prev->bounce_buf = mmc_queue_prev_bounce_buf;
			if (!mqrq_prev->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	if (!mmc_card_sd(card))
		kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	if (!mmc_card_sd(card))
		kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
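
The mmc_alloc_sg() helper called throughout the function above is not shown in this example. A sketch consistent with the open-coded kmalloc() plus sg_init_table() sequence in Example #5 follows; the kmalloc_array() form is a modernization, not necessarily what this tree used.

/*
 * Sketch of mmc_alloc_sg(): allocate and initialise a scatterlist,
 * reporting failure through *err. Mirrors the open-coded
 * kmalloc() + sg_init_table() sequence in Example #5.
 */
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}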
Example #5
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    mq->queue->queuedata = mq;
    mq->req = NULL;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    if (mmc_can_erase(card)) {
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
        mq->queue->limits.max_discard_sectors = UINT_MAX;
        if (card->erased_byte == 0)
            mq->queue->limits.discard_zeroes_data = 1;
        mq->queue->limits.discard_granularity = card->pref_erase << 9;
        if (mmc_can_secure_erase_trim(card))
            queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
                                    mq->queue);
    }

#ifdef CONFIG_MMC_BLOCK_BOUNCE
    if (host->max_segs == 1) {
        unsigned int bouncesz;

        bouncesz = MMC_QUEUE_BOUNCESZ;

        if (bouncesz > host->max_req_size)
            bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
            bouncesz = host->max_seg_size;
        if (bouncesz > (host->max_blk_count * 512))
            bouncesz = host->max_blk_count * 512;

        if (bouncesz > 512) {
            mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            if (!mq->bounce_buf) {
                printk(KERN_WARNING "%s: unable to "
                       "allocate bounce buffer\n",
                       mmc_card_name(card));
            }
        }

        if (mq->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);

            mq->sg = kmalloc(sizeof(struct scatterlist),
                             GFP_KERNEL);
            if (!mq->sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->sg, 1);

            mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                                    bouncesz / 512, GFP_KERNEL);
            if (!mq->bounce_sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->bounce_sg, bouncesz / 512);
        }
    }
#endif

    if (!mq->bounce_buf) {
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_hw_sectors(mq->queue,
                                 min(host->max_blk_count, host->max_req_size / 512));
        blk_queue_max_segments(mq->queue, host->max_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mq->sg = kmalloc(sizeof(struct scatterlist) *
                         host->max_segs, GFP_KERNEL);
        if (!mq->sg) {
            ret = -ENOMEM;
            goto cleanup_queue;
        }
        sg_init_table(mq->sg, host->max_segs);
    }

    sema_init(&mq->thread_sem, 1);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                             host->index, subname ? subname : "");

    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_bounce_sg;
    }

    return 0;
free_bounce_sg:
    kfree(mq->bounce_sg);	/* kfree(NULL) is a harmless no-op */
    mq->bounce_sg = NULL;
cleanup_queue:
    kfree(mq->sg);
    mq->sg = NULL;
    kfree(mq->bounce_buf);
    mq->bounce_buf = NULL;
    blk_cleanup_queue(mq->queue);
    return ret;
}
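
For context, mmc_init_queue() is called by the MMC block driver when it sets up the disk for a newly probed card. The sketch below models such a call site; struct mmc_blk_data and its field names follow the block driver's conventions but are assumptions here, not verbatim from any particular tree.

/*
 * Hypothetical call site for mmc_init_queue(); field names are
 * illustrative.
 */
static int example_attach_queue(struct mmc_blk_data *md,
				struct mmc_card *card,
				const char *subname)
{
	int ret;

	spin_lock_init(&md->lock);
	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		return ret;

	/* Let the queue thread find its way back to the block data */
	md->queue.data = md;
	return 0;
}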
Example #6
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

 cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}
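
Example #6 factors the per-request allocations into helpers that are not shown. Below is a plausible reconstruction of mmc_queue_alloc_bounce_bufs() and mmc_queue_alloc_sgs(), derived from the open-coded loops in Examples #3 and #5 and the qdepth-sized mqrq array above; treat the exact shape as an assumption.

/*
 * Plausible shape of the allocation helpers used in Example #6:
 * one bounce buffer / sg list per request slot in the qdepth-sized
 * mqrq array, unwinding on failure. Reconstructed, not verbatim.
 */
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}

static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret = 0;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			break;
	}

	return ret;
}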