int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t *lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = 0;

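	/* Prefer the parent device's DMA mask, if one is set, as the bounce limit. */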
	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);
	
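	/* No bounce buffer: apply the host's DMA limits to the queue directly. */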
	if (!cq->bounce_buf) {
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

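		/* Scatterlist for mapping requests; assumes max_phys_segs
		 * covers the queue's segment limit set above. */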
		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
				 GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	cq->queue->queuedata = cq;

	/* Switch the card's I/O scheduler from the default CFQ to deadline. */
	elevator_exit(cq->queue->elevator);
	cq->queue->elevator = NULL;
	ret = elevator_init(cq->queue, "deadline");
	if (ret) {
		printk(KERN_ERR "[card_init_queue] elevator_init deadline failed\n");
		goto free_sg;
	}

	sema_init(&cq->thread_sem, 1);
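	/* Per-card worker thread that pulls requests off this queue. */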
	cq->thread = kthread_run(card_queue_thread, cq, "%s_queue", card->name);
	if (IS_ERR(cq->thread)) {
		ret = PTR_ERR(cq->thread);
		goto free_sg;
	}

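	/* Register a reboot notifier for this queue (see card_reboot_notifier). */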
	cq->nb.notifier_call = card_reboot_notifier;
	register_reboot_notifier(&cq->nb);

	return 0;

free_sg:
	if (!cq->bounce_buf)
		kfree(cq->sg);
cleanup_queue:
	blk_cleanup_queue(cq->queue);
	return ret;
}
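
/*
 * Alternative version of card_init_queue(): instead of one kthread per
 * card, every queue is linked into the global card_queue_head list and a
 * single shared "card_queue" thread services them all.
 */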
int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t *lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = 0, card_queue_num;
	struct card_queue_list *cq_node_current;
	struct card_queue_list *cq_node_prev = NULL;

	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);
	
	if (!cq->bounce_buf) {
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
				 GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

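	/* First queue on this host: create the list head and start the
	 * shared "card_queue" worker thread. */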
	if (card_queue_head == NULL) {
		card_queue_head = kmalloc(sizeof(struct card_queue_list),
					  GFP_KERNEL);
		if (card_queue_head == NULL) {
			ret = -ENOMEM;
			goto free_sg;
		}
		card_queue_head->cq = cq;
		card_queue_head->cq_num = 0;
		card_queue_head->cq_flag = 0;
		card_queue_head->cq_next = NULL;

		init_completion(&card_thread_complete);
		init_waitqueue_head(&card_thread_wq);
		sema_init(&card_thread_sem, 1);

		host->queue_task = kthread_run(card_queue_thread, cq,
					       "card_queue");
		if (IS_ERR(host->queue_task)) {
			ret = PTR_ERR(host->queue_task);
			kfree(card_queue_head);
			card_queue_head = NULL;
			goto free_sg;
		}

		/* Wait for the thread to signal start-up, then re-arm the
		 * completion for its next user. */
		wait_for_completion(&card_thread_complete);
		init_completion(&card_thread_complete);
		return 0;
	} else {
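		/* Walk to the tail of the list, tracking the last node and
		 * the highest queue number handed out so far. */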
		card_queue_num = 0;
		cq_node_current = card_queue_head;
		do {
			card_queue_num = cq_node_current->cq_num;
			cq_node_prev = cq_node_current;
			cq_node_current = cq_node_current->cq_next;
		} while (cq_node_current != NULL);

		cq_node_current = kmalloc(sizeof(struct card_queue_list),
					  GFP_KERNEL);
		if (cq_node_current == NULL) {
			ret = -ENOMEM;
			goto free_sg;
		}
		cq_node_prev->cq_next = cq_node_current;
		cq_node_current->cq = cq;
		cq_node_current->cq_next = NULL;
		cq_node_current->cq_num = ++card_queue_num;
		cq_node_current->cq_flag = 0;

		return 0;
	}

free_sg:
	if (!cq->bounce_buf)
		kfree(cq->sg);
cleanup_queue:
	blk_cleanup_queue(cq->queue);
	return ret;
}