/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        blkcg_exit_queue(q);

        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
        }

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
        else
                blk_mq_release(q);

        blk_trace_shutdown(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
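blk_release_queue() is never called directly: it is the ->release hook of the queue's embedded kobject, so it runs when the last reference to the queue is dropped (blk_put_queue() is a thin wrapper around kobject_put(&q->kobj)). The wiring lives in block/blk-sysfs.c and looks roughly like this in the kernels these variants come from:

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};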
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;

        blk_sync_queue(q);

        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
        }

        blk_throtl_exit(q);

        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_throtl_release(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);

        ida_simple_remove(&blk_queue_ida, q->id);
        kmem_cache_free(blk_requestq_cachep, q);
}
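For context, a driver typically reaches this release path through the following teardown sequence. This is a hypothetical sketch; struct mydrv_dev and the mydrv_ prefix are illustrative, not from any real driver:

/* Hypothetical driver teardown; mydrv_* names are illustrative only. */
struct mydrv_dev {
        struct gendisk          *disk;
        struct request_queue    *queue;
};

static void mydrv_remove(struct mydrv_dev *dev)
{
        del_gendisk(dev->disk);         /* unlink the disk, stop new I/O  */
        blk_cleanup_queue(dev->queue);  /* drain requests, drop queue ref */
        put_disk(dev->disk);            /* drop the gendisk reference     */
        /* blk_release_queue() fires once the queue's last kobject
         * reference is gone, whichever of the calls above that is. */
}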
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        blk_sync_queue(q);

        blkcg_exit_queue(q);

        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
        }

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
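The variants that end in call_rcu() defer the final free so that code still inside an RCU read-side critical section cannot see the queue memory disappear underneath it. The callback they schedule is approximately:

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q =
                container_of(rcu_head, struct request_queue, rcu_head);

        /* Runs after a grace period; no RCU reader can still hold q. */
        kmem_cache_free(blk_requestq_cachep, q);
}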
int card_init_queue(struct card_queue *cq, struct memory_card *card,
                    spinlock_t *lock)
{
        struct card_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret = 0;

        if (host->parent->dma_mask && *host->parent->dma_mask)
                limit = *host->parent->dma_mask;

        cq->card = card;
        cq->sg = NULL;          /* so error paths below can kfree() it safely */
        cq->queue = blk_init_queue(card_request, lock);
        if (!cq->queue)
                return -ENOMEM;

        blk_queue_prep_rq(cq->queue, card_prep_request);
        card_init_bounce_buf(cq, card);

        if (!cq->bounce_buf) {
                blk_queue_bounce_limit(cq->queue, limit);
                blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
                /* blk_queue_max_hw_phys_segments(cq->queue, host->max_phys_segs); */
                blk_queue_max_segments(cq->queue, host->max_hw_segs);
                blk_queue_max_segment_size(cq->queue, host->max_seg_size);

                cq->req = NULL;
                cq->sg = kmalloc(sizeof(struct scatterlist) *
                                 host->max_phys_segs, GFP_KERNEL);
                if (!cq->sg) {
                        blk_cleanup_queue(cq->queue);
                        return -ENOMEM;
                }
        }

        /* Switch the card's I/O scheduler from the default (CFQ) to deadline. */
        cq->queue->queuedata = cq;
        elevator_exit(cq->queue->elevator);
        cq->queue->elevator = NULL;
        ret = elevator_init(cq->queue, "deadline");
        if (ret) {
                printk(KERN_ERR "[card_init_queue] elevator_init deadline fail\n");
                kfree(cq->sg);          /* NULL-safe; original leaked this */
                blk_cleanup_queue(cq->queue);
                return ret;
        }

        init_MUTEX(&cq->thread_sem);    /* legacy spelling of sema_init(&cq->thread_sem, 1) */

        cq->thread = kthread_run(card_queue_thread, cq, "%s_queue",
                                 card->name);
        if (IS_ERR(cq->thread)) {
                ret = PTR_ERR(cq->thread);
                /* The original only hinted at "goto free_bounce_sg" in a
                 * comment; unwind here so the queue and sg list are not leaked. */
                kfree(cq->sg);
                blk_cleanup_queue(cq->queue);
        }

        return ret;
}
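card_queue_thread() is not shown here. Drivers of this vintage usually model it on the classic mmc_queue_thread() loop: fetch a request under the queue lock, hand it to hardware, and park on thread_sem while idle. A sketch under that assumption follows; the cq->issue_fn hook and exact field names are assumptions, not the driver's actual interface:

/* Sketch in the style of the old mmc_queue_thread(); issue_fn is assumed. */
static int card_queue_thread(void *d)
{
        struct card_queue *cq = d;
        struct request_queue *q = cq->queue;

        current->flags |= PF_MEMALLOC;

        down(&cq->thread_sem);
        do {
                struct request *req;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);     /* NULL when queue is empty */
                cq->req = req;
                spin_unlock_irq(q->queue_lock);

                if (!req) {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&cq->thread_sem);
                        schedule();             /* sleep until more work arrives */
                        down(&cq->thread_sem);
                        continue;
                }
                set_current_state(TASK_RUNNING);

                cq->issue_fn(cq, req);          /* hand the request to the card */
        } while (1);
        up(&cq->thread_sem);

        return 0;
}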
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);

        bdi_put(q->backing_dev_info);
        blkcg_exit_queue(q);

        if (q->elevator) {
                ioc_clear_queue(q);
                elevator_exit(q, q->elevator);
        }

        blk_free_queue_stats(q->stats);

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops) {
                if (q->exit_rq_fn)
                        q->exit_rq_fn(q, q->fq->flush_rq);
                blk_free_flush_queue(q->fq);
        } else {
                blk_mq_release(q);
        }

        blk_trace_shutdown(q);

        if (q->mq_ops)
                blk_mq_debugfs_unregister(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
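This variant also has to tear down the poll-statistics callback: blk-mq allocates q->poll_cb at queue setup, so release must remove and free it. The setup side from the same kernel era is approximately the following (reproduced from memory; treat the exact call site as an assumption):

/* In blk_mq_init_allocated_queue(), approximately: */
q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
                                     blk_mq_poll_stats_bkt,
                                     BLK_MQ_POLL_STATS_BKTS, q);

QUEUE_FLAG_POLL_STATS is only set once the callback has actually been added to the queue, which is why blk_release_queue() tests the flag before blk_stat_remove_callback() but frees the callback unconditionally.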
/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        blk_sync_queue(q);

        if (q->elevator)
                elevator_exit(q->elevator);

        blk_throtl_exit(q);

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_throtl_release(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);

        kmem_cache_free(blk_requestq_cachep, q);
}
static int __init virtualblockdevice_init(void)
{
        int ret;
        elevator_t *oldelev;

        printk(KERN_ALERT "VirtualBlockDevice: Entering virtualblockdevice_init!\n");

        ret = register_blkdev(virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME);
        if (0 > ret) {
                printk(KERN_ALERT "VirtualBlockDevice: Failed to register block device! Major: %d\tErrno: %d!\n",
                       virtualblockdevice_major, ret);
                goto failure_register_blkdev;
        }
        virtualblockdevice_major = ret;
        printk(KERN_ALERT "VirtualBlockDevice: Registered block device! Major: %d!\n",
               virtualblockdevice_major);

        /* get the request_queue */
        virtualblockdevice_queue = blk_init_queue(virtualblockdevice_do_request, NULL);
        if (!virtualblockdevice_queue) {
                printk(KERN_ALERT "VirtualBlockDevice: Failed to init request_queue!\n");
                ret = -ENOMEM;
                goto failure_init_queue;
        }
        printk(KERN_ALERT "VirtualBlockDevice: Initialized request_queue!\n");

        /* switch elevator */
        oldelev = virtualblockdevice_queue->elevator;
        if (IS_ERR_VALUE(elevator_init(virtualblockdevice_queue, "noop"))) {
                printk(KERN_ALERT "VirtualBlockDevice: Failed to switch elevator to noop; keeping the old one!\n");
        } else {
                printk(KERN_ALERT "VirtualBlockDevice: Switched elevator to noop!\n");
                elevator_exit(oldelev);
        }

        /* get the gendisk */
        virtualblockdevice_disk = alloc_disk(1);
        if (!virtualblockdevice_disk) {
                printk(KERN_ALERT "VirtualBlockDevice: Failed to allocate gendisk!\n");
                ret = -ENOMEM;
                goto failure_alloc_disk;
        }
        printk(KERN_ALERT "VirtualBlockDevice: Allocated gendisk!\n");

        /* initialize the gendisk */
        strcpy(virtualblockdevice_disk->disk_name, VIRTUALBLOCKDEVICE_NAME);
        virtualblockdevice_disk->major = virtualblockdevice_major;
        virtualblockdevice_disk->first_minor = virtualblockdevice_minor;
        virtualblockdevice_disk->fops = &virtualblockdevice_fops;
        virtualblockdevice_disk->queue = virtualblockdevice_queue;
        set_capacity(virtualblockdevice_disk,
                     VIRTUALBLOCKDEVICE_DISK_CAPACITY >> 9);

        /* add the gendisk to the kernel */
        add_disk(virtualblockdevice_disk);

        return 0;

failure_alloc_disk:
        blk_cleanup_queue(virtualblockdevice_queue);
failure_init_queue:
        unregister_blkdev(virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME);
failure_register_blkdev:
        return ret;
}
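The matching teardown for this init function is the usual mirror image. It is not part of the original, so this is a sketch of what the module exit would look like, using only the symbols defined above:

/* Sketch of the matching module exit; not in the original source. */
static void __exit virtualblockdevice_exit(void)
{
        del_gendisk(virtualblockdevice_disk);
        put_disk(virtualblockdevice_disk);
        blk_cleanup_queue(virtualblockdevice_queue);
        unregister_blkdev(virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME);
}

module_init(virtualblockdevice_init);
module_exit(virtualblockdevice_exit);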