/*
 * bsg_remove_queue - drain and unregister a bsg request_queue
 * @q: the request_queue being torn down (NULL is a no-op)
 *
 * Stops the queue, then repeatedly fetches outstanding requests and
 * fails them with -ENXIO until the queue's request counters drain to
 * zero, finally unregistering the bsg device.
 *
 * NOTE(review): uses the legacy (pre blk-mq) request_list counters in
 * q->rq — confirm this matches the kernel version being targeted.
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req;	/* request fetched from the queue */
	int counts;		/* in-flight + starved request totals */

	if (!q)
		return;

	/* queue_lock protects both the fetch and the counter snapshot */
	spin_lock_irq(q->queue_lock);
	/* stop dispatching new requests while we drain */
	blk_stop_queue(q);
	while (1) {
		/* may return the same request as the previous pass */
		req = blk_fetch_request(q);
		/* snapshot allocated + starved counts, both directions */
		counts = q->rq.count[0] + q->rq.count[1] +
			 q->rq.starved[0] + q->rq.starved[1];
		spin_unlock_irq(q->queue_lock);
		/* fully drained: nothing allocated, nothing starved */
		if (counts == 0)
			break;
		if (req) {
			/* -ENXIO tells the submitter this queue is going away */
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}
		msleep(200);	/* give in-flight work a chance to finish */
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}
/*
 * bsg_remove_queue - tear down a bsg queue and its tag set
 * @q: queue to destroy; NULL is ignored
 *
 * Recovers the owning bsg_set from the queue's tag_set, unregisters
 * the bsg device, destroys the queue, and then releases the tag set
 * and the bsg_set container itself.
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct bsg_set *bset;

	if (!q)
		return;

	bset = container_of(q->tag_set, struct bsg_set, tag_set);
	bsg_unregister_queue(q);
	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&bset->tag_set);
	kfree(bset);
}
/**
 * bsg_remove_queue - Deletes the bsg dev from the q
 * @q: the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue, fail any requests that are still
 *   blocked on it so no submitter is left waiting.
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req;	/* block request being failed */
	int counts;		/* totals for request_list count and starved */

	if (!q)
		return;

	/* Stop taking in new requests */
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	/* drain all requests in the queue */
	while (1) {
		/*
		 * Need the lock to fetch a request; this may fetch the
		 * same request as the previous pass.
		 */
		req = blk_fetch_request(q);

		/* snapshot requests in use and starved, both directions */
		counts = q->root_rl.count[0] + q->root_rl.count[1] +
			 q->root_rl.starved[0] + q->root_rl.starved[1];
		spin_unlock_irq(q->queue_lock);

		/* any requests still outstanding? */
		if (counts == 0)
			break;

		/*
		 * This may be the same req as the previous iteration;
		 * always send blk_end_request_all after a fetch.  It is
		 * not okay to skip ending the request, because the fetch
		 * started it.
		 */
		if (req) {
			/*
			 * Return -ENXIO to indicate that this queue is
			 * going away.
			 */
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		msleep(200);	/* allow bsg to possibly finish */
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}