/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 *
 * Lock ordering is queue_lock (outer, irq-disabling) -> ioc->lock (inner).
 * Each icq is owned jointly by its queue and the ioc, so exiting an icq
 * requires both locks held; the loop below repeatedly re-acquires them in
 * the correct order each time the icq at the head of the list belongs to
 * a different queue than the previous one.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	struct request_queue *last_q = NULL;

	spin_lock_irq(&ioc->lock);

	/* Drain icq_list one entry at a time; the list head is re-read on
	 * every iteration because the locks are dropped inside the loop. */
	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			/*
			 * Need to switch to @this_q.  Once we release
			 * @ioc->lock, it can go away along with @cic.
			 * Hold on to it.
			 */
			__blk_get_queue(this_q);

			/*
			 * blk_put_queue() might sleep thanks to kobject
			 * idiocy.  Always release both locks, put and
			 * restart.
			 */
			if (last_q) {
				spin_unlock(last_q->queue_lock);
				spin_unlock_irq(&ioc->lock);
				blk_put_queue(last_q);
			} else {
				spin_unlock_irq(&ioc->lock);
			}

			/* Re-acquire in outer->inner order for the new
			 * queue, then retry with the (possibly changed)
			 * list head. */
			last_q = this_q;
			spin_lock_irq(this_q->queue_lock);
			spin_lock(&ioc->lock);
			continue;
		}
		/* Both locks held for icq->q at this point. */
		ioc_exit_icq(icq);
	}

	/* Drop whatever queue reference the final iteration left us with. */
	if (last_q) {
		spin_unlock(last_q->queue_lock);
		spin_unlock_irq(&ioc->lock);
		blk_put_queue(last_q);
	} else {
		spin_unlock_irq(&ioc->lock);
	}

	kmem_cache_free(iocontext_cachep, ioc);
}
/*
 * Tear down a fully-constructed mapped_device: release its mempools,
 * unregister and drop the gendisk, release the request queue reference,
 * return the minor number to the allocator and free the structure.
 *
 * Fix: the minor number must only be returned to the free pool *after*
 * del_gendisk() has unregistered the disk.  The original code called
 * free_minor() first, which allowed a concurrent alloc_dev() to grab the
 * same minor while this gendisk was still registered.  The minor is also
 * saved up front so we never touch md->disk after it is being torn down.
 */
static void free_dev(struct mapped_device *md)
{
	unsigned int minor = md->disk->first_minor;

	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	/* Safe to recycle the minor only once the disk is unregistered. */
	free_minor(minor);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}
/*
 * Release every resource owned by a mapped_device and free it.
 *
 * Ordering matters: the frozen bdev (if any) is thawed first, the minor
 * number is recycled only after del_gendisk() so it cannot be reused
 * while the disk is still registered, and the structure itself goes last.
 */
static void free_dev(struct mapped_device *md)
{
	/* Capture the minor before the gendisk is dismantled. */
	unsigned int released_minor = md->disk->first_minor;
	struct block_device *frozen = md->suspended_bdev;

	if (frozen) {
		thaw_bdev(frozen, NULL);
		bdput(frozen);
	}

	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);

	del_gendisk(md->disk);
	free_minor(released_minor);
	put_disk(md->disk);

	blk_put_queue(md->queue);
	kfree(md);
}
/*
 * Dispatch the block-layer SCSI ioctls against request queue @q.
 *
 * @q:       request queue the command is issued to (pinned for the call)
 * @bd_disk: gendisk the ioctl was invoked on
 * @mode:    open mode of the caller's file (permission checks in sg_io)
 * @cmd:     ioctl number
 * @arg:     userspace argument pointer
 *
 * Returns 0 on success or a negative errno.  Takes a queue reference for
 * the duration of the call and always drops it before returning.
 */
int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk,
		   fmode_t mode, unsigned int cmd, void __user *arg)
{
	int err;

	/* Pin the queue so it cannot disappear mid-ioctl. */
	if (!q || blk_get_queue(q))
		return -ENXIO;

	switch (cmd) {
		/*
		 * new sgv3 interface
		 */
		case SG_GET_VERSION_NUM:
			err = sg_get_version(arg);
			break;
		case SCSI_IOCTL_GET_IDLUN:
			err = scsi_get_idlun(q, arg);
			break;
		case SCSI_IOCTL_GET_BUS_NUMBER:
			err = scsi_get_bus(q, arg);
			break;
		case SG_SET_TIMEOUT:
			err = sg_set_timeout(q, arg);
			break;
		case SG_GET_TIMEOUT:
			err = sg_get_timeout(q);
			break;
		case SG_GET_RESERVED_SIZE:
			err = sg_get_reserved_size(q, arg);
			break;
		case SG_SET_RESERVED_SIZE:
			err = sg_set_reserved_size(q, arg);
			break;
		case SG_EMULATED_HOST:
			err = sg_emulated_host(q, arg);
			break;
		case SG_IO: {
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&hdr, arg, sizeof(hdr)))
				break;
			err = sg_io(q, bd_disk, &hdr, mode);
			/* -EFAULT from sg_io means the header itself was
			 * bad; don't copy garbage back. */
			if (err == -EFAULT)
				break;

			if (copy_to_user(arg, &hdr, sizeof(hdr)))
				err = -EFAULT;
			break;
		}
		case CDROM_SEND_PACKET: {
			/* Translate the legacy cdrom_generic_command into
			 * an sg_io_hdr and route it through sg_io(). */
			struct cdrom_generic_command cgc;
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&cgc, arg, sizeof(cgc)))
				break;
			cgc.timeout = clock_t_to_jiffies(cgc.timeout);
			memset(&hdr, 0, sizeof(hdr));
			hdr.interface_id = 'S';
			hdr.cmd_len = sizeof(cgc.cmd);
			hdr.dxfer_len = cgc.buflen;
			err = 0;
			switch (cgc.data_direction) {
				case CGC_DATA_UNKNOWN:
					hdr.dxfer_direction = SG_DXFER_UNKNOWN;
					break;
				case CGC_DATA_WRITE:
					hdr.dxfer_direction = SG_DXFER_TO_DEV;
					break;
				case CGC_DATA_READ:
					hdr.dxfer_direction = SG_DXFER_FROM_DEV;
					break;
				case CGC_DATA_NONE:
					hdr.dxfer_direction = SG_DXFER_NONE;
					break;
				default:
					err = -EINVAL;
			}
			if (err)
				break;

			hdr.dxferp = cgc.buffer;
			hdr.sbp = cgc.sense;
			if (hdr.sbp)
				hdr.mx_sb_len = sizeof(struct request_sense);
			/* sg_io_hdr timeouts are in milliseconds. */
			hdr.timeout = jiffies_to_msecs(cgc.timeout);
			hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
			/* NOTE(review): cmd_len was already set above; this
			 * second assignment is redundant but harmless. */
			hdr.cmd_len = sizeof(cgc.cmd);

			err = sg_io(q, bd_disk, &hdr, mode);
			if (err == -EFAULT)
				break;

			if (hdr.status)
				err = -EIO;

			cgc.stat = err;
			cgc.buflen = hdr.resid;
			if (copy_to_user(arg, &cgc, sizeof(cgc)))
				err = -EFAULT;

			break;
		}

		/*
		 * old junk scsi send command ioctl
		 */
		case SCSI_IOCTL_SEND_COMMAND:
			printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
			err = -EINVAL;
			if (!arg)
				break;

			err = sg_scsi_ioctl(q, bd_disk, mode, arg);
			break;
		case CDROMCLOSETRAY:
			/* START STOP UNIT with LoEj|Start = close tray. */
			err = blk_send_start_stop(q, bd_disk, 0x03);
			break;
		case CDROMEJECT:
			/* START STOP UNIT with LoEj only = eject. */
			err = blk_send_start_stop(q, bd_disk, 0x02);
			break;
		default:
			err = -ENOTTY;
	}

	blk_put_queue(q);
	return err;
}
/* * Allocate and initialise a blank device with a given minor. */ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) { int r; struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; } /* get a minor number for the dev */ r = persistent ? specific_minor(minor) : next_free_minor(&minor); if (r < 0) goto bad1; memset(md, 0, sizeof(*md)); init_rwsem(&md->lock); rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); md->queue = blk_alloc_queue(GFP_KERNEL); if (!md->queue) goto bad1; md->queue->queuedata = md; md->queue->backing_dev_info.congested_fn = dm_any_congested; md->queue->backing_dev_info.congested_data = md; blk_queue_make_request(md->queue, dm_request); md->queue->unplug_fn = dm_unplug_all; md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, mempool_free_slab, _io_cache); if (!md->io_pool) goto bad2; md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, mempool_free_slab, _tio_cache); if (!md->tio_pool) goto bad3; md->disk = alloc_disk(1); if (!md->disk) goto bad4; md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); add_disk(md->disk); atomic_set(&md->pending, 0); init_waitqueue_head(&md->wait); init_waitqueue_head(&md->eventq); return md; bad4: mempool_destroy(md->tio_pool); bad3: mempool_destroy(md->io_pool); bad2: blk_put_queue(md->queue); free_minor(minor); bad1: kfree(md); return NULL; }
/*
 * Dispatch the block-layer SCSI ioctls for @bd_disk.
 *
 * @file:    caller's file (passed through to sg_io/sg_scsi_ioctl for
 *           permission/mode checks)
 * @bd_disk: gendisk the ioctl was invoked on; its queue is used
 * @cmd:     ioctl number
 * @arg:     userspace argument pointer
 *
 * Returns 0 on success or a negative errno.  Pins the queue for the
 * duration of the call and always drops the reference before returning.
 */
int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
{
	request_queue_t *q;
	struct request *rq;
	int close = 0, err;

	q = bd_disk->queue;
	if (!q)
		return -ENXIO;

	/* Pin the queue so it cannot disappear mid-ioctl. */
	if (blk_get_queue(q))
		return -ENXIO;

	switch (cmd) {
		/*
		 * new sgv3 interface
		 */
		case SG_GET_VERSION_NUM:
			err = sg_get_version(arg);
			break;
		case SCSI_IOCTL_GET_IDLUN:
			err = scsi_get_idlun(q, arg);
			break;
		case SCSI_IOCTL_GET_BUS_NUMBER:
			err = scsi_get_bus(q, arg);
			break;
		case SG_SET_TIMEOUT:
			err = sg_set_timeout(q, arg);
			break;
		case SG_GET_TIMEOUT:
			err = sg_get_timeout(q);
			break;
		case SG_GET_RESERVED_SIZE:
			err = sg_get_reserved_size(q, arg);
			break;
		case SG_SET_RESERVED_SIZE:
			err = sg_set_reserved_size(q, arg);
			break;
		case SG_EMULATED_HOST:
			err = sg_emulated_host(q, arg);
			break;
		case SG_IO: {
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&hdr, arg, sizeof(hdr)))
				break;
			err = sg_io(file, q, bd_disk, &hdr);
			/* -EFAULT from sg_io means the header itself was
			 * bad; don't copy garbage back to userspace. */
			if (err == -EFAULT)
				break;

			if (copy_to_user(arg, &hdr, sizeof(hdr)))
				err = -EFAULT;
			break;
		}
		case CDROM_SEND_PACKET: {
			/* Translate the legacy cdrom_generic_command into
			 * an sg_io_hdr and route it through sg_io(). */
			struct cdrom_generic_command cgc;
			struct sg_io_hdr hdr;

			err = -EFAULT;
			if (copy_from_user(&cgc, arg, sizeof(cgc)))
				break;
			cgc.timeout = clock_t_to_jiffies(cgc.timeout);
			memset(&hdr, 0, sizeof(hdr));
			hdr.interface_id = 'S';
			hdr.cmd_len = sizeof(cgc.cmd);
			hdr.dxfer_len = cgc.buflen;
			err = 0;
			switch (cgc.data_direction) {
				case CGC_DATA_UNKNOWN:
					hdr.dxfer_direction = SG_DXFER_UNKNOWN;
					break;
				case CGC_DATA_WRITE:
					hdr.dxfer_direction = SG_DXFER_TO_DEV;
					break;
				case CGC_DATA_READ:
					hdr.dxfer_direction = SG_DXFER_FROM_DEV;
					break;
				case CGC_DATA_NONE:
					hdr.dxfer_direction = SG_DXFER_NONE;
					break;
				default:
					err = -EINVAL;
			}
			if (err)
				break;

			hdr.dxferp = cgc.buffer;
			hdr.sbp = cgc.sense;
			if (hdr.sbp)
				hdr.mx_sb_len = sizeof(struct request_sense);
			/* NOTE(review): cgc.timeout was converted to jiffies
			 * above and is passed on as-is here; later versions
			 * convert via jiffies_to_msecs() because sg_io_hdr
			 * timeouts are in milliseconds -- confirm what this
			 * era's sg_io() expects. */
			hdr.timeout = cgc.timeout;
			hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
			/* NOTE(review): cmd_len was already set above; this
			 * second assignment is redundant but harmless. */
			hdr.cmd_len = sizeof(cgc.cmd);

			err = sg_io(file, q, bd_disk, &hdr);
			if (err == -EFAULT)
				break;

			if (hdr.status)
				err = -EIO;

			cgc.stat = err;
			cgc.buflen = hdr.resid;
			if (copy_to_user(arg, &cgc, sizeof(cgc)))
				err = -EFAULT;

			break;
		}

		/*
		 * old junk scsi send command ioctl
		 */
		case SCSI_IOCTL_SEND_COMMAND:
			err = -EINVAL;
			if (!arg)
				break;

			err = sg_scsi_ioctl(file, q, bd_disk, arg);
			break;
		case CDROMCLOSETRAY:
			close = 1;
			/* fallthrough - CDROMEJECT issues the same START
			 * STOP UNIT command, differing only in cmd[4]. */
		case CDROMEJECT:
			rq = blk_get_request(q, WRITE, __GFP_WAIT);
			rq->flags |= REQ_BLOCK_PC;
			rq->data = NULL;
			rq->data_len = 0;
			rq->timeout = BLK_DEFAULT_TIMEOUT;
			memset(rq->cmd, 0, sizeof(rq->cmd));
			rq->cmd[0] = GPCMD_START_STOP_UNIT;
			/* cmd[4]: LoEj bit (0x02) plus Start bit when
			 * closing the tray. */
			rq->cmd[4] = 0x02 + (close != 0);
			rq->cmd_len = 6;
			err = blk_execute_rq(q, bd_disk, rq);
			blk_put_request(rq);
			break;
		default:
			err = -ENOTTY;
	}

	blk_put_queue(q);
	return err;
}