static int tbio_ioctl(struct inode *ino, struct file *file, unsigned int cmd, unsigned long arg) { int err; // request_queue_t *q; //q = bdev_get_queue(Device.bdev); printk("ttbio: ioctl 0x%x 0x%lx\n", cmd, arg); switch (cmd) { case LTP_TBIO_DO_IO: { err = bd_claim(Device.bdev, current); if (err) { printk("tbio:bd_claim\n"); break; } err = tbio_io(Device.bdev, (struct tbio_interface *)arg); bd_release(Device.bdev); } break; case LTP_TBIO_CLONE: err = test_bio_clone(); break; case LTP_TBIO_ADD_PAGE: err = test_bio_add_page(); break; case LTP_TBIO_ALLOC: err = test_bio_alloc(); break; case LTP_TBIO_GET_NR_VECS: err = test_bio_get_nr_vecs(); break; case LTP_TBIO_PUT: err = test_bio_put(tbiop); break; case LTP_TBIO_SPLIT: { err = bd_claim(Device.bdev, current); if (err) { printk("tbio:bd_claim\n"); break; } err = test_bio_split(Device.bdev, (struct tbio_interface *)arg); bd_release(Device.bdev); } break; //case LTP_TBIO_PAIR_RELEASE: err = test_bio_pair_release();break; } return 0; }
/*
 * Close a device that we've been using.
 *
 * Releases our claim on the underlying block device, drops the open
 * reference, and clears the pointer so a second call is a no-op.
 */
static void close_dev(struct dm_dev *d)
{
	struct block_device *bdev = d->bdev;

	if (bdev) {
		bd_release(bdev);
		blkdev_put(bdev);
		d->bdev = NULL;
	}
}
/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 *
 * Returns 0 on success or a negative errno (-ENODEV if the raw device is
 * not bound, or whatever blkdev_get/bd_claim/set_blocksize report).
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	/* raw_mutex guards the binding table and the inuse counts */
	down(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;

	/* Hold a reference on the blockdev inode across this open */
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode, 0);
	if (err)
		goto out;

	/* Claim the device exclusively; raw_open serves as the holder tag */
	err = bd_claim(bdev, raw_open);
	if (err)
		goto out1;

	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
	if (err)
		goto out2;

	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	/* First opener redirects the char node's mapping to the blockdev's */
	if (++raw_devices[minor].inuse == 1)
		filp->f_dentry->d_inode->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	up(&raw_mutex);
	return 0;

	/* Error unwind: release in reverse order of acquisition */
out2:
	bd_release(bdev);
out1:
	blkdev_put(bdev);
out:
	up(&raw_mutex);
	return err;
}
/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 *
 * The claim and open reference taken in raw_open() are dropped
 * unconditionally; only the mapping restore is gated on the last closer.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	/* raw_mutex protects the binding table and inuse count */
	down(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0) {
		/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
		inode->i_mapping = &inode->i_data;
		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
	}
	up(&raw_mutex);

	/* Undo raw_open(): drop the exclusive claim, then the open ref */
	bd_release(bdev);
	blkdev_put(bdev, BDEV_RAW);
	return 0;
}