static int tbio_ioctl(struct inode *ino, struct file *file, unsigned int cmd, unsigned long arg) { int err; // request_queue_t *q; //q = bdev_get_queue(Device.bdev); printk("ttbio: ioctl 0x%x 0x%lx\n", cmd, arg); switch (cmd) { case LTP_TBIO_DO_IO: { err = bd_claim(Device.bdev, current); if (err) { printk("tbio:bd_claim\n"); break; } err = tbio_io(Device.bdev, (struct tbio_interface *)arg); bd_release(Device.bdev); } break; case LTP_TBIO_CLONE: err = test_bio_clone(); break; case LTP_TBIO_ADD_PAGE: err = test_bio_add_page(); break; case LTP_TBIO_ALLOC: err = test_bio_alloc(); break; case LTP_TBIO_GET_NR_VECS: err = test_bio_get_nr_vecs(); break; case LTP_TBIO_PUT: err = test_bio_put(tbiop); break; case LTP_TBIO_SPLIT: { err = bd_claim(Device.bdev, current); if (err) { printk("tbio:bd_claim\n"); break; } err = test_bio_split(Device.bdev, (struct tbio_interface *)arg); bd_release(Device.bdev); } break; //case LTP_TBIO_PAIR_RELEASE: err = test_bio_pair_release();break; } return 0; }
/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {
		/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	/* raw_mutex serializes against bind/unbind of raw_devices[]. */
	down(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;

	/*
	 * Acquisition order below is significant and each step has a
	 * matching release on the error-unwind path:
	 *   igrab()       -> inode ref, dropped via blkdev_put()
	 *   blkdev_get()  -> opens the device, undone by blkdev_put() (out1)
	 *   bd_claim()    -> exclusive claim, undone by bd_release() (out2)
	 */
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode, 0);
	if (err)
		goto out;
	err = bd_claim(bdev, raw_open);
	if (err)
		goto out1;

	/* Smallest hardware sector size = finest possible alignment. */
	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
	if (err)
		goto out2;

	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	/* First opener redirects the raw node's mapping at the blockdev's. */
	if (++raw_devices[minor].inuse == 1)
		filp->f_dentry->d_inode->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	up(&raw_mutex);
	return 0;

out2:
	bd_release(bdev);
out1:
	blkdev_put(bdev);
out:
	up(&raw_mutex);
	return err;
}
/*
 * Open a device so we can use it as a map destination.
 *
 * On success the opened block device is stored in d->bdev and 0 is
 * returned; on claim failure the device is closed again and the error
 * from bd_claim() is returned.
 */
static int open_dev(struct dm_dev *d, dev_t dev)
{
	/* Claim cookie identifying device-mapper as the exclusive holder. */
	static char *claim_owner = "I belong to device-mapper";
	struct block_device *b;
	int ret;

	/* A dm_dev must never be opened twice. */
	if (d->bdev)
		BUG();

	b = open_by_devnum(dev, d->mode);
	if (IS_ERR(b))
		return PTR_ERR(b);

	ret = bd_claim(b, claim_owner);
	if (ret) {
		/* Claim failed: undo the open before reporting the error. */
		blkdev_put(b);
		return ret;
	}

	d->bdev = b;
	return 0;
}
static struct block_device *stackbd_bdev_open(char dev_path[]) { /* Open underlying device */ struct block_device *bdev_raw = lookup_bdev(dev_path); printk("Opened %s\n", dev_path); if (IS_ERR(bdev_raw)) { printk("stackbd: error opening raw device <%lu>\n", PTR_ERR(bdev_raw)); return NULL; } if (!bdget(bdev_raw->bd_dev)) { printk("stackbd: error bdget()\n"); return NULL; } /* FIXME:VER */ /* if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE, &stackbd))*/ if (blkdev_get(bdev_raw, STACKBD_BDEV_MODE)) { printk("stackbd: error blkdev_get()\n"); bdput(bdev_raw); return NULL; } if (bd_claim(bdev_raw, &stackbd)) { printk("stackbd: error bd_claim()\n"); bdput(bdev_raw); return NULL; } return bdev_raw; }