Example #1
static int __init ramhd_init(void)
{
	int i;

	ramhd_space_init();
	alloc_ramdev();

	ramhd_major = register_blkdev(0, RAMHD_NAME);

	for (i = 0; i < RAMHD_MAX_DEVICE; i++) {
		rdev[i]->data = sdisk[i];
		rdev[i]->queue = blk_alloc_queue(GFP_KERNEL);
		blk_queue_make_request(rdev[i]->queue, ramhd_make_request);
		rdev[i]->gd = alloc_disk(RAMHD_MAX_PARTITIONS);
		rdev[i]->gd->major = ramhd_major;
		rdev[i]->gd->first_minor = i*RAMHD_MAX_PARTITIONS;
		rdev[i]->gd->fops = &ramhd_fops;
		rdev[i]->gd->queue = rdev[i]->queue;
		rdev[i]->gd->private_data = rdev[i];
		sprintf(rdev[i]->gd->disk_name, "ramhd%c", 'a'+i);
		rdev[i]->gd->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		set_capacity(rdev[i]->gd, RAMHD_SECTOR_TOTAL);
		add_disk(rdev[i]->gd);
	}
	return 0;
}
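Like every register_blkdev()/add_disk() pairing in these examples, this init path needs a matching teardown. A minimal sketch, assuming the same rdev[], ramhd_major, RAMHD_NAME and RAMHD_MAX_DEVICE as above (hypothetical, not part of the original snippet):

/* Hypothetical teardown matching ramhd_init() above. */
static void __exit ramhd_exit(void)
{
	int i;

	for (i = 0; i < RAMHD_MAX_DEVICE; i++) {
		del_gendisk(rdev[i]->gd);           /* undo add_disk() */
		put_disk(rdev[i]->gd);              /* drop the alloc_disk() reference */
		blk_cleanup_queue(rdev[i]->queue);  /* release the bio queue */
	}
	unregister_blkdev(ramhd_major, RAMHD_NAME);
}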
Example #2
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR
			"aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}

	d->blkq = blk_alloc_queue(GFP_KERNEL);
	if (!d->blkq)
		goto err_mempool;
	blk_queue_make_request(d->blkq, aoeblk_make_request);
	d->blkq->backing_dev_info.name = "aoe";
	spin_lock_irqsave(&d->lock, flags);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	gd->queue = d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
	return;

err_mempool:
	mempool_destroy(d->bufpool);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GDALLOC;
	spin_unlock_irqrestore(&d->lock, flags);
}
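The comment above is the point of this example: alloc_disk() and add_disk() can sleep, so they must run in process context rather than under d->lock or in an interrupt-driven response path. One common pattern for deferring such work is a workqueue; a hypothetical sketch (the aoe driver itself dispatches aoeblk_gdalloc() differently, and gdalloc_work is an assumed member of struct aoedev):

/* gdalloc_work is a hypothetical work_struct member of struct aoedev. */
static void gdalloc_deferred(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, gdalloc_work);

	aoeblk_gdalloc(d);
}

/* From atomic context, e.g. while holding d->lock: */
INIT_WORK(&d->gdalloc_work, gdalloc_deferred);
schedule_work(&d->gdalloc_work);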
Example #3
File: zvol.c Project: l1k/zfs
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_alloc_queue(GFP_ATOMIC);
	if (zv->zv_queue == NULL)
		goto out_kmem;

	blk_queue_make_request(zv->zv_queue, zvol_request);

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));

	return (NULL);
}
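Each allocation here needs an unwind in the destroy path as well. A sketch of the inverse, using the field names from this snippet (the upstream zvol_free() differs in detail):

/* Sketch of the inverse of zvol_alloc(); assumes the disk was added. */
static void
zvol_free(zvol_state_t *zv)
{
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	del_gendisk(zv->zv_disk);
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}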
Example #4
static int __init stackbd_init(void)
{
	/* Set up our internal device */
	spin_lock_init(&stackbd.lock);

	/* blk_alloc_queue() instead of blk_init_queue() so it won't set up
	 * the queue for requests.
	 */
	if (!(stackbd.queue = blk_alloc_queue(GFP_KERNEL))) {
		printk("stackbd: alloc_queue failed\n");
		return -EFAULT;
	}

	blk_queue_make_request(stackbd.queue, stackbd_make_request);
	blk_queue_logical_block_size(stackbd.queue, LOGICAL_BLOCK_SIZE);

	/* Get registered */
	if ((major_num = register_blkdev(major_num, STACKBD_NAME)) < 0) {
		printk("stackbd: unable to get major number\n");
		goto error_after_alloc_queue;
	}

	/* Gendisk structure */
	if (!(stackbd.gd = alloc_disk(16)))
		goto error_after_register_blkdev;
	stackbd.gd->major = major_num;
	stackbd.gd->first_minor = 0;
	stackbd.gd->fops = &stackbd_ops;
	stackbd.gd->private_data = &stackbd;
	strcpy(stackbd.gd->disk_name, STACKBD_NAME_0);
	stackbd.gd->queue = stackbd.queue;
	add_disk(stackbd.gd);

    printk("stackbd: init done\n");

	return 0;

error_after_register_blkdev:
	unregister_blkdev(major_num, STACKBD_NAME);
error_after_alloc_queue:
	blk_cleanup_queue(stackbd.queue);

	return -EFAULT;
}
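blk_queue_make_request() registers the function that receives every bio directly, with no request queueing, which is why blk_alloc_queue() suffices here. For a stacking driver the handler typically just re-targets the bio at a lower device. A minimal sketch in the same pre-4.4 API era as these examples, assuming stackbd.bdev_raw is the lower struct block_device (the real stackbd defers forwarding to a thread):

/* Sketch only: forwards each bio to an assumed lower device. */
static void stackbd_make_request(struct request_queue *q, struct bio *bio)
{
	spin_lock_irq(&stackbd.lock);
	if (!stackbd.bdev_raw) {
		spin_unlock_irq(&stackbd.lock);
		bio_io_error(bio);              /* no lower device: fail the bio */
		return;
	}
	spin_unlock_irq(&stackbd.lock);

	bio->bi_bdev = stackbd.bdev_raw;    /* re-target at the lower device */
	generic_make_request(bio);          /* hand it back to the block layer */
}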
Example #5
File: zvol.c Project: alek-p/zfs
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_alloc_queue(GFP_ATOMIC);
	if (zv->zv_queue == NULL)
		goto out_kmem;

	blk_queue_make_request(zv->zv_queue, zvol_request);
	blk_queue_set_write_cache(zv->zv_queue, B_TRUE, B_TRUE);

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	zfs_rlock_init(&zv->zv_range_lock);

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));

	return (NULL);
}
Example #6
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	int ret = -ENOMEM;

	mq->card = card;
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

	mmc_setup_queue(mq, card);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
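This is the one request_fn-based example here: blk_alloc_queue() followed by blk_init_allocated_queue() splits blk_init_queue() into two steps, so cmd_size and the init_rq_fn/exit_rq_fn hooks can be set in between. A simplified teardown sketch under those assumptions (the in-tree mmc_cleanup_queue() is more careful and resumes and drains the queue before stopping mmcqd):

/* Simplified, hypothetical teardown for the queue set up above. */
static void mmc_destroy_queue_sketch(struct mmc_queue *mq)
{
	kthread_stop(mq->thread);       /* terminate the mmcqd worker */
	blk_cleanup_queue(mq->queue);   /* drain and release the queue */
	mq->card = NULL;
}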
Example #7
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk(0);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;

	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
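set_capacity() takes 512-byte sectors, hence the division by 512 above. The detach path has to undo add_disk() and the queue allocation; a hypothetical counterpart, driven from the driver's remove() path:

/* Hypothetical detach counterpart for pmem_attach_disk(). */
static void pmem_detach_disk(struct pmem_device *pmem)
{
	del_gendisk(pmem->pmem_disk);           /* undo add_disk() */
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}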
Example #8
File: dm.c Project: wxlong/Test
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(minor) : next_free_minor(&minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
 	if (!md->io_pool)
 		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
Example #9
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	bioset_free(md->bs);
 bad_no_bioset:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	module_put(THIS_MODULE);
 bad0:
	kfree(md);
	return NULL;
}
Example #10
static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res)
{
	struct pmem_device *pmem;
	struct gendisk *disk;
	int idx, err;

	err = -ENOMEM;
	pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		goto out;

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);

	err = -EINVAL;
	if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", &pmem->phys_addr, pmem->size);
		goto out_free_dev;
	}

	/*
	 * Map the memory as write-through, as we can't write back the contents
	 * of the CPU caches in case of a crash.
	 */
	err = -ENOMEM;
	pmem->virt_addr = ioremap_wt(pmem->phys_addr, pmem->size);
	if (!pmem->virt_addr)
		goto out_release_region;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		goto out_unmap;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_max_hw_sectors(pmem->pmem_queue, 1024);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);

	disk = alloc_disk(PMEM_MINORS);
	if (!disk)
		goto out_free_queue;

	idx = atomic_inc_return(&pmem_index) - 1;

	disk->major		= pmem_major;
	disk->first_minor	= PMEM_MINORS * idx;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "pmem%d", idx);
	disk->driverfs_dev = dev;
	set_capacity(disk, pmem->size >> 9);
	pmem->pmem_disk = disk;

	add_disk(disk);

	return pmem;

out_free_queue:
	blk_cleanup_queue(pmem->pmem_queue);
out_unmap:
	iounmap(pmem->virt_addr);
out_release_region:
	release_mem_region(pmem->phys_addr, pmem->size);
out_free_dev:
	kfree(pmem);
out:
	return ERR_PTR(err);
}
Example #11
/**
 * axon_ram_probe - probe() method for platform driver
 * @device: see platform_driver method
 */
static int axon_ram_probe(struct platform_device *device)
{
	static int axon_ram_bank_id = -1;
	struct axon_ram_bank *bank;
	struct resource resource;
	int rc = 0;

	axon_ram_bank_id++;

	dev_info(&device->dev, "Found memory controller on %s\n",
			device->dev.of_node->full_name);

	bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
	if (bank == NULL) {
		dev_err(&device->dev, "Out of memory\n");
		rc = -ENOMEM;
		goto failed;
	}

	device->dev.platform_data = bank;

	bank->device = device;

	if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) {
		dev_err(&device->dev, "Cannot access device tree\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->size = resource_size(&resource);

	if (bank->size == 0) {
		dev_err(&device->dev, "No DDR2 memory found for %s%d\n",
				AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
		rc = -ENODEV;
		goto failed;
	}

	dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);

	bank->ph_addr = resource.start;
	bank->io_addr = (unsigned long) ioremap_prot(
			bank->ph_addr, bank->size, _PAGE_NO_CACHE);
	if (bank->io_addr == 0) {
		dev_err(&device->dev, "ioremap() failed\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK);
	if (bank->disk == NULL) {
		dev_err(&device->dev, "Cannot register disk\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk->major = azfs_major;
	bank->disk->first_minor = azfs_minor;
	bank->disk->fops = &axon_ram_devops;
	bank->disk->private_data = bank;
	bank->disk->driverfs_dev = &device->dev;

	sprintf(bank->disk->disk_name, "%s%d",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id);

	bank->disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (bank->disk->queue == NULL) {
		dev_err(&device->dev, "Cannot register disk queue\n");
		rc = -EFAULT;
		goto failed;
	}

	set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
	blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
	blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
	add_disk(bank->disk);

	bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
	if (bank->irq_id == NO_IRQ) {
		dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
		rc = -EFAULT;
		goto failed;
	}

	rc = request_irq(bank->irq_id, axon_ram_irq_handler,
			AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot register ECC interrupt handler\n");
		bank->irq_id = NO_IRQ;
		rc = -EFAULT;
		goto failed;
	}

	rc = device_create_file(&device->dev, &dev_attr_ecc);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot create sysfs file\n");
		rc = -EFAULT;
		goto failed;
	}

	azfs_minor += bank->disk->minors;

	return 0;

failed:
	if (bank != NULL) {
		if (bank->irq_id != NO_IRQ)
			free_irq(bank->irq_id, device);
		if (bank->disk != NULL) {
			if (bank->disk->major > 0)
				unregister_blkdev(bank->disk->major,
						bank->disk->disk_name);
			del_gendisk(bank->disk);
		}
		device->dev.platform_data = NULL;
		if (bank->io_addr != 0)
			iounmap((void __iomem *) bank->io_addr);
		kfree(bank);
	}

	return rc;
}
Example #12
static int setup_passthrough_device(struct pt_dev *dev, const char *target_name)
{
	struct request_queue *q;

	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (dev->queue == NULL)
		return -1;

	blk_queue_make_request(dev->queue, passthrough_make_request);
	//blk_queue_flush(dev->queue, REQ_FLUSH | REQ_FUA);

	dev->gd = alloc_disk(1);
	if (! dev->gd) {

		return -1;
	}

	dev->gd->major = passthrough->major;
	dev->gd->first_minor = 0;
	dev->gd->fops = &pt_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	dev->gd->flags |= GENHD_FL_EXT_DEVT;

	dev->target_dev = bdget(MKDEV(8,16));//blkdev_get_by_path(target_name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, dev);
	if(!dev->target_dev)
	{
		return -1;
	}

	dev->target_ssd = bdget(MKDEV(8,32));//blkdev_get_by_path(target_name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, dev);
	if(!dev->target_ssd)
	{
		return -1;
	}

	if(!dev->target_dev->bd_disk)
	{
		return -1;
	}

	if(!dev->target_ssd->bd_disk)
	{
		return -1;
	}

	q = bdev_get_queue(dev->target_dev);
	if(!q)
	{
		return -1;
	}

	dev->gd->queue->limits.max_hw_sectors	= q->limits.max_hw_sectors;
	dev->gd->queue->limits.max_sectors	= q->limits.max_sectors;
	dev->gd->queue->limits.max_segment_size	= q->limits.max_segment_size;
//	dev->gd->queue->limits.max_segments	= q->limits.max_segments;
	dev->gd->queue->limits.logical_block_size  = 512;
	dev->gd->queue->limits.physical_block_size = 512;
	set_bit(QUEUE_FLAG_NONROT, &dev->gd->queue->queue_flags);

	snprintf (dev->gd->disk_name, 32, "passthrough");

	set_capacity(dev->gd, get_capacity(dev->target_dev->bd_disk));

	add_disk(dev->gd);

	return 1;
}
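The bdget(MKDEV(8,16)) calls hard-code sd major/minor numbers; the commented-out blkdev_get_by_path() is the sturdier variant, since it resolves a device path and takes an exclusive holder. A sketch of that variant for the first target (error handling differs because it returns an ERR_PTR, not NULL):

	/* Sketch: path-based open of the lower device instead of bdget(). */
	dev->target_dev = blkdev_get_by_path(target_name,
			FMODE_READ | FMODE_WRITE | FMODE_EXCL, dev);
	if (IS_ERR(dev->target_dev)) {
		dev->target_dev = NULL;     /* don't leave an ERR_PTR behind */
		return -1;
	}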
Example #13
/*
 * Set up our internal device.
 */
static void setup_device(struct sbull_dev *dev, int which)
{
	printk(KERN_EMERG "sbull: setup_device\n");
	/*
	 * Get some memory.
	 */
	memset (dev, 0, sizeof (struct sbull_dev));
	dev->size = nsectors*hardsect_size;
	dev->data = vmalloc(dev->size);
	if (dev->data == NULL) {
		printk (KERN_NOTICE "vmalloc failure.\n");
		return;
	}
	spin_lock_init(&dev->lock);
	
	/*
	 * The timer which "invalidates" the device.
	 */
	init_timer(&dev->timer);
	dev->timer.data = (unsigned long) dev;
	dev->timer.function = sbull_invalidate;
	
	/*
	 * The I/O queue, depending on whether we are using our own
	 * make_request function or not.
	 */
	switch (request_mode) {
	    case RM_NOQUEUE:
		dev->queue = blk_alloc_queue(GFP_KERNEL);
		if (dev->queue == NULL)
			goto out_vfree;
		blk_queue_make_request(dev->queue, sbull_make_request);
		break;

	    case RM_FULL:
		dev->queue = blk_init_queue(sbull_full_request, &dev->lock);
		if (dev->queue == NULL)
			goto out_vfree;
		break;

	    default:
		printk(KERN_NOTICE "Bad request mode %d, using simple\n", request_mode);
        	/* fall into.. */
	
	    case RM_SIMPLE:
		dev->queue = blk_init_queue(sbull_request, &dev->lock);
		if (dev->queue == NULL)
			goto out_vfree;
		break;
	}
	blk_queue_logical_block_size(dev->queue, hardsect_size);
	dev->queue->queuedata = dev;
	/*
	 * And the gendisk structure.
	 */
	dev->gd = alloc_disk(SBULL_MINORS);
	if (! dev->gd) {
		printk (KERN_NOTICE "alloc_disk failure\n");
		goto out_vfree;
	}
	dev->gd->major = sbull_major;
	dev->gd->first_minor = which*SBULL_MINORS;
	dev->gd->fops = &sbull_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf (dev->gd->disk_name, 32, "sbull%c", which + 'a');
	set_capacity(dev->gd, nsectors*(hardsect_size/KERNEL_SECTOR_SIZE));
	add_disk(dev->gd);
	return;

  out_vfree:
	if (dev->data)
		vfree(dev->data);
}
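In RM_NOQUEUE mode the block layer calls sbull_make_request() once per bio, with no queueing. A sketch of such a handler in the same API era as these examples (two-argument bio_endio(), bi_sector), assuming sbull_transfer() copies whole sectors to or from dev->data as in LDD3:

/* Sketch of the RM_NOQUEUE bio handler wired up above. */
static void sbull_make_request(struct request_queue *q, struct bio *bio)
{
	struct sbull_dev *dev = q->queuedata;
	struct bio_vec *bvec;
	sector_t sector = bio->bi_sector;
	int i;

	bio_for_each_segment(bvec, bio, i) {
		char *page_addr = kmap_atomic(bvec->bv_page);

		sbull_transfer(dev, sector, bvec->bv_len >> 9,
				page_addr + bvec->bv_offset,
				bio_data_dir(bio) == WRITE);
		sector += bvec->bv_len >> 9;
		kunmap_atomic(page_addr);
	}
	bio_endio(bio, 0);      /* complete the whole bio with success */
}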
Example #14
		ramblk_data += bv->bv_len;
	}
out:
	bio_endio(bio, err);
	return 0;
}
static int __init ramblk_init(void)
{
	int ramblk_major;
	struct request_queue *rq;
	ramblk_major = register_blkdev(0, "firo-ramblk");
	if (ramblk_major < 0){
		printk("blk major is over!\n");
		return -EBUSY
	}
	rq = blk_alloc_queue(GFP_KERNEL);
	if (!rq) {
		printk("ramblk: alloc queue error\n");
		unregister_blkdev(ramblk_major, "firo-ramblk");
		return -ENOMEM;
	}

	blk_queue_make_request(rq, ramblk_make_request);


	ramdisk = alloc_disk(1);
	if (!ramdisk) {
		blk_cleanup_queue(rq);
		unregister_blkdev(ramblk_major, "firo-ramblk");
		return -ENOMEM;
	}
	ramdisk->major = ramblk_major;
	ramdisk->first_minor = 0;
	ramdisk->fops = &ramblk_ops;
	ramdisk->queue = rq;
	/* set_capacity() takes the gendisk, not the queue */
	set_capacity(ramdisk, RAMBLK_TOTAL_BYTES >> 9);
	add_disk(ramdisk);

	return 0;
}
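A hypothetical exit path for this example, assuming ramdisk (and its queue) remain reachable from module scope:

/* Hypothetical module exit for the ramblk example. */
static void __exit ramblk_exit(void)
{
	del_gendisk(ramdisk);
	blk_cleanup_queue(ramdisk->queue);
	unregister_blkdev(ramdisk->major, "firo-ramblk");
	put_disk(ramdisk);      /* drop the last reference */
}

module_init(ramblk_init);
module_exit(ramblk_exit);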
Example #15
int td_linux_block_create(struct td_osdev *dev)
{
	int rc;
	struct request_queue *queue;
	unsigned bio_sector_size = dev->block_params.bio_sector_size;
	unsigned hw_sector_size = dev->block_params.hw_sector_size;

	/* very simple sector size support */
	if (!bio_sector_size || bio_sector_size & 511 || bio_sector_size > 4096) {
		td_os_err(dev, "bio sector size of %u is not supported\n", bio_sector_size);
		return -EINVAL;
	}

	/* MetaData is reported here */
	if (hw_sector_size == 520)
		hw_sector_size = 512;
	if (!hw_sector_size || hw_sector_size & 511 || hw_sector_size > 4096) {
		td_os_err(dev, "hw sector size of %u is not supported\n", hw_sector_size);
		return -EINVAL;
	}

	td_os_notice(dev, " - Set capacity to %llu (%u bytes/sector)\n",
		dev->block_params.capacity, dev->block_params.hw_sector_size);

	/* create a new bio queue */
	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		td_os_err(dev, "Error allocating disk queue.\n");
		rc = -ENOMEM;
		goto error_alloc_queue;
	}

#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
#endif
	
	switch (dev->type) {
	case TD_OSDEV_DEVICE:
		blk_queue_make_request(queue, td_device_make_request);
		dev->_bio_error = td_device_bio_error;
		break;
	case TD_OSDEV_RAID:
		blk_queue_make_request(queue, td_raid_make_request);
		dev->_bio_error = td_raid_bio_error;
		break;
		
	default:
		td_os_err(dev, "Unknown OS type, cannot register block request handler\n");
		rc = -EINVAL;
		goto error_config_queue;
	}
	queue->queuedata = dev;

#if defined QUEUE_FLAG_PLUGGED 
	queue->unplug_fn = td_device_queue_unplug;
#endif

	/* configure queue ordering */

	/* in QUEUE_ORDERED_DRAIN we will get BARRIERS after the queue has
	 * been drained. */
#if defined KABI__blk_queue_ordered

#if KABI__blk_queue_ordered == 2
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN);
#elif KABI__blk_queue_ordered == 3
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN, NULL);
#else
#error unhandled value of KABI__blk_queue_ordered
#endif

#elif defined KABI__blk_queue_flush
	/*
	 * blk_queue_ordered was replaced with blk_queue_flush 
	 * The default implementation is QUEUE_ORDERED_DRAIN
	 */
	blk_queue_flush(queue, 0);
#else
#error undefined KABI__blk_queue_flush or KABI__blk_queue_ordered
#endif

	/* max out the throttling */
#ifdef KABI__blk_queue_max_hw_sectors
	blk_queue_max_hw_sectors(queue, dev->block_params.bio_max_bytes/512);
#elif defined KABI__blk_queue_max_sectors
	blk_queue_max_sectors(queue, dev->block_params.bio_max_bytes/512);
#else
	td_os_err(dev, "No kernel API for maximum sectors\n");
#endif

#if defined KABI__blk_queue_max_segments
	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
#elif defined KABI__blk_queue_max_phys_segments
	blk_queue_max_phys_segments(queue, MAX_SEGMENT_SIZE);
	blk_queue_max_hw_segments(queue, MAX_SEGMENT_SIZE);
#else
	td_os_err(dev, "No kernel API for maximum segments\n");
#endif

	blk_queue_max_segment_size(queue, dev->block_params.bio_max_bytes);

	blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);

	/* setup paged based access */
	td_os_info(dev, "Set queue physical block size to %u\n", hw_sector_size);
#ifdef KABI__blk_queue_physical_block_size
	blk_queue_physical_block_size(queue, hw_sector_size);
#elif defined KABI__blk_queue_hardsect_size
	blk_queue_hardsect_size(queue, hw_sector_size);
#else
	td_os_err(dev, "No kernel API for physical sector size\n");
#endif

#ifdef KABI__blk_queue_logical_block_size
	td_os_info(dev, "Set queue logical block size to %u\n", bio_sector_size);
	blk_queue_logical_block_size(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for logical block size\n");
#endif
#ifdef KABI__blk_queue_io_min
	td_os_info(dev, "Set queue io_min to %u\n", bio_sector_size);
	blk_queue_io_min(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for minimum IO size\n");
#endif
#ifdef KABI__blk_queue_io_opt
	td_os_info(dev, "Set queue io_opt to %u\n", dev->block_params.bio_max_bytes);
	blk_queue_io_opt(queue,  dev->block_params.bio_max_bytes);
#else
	td_os_err(dev, "No kernel API for optimal IO size\n");
#endif

#if 0
	if (dev->block_params.discard)
	{
		int did_something = 0;
#if defined KABI__blk_queue_discard_granularity
		queue->limits.discard_granularity = bio_sector_size;
		did_something++;
#endif
#ifdef KABI__blk_queue_max_discard_sectors
		/* 0xFFFF (max sector size of chunk on trim) * 64  * # SSD */
		blk_queue_max_discard_sectors(queue, TD_MAX_DISCARD_LBA_COUNT * 2);
		did_something++;
#endif
#ifdef KABI__blk_queue_discard_zeroes_data
		queue->limits.discard_zeroes_data = 1;
		did_something++;
#endif
#ifdef KABI__queue_flag_set_unlocked
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
		did_something++;
#endif
		/* Maybe some day.. But not today. 
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, queue);
		*/
		if (did_something)
			td_os_info(dev, "Enabling discard support\n");
		else
			td_os_notice(dev, "No kernel API for discard support\n");
	} else {
		td_os_info(dev, "No DISCARD support enabled\n");
	}
#else
	/* bug 7444 */
	if (dev->block_params.discard)
		td_os_info(dev, "Device supports DISCARD but is currently being forced disabled\n");
#endif

	/*  assign */
	dev->queue = queue;

	return 0;

error_config_queue:
	blk_cleanup_queue(dev->queue);
	dev->queue = NULL;

error_alloc_queue:
	return rc;
}