Example #1
static int trd_init(void)
{
	int i;

	trd_pages = (TRD_SIZE + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	if (register_blkdev(MAJOR_NR, DEVICE_NAME, &trd_fops)) {
		printk(KERN_ERR DEVICE_NAME ": Unable to register_blkdev(%d)\n", MAJOR_NR);
		return -EBUSY;
	}

	blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), trd_make_request);
	for (i=0;i<MAX_DEVS;i++) {
		trd_blocksizes[i] = TRD_BLOCK_SIZE;
		trd_blk_sizes[i] = trd_size;
	}

	blksize_size[MAJOR_NR] = trd_blocksizes;
	blk_size[MAJOR_NR] = trd_blk_sizes;

	printk(KERN_DEBUG DEVICE_NAME ": trd initialised size=%dk\n", 
	       trd_size);

	return 0;
}
Example #2
static int __init ramhd_init(void)
{
	int i;

	ramhd_space_init();
	alloc_ramdev();

	ramhd_major = register_blkdev(0, RAMHD_NAME);

	for (i=0; i<RAMHD_MAX_DEVICE; i++)
	{
		rdev[i]->data = sdisk[i];
		rdev[i]->queue = blk_alloc_queue(GFP_KERNEL);
		blk_queue_make_request(rdev[i]->queue, ramhd_make_request);
		rdev[i]->gd = alloc_disk(RAMHD_MAX_PARTITIONS);
		rdev[i]->gd->major = ramhd_major;
		rdev[i]->gd->first_minor = i*RAMHD_MAX_PARTITIONS;
		rdev[i]->gd->fops = &ramhd_fops;
		rdev[i]->gd->queue = rdev[i]->queue;
		rdev[i]->gd->private_data = rdev[i];
		sprintf(rdev[i]->gd->disk_name, "ramhd%c", 'a'+i);
		rdev[i]->gd->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		set_capacity(rdev[i]->gd, RAMHD_SECTOR_TOTAL);
		add_disk(rdev[i]->gd);
	}
	return 0;
}
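None of the listings here show the make_request callback itself, so below is a minimal sketch of what a bio-handling ramhd_make_request could look like on a 2.6.3x-era kernel (old bio_for_each_segment signature, bi_sector, two-argument bio_endio, int return type). The struct ramhd_dev layout with a ->data backing buffer is an assumption made for illustration, not taken from the project above.

/*
 * Hypothetical sketch of a bio-based make_request callback for a
 * RAM-backed disk on a 2.6.3x-era kernel; struct ramhd_dev and its
 * ->data member are assumptions, not the project's actual code.
 */
static int ramhd_make_request(struct request_queue *q, struct bio *bio)
{
	struct ramhd_dev *dev = bio->bi_bdev->bd_disk->private_data;
	void *disk_mem = dev->data + (bio->bi_sector << 9);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment(bvec, bio, i) {
		void *buf = kmap(bvec->bv_page) + bvec->bv_offset;

		if (bio_data_dir(bio) == WRITE)
			memcpy(disk_mem, buf, bvec->bv_len);
		else
			memcpy(buf, disk_mem, bvec->bv_len);

		kunmap(bvec->bv_page);
		disk_mem += bvec->bv_len;
	}

	bio_endio(bio, 0);	/* whole bio handled, report success */
	return 0;
}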
Example #3
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR
			"aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}

	d->blkq = blk_alloc_queue(GFP_KERNEL);
	if (!d->blkq)
		goto err_mempool;
	blk_queue_make_request(d->blkq, aoeblk_make_request);
	d->blkq->backing_dev_info.name = "aoe";
	spin_lock_irqsave(&d->lock, flags);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	gd->queue = d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
	return;

err_mempool:
	mempool_destroy(d->bufpool);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GDALLOC;
	spin_unlock_irqrestore(&d->lock, flags);
}
Example #4
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate disk "
			"structure for %ld.%ld\n", d->aoemajor, d->aoeminor);
		spin_lock_irqsave(&d->lock, flags);
		d->flags &= ~DEVFL_WORKON;
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	d->bufpool = mempool_create(MIN_BUFS,
				    mempool_alloc_slab, mempool_free_slab,
				    buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
			"for %ld.%ld\n", d->aoemajor, d->aoeminor);
		put_disk(gd);
		spin_lock_irqsave(&d->lock, flags);
		d->flags &= ~DEVFL_WORKON;
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);
	blk_queue_make_request(&d->blkq, aoeblk_make_request);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	gd->capacity = d->ssize;
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld",
		d->aoemajor, d->aoeminor);

	gd->queue = &d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_WORKON;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
	
	printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
		"sectors\n", (unsigned long long)mac_addr(d->addr),
		d->aoemajor, d->aoeminor,
		d->fw_ver, (long long)d->ssize);
}
Example #5
File: zvol.c Project: l1k/zfs
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_alloc_queue(GFP_ATOMIC);
	if (zv->zv_queue == NULL)
		goto out_kmem;

	blk_queue_make_request(zv->zv_queue, zvol_request);

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));

	return (NULL);
}
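For completeness, a hedged reconstruction of the matching teardown, mirrored from the out_queue/out_kmem error labels above; the project's real zvol_free() may differ in detail, so treat this as an assumption rather than the actual source.

/*
 * Hedged sketch of the inverse of zvol_alloc() above; reconstructed
 * from the error path, not copied from the ZFS source.
 */
static void zvol_free(zvol_state_t *zv)
{
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	del_gendisk(zv->zv_disk);	/* undoes the add_disk() done by the caller */
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}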
Example #6
File: rd.c Project: nhanh0/hah
/* This is the registration and initialization section of the RAM disk driver */
int __init rd_init (void)
{
	int		i;

	if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 ||
	    (rd_blocksize & (rd_blocksize-1)))
	{
		printk("RAMDISK: wrong blocksize %d, reverting to defaults\n",
		       rd_blocksize);
		rd_blocksize = BLOCK_SIZE;
	}

	if (register_blkdev(MAJOR_NR, "ramdisk", &rd_bd_op)) {
		printk("RAMDISK: Could not get major %d", MAJOR_NR);
		return -EIO;
	}

	blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), &rd_make_request);

	for (i = 0; i < NUM_RAMDISKS; i++) {
		/* rd_size is given in kB */
		rd_length[i] = rd_size << 10;
		rd_hardsec[i] = rd_blocksize;
		rd_blocksizes[i] = rd_blocksize;
		rd_kbsize[i] = rd_size;
	}
	devfs_handle = devfs_mk_dir (NULL, "rd", NULL);
	devfs_register_series (devfs_handle, "%u", NUM_RAMDISKS,
			       DEVFS_FL_DEFAULT, MAJOR_NR, 0,
			       S_IFBLK | S_IRUSR | S_IWUSR,
			       &rd_bd_op, NULL);

	for (i = 0; i < NUM_RAMDISKS; i++)
		register_disk(NULL, MKDEV(MAJOR_NR,i), 1, &rd_bd_op, rd_size<<1);

#ifdef CONFIG_BLK_DEV_INITRD
	/* We ought to separate initrd operations here */
	register_disk(NULL, MKDEV(MAJOR_NR,INITRD_MINOR), 1, &rd_bd_op, rd_size<<1);
#endif

	hardsect_size[MAJOR_NR] = rd_hardsec;		/* Size of the RAM disk blocks */
	blksize_size[MAJOR_NR] = rd_blocksizes;		/* Avoid set_blocksize() check */
	blk_size[MAJOR_NR] = rd_kbsize;			/* Size of the RAM disk in kB  */

		/* rd_size is given in kB */
	printk("RAMDISK driver initialized: "
	       "%d RAM disks of %dK size %d blocksize\n",
	       NUM_RAMDISKS, rd_size, rd_blocksize);

	return 0;
}
Example #7
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR "aoe: cannot allocate disk structure for %ld.%ld\n",
			d->aoemajor, d->aoeminor);
		spin_lock_irqsave(&d->lock, flags);
		d->flags &= ~DEVFL_GDALLOC;
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%ld\n",
			d->aoemajor, d->aoeminor);
		put_disk(gd);
		spin_lock_irqsave(&d->lock, flags);
		d->flags &= ~DEVFL_GDALLOC;
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);
	blk_queue_make_request(&d->blkq, aoeblk_make_request);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	gd->capacity = d->ssize;
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld",
		d->aoemajor, d->aoeminor);

	gd->queue = &d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
}
Example #8
static int __init stackbd_init(void)
{
	/* Set up our internal device */
	spin_lock_init(&stackbd.lock);

	/* blk_alloc_queue() instead of blk_init_queue() so it won't set up the
	 * queue for requests.
	 */
	if (!(stackbd.queue = blk_alloc_queue(GFP_KERNEL))) {
		printk("stackbd: alloc_queue failed\n");
		return -EFAULT;
	}

	blk_queue_make_request(stackbd.queue, stackbd_make_request);
	blk_queue_logical_block_size(stackbd.queue, LOGICAL_BLOCK_SIZE);

	/* Get registered */
	if ((major_num = register_blkdev(major_num, STACKBD_NAME)) < 0) {
		printk("stackbd: unable to get major number\n");
		goto error_after_alloc_queue;
	}

	/* Gendisk structure */
	if (!(stackbd.gd = alloc_disk(16)))
		goto error_after_register_blkdev;
	stackbd.gd->major = major_num;
	stackbd.gd->first_minor = 0;
	stackbd.gd->fops = &stackbd_ops;
	stackbd.gd->private_data = &stackbd;
	strcpy(stackbd.gd->disk_name, STACKBD_NAME_0);
	stackbd.gd->queue = stackbd.queue;
	add_disk(stackbd.gd);

	printk("stackbd: init done\n");

	return 0;

error_after_register_blkdev:
	unregister_blkdev(major_num, STACKBD_NAME);
error_after_alloc_queue:
	blk_cleanup_queue(stackbd.queue);

	return -EFAULT;
}
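The module exit path is not part of this excerpt; a minimal sketch of what it could look like, assuming nothing beyond the fields already used in stackbd_init() above:

/*
 * Minimal sketch of a matching exit path for stackbd_init() above;
 * an assumption for illustration, not taken from the stackbd source.
 */
static void __exit stackbd_exit(void)
{
	del_gendisk(stackbd.gd);		/* remove the disk from the system */
	put_disk(stackbd.gd);			/* drop the gendisk reference */
	blk_cleanup_queue(stackbd.queue);	/* release the bio queue */
	unregister_blkdev(major_num, STACKBD_NAME);
}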
Example #9
File: zvol.c Project: alek-p/zfs
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_alloc_queue(GFP_ATOMIC);
	if (zv->zv_queue == NULL)
		goto out_kmem;

	blk_queue_make_request(zv->zv_queue, zvol_request);
	blk_queue_set_write_cache(zv->zv_queue, B_TRUE, B_TRUE);

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	zfs_rlock_init(&zv->zv_range_lock);

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));

	return (NULL);
}
Example #10
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk(0);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;

	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
Example #11
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
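null_gendisk_register() is called above but not shown; Example #14 below inlines essentially the same gendisk setup, so a sketch reconstructed from it might look like the following (an approximation, not the driver's exact code):

/*
 * Approximate reconstruction of null_gendisk_register() based on the
 * inline gendisk setup in Example #14; not copied from the driver.
 */
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		return -ENOMEM;

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}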
Example #12
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	bioset_free(md->bs);
 bad_no_bioset:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	module_put(THIS_MODULE);
 bad0:
	kfree(md);
	return NULL;
}
Example #13
/*
 * Compile-time type check: passing drbd_make_request to
 * blk_queue_make_request() and asserting __same_type() against
 * make_request_fn makes the build fail if the signature drifts;
 * foo() itself is not intended to be executed.
 */
void foo(void)
{
	struct request_queue *q = NULL;
	blk_queue_make_request(q, drbd_make_request);
	BUILD_BUG_ON(!(__same_type(drbd_make_request, make_request_fn)));
}
Example #14
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
Example #15
static int setup_passthrough_device(struct pt_dev *dev, const char *target_name)
{
	struct request_queue *q;

	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (dev->queue == NULL)
		return -1;

	blk_queue_make_request(dev->queue, passthrough_make_request);
	//blk_queue_flush(dev->queue, REQ_FLUSH | REQ_FUA);

	dev->gd = alloc_disk(1);
	if (! dev->gd) {

		return -1;
	}

	dev->gd->major = passthrough->major;
	dev->gd->first_minor = 0;
	dev->gd->fops = &pt_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	dev->gd->flags |= GENHD_FL_EXT_DEVT;

	dev->target_dev = bdget(MKDEV(8,16));//blkdev_get_by_path(target_name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, dev);
	if(!dev->target_dev)
	{
		return -1;
	}

	dev->target_ssd = bdget(MKDEV(8,32));//blkdev_get_by_path(target_name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, dev);
	if(!dev->target_ssd)
	{
		return -1;
	}

	if(!dev->target_dev->bd_disk)
	{
		return -1;
	}

	if(!dev->target_ssd->bd_disk)
	{
		return -1;
	}

	q = bdev_get_queue(dev->target_dev);
	if(!q)
	{
		return -1;
	}

	dev->gd->queue->limits.max_hw_sectors	= q->limits.max_hw_sectors;
	dev->gd->queue->limits.max_sectors	= q->limits.max_sectors;
	dev->gd->queue->limits.max_segment_size	= q->limits.max_segment_size;
//	dev->gd->queue->limits.max_segments	= q->limits.max_segments;
	dev->gd->queue->limits.logical_block_size  = 512;
	dev->gd->queue->limits.physical_block_size = 512;
	set_bit(QUEUE_FLAG_NONROT, &dev->gd->queue->queue_flags);

	snprintf (dev->gd->disk_name, 32, "passthrough");

	set_capacity(dev->gd, get_capacity(dev->target_dev->bd_disk));

	add_disk(dev->gd);

	return 1;
}
Example #16
int sbull_init(void)
{
    int result, i;

    /*
     * Copy the (static) cfg variables to public prefixed ones to allow
     * snoozing with a debugger.
     */

    sbull_major    = major;
    sbull_devs     = devs;
    sbull_rahead   = rahead;
    sbull_size     = size;
    sbull_blksize  = blksize;
    sbull_hardsect = hardsect;

#ifdef LINUX_20
    /* Hardsect can't be changed :( */
    if (hardsect != 512) {
        printk(KERN_ERR "sbull: can't change hardsect size\n");
        hardsect = sbull_hardsect = 512;
    }
#endif
    /*
     * Register your major, and accept a dynamic number
     */
    result = register_blkdev(sbull_major, "sbull", &sbull_fops);
    if (result < 0) {
        printk(KERN_WARNING "sbull: can't get major %d\n",sbull_major);
        return result;
    }
    if (sbull_major == 0) sbull_major = result; /* dynamic */
    major = sbull_major; /* Use `major' later on to save typing */

    /*
     * Assign the other needed values: request, rahead, size, blksize,
     * hardsect. All the minor devices feature the same value.
     * Note that `sbull' defines all of them to allow testing non-default
     * values. A real device could well avoid setting values in global
     * arrays if it uses the default values.
     */

    read_ahead[major] = sbull_rahead;
    result = -ENOMEM; /* for the possible errors */

    sbull_sizes = kmalloc(sbull_devs * sizeof(int), GFP_KERNEL);
    if (!sbull_sizes)
        goto fail_malloc;
    for (i=0; i < sbull_devs; i++) /* all the same size */
        sbull_sizes[i] = sbull_size;
    blk_size[major]=sbull_sizes;

    sbull_blksizes = kmalloc(sbull_devs * sizeof(int), GFP_KERNEL);
    if (!sbull_blksizes)
        goto fail_malloc;
    for (i=0; i < sbull_devs; i++) /* all the same blocksize */
        sbull_blksizes[i] = sbull_blksize;
    blksize_size[major]=sbull_blksizes;

    sbull_hardsects = kmalloc(sbull_devs * sizeof(int), GFP_KERNEL);
    if (!sbull_hardsects)
        goto fail_malloc;
    for (i=0; i < sbull_devs; i++) /* all the same hardsect */
        sbull_hardsects[i] = sbull_hardsect;
    hardsect_size[major]=sbull_hardsects;

    /* FIXME: max_readahead and max_sectors */
    
    /* 
     * allocate the devices -- we can't have them static, as the number
     * can be specified at load time
     */

    sbull_devices = kmalloc(sbull_devs * sizeof (Sbull_Dev), GFP_KERNEL);
    if (!sbull_devices)
        goto fail_malloc;
    memset(sbull_devices, 0, sbull_devs * sizeof (Sbull_Dev));
    for (i=0; i < sbull_devs; i++) {
        /* data and usage remain zeroed */
        sbull_devices[i].size = 1024 * sbull_size;
        init_timer(&(sbull_devices[i].timer));
        sbull_devices[i].timer.data = (unsigned long)(sbull_devices+i);
        sbull_devices[i].timer.function = sbull_expires;
        spin_lock_init(&sbull_devices[i].lock);
    }

    /*
     * Get the queue set up, and register our (nonexistent) partitions.
     */  
#ifdef SBULL_MULTIQUEUE
    for (i = 0; i < sbull_devs; i++) {
        blk_init_queue(&sbull_devices[i].queue, sbull_request);
        blk_queue_headactive(&sbull_devices[i].queue, 0);
        blk_queue_pluggable(&sbull_devices[i].queue, sbull_plug);
    }
    blk_dev[major].queue = sbull_find_queue;
#else
#  ifdef LINUX_24
    if (noqueue)
    {
        blk_init_queue(BLK_DEFAULT_QUEUE(major), sbull_unused_request);
        blk_queue_make_request(BLK_DEFAULT_QUEUE(major), sbull_make_request);
    }
    else
#  endif /* LINUX_24 */
        blk_init_queue(BLK_DEFAULT_QUEUE(major), sbull_request);
#endif
    /* A no-op in 2.4.0, but all drivers seem to do it anyway */
    for (i = 0; i < sbull_devs; i++)
            register_disk(NULL, MKDEV(major, i), 1, &sbull_fops,
                            sbull_size << 1);

#ifndef SBULL_DEBUG
    EXPORT_NO_SYMBOLS; /* otherwise, leave global symbols visible */
#endif

    printk ("<1>sbull: init complete, %d devs, size %d blks %d hs %d\n",
                    sbull_devs, sbull_size, sbull_blksize, sbull_hardsect);
#ifdef SBULL_MULTIQUEUE
    printk ("<1>sbull: Using multiqueue request\n");
#elif defined(LINUX_24)
    if (noqueue)
            printk (KERN_INFO "sbull: using direct make_request\n");
#endif

#ifdef DO_RAW_INTERFACE
    sbullr_init();
#endif
    return 0; /* succeed */

  fail_malloc:
    read_ahead[major] = 0;
    if (sbull_sizes) kfree(sbull_sizes);
    blk_size[major] = NULL;
    if (sbull_blksizes) kfree(sbull_blksizes);
    blksize_size[major] = NULL;
    if (sbull_hardsects) kfree(sbull_hardsects);
    hardsect_size[major] = NULL;
    if (sbull_devices) kfree(sbull_devices);

    unregister_blkdev(major, "sbull");
    return result;
}
Example #17
/*
 * Set up our internal device.
 */
static void setup_device(struct sbull_dev *dev, int which)
{
	printk(KERN_EMERG "sbull: setup_device\n");
	/*
	 * Get some memory.
	 */
	memset (dev, 0, sizeof (struct sbull_dev));
	dev->size = nsectors*hardsect_size;
	dev->data = vmalloc(dev->size);
	if (dev->data == NULL) {
		printk (KERN_NOTICE "vmalloc failure.\n");
		return;
	}
	spin_lock_init(&dev->lock);
	
	/*
	 * The timer which "invalidates" the device.
	 */
	init_timer(&dev->timer);
	dev->timer.data = (unsigned long) dev;
	dev->timer.function = sbull_invalidate;
	
	/*
	 * The I/O queue, depending on whether we are using our own
	 * make_request function or not.
	 */
	switch (request_mode) {
	    case RM_NOQUEUE:
		dev->queue = blk_alloc_queue(GFP_KERNEL);
		if (dev->queue == NULL)
			goto out_vfree;
		blk_queue_make_request(dev->queue, sbull_make_request);
		break;

	    case RM_FULL:
		dev->queue = blk_init_queue(sbull_full_request, &dev->lock);
		if (dev->queue == NULL)
			goto out_vfree;
		break;

	    default:
		printk(KERN_NOTICE "Bad request mode %d, using simple\n", request_mode);
        	/* fall into.. */
	
	    case RM_SIMPLE:
		dev->queue = blk_init_queue(sbull_request, &dev->lock);
		if (dev->queue == NULL)
			goto out_vfree;
		break;
	}
	blk_queue_logical_block_size(dev->queue, hardsect_size);
	dev->queue->queuedata = dev;
	/*
	 * And the gendisk structure.
	 */
	dev->gd = alloc_disk(SBULL_MINORS);
	if (! dev->gd) {
		printk (KERN_NOTICE "alloc_disk failure\n");
		goto out_vfree;
	}
	dev->gd->major = sbull_major;
	dev->gd->first_minor = which*SBULL_MINORS;
	dev->gd->fops = &sbull_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf (dev->gd->disk_name, 32, "sbull%c", which + 'a');
	set_capacity(dev->gd, nsectors*(hardsect_size/KERNEL_SECTOR_SIZE));
	add_disk(dev->gd);
	return;

  out_vfree:
	if (dev->data)
		vfree(dev->data);
}
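The RM_SIMPLE branch above pairs blk_init_queue() with a request_fn rather than a make_request function; a sketch of that sbull_request() in the spirit of LDD3, updated to the ~2.6.31-3.x request API, is given below. sbull_transfer() copying sectors to and from dev->data is assumed, not shown in the source above.

/*
 * Sketch of the RM_SIMPLE request_fn registered via blk_init_queue()
 * above, in the spirit of LDD3; sbull_transfer() is assumed here.
 * Targets the ~2.6.31-3.x single-queue request API.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;

		if (req->cmd_type != REQ_TYPE_FS) {
			printk(KERN_NOTICE "sbull: skip non-fs request\n");
			__blk_end_request_all(req, -EIO);
			continue;
		}
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
			       req->buffer, rq_data_dir(req));
		__blk_end_request_cur(req, 0);
	}
}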
Example #18
/**
 * axon_ram_probe - probe() method for platform driver
 * @device: see platform_driver method
 */
static int axon_ram_probe(struct platform_device *device)
{
	static int axon_ram_bank_id = -1;
	struct axon_ram_bank *bank;
	struct resource resource;
	int rc = 0;

	axon_ram_bank_id++;

	dev_info(&device->dev, "Found memory controller on %s\n",
			device->dev.of_node->full_name);

	bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
	if (bank == NULL) {
		dev_err(&device->dev, "Out of memory\n");
		rc = -ENOMEM;
		goto failed;
	}

	device->dev.platform_data = bank;

	bank->device = device;

	if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) {
		dev_err(&device->dev, "Cannot access device tree\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->size = resource_size(&resource);

	if (bank->size == 0) {
		dev_err(&device->dev, "No DDR2 memory found for %s%d\n",
				AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
		rc = -ENODEV;
		goto failed;
	}

	dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);

	bank->ph_addr = resource.start;
	bank->io_addr = (unsigned long) ioremap_prot(
			bank->ph_addr, bank->size, _PAGE_NO_CACHE);
	if (bank->io_addr == 0) {
		dev_err(&device->dev, "ioremap() failed\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK);
	if (bank->disk == NULL) {
		dev_err(&device->dev, "Cannot register disk\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk->major = azfs_major;
	bank->disk->first_minor = azfs_minor;
	bank->disk->fops = &axon_ram_devops;
	bank->disk->private_data = bank;
	bank->disk->driverfs_dev = &device->dev;

	sprintf(bank->disk->disk_name, "%s%d",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id);

	bank->disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (bank->disk->queue == NULL) {
		dev_err(&device->dev, "Cannot register disk queue\n");
		rc = -EFAULT;
		goto failed;
	}

	set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
	blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
	blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
	add_disk(bank->disk);

	bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
	if (bank->irq_id == NO_IRQ) {
		dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
		rc = -EFAULT;
		goto failed;
	}

	rc = request_irq(bank->irq_id, axon_ram_irq_handler,
			AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot register ECC interrupt handler\n");
		bank->irq_id = NO_IRQ;
		rc = -EFAULT;
		goto failed;
	}

	rc = device_create_file(&device->dev, &dev_attr_ecc);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot create sysfs file\n");
		rc = -EFAULT;
		goto failed;
	}

	azfs_minor += bank->disk->minors;

	return 0;

failed:
	if (bank != NULL) {
		if (bank->irq_id != NO_IRQ)
			free_irq(bank->irq_id, device);
		if (bank->disk != NULL) {
			if (bank->disk->major > 0)
				unregister_blkdev(bank->disk->major,
						bank->disk->disk_name);
			del_gendisk(bank->disk);
		}
		device->dev.platform_data = NULL;
		if (bank->io_addr != 0)
			iounmap((void __iomem *) bank->io_addr);
		kfree(bank);
	}

	return rc;
}
Example #19
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		return -ENOMEM;

	spin_lock_init(&nullb->lock);

	if (setup_queues(nullb))
		goto err;

	if (queue_mode == NULL_Q_MQ) {
		null_mq_reg.numa_node = home_node;
		null_mq_reg.queue_depth = hw_queue_depth;

		if (use_per_node_hctx) {
			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
			null_mq_reg.ops->free_hctx = null_free_hctx;

			null_mq_reg.nr_hw_queues = nr_online_nodes;
		} else {
			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;

			null_mq_reg.nr_hw_queues = submit_queues;
		}

		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		blk_queue_make_request(nullb->q, null_queue_bio);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		if (nullb->q)
			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
	}

	if (!nullb->q)
		goto queue_fail;

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
queue_fail:
		if (queue_mode == NULL_Q_MQ)
			blk_mq_free_queue(nullb->q);
		else
			blk_cleanup_queue(nullb->q);
		cleanup_queues(nullb);
err:
		kfree(nullb);
		return -ENOMEM;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;
}
Example #20
static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res)
{
	struct pmem_device *pmem;
	struct gendisk *disk;
	int idx, err;

	err = -ENOMEM;
	pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		goto out;

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);

	err = -EINVAL;
	if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", &pmem->phys_addr, pmem->size);
		goto out_free_dev;
	}

	/*
	 * Map the memory as write-through, as we can't write back the contents
	 * of the CPU caches in case of a crash.
	 */
	err = -ENOMEM;
	pmem->virt_addr = ioremap_wt(pmem->phys_addr, pmem->size);
	if (!pmem->virt_addr)
		goto out_release_region;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		goto out_unmap;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_max_hw_sectors(pmem->pmem_queue, 1024);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);

	disk = alloc_disk(PMEM_MINORS);
	if (!disk)
		goto out_free_queue;

	idx = atomic_inc_return(&pmem_index) - 1;

	disk->major		= pmem_major;
	disk->first_minor	= PMEM_MINORS * idx;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "pmem%d", idx);
	disk->driverfs_dev = dev;
	set_capacity(disk, pmem->size >> 9);
	pmem->pmem_disk = disk;

	add_disk(disk);

	return pmem;

out_free_queue:
	blk_cleanup_queue(pmem->pmem_queue);
out_unmap:
	iounmap(pmem->virt_addr);
out_release_region:
	release_mem_region(pmem->phys_addr, pmem->size);
out_free_dev:
	kfree(pmem);
out:
	return ERR_PTR(err);
}
Example #21
File: dm.c Project: wxlong/Test
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(minor) : next_free_minor(&minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
 	if (!md->io_pool)
 		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
Example #22
int td_linux_block_create(struct td_osdev *dev)
{
	int rc;
	struct request_queue *queue;
	unsigned bio_sector_size = dev->block_params.bio_sector_size;
	unsigned hw_sector_size = dev->block_params.hw_sector_size;

	/* very simple sector size support */
	if (!bio_sector_size || bio_sector_size & 511 || bio_sector_size > 4096) {
		td_os_err(dev, "bio sector size of %u is not supported\n", bio_sector_size);
		return -EINVAL;
	}

	/* MetaData is reported here */
	if (hw_sector_size == 520)
		hw_sector_size = 512;
	if (!hw_sector_size || hw_sector_size & 511 || hw_sector_size > 4096) {
		td_os_err(dev, "hw sector size of %u is not supported\n", hw_sector_size);
		return -EINVAL;
	}

	td_os_notice(dev, " - Set capacity to %llu (%u bytes/sector)\n",
		dev->block_params.capacity, dev->block_params.hw_sector_size);

	/* create a new bio queue */
	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		td_os_err(dev, "Error allocating disk queue.\n");
		rc = -ENOMEM;
		goto error_alloc_queue;
	}

#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
#endif
	
	switch (dev->type) {
	case TD_OSDEV_DEVICE:
		blk_queue_make_request(queue, td_device_make_request);
		dev->_bio_error = td_device_bio_error;
		break;
	case TD_OSDEV_RAID:
		blk_queue_make_request(queue, td_raid_make_request);
		dev->_bio_error = td_raid_bio_error;
		break;
		
	default:
		td_os_err(dev, "Unknown OS Type, cannot register block request handler\n");
		rc = -EINVAL;	/* rc would otherwise be used uninitialized below */
		goto error_config_queue;
	}
	queue->queuedata = dev;

#if defined QUEUE_FLAG_PLUGGED 
	queue->unplug_fn = td_device_queue_unplug;
#endif

	/* configure queue ordering */

	/* in QUEUE_ORDERED_DRAIN we will get BARRIERS after the queue has
	 * been drained. */
#if defined KABI__blk_queue_ordered

#if KABI__blk_queue_ordered == 2
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN);
#elif KABI__blk_queue_ordered == 3
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN, NULL);
#else
#error unhandled value of KABI__blk_queue_ordered
#endif

#elif defined KABI__blk_queue_flush
	/*
	 * blk_queue_ordered was replaced with blk_queue_flush 
	 * The default implementation is QUEUE_ORDERED_DRAIN
	 */
	blk_queue_flush(queue, 0);
#else
#error undefined KABI__blk_queue_flush or KABI__blk_queue_ordered
#endif

	/* max out the throttling */
#ifdef KABI__blk_queue_max_hw_sectors
	blk_queue_max_hw_sectors(queue, dev->block_params.bio_max_bytes/512);
#elif defined KABI__blk_queue_max_sectors
	blk_queue_max_sectors(queue, dev->block_params.bio_max_bytes/512);
#else
	td_os_err(dev, "No kernel API for maximum sectors\n");
#endif

#if defined KABI__blk_queue_max_segments
	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
#elif defined KABI__blk_queue_max_phys_segments
	blk_queue_max_phys_segments(queue, MAX_SEGMENT_SIZE);
	blk_queue_max_hw_segments(queue, MAX_SEGMENT_SIZE);
#else
	td_os_err(dev, "No kernel API for maximum segments\n");
#endif

	blk_queue_max_segment_size(queue, dev->block_params.bio_max_bytes);

	blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);

	/* setup paged based access */
	td_os_info(dev, "Set queue physical block size to %u\n", hw_sector_size);
#ifdef KABI__blk_queue_physical_block_size
	blk_queue_physical_block_size(queue, hw_sector_size);
#elif defined KABI__blk_queue_hardsect_size
	blk_queue_hardsect_size(queue, hw_sector_size);
#else
	td_os_err(dev, "No kernel API for physical sector size\n");
#endif

#ifdef KABI__blk_queue_logical_block_size
	td_os_info(dev, "Set queue logical block size to %u\n", bio_sector_size);
	blk_queue_logical_block_size(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for logical block size\n");
#endif
#ifdef KABI__blk_queue_io_min
	td_os_info(dev, "Set queue io_min to %u\n", bio_sector_size);
	blk_queue_io_min(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for minimum IO size\n");
#endif
#ifdef KABI__blk_queue_io_opt
	td_os_info(dev, "Set queue io_opt to %u\n", dev->block_params.bio_max_bytes);
	blk_queue_io_opt(queue,  dev->block_params.bio_max_bytes);
#else
	td_os_err(dev, "No kernel API for optimal IO size\n");
#endif

#if 0
	if (dev->block_params.discard)
	{
		int did_something = 0;
#if defined KABI__blk_queue_discard_granularity
		queue->limits.discard_granularity = bio_sector_size;
		did_something++;
#endif
#ifdef KABI__blk_queue_max_discard_sectors
		/* 0xFFFF (max sector size of chunk on trim) * 64  * # SSD */
		blk_queue_max_discard_sectors(queue, TD_MAX_DISCARD_LBA_COUNT * 2);
		did_something++;
#endif
#ifdef KABI__blk_queue_discard_zeroes_data
		queue->limits.discard_zeroes_data = 1;
		did_something++;
#endif
#ifdef KABI__queue_flag_set_unlocked
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
		did_something++;
#endif
		/* Maybe some day.. But not today. 
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, queue);
		*/
		if (did_something)
			td_os_info(dev, "Enabling discard support\n");
		else
			td_os_notice(dev, "No kernel API for discard support\n");
	} else {
		td_os_info(dev, "No DISCARD support enabled\n");
	}
#else
	/* bug 7444 */
	if (dev->block_params.discard)
		td_os_info(dev, "Device supports DISCARD but is currently being forced disabled\n");
#endif

	/*  assign */
	dev->queue = queue;

	return 0;

error_config_queue:
	blk_cleanup_queue(dev->queue);
	dev->queue = NULL;

error_alloc_queue:
	return rc;
}