Example #1
int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t * lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret=0;

	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);
	
	if(!cq->bounce_buf){
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		//blk_queue_max_hw_phys_segments(cq->queue, host->max_phys_segs);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs, GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
	}

	/*change card io scheduler from cfq to deadline*/
	cq->queue->queuedata = cq;
	elevator_exit(cq->queue->elevator);
	cq->queue->elevator = NULL;
	ret = elevator_init(cq->queue, "deadline");
	if (ret) {
		printk("[card_init_queue] elevator_init deadline fail\n");
		blk_cleanup_queue(cq->queue);
		return ret;
	}


	init_MUTEX(&cq->thread_sem);
	cq->thread = kthread_run(card_queue_thread, cq, "%s_queue", card->name);
	if (IS_ERR(cq->thread)) {
		ret = PTR_ERR(cq->thread);
		//goto free_bounce_sg;
	}

	cq->nb.notifier_call = card_reboot_notifier;
	register_reboot_notifier(&cq->nb);

	return ret;
}
Example #2
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
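	/* per-request cap: the smaller of the controller's block-count limit and its total request size in sectors */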
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}
Example #3
/*
 * Allocate a bounce buffer for reading/writing a number of pages in one request
 */
static int card_init_bounce_buf(struct card_queue *cq, 
			struct memory_card *card)
{
	int ret=0;
	struct card_host *host = card->host;
	unsigned int bouncesz;

	bouncesz = CARD_QUEUE_BOUNCESZ;

	if (bouncesz > host->max_req_size)
		bouncesz = host->max_req_size;

	if (bouncesz >= PAGE_CACHE_SIZE) {
		//cq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		cq->bounce_buf = host->dma_buf;
		if (!cq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to "
				"allocate bounce buffer\n", card->name);
		}
	}

	if (cq->bounce_buf) {
		blk_queue_bounce_limit(cq->queue, BLK_BOUNCE_HIGH);
		blk_queue_max_hw_sectors(cq->queue, bouncesz / 512);
		blk_queue_physical_block_size(cq->queue, bouncesz);
		blk_queue_max_segments(cq->queue, bouncesz / PAGE_CACHE_SIZE);
		blk_queue_max_segment_size(cq->queue, bouncesz);

		cq->queue->queuedata = cq;
		cq->req = NULL;
	
		cq->sg = kmalloc(sizeof(struct scatterlist),
			GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
		sg_init_table(cq->sg, 1);

		cq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
			bouncesz / PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!cq->bounce_sg) {
			ret = -ENOMEM;
			kfree(cq->sg);
			cq->sg = NULL;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
		sg_init_table(cq->bounce_sg, bouncesz / PAGE_CACHE_SIZE);
	}

	return 0;
}
Example #4
static void ide_floppy_setup(ide_drive_t *drive)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	u16 *id = drive->id;

	drive->pc_callback	 = ide_floppy_callback;

	/*
	 * We used to check revisions here. At this point however I'm giving up.
	 * Just assume they are all broken, it's easier.
	 *
	 * The actual reason for the workarounds was likely a driver bug after
	 * all rather than a firmware bug, and the workaround below used to hide
	 * it. It should be fixed as of version 1.9, but to be on the safe side
	 * we'll leave the limitation below for the 2.2.x tree.
	 */
	if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI")) {
		drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
		/* This value will be visible in the /proc/ide/hdx/settings */
		drive->pc_delay = IDEFLOPPY_PC_DELAY;
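		/* limit ZIP 100 transfers to 64 sectors (32 KiB) per request */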
		blk_queue_max_hw_sectors(drive->queue, 64);
	}

	/*
	 * Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
	 * nasty clicking noises without it, so please don't remove this.
	 */
	if (strstarts((char *)&id[ATA_ID_PROD], "IOMEGA Clik!")) {
		blk_queue_max_hw_sectors(drive->queue, 64);
		drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
		/* IOMEGA Clik! drives do not support lock/unlock commands */
		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
	}

	(void) ide_floppy_get_capacity(drive);

	ide_proc_register_driver(drive, floppy->driver);

	drive->dev_flags |= IDE_DFLAG_ATTACH;
}
Example #5
/*
 * slave_configure()
 */
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);

	/* pr_info("scsiglue --- slave_configure\n"); */
	if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) {
		unsigned int max_sectors = 64;

		if (us->fflags & US_FL_MAX_SECTORS_MIN)
			max_sectors = PAGE_CACHE_SIZE >> 9;
		if (queue_max_sectors(sdev->request_queue) > max_sectors)
			blk_queue_max_hw_sectors(sdev->request_queue,
					      max_sectors);
	}
Example #6
/*
 * Initializes the block layer interfaces.
 */
static int sd_init_blk_dev(struct sd_host *host)
{
	struct gendisk *disk;
	struct request_queue *queue;
	int channel;
	int retval;

	channel = to_channel(exi_get_exi_channel(host->exi_device));

	/* queue */
	retval = -ENOMEM;
	spin_lock_init(&host->queue_lock);
	queue = blk_init_queue(sd_request_func, &host->queue_lock);
	if (!queue) {
		sd_printk(KERN_ERR, "error initializing queue\n");
		goto err_blk_init_queue;
	}
	blk_queue_dma_alignment(queue, EXI_DMA_ALIGN);
	blk_queue_max_segments(queue, 1);
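	/* a single segment of at most 8 sectors (4 KiB) per request */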
	blk_queue_max_hw_sectors(queue, 8);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
	queue->queuedata = host;
	host->queue = queue;

	/* disk */
	disk = alloc_disk(1 << MMC_SHIFT);
	if (!disk) {
		sd_printk(KERN_ERR, "error allocating disk\n");
		goto err_alloc_disk;
	}
	disk->major = SD_MAJOR;
	disk->first_minor = channel << MMC_SHIFT;
	disk->fops = &sd_fops;
	sprintf(disk->disk_name, "%s%c", SD_NAME, 'a' + channel);
	disk->private_data = host;
	disk->queue = host->queue;
	host->disk = disk;

	retval = 0;
	goto out;

err_alloc_disk:
	blk_cleanup_queue(host->queue);
	host->queue = NULL;
err_blk_init_queue:
out:
	return retval;
}
Example #7
int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock)
{
    int ret;

    DBGPRN_FUNC_NAME;

    /* 1st param is a function that wakes up the queue thread */
    bq->queue = blk_init_queue(cyasblkdev_request, lock);
    if (!bq->queue)
        return -ENOMEM;

    blk_queue_prep_rq(bq->queue, cyasblkdev_prep_request);

    blk_queue_bounce_limit(bq->queue, BLK_BOUNCE_ANY);
    blk_queue_max_hw_sectors(bq->queue, Q_MAX_SECTORS);

    /* As of now, we have the HAL/driver support to
     * merge scattered segments and handle them simultaneously.
     * So we set max_phys_segments to 8. */
    /*blk_queue_max_phys_segments(bq->queue, Q_MAX_SGS);
    blk_queue_max_hw_segments(bq->queue, Q_MAX_SGS);*/
    blk_queue_max_segments(bq->queue, Q_MAX_SGS);

    /* should be smaller than what the HAL can handle */
    blk_queue_max_segment_size(bq->queue, 512*Q_MAX_SECTORS);

    bq->queue->queuedata = bq;
    bq->req = NULL;

    init_completion(&bq->thread_complete);
    init_waitqueue_head(&bq->thread_wq);
    sema_init(&bq->thread_sem, 1);

    ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
    if (ret >= 0) {
        /* wait until the thread is spawned */
        wait_for_completion(&bq->thread_complete);

        /* reinitialize the completion */
        init_completion(&bq->thread_complete);
        ret = 0;
        goto out;
    }

out:
    return ret;
}
Example #8
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);

	/* Many devices have trouble transferring more than 32KB at a time,
	 * while others have trouble with more than 64K. At this time we
	 * are limiting both to 32K (64 sectors).
	 */
	if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) {
		unsigned int max_sectors = 64;

		if (us->fflags & US_FL_MAX_SECTORS_MIN)
			max_sectors = PAGE_CACHE_SIZE >> 9;
		if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
			blk_queue_max_hw_sectors(sdev->request_queue,
					      max_sectors);
	} else if (sdev->type == TYPE_TAPE) {
Example #9
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	/* Initialize thread_sem even if it is not used */
	sema_init(&mq->thread_sem, 1);
}
Example #10
static int stackbd_start(char dev_path[])
{
    unsigned max_sectors;

    if (!(stackbd.bdev_raw = stackbd_bdev_open(dev_path)))
        return -EFAULT;

    /* Set up our internal device */
    stackbd.capacity = get_capacity(stackbd.bdev_raw->bd_disk);
    printk("stackbd: Device real capacity: %llu\n", (unsigned long long) stackbd.capacity);

    set_capacity(stackbd.gd, stackbd.capacity);

    max_sectors = queue_max_hw_sectors(bdev_get_queue(stackbd.bdev_raw));
    blk_queue_max_hw_sectors(stackbd.queue, max_sectors);
    printk("stackbd: Max sectors: %u\n", max_sectors);

    stackbd.thread = kthread_create(stackbd_threadfn, NULL,
           stackbd.gd->disk_name);
    if (IS_ERR(stackbd.thread))
    {
        printk("stackbd: error kthread_create <%lu>\n",
               PTR_ERR(stackbd.thread));
        goto error_after_bdev;
    }

    printk("stackbd: done initializing successfully\n");
    stackbd.is_active = 1;
    wake_up_process(stackbd.thread);

    return 0;

error_after_bdev:
    blkdev_put(stackbd.bdev_raw, STACKBD_BDEV_MODE);
    bdput(stackbd.bdev_raw);

    return -EFAULT;
}
Example #11
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
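	/* byte-addressable pmem: effectively no per-request sector limit */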
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk(0);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;

	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
Example #12
static int
__zvol_create_minor(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	unsigned minor = 0;
	int error = 0;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv) {
		error = EEXIST;
		goto out;
	}

	doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
	if (error)
		goto out_doi;

    /* Make sure we have the key loaded if we need one. */
    error = dsl_crypto_key_inherit(name);
    if (error != 0 && error != EEXIST)
		goto out_dmu_objset_disown;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = zvol_find_minor(&minor);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = EAGAIN;
		goto out_dmu_objset_disown;
	}

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
	blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
#ifdef HAVE_BLK_QUEUE_DISCARD
	blk_queue_max_discard_sectors(zv->zv_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#endif
#ifdef HAVE_BLK_QUEUE_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif

	if (zil_replay_disable)
		zil_destroy(dmu_objset_zil(os), B_FALSE);
	else
		zil_replay(os, zv, zvol_replay_vector);

out_dmu_objset_disown:
	dmu_objset_disown(os, zvol_tag);
	zv->zv_objset = NULL;
out_doi:
	kmem_free(doi, sizeof(dmu_object_info_t));
out:

	if (error == 0) {
		zvol_insert(zv);
		add_disk(zv->zv_disk);
	}

	return (error);
}
Example #13
/* Create disks on demand, so we won't create lots of disks for unused devices. */
static struct kobject *tzmem_blk_probe(dev_t dev, int *part, void *data)
{
	uint32_t len;
	struct gendisk *disk;
	struct kobject *kobj;
	struct request_queue *queue;
	struct tzmem_diskinfo_s *diskInfo;
	int ret;
	KREE_SESSION_HANDLE session;

#ifdef MTEE_TZMEM_DBG
	pr_warn("====> tzmem_blk_probe\n");
#endif

	mutex_lock(&tzmem_probe_mutex);

	diskInfo = (struct tzmem_diskinfo_s *) &_tzmem_diskInfo[tzmem_poolIndex];
	if (diskInfo->disk == NULL) {
		disk = alloc_disk(1);
		if (!disk)
			goto out_info;

		queue = blk_init_queue(do_tzmem_blk_request, &tzmem_blk_lock);
		if (!queue)
			goto out_queue;

		blk_queue_max_hw_sectors(queue, 1024);
		blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);

		if (_tzmem_get_poolsize(&len))
			goto out_init;

		disk->major = IO_NODE_MAJOR_TZMEM;
		disk->first_minor = MINOR(dev);
		disk->fops = &tzmem_blk_fops;
		disk->private_data = &_tzmem_diskInfo;
		snprintf(disk->disk_name, sizeof(disk->disk_name), "tzmem%d", MINOR(dev));
		disk->queue = queue;
		set_capacity(disk, len / 512);
		add_disk(disk);

		ret = KREE_CreateSession(TZ_TA_MEM_UUID, &session);
		if (ret != TZ_RESULT_SUCCESS) {
			pr_debug(MTEE_TZMEM_TAG
			"[%s] _tzmem_get_poolsize: KREE_CreateSession Error = 0x%x\n",
			MODULE_NAME, ret);
			goto out_init;
		}

		diskInfo->session = session;
		diskInfo->pool_size = len;
		diskInfo->disk = disk;
		diskInfo->size = len;
	}

	*part = 0;
	kobj = diskInfo ? get_disk(diskInfo->disk) : ERR_PTR(-ENOMEM);

	mutex_unlock(&tzmem_probe_mutex);
	return kobj;

out_init:
	blk_cleanup_queue(queue);
out_queue:
	put_disk(disk);
out_info:
	mutex_unlock(&tzmem_probe_mutex);
	return ERR_PTR(-ENOMEM);
}
Example #14
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    mq->queue->queuedata = mq;
    mq->req = NULL;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    if (mmc_can_erase(card)) {
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
        mq->queue->limits.max_discard_sectors = UINT_MAX;
        if (card->erased_byte == 0)
            mq->queue->limits.discard_zeroes_data = 1;
        mq->queue->limits.discard_granularity = card->pref_erase << 9;
        if (mmc_can_secure_erase_trim(card))
            queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
                                    mq->queue);
    }

#ifdef CONFIG_MMC_BLOCK_BOUNCE
    if (host->max_segs == 1) {
        unsigned int bouncesz;

        bouncesz = MMC_QUEUE_BOUNCESZ;

        if (bouncesz > host->max_req_size)
            bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
            bouncesz = host->max_seg_size;
        if (bouncesz > (host->max_blk_count * 512))
            bouncesz = host->max_blk_count * 512;

        if (bouncesz > 512) {
            mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            if (!mq->bounce_buf) {
                printk(KERN_WARNING "%s: unable to "
                       "allocate bounce buffer\n",
                       mmc_card_name(card));
            }
        }

        if (mq->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
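            /* the whole request must fit in the bounce buffer */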
            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);

            mq->sg = kmalloc(sizeof(struct scatterlist),
                             GFP_KERNEL);
            if (!mq->sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->sg, 1);

            mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                                    bouncesz / 512, GFP_KERNEL);
            if (!mq->bounce_sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->bounce_sg, bouncesz / 512);
        }
    }
#endif

    if (!mq->bounce_buf) {
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_hw_sectors(mq->queue,
                                 min(host->max_blk_count, host->max_req_size / 512));
        blk_queue_max_segments(mq->queue, host->max_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mq->sg = kmalloc(sizeof(struct scatterlist) *
                         host->max_segs, GFP_KERNEL);
        if (!mq->sg) {
            ret = -ENOMEM;
            goto cleanup_queue;
        }
        sg_init_table(mq->sg, host->max_segs);
    }

    sema_init(&mq->thread_sem, 1);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                             host->index, subname ? subname : "");

    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_bounce_sg;
    }

    return 0;
free_bounce_sg:
    if (mq->bounce_sg)
        kfree(mq->bounce_sg);
    mq->bounce_sg = NULL;
cleanup_queue:
    if (mq->sg)
        kfree(mq->sg);
    mq->sg = NULL;
    if (mq->bounce_buf)
        kfree(mq->bounce_buf);
    mq->bounce_buf = NULL;
    blk_cleanup_queue(mq->queue);
    return ret;
}
Example #15
int nbdx_register_block_device(struct nbdx_file *nbdx_file)
{
	sector_t size = nbdx_file->stbuf.st_size;
	int page_size = PAGE_SIZE;
	int err = 0;

	pr_debug("%s called\n", __func__);

	nbdx_file->major = nbdx_major;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
	nbdx_mq_reg.nr_hw_queues = submit_queues;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_mq_reg, nbdx_file);
#else
	nbdx_file->tag_set.ops = &nbdx_mq_ops;
	nbdx_file->tag_set.nr_hw_queues = submit_queues;
	nbdx_file->tag_set.queue_depth = NBDX_QUEUE_DEPTH;
	nbdx_file->tag_set.numa_node = NUMA_NO_NODE;
	nbdx_file->tag_set.cmd_size	= sizeof(struct raio_io_u);
	nbdx_file->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	nbdx_file->tag_set.driver_data = nbdx_file;

	err = blk_mq_alloc_tag_set(&nbdx_file->tag_set);
	if (err)
		goto out;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_file->tag_set);
#endif
	if (IS_ERR(nbdx_file->queue)) {
		pr_err("%s: Failed to allocate blk queue ret=%ld\n",
		       __func__, PTR_ERR(nbdx_file->queue));
		err = PTR_ERR(nbdx_file->queue);
		goto blk_mq_init;
	}

	nbdx_file->queue->queuedata = nbdx_file;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nbdx_file->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nbdx_file->queue);

	nbdx_file->disk = alloc_disk_node(1, NUMA_NO_NODE);
	if (!nbdx_file->disk) {
		pr_err("%s: Failed to allocate disk node\n", __func__);
		err = -ENOMEM;
		goto alloc_disk;
	}

	nbdx_file->disk->major = nbdx_file->major;
	nbdx_file->disk->first_minor = nbdx_file->index;
	nbdx_file->disk->fops = &nbdx_ops;
	nbdx_file->disk->queue = nbdx_file->queue;
	nbdx_file->disk->private_data = nbdx_file;
	blk_queue_logical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	blk_queue_physical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	sector_div(page_size, NBDX_SECT_SIZE);
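	/* page_size is now the number of sectors per page; allow up to MAX_SGL_LEN pages per request */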
	blk_queue_max_hw_sectors(nbdx_file->queue, page_size * MAX_SGL_LEN);
	sector_div(size, NBDX_SECT_SIZE);
	set_capacity(nbdx_file->disk, size);
	sscanf(nbdx_file->dev_name, "%s", nbdx_file->disk->disk_name);
	add_disk(nbdx_file->disk);
	goto out;

alloc_disk:
	blk_cleanup_queue(nbdx_file->queue);
blk_mq_init:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	blk_mq_free_tag_set(&nbdx_file->tag_set);
#endif
out:
	return err;
}
Example #16
int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t * lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret=0, card_quene_num;
	struct card_queue_list *cq_node_current;
	struct card_queue_list *cq_node_prev = NULL;

	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);
	
	if(!cq->bounce_buf){
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		//blk_queue_max_hw_phys_segments(cq->queue, host->max_phys_segs);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs, GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
	}

	if (card_queue_head == NULL)
	{
		card_queue_head = kmalloc(sizeof(struct card_queue_list), GFP_KERNEL);
		if (card_queue_head == NULL) 
		{
			ret = -ENOMEM;
			kfree(card_queue_head);
			card_queue_head = NULL;
			return ret;
		}
		card_queue_head->cq = cq;
		card_queue_head->cq_num = 0;
		card_queue_head->cq_flag = 0;
		card_queue_head->cq_next = NULL;

		init_completion(&card_thread_complete);
		init_waitqueue_head(&card_thread_wq);
		init_MUTEX(&card_thread_sem);
		host->queue_task = kthread_run(card_queue_thread, cq, "card_queue");
		if (host->queue_task)
		{
			wait_for_completion(&card_thread_complete);
			init_completion(&card_thread_complete);
			ret = 0;
			return ret;
		}
	} 
	else
	{
		card_quene_num = 0;
		cq_node_current = card_queue_head;
		do
		{
			card_quene_num = cq_node_current->cq_num;
			cq_node_prev = cq_node_current;
			cq_node_current = cq_node_current->cq_next;
		} while (cq_node_current != NULL);

		cq_node_current = kmalloc(sizeof(struct card_queue_list), GFP_KERNEL);
		if (cq_node_current == NULL)
		{
			ret = -ENOMEM;
			kfree(cq_node_current);
			cq_node_current = NULL;
			return ret;
		}
		cq_node_prev->cq_next = cq_node_current;
		cq_node_current->cq = cq;
		cq_node_current->cq_next = NULL;
		cq_node_current->cq_num = (++card_quene_num);
		cq_node_current->cq_flag = 0;

		ret = 0;
		return ret;
	}

	return ret;
}
Example #17
static int __init stheno_module_init( void )
{
    int retval;

    print_info( "stheno_module_init was called.\n" );

    init_waitqueue_head( &stheno_wait_q );

    wake_lock_init( &stheno_wakelock, WAKE_LOCK_SUSPEND, STHENO_NAME );

    stheno_major = register_blkdev( 0, STHENO_NAME );
    if( stheno_major <= 0 ){
        print_error( "stheno register_blkdev failed.\n" );
        retval = -EBUSY;
        goto error;
    }

    spin_lock_init( &stheno_lock );
    stheno_queue = blk_init_queue( stheno_request, &stheno_lock );
    if( stheno_queue == NULL ){
        print_error( "stheno blk_init_queue failed.\n" );
        retval = -ENOMEM;
        goto error;
    }

    /*blk_queue_hardsect_size( stheno_queue, SECTOR_SIZE );*/
    /*blk_queue_max_sectors( stheno_queue, MAX_SECTORS );*/
    blk_queue_logical_block_size( stheno_queue, SECTOR_SIZE );
    blk_queue_max_hw_sectors( stheno_queue, MAX_SECTORS );
#if defined( STHENO_BLK_BOUNCE_ANY )
    blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_ANY );
#else
    blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_HIGH ); /* default */
#endif

    stheno_gd = alloc_disk( STHENO_MINOR_COUNT );
    if( stheno_gd == NULL ){
        print_error( "stheno alloc_disk failed.\n" );
        retval = -ENOMEM;
        goto error;
    }

    stheno_gd->major = stheno_major;
    stheno_gd->first_minor = 0;
    stheno_gd->fops = &stheno_fops;
    stheno_gd->queue = stheno_queue;
    /*stheno_gd->flags = GENHD_FL_REMOVABLE;*/
    /*stheno_gd->private_data = NULL;*/
    snprintf( stheno_gd->disk_name, DISK_NAME_LEN, "%s", STHENO_NAME );
    set_capacity( stheno_gd, AMOUNT_OF_SECTORS );

    stheno_thread = kthread_create( stheno_request_thread, 0, STHENO_THREAD_NAME );
    if( IS_ERR( stheno_thread ) ){
        print_error( "stheno kthread_create failed.\n" );
        retval = -EBUSY;
        goto error;
    }
    wake_up_process( stheno_thread );

    add_disk( stheno_gd );

    print_debug( "stheno major = %d\n", stheno_major );
    return 0;
error:
    if( stheno_gd != NULL ) del_gendisk( stheno_gd );
    if( stheno_queue != NULL ) blk_cleanup_queue( stheno_queue );
    if( stheno_major > 0 ) unregister_blkdev( stheno_major, STHENO_NAME );
    return retval;
}
Example #18
static int __init hd_init(void)
{
	int drive;

	if (register_blkdev(HD_MAJOR, "hd"))
		return -1;

	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
	if (!hd_queue) {
		unregister_blkdev(HD_MAJOR, "hd");
		return -ENOMEM;
	}

	blk_queue_max_hw_sectors(hd_queue, 255);
	init_timer(&device_timer);
	device_timer.function = hd_times_out;
	blk_queue_logical_block_size(hd_queue, 512);

	if (!NR_HD) {
		/*
		 * We don't know anything about the drive.  This means
		 * that you *MUST* specify the drive parameters to the
		 * kernel yourself.
		 *
		 * If we were on an i386, we used to read this info from
		 * the BIOS or CMOS.  This doesn't work all that well,
		 * since this assumes that this is a primary or secondary
		 * drive, and if we're using this legacy driver, it's
		 * probably an auxiliary controller added to recover
		 * legacy data off an ST-506 drive.  Either way, it's
		 * definitely safest to have the user explicitly specify
		 * the information.
		 */
		printk("hd: no drives specified - use hd=cyl,head,sectors"
			" on kernel command line\n");
		goto out;
	}

	for (drive = 0 ; drive < NR_HD ; drive++) {
		struct gendisk *disk = alloc_disk(64);
		struct hd_i_struct *p = &hd_info[drive];
		if (!disk)
			goto Enomem;
		disk->major = HD_MAJOR;
		disk->first_minor = drive << 6;
		disk->fops = &hd_fops;
		sprintf(disk->disk_name, "hd%c", 'a'+drive);
		disk->private_data = p;
		set_capacity(disk, p->head * p->sect * p->cyl);
		disk->queue = hd_queue;
		p->unit = drive;
		hd_gendisk[drive] = disk;
		printk("%s: %luMB, CHS=%d/%d/%d\n",
			disk->disk_name, (unsigned long)get_capacity(disk)/2048,
			p->cyl, p->head, p->sect);
	}

	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
		printk("hd: unable to get IRQ%d for the hard disk driver\n",
			HD_IRQ);
		goto out1;
	}
	if (!request_region(HD_DATA, 8, "hd")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
		goto out2;
	}
	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
		goto out3;
	}

	/* Let them fly */
	for (drive = 0; drive < NR_HD; drive++)
		add_disk(hd_gendisk[drive]);

	return 0;

out3:
	release_region(HD_DATA, 8);
out2:
	free_irq(HD_IRQ, NULL);
out1:
	for (drive = 0; drive < NR_HD; drive++)
		put_disk(hd_gendisk[drive]);
	NR_HD = 0;
out:
	del_timer(&device_timer);
	unregister_blkdev(HD_MAJOR, "hd");
	blk_cleanup_queue(hd_queue);
	return -1;
Enomem:
	while (drive--)
		put_disk(hd_gendisk[drive]);
	goto out;
}
Example #19
/*
    Create system device file for the enabled slot.
*/
ndas_error_t slot_enable(int s)
{
    ndas_error_t ret = NDAS_ERROR_INTERNAL;
    int got;
    struct ndas_slot* slot = NDAS_GET_SLOT_DEV(s); 
    dbgl_blk(3, "ing s#=%d slot=%p",s, slot);
    got = try_module_get(THIS_MODULE);
    MOD_INC_USE_COUNT;
    
    if ( slot == NULL)
        goto out1;
    
    if ( slot->enabled ) {
        dbgl_blk(1, "already enabled");
        ret = NDAS_OK;
        goto out2;
    }
    ret = ndas_query_slot(s, &slot->info);
    if ( !NDAS_SUCCESS(ret) ) {
        dbgl_blk(1, "fail ndas_query_slot");
        goto out2;
    }
    dbgl_blk(1, "mode=%d", slot->info.mode);
    
    slot->enabled = 1;
    
#if LINUX_VERSION_25_ABOVE

    slot->disk = NULL;
    spin_lock_init(&slot->lock);
    slot->queue = blk_init_queue(
        nblk_request_proc, 
        &slot->lock
    );
	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))    
	    blk_queue_max_phys_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	    blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);	//renamed in 2.6.34	
	    //blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //removed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
	    blk_queue_logical_block_size(slot->queue, slot->info.sector_size);
	#else
	    blk_queue_hardsect_size(slot->queue, slot->info.sector_size);
	#endif

	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))
	    blk_queue_max_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_hw_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS); //renamed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
	    // Set ordered queue property.
		#if 0
		    blk_queue_ordered(slot->queue, QUEUE_ORDERED_TAG_FLUSH, nblk_prepare_flush);
		#endif
	#endif

    slot->disk = alloc_disk(NR_PARTITION);
    if ( slot->disk == NULL ) {
        slot->enabled = 0;
        dbgl_blk(1, "fail alloc disk");
        goto out2;
    }

    slot->disk->major = NDAS_BLK_MAJOR;
    slot->disk->first_minor = (s - NDAS_FIRST_SLOT_NR) << PARTN_BITS;
    slot->disk->fops = &ndas_fops;
    slot->disk->queue = slot->queue;
    slot->disk->private_data = (void*) (long)s;
    slot->queue_flags = 0;

    dbgl_blk(1, "mode=%d", slot->info.mode);
    if ( slot->info.mode == NDAS_DISK_MODE_SINGLE || 
        slot->info.mode == NDAS_DISK_MODE_ATAPI ||
        slot->info.mode == NDAS_DISK_MODE_MEDIAJUKE) 
    {
        char short_serial[NDAS_SERIAL_SHORT_LENGTH + 1];
        if (strlen(slot->info.ndas_serial) > 8) {
            /* Extended serial number is too long for a sysfs object name. Use the last 8 digits only */
            strncpy(
                short_serial,
                slot->info.ndas_serial + ( NDAS_SERIAL_EXTEND_LENGTH - NDAS_SERIAL_SHORT_LENGTH),
                8);
        } else {
            strncpy(short_serial, slot->info.ndas_serial, 8);
        }
        short_serial[8] =0;
        snprintf(slot->devname,
            sizeof(slot->devname)-1, 
            "ndas-%s-%d", short_serial, slot->info.unit
        );

        strcpy(slot->disk->disk_name, slot->devname);

	    dbgl_blk(1, "just set slot->disk->%s, slot->%s", slot->disk->disk_name, slot->devname );

	#if !LINUX_VERSION_DEVFS_REMOVED_COMPLETELY
	        strcpy(slot->disk->devfs_name, slot->devname);
	#endif
        set_capacity(slot->disk, slot->info.sectors);
	    dbgl_blk(1, "just set capacity slot->disk, slot->info.sectors:%llu", slot->info.sectors);

    } else {
        /* Other mode is not implemented */

    }
    
    if (slot->info.mode == NDAS_DISK_MODE_ATAPI) {
        slot->disk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
	    dbgl_blk(1, "just set slot->disk->flags");
	#if 0
	        kref_init(&slot->ndascd.kref);
	#endif
    }

    dbgl_blk(4, "adding disk: slot=%d, first_minor=%d, capacity=%llu", s, slot->disk->first_minor, slot->info.sectors);
    add_disk(slot->disk);
    dbgl_blk(1, "added disk: slot=%d", s);
   
		#ifndef NDAS_DONT_CARE_SCHEDULER
			#if LINUX_VERSION_AVOID_CFQ_SCHEDULER
				#if CONFIG_SYSFS
				    sal_assert(slot->queue->kobj.ktype);	
				    sal_assert(slot->queue->kobj.ktype->default_attrs);
				    {
				        struct queue_sysfs_entry {
				        	struct attribute attr;
				        	ssize_t (*show)(struct request_queue *, char *);
				        	ssize_t (*store)(struct request_queue *, const char *, size_t);
				        };
				        struct attribute *attr = slot->queue->kobj.ktype->default_attrs[4];
				        struct queue_sysfs_entry *entry = container_of(attr , struct queue_sysfs_entry, attr);
				        //dbgl_blk(1, "now to set the scheduler: slot-queue=%d, scheduler==%s, scheduler_len=%d", slot->queue, NDAS_QUEUE_SCHEDULER, strlen(NDAS_QUEUE_SCHEDULER));
				        entry->store(slot->queue,NDAS_QUEUE_SCHEDULER,strlen(NDAS_QUEUE_SCHEDULER)); 
				        
				    }
				#else
					#error "NDAS driver doesn't work well with CFQ scheduler of 2.6.13 or above kernel." \
				   "if you forcely want to use it, please specify compiler flags by " \
				   "export NDAS_EXTRA_CFLAGS=\"-DNDAS_DONT_CARE_SCHEDULER\" "\
				   "then compile the source again."
				#endif
			#endif
		#endif        
    printk("ndas: /dev/%s enabled\n" , 
            slot->devname);
#else 
    /* < LINUX_VERSION_25_ABOVE */
    dbgl_blk(4, "blksize=%d", DEFAULT_ND_BLKSIZE);
    dbgl_blk(4, "size=%lld", slot->info.sectors);
    dbgl_blk(1, "hardsectsize=%d", slot->info.sector_size);
    ndas_ops_set_blk_size(
        s, 
        DEFAULT_ND_BLKSIZE, 
        slot->info.sectors,
        slot->info.sector_size, 
        DEFAULT_ND_MAX_SECTORS
    );
#ifdef NDAS_DEVFS    
    printk("ndas: /dev/nd/disc%d enabled\n" , 
            s - NDAS_FIRST_SLOT_NR);
#else
    printk("ndas: /dev/nd%c enabled\n" , 
            s + 'a' - NDAS_FIRST_SLOT_NR);
#endif

#endif
    
    //up(&slot->mutex);
 #ifdef NDAS_MSHARE 
    if(NDAS_GET_SLOT_DEV(s)->info.mode == NDAS_DISK_MODE_MEDIAJUKE)
    {
  	    ndas_CheckFormat(s);
    }
 #endif
#if !LINUX_VERSION_25_ABOVE
    ndas_ops_read_partition(s);
#endif
    dbgl_blk(3, "ed");
    return NDAS_OK;
out2:    
    //up(&slot->mutex);
out1:    
    if ( got ) module_put(THIS_MODULE);
    MOD_DEC_USE_COUNT;
    return ret;
}
Example #20
void foo(struct request_queue *q)
{
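	/* cap this queue at a single 512-byte sector per request */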
	blk_queue_max_hw_sectors(q, 1);

}
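For comparison with the fuller examples above, here is a minimal hedged sketch of the recurring pattern: apply the per-request transfer limits immediately after the queue is created. The mydrv_* names and limit values are hypothetical and are not taken from any example in this listing.

#include <linux/blkdev.h>

#define MYDRV_MAX_SECTORS	256	/* hypothetical cap: 128 KiB per request */
#define MYDRV_MAX_SEGS		32	/* hypothetical scatter-gather limit */

/* Apply per-request transfer limits to a freshly initialised queue. */
static void mydrv_setup_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, MYDRV_MAX_SECTORS);		/* 256 * 512 bytes = 128 KiB per request */
	blk_queue_max_segments(q, MYDRV_MAX_SEGS);		/* scatter-gather entries per request */
	blk_queue_max_segment_size(q, MYDRV_MAX_SEGS * 512);	/* bytes allowed in a single segment */
}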
Example #21
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	mempool_t *mp;
	struct request_queue *q;
	enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
	ulong flags;
	int late = 0;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_GDALLOC
	&& !(d->flags & DEVFL_TKILL)
	&& !(d->flags & DEVFL_GD_NOW))
		d->flags |= DEVFL_GD_NOW;
	else
		late = 1;
	spin_unlock_irqrestore(&d->lock, flags);
	if (late)
		return;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
		buf_pool_cache);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}
	q = blk_init_queue(aoeblk_request, &d->lock);
	if (q == NULL) {
		pr_err("aoe: cannot allocate block queue for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_mempool;
	}

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	WARN_ON(!(d->flags & DEVFL_GDALLOC));
	WARN_ON(d->flags & DEVFL_TKILL);
	WARN_ON(d->gd);
	WARN_ON(d->flags & DEVFL_UP);
	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
	q->backing_dev_info.name = "aoe";
	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
	d->bufpool = mp;
	d->blkq = gd->queue = q;
	q->queuedata = d;
	d->gd = gd;
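	/* an explicit aoe_maxsectors setting overrides the default cap set above */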
	if (aoe_maxsectors)
		blk_queue_max_hw_sectors(q, aoe_maxsectors);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	d->flags &= ~DEVFL_GD_NOW;
	spin_unlock_irqrestore(&d->lock, flags);
	return;

err_mempool:
	mempool_destroy(mp);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GD_NOW;
	schedule_work(&d->work);
	spin_unlock_irqrestore(&d->lock, flags);
}
Example #22
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		if(!mmc_card_sd(card))
			bouncesz = MMC_QUEUE_BOUNCESZ;
		else
			bouncesz = MMC_QUEUE_SD_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			if(!mmc_card_sd(card))
				mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_cur->bounce_buf = mmc_queue_cur_bounce_buf;
			if (!mqrq_cur->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			if(!mmc_card_sd(card))
				mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_prev->bounce_buf = mmc_queue_prev_bounce_buf;
			if (!mqrq_prev->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;


		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	if(!mmc_card_sd(card))
		kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	if(!mmc_card_sd(card))
		kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #23
static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res)
{
	struct pmem_device *pmem;
	struct gendisk *disk;
	int idx, err;

	err = -ENOMEM;
	pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		goto out;

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);

	err = -EINVAL;
	if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", &pmem->phys_addr, pmem->size);
		goto out_free_dev;
	}

	/*
	 * Map the memory as write-through, as we can't write back the contents
	 * of the CPU caches in case of a crash.
	 */
	err = -ENOMEM;
	pmem->virt_addr = ioremap_wt(pmem->phys_addr, pmem->size);
	if (!pmem->virt_addr)
		goto out_release_region;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		goto out_unmap;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
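	/* cap requests at 1024 sectors (512 KiB) */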
	blk_queue_max_hw_sectors(pmem->pmem_queue, 1024);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);

	disk = alloc_disk(PMEM_MINORS);
	if (!disk)
		goto out_free_queue;

	idx = atomic_inc_return(&pmem_index) - 1;

	disk->major		= pmem_major;
	disk->first_minor	= PMEM_MINORS * idx;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	sprintf(disk->disk_name, "pmem%d", idx);
	disk->driverfs_dev = dev;
	set_capacity(disk, pmem->size >> 9);
	pmem->pmem_disk = disk;

	add_disk(disk);

	return pmem;

out_free_queue:
	blk_cleanup_queue(pmem->pmem_queue);
out_unmap:
	iounmap(pmem->virt_addr);
out_release_region:
	release_mem_region(pmem->phys_addr, pmem->size);
out_free_dev:
	kfree(pmem);
out:
	return ERR_PTR(err);
}
Example #24
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
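			/* size each request so it fits entirely in the bounce buffer */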
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

 cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #25
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #26
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

	/* Set max discard size, << 11 converts to megabytes in sectors */
	blk_queue_max_discard_sectors(mq->queue, 16 << 11);

	if (card->csd.cmdclass & CCC_ERASE)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
					mq->queue);

	/*
	 * Calculating a correct span is way to messy if this
	 * assumption is broken, so remove the erase support
	 */
	if (unlikely(mmc_card_blockaddr(card) &&
			(card->csd.erase_size % 512)))
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
					  mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #27
static int srb_init_disk(struct srb_device_s *dev)
{
	struct gendisk *disk = NULL;
	struct request_queue *q;
	int i;
	int ret = 0;

	SRB_LOG_INFO(srb_log, "srb_init_disk: initializing disk for device: %s", dev->name);

	/* create gendisk info */
	disk = alloc_disk(DEV_MINORS);
	if (!disk) {
		SRB_LOG_WARN(srb_log, "srb_init_disk: unable to allocate memory for disk for device: %s",
			dev->name);
		return -ENOMEM;
	}
	SRB_LOG_DEBUG(srb_log, "Creating new disk: %p", disk);

	strcpy(disk->disk_name, dev->name);
	disk->major	   = dev->major;
	disk->first_minor  = 0;
	disk->fops	   = &srb_fops;
	disk->private_data = dev;

	/* init rq */
	q = blk_init_queue(srb_rq_fn, &dev->rq_lock);
	if (!q) {
		SRB_LOG_WARN(srb_log, "srb_init_disk: unable to init block queue for device: %p, disk: %p",
			dev, disk);
		srb_free_disk(dev);
		return -ENOMEM;
	}

	blk_queue_max_hw_sectors(q, DEV_NB_PHYS_SEGS);
	q->queuedata	= dev;

	dev->disk	= disk;
	dev->q		= disk->queue = q;
	dev->nb_threads = 0;
	//blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	//blk_queue_max_phys_segments(q, DEV_NB_PHYS_SEGS);

	//TODO: Enable flush and bio (Issue #21)
	//blk_queue_flush(q, REQ_FLUSH);

	for (i = 0; i < thread_pool_size; i++) {
		//if ((ret = srb_cdmi_connect(&dev->debug, &dev->thread_cdmi_desc[i]))) {
		if ((ret = srb_cdmi_connect(&dev->debug, dev->thread_cdmi_desc[i]))) {
			SRB_LOG_ERR(srb_log, "Unable to connect to CDMI endpoint: %d",
				ret);
			srb_free_disk(dev);
			return -EIO;
		}
	}
	/* Caution: be sure to call this before spawning threads */
	ret = srb_cdmi_getsize(&dev->debug, dev->thread_cdmi_desc[0], &dev->disk_size);
	if (ret != 0) {
		SRB_LOG_ERR(srb_log, "Could not retrieve volume size.");
		srb_free_disk(dev);
		return ret;
	}

	set_capacity(disk, dev->disk_size / 512ULL);

	for (i = 0; i < thread_pool_size; i++) {
		dev->thread[i] = kthread_create(srb_thread, dev, "%s",
						dev->disk->disk_name);
		if (IS_ERR(dev->thread[i])) {
			SRB_LOG_ERR(srb_log, "Unable to create worker thread (id %d)", i);
			dev->thread[i] = NULL;
			srb_free_disk(dev);
			goto err_kthread;
		}
		wake_up_process(dev->thread[i]);
	}
	add_disk(disk);

	SRBDEV_LOG_INFO(dev, "Attached volume %s of size 0x%llx",
	                disk->disk_name, (unsigned long long)dev->disk_size);

	return 0;

err_kthread:
	for (i = 0; i < thread_pool_size; i++) {
		if (dev->thread[i] != NULL)
			kthread_stop(dev->thread[i]);
	}

	return -EIO;
}
Example #28
int td_linux_block_create(struct td_osdev *dev)
{
	int rc;
	struct request_queue *queue;
	unsigned bio_sector_size = dev->block_params.bio_sector_size;
	unsigned hw_sector_size = dev->block_params.hw_sector_size;

	/* very simple sector size support */
	if (!bio_sector_size || bio_sector_size & 511 || bio_sector_size > 4096) {
		td_os_err(dev, "bio sector size of %u is not supported\n", bio_sector_size);
		return -EINVAL;
	}

	/* MetaData is reported here */
	if (hw_sector_size == 520)
		hw_sector_size = 512;
	if (!hw_sector_size || hw_sector_size & 511 || hw_sector_size > 4096) {
		td_os_err(dev, "hw sector size of %u is not supported\n", hw_sector_size);
		return -EINVAL;
	}

	td_os_notice(dev, " - Set capacity to %llu (%u bytes/sector)\n",
		dev->block_params.capacity, dev->block_params.hw_sector_size);

	/* create a new bio queue */
	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		td_os_err(dev, "Error allocating disk queue.\n");
		rc = -ENOMEM;
		goto error_alloc_queue;
	}

#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
#endif
	
	switch (dev->type) {
	case TD_OSDEV_DEVICE:
		blk_queue_make_request(queue, td_device_make_request);
		dev->_bio_error = td_device_bio_error;
		break;
	case TD_OSDEV_RAID:
		blk_queue_make_request(queue, td_raid_make_request);
		dev->_bio_error = td_raid_bio_error;
		break;
		
	default:
		td_os_err(dev, "Unknown OS type, cannot register block request handler\n");
		goto error_config_queue;
	}
	queue->queuedata = dev;

#if defined QUEUE_FLAG_PLUGGED 
	queue->unplug_fn = td_device_queue_unplug;
#endif

	/* configure queue ordering */

	/* in QUEUE_ORDERED_DRAIN we will get BARRIERS after the queue has
	 * been drained. */
#if defined KABI__blk_queue_ordered

#if KABI__blk_queue_ordered == 2
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN);
#elif KABI__blk_queue_ordered == 3
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN, NULL);
#else
#error unhandled value of KABI__blk_queue_ordered
#endif

#elif defined KABI__blk_queue_flush
	/*
	 * blk_queue_ordered was replaced with blk_queue_flush 
	 * The default implementation is QUEUE_ORDERED_DRAIN
	 */
	blk_queue_flush(queue, 0);
#else
#error undefined KABI__blk_queue_flush or KABI__blk_queue_ordered
#endif

	/* max out the throttling */
#ifdef KABI__blk_queue_max_hw_sectors
	blk_queue_max_hw_sectors(queue, dev->block_params.bio_max_bytes/512);
#elif defined KABI__blk_queue_max_sectors
	blk_queue_max_sectors(queue, dev->block_params.bio_max_bytes/512);
#else
	td_os_err(dev, "No kernel API for maximum sectors\n");
#endif

#if defined KABI__blk_queue_max_segments
	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
#elif defined KABI__blk_queue_max_phys_segments
	blk_queue_max_phys_segments(queue, MAX_SEGMENT_SIZE);
	blk_queue_max_hw_segments(queue, MAX_SEGMENT_SIZE);
#else
	td_os_err(dev, "No kernel API for maximum segments\n");
#endif

	blk_queue_max_segment_size(queue, dev->block_params.bio_max_bytes);

	blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);

	/* setup paged based access */
	td_os_info(dev, "Set queue physical block size to %u\n", hw_sector_size);
#ifdef KABI__blk_queue_physical_block_size
	blk_queue_physical_block_size(queue, hw_sector_size);
#elif defined KABI__blk_queue_hardsect_size
	blk_queue_hardsect_size(queue, hw_sector_size);
#else
	td_os_err(dev, "No kernel API for physical sector size\n");
#endif

#ifdef KABI__blk_queue_logical_block_size
	td_os_info(dev, "Set queue logical block size to %u\n", bio_sector_size);
	blk_queue_logical_block_size(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for logical block size\n");
#endif
#ifdef KABI__blk_queue_io_min
	td_os_info(dev, "Set queue io_min to %u\n", bio_sector_size);
	blk_queue_io_min(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for minimum IO size\n");
#endif
#ifdef KABI__blk_queue_io_opt
	td_os_info(dev, "Set queue io_opt to %u\n", dev->block_params.bio_max_bytes);
	blk_queue_io_opt(queue,  dev->block_params.bio_max_bytes);
#else
	td_os_err(dev, "No kernel API for optimal IO size\n");
#endif

#if 0
	if (dev->block_params.discard)
	{
		int did_something = 0;
#if defined KABI__blk_queue_discard_granularity
		queue->limits.discard_granularity = bio_sector_size;
		did_something++;
#endif
#ifdef KABI__blk_queue_max_discard_sectors
		/* 0xFFFF (max sector size of chunk on trim) * 64  * # SSD */
		blk_queue_max_discard_sectors(queue, TD_MAX_DISCARD_LBA_COUNT * 2);
		did_something++;
#endif
#ifdef KABI__blk_queue_discard_zeroes_data
		queue->limits.discard_zeroes_data = 1;
		did_something++;
#endif
#ifdef KABI__queue_flag_set_unlocked
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
		did_something++;
#endif
		/* Maybe some day.. But not today. 
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, queue);
		*/
		if (did_something)
			td_os_info(dev, "Enabling discard support\n");
		else
			td_os_notice(dev, "No kernel API for discard support\n");
	} else {
		td_os_info(dev, "No DISCARD support enabled\n");
	}
#else
	/* bug 7444 */
	if (dev->block_params.discard)
		td_os_info(dev, "Device supports DISCARD but is currently being forced disabled\n");
#endif

	/*  assign */
	dev->queue = queue;

	return 0;

error_config_queue:
	blk_cleanup_queue(dev->queue);
	dev->queue = NULL;

error_alloc_queue:
	return rc;
}