Example #1
/*
 * Allocate memory for a new zvol_state_t and set up the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	if (zv == NULL)
		goto out;

	zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
	if (zv->zv_queue == NULL)
		goto out_kmem;

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, DISK_NAME_LEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s", name);

	return zv;

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));
out:
	return NULL;
}
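A minimal sketch of the matching teardown (assuming the usual zvol_free() counterpart, which this example does not show) releases the same resources in reverse order:

/* Sketch only: names follow the allocation above, not a verified source. */
static void
zvol_free(zvol_state_t *zv)
{
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	del_gendisk(zv->zv_disk);
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}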
Example #2
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

#ifndef CONFIG_ARCH_EMXX
	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);
#endif
	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

#ifdef CONFIG_ARCH_EMXX
	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
#endif

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

#ifndef CONFIG_ARCH_EMXX
	blk_cleanup_queue(mq->queue);
#endif

	mq->card = NULL;
}
Example #3
static void sbull_exit(void)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		struct sbull_dev *dev = Devices + i;

//		del_timer_sync(&dev->timer);
		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		if (dev->data)
			vfree(dev->data);
	}
	unregister_blkdev(sbull_major, "sbull");
	kfree(Devices);
}
Example #4
/* This routine gets called during initialization if things go wrong,
 * and is used in mcd_exit as well. */
static void cleanup(int level)
{
	switch (level) {
	case 3:
		if (unregister_cdrom(&mcd_info)) {
			printk(KERN_WARNING "Can't unregister cdrom mcd\n");
			return;
		}
		free_irq(mcd_irq, NULL);
	case 2:
		release_region(mcd_port, 4);
	case 1:
		if (devfs_unregister_blkdev(MAJOR_NR, "mcd")) {
			printk(KERN_WARNING "Can't unregister major mcd\n");
			return;
		}
		blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
	default:;
	}
}
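The cases fall through deliberately: cleanup(n) tears down stage n and every stage below it. A hypothetical init path (names assumed, not taken from the example) would pass the number of stages already completed:

/* Sketch: each failure passes how many setup stages succeeded before it. */
static int __init mcd_like_init(void)
{
	if (devfs_register_blkdev(MAJOR_NR, "mcd", &mcd_fops))
		return -EIO;			/* stage 1 failed, nothing to undo */
	if (!request_region(mcd_port, 4, "mcd")) {
		cleanup(1);			/* undo stage 1 */
		return -EBUSY;
	}
	if (request_irq(mcd_irq, mcd_interrupt, 0, "mcd", NULL)) {
		cleanup(2);			/* undo stages 1-2 */
		return -EIO;
	}
	return 0;
}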
Example #5
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
Example #6
static int xsysace_remove(struct device *dev)
{
	if (!dev)
		return -EINVAL;

	proc_cleanup();

	if (old_restart)
		ppc_md.restart = old_restart;

	unregister_blkdev(xsa_major, MAJOR_NAME);
	del_gendisk(xsa_gendisk);
	blk_cleanup_queue(xsysace_queue);
	XSysAce_DisableInterrupt(&SysAce);
	free_irq(xsa_irq, NULL);
	iounmap((void *)(SysAce.BaseAddress));
	release_mem_region(xsa_phys_addr, xsa_remap_size);

	return 0;		/* success */
}
Example #7
static void
aoedev_freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;

	if (d->gd) {
		aoedisk_rm_sysfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
	}
	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		freetgt(d, *t);
	if (d->bufpool)
		mempool_destroy(d->bufpool);
	skbpoolfree(d);
	blk_cleanup_queue(d->blkq);
	kfree(d);
}
Example #8
static void __exit ndas_exit(void)
{
	struct ndas_dev *dev;

	func();

	dev = Device;
	if (dev) {
		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue) {
			blk_cleanup_queue(dev->queue);
		}
		kfree(dev);
	}
	if (major_number > 0)
		unregister_blkdev(major_number, "myndas");
}
Example #9
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devmaj = MAJOR(disk_devt(md->disk));
		int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;

		if (!devmaj)
			devidx = md->disk->first_minor >> MMC_SHIFT;

		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		if (md->bounce)
			kfree(md->bounce);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
Example #10
static void ubiblk_exit(void)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		struct ubiblk_dev *dev = Devices + i;
		
		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue) {
			blk_cleanup_queue(dev->queue);
		}
		if (dev->ubi_vol) {
			ubi_close_volume(dev->ubi_vol);
		}
	}
	unregister_blkdev(ubiblk_major, "ubiblk");
	kfree(Devices);
}
Example #11
static int __init simp_blkdev_init(void) {
    int ret;

    simp_blkdev_data = vmalloc(SIMP_BLKDEV_BYTES);
    if (!simp_blkdev_data) {
        ret = -ENOMEM;
        goto err_alloc_data;
    }

    simp_blkdev_queue = blk_init_queue(simp_blkdev_do_request, NULL);
    if (!simp_blkdev_queue) {
        ret = -ENOMEM;
        goto err_init_queue;
    }

    simp_blkdev_disk = alloc_disk(1);
    if (!simp_blkdev_disk) {
        ret = -ENOMEM;
        goto err_alloc_disk;
    }

    strcpy(simp_blkdev_disk->disk_name, SIMP_BLKDEV_DISKNAME);
    simp_blkdev_disk->major = SIMP_BLKDEV_DEVICEMAJOR;
    simp_blkdev_disk->first_minor = 0;
    simp_blkdev_disk->fops = &simp_blkdev_fops;
    simp_blkdev_disk->queue = simp_blkdev_queue;
    set_capacity(simp_blkdev_disk, SIMP_BLKDEV_BYTES >> 9);

    add_disk(simp_blkdev_disk);
    return 0;

err_alloc_disk:
    blk_cleanup_queue(simp_blkdev_queue);  /* undo blk_init_queue() */
err_init_queue:
    vfree(simp_blkdev_data);
err_alloc_data:
    return ret;
}
Example #12
static void free_dev(struct mapped_device *md)
{
	int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		thaw_bdev(md->suspended_bdev, NULL);
		bdput(md->suspended_bdev);
	}
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
Example #13
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (host->dev->dma_mask && *host->dev->dma_mask)
		limit = *host->dev->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_sectors);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	init_completion(&mq->thread_complete);
	init_waitqueue_head(&mq->thread_wq);
	init_MUTEX(&mq->thread_sem);

	ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
	if (ret < 0) {
		blk_cleanup_queue(mq->queue);
	} else {
		wait_for_completion(&mq->thread_complete);
		init_completion(&mq->thread_complete);
		ret = 0;
	}

	return ret;
}
Example #14
/*
 * Free internal disk
 */
static int srb_free_disk(struct srb_device_s *dev)
{
	struct gendisk *disk = NULL;

	disk = dev->disk;
	if (!disk) {
		SRBDEV_LOG_ERR(dev, "Disk is not available anymore (%s)", dev->name);
		return -EINVAL;
	}
	dev->disk = NULL;

	/* free disk */
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
	}

	put_disk(disk);

	return 0;
}
Example #15
static void
freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int freeing = 0;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_TKILL
	&& !(d->flags & DEVFL_FREEING)) {
		d->flags |= DEVFL_FREEING;
		freeing = 1;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	if (!freeing)
		return;

	del_timer_sync(&d->timer);
	aoedbg_undbg(d);
	if (d->gd) {
		aoedisk_rm_debugfs(d);
		aoedisk_rm_sysfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
		blk_cleanup_queue(d->blkq);
	}
	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		freetgt(d, *t);
	if (d->bufpool)
		mempool_destroy(d->bufpool);
	skbpoolfree(d);
	minor_free(d->sysminor);

	spin_lock_irqsave(&d->lock, flags);
	d->flags |= DEVFL_FREED;
	spin_unlock_irqrestore(&d->lock, flags);
}
Example #16
static void __exit mcdx_exit(void)
{
	int i;

	xinfo("cleanup_module called\n");

	for (i = 0; i < MCDX_NDRIVES; i++) {
		struct s_drive_stuff *stuffp = mcdx_stuffp[i];
		if (!stuffp)
			continue;
		del_gendisk(stuffp->disk);
		if (unregister_cdrom(&stuffp->info)) {
			printk(KERN_WARNING "Can't unregister cdrom mcdx\n");
			continue;
		}
		put_disk(stuffp->disk);
		release_region(stuffp->wreg_data, MCDX_IO_SIZE);
		free_irq(stuffp->irq, NULL);
		if (stuffp->toc) {
			xtrace(MALLOC, "cleanup_module() free toc @ %p\n",
			       stuffp->toc);
			kfree(stuffp->toc);
		}
		xtrace(MALLOC, "cleanup_module() free stuffp @ %p\n",
		       stuffp);
		mcdx_stuffp[i] = NULL;
		kfree(stuffp);
	}

	if (unregister_blkdev(MAJOR_NR, "mcdx") != 0) {
		xwarn("cleanup() unregister_blkdev() failed\n");
	}
#if !MCDX_QUIET
	else
	xinfo("cleanup() succeeded\n");
#endif
	blk_cleanup_queue(mcdx_queue);
}
Example #17
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
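Clearing q->queuedata under the queue lock pairs with a NULL check in the request function; a sketch of that consumer side (reconstructed from the same era's pattern, details assumed) shows why stragglers can be thrown out safely:

/* Sketch of the request function this cleanup synchronizes against. */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		/* queue is being torn down: fail any straggling requests */
		while ((req = blk_fetch_request(q)) != NULL)
			__blk_end_request_all(req, -EIO);
		return;
	}

	wake_up_process(mq->thread);
}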
Example #18
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk(0);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;

	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
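A sketch of the inverse path (function name assumed; not shown in the example) undoes the attach in reverse:

/* Sketch only: assumed counterpart of pmem_attach_disk(). */
static void pmem_detach_disk(struct pmem_device *pmem)
{
	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}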
Example #19
void block_cleanup(void)
{
  if (gdisk != NULL) {
    /* remove the disk from the system, then drop our reference */
    del_gendisk(gdisk);
    put_disk(gdisk);
  }

  if (major_number > 0) {
    /* release the registered major number */
    unregister_blkdev(major_number, DEVICE_NAME);
  }

  if (queue != NULL) {
    /* drain and free the request queue */
    blk_cleanup_queue(queue);
  }

  if (data != NULL) {
    /* free the backing store */
    vfree(data);
  }
}
Example #20
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
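A sketch of the corresponding removal path (assuming the usual null_del_dev() counterpart) unwinds the same resources the error labels above cover, plus the registered disk:

/* Sketch only: assumed teardown pairing with null_add_dev(). */
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}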
Example #21
static int __init hd_init(void)
{
	int drive;

	if (register_blkdev(MAJOR_NR,"hd"))
		return -1;

	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
	if (!hd_queue) {
		unregister_blkdev(MAJOR_NR,"hd");
		return -ENOMEM;
	}

	blk_queue_max_sectors(hd_queue, 255);
	init_timer(&device_timer);
	device_timer.function = hd_times_out;
	blk_queue_hardsect_size(hd_queue, 512);

#ifdef __i386__
	if (!NR_HD) {
		extern struct drive_info drive_info;
		unsigned char *BIOS = (unsigned char *) &drive_info;
		unsigned long flags;
		int cmos_disks;

		for (drive=0 ; drive<2 ; drive++) {
			hd_info[drive].cyl = *(unsigned short *) BIOS;
			hd_info[drive].head = *(2+BIOS);
			hd_info[drive].wpcom = *(unsigned short *) (5+BIOS);
			hd_info[drive].ctl = *(8+BIOS);
			hd_info[drive].lzone = *(unsigned short *) (12+BIOS);
			hd_info[drive].sect = *(14+BIOS);
#ifdef does_not_work_for_everybody_with_scsi_but_helps_ibm_vp
			if (hd_info[drive].cyl && NR_HD == drive)
				NR_HD++;
#endif
			BIOS += 16;
		}

	/*
		We query CMOS about hard disks : it could be that 
		we have a SCSI/ESDI/etc controller that is BIOS
		compatible with ST-506, and thus showing up in our
		BIOS table, but not register compatible, and therefore
		not present in CMOS.

		Furthermore, we will assume that our ST-506 drives
		<if any> are the primary drives in the system, and 
		the ones reflected as drive 1 or 2.

		The first drive is stored in the high nibble of CMOS
		byte 0x12, the second in the low nibble.  This will be
		either a 4 bit drive type or 0xf indicating use byte 0x19 
		for an 8 bit type, drive 1, 0x1a for drive 2 in CMOS.

		Needless to say, a non-zero value means we have 
		an AT controller hard disk for that drive.

		Currently the rtc_lock is a bit academic since this
		driver is non-modular, but someday... ?         Paul G.
	*/

		spin_lock_irqsave(&rtc_lock, flags);
		cmos_disks = CMOS_READ(0x12);
		spin_unlock_irqrestore(&rtc_lock, flags);

		if (cmos_disks & 0xf0) {
			if (cmos_disks & 0x0f)
				NR_HD = 2;
			else
				NR_HD = 1;
		}
	}
#endif /* __i386__ */
#ifdef __arm__
	if (!NR_HD) {
		/* We don't know anything about the drive.  This means
		 * that you *MUST* specify the drive parameters to the
		 * kernel yourself.
		 */
		printk("hd: no drives specified - use hd=cyl,head,sectors"
			" on kernel command line\n");
	}
#endif
	if (!NR_HD)
		goto out;

	for (drive=0 ; drive < NR_HD ; drive++) {
		struct gendisk *disk = alloc_disk(64);
		struct hd_i_struct *p = &hd_info[drive];
		if (!disk)
			goto Enomem;
		disk->major = MAJOR_NR;
		disk->first_minor = drive << 6;
		disk->fops = &hd_fops;
		sprintf(disk->disk_name, "hd%c", 'a'+drive);
		disk->private_data = p;
		set_capacity(disk, p->head * p->sect * p->cyl);
		disk->queue = hd_queue;
		p->unit = drive;
		hd_gendisk[drive] = disk;
		printk ("%s: %luMB, CHS=%d/%d/%d\n",
			disk->disk_name, (unsigned long)get_capacity(disk)/2048,
			p->cyl, p->head, p->sect);
	}

	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
		printk("hd: unable to get IRQ%d for the hard disk driver\n",
			HD_IRQ);
		goto out1;
	}
	if (!request_region(HD_DATA, 8, "hd")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
		goto out2;
	}
	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
		goto out3;
	}

	/* Let them fly */
	for(drive=0; drive < NR_HD; drive++)
		add_disk(hd_gendisk[drive]);

	return 0;

out3:
	release_region(HD_DATA, 8);
out2:
	free_irq(HD_IRQ, NULL);
out1:
	for (drive = 0; drive < NR_HD; drive++)
		put_disk(hd_gendisk[drive]);
	NR_HD = 0;
out:
	del_timer(&device_timer);
	unregister_blkdev(MAJOR_NR,"hd");
	blk_cleanup_queue(hd_queue);
	return -1;
Enomem:
	while (drive--)
		put_disk(hd_gendisk[drive]);
	goto out;
}
Example #22
/*
 * Allocate memory for a new zvol_state_t and set up the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	int error = 0;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	if (zv == NULL)
		goto out;

	zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
	if (zv->zv_queue == NULL)
		goto out_kmem;

#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(zv->zv_queue, "noop");
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error) {
		printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
		    "noop", name, error);
		goto out_queue;
	}

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return zv;

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));
out:
	return NULL;
}
Example #23
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;
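		/* MMC_QUEUE_BOUNCESZ is typically 64 KiB (an assumption, not
		 * shown here), so bouncesz / 512 below caps requests at 128
		 * sectors and 128 segments. */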

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #24
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    mq->queue->queuedata = mq;
    mq->req = NULL;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
    if (mmc_can_erase(card)) {
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
        mq->queue->limits.max_discard_sectors = UINT_MAX;
        if (card->erased_byte == 0)
            mq->queue->limits.discard_zeroes_data = 1;
        mq->queue->limits.discard_granularity = card->pref_erase << 9;
        if (mmc_can_secure_erase_trim(card))
            queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
                                    mq->queue);
    }

#ifdef CONFIG_MMC_BLOCK_BOUNCE
    if (host->max_segs == 1) {
        unsigned int bouncesz;

        bouncesz = MMC_QUEUE_BOUNCESZ;

        if (bouncesz > host->max_req_size)
            bouncesz = host->max_req_size;
        if (bouncesz > host->max_seg_size)
            bouncesz = host->max_seg_size;
        if (bouncesz > (host->max_blk_count * 512))
            bouncesz = host->max_blk_count * 512;

        if (bouncesz > 512) {
            mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
            if (!mq->bounce_buf) {
                printk(KERN_WARNING "%s: unable to "
                       "allocate bounce buffer\n",
                       mmc_card_name(card));
            }
        }

        if (mq->bounce_buf) {
            blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
            blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
            blk_queue_max_segments(mq->queue, bouncesz / 512);
            blk_queue_max_segment_size(mq->queue, bouncesz);

            mq->sg = kmalloc(sizeof(struct scatterlist),
                             GFP_KERNEL);
            if (!mq->sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->sg, 1);

            mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
                                    bouncesz / 512, GFP_KERNEL);
            if (!mq->bounce_sg) {
                ret = -ENOMEM;
                goto cleanup_queue;
            }
            sg_init_table(mq->bounce_sg, bouncesz / 512);
        }
    }
#endif

    if (!mq->bounce_buf) {
        blk_queue_bounce_limit(mq->queue, limit);
        blk_queue_max_hw_sectors(mq->queue,
                                 min(host->max_blk_count, host->max_req_size / 512));
        blk_queue_max_segments(mq->queue, host->max_segs);
        blk_queue_max_segment_size(mq->queue, host->max_seg_size);

        mq->sg = kmalloc(sizeof(struct scatterlist) *
                         host->max_segs, GFP_KERNEL);
        if (!mq->sg) {
            ret = -ENOMEM;
            goto cleanup_queue;
        }
        sg_init_table(mq->sg, host->max_segs);
    }

    sema_init(&mq->thread_sem, 1);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                             host->index, subname ? subname : "");

    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_bounce_sg;
    }

    return 0;
free_bounce_sg:
    if (mq->bounce_sg)
        kfree(mq->bounce_sg);
    mq->bounce_sg = NULL;
cleanup_queue:
    if (mq->sg)
        kfree(mq->sg);
    mq->sg = NULL;
    if (mq->bounce_buf)
        kfree(mq->bounce_buf);
    mq->bounce_buf = NULL;
    blk_cleanup_queue(mq->queue);
    return ret;
}
Example #25
int nbdx_register_block_device(struct nbdx_file *nbdx_file)
{
	sector_t size = nbdx_file->stbuf.st_size;
	int page_size = PAGE_SIZE;
	int err = 0;

	pr_debug("%s called\n", __func__);

	nbdx_file->major = nbdx_major;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
	nbdx_mq_reg.nr_hw_queues = submit_queues;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_mq_reg, nbdx_file);
#else
	nbdx_file->tag_set.ops = &nbdx_mq_ops;
	nbdx_file->tag_set.nr_hw_queues = submit_queues;
	nbdx_file->tag_set.queue_depth = NBDX_QUEUE_DEPTH;
	nbdx_file->tag_set.numa_node = NUMA_NO_NODE;
	nbdx_file->tag_set.cmd_size	= sizeof(struct raio_io_u);
	nbdx_file->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	nbdx_file->tag_set.driver_data = nbdx_file;

	err = blk_mq_alloc_tag_set(&nbdx_file->tag_set);
	if (err)
		goto out;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_file->tag_set);
#endif
	if (IS_ERR(nbdx_file->queue)) {
		pr_err("%s: Failed to allocate blk queue ret=%ld\n",
		       __func__, PTR_ERR(nbdx_file->queue));
		err = PTR_ERR(nbdx_file->queue);
		goto blk_mq_init;
	}

	nbdx_file->queue->queuedata = nbdx_file;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nbdx_file->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nbdx_file->queue);

	nbdx_file->disk = alloc_disk_node(1, NUMA_NO_NODE);
	if (!nbdx_file->disk) {
		pr_err("%s: Failed to allocate disk node\n", __func__);
		err = -ENOMEM;
		goto alloc_disk;
	}

	nbdx_file->disk->major = nbdx_file->major;
	nbdx_file->disk->first_minor = nbdx_file->index;
	nbdx_file->disk->fops = &nbdx_ops;
	nbdx_file->disk->queue = nbdx_file->queue;
	nbdx_file->disk->private_data = nbdx_file;
	blk_queue_logical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	blk_queue_physical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	sector_div(page_size, NBDX_SECT_SIZE);
	blk_queue_max_hw_sectors(nbdx_file->queue, page_size * MAX_SGL_LEN);
	sector_div(size, NBDX_SECT_SIZE);
	set_capacity(nbdx_file->disk, size);
	sscanf(nbdx_file->dev_name, "%s", nbdx_file->disk->disk_name);
	add_disk(nbdx_file->disk);
	goto out;

alloc_disk:
	blk_cleanup_queue(nbdx_file->queue);
blk_mq_init:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	blk_mq_free_tag_set(&nbdx_file->tag_set);
#endif
out:
	return err;
}
Example #26
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		if(!mmc_card_sd(card))
			bouncesz = MMC_QUEUE_BOUNCESZ;
		else
			bouncesz = MMC_QUEUE_SD_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			if(!mmc_card_sd(card))
				mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_cur->bounce_buf = mmc_queue_cur_bounce_buf;
			if (!mqrq_cur->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			if(!mmc_card_sd(card))
				mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			else
				mqrq_prev->bounce_buf = mmc_queue_prev_bounce_buf;
			if (!mqrq_prev->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;


		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	if(!mmc_card_sd(card))
		kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	if(!mmc_card_sd(card))
		kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #27
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	bioset_free(md->bs);
 bad_no_bioset:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	module_put(THIS_MODULE);
 bad0:
	kfree(md);
	return NULL;
}
Example #28
int __init mcd_init(void)
{
	struct gendisk *disk = alloc_disk(1);
	int count;
	unsigned char result[3];
	char msg[80];

	if (!disk) {
		printk(KERN_INFO "mcd: can't allocated disk.\n");
		return -ENOMEM;
	}
	if (mcd_port <= 0 || mcd_irq <= 0) {
		printk(KERN_INFO "mcd: not probing.\n");
		put_disk(disk);
		return -EIO;
	}
	if (register_blkdev(MAJOR_NR, "mcd")) {
		put_disk(disk);
		return -EIO;
	}
	if (!request_region(mcd_port, 4, "mcd")) {
		printk(KERN_ERR "mcd: Initialization failed, I/O port (%X) already in use\n", mcd_port);
		goto out_region;
	}

	mcd_queue = blk_init_queue(do_mcd_request, &mcd_spinlock);
	if (!mcd_queue)
		goto out_queue;

	/* check for card */

	outb(0, MCDPORT(1));	/* send reset */
	for (count = 0; count < 2000000; count++)
		(void) inb(MCDPORT(1));	/* delay a bit */

	outb(0x40, MCDPORT(0));	/* send get-stat cmd */
	for (count = 0; count < 2000000; count++)
		if (!(inb(MCDPORT(1)) & MFL_STATUS))
			break;

	if (count >= 2000000) {
		printk(KERN_INFO "mcd: initialisation failed - No mcd device at 0x%x irq %d\n",
		       mcd_port, mcd_irq);
		goto out_probe;
	}
	count = inb(MCDPORT(0));	/* pick up the status */

	outb(MCMD_GET_VERSION, MCDPORT(0));
	for (count = 0; count < 3; count++)
		if (getValue(result + count)) {
			printk(KERN_ERR "mcd: mitsumi get version failed at 0x%x\n",
			       mcd_port);
			goto out_probe;
		}

	if (result[0] == result[1] && result[1] == result[2])
		goto out_probe;

	mcdVersion = result[2];

	if (mcdVersion >= 4)
		outb(4, MCDPORT(2));	/* magic happens */

	/* don't get the IRQ until we know for sure the drive is there */

	if (request_irq(mcd_irq, mcd_interrupt, SA_INTERRUPT, "Mitsumi CD", NULL)) {
		printk(KERN_ERR "mcd: Unable to get IRQ%d for Mitsumi CD-ROM\n", mcd_irq);
		goto out_probe;
	}

	if (result[1] == 'D') {
		MCMD_DATA_READ = MCMD_2X_READ;
		/* Added flag to drop to 1x speed if too many errors */
		mcdDouble = 1;
	} else
		mcd_info.speed = 1;
	sprintf(msg, " mcd: Mitsumi %s Speed CD-ROM at port=0x%x,"
		" irq=%d\n", mcd_info.speed == 1 ? "Single" : "Double",
		mcd_port, mcd_irq);

	outb(MCMD_CONFIG_DRIVE, MCDPORT(0));
	outb(0x02, MCDPORT(0));
	outb(0x00, MCDPORT(0));
	getValue(result);

	outb(MCMD_CONFIG_DRIVE, MCDPORT(0));
	outb(0x10, MCDPORT(0));
	outb(0x04, MCDPORT(0));
	getValue(result);

	mcd_invalidate_buffers();
	mcdPresent = 1;

	disk->major = MAJOR_NR;
	disk->first_minor = 0;
	sprintf(disk->disk_name, "mcd");
	disk->fops = &mcd_bdops;
	disk->flags = GENHD_FL_CD;
	mcd_gendisk = disk;

	if (register_cdrom(&mcd_info) != 0) {
		printk(KERN_ERR "mcd: Unable to register Mitsumi CD-ROM.\n");
		goto out_cdrom;
	}
	disk->queue = mcd_queue;
	add_disk(disk);
	printk(msg);
	return 0;

out_cdrom:
	free_irq(mcd_irq, NULL);
out_probe:
	/* out_probe must precede out_queue: a failed blk_init_queue()
	 * jumps to out_queue and must skip blk_cleanup_queue(NULL). */
	blk_cleanup_queue(mcd_queue);
out_queue:
	release_region(mcd_port, 4);
out_region:
	unregister_blkdev(MAJOR_NR, "mcd");
	put_disk(disk);
	return -EIO;
}
Example #29
/* --------------------------------------------------------------------
 * SystemACE device setup/teardown code
 */
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_logical_block_size(ace->queue, 512);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* 0x0101 should work regardless of endianness */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianness */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Now we can hook up the irq handler */
	if (ace->irq != NO_IRQ) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = NO_IRQ;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);

	return 0;

err_read:
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		 (unsigned long long) ace->physaddr);
	return -ENOMEM;
}
Example #30
static int htifblk_probe(struct device *dev)
{
	static unsigned int index = 0;
	static const char prefix[] = " size=";

	struct htif_device *htif_dev;
	struct htifblk_device *htifblk_dev;
	struct gendisk *disk;
	struct request_queue *queue;
	const char *str;
	u64 size;
	int ret;

	dev_info(dev, "detected disk\n");
	htif_dev = to_htif_dev(dev);

	str = strstr(htif_dev->id, prefix);
	if (unlikely(str == NULL
	    || kstrtou64(str + sizeof(prefix) - 1, 10, &size))) {
		dev_err(dev, "error determining size of disk\n");
		return -ENODEV;
	}
	if (unlikely(size & (SECTOR_SIZE - 1))) {
		dev_warn(dev, "disk size not a multiple of sector size:"
			" %llu\n", size);
	}

	ret = -ENOMEM;
	htifblk_dev = devm_kzalloc(dev, sizeof(struct htifblk_device), GFP_KERNEL);
	if (unlikely(htifblk_dev == NULL))
		goto out;

	htifblk_dev->size = size;
	htifblk_dev->dev = htif_dev;
	htifblk_dev->tag = index;
	spin_lock_init(&htifblk_dev->lock);

	disk = alloc_disk(1);
	if (unlikely(disk == NULL))
		goto out;

	queue = blk_init_queue(htifblk_request, &htifblk_dev->lock);
	if (unlikely(queue == NULL))
		goto out_put_disk;

	queue->queuedata = htifblk_dev;
	blk_queue_max_segments(queue, 1);
	blk_queue_dma_alignment(queue, HTIF_ALIGN - 1);

	disk->queue = queue;
	disk->major = major;
	disk->minors = 1;
	disk->first_minor = 0;
	disk->fops = &htifblk_fops;
	set_capacity(disk, size >> SECTOR_SIZE_SHIFT);
	snprintf(disk->disk_name, DISK_NAME_LEN - 1, "htifblk%u", index++);

	htifblk_dev->disk = disk;
	add_disk(disk);
	dev_info(dev, "added %s\n", disk->disk_name);

	ret = htif_request_irq(htif_dev, htifblk_isr);
	if (unlikely(ret))
		goto out_del_disk;

	dev_set_drvdata(dev, htifblk_dev);
	return 0;

out_del_disk:
	del_gendisk(disk);
	blk_cleanup_queue(disk->queue);
out_put_disk:
	put_disk(disk);
out:
	return ret;
}