/*
 * ide_gd_probe - driver probe for an IDE disk/floppy drive.
 *
 * Selects the media-specific disk ops (ATA disk vs ATAPI floppy, depending
 * on which CONFIG_IDE_GD_* options are built in), allocates the driver
 * object and gendisk, wires them together, and registers the disk.
 *
 * Returns 0 on success, -ENODEV if the drive is not handled by this driver
 * or an allocation fails.
 */
static int ide_gd_probe(ide_drive_t *drive)
{
	const struct ide_disk_ops *disk_ops = NULL;
	struct ide_disk_obj *idkp;
	struct gendisk *g;

	/* strstr("foo", "") is non-NULL */
	if (!strstr("ide-gd", drive->driver_req))
		goto failed;

#ifdef CONFIG_IDE_GD_ATA
	if (drive->media == ide_disk)
		disk_ops = &ide_ata_disk_ops;
#endif
#ifdef CONFIG_IDE_GD_ATAPI
	if (drive->media == ide_floppy)
		disk_ops = &ide_atapi_disk_ops;
#endif
	/* media type not compiled in: not ours */
	if (disk_ops == NULL)
		goto failed;

	if (disk_ops->check(drive, DRV_NAME) == 0) {
		printk(KERN_ERR PFX "%s: not supported by this driver\n",
			drive->name);
		goto failed;
	}

	idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
	if (!idkp) {
		printk(KERN_ERR PFX "%s: can't allocate a disk structure\n",
			drive->name);
		goto failed;
	}

	g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
	if (!g)
		goto out_free_idkp;

	ide_init_disk(g, drive);

	kref_init(&idkp->kref);

	idkp->drive = drive;
	idkp->driver = &ide_gd_driver;
	idkp->disk = g;

	g->private_data = &idkp->driver;

	drive->driver_data = idkp;
	drive->debug_mask = debug_mask;
	drive->disk_ops = disk_ops;

	/* media-specific initialisation (geometry, capacity probing, ...) */
	disk_ops->setup(drive);

	set_capacity(g, ide_gd_capacity(drive));

	g->minors = IDE_DISK_MINORS;
	g->driverfs_dev = &drive->gendev;
	g->flags |= GENHD_FL_EXT_DEVT;
	/*
	 * NOTE(review): plain assignment clobbers GENHD_FL_EXT_DEVT set just
	 * above for removable media; if extended devt numbering should be
	 * kept for removable drives this wants to be `|=` — confirm intent.
	 */
	if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
		g->flags = GENHD_FL_REMOVABLE;
	g->fops = &ide_gd_ops;
	add_disk(g);
	return 0;

out_free_idkp:
	kfree(idkp);
failed:
	return -ENODEV;
}
/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume.  Once this function returns the block
 * device is live and ready for use.
 *
 * Returns 0 on success; EEXIST if a minor already exists for @name,
 * EAGAIN if no minor/state could be allocated, or the error from the
 * DMU/ZAP lookups.  The return value is passed through SET_ERROR().
 */
static int
zvol_create_minor_impl(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	uint64_t len;
	unsigned minor = 0;
	int error = 0;

	mutex_enter(&zvol_state_lock);

	/* a minor for this volume must not already exist */
	zv = zvol_find_by_name(name);
	if (zv) {
		error = SET_ERROR(EEXIST);
		goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	/* hold the objset for the duration of minor setup */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = zvol_find_minor(&minor);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}

	/* snapshots are exposed read-only */
	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	/* capacity is in 512-byte sectors */
	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_queue, (DMU_MAX_ACCESS / 4) >> 9);
	blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_max_discard_sectors(zv->zv_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zv->zv_queue);
#endif

	/* replay (or discard, if disabled) any outstanding intent log */
	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}

	/*
	 * When udev detects the addition of the device it will immediately
	 * invoke blkid(8) to determine the type of content on the device.
	 * Prefetching the blocks commonly scanned by blkid(8) will speed
	 * up this process.
	 */
	len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
	if (len > 0) {
		dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
		dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
		    ZIO_PRIORITY_SYNC_READ);
	}

	/*
	 * Success path only: the objset is disowned below, so drop our
	 * reference before falling through to the shared cleanup label.
	 * (Error paths reach the label with zv == NULL, so this must not
	 * live after it.)
	 */
	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, zvol_tag);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
out:
	if (error == 0) {
		zvol_insert(zv);
		/*
		 * Drop the lock to prevent deadlock with sys_open() ->
		 * zvol_open(), which first takes bd_disk->bd_mutex and then
		 * takes zvol_state_lock, whereas this code path first takes
		 * zvol_state_lock, and then takes bd_disk->bd_mutex.
		 */
		mutex_exit(&zvol_state_lock);
		add_disk(zv->zv_disk);
	} else {
		mutex_exit(&zvol_state_lock);
	}

	return (SET_ERROR(error));
}
/* * Allocate and initialise a blank device with a given minor. */ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) { int r; struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; } /* get a minor number for the dev */ r = persistent ? specific_minor(minor) : next_free_minor(&minor); if (r < 0) goto bad1; memset(md, 0, sizeof(*md)); init_rwsem(&md->lock); rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); md->queue = blk_alloc_queue(GFP_KERNEL); if (!md->queue) goto bad1; md->queue->queuedata = md; md->queue->backing_dev_info.congested_fn = dm_any_congested; md->queue->backing_dev_info.congested_data = md; blk_queue_make_request(md->queue, dm_request); md->queue->unplug_fn = dm_unplug_all; md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, mempool_free_slab, _io_cache); if (!md->io_pool) goto bad2; md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, mempool_free_slab, _tio_cache); if (!md->tio_pool) goto bad3; md->disk = alloc_disk(1); if (!md->disk) goto bad4; md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); add_disk(md->disk); atomic_set(&md->pending, 0); init_waitqueue_head(&md->wait); init_waitqueue_head(&md->eventq); return md; bad4: mempool_destroy(md->tio_pool); bad3: mempool_destroy(md->io_pool); bad2: blk_put_queue(md->queue); free_minor(minor); bad1: kfree(md); return NULL; }
static int __zvol_create_minor(const char *name) { zvol_state_t *zv; objset_t *os; dmu_object_info_t *doi; uint64_t volsize; unsigned minor = 0; int error = 0; ASSERT(MUTEX_HELD(&zvol_state_lock)); zv = zvol_find_by_name(name); if (zv) { error = EEXIST; goto out; } doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP); error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os); if (error) goto out_doi; /* Make sure we have the key loaded if we need one. */ error = dsl_crypto_key_inherit(name); if (error != 0 && error != EEXIST) goto out_dmu_objset_disown; error = dmu_object_info(os, ZVOL_OBJ, doi); if (error) goto out_dmu_objset_disown; error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize); if (error) goto out_dmu_objset_disown; error = zvol_find_minor(&minor); if (error) goto out_dmu_objset_disown; zv = zvol_alloc(MKDEV(zvol_major, minor), name); if (zv == NULL) { error = EAGAIN; goto out_dmu_objset_disown; } if (dmu_objset_is_snapshot(os)) zv->zv_flags |= ZVOL_RDONLY; zv->zv_volblocksize = doi->doi_data_block_size; zv->zv_volsize = volsize; zv->zv_objset = os; set_capacity(zv->zv_disk, zv->zv_volsize >> 9); blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX); blk_queue_max_segments(zv->zv_queue, UINT16_MAX); blk_queue_max_segment_size(zv->zv_queue, UINT_MAX); blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize); blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize); #ifdef HAVE_BLK_QUEUE_DISCARD blk_queue_max_discard_sectors(zv->zv_queue, (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9); blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue); #endif #ifdef HAVE_BLK_QUEUE_NONROT queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue); #endif if (zil_replay_disable) zil_destroy(dmu_objset_zil(os), B_FALSE); else zil_replay(os, zv, zvol_replay_vector); out_dmu_objset_disown: dmu_objset_disown(os, zvol_tag); zv->zv_objset = NULL; out_doi: kmem_free(doi, 
sizeof(dmu_object_info_t)); out: if (error == 0) { zvol_insert(zv); add_disk(zv->zv_disk); } return (error); }
/*
 * hd_init - probe and register the legacy ST-506 hard disk driver.
 *
 * Registers the block major and request queue, discovers drive geometry
 * (on i386: from the BIOS drive table, confirmed via CMOS; on ARM: only
 * from hd= kernel parameters), allocates one gendisk per drive, claims
 * the IRQ and I/O regions, and finally publishes the disks.
 *
 * Returns 0 on success, -ENOMEM if the queue cannot be allocated, -1 on
 * any other failure (legacy convention kept from the original driver).
 */
static int __init hd_init(void)
{
	int drive;

	if (register_blkdev(MAJOR_NR, "hd"))
		return -1;

	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
	if (!hd_queue) {
		unregister_blkdev(MAJOR_NR, "hd");
		return -ENOMEM;
	}

	blk_queue_max_sectors(hd_queue, 255);
	init_timer(&device_timer);
	device_timer.function = hd_times_out;
	blk_queue_hardsect_size(hd_queue, 512);

#ifdef __i386__
	/* Geometry not given on the command line: read the BIOS table. */
	if (!NR_HD) {
		extern struct drive_info drive_info;
		unsigned char *BIOS = (unsigned char *) &drive_info;
		unsigned long flags;
		int cmos_disks;

		/* each 16-byte BIOS entry: cyl @0, head @2, wpcom @5,
		 * ctl @8, lzone @12, sect @14 */
		for (drive = 0; drive < 2; drive++) {
			hd_info[drive].cyl = *(unsigned short *) BIOS;
			hd_info[drive].head = *(2+BIOS);
			hd_info[drive].wpcom = *(unsigned short *) (5+BIOS);
			hd_info[drive].ctl = *(8+BIOS);
			hd_info[drive].lzone = *(unsigned short *) (12+BIOS);
			hd_info[drive].sect = *(14+BIOS);
#ifdef does_not_work_for_everybody_with_scsi_but_helps_ibm_vp
			if (hd_info[drive].cyl && NR_HD == drive)
				NR_HD++;
#endif
			BIOS += 16;
		}

		/*
		 * We query CMOS about hard disks : it could be that
		 * we have a SCSI/ESDI/etc controller that is BIOS
		 * compatible with ST-506, and thus showing up in our
		 * BIOS table, but not register compatible, and therefore
		 * not present in CMOS.
		 *
		 * Furthermore, we will assume that our ST-506 drives
		 * <if any> are the primary drives in the system, and
		 * the ones reflected as drive 1 or 2.  The first drive
		 * is stored in the high nibble of CMOS byte 0x12, the
		 * second in the low nibble.  This will be either a 4 bit
		 * drive type or 0xf indicating use byte 0x19 for an 8 bit
		 * type, drive 1, 0x1a for drive 2 in CMOS.  A non-zero
		 * value means we have an AT controller hard disk for
		 * that drive.
		 *
		 * Currently the rtc_lock is a bit academic since this
		 * driver is non-modular, but someday... ?  Paul G.
		 */
		spin_lock_irqsave(&rtc_lock, flags);
		cmos_disks = CMOS_READ(0x12);
		spin_unlock_irqrestore(&rtc_lock, flags);

		if (cmos_disks & 0xf0) {
			if (cmos_disks & 0x0f)
				NR_HD = 2;
			else
				NR_HD = 1;
		}
	}
#endif /* __i386__ */
#ifdef __arm__
	if (!NR_HD) {
		/* We don't know anything about the drive.  This means
		 * that you *MUST* specify the drive parameters to the
		 * kernel yourself. */
		printk("hd: no drives specified - use hd=cyl,head,sectors"
			" on kernel command line\n");
	}
#endif
	if (!NR_HD)
		goto out;

	/* one gendisk per detected drive; 64 minors each (hda, hdb) */
	for (drive = 0; drive < NR_HD; drive++) {
		struct gendisk *disk = alloc_disk(64);
		struct hd_i_struct *p = &hd_info[drive];
		if (!disk)
			goto Enomem;
		disk->major = MAJOR_NR;
		disk->first_minor = drive << 6;
		disk->fops = &hd_fops;
		sprintf(disk->disk_name, "hd%c", 'a'+drive);
		disk->private_data = p;
		set_capacity(disk, p->head * p->sect * p->cyl);
		disk->queue = hd_queue;
		p->unit = drive;
		hd_gendisk[drive] = disk;
		/* capacity is in 512-byte sectors, hence /2048 for MB */
		printk ("%s: %luMB, CHS=%d/%d/%d\n",
			disk->disk_name, (unsigned long)get_capacity(disk)/2048,
			p->cyl, p->head, p->sect);
	}

	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
		printk("hd: unable to get IRQ%d for the hard disk driver\n",
			HD_IRQ);
		goto out1;
	}
	if (!request_region(HD_DATA, 8, "hd")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
		goto out2;
	}
	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
		goto out3;
	}

	/* Let them fly */
	for (drive = 0; drive < NR_HD; drive++)
		add_disk(hd_gendisk[drive]);

	return 0;

	/* unwind in reverse order of acquisition */
out3:
	release_region(HD_DATA, 8);
out2:
	free_irq(HD_IRQ, NULL);
out1:
	for (drive = 0; drive < NR_HD; drive++)
		put_disk(hd_gendisk[drive]);
	NR_HD = 0;
out:
	del_timer(&device_timer);
	unregister_blkdev(MAJOR_NR, "hd");
	blk_cleanup_queue(hd_queue);
	return -1;
Enomem:
	/* free only the disks allocated so far */
	while (drive--)
		put_disk(hd_gendisk[drive]);
	goto out;
}
/*
 * Initialize the device: claim the I/O region, register a (static or
 * dynamic) block major, allocate the device state, request queue and
 * gendisk, then publish the disk.
 *
 * Returns 0 on success, -EBUSY if the region/major cannot be obtained,
 * -ENOMEM on allocation failure.  All error paths release everything
 * acquired so far (the original leaked the region, sbd and the queue).
 */
static int __init sb_init(void)
{
	int ret;
	int major = MAJOR_NODE;

	printk(KERN_NOTICE "sb: simple block device driver\n");

	if (request_region(REG_BASE, REG_LEN, DEV_NAME) == NULL) {
		printk(KERN_WARNING "sb: unable to register IOPorts %03x:%d\n",
		       REG_BASE, REG_LEN);
		return -EBUSY;
	}

	/* MAJOR_NODE == 0 requests a dynamic major (returned as ret > 0) */
	ret = register_blkdev(MAJOR_NODE, DEV_NAME);
	if ((ret <= 0) && (!MAJOR_NODE)) {
		printk(KERN_WARNING "sb: unable to register dynamic major number\n");
		release_region(REG_BASE, REG_LEN);	/* fix: was leaked */
		return -EBUSY;
	}
	if ((ret > 0) && (!MAJOR_NODE)) {
		printk(KERN_NOTICE "sb: received dynamic major %d\n", ret);
		major = ret;
	}
	if ((ret != 0) && (MAJOR_NODE)) {
		printk(KERN_WARNING "sb: unable to register static major number %d\n",
		       MAJOR_NODE);
		release_region(REG_BASE, REG_LEN);	/* fix: was leaked */
		return -EBUSY;
	}

	/* Alloc space for struct */
	sbd = kmalloc(sizeof(struct sb_dev), GFP_KERNEL);
	if (sbd == NULL)
		goto err_unregister;

	/* Create spinlock */
	spin_lock_init(&sbd->lock);

	/* Init queue */
	sbd->queue = blk_init_queue(sb_request, &sbd->lock);
	if (sbd->queue == NULL)
		goto err_free_sbd;

	/* Init gendisk stuff */
	sbd->gd = alloc_disk(MINORS);
	if (!sbd->gd)
		goto err_cleanup_queue;

	sbd->gd->major = major;
	/* Set some extra settings */
	sbd->gd->first_minor = 0;
	sbd->gd->queue = sbd->queue;
	sbd->gd->private_data = sbd;
	/* Device ops */
	sbd->gd->fops = &sb_fops;
	strcpy(sbd->gd->disk_name, "sb");
	/* Size: number of sectors * size of each sect */
	set_capacity(sbd->gd, sb_num_sectors());

	/* Tell the kernel about the disk */
	add_disk(sbd->gd);
	return 0;

	/* unwind in reverse order of acquisition */
err_cleanup_queue:
	blk_cleanup_queue(sbd->queue);
err_free_sbd:
	kfree(sbd);
	sbd = NULL;
err_unregister:
	printk(KERN_WARNING "sb: unable to allocate memory\n");
	/* fix: unregister the major actually in use, not MAJOR_NODE,
	 * which is 0 when a dynamic major was granted */
	unregister_blkdev(major, DEV_NAME);
	release_region(REG_BASE, REG_LEN);
	return -ENOMEM;
}
/*
 * mcd_init - probe and register the Mitsumi CD-ROM driver.
 *
 * Registers the block major and I/O region, probes for the drive by
 * resetting it and reading its version, claims the IRQ, registers the
 * cdrom device and publishes the gendisk.
 *
 * Returns 0 on success, -ENOMEM/-EIO on failure.
 *
 * Fix: the error labels were mis-ordered — the old code called
 * blk_cleanup_queue() on a NULL queue when blk_init_queue() failed and
 * never released the I/O region on probe failure.  Labels now unwind in
 * strict reverse order of acquisition.
 */
int __init mcd_init(void)
{
	struct gendisk *disk = alloc_disk(1);
	int count;
	unsigned char result[3];
	char msg[80];

	if (!disk) {
		printk(KERN_INFO "mcd: can't allocated disk.\n");
		return -ENOMEM;
	}
	if (mcd_port <= 0 || mcd_irq <= 0) {
		printk(KERN_INFO "mcd: not probing.\n");
		put_disk(disk);
		return -EIO;
	}
	if (register_blkdev(MAJOR_NR, "mcd")) {
		put_disk(disk);
		return -EIO;
	}
	if (!request_region(mcd_port, 4, "mcd")) {
		printk(KERN_ERR "mcd: Initialization failed, I/O port (%X) already in use\n",
		       mcd_port);
		goto out_region;
	}

	mcd_queue = blk_init_queue(do_mcd_request, &mcd_spinlock);
	if (!mcd_queue)
		goto out_queue;

	/* check for card */
	outb(0, MCDPORT(1));		/* send reset */
	for (count = 0; count < 2000000; count++)
		(void) inb(MCDPORT(1));	/* delay a bit */
	outb(0x40, MCDPORT(0));		/* send get-stat cmd */
	for (count = 0; count < 2000000; count++)
		if (!(inb(MCDPORT(1)) & MFL_STATUS))
			break;

	if (count >= 2000000) {
		printk(KERN_INFO "mcd: initialisation failed - No mcd device at 0x%x irq %d\n",
		       mcd_port, mcd_irq);
		goto out_probe;
	}
	count = inb(MCDPORT(0));	/* pick up the status */

	outb(MCMD_GET_VERSION, MCDPORT(0));
	for (count = 0; count < 3; count++)
		if (getValue(result + count)) {
			printk(KERN_ERR "mcd: mitsumi get version failed at 0x%x\n",
			       mcd_port);
			goto out_probe;
		}

	/* three identical bytes means no real drive answered */
	if (result[0] == result[1] && result[1] == result[2])
		goto out_probe;

	mcdVersion = result[2];
	if (mcdVersion >= 4)
		outb(4, MCDPORT(2));	/* magic happens */

	/* don't get the IRQ until we know for sure the drive is there */
	if (request_irq(mcd_irq, mcd_interrupt, SA_INTERRUPT, "Mitsumi CD", NULL)) {
		printk(KERN_ERR "mcd: Unable to get IRQ%d for Mitsumi CD-ROM\n",
		       mcd_irq);
		goto out_probe;
	}

	if (result[1] == 'D') {
		MCMD_DATA_READ = MCMD_2X_READ;
		/* Added flag to drop to 1x speed if too many errors */
		mcdDouble = 1;
	} else
		mcd_info.speed = 1;
	sprintf(msg, " mcd: Mitsumi %s Speed CD-ROM at port=0x%x,"
		" irq=%d\n", mcd_info.speed == 1 ? "Single" : "Double",
		mcd_port, mcd_irq);

	outb(MCMD_CONFIG_DRIVE, MCDPORT(0));
	outb(0x02, MCDPORT(0));
	outb(0x00, MCDPORT(0));
	getValue(result);

	outb(MCMD_CONFIG_DRIVE, MCDPORT(0));
	outb(0x10, MCDPORT(0));
	outb(0x04, MCDPORT(0));
	getValue(result);

	mcd_invalidate_buffers();
	mcdPresent = 1;

	disk->major = MAJOR_NR;
	disk->first_minor = 0;
	sprintf(disk->disk_name, "mcd");
	disk->fops = &mcd_bdops;
	disk->flags = GENHD_FL_CD;
	mcd_gendisk = disk;

	if (register_cdrom(&mcd_info) != 0) {
		printk(KERN_ERR "mcd: Unable to register Mitsumi CD-ROM.\n");
		goto out_cdrom;
	}
	disk->queue = mcd_queue;
	add_disk(disk);
	printk(msg);	/* msg is built locally above, not external input */
	return 0;

	/* unwind in reverse order of acquisition */
out_cdrom:
	free_irq(mcd_irq, NULL);
out_probe:
	blk_cleanup_queue(mcd_queue);
out_queue:
	release_region(mcd_port, 4);
out_region:
	unregister_blkdev(MAJOR_NR, "mcd");
	put_disk(disk);
	return -EIO;
}
static int srb_init_disk(struct srb_device_s *dev) { struct gendisk *disk = NULL; struct request_queue *q; int i; int ret = 0; SRB_LOG_INFO(srb_log, "srb_init_disk: initializing disk for device: %s", dev->name); /* create gendisk info */ disk = alloc_disk(DEV_MINORS); if (!disk) { SRB_LOG_WARN(srb_log, "srb_init_disk: unable to allocate memory for disk for device: %s", dev->name); return -ENOMEM; } SRB_LOG_DEBUG(srb_log, "Creating new disk: %p", disk); strcpy(disk->disk_name, dev->name); disk->major = dev->major; disk->first_minor = 0; disk->fops = &srb_fops; disk->private_data = dev; /* init rq */ q = blk_init_queue(srb_rq_fn, &dev->rq_lock); if (!q) { SRB_LOG_WARN(srb_log, "srb_init_disk: unable to init block queue for device: %p, disk: %p", dev, disk); srb_free_disk(dev); return -ENOMEM; } blk_queue_max_hw_sectors(q, DEV_NB_PHYS_SEGS); q->queuedata = dev; dev->disk = disk; dev->q = disk->queue = q; dev->nb_threads = 0; //blk_queue_flush(q, REQ_FLUSH | REQ_FUA); //blk_queue_max_phys_segments(q, DEV_NB_PHYS_SEGS); //TODO: Enable flush and bio (Issue #21) //blk_queue_flush(q, REQ_FLUSH); for (i = 0; i < thread_pool_size; i++) { //if ((ret = srb_cdmi_connect(&dev->debug, &dev->thread_cdmi_desc[i]))) { if ((ret = srb_cdmi_connect(&dev->debug, dev->thread_cdmi_desc[i]))) { SRB_LOG_ERR(srb_log, "Unable to connect to CDMI endpoint: %d", ret); srb_free_disk(dev); return -EIO; } } /* Caution: be sure to call this before spawning threads */ ret = srb_cdmi_getsize(&dev->debug, dev->thread_cdmi_desc[0], &dev->disk_size); if (ret != 0) { SRB_LOG_ERR(srb_log, "Could not retrieve volume size."); srb_free_disk(dev); return ret; } set_capacity(disk, dev->disk_size / 512ULL); for (i = 0; i < thread_pool_size; i++) { dev->thread[i] = kthread_create(srb_thread, dev, "%s", dev->disk->disk_name); if (IS_ERR(dev->thread[i])) { SRB_LOG_ERR(srb_log, "Unable to create worker thread (id %d)", i); dev->thread[i] = NULL; srb_free_disk(dev); goto err_kthread; } 
wake_up_process(dev->thread[i]); } add_disk(disk); SRBDEV_LOG_INFO(dev, "Attached volume %s of size 0x%llx", disk->disk_name, (unsigned long long)dev->disk_size); return 0; err_kthread: for (i = 0; i < thread_pool_size; i++) { if (dev->thread[i] != NULL) kthread_stop(dev->thread[i]); } return -EIO; }
/*
 * sbd_init - module init for the simple block device.
 *
 * Allocates an AES cipher handle (placeholder for future encryption
 * support), the in-memory backing store, the request queue, a block
 * major and the gendisk, then publishes the disk.
 *
 * Returns 0 on success, a negative errno on failure; all error paths
 * release what was acquired (the original leaked the request queue and
 * never checked the cipher handle).
 */
static int __init sbd_init(void)
{
	/*
	 * crypto_alloc_cipher() returns an ERR_PTR() on failure, never NULL,
	 * so it must be checked with IS_ERR().  "aes", 0, 0 is a placeholder
	 * allocation of an AES-based single block cipher.
	 */
	crypto = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(crypto)) {
		printk(KERN_WARNING "sbd: unable to allocate AES cipher\n");
		return PTR_ERR(crypto);
	}

	/*
	 * Set up our internal device.
	 */
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL)
		return -ENOMEM;

	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_logical_block_size(Queue, logical_block_size);

	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out_queue;	/* fix: queue was leaked here */
	}

	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (!Device.gd)
		goto out_unregister;

	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	return 0;

	/* unwind in reverse order of acquisition */
out_unregister:
	unregister_blkdev(major_num, "sbd");
out_queue:
	blk_cleanup_queue(Queue);	/* fix: was never cleaned up */
out:
	vfree(Device.data);
	return -ENOMEM;
}
/*
 * virtualblockdevice_init - module init for the virtual block device.
 *
 * Registers the block major, builds the request queue (switching its
 * elevator to "noop" when possible), allocates and fills the gendisk,
 * and publishes it to the kernel.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __init virtualblockdevice_init(void)
{
	elevator_t *prev_elevator;
	int rc;

	printk(KERN_ALERT "VirtualBlockDevice: Entry virtualblockdevice_init !\n");

	/* claim a major number (dynamic when virtualblockdevice_major == 0) */
	rc = register_blkdev(virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME);
	if (rc < 0) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to register block device: virtualblockdevice ! Major: %d\tErrno: %d !\n", virtualblockdevice_major, rc);
		goto failure_register_blkdev;
	}
	virtualblockdevice_major = rc;
	printk(KERN_ALERT "VirtualBlockDevice: Success to register block device: virtualblockdevice ! Major: %d !\n", virtualblockdevice_major);

	/* build the request queue */
	virtualblockdevice_queue = blk_init_queue(virtualblockdevice_do_request, NULL);
	if (!virtualblockdevice_queue) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to init request_queue !\n");
		rc = -ENOMEM;
		goto failure_init_queue;
	}
	printk(KERN_ALERT "VirtualBlockDevice: Success to init request_queue !\n");

	/* try to switch the I/O scheduler to noop; keep the old one on failure */
	prev_elevator = virtualblockdevice_queue->elevator;
	if (IS_ERR_VALUE(elevator_init(virtualblockdevice_queue, "noop"))) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to switch elevator to noop, continue to use old one !\n");
	} else {
		printk(KERN_ALERT "VirtualBlockDevice: Success to switch elevator to noop !\n");
		elevator_exit(prev_elevator);
	}

	/* allocate and describe the gendisk */
	virtualblockdevice_disk = alloc_disk(1);
	if (!virtualblockdevice_disk) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to allocate gendisk !\n");
		rc = -ENOMEM;
		goto failure_alloc_disk;
	}
	printk(KERN_ALERT "VirtualBlockDevice: Success to allocate gendisk !\n");

	virtualblockdevice_disk->major = virtualblockdevice_major;
	virtualblockdevice_disk->first_minor = virtualblockdevice_minor;
	virtualblockdevice_disk->fops = &virtualblockdevice_fops;
	virtualblockdevice_disk->queue = virtualblockdevice_queue;
	strcpy(virtualblockdevice_disk->disk_name, VIRTUALBLOCKDEVICE_NAME);
	set_capacity(virtualblockdevice_disk, (VIRTUALBLOCKDEVICE_DISK_CAPACITY >> 9));

	/* hand the disk to the kernel */
	add_disk(virtualblockdevice_disk);
	return 0;

failure_alloc_disk:
	blk_cleanup_queue(virtualblockdevice_queue);
failure_init_queue:
	unregister_blkdev(virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME);
failure_register_blkdev:
	return rc;
}
/*
 * block_init - module init: allocate the in-memory backing store,
 * request queue, block major and gendisk, then publish the disk.
 *
 * Returns 0 on success, -ENOMEM on any failure.  On failure it calls
 * block_cleanup() to undo partial setup — NOTE(review): this assumes
 * block_cleanup() tolerates fields that were never initialised (NULL
 * queue/gdisk, unregistered major); confirm against its definition.
 */
int block_init(void)
{
	printk(KERN_INFO "initializing block device module\n");
	spin_lock_init(&lock);
	/* backing store: size bytes of vmalloc'd memory */
	size = NSECTORS * LOGICAL_BLOCK_SIZE;
	data = vmalloc(size);
	if (data == NULL) {
		printk(KERN_INFO "block_init: could not malloc a block of size %lu\n", size);
		return -ENOMEM;
	}
	/* request queue serviced by block_request() under `lock` */
	queue = blk_init_queue(block_request, &lock);
	if (queue == NULL) {
		printk(KERN_INFO "block_init: could not initialize blk queue\n");
		block_cleanup();
		return -ENOMEM;
	}
	blk_queue_logical_block_size(queue, LOGICAL_BLOCK_SIZE);
	/* major_number == 0 requests a dynamic major */
	major_number = register_blkdev(major_number, DEVICE_NAME);
	if (major_number < 0) {
		printk(KERN_INFO "block_init: could not register blk device, major number=%d\n", major_number);
		block_cleanup();
		return -ENOMEM;
	}
	gdisk = alloc_disk(16);
	if (!gdisk) {
		printk(KERN_INFO "block_init: could not alloc gdisk\n");
		block_cleanup();
		return -ENOMEM;
	}
	gdisk->major = major_number;
	gdisk->first_minor = 0;
	gdisk->fops = &block_ops;
	/*
	 * NOTE(review): this stores the ADDRESS of the `data` pointer, not
	 * the buffer itself — verify that block_request()/block_ops expect a
	 * pointer-to-pointer here.
	 */
	gdisk->private_data = &data;
	gdisk->queue = queue;
	strcpy(gdisk->disk_name, DISK_NAME);
	/* register with capacity 0 first, then grow it below */
	set_capacity(gdisk, 0);
	add_disk(gdisk);
	printk(KERN_INFO "block_init: added gendisk\n");
	set_capacity(gdisk, NSECTORS);
	printk(KERN_INFO "block_init: set capacity on gendisk to %d sectors\n", NSECTORS);
	return 0;
}
/* WARNING: Function accesses globals curlog, curprog, curstr, string_chunk
 * (via get_logline()'s parser state). */
/*
 * search_logfile - scan one Amanda log file for dumps matching the current
 * find criteria and prepend the matches to *output_find.
 *
 * @output_find:      list the results are prepended to (must be non-NULL).
 * @label:            volume label to match, or NULL for any.
 * @passed_datestamp: expected datestamp, or NULL to accept the log's own.
 * @logfile:          path of the log file to scan (must be non-NULL).
 * @dynamic_disklist: when non-NULL, unknown host/disk pairs are added to it;
 *                    when NULL, such entries are skipped.
 *
 * Returns TRUE if anything matching was found.
 *
 * Fix: part_by_dle is now destroyed on exit — it was leaked (only
 * valid_label was destroyed), losing its g_strdup'd keys on every call.
 */
gboolean
search_logfile(
    find_result_t **output_find,
    const char *label,
    const char *passed_datestamp,
    const char *logfile,
    disklist_t *dynamic_disklist)
{
    FILE *logf;
    char *host, *host_undo;
    char *disk = NULL, *qdisk, *disk_undo;
    char *date, *date_undo;
    int partnum;
    int totalparts;
    int maxparts = -1;
    char *number;
    int fileno;
    char *current_label;
    char *rest, *rest_undo;
    char *ck_label = NULL;
    int level = 0;
    off_t filenum;
    char *ck_datestamp = NULL;
    char *datestamp;
    char *s;
    int ch;
    disk_t *dp;
    GHashTable *valid_label;
    GHashTable *part_by_dle;
    find_result_t *part_find;
    find_result_t *a_part_find;
    gboolean right_label = FALSE;
    gboolean found_something = FALSE;
    double sec;
    off_t kb;
    off_t bytes;
    off_t orig_kb;
    int taper_part = 0;

    g_return_val_if_fail(output_find != NULL, 0);
    g_return_val_if_fail(logfile != NULL, 0);

    current_label = g_strdup("");
    if (string_chunk == NULL) {
	string_chunk = g_string_chunk_new(32768);
    }
    /* labels seen in "start taper" lines that match the search */
    valid_label = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
    /* per-DLE list of parts, keyed by "HOST:.. DISK:.. DATE:.. LEVEL:.." */
    part_by_dle = g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL);
    datestamp = g_strdup(passed_datestamp);

    if ((logf = fopen(logfile, "r")) == NULL) {
	error(_("could not open logfile %s: %s"), logfile, strerror(errno));
	/*NOTREACHED*/
    }

    filenum = (off_t)0;
    while (get_logline(logf)) {
	/* "START taper" line: pick up the volume label and datestamp */
	if (curlog == L_START && curprog == P_TAPER) {
	    amfree(ck_label);
	    ck_datestamp = NULL;
	    if (parse_taper_datestamp_log(curstr, &ck_datestamp,
					  &ck_label) == 0) {
		g_printf(_("strange log line in %s \"start taper %s\"\n"),
			 logfile, curstr);
		continue;
	    }
	    if (datestamp != NULL) {
		if (!g_str_equal(datestamp, ck_datestamp)) {
		    g_printf(_("Log file %s stamped %s, expecting %s!\n"),
			     logfile, ck_datestamp, datestamp);
		    amfree(ck_label);
		    break;
		}
	    }

	    right_label = volume_matches(label, ck_label, ck_datestamp);
	    if (right_label && ck_label) {
		g_hash_table_insert(valid_label, g_strdup(ck_label),
				    GINT_TO_POINTER(1));
	    }
	    if (label && datestamp && right_label) {
		found_something = TRUE;
	    }
	    amfree(current_label);
	    current_label = ck_label;
	    ck_label = NULL;
	    if (datestamp == NULL) {
		datestamp = g_strdup(ck_datestamp);
	    }
	    filenum = (off_t)0;
	}
	if (!datestamp)
	    continue;

	/* each taper part/chunk consumes one file number on the volume */
	if (right_label &&
	    (curlog == L_SUCCESS ||
	     curlog == L_CHUNK || curlog == L_PART ||
	     curlog == L_PARTPARTIAL) &&
	    curprog == P_TAPER) {
	    filenum++;
	} else if (right_label && curlog == L_PARTIAL && curprog == P_TAPER &&
		   taper_part == 0) {
	    filenum++;
	}

	partnum = -1;
	totalparts = -1;
	if (curlog == L_SUCCESS || curlog == L_CHUNKSUCCESS ||
	    curlog == L_DONE || curlog == L_FAIL || curlog == L_CHUNK ||
	    curlog == L_PART || curlog == L_PARTIAL ||
	    curlog == L_PARTPARTIAL) {
	    s = curstr;
	    ch = *s++;

	    skip_whitespace(s, ch);
	    if (ch == '\0') {
		g_printf(_("strange log line in %s \"%s\"\n"),
			 logfile, curstr);
		continue;
	    }

	    /* PART lines carry "<label> <fileno>" before the host field */
	    if (curlog == L_PART || curlog == L_PARTPARTIAL) {
		char *part_label;
		char *qpart_label = s - 1;
		taper_part++;
		skip_quoted_string(s, ch);
		s[-1] = '\0';

		part_label = unquote_string(qpart_label);
		if (!g_hash_table_lookup(valid_label, part_label)) {
		    amfree(part_label);
		    continue;
		}
		amfree(current_label);
		current_label = part_label;

		skip_whitespace(s, ch);
		if (ch == '\0') {
		    g_printf("strange log line in %s \"%s\"\n",
			     logfile, curstr);
		    continue;
		}

		number = s - 1;
		skip_non_whitespace(s, ch);
		s[-1] = '\0';
		fileno = atoi(number);
		filenum = fileno;
		if (filenum == 0)
		    continue;

		skip_whitespace(s, ch);
		if (ch == '\0') {
		    g_printf("strange log line in %s \"%s\"\n",
			     logfile, curstr);
		    continue;
		}
	    } else {
		taper_part = 0;
	    }

	    /* host field */
	    host = s - 1;
	    skip_non_whitespace(s, ch);
	    host_undo = s - 1;
	    *host_undo = '\0';

	    skip_whitespace(s, ch);
	    if (ch == '\0') {
		g_printf(_("strange log line in %s \"%s\"\n"),
			 logfile, curstr);
		continue;
	    }
	    /* quoted disk field */
	    qdisk = s - 1;
	    skip_quoted_string(s, ch);
	    disk_undo = s - 1;
	    *disk_undo = '\0';
	    disk = unquote_string(qdisk);

	    skip_whitespace(s, ch);
	    if (ch == '\0') {
		g_printf(_("strange log line in %s \"%s\"\n"),
			 logfile, curstr);
		amfree(disk);
		continue;
	    }
	    date = s - 1;
	    skip_non_whitespace(s, ch);
	    date_undo = s - 1;
	    *date_undo = '\0';

	    if (strlen(date) < 3) {
		/* old log didn't have datestamp */
		level = atoi(date);
		date = datestamp;
		partnum = 1;
		totalparts = 1;
	    } else {
		if (curprog == P_TAPER &&
		    (curlog == L_CHUNK || curlog == L_PART ||
		     curlog == L_PARTPARTIAL || curlog == L_PARTIAL ||
		     curlog == L_DONE)) {
		    char *s1, ch1;
		    skip_whitespace(s, ch);
		    number = s - 1;
		    skip_non_whitespace(s, ch);
		    s1 = &s[-1];
		    ch1 = *s1;
		    skip_whitespace(s, ch);
		    if (*(s - 1) != '[') {
			*s1 = ch1;
			sscanf(number, "%d/%d", &partnum, &totalparts);
			if (partnum > maxparts)
			    maxparts = partnum;
			if (totalparts > maxparts)
			    maxparts = totalparts;
		    } else { /* nparts is not in all PARTIAL lines */
			partnum = 1;
			totalparts = 1;
			s = number + 1;
		    }
		} else {
		    skip_whitespace(s, ch);
		}
		if (ch == '\0' || sscanf(s - 1, "%d", &level) != 1) {
		    g_printf(_("Fstrange log line in %s \"%s\"\n"),
			     logfile, s - 1);
		    amfree(disk);
		    continue;
		}
		skip_integer(s, ch);
	    }

	    skip_whitespace(s, ch);
	    if (ch == '\0') {
		g_printf(_("strange log line in %s \"%s\"\n"),
			 logfile, curstr);
		amfree(disk);
		continue;
	    }
	    rest = s - 1;
	    skip_non_whitespace(s, ch);
	    rest_undo = s - 1;
	    *rest_undo = '\0';

	    /* "[sec .. kb|bytes .. kps .. [orig-kb ..]]" statistics block */
	    if (g_str_equal(rest, "[sec")) {
		skip_whitespace(s, ch);
		if (ch == '\0') {
		    g_printf(_("strange log line in %s \"%s\"\n"),
			     logfile, curstr);
		    amfree(disk);
		    continue;
		}
		sec = atof(s - 1);
		skip_non_whitespace(s, ch);
		skip_whitespace(s, ch);
		rest = s - 1;
		skip_non_whitespace(s, ch);
		rest_undo = s - 1;
		*rest_undo = '\0';
		if (!g_str_equal(rest, "kb") && !g_str_equal(rest, "bytes")) {
		    g_printf(_("Bstrange log line in %s \"%s\"\n"),
			     logfile, curstr);
		    amfree(disk);
		    continue;
		}

		skip_whitespace(s, ch);
		if (ch == '\0') {
		    g_printf(_("strange log line in %s \"%s\"\n"),
			     logfile, curstr);
		    amfree(disk);
		    continue;
		}
		if (g_str_equal(rest, "kb")) {
		    kb = atof(s - 1);
		    bytes = 0;
		} else {
		    bytes = atof(s - 1);
		    kb = bytes / 1024;
		}
		skip_non_whitespace(s, ch);
		skip_whitespace(s, ch);
		rest = s - 1;
		skip_non_whitespace(s, ch);
		rest_undo = s - 1;
		*rest_undo = '\0';
		if (!g_str_equal(rest, "kps")) {
		    g_printf(_("Cstrange log line in %s \"%s\"\n"),
			     logfile, curstr);
		    amfree(disk);
		    continue;
		}

		skip_whitespace(s, ch);
		if (ch == '\0') {
		    g_printf(_("strange log line in %s \"%s\"\n"),
			     logfile, curstr);
		    amfree(disk);
		    continue;
		}
		/* kps = atof(s - 1); */
		skip_non_whitespace(s, ch);
		skip_whitespace(s, ch);
		rest = s - 1;
		skip_non_whitespace(s, ch);
		rest_undo = s - 1;
		*rest_undo = '\0';
		if (!g_str_equal(rest, "orig-kb")) {
		    orig_kb = 0;
		} else {
		    skip_whitespace(s, ch);
		    if (ch == '\0') {
			g_printf(_("strange log line in %s \"%s\"\n"),
				 logfile, curstr);
			amfree(disk);
			continue;
		    }
		    orig_kb = atof(s - 1);
		}
	    } else {
		sec = 0;
		kb = 0;
		bytes = 0;
		orig_kb = 0;
		*rest_undo = ' ';
	    }

	    /* strip "error "/"config " prefixes from the message */
	    if (g_str_has_prefix(rest, "error"))
		rest += 6;
	    if (g_str_has_prefix(rest, "config"))
		rest += 7;

	    dp = lookup_disk(host, disk);
	    if (dp == NULL) {
		if (dynamic_disklist == NULL) {
		    amfree(disk);
		    continue;
		}
		dp = add_disk(dynamic_disklist, host, disk);
		enqueue_disk(dynamic_disklist, dp);
	    }
	    if (find_match(host, disk)) {
		if (curprog == P_TAPER) {
		    char *key = g_strdup_printf(
			"HOST:%s DISK:%s: DATE:%s LEVEL:%d",
			host, disk, date, level);
		    find_result_t *new_output_find = g_new0(find_result_t, 1);
		    part_find = g_hash_table_lookup(part_by_dle, key);
		    maxparts = partnum;
		    if (maxparts < totalparts)
			maxparts = totalparts;
		    for (a_part_find = part_find; a_part_find;
			 a_part_find = a_part_find->next) {
			if (maxparts < a_part_find->partnum)
			    maxparts = a_part_find->partnum;
			if (maxparts < a_part_find->totalparts)
			    maxparts = a_part_find->totalparts;
		    }
		    new_output_find->timestamp =
			g_string_chunk_insert_const(string_chunk, date);
		    new_output_find->write_timestamp =
			g_string_chunk_insert_const(string_chunk, datestamp);
		    new_output_find->hostname =
			g_string_chunk_insert_const(string_chunk, host);
		    new_output_find->diskname =
			g_string_chunk_insert_const(string_chunk, disk);
		    new_output_find->level = level;
		    new_output_find->partnum = partnum;
		    new_output_find->totalparts = totalparts;
		    new_output_find->label =
			g_string_chunk_insert_const(string_chunk,
						    current_label);
		    new_output_find->status = NULL;
		    new_output_find->dump_status = NULL;
		    new_output_find->message = "";
		    new_output_find->filenum = filenum;
		    new_output_find->sec = sec;
		    new_output_find->kb = kb;
		    new_output_find->bytes = bytes;
		    new_output_find->orig_kb = orig_kb;
		    new_output_find->next = NULL;
		    if (curlog == L_SUCCESS) {
			new_output_find->status = "OK";
			new_output_find->dump_status = "OK";
			new_output_find->next = *output_find;
			new_output_find->partnum = 1; /* L_SUCCESS is pre-splitting */
			*output_find = new_output_find;
			found_something = TRUE;
		    } else if (curlog == L_CHUNKSUCCESS || curlog == L_DONE ||
			       curlog == L_PARTIAL || curlog == L_FAIL) {
			/* result line */
			if (curlog == L_PARTIAL || curlog == L_FAIL) {
			    /* set dump_status of each part */
			    for (a_part_find = part_find; a_part_find;
				 a_part_find = a_part_find->next) {
				if (curlog == L_PARTIAL)
				    a_part_find->dump_status = "PARTIAL";
				else {
				    a_part_find->dump_status = "FAIL";
				    a_part_find->message =
					g_string_chunk_insert_const(
						string_chunk, rest);
				}
			    }
			} else {
			    if (maxparts > -1) { /* format with part */
				/* must check if all part are there */
				int num_part = maxparts;
				for (a_part_find = part_find; a_part_find;
				     a_part_find = a_part_find->next) {
				    if (a_part_find->partnum == num_part &&
					g_str_equal(a_part_find->status, "OK"))
					num_part--;
				}
				/* set dump_status of each part */
				for (a_part_find = part_find; a_part_find;
				     a_part_find = a_part_find->next) {
				    if (num_part == 0) {
					a_part_find->dump_status = "OK";
				    } else {
					a_part_find->dump_status = "FAIL";
					a_part_find->message =
					    g_string_chunk_insert_const(
						string_chunk, "Missing part");
				    }
				}
			    }
			}
			if (curlog == L_DONE) {
			    for (a_part_find = part_find; a_part_find;
				 a_part_find = a_part_find->next) {
				if (a_part_find->totalparts == -1) {
				    a_part_find->totalparts = maxparts;
				}
				if (a_part_find->orig_kb == 0) {
				    a_part_find->orig_kb = orig_kb;
				}
			    }
			}
			if (part_find) { /* find last element */
			    for (a_part_find = part_find;
				 a_part_find->next != NULL;
				 a_part_find = a_part_find->next) {
			    }
			    /* merge part_find to *output_find */
			    a_part_find->next = *output_find;
			    *output_find = part_find;
			    part_find = NULL;
			    maxparts = -1;
			    found_something = TRUE;
			    g_hash_table_remove(part_by_dle, key);
			}
			free_find_result(&new_output_find);
		    } else { /* part line */
			if (curlog == L_PART || curlog == L_CHUNK) {
			    new_output_find->status = "OK";
			    new_output_find->dump_status = "OK";
			} else { /* PARTPARTIAL */
			    new_output_find->status = "PARTIAL";
			    new_output_find->dump_status = "PARTIAL";
			}
			/* Add to part_find list */
			if (part_find) {
			    new_output_find->next = part_find;
			    part_find = new_output_find;
			} else {
			    new_output_find->next = NULL;
			    part_find = new_output_find;
			}
			g_hash_table_insert(part_by_dle, g_strdup(key),
					    part_find);
			found_something = TRUE;
		    }
		    amfree(key);
		} else if (curlog == L_FAIL) {
		    char *status_failed;
		    /* print other failures too -- this is a hack to ensure
		     * that failures which did not make it to tape are also
		     * listed in the output of 'amadmin x find'; users that
		     * do not want this information (e.g.,
		     * Amanda::DB::Catalog) should filter dumps with a NULL
		     * label. */
		    find_result_t *new_output_find = g_new0(find_result_t, 1);
		    new_output_find->next = *output_find;
		    new_output_find->timestamp =
			g_string_chunk_insert_const(string_chunk, date);
		    new_output_find->write_timestamp =
			g_strdup("00000000000000"); /* dump was not written.. */
		    new_output_find->hostname =
			g_string_chunk_insert_const(string_chunk, host);
		    new_output_find->diskname =
			g_string_chunk_insert_const(string_chunk, disk);
		    new_output_find->level = level;
		    new_output_find->label = NULL;
		    new_output_find->partnum = partnum;
		    new_output_find->totalparts = totalparts;
		    new_output_find->filenum = 0;
		    new_output_find->sec = sec;
		    new_output_find->kb = kb;
		    new_output_find->bytes = bytes;
		    new_output_find->orig_kb = orig_kb;
		    status_failed = g_strjoin(NULL,
			"FAILED (", program_str[(int)curprog], ") ",
			rest, NULL);
		    new_output_find->status =
			g_string_chunk_insert_const(string_chunk,
						    status_failed);
		    amfree(status_failed);
		    new_output_find->dump_status = "";
		    new_output_find->message = "";
		    *output_find = new_output_find;
		    found_something = TRUE;
		    maxparts = -1;
		}
	    }
	    amfree(disk);
	}
    }

    g_hash_table_destroy(valid_label);
    g_hash_table_destroy(part_by_dle);	/* fix: was leaked on every call */
    afclose(logf);
    amfree(datestamp);
    amfree(current_label);
    amfree(disk);
    return found_something;
}
/*
 * Scan the holding disk for dump images and prepend any that match the
 * current find criteria to *output_find.
 *
 * output_find:       list of find results, updated in place (new entries
 *                    are pushed on the front).
 * dynamic_disklist:  if non-NULL, hosts/disks found on the holding disk
 *                    but absent from the disklist are added to it; if
 *                    NULL such dumps are skipped.
 */
void
search_holding_disk(
    find_result_t **output_find,
    disklist_t * dynamic_disklist)
{
    GSList *holding_file_list;
    GSList *e;
    char *holding_file;
    disk_t *dp;
    char *orig_name;

    /* all holding files (fully resolved paths) */
    holding_file_list = holding_get_files(NULL, 1);

    /* lazily create the shared string pool used for result strings */
    if (string_chunk == NULL) {
	string_chunk = g_string_chunk_new(32768);
    }

    for(e = holding_file_list; e != NULL; e = e->next) {
	dumpfile_t file;

	holding_file = (char *)e->data;

	if (!holding_file_get_dumpfile(holding_file, &file))
	    continue;

	/* ignore headers with an out-of-range dump level */
	if (file.dumplevel < 0 || file.dumplevel >= DUMP_LEVELS) {
	    dumpfile_free_data(&file);
	    continue;
	}

	dp = NULL;
	orig_name = g_strdup(file.name);
	/*
	 * Try progressively shorter hostnames: chop trailing ".domain"
	 * components off file.name (in place) until a disklist entry
	 * matches or no dot remains.
	 */
	for(;;) {
	    char *s;

	    if((dp = lookup_disk(file.name, file.disk)))
		break;
	    if((s = strrchr(file.name,'.')) == NULL)
		break;
	    *s = '\0';
	}
	strcpy(file.name, orig_name); /* restore munged string */
	g_free(orig_name);

	if ( dp == NULL ) {
	    /* not in the disklist -- add it dynamically, or skip */
	    if (dynamic_disklist == NULL) {
		dumpfile_free_data(&file);
		continue;
	    }
	    dp = add_disk(dynamic_disklist, file.name, file.disk);
	    enqueue_disk(dynamic_disklist, dp);
	}

	if(find_match(file.name,file.disk)) {
	    find_result_t *new_output_find = g_new0(find_result_t, 1);

	    /* push onto the front of the result list */
	    new_output_find->next=*output_find;
	    new_output_find->timestamp = g_string_chunk_insert_const(string_chunk, file.datestamp);
	    /* holding-disk dumps were never written to tape */
	    new_output_find->write_timestamp = g_string_chunk_insert_const(string_chunk, "00000000000000");
	    new_output_find->hostname = g_string_chunk_insert_const(string_chunk, file.name);
	    new_output_find->diskname = g_string_chunk_insert_const(string_chunk, file.disk);
	    new_output_find->level=file.dumplevel;
	    /* for holding-disk dumps the "label" is the holding file path */
	    new_output_find->label=g_string_chunk_insert_const(string_chunk, holding_file);
	    new_output_find->partnum = -1;
	    new_output_find->totalparts = -1;
	    new_output_find->filenum=0;
	    if (file.is_partial) {
		new_output_find->status="PARTIAL";
		new_output_find->dump_status="PARTIAL";
	    } else {
		new_output_find->status="OK";
		new_output_find->dump_status="OK";
	    }
	    new_output_find->message="";
	    new_output_find->kb = holding_file_size(holding_file, 1);
	    new_output_find->bytes = 0;
	    new_output_find->orig_kb = file.orig_size;

	    *output_find=new_output_find;
	}
	dumpfile_free_data(&file);
    }

    slist_free_full(holding_file_list, g_free);
}
/* alloc_disk and add_disk can sleep */ void aoeblk_gdalloc(void *vp) { struct aoedev *d = vp; struct gendisk *gd; ulong flags; gd = alloc_disk(AOE_PARTITIONS); if (gd == NULL) { printk(KERN_ERR "aoe: cannot allocate disk structure for %ld.%d\n", d->aoemajor, d->aoeminor); goto err; } d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); if (d->bufpool == NULL) { printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n", d->aoemajor, d->aoeminor); goto err_disk; } d->blkq = blk_alloc_queue(GFP_KERNEL); if (!d->blkq) goto err_mempool; blk_queue_make_request(d->blkq, aoeblk_make_request); d->blkq->backing_dev_info.name = "aoe"; if (bdi_init(&d->blkq->backing_dev_info)) goto err_blkq; spin_lock_irqsave(&d->lock, flags); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor * AOE_PARTITIONS; gd->fops = &aoe_bdops; gd->private_data = d; set_capacity(gd, d->ssize); snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", d->aoemajor, d->aoeminor); gd->queue = d->blkq; d->gd = gd; d->flags &= ~DEVFL_GDALLOC; d->flags |= DEVFL_UP; spin_unlock_irqrestore(&d->lock, flags); add_disk(gd); aoedisk_add_sysfs(d); return; err_blkq: blk_cleanup_queue(d->blkq); d->blkq = NULL; err_mempool: mempool_destroy(d->bufpool); err_disk: put_disk(gd); err: spin_lock_irqsave(&d->lock, flags); d->flags &= ~DEVFL_GDALLOC; spin_unlock_irqrestore(&d->lock, flags); }
/*
 * Probe routine for the Xilinx SystemACE CompactFlash controller.
 * Maps the register window, initializes the Xilinx driver core,
 * identifies the attached CF card, and registers a 16-minor gendisk.
 *
 * Returns 0 on success or a negative errno; the failedN labels unwind
 * the acquired resources in reverse order of acquisition.
 */
static int xsysace_probe(struct device *dev)
{
	XSysAce_Config xsysace_cfg;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *irq_res, *regs_res;
	unsigned long remap_size;
	XStatus stat;
	long size;
	XSysAce_CFParameters ident;
	int retval;

	/* NOTE(review): to_platform_device() is pointer arithmetic only,
	 * so evaluating it before this NULL check is harmless. */
	if (!dev)
		return -EINVAL;

	/* Find irq number, map the control registers in */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs_res || !irq_res) {
		printk(KERN_ERR "%s #%d: IO resource(s) not found\n",
		       DRIVER_NAME, pdev->id);
		retval = -EFAULT;
		goto failed1;
	}
	xsa_irq = irq_res->start;
	xsa_phys_addr = regs_res->start;
	remap_size = regs_res->end - regs_res->start + 1;
	if (!request_mem_region(regs_res->start, remap_size, DRIVER_NAME)) {
		printk(KERN_ERR
		       "%s #%d: Couldn't lock memory region at 0x%08lX\n",
		       DRIVER_NAME, pdev->id, regs_res->start);
		retval = -EBUSY;
		goto failed1;
	}

	/* Fill in cfg data and add them to the list */
	xsa_remap_size = remap_size;
	xsysace_cfg.DeviceId = pdev->id;
	xsysace_cfg.BaseAddress = (u32) ioremap(regs_res->start, remap_size);
	if (xsysace_cfg.BaseAddress == 0) {
		printk(KERN_ERR
		       "%s #%d: Couldn't ioremap memory at 0x%08lX\n",
		       DRIVER_NAME, pdev->id, regs_res->start);
		retval = -EFAULT;
		goto failed2;
	}

	/* Tell the Xilinx code to bring this SystemACE interface up. */
	down(&cfg_sem);	/* serialize access to the shared SysAce instance */
	if (XSysAce_CfgInitialize (&SysAce, &xsysace_cfg,
				   xsysace_cfg.BaseAddress) != XST_SUCCESS) {
		up(&cfg_sem);
		printk(KERN_ERR "%s #%d: Could not initialize device.\n",
		       DRIVER_NAME, pdev->id);
		retval = -ENODEV;
		goto failed3;
	}
	up(&cfg_sem);

	retval = request_irq(xsa_irq, xsysace_interrupt, 0, DEVICE_NAME, NULL);
	if (retval) {
		printk(KERN_ERR "%s #%d: Couldn't allocate interrupt %d.\n",
		       DRIVER_NAME, pdev->id, xsa_irq);
		goto failed3;
	}
	XSysAce_SetEventHandler(&SysAce, EventHandler, (void *) NULL);
	XSysAce_EnableInterrupt(&SysAce);

	/* Time to identify the drive. */
	/* busy-wait: the lock/identify calls poll until the card answers */
	while (XSysAce_Lock(&SysAce, 0) == XST_DEVICE_BUSY) ;
	while ((stat = XSysAce_IdentifyCF(&SysAce, &ident)) == XST_DEVICE_BUSY) ;
	XSysAce_Unlock(&SysAce);
	if (stat != XST_SUCCESS) {
		printk(KERN_ERR "%s: Could not send identify command.\n",
		       DEVICE_NAME);
		retval = -ENODEV;
		goto failed4;
	}

	/* Fill in what we learned. */
	heads = ident.NumHeads;
	sectors = ident.NumSectorsPerTrack;
	cylinders = ident.NumCylinders;
	/* capacity in 512-byte sectors = C * H * S */
	size = (long) cylinders *(long) heads *(long) sectors;

	xsysace_queue = blk_init_queue(xsysace_do_request, &xsysace_lock);
	if (!xsysace_queue) {
		retval = -ENODEV;
		goto failed4;
	}
	if (register_blkdev(xsa_major, MAJOR_NAME)) {
		retval = -EBUSY;
		goto failed5;
	}
	xsa_gendisk = alloc_disk(16);
	if (!xsa_gendisk) {
		retval = -ENODEV;
		goto failed6;
	}

	strcpy(xsa_gendisk->disk_name, MAJOR_NAME);
	xsa_gendisk->fops = &xsysace_fops;
	xsa_gendisk->major = xsa_major;
	xsa_gendisk->first_minor = 0;
	xsa_gendisk->minors = 16;
	xsa_gendisk->queue = xsysace_queue;
	set_capacity(xsa_gendisk, size);

	/* size/2 converts 512-byte sectors to KB */
	printk(KERN_INFO "%s at 0x%08X mapped to 0x%08X, irq=%d, %ldKB\n",
	       DEVICE_NAME, xsa_phys_addr, SysAce.BaseAddress, xsa_irq,
	       size / 2);

	/* Hook our reset function into system's restart code. */
	if (old_restart == NULL) {
		old_restart = ppc_md.restart;
		ppc_md.restart = xsysace_restart;
	}

	if (proc_init())
		printk(KERN_WARNING "%s: could not register /proc interface.\n",
		       DEVICE_NAME);

	add_disk(xsa_gendisk);

	return 0;		/* success */

      failed6:		/* unwind in reverse order of acquisition */
	unregister_blkdev(xsa_major, MAJOR_NAME);

      failed5:
	blk_cleanup_queue(xsysace_queue);

      failed4:
	XSysAce_DisableInterrupt(&SysAce);
	free_irq(xsa_irq, NULL);

      failed3:
	iounmap((void *) (xsysace_cfg.BaseAddress));

      failed2:
	release_mem_region(regs_res->start, remap_size);

      failed1:
	return retval;
}
/*
 * Initialize one Mitsumi CD-ROM drive: allocate per-drive state and a
 * gendisk, reserve its I/O region, probe the firmware version, set up
 * the request queue and IRQ, and register the cdrom + block device.
 *
 * Returns 0 on "done with this drive" (success OR a recoverable probe
 * failure -- the caller moves on to the next drive), 1 on resource
 * exhaustion, 2 when cdrom registration fails.
 *
 * Fix: the "i/o port is available" xtrace() call was missing the comma
 * between the format string and its argument (a compile error), and the
 * final printk() used a non-literal format string.
 */
static int __init mcdx_init_drive(int drive)
{
	struct s_version version;
	struct gendisk *disk;
	struct s_drive_stuff *stuffp;
	int size = sizeof(*stuffp);
	char msg[80];

	xtrace(INIT, "init() try drive %d\n", drive);

	xtrace(INIT, "kmalloc space for stuffpt's\n");
	xtrace(MALLOC, "init() malloc %d bytes\n", size);
	if (!(stuffp = kzalloc(size, GFP_KERNEL))) {
		xwarn("init() malloc failed\n");
		return 1;
	}

	disk = alloc_disk(1);
	if (!disk) {
		xwarn("init() malloc failed\n");
		kfree(stuffp);
		return 1;
	}

	xtrace(INIT, "init() got %d bytes for drive stuff @ %p\n",
	       sizeof(*stuffp), stuffp);

	/* set default values */
	stuffp->present = 0;	/* this should be 0 already */
	stuffp->toc = NULL;	/* this should be NULL already */

	/* setup our irq and i/o addresses */
	stuffp->irq = irq(mcdx_drive_map[drive]);
	stuffp->wreg_data = stuffp->rreg_data = port(mcdx_drive_map[drive]);
	stuffp->wreg_reset = stuffp->rreg_status = stuffp->wreg_data + 1;
	stuffp->wreg_hcon = stuffp->wreg_reset + 1;
	stuffp->wreg_chn = stuffp->wreg_hcon + 1;

	init_waitqueue_head(&stuffp->busyq);
	init_waitqueue_head(&stuffp->lockq);
	init_waitqueue_head(&stuffp->sleepq);

	/* check if i/o addresses are available */
	if (!request_region(stuffp->wreg_data, MCDX_IO_SIZE, "mcdx")) {
		xwarn("0x%03x,%d: Init failed. "
		      "I/O ports (0x%03x..0x%03x) already in use.\n",
		      stuffp->wreg_data, stuffp->irq,
		      stuffp->wreg_data,
		      stuffp->wreg_data + MCDX_IO_SIZE - 1);
		xtrace(MALLOC, "init() free stuffp @ %p\n", stuffp);
		kfree(stuffp);
		put_disk(disk);
		xtrace(INIT, "init() continue at next drive\n");
		return 0;	/* next drive */
	}

	/* BUG FIX: a comma was missing between format and argument */
	xtrace(INIT, "init() i/o port is available at 0x%03x\n",
	       stuffp->wreg_data);

	xtrace(INIT, "init() hardware reset\n");
	mcdx_reset(stuffp, HARD, 1);

	xtrace(INIT, "init() get version\n");
	if (-1 == mcdx_requestversion(stuffp, &version, 4)) {
		/* failed, next drive */
		release_region(stuffp->wreg_data, MCDX_IO_SIZE);
		xwarn("%s=0x%03x,%d: Init failed. Can't get version.\n",
		      MCDX, stuffp->wreg_data, stuffp->irq);
		xtrace(MALLOC, "init() free stuffp @ %p\n", stuffp);
		kfree(stuffp);
		put_disk(disk);
		xtrace(INIT, "init() continue at next drive\n");
		return 0;
	}

	/* decode the drive's capabilities from its firmware ID letter */
	switch (version.code) {
	case 'D':
		stuffp->readcmd = READ2X;
		stuffp->present = DOUBLE | DOOR | MULTI;
		break;
	case 'F':
		stuffp->readcmd = READ1X;
		stuffp->present = SINGLE | DOOR | MULTI;
		break;
	case 'M':
		stuffp->readcmd = READ1X;
		stuffp->present = SINGLE;
		break;
	default:
		stuffp->present = 0;
		break;
	}

	stuffp->playcmd = READ1X;

	if (!stuffp->present) {
		release_region(stuffp->wreg_data, MCDX_IO_SIZE);
		xwarn("%s=0x%03x,%d: Init failed. No Mitsumi CD-ROM?.\n",
		      MCDX, stuffp->wreg_data, stuffp->irq);
		kfree(stuffp);
		put_disk(disk);
		return 0;	/* next drive */
	}

	xtrace(INIT, "init() register blkdev\n");
	if (register_blkdev(MAJOR_NR, "mcdx")) {
		release_region(stuffp->wreg_data, MCDX_IO_SIZE);
		kfree(stuffp);
		put_disk(disk);
		return 1;
	}

	mcdx_queue = blk_init_queue(do_mcdx_request, &mcdx_lock);
	if (!mcdx_queue) {
		unregister_blkdev(MAJOR_NR, "mcdx");
		release_region(stuffp->wreg_data, MCDX_IO_SIZE);
		kfree(stuffp);
		put_disk(disk);
		return 1;
	}

	xtrace(INIT, "init() subscribe irq and i/o\n");
	if (request_irq(stuffp->irq, mcdx_intr, IRQF_DISABLED, "mcdx", stuffp)) {
		release_region(stuffp->wreg_data, MCDX_IO_SIZE);
		xwarn("%s=0x%03x,%d: Init failed. Can't get irq (%d).\n",
		      MCDX, stuffp->wreg_data, stuffp->irq, stuffp->irq);
		stuffp->irq = 0;
		/* NOTE(review): MAJOR_NR stays registered on this path --
		 * looks like a leak, but preserved pending confirmation. */
		blk_cleanup_queue(mcdx_queue);
		kfree(stuffp);
		put_disk(disk);
		return 0;
	}

	xtrace(INIT, "init() get garbage\n");
	{
		int i;
		/* drain stale status bytes left over from the reset */
		mcdx_delay(stuffp, HZ / 2);
		for (i = 100; i; i--)
			(void) inb(stuffp->rreg_status);
	}

#ifdef WE_KNOW_WHY
	/* irq 11 -> channel register */
	outb(0x50, stuffp->wreg_chn);
#endif

	xtrace(INIT, "init() set non dma but irq mode\n");
	mcdx_config(stuffp, 1);

	stuffp->info.ops = &mcdx_dops;
	stuffp->info.speed = 2;
	stuffp->info.capacity = 1;
	stuffp->info.handle = stuffp;
	sprintf(stuffp->info.name, "mcdx%d", drive);
	disk->major = MAJOR_NR;
	disk->first_minor = drive;
	strcpy(disk->disk_name, stuffp->info.name);
	disk->fops = &mcdx_bdops;
	disk->flags = GENHD_FL_CD;
	stuffp->disk = disk;

	sprintf(msg, " mcdx: Mitsumi CD-ROM installed at 0x%03x, irq %d."
		" (Firmware version %c %x)\n",
		stuffp->wreg_data, stuffp->irq, version.code, version.ver);
	mcdx_stuffp[drive] = stuffp;
	xtrace(INIT, "init() mcdx_stuffp[%d] = %p\n", drive, stuffp);
	if (register_cdrom(&stuffp->info) != 0) {
		printk("Cannot register Mitsumi CD-ROM!\n");
		free_irq(stuffp->irq, NULL);
		release_region(stuffp->wreg_data, MCDX_IO_SIZE);
		kfree(stuffp);
		put_disk(disk);
		if (unregister_blkdev(MAJOR_NR, "mcdx") != 0)
			xwarn("cleanup() unregister_blkdev() failed\n");
		blk_cleanup_queue(mcdx_queue);
		return 2;
	}
	disk->private_data = stuffp;
	disk->queue = mcdx_queue;
	add_disk(disk);
	/* msg contains no '%' directives, but never pass a non-literal
	 * string as a printk format */
	printk("%s", msg);
	return 0;
}
int nbdx_register_block_device(struct nbdx_file *nbdx_file) { sector_t size = nbdx_file->stbuf.st_size; int page_size = PAGE_SIZE; int err = 0; pr_debug("%s called\n", __func__); nbdx_file->major = nbdx_major; #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) nbdx_mq_reg.nr_hw_queues = submit_queues; nbdx_file->queue = blk_mq_init_queue(&nbdx_mq_reg, nbdx_file); #else nbdx_file->tag_set.ops = &nbdx_mq_ops; nbdx_file->tag_set.nr_hw_queues = submit_queues; nbdx_file->tag_set.queue_depth = NBDX_QUEUE_DEPTH; nbdx_file->tag_set.numa_node = NUMA_NO_NODE; nbdx_file->tag_set.cmd_size = sizeof(struct raio_io_u); nbdx_file->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; nbdx_file->tag_set.driver_data = nbdx_file; err = blk_mq_alloc_tag_set(&nbdx_file->tag_set); if (err) goto out; nbdx_file->queue = blk_mq_init_queue(&nbdx_file->tag_set); #endif if (IS_ERR(nbdx_file->queue)) { pr_err("%s: Failed to allocate blk queue ret=%ld\n", __func__, PTR_ERR(nbdx_file->queue)); err = PTR_ERR(nbdx_file->queue); goto blk_mq_init; } nbdx_file->queue->queuedata = nbdx_file; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nbdx_file->queue); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nbdx_file->queue); nbdx_file->disk = alloc_disk_node(1, NUMA_NO_NODE); if (!nbdx_file->disk) { pr_err("%s: Failed to allocate disk node\n", __func__); err = -ENOMEM; goto alloc_disk; } nbdx_file->disk->major = nbdx_file->major; nbdx_file->disk->first_minor = nbdx_file->index; nbdx_file->disk->fops = &nbdx_ops; nbdx_file->disk->queue = nbdx_file->queue; nbdx_file->disk->private_data = nbdx_file; blk_queue_logical_block_size(nbdx_file->queue, NBDX_SECT_SIZE); blk_queue_physical_block_size(nbdx_file->queue, NBDX_SECT_SIZE); sector_div(page_size, NBDX_SECT_SIZE); blk_queue_max_hw_sectors(nbdx_file->queue, page_size * MAX_SGL_LEN); sector_div(size, NBDX_SECT_SIZE); set_capacity(nbdx_file->disk, size); sscanf(nbdx_file->dev_name, "%s", nbdx_file->disk->disk_name); add_disk(nbdx_file->disk); goto out; alloc_disk: 
blk_cleanup_queue(nbdx_file->queue); blk_mq_init: #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) blk_mq_free_tag_set(&nbdx_file->tag_set); #endif out: return err; }
static int __zvol_create_minor(const char *name) { zvol_state_t *zv; objset_t *os; dmu_object_info_t *doi; uint64_t volsize; unsigned minor = 0; int error = 0; ASSERT(MUTEX_HELD(&zvol_state_lock)); zv = zvol_find_by_name(name); if (zv) { error = EEXIST; goto out; } doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP); error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os); if (error) goto out_doi; error = dmu_object_info(os, ZVOL_OBJ, doi); if (error) goto out_dmu_objset_disown; error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize); if (error) goto out_dmu_objset_disown; error = zvol_find_minor(&minor); if (error) goto out_dmu_objset_disown; zv = zvol_alloc(MKDEV(zvol_major, minor), name); if (zv == NULL) { error = EAGAIN; goto out_dmu_objset_disown; } if (dmu_objset_is_snapshot(os)) zv->zv_flags |= ZVOL_RDONLY; zv->zv_volblocksize = doi->doi_data_block_size; zv->zv_volsize = volsize; zv->zv_objset = os; set_capacity(zv->zv_disk, zv->zv_volsize >> 9); if (zil_replay_disable) zil_destroy(dmu_objset_zil(os), B_FALSE); else zil_replay(os, zv, zvol_replay_vector); out_dmu_objset_disown: dmu_objset_disown(os, zvol_tag); zv->zv_objset = NULL; out_doi: kmem_free(doi, sizeof(dmu_object_info_t)); out: if (error == 0) { zvol_insert(zv); add_disk(zv->zv_disk); } return (error); }
static int __init sbd_init(void) { /* * Set up our internal device. */ int ret; tfm = crypto_alloc_cipher("aes", 0, 16); if (IS_ERR(tfm)){ printk(KERN_ERR "alg: cipher: Failed to load transform"); return PTR_ERR(tfm); } Device.size = nsectors * logical_block_size; spin_lock_init(&Device.lock); Device.data = vmalloc(Device.size); if (Device.data == NULL) return -ENOMEM; /* * Get a request queue. */ Queue = blk_init_queue(sbd_request, &Device.lock); if (Queue == NULL) goto out; blk_queue_logical_block_size(Queue, logical_block_size); /* * Get registered. */ major_num = register_blkdev(major_num, "sbd"); if (major_num < 0) { printk(KERN_WARNING "sbd: unable to get major number\n"); goto out; } /* * And the gendisk structure. */ Device.gd = alloc_disk(16); if (!Device.gd) goto out_unregister; Device.gd->major = major_num; Device.gd->first_minor = 0; Device.gd->fops = &sbd_ops; Device.gd->private_data = &Device; strcpy(Device.gd->disk_name, "sbd0"); set_capacity(Device.gd, nsectors); Device.gd->queue = Queue; add_disk(Device.gd); ret = device_register(&rd_root_dev); if (ret < 0) goto out_unregister; ret = device_create_file(&rd_root_dev, &dev_attr_key); if (ret < 0) { device_unregister(&rd_root_dev); goto out_unregister; } return 0; out_unregister: unregister_blkdev(major_num, "sbd"); out: vfree(Device.data); crypto_free_cipher(tfm); return -ENOMEM; }
/* Create system device file for the enabled slot. */
/*
 * Enable an NDAS slot: query the slot's properties, build its request
 * queue and gendisk (on 2.5+ kernels), and register the disk under a
 * name derived from the unit's serial number.
 *
 * Returns NDAS_OK on success (or if already enabled), otherwise an
 * ndas_error_t; out1/out2 drop the module reference taken on entry.
 */
ndas_error_t slot_enable(int s)
{
    ndas_error_t ret = NDAS_ERROR_INTERNAL;
    int got;
    struct ndas_slot* slot = NDAS_GET_SLOT_DEV(s);

    dbgl_blk(3, "ing s#=%d slot=%p",s, slot);
    /* pin the module while the slot is being enabled */
    got = try_module_get(THIS_MODULE);
    MOD_INC_USE_COUNT;

    if ( slot == NULL)
        goto out1;

    if ( slot->enabled ) {
        dbgl_blk(1, "already enabled");
        ret = NDAS_OK;
        goto out2;
    }

    ret = ndas_query_slot(s, &slot->info);
    if ( !NDAS_SUCCESS(ret) ) {
        dbgl_blk(1, "fail ndas_query_slot");
        goto out2;
    }
    dbgl_blk(1, "mode=%d", slot->info.mode);

    slot->enabled = 1;

#if LINUX_VERSION_25_ABOVE
    slot->disk = NULL;
    spin_lock_init(&slot->lock);
    slot->queue = blk_init_queue(
        nblk_request_proc,
        &slot->lock
    );
    /* queue limits: API names changed across 2.6.31/2.6.34 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))
    blk_queue_max_phys_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
    blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
    blk_queue_max_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //renamed in 2.6.34
    //blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //removed in 2.6.34
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
    blk_queue_logical_block_size(slot->queue, slot->info.sector_size);
#else
    blk_queue_hardsect_size(slot->queue, slot->info.sector_size);
#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))
    blk_queue_max_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
    blk_queue_max_hw_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS); //renamed in 2.6.34
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
    // Set ordered queue property.
#if 0
    blk_queue_ordered(slot->queue, QUEUE_ORDERED_TAG_FLUSH,
        nblk_prepare_flush);
#endif
#endif

    slot->disk = alloc_disk(NR_PARTITION);
    if ( slot->disk == NULL ) {
        /* NOTE(review): slot->queue from blk_init_queue() above is not
         * cleaned up on this path -- looks like a leak; confirm before
         * changing. */
        slot->enabled = 0;
        dbgl_blk(1, "fail alloc disk");
        goto out2;
    }

    slot->disk->major = NDAS_BLK_MAJOR;
    /* minors are partitioned: slot index in the high bits */
    slot->disk->first_minor = (s - NDAS_FIRST_SLOT_NR) << PARTN_BITS;
    slot->disk->fops = &ndas_fops;
    slot->disk->queue = slot->queue;
    slot->disk->private_data = (void*) (long)s;
    slot->queue_flags = 0;

    dbgl_blk(1, "mode=%d", slot->info.mode);
    if ( slot->info.mode == NDAS_DISK_MODE_SINGLE ||
         slot->info.mode == NDAS_DISK_MODE_ATAPI ||
         slot->info.mode == NDAS_DISK_MODE_MEDIAJUKE )
    {
        char short_serial[NDAS_SERIAL_SHORT_LENGTH + 1];
        if (strlen(slot->info.ndas_serial) > 8) {
            /* Extended serial number is too long as sysfs object name.
               Use last 8 digit only */
            strncpy(
                short_serial,
                slot->info.ndas_serial + (NDAS_SERIAL_EXTEND_LENGTH - NDAS_SERIAL_SHORT_LENGTH),
                8);
        } else {
            strncpy(short_serial, slot->info.ndas_serial, 8);
        }
        short_serial[8] =0;  /* strncpy does not guarantee termination */
        snprintf(slot->devname, sizeof(slot->devname)-1, "ndas-%s-%d", short_serial, slot->info.unit );

        strcpy(slot->disk->disk_name, slot->devname);

        dbgl_blk(1, "just set slot->disk->%s, slot->%s", slot->disk->disk_name, slot->devname );

#if !LINUX_VERSION_DEVFS_REMOVED_COMPLETELY
        strcpy(slot->disk->devfs_name, slot->devname);
#endif
        set_capacity(slot->disk, slot->info.sectors);
        dbgl_blk(1, "just set capacity slot->disk, slot->info.sectors:%llu", slot->info.sectors);
    } else {
        /* Other mode is not implemented */
    }

    if (slot->info.mode == NDAS_DISK_MODE_ATAPI) {
        slot->disk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
        dbgl_blk(1, "just set slot->disk->flags");
#if 0
        kref_init(&slot->ndascd.kref);
#endif
    }

    dbgl_blk(4, "adding disk: slot=%d, first_minor=%d, capacity=%llu", s, slot->disk->first_minor, slot->info.sectors);
    add_disk(slot->disk);
    dbgl_blk(1, "added disk: slot=%d", s);

#ifndef NDAS_DONT_CARE_SCHEDULER
#if LINUX_VERSION_AVOID_CFQ_SCHEDULER
#if CONFIG_SYSFS
    /* Force the elevator via the queue's sysfs store hook; relies on
       the scheduler attribute being default_attrs[4]. */
    sal_assert(slot->queue->kobj.ktype);
    sal_assert(slot->queue->kobj.ktype->default_attrs);
    {
        struct queue_sysfs_entry {
            struct attribute attr;
            ssize_t (*show)(struct request_queue *, char *);
            ssize_t (*store)(struct request_queue *, const char *, size_t);
        };
        struct attribute *attr = slot->queue->kobj.ktype->default_attrs[4];
        struct queue_sysfs_entry *entry = container_of(attr , struct queue_sysfs_entry, attr);
        //dbgl_blk(1, "now to set the scheduler: slot-queue=%d, scheduler==%s, scheduler_len=%d", slot->queue, NDAS_QUEUE_SCHEDULER, strlen(NDAS_QUEUE_SCHEDULER));
        entry->store(slot->queue,NDAS_QUEUE_SCHEDULER,strlen(NDAS_QUEUE_SCHEDULER));
    }
#else
#error "NDAS driver doesn't work well with CFQ scheduler of 2.6.13 or above kernel." \
    "if you forcely want to use it, please specify compiler flags by " \
    "export NDAS_EXTRA_CFLAGS=\"-DNDAS_DONT_CARE_SCHEDULER\" "\
    "then compile the source again."
#endif
#endif
#endif
    printk("ndas: /dev/%s enabled\n" ,
            slot->devname);
#else
    /* < LINUX_VERSION_25_ABOVE: legacy 2.4 path, no gendisk */
    dbgl_blk(4, "blksize=%d", DEFAULT_ND_BLKSIZE);
    dbgl_blk(4, "size=%lld", slot->info.sectors);
    dbgl_blk(1, "hardsectsize=%d", slot->info.sector_size);
    ndas_ops_set_blk_size(
        s,
        DEFAULT_ND_BLKSIZE,
        slot->info.sectors,
        slot->info.sector_size,
        DEFAULT_ND_MAX_SECTORS
    );
#ifdef NDAS_DEVFS
    printk("ndas: /dev/nd/disc%d enabled\n" ,
            s - NDAS_FIRST_SLOT_NR);
#else
    printk("ndas: /dev/nd%c enabled\n" ,
            s + 'a' - NDAS_FIRST_SLOT_NR);
#endif
#endif

    //up(&slot->mutex);
#ifdef NDAS_MSHARE
    if(NDAS_GET_SLOT_DEV(s)->info.mode == NDAS_DISK_MODE_MEDIAJUKE)
    {
        ndas_CheckFormat(s);
    }
#endif
#if !LINUX_VERSION_25_ABOVE
    ndas_ops_read_partition(s);
#endif
    dbgl_blk(3, "ed");
    return NDAS_OK;

out2:
    //up(&slot->mutex);
out1:
    /* drop the module reference taken on entry */
    if ( got ) module_put(THIS_MODULE);
    MOD_DEC_USE_COUNT;
    return ret;
}
static int htifblk_probe(struct device *dev) { static unsigned int index = 0; static const char prefix[] = " size="; struct htif_device *htif_dev; struct htifblk_device *htifblk_dev; struct gendisk *disk; struct request_queue *queue; const char *str; u64 size; int ret; dev_info(dev, "detected disk\n"); htif_dev = to_htif_dev(dev); str = strstr(htif_dev->id, prefix); if (unlikely(str == NULL || kstrtou64(str + sizeof(prefix) - 1, 10, &size))) { dev_err(dev, "error determining size of disk\n"); return -ENODEV; } if (unlikely(size & (SECTOR_SIZE - 1))) { dev_warn(dev, "disk size not a multiple of sector size:" " %llu\n", size); } ret = -ENOMEM; htifblk_dev = devm_kzalloc(dev, sizeof(struct htifblk_device), GFP_KERNEL); if (unlikely(htifblk_dev == NULL)) goto out; htifblk_dev->size = size; htifblk_dev->dev = htif_dev; htifblk_dev->tag = index; spin_lock_init(&htifblk_dev->lock); disk = alloc_disk(1); if (unlikely(disk == NULL)) goto out; queue = blk_init_queue(htifblk_request, &htifblk_dev->lock); if (unlikely(queue == NULL)) goto out_put_disk; queue->queuedata = htifblk_dev; blk_queue_max_segments(queue, 1); blk_queue_dma_alignment(queue, HTIF_ALIGN - 1); disk->queue = queue; disk->major = major; disk->minors = 1; disk->first_minor = 0; disk->fops = &htifblk_fops; set_capacity(disk, size >> SECTOR_SIZE_SHIFT); snprintf(disk->disk_name, DISK_NAME_LEN - 1, "htifblk%u", index++); htifblk_dev->disk = disk; add_disk(disk); dev_info(dev, "added %s\n", disk->disk_name); ret = htif_request_irq(htif_dev, htifblk_isr); if (unlikely(ret)) goto out_del_disk; dev_set_drvdata(dev, htifblk_dev); return 0; out_del_disk: del_gendisk(disk); blk_cleanup_queue(disk->queue); out_put_disk: put_disk(disk); out: return ret; }
/*
 * Legacy ST-506 hard disk driver init: register the major, build the
 * shared request queue, create one 64-minor gendisk per configured
 * drive (geometry must come from hd= on the command line), grab the
 * IRQ and I/O regions, then add the disks.
 *
 * Returns 0 on success; errors unwind through out3..out in reverse
 * order of acquisition.
 * NOTE(review): most failures return -1 while queue allocation returns
 * -ENOMEM -- inconsistent but callers appear to only test non-zero.
 */
static int __init hd_init(void)
{
	int drive;

	if (register_blkdev(HD_MAJOR, "hd"))
		return -1;

	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
	if (!hd_queue) {
		unregister_blkdev(HD_MAJOR, "hd");
		return -ENOMEM;
	}

	blk_queue_max_hw_sectors(hd_queue, 255);
	init_timer(&device_timer);
	device_timer.function = hd_times_out;
	blk_queue_logical_block_size(hd_queue, 512);

	if (!NR_HD) {
		/*
		 * We don't know anything about the drive.  This means
		 * that you *MUST* specify the drive parameters to the
		 * kernel yourself.
		 *
		 * If we were on an i386, we used to read this info from
		 * the BIOS or CMOS.  This doesn't work all that well,
		 * since this assumes that this is a primary or secondary
		 * drive, and if we're using this legacy driver, it's
		 * probably an auxilliary controller added to recover
		 * legacy data off an ST-506 drive.  Either way, it's
		 * definitely safest to have the user explicitly specify
		 * the information.
		 */
		printk("hd: no drives specified - use hd=cyl,head,sectors"
			" on kernel command line\n");
		goto out;
	}

	for (drive = 0 ; drive < NR_HD ; drive++) {
		struct gendisk *disk = alloc_disk(64);
		struct hd_i_struct *p = &hd_info[drive];
		if (!disk)
			goto Enomem;
		disk->major = HD_MAJOR;
		/* 64 minors per drive: drive index in the high bits */
		disk->first_minor = drive << 6;
		disk->fops = &hd_fops;
		sprintf(disk->disk_name, "hd%c", 'a'+drive);
		disk->private_data = p;
		/* capacity in 512-byte sectors = C * H * S */
		set_capacity(disk, p->head * p->sect * p->cyl);
		disk->queue = hd_queue;
		p->unit = drive;
		hd_gendisk[drive] = disk;
		printk("%s: %luMB, CHS=%d/%d/%d\n", disk->disk_name,
			(unsigned long)get_capacity(disk)/2048,
			p->cyl, p->head, p->sect);
	}

	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
		printk("hd: unable to get IRQ%d for the hard disk driver\n",
			HD_IRQ);
		goto out1;
	}
	if (!request_region(HD_DATA, 8, "hd")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
		goto out2;
	}
	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
		goto out3;
	}

	/* Let them fly */
	for (drive = 0; drive < NR_HD; drive++)
		add_disk(hd_gendisk[drive]);

	return 0;

out3:			/* unwind in reverse order of acquisition */
	release_region(HD_DATA, 8);
out2:
	free_irq(HD_IRQ, NULL);
out1:
	for (drive = 0; drive < NR_HD; drive++)
		put_disk(hd_gendisk[drive]);
	NR_HD = 0;
out:
	del_timer(&device_timer);
	unregister_blkdev(HD_MAJOR, "hd");
	blk_cleanup_queue(hd_queue);
	return -1;
Enomem:
	/* free only the disks allocated so far, then fall into out */
	while (drive--)
		put_disk(hd_gendisk[drive]);
	goto out;
}
/*
 * cyasblkdev_add_disks() - create and register the gendisks for one
 * West Bridge storage bus.
 *
 * Queries the unit geometry on @bus_num, optionally carves out a
 * vendor "private" P-partition (when private_partition_bus matches
 * this bus and private_partition_size > 0), then populates and
 * add_disk()s bd->user_disk_0 or bd->user_disk_1, plus
 * bd->system_disk for the private partition when one exists.
 *
 * NOTE(review): reads the file-scope 'dev_data' descriptor —
 * presumably filled in by the caller's earlier device query; confirm
 * the ordering against the call site.
 *
 * Returns a CY_AS_ERROR_* status (0 == CY_AS_ERROR_SUCCESS).
 */
static int cyasblkdev_add_disks(int bus_num,
	struct cyasblkdev_blk_data *bd,
	int total_media_count,
	int devidx)
{
	int ret = 0;
	uint64_t disk_cap;
	int lcl_unit_no;
	cy_as_storage_query_unit_data unit_data = {0};

#ifndef WESTBRIDGE_NDEBUG
	cy_as_hal_print_message("%s:query device: "
		"type:%d, removable:%d, writable:%d, "
		"blksize %d, units:%d, locked:%d, "
		"erase_sz:%d\n",
		__func__,
		dev_data.desc_p.type,
		dev_data.desc_p.removable,
		dev_data.desc_p.writeable,
		dev_data.desc_p.block_size,
		dev_data.desc_p.number_units,
		dev_data.desc_p.locked,
		dev_data.desc_p.erase_unit_size
		);
#endif

	/* make sure that device is not locked */
	if (dev_data.desc_p.locked) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: device is locked\n", __func__);
#endif
		ret = cy_as_storage_release(
			bd->dev_handle, bus_num, 0, 0, 0);
		if (ret != CY_AS_ERROR_SUCCESS) {
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s cannot release"
				" storage\n", __func__);
#endif
			goto out;
		}
		/* locked media is never exported, even after a release */
		goto out;
	}

	/* query unit 0 of this bus for its geometry */
	unit_data.device = 0;
	unit_data.unit = 0;
	unit_data.bus = bus_num;
	ret = cy_as_storage_query_unit(bd->dev_handle,
		&unit_data, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS) {
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: cannot query "
			"%d device unit - reason code %d\n",
			__func__, bus_num, ret);
#endif
		goto out;
	}

	if (private_partition_bus == bus_num) {
		if (private_partition_size > 0) {
			/* try to carve out the vendor private partition */
			ret = cy_as_storage_create_p_partition(
				bd->dev_handle, bus_num, 0,
				private_partition_size, 0, 0);
			if ((ret != CY_AS_ERROR_SUCCESS) &&
				(ret != CY_AS_ERROR_ALREADY_PARTITIONED)) {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message("%s: cy_as_storage_"
					"create_p_partition after size > 0 check "
					"failed with error code %d\n",
					__func__, ret);
#endif
				/* creation failed: fall back to exporting
				 * the whole unit with no private area */
				disk_cap = (uint64_t)
					(unit_data.desc_p.unit_size);
				lcl_unit_no = 0;
			} else if (ret == CY_AS_ERROR_ALREADY_PARTITIONED) {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: cy_as_storage_create_p_partition "
					"indicates memory already partitioned\n",
					__func__);
#endif
				/* check to see that partition
				 * matches size */
				if (unit_data.desc_p.unit_size !=
					private_partition_size) {
					/* size mismatch: re-partition */
					ret = cy_as_storage_remove_p_partition(
						bd->dev_handle,
						bus_num, 0, 0, 0);
					if (ret == CY_AS_ERROR_SUCCESS) {
						ret = cy_as_storage_create_p_partition(
							bd->dev_handle, bus_num, 0,
							private_partition_size, 0, 0);
						if (ret == CY_AS_ERROR_SUCCESS) {
							unit_data.bus = bus_num;
							unit_data.device = 0;
							unit_data.unit = 1;
						} else {
#ifndef WESTBRIDGE_NDEBUG
							cy_as_hal_print_message(
								"%s: cy_as_storage_create_p_partition "
								"after removal unexpectedly failed "
								"with error %d\n", __func__, ret);
#endif
							/* need to requery bus
							 * seeing as delete
							 * successful and create
							 * failed we have changed
							 * the disk properties */
							unit_data.bus = bus_num;
							unit_data.device = 0;
							unit_data.unit = 0;
						}

						ret = cy_as_storage_query_unit(
							bd->dev_handle,
							&unit_data, 0, 0);
						if (ret != CY_AS_ERROR_SUCCESS) {
#ifndef WESTBRIDGE_NDEBUG
							cy_as_hal_print_message(
								"%s: cannot query %d "
								"device unit - reason code %d\n",
								__func__, bus_num, ret);
#endif
							goto out;
						} else {
							disk_cap = (uint64_t)
								(unit_data.desc_p.unit_size);
							lcl_unit_no = unit_data.unit;
						}
					} else {
#ifndef WESTBRIDGE_NDEBUG
						cy_as_hal_print_message(
							"%s: cy_as_storage_remove_p_partition "
							"failed with error %d\n",
							__func__, ret);
#endif
						/* removal failed: keep the
						 * existing layout, export
						 * the public unit (1) */
						unit_data.bus = bus_num;
						unit_data.device = 0;
						unit_data.unit = 1;

						ret = cy_as_storage_query_unit(
							bd->dev_handle,
							&unit_data, 0, 0);
						if (ret != CY_AS_ERROR_SUCCESS) {
#ifndef WESTBRIDGE_NDEBUG
							cy_as_hal_print_message(
								"%s: cannot query %d "
								"device unit - reason "
								"code %d\n", __func__,
								bus_num, ret);
#endif
							goto out;
						}

						disk_cap = (uint64_t)
							(unit_data.desc_p.unit_size);
						lcl_unit_no = unit_data.unit;
					}
				} else {
#ifndef WESTBRIDGE_NDEBUG
					cy_as_hal_print_message("%s: partition "
						"exists and sizes equal\n",
						__func__);
#endif
					/* partition already existed,
					 * need to query second unit */
					unit_data.bus = bus_num;
					unit_data.device = 0;
					unit_data.unit = 1;

					ret = cy_as_storage_query_unit(
						bd->dev_handle,
						&unit_data, 0, 0);
					if (ret !=
						CY_AS_ERROR_SUCCESS) {
#ifndef WESTBRIDGE_NDEBUG
						cy_as_hal_print_message(
							"%s: cannot query %d "
							"device unit "
							"- reason code %d\n",
							__func__, bus_num, ret);
#endif
						goto out;
					} else {
						disk_cap = (uint64_t)
							(unit_data.desc_p.unit_size);
						lcl_unit_no = unit_data.unit;
					}
				}
			} else {
#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
					"%s: cy_as_storage_create_p_partition "
					"created successfully\n", __func__);
#endif
				/* fresh partition: public capacity is the
				 * unit minus the private area */
				disk_cap = (uint64_t)
					(unit_data.desc_p.unit_size -
					private_partition_size);
				lcl_unit_no = 1;
			}
		}
#ifndef WESTBRIDGE_NDEBUG
		else {
			/*
			 * FIXME(review): this whole else branch is inside
			 * #ifndef WESTBRIDGE_NDEBUG, so in an NDEBUG build
			 * disk_cap and lcl_unit_no are left UNINITIALIZED
			 * whenever private_partition_size <= 0 on the
			 * private-partition bus.  The assignments below
			 * should be unconditional.
			 */
			cy_as_hal_print_message(
				"%s: invalid partition_size%d\n", __func__,
				private_partition_size);
			disk_cap = (uint64_t)
				(unit_data.desc_p.unit_size);
			lcl_unit_no = 0;
		}
#endif
	} else {
		/* no private partition on this bus: export the whole unit */
		disk_cap = (uint64_t)
			(unit_data.desc_p.unit_size);
		lcl_unit_no = 0;
	}

	if ((bus_num == 0) || (total_media_count == 1)) {
		sprintf(bd->user_disk_0->disk_name,
			"cyasblkdevblk%d", devidx);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: disk unit_sz:%lu blk_sz:%d, "
			"start_blk:%lu, capacity:%llu\n",
			__func__, (unsigned long)
			unit_data.desc_p.unit_size,
			unit_data.desc_p.block_size,
			(unsigned long)
			unit_data.desc_p.start_block,
			(uint64_t)disk_cap
			);
#endif

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: setting gendisk disk "
			"capacity to %d\n", __func__, (int) disk_cap);
#endif

		/* initializing bd->queue */
#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: init bd->queue\n",
			__func__);
#endif

		/* this will create a
		 * queue kernel thread */
		cyasblkdev_init_queue(
			&bd->queue, &bd->lock);

		bd->queue.prep_fn = cyasblkdev_blk_prep_rq;
		bd->queue.issue_fn = cyasblkdev_blk_issue_rq;
		bd->queue.data = bd;

		/*blk_size should always
		 * be a multiple of 512,
		 * set to the max to ensure
		 * that all accesses aligned
		 * to the greatest multiple,
		 * can adjust request to
		 * smaller block sizes
		 * dynamically*/
		bd->user_disk_0_read_only = !dev_data.desc_p.writeable;
		bd->user_disk_0_blk_size = dev_data.desc_p.block_size;
		bd->user_disk_0_type = dev_data.desc_p.type;
		bd->user_disk_0_bus_num = bus_num;
		bd->user_disk_0->major = major;
		bd->user_disk_0->first_minor =
			devidx << CYASBLKDEV_SHIFT;
		bd->user_disk_0->minors = 8;
		bd->user_disk_0->fops = &cyasblkdev_bdops;
		bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE;
		bd->user_disk_0->private_data = bd;
		bd->user_disk_0->queue = bd->queue.queue;
		bd->dbgprn_flags = DBGPRN_RD_RQ;
		bd->user_disk_0_unit_no = lcl_unit_no;

		blk_queue_logical_block_size(bd->queue.queue,
			bd->user_disk_0_blk_size);

		set_capacity(bd->user_disk_0, disk_cap);

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: returned from set_capacity %d\n",
			__func__, (int) disk_cap);
#endif

		/* need to start search from
		 * public partition beginning */
		if (vfat_search) {
			bd->user_disk_0_first_sector =
				cyasblkdev_get_vfat_offset(
					bd->user_disk_0_bus_num,
					bd->user_disk_0_unit_no);
		} else {
			bd->user_disk_0_first_sector = 0;
		}

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: set user_disk_0_first "
			"sector to %d\n", __func__,
			bd->user_disk_0_first_sector);
		cy_as_hal_print_message(
			"%s: add_disk: disk->major=0x%x\n",
			__func__,
			bd->user_disk_0->major);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->first_minor=0x%x\n", __func__,
			bd->user_disk_0->first_minor);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->minors=0x%x\n", __func__,
			bd->user_disk_0->minors);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->disk_name=%s\n",
			__func__,
			bd->user_disk_0->disk_name);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->part_tbl=0x%x\n", __func__,
			(unsigned int)
			bd->user_disk_0->part_tbl);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->queue=0x%x\n", __func__,
			(unsigned int)
			bd->user_disk_0->queue);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->flags=0x%x\n",
			__func__,
			(unsigned int)
			bd->user_disk_0->flags);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->driverfs_dev=0x%x\n",
			__func__,
			(unsigned int)
			bd->user_disk_0->driverfs_dev);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->slave_dir=0x%x\n",
			__func__,
			(unsigned int)
			bd->user_disk_0->slave_dir);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->random=0x%x\n",
			__func__,
			(unsigned int)
			bd->user_disk_0->random);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->node_id=0x%x\n",
			__func__,
			(unsigned int)
			bd->user_disk_0->node_id);
#endif

		add_disk(bd->user_disk_0);

	} else if ((bus_num == 1) && (total_media_count == 2)) {
		bd->user_disk_1_read_only = !dev_data.desc_p.writeable;
		bd->user_disk_1_blk_size = dev_data.desc_p.block_size;
		bd->user_disk_1_type = dev_data.desc_p.type;
		bd->user_disk_1_bus_num = bus_num;
		bd->user_disk_1->major = major;
		bd->user_disk_1->first_minor =
			(devidx + 1) << CYASBLKDEV_SHIFT;
		bd->user_disk_1->minors = 8;
		bd->user_disk_1->fops = &cyasblkdev_bdops;
		bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE;
		bd->user_disk_1->private_data = bd;
		bd->user_disk_1->queue = bd->queue.queue;
		bd->dbgprn_flags = DBGPRN_RD_RQ;
		bd->user_disk_1_unit_no = lcl_unit_no;

		sprintf(bd->user_disk_1->disk_name,
			"cyasblkdevblk%d", (devidx + 1));

#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: disk unit_sz:%lu "
			"blk_sz:%d, "
			"start_blk:%lu, "
			"capacity:%llu\n",
			__func__,
			(unsigned long)
			unit_data.desc_p.unit_size,
			unit_data.desc_p.block_size,
			(unsigned long)
			unit_data.desc_p.start_block,
			(uint64_t)disk_cap
			);
#endif

		/*blk_size should always be a
		 * multiple of 512, set to the max
		 * to ensure that all accesses
		 * aligned to the greatest multiple,
		 * can adjust request to smaller
		 * block sizes dynamically*/
		if (bd->user_disk_0_blk_size >
			bd->user_disk_1_blk_size) {
			blk_queue_logical_block_size(bd->queue.queue,
				bd->user_disk_0_blk_size);
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: set hard sect_sz:%d\n",
				__func__,
				bd->user_disk_0_blk_size);
#endif
		} else {
			blk_queue_logical_block_size(bd->queue.queue,
				bd->user_disk_1_blk_size);
#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
				"%s: set hard sect_sz:%d\n",
				__func__,
				bd->user_disk_1_blk_size);
#endif
		}

		set_capacity(bd->user_disk_1, disk_cap);

		if (vfat_search) {
			bd->user_disk_1_first_sector =
				cyasblkdev_get_vfat_offset(
					bd->user_disk_1_bus_num,
					bd->user_disk_1_unit_no);
		} else {
			bd->user_disk_1_first_sector = 0;
		}

		add_disk(bd->user_disk_1);
	}

	if (lcl_unit_no > 0) {
		if (bd->system_disk == NULL) {
			bd->system_disk = alloc_disk(8);
			if (bd->system_disk == NULL) {
				/*
				 * FIXME(review): this function returns int,
				 * but here it returns ERR_PTR(-ENOMEM) (a
				 * pointer) and kfree()s 'bd' even though the
				 * caller still owns it and earlier add_disk()
				 * calls above reference it.  This should be
				 * "ret = -ENOMEM; goto out;" with proper
				 * unwinding of the already-added disks.
				 */
				kfree(bd);
				bd = ERR_PTR(-ENOMEM);
				return bd;
			}
			disk_cap = (uint64_t)
				(private_partition_size);

			/* set properties of
			 * system disk */
			bd->system_disk_read_only =
				!dev_data.desc_p.writeable;
			bd->system_disk_blk_size =
				dev_data.desc_p.block_size;
			bd->system_disk_bus_num = bus_num;
			bd->system_disk->major = major;
			bd->system_disk->first_minor =
				(devidx + 2) << CYASBLKDEV_SHIFT;
			bd->system_disk->minors = 8;
			bd->system_disk->fops = &cyasblkdev_bdops;
			bd->system_disk->events = DISK_EVENT_MEDIA_CHANGE;
			bd->system_disk->private_data = bd;
			bd->system_disk->queue = bd->queue.queue;
			/* don't search for vfat
			 * with system disk */
			bd->system_disk_first_sector = 0;
			sprintf(
				bd->system_disk->disk_name,
				"cyasblkdevblk%d", (devidx + 2));

			set_capacity(bd->system_disk,
				disk_cap);
			add_disk(bd->system_disk);
		}
#ifndef WESTBRIDGE_NDEBUG
		else {
			cy_as_hal_print_message(
				"%s: system disk already allocated %d\n",
				__func__, bus_num);
		}
#endif
	}
out:
	return ret;
}
/*
 * stheno_module_init() - register and bring up the stheno block device.
 *
 * Initializes the wait queue and wake lock, registers a dynamic block
 * major, builds the request queue (512-byte logical blocks, bounded
 * request size and bounce limit), allocates the gendisk, starts the
 * request-servicing kthread and finally publishes the disk with
 * add_disk().
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released.
 *
 * Fix: the error path used del_gendisk() on a disk that was never
 * add_disk()ed (every failure goto happens before add_disk).  An
 * allocated-but-not-added gendisk must be dropped with put_disk().
 */
static int __init stheno_module_init( void )
{
    int retval;

    print_info( "stheno_module_init was called.\n" );

    init_waitqueue_head( &stheno_wait_q );

    wake_lock_init( &stheno_wakelock, WAKE_LOCK_SUSPEND, STHENO_NAME );

    /* dynamic major allocation (first argument 0) */
    stheno_major = register_blkdev( 0, STHENO_NAME );
    if( stheno_major <= 0 ){
        print_error( "stheno register_blkdev failed.\n" );
        retval = -EBUSY;
        goto error;
    }

    spin_lock_init( &stheno_lock );
    stheno_queue = blk_init_queue( stheno_request, &stheno_lock );
    if( stheno_queue == NULL ){
        print_error( "stheno blk_init_queue failed.\n" );
        retval = -ENOMEM;
        goto error;
    }
    /*blk_queue_hardsect_size( stheno_queue, SECTOR_SIZE );*/
    /*blk_queue_max_sectors( stheno_queue, MAX_SECTORS );*/
    blk_queue_logical_block_size( stheno_queue, SECTOR_SIZE );
    blk_queue_max_hw_sectors( stheno_queue, MAX_SECTORS );
#if defined( STHENO_BLK_BOUNCE_ANY )
    blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_ANY );
#else
    blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_HIGH ); /* default */
#endif

    stheno_gd = alloc_disk( STHENO_MINOR_COUNT );
    if( stheno_gd == NULL ){
        print_error( "stheno alloc_disk failed.\n" );
        retval = -ENOMEM;
        goto error;
    }
    stheno_gd->major = stheno_major;
    stheno_gd->first_minor = 0;
    stheno_gd->fops = &stheno_fops;
    stheno_gd->queue = stheno_queue;
    /*stheno_gd->flags = GENHD_FL_REMOVABLE;*/
    /*stheno_gd->private_data = NULL;*/
    snprintf( stheno_gd->disk_name, DISK_NAME_LEN, "%s", STHENO_NAME );

    set_capacity( stheno_gd, AMOUNT_OF_SECTORS );

    /* worker thread that services queued requests */
    stheno_thread = kthread_create( stheno_request_thread, 0, STHENO_THREAD_NAME );
    if( IS_ERR( stheno_thread ) ){
        print_error( "stheno kthread_create failed.\n" );
        retval = -EBUSY;
        goto error;
    }
    wake_up_process( stheno_thread );

    add_disk( stheno_gd );

    print_debug( "stheno major = %d\n", stheno_major );

    return 0;
error:
    /*
     * add_disk() was never reached on any failure path, so the disk
     * (if allocated) is released with put_disk(); del_gendisk() is
     * only valid for a disk that has been added.
     */
    if( stheno_gd != NULL )
        put_disk( stheno_gd );
    if( stheno_queue != NULL )
        blk_cleanup_queue( stheno_queue );
    if( stheno_major > 0 )
        unregister_blkdev( stheno_major, STHENO_NAME );
    return retval;
}
static int null_add_dev(void) { struct gendisk *disk; struct nullb *nullb; sector_t size; int rv; nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); if (!nullb) { rv = -ENOMEM; goto out; } spin_lock_init(&nullb->lock); if (queue_mode == NULL_Q_MQ && use_per_node_hctx) submit_queues = nr_online_nodes; rv = setup_queues(nullb); if (rv) goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { nullb->tag_set.ops = &null_mq_ops; nullb->tag_set.nr_hw_queues = submit_queues; nullb->tag_set.queue_depth = hw_queue_depth; nullb->tag_set.numa_node = home_node; nullb->tag_set.cmd_size = sizeof(struct nullb_cmd); nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; nullb->tag_set.driver_data = nullb; rv = blk_mq_alloc_tag_set(&nullb->tag_set); if (rv) goto out_cleanup_queues; nullb->q = blk_mq_init_queue(&nullb->tag_set); if (IS_ERR(nullb->q)) { rv = -ENOMEM; goto out_cleanup_tags; } } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; } blk_queue_make_request(nullb->q, null_queue_bio); rv = init_driver_queues(nullb); if (rv) goto out_cleanup_blk_queue; } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; } blk_queue_prep_rq(nullb->q, null_rq_prep_fn); blk_queue_softirq_done(nullb->q, null_softirq_done_fn); rv = init_driver_queues(nullb); if (rv) goto out_cleanup_blk_queue; } nullb->q->queuedata = nullb; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); disk = nullb->disk = alloc_disk_node(1, home_node); if (!disk) { rv = -ENOMEM; goto out_cleanup_blk_queue; } mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); nullb->index = nullb_indexes++; mutex_unlock(&lock); blk_queue_logical_block_size(nullb->q, bs); blk_queue_physical_block_size(nullb->q, bs); size = gb * 1024 * 1024 * 1024ULL; sector_div(size, bs); set_capacity(disk, 
size); disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; disk->major = null_major; disk->first_minor = nullb->index; disk->fops = &null_fops; disk->private_data = nullb; disk->queue = nullb->q; sprintf(disk->disk_name, "nullb%d", nullb->index); add_disk(disk); return 0; out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: if (queue_mode == NULL_Q_MQ) blk_mq_free_tag_set(&nullb->tag_set); out_cleanup_queues: cleanup_queues(nullb); out_free_nullb: kfree(nullb); out: return rv; }
/*
 * vbd_init() - module init for the virtual block device.
 *
 * Computes the read/write latency confidence limits from the module
 * parameters, allocates the backing store with vmalloc(), creates the
 * /proc statistics entry, builds the request queue, registers the
 * block major and finally publishes the gendisk.
 *
 * Returns 0 on success or -ENOMEM on failure, releasing everything
 * acquired so far.
 *
 * Fixes: (1) copy-paste bug — w_confd_freq was zeroed twice while
 * w_error_freq was never initialized; (2) the error paths leaked the
 * backing store, the /proc entry and the request queue depending on
 * which step failed — the unwind labels now release each resource in
 * reverse order of acquisition.
 */
static int __init vbd_init(void)
{
	int read_latency_usec = read_latency * 1000;
	int write_latency_usec = write_latency * 1000;
	int read_confd_limit = (read_latency_usec * error_limit) / 100;
	int write_confd_limit = (write_latency_usec * error_limit) / 100;

	/*
	 * Assign the delay parameters to the device.
	 * The confd_limit parameters give the values to subtract from
	 * and add to the target latency to get the lower and higher
	 * bounds of the confidence limit.
	 */
	device.r_lower_limit = read_latency_usec - read_confd_limit;
	device.r_upper_limit = read_latency_usec + read_confd_limit;
	device.w_lower_limit = write_latency_usec - write_confd_limit;
	device.w_upper_limit = write_latency_usec + write_confd_limit;
	device.r_confd_freq = 0;
	device.r_error_freq = 0;
	device.w_confd_freq = 0;
	device.w_error_freq = 0;	/* was a duplicate w_confd_freq = 0 */

	/*
	 * Allocate the backing store for the device.
	 */
	device.size = nsectors * logical_block_size;
	spin_lock_init(&device.lock);
	device.data = vmalloc(device.size);
	if (device.data == NULL)
		return -ENOMEM;

	/*
	 * Initialize the procfs entry for vbd.
	 */
	device.procfs_file = create_proc_read_entry(MODULE_NAME, 0444, NULL,
						    proc_read_vbd_stats, NULL);
	if (device.procfs_file == NULL)
		goto out;	/* previously leaked device.data */

	vbd_queue = blk_init_queue(vbd_request, &device.lock);
	if (vbd_queue == NULL)
		goto out_proc;

	/*
	 * Let the kernel know the logical block size this queue
	 * operates on.
	 */
	blk_queue_logical_block_size(vbd_queue, logical_block_size);

	/* Register the device (major_num == 0 requests a dynamic major) */
	major_num = register_blkdev(major_num, MODULE_NAME);
	if (major_num < 0) {
		/* NOTE(review): message typo — presumably "major number" */
		printk(KERN_WARNING "vbd: unable to get a major problem\n");
		goto out_queue;
	}

	device.gd = alloc_disk(16);
	if (!device.gd)
		goto out_unregister;

	/* Populate our device structure */
	device.gd->major = major_num;
	device.gd->first_minor = 0;
	device.gd->fops = &vbd_ops;
	device.gd->private_data = &device;
	strcpy(device.gd->disk_name, MODULE_NAME);
	set_capacity(device.gd, nsectors);
	device.gd->queue = vbd_queue;
	add_disk(device.gd);

	return 0;

	/* Unwind in reverse order of acquisition. */
out_unregister:
	unregister_blkdev(major_num, MODULE_NAME);
out_queue:
	blk_cleanup_queue(vbd_queue);
out_proc:
	remove_proc_entry(MODULE_NAME, NULL);
out:
	vfree(device.data);
	return -ENOMEM;
}
/* * Allocate and initialise a blank device with a given minor. */ static struct mapped_device *alloc_dev(int minor) { int r; struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL); void *old_md; if (!md) { DMWARN("unable to allocate device, out of memory."); return NULL; } if (!try_module_get(THIS_MODULE)) goto bad0; /* get a minor number for the dev */ if (minor == DM_ANY_MINOR) r = next_free_minor(md, &minor); else r = specific_minor(md, minor); if (r < 0) goto bad1; memset(md, 0, sizeof(*md)); init_rwsem(&md->io_lock); init_MUTEX(&md->suspend_lock); spin_lock_init(&md->pushback_lock); rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); atomic_set(&md->event_nr, 0); md->queue = blk_alloc_queue(GFP_KERNEL); if (!md->queue) goto bad1_free_minor; md->queue->queuedata = md; md->queue->backing_dev_info.congested_fn = dm_any_congested; md->queue->backing_dev_info.congested_data = md; blk_queue_make_request(md->queue, dm_request); blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); md->queue->unplug_fn = dm_unplug_all; md->queue->issue_flush_fn = dm_flush_all; md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache); if (!md->io_pool) goto bad2; md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache); if (!md->tio_pool) goto bad3; md->bs = bioset_create(16, 16); if (!md->bs) goto bad_no_bioset; md->disk = alloc_disk(1); if (!md->disk) goto bad4; atomic_set(&md->pending, 0); init_waitqueue_head(&md->wait); init_waitqueue_head(&md->eventq); md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); add_disk(md->disk); format_dev_t(md->name, MKDEV(_major, minor)); /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); old_md = idr_replace(&_minor_idr, md, minor); spin_unlock(&_minor_lock); BUG_ON(old_md != MINOR_ALLOCED); return md; bad4: bioset_free(md->bs); 
bad_no_bioset: mempool_destroy(md->tio_pool); bad3: mempool_destroy(md->io_pool); bad2: blk_cleanup_queue(md->queue); bad1_free_minor: free_minor(minor); bad1: module_put(THIS_MODULE); bad0: kfree(md); return NULL; }
/* pdev is NULL for eisa */
/*
 * cpqarray_register_ctlr() - finish bringing up SMART2 controller @i.
 *
 * Registers the block major, hooks the interrupt, allocates the
 * per-logical-drive gendisks and the DMA command pool, builds the
 * request queue, queries the firmware for the drive geometry and
 * finally add_disk()s every logical drive that reports a non-zero
 * block count.
 *
 * Returns the controller index @i on success, -1 on failure (with
 * everything acquired so far torn down via the Enomem* cascade).
 */
static int cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
	request_queue_t *q;
	int j;

	/*
	 * register block devices
	 * Find disks and fill in structs
	 * Get an interrupt, set the Q depth and get into /proc
	 */

	/* If this successful it should insure that we are the only */
	/* instance of the driver */
	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
		goto Enomem4;
	}
	/* mask interrupts until the timer and spinlock are ready */
	hba[i]->access.set_intr_mask(hba[i], 0);
	if (request_irq(hba[i]->intr, do_ida_intr,
		SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
		hba[i]->devname, hba[i]))
	{
		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
			hba[i]->intr, hba[i]->devname);
		goto Enomem3;
	}

	for (j=0; j<NWD; j++) {
		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
		if (!ida_gendisk[i][j])
			goto Enomem2;
	}

	/* DMA-coherent pool of command blocks shared by all drives */
	hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
		&(hba[i]->cmd_pool_dhandle));
	hba[i]->cmd_pool_bits = kmalloc(
		((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
		GFP_KERNEL);

	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
		goto Enomem1;

	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
	memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
	printk(KERN_INFO "cpqarray: Finding drives on %s",
		hba[i]->devname);

	spin_lock_init(&hba[i]->lock);
	q = blk_init_queue(do_ida_request, &hba[i]->lock);
	if (!q)
		goto Enomem1;

	hba[i]->queue = q;
	q->queuedata = hba[i];

	getgeometry(i);
	start_fwbk(i);

	ida_procinit(i);

	if (pdev)
		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);

	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(q, SG_MAX);

	/* This is a driver limit and could be eliminated. */
	blk_queue_max_phys_segments(q, SG_MAX);

	init_timer(&hba[i]->timer);
	hba[i]->timer.expires = jiffies + IDA_TIMER;
	hba[i]->timer.data = (unsigned long)hba[i];
	hba[i]->timer.function = ida_timer;
	add_timer(&hba[i]->timer);

	/* Enable IRQ now that spinlock and rate limit timer are set up */
	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);

	for(j=0; j<NWD; j++) {
		struct gendisk *disk = ida_gendisk[i][j];
		drv_info_t *drv = &hba[i]->drv[j];
		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
		disk->major = COMPAQ_SMART2_MAJOR + i;
		disk->first_minor = j<<NWD_SHIFT;
		disk->fops = &ida_fops;
		/* drive 0 is always exported; others only if present */
		if (j && !drv->nr_blks)
			continue;
		blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
		set_capacity(disk, drv->nr_blks);
		disk->queue = hba[i]->queue;
		disk->private_data = drv;
		add_disk(disk);
	}

	/* done ! */
	return(i);

Enomem1:
	/*
	 * Command pool (or queue) allocation failed; j == NWD here, so
	 * falling into Enomem2 releases every gendisk.
	 */
	nr_ctlr = i;
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
				hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
Enomem2:
	/* releases disks [0, j) — exactly those already allocated */
	while (j--) {
		put_disk(ida_gendisk[i][j]);
		ida_gendisk[i][j] = NULL;
	}
	free_irq(hba[i]->intr, hba[i]);
Enomem3:
	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
Enomem4:
	if (pdev)
		pci_set_drvdata(pdev, NULL);
	release_io_mem(hba[i]);
	free_hba(i);

	printk( KERN_ERR "cpqarray: out of memory");

	return -1;
}
/* --------------------------------------------------------------------
 * SystemACE device setup/teardown code
 */
/*
 * ace_setup() - bring one Xilinx SystemACE device online.
 *
 * Maps the register window, initializes the FSM tasklet, stall timer
 * and request queue, allocates the gendisk, probes the bus width and
 * endianness, sanity-checks the hardware version register, hooks the
 * interrupt (falling back to polled mode on failure), enables IRQs,
 * revalidates the media and finally add_disk()s the device.
 *
 * Returns 0 on success, -ENOMEM on any failure (including a bad
 * version register — the errno is generic by design of this path).
 */
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_logical_block_size(ace->queue, 512);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* 0x0101 should work regardless of endianess */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianess */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Now we can hook up the irq handler */
	if (ace->irq != NO_IRQ) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = NO_IRQ;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		(version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);
	return 0;

	/* Unwind in reverse order of acquisition. */
err_read:
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		(unsigned long long) ace->physaddr);
	return -ENOMEM;
}
static int null_add_dev(void) { struct gendisk *disk; struct nullb *nullb; sector_t size; nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); if (!nullb) return -ENOMEM; spin_lock_init(&nullb->lock); if (setup_queues(nullb)) goto err; if (queue_mode == NULL_Q_MQ) { null_mq_reg.numa_node = home_node; null_mq_reg.queue_depth = hw_queue_depth; if (use_per_node_hctx) { null_mq_reg.ops->alloc_hctx = null_alloc_hctx; null_mq_reg.ops->free_hctx = null_free_hctx; null_mq_reg.nr_hw_queues = nr_online_nodes; } else { null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; null_mq_reg.nr_hw_queues = submit_queues; } nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); blk_queue_make_request(nullb->q, null_queue_bio); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); blk_queue_prep_rq(nullb->q, null_rq_prep_fn); if (nullb->q) blk_queue_softirq_done(nullb->q, null_softirq_done_fn); } if (!nullb->q) goto queue_fail; nullb->q->queuedata = nullb; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); disk = nullb->disk = alloc_disk_node(1, home_node); if (!disk) { queue_fail: if (queue_mode == NULL_Q_MQ) blk_mq_free_queue(nullb->q); else blk_cleanup_queue(nullb->q); cleanup_queues(nullb); err: kfree(nullb); return -ENOMEM; } mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); nullb->index = nullb_indexes++; mutex_unlock(&lock); blk_queue_logical_block_size(nullb->q, bs); blk_queue_physical_block_size(nullb->q, bs); size = gb * 1024 * 1024 * 1024ULL; sector_div(size, bs); set_capacity(disk, size); disk->flags |= GENHD_FL_EXT_DEVT; disk->major = null_major; disk->first_minor = nullb->index; disk->fops = &null_fops; disk->private_data = nullb; disk->queue = nullb->q; sprintf(disk->disk_name, "nullb%d", nullb->index); add_disk(disk); return 0; }