/*
 * Allocate and initialise an omap_mbox_queue: a block request queue bound
 * to @proc, plus optional deferred-work handlers (a workqueue item and/or
 * a tasklet, both keyed on @mbox).
 *
 * Returns the new queue on success, NULL on allocation failure.
 */
static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox, request_fn_proc *proc, void (*work) (struct work_struct *), void (*tasklet)(unsigned long))
{
	struct omap_mbox_queue *queue;
	struct request_queue *rq;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;

	spin_lock_init(&queue->lock);

	rq = blk_init_queue(proc, &queue->lock);
	if (!rq) {
		kfree(queue);
		return NULL;
	}
	rq->queuedata = mbox;
	queue->queue = rq;

	if (work)
		INIT_WORK(&queue->work, work);
	if (tasklet)
		tasklet_init(&queue->tasklet, tasklet, (unsigned long)mbox);

	return queue;
}
/*
 * card_init_queue - initialise a card_queue for @card.
 * @cq:   queue state to fill in
 * @card: memory card to attach the queue to
 * @lock: spinlock protecting the request queue
 *
 * Sets up the block request queue, optional bounce buffering, switches the
 * I/O scheduler to deadline, and spawns the queue worker thread.
 *
 * Returns 0 on success or a negative errno. BUG FIX: the original leaked the
 * request queue (and scatterlist) on the elevator and kthread failure paths
 * (the kthread cleanup `goto` was commented out), and registered the reboot
 * notifier even after a failure. All error paths now unwind fully.
 */
int card_init_queue(struct card_queue *cq, struct memory_card *card, spinlock_t *lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = 0;

	/* Use the host's DMA mask as the bounce limit when one is set. */
	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);

	if (!cq->bounce_buf) {
		/* No bounce buffer: expose the host's real transfer limits. */
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
				 GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	/* change card io scheduler from cfq to deadline */
	cq->queue->queuedata = cq;
	elevator_exit(cq->queue->elevator);
	cq->queue->elevator = NULL;
	ret = elevator_init(cq->queue, "deadline");
	if (ret) {
		printk("[card_init_queue] elevator_init deadline fail\n");
		goto free_sg;
	}

	init_MUTEX(&cq->thread_sem);
	cq->thread = kthread_run(card_queue_thread, cq, "%s_queue", card->name);
	if (IS_ERR(cq->thread)) {
		ret = PTR_ERR(cq->thread);
		goto free_sg;
	}

	cq->nb.notifier_call = card_reboot_notifier;
	register_reboot_notifier(&cq->nb);

	return 0;

free_sg:
	/* sg is only ours to free on the non-bounce path. */
	if (!cq->bounce_buf) {
		kfree(cq->sg);
		cq->sg = NULL;
	}
cleanup_queue:
	blk_cleanup_queue(cq->queue);
	cq->queue = NULL;
	return ret;
}
/* xd_init: register the block device number and set up pointer tables */
int __init xd_init(void)
{
	/* Watchdog timer used to recover the controller; armed elsewhere. */
	init_timer (&xd_watchdog_int);
	xd_watchdog_int.function = xd_watchdog;

	/* Lazily allocate the shared DMA bounce buffer
	 * (0x200 = 512 bytes per sector). */
	if (!xd_dma_buffer)
		xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
	if (!xd_dma_buffer) {
		printk(KERN_ERR "xd: Out of memory.\n");
		return -ENOMEM;
	}

	/* NOTE(review): on registration failure the DMA buffer allocated
	 * above is never released — presumably the matching xd_dma_mem
	 * free routine should be called here; verify against the rest of
	 * the driver. Also returns -1 rather than a proper -E* code. */
	if (devfs_register_blkdev(MAJOR_NR,"xd",&xd_fops)) {
		printk(KERN_ERR "xd: Unable to get major number %d\n",MAJOR_NR);
		return -1;
	}
	devfs_handle = devfs_mk_dir (NULL, xd_gendisk.major_name, NULL);

	/* 2.4-style: attach the request handler to the major's default queue. */
	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
	read_ahead[MAJOR_NR] = 8;	/* 8 sector (4kB) read ahead */

	add_gendisk(&xd_gendisk);
	xd_geninit();

	return 0;
}
static int ramblock_init(void) { /* 1. 分配一个gendisk结构体 */ ramblock_disk = alloc_disk(16); /* 次设备号个数: 分区个数+1 */ /* 2. 设置 */ /* 2.1 分配/设置队列: 提供读写能力 */ ramblock_queue = blk_init_queue(do_ramblock_request, &ramblock_lock); ramblock_disk->queue = ramblock_queue; /* 2.2 设置其他属性: 比如容量 */ major = register_blkdev(0, "ramblock"); /* cat /proc/devices */ ramblock_disk->major = major; ramblock_disk->first_minor = 0; sprintf(ramblock_disk->disk_name, "ramblock"); ramblock_disk->fops = &ramblock_fops; set_capacity(ramblock_disk, RAMBLOCK_SIZE / 512); /* 3. 硬件相关操作 */ ramblock_buf = kzalloc(RAMBLOCK_SIZE, GFP_KERNEL); /* 4. 注册 */ add_disk(ramblock_disk); return 0; }
/*
 * setup_device - initialise one osprd ramdisk device.
 * @d:     device state to initialise (zeroed here)
 * @which: device index; selects the minor number and disk-name suffix
 *
 * Allocates the backing store, request queue and gendisk, then registers
 * the disk. Returns 0 on success, -1 on failure (return convention kept
 * for existing callers).
 *
 * BUG FIX: the original leaked d->data when blk_init_queue() failed and
 * leaked both d->data and d->queue when alloc_disk() failed; the error
 * paths now release everything acquired so far.
 */
static int setup_device(osprd_info_t *d, int which)
{
	memset(d, 0, sizeof(osprd_info_t));

	/* Get memory to store the actual block data. */
	d->data = vmalloc(nsectors * SECTOR_SIZE);
	if (!d->data)
		return -1;
	memset(d->data, 0, nsectors * SECTOR_SIZE);

	/* Set up the I/O queue. */
	spin_lock_init(&d->qlock);
	d->queue = blk_init_queue(osprd_process_request_queue, &d->qlock);
	if (!d->queue) {
		vfree(d->data);
		d->data = NULL;
		return -1;
	}
	blk_queue_hardsect_size(d->queue, SECTOR_SIZE);
	d->queue->queuedata = d;

	/* The gendisk structure. */
	d->gd = alloc_disk(1);
	if (!d->gd) {
		blk_cleanup_queue(d->queue);
		d->queue = NULL;
		vfree(d->data);
		d->data = NULL;
		return -1;
	}
	d->gd->major = OSPRD_MAJOR;
	d->gd->first_minor = which;
	d->gd->fops = &osprd_ops;
	d->gd->queue = d->queue;
	d->gd->private_data = d;
	snprintf(d->gd->disk_name, 32, "osprd%c", which + 'a');
	set_capacity(d->gd, nsectors);
	add_disk(d->gd);

	/* Call the setup function. */
	osprd_setup(d);

	return 0;
}
/*
 * init_mtdblock - module init for this (2.4-era) MTD block driver.
 * Registers the MTD block major, zeroes the per-device size table, and
 * attaches the request handler to the major's default queue.
 */
int __init init_mtdblock(void)
{
	int i;

	if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}

	DEBUG(MTD_DEBUG_LEVEL3,
	      "init_mtdblock: allocated major number %d (read only).\n",
	      MTD_BLOCK_MAJOR);

	/* We fill it in at open() time. */
	for (i=0; i< MAX_MTD_DEVICES; i++) {
		mtd_sizes[i] = 0;
	}

	/* Allow the block size to default to BLOCK_SIZE. */
	blksize_size[MAJOR_NR] = NULL;
	blk_size[MAJOR_NR] = mtd_sizes;

	/* 2.4-style: install the request function on the default queue.
	 * NOTE(review): registers under MAJOR_NR but logs MTD_BLOCK_MAJOR —
	 * presumably the same value; confirm the macro definitions agree. */
	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
	return 0;
}
/*
 * simp_blkdev_init - module init: create the request queue and gendisk
 * for the simple block device and register the disk.
 *
 * Returns 0 on success, -ENOMEM when either allocation fails.
 */
static int __init simp_blkdev_init(void)
{
	int rc = -ENOMEM;

	simp_blkdev_queue = blk_init_queue(simp_blkdev_do_request, NULL);
	if (simp_blkdev_queue == NULL)
		return rc;

	simp_blkdev_disk = alloc_disk(1);
	if (simp_blkdev_disk == NULL) {
		blk_cleanup_queue(simp_blkdev_queue);
		return rc;
	}

	/* Describe the disk and hand it the queue built above. */
	strcpy(simp_blkdev_disk->disk_name, SIMP_BLKDEV_DISKNAME);
	simp_blkdev_disk->major = SIMP_BLKDEV_DEVICEMAJOR;
	simp_blkdev_disk->first_minor = 0;
	simp_blkdev_disk->fops = &simp_blkdev_fops;
	simp_blkdev_disk->queue = simp_blkdev_queue;
	set_capacity(simp_blkdev_disk, SIMP_BLKDEV_BYTES >> 9);

	add_disk(simp_blkdev_disk);
	return 0;
}
/*
 * ramblock_init - register a RAM-backed block device.
 *
 * BUG FIX: the original never checked alloc_disk() (NULL deref on the very
 * next line), never checked register_blkdev(), and leaked the gendisk when
 * blk_init_queue() failed. All steps are now checked and unwound.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init ramblock_init(void)
{
	int ret;

	/* 1. Allocate gendisk struct (whole disk + up to 15 partitions). */
	ramblock_disk = alloc_disk(16);
	if (!ramblock_disk)
		return -ENOMEM;

	/* 2.1 Allocate/configure a queue supporting read/write capabilities. */
	ramblock_queue = blk_init_queue(do_ramblock_request, &ramblock_lock);
	if (!ramblock_queue) {
		ret = -ENOMEM;
		goto err_put_disk;
	}
	ramblock_disk->queue = ramblock_queue;

	/* 2.2 Configure other properties: such as volume, etc. */
	major = register_blkdev(0, "ramblock");
	if (major <= 0) {
		ret = -EBUSY;
		goto err_cleanup_queue;
	}
	ramblock_disk->major = major;
	ramblock_disk->first_minor = 0;
	sprintf(ramblock_disk->disk_name, "ramblock");
	ramblock_disk->fops = &ramblock_fops;
	set_capacity(ramblock_disk, RAMBLOCK_SIZE / 512); /* 512 bytes per sector */

	/* 3. Register. */
	add_disk(ramblock_disk);
	return 0;

err_cleanup_queue:
	blk_cleanup_queue(ramblock_queue);
err_put_disk:
	put_disk(ramblock_disk);
	return ret;
}
/*
 * Virtual_blkdev_init - module init: allocate the gendisk and request
 * queue, describe the virtual disk, and register it with the kernel.
 *
 * Returns 0 on success, -ENOMEM when either allocation fails.
 */
static int __init Virtual_blkdev_init(void)
{
	Virtual_blkdev_disk = alloc_disk(1);
	if (Virtual_blkdev_disk == NULL)
		return -ENOMEM;

	Virtual_blkdev_queue = blk_init_queue(Virtual_blkdev_do_request, NULL);
	if (Virtual_blkdev_queue == NULL) {
		/* Queue failed after the disk succeeded: drop the disk. */
		put_disk(Virtual_blkdev_disk);
		return -ENOMEM;
	}

	/* Describe the disk and attach the queue. */
	strcpy(Virtual_blkdev_disk->disk_name, VIRTUAL_BLKDEV_DISKNAME);
	Virtual_blkdev_disk->major = VIRTUAL_BLKDEV_DEVICEMAJOR;
	Virtual_blkdev_disk->first_minor = 0;
	Virtual_blkdev_disk->fops = &Virtual_blkdev_fops;
	Virtual_blkdev_disk->queue = Virtual_blkdev_queue;
	set_capacity(Virtual_blkdev_disk, VIRTUAL_BLKDEV_BYTES >> 9);

	add_disk(Virtual_blkdev_disk);
	return 0;
}
static int ramblock_init(void) { major = register_blkdev(0, "ramblock"); /* 1.分配一个gendisk结构体 */ /* 这里的16表示分区的个数 15个分区 */ ramblock_gendisk = alloc_disk(16); /* 2. 设置 */ ramblock_gendisk->major = major; ramblock_gendisk->first_minor = 0; sprintf(ramblock_gendisk->disk_name, "ramblock"); ramblock_gendisk->fops = &ramblock_fops; /* 2.1 分配/设置队列:提供读写能力 */ ramblock_request_queue = blk_init_queue(do_ramblock_request, &ramblock_lock); ramblock_gendisk->queue = ramblock_request_queue; /* 2.2 设置其它属性:比如容量 */ set_capacity(ramblock_gendisk, RAMBLOCK_SIZE / 512); /* 3. 硬件相关操作 */ if (NULL == (ramblock_buf = kzalloc(RAMBLOCK_SIZE, GFP_KERNEL))) return -ENOMEM; /* 4. 注册 */ add_disk(ramblock_gendisk); return 0; }
/*
 * block_demo_init - module init for the demo memory-backed block device.
 *
 * BUG FIX: the original never checked the result of blk_init_queue();
 * a NULL queue would be handed to add_disk() and oops. The failure now
 * unwinds the disk and the registered major.
 *
 * Returns 0 on success, -1 on failure (convention kept from the original).
 */
static int __init block_demo_init(void)
{
	int err;

	spin_lock_init(&g_lock);

	err = register_blkdev(BLK_DEV_MAJOR, "blk-dev-demo");
	if (err != 0) {
		Log("[Error] register_blkdev failed.");
		return -1;
	}

	gp_blk_dev_disk = alloc_disk(1);
	if (!gp_blk_dev_disk) {
		Log("[Error] alloc_disk failed.");
		err = -1;
		goto FAIL_ALLOC_DISK;
	}

	gp_blk_dev_disk->major = BLK_DEV_MAJOR;
	gp_blk_dev_disk->first_minor = 0;
	sprintf(gp_blk_dev_disk->disk_name, DISK_NAME);
	set_capacity(gp_blk_dev_disk, DISK_SIZE >> 9);
	gp_blk_dev_disk->fops = &g_mem_fops;

	gp_blk_dev_disk->queue = blk_init_queue(mem_block_requeut_fn, &g_lock);
	if (!gp_blk_dev_disk->queue) {
		Log("[Error] blk_init_queue failed.");
		err = -1;
		goto FAIL_INIT_QUEUE;
	}

	add_disk(gp_blk_dev_disk);
	return 0;

FAIL_INIT_QUEUE:
	put_disk(gp_blk_dev_disk);
FAIL_ALLOC_DISK:
	unregister_blkdev(BLK_DEV_MAJOR, "blk-dev-demo");
	return err;
}
/*
 * htifbd_probe - probe callback for HTIF block devices.
 *
 * Parses the disk size from the HTIF device spec string ("size=<bytes>"),
 * allocates the driver state, request queue and gendisk, and registers
 * the disk. Returns 0 on success or -ENODEV on any failure (allocation
 * failures are also mapped to -ENODEV).
 */
static int htifbd_probe(struct device *dev)
{
	static unsigned int htifbd_nr = 0;	/* running index for disk names */
	static const char size_str[] = "size=";
	struct htif_dev *htif_dev;
	struct htifbd_dev *htifbd_dev;
	struct gendisk *gd;
	unsigned long size;

	htif_dev = to_htif_dev(dev);
	pr_info(DRIVER_NAME ": detected disk with ID %u\n", htif_dev->minor);

	/* Spec must start with "size=" followed by a decimal byte count. */
	if (unlikely(strncmp(htif_dev->spec, size_str, sizeof(size_str) - 1)
	    || kstrtoul(htif_dev->spec + sizeof(size_str) - 1, 10, &size))) {
		pr_err(DRIVER_NAME ": unable to determine size of disk %u\n",
			htif_dev->minor);
		goto err_out;
	}
	/* A non-sector-multiple size is tolerated with a warning only. */
	if (unlikely(size & (SECTOR_SIZE - 1))) {
		pr_warn(DRIVER_NAME ": size of disk %u not a multiple of sector size\n",
			htif_dev->minor);
	}

	htifbd_dev = kzalloc(sizeof(struct htifbd_dev), GFP_KERNEL);
	if (unlikely(htifbd_dev == NULL))
		goto err_out;
	htifbd_dev->size = size;
	htifbd_dev->dev = htif_dev;

	gd = alloc_disk(1);
	if (unlikely(gd == NULL))
		goto err_gd_alloc;

	/* Queue is driven by htifbd_request under the per-device lock. */
	spin_lock_init(&htifbd_dev->lock);
	gd->queue = blk_init_queue(htifbd_request, &htifbd_dev->lock);
	if (unlikely(gd->queue == NULL))
		goto err_queue_init;

	gd->major = htifbd_major;
	gd->minors = 1;
	gd->first_minor = 0;
	gd->fops = &htifbd_ops;
	gd->private_data = htifbd_dev;
	/* set_capacity() expects 512-byte sectors. */
	set_capacity(gd, size >> SECTOR_SIZE_SHIFT);
	snprintf(gd->disk_name, DISK_NAME_LEN - 1, "htifbd%u", htifbd_nr++);

	pr_info(DRIVER_NAME ": adding %s\n", gd->disk_name);
	htifbd_dev->gd = gd;
	add_disk(gd);
	return 0;

err_queue_init:
	put_disk(gd);
err_gd_alloc:
	kfree(htifbd_dev);
err_out:
	return -ENODEV;
}
/*
 * ndas_init - module init for the "myndas" block device.
 *
 * BUG FIXES:
 *  - the original called memset(dev, sizeof(struct ndas_dev), 0) with the
 *    value/size arguments swapped, leaving the struct uninitialised;
 *    kzalloc() now zeroes it correctly.
 *  - snprintf(..., 6, "myndas") truncated the disk name to "mynda";
 *    the full buffer size is used now.
 *  - the error path unregistered "ndas" although "myndas" was registered.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init ndas_init(void)
{
	int retval = 0;
	struct ndas_dev *dev;

	func();

	retval = register_blkdev(0, "myndas");
	if (retval <= 0) {
		printk(KERN_ERR "ndas: failed to register device\n");
		return retval;
	}
	major_number = retval;
	printk(KERN_INFO "ndas: register device major number %d\n", major_number);

	/* init block device (zeroed allocation) */
	dev = kzalloc(sizeof(struct ndas_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "ndas: failed to allocate memory for device\n");
		goto err1;
	}
	spin_lock_init(&dev->lock);
	Device = dev;

	/* init queue */
	dev->queue = blk_init_queue(ndas_request, &dev->lock);
	if (dev->queue == NULL) {
		printk(KERN_ERR "ndas: failed to allocate memory for queue\n");
		goto err2;
	}
	blk_queue_logical_block_size(dev->queue, HARDSECT_SIZE);
	dev->queue->queuedata = dev;

	/* gendisk structure */
	dev->gd = alloc_disk(NDAS_MINORS);
	if (dev->gd == NULL) {
		printk(KERN_ERR "ndas: failed to allocate memory for gendisk\n");
		goto err3;
	}
	dev->gd->major = major_number;
	dev->gd->first_minor = 0;
	dev->gd->fops = &blk_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	set_capacity(dev->gd, NSECTOR * (HARDSECT_SIZE / KERNEL_SECTOR_SIZE));
	snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "myndas");
	add_disk(dev->gd);

	return 0;

err3:
	blk_cleanup_queue(dev->queue);
err2:
	kfree(dev);
err1:
	Device = NULL;
	unregister_blkdev(major_number, "myndas");
	return -ENOMEM;
}
/*
 * mbd_init - module init for the mambo bogus disk.
 *
 * Allocates a gendisk and a private request queue per device, registers
 * the major, then configures and adds every disk.
 *
 * BUG FIX: the cleanup path always returned -EIO, discarding the
 * -ENOMEM recorded in 'err' for allocation failures; it now returns the
 * error actually encountered.
 */
static int __init mbd_init(void)
{
	int err = -ENOMEM;
	int i;

	for (i = 0; i < MAX_MBD; i++) {
		struct gendisk *disk = alloc_disk(1);
		if (!disk)
			goto out;
		mbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_mbd_request, &mbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
	}

	if (register_blkdev(MAJOR_NR, "mbd")) {
		err = -EIO;
		goto out;
	}
#ifdef MODULE
	printk("mambo bogus disk: registered device at major %d\n", MAJOR_NR);
#else
	printk("mambo bogus disk: compiled in with kernel\n");
#endif
	devfs_mk_dir("mambobd");

	for (i = 0; i < MAX_MBD; i++) {
		/* load defaults */
		struct gendisk *disk = mbd_dev[i].disk;
		mbd_dev[i].initialized = 0;
		mbd_dev[i].refcnt = 0;
		mbd_dev[i].flags = 0;
		disk->major = MAJOR_NR;
		disk->first_minor = i;
		disk->fops = &mbd_fops;
		disk->private_data = &mbd_dev[i];
		sprintf(disk->disk_name, "mambobd%d", i);
		sprintf(disk->devfs_name, "mambobd%d", i);
		set_capacity(disk, 0x7ffffc00ULL << 1);	/* 2 TB */
		add_disk(disk);
	}

	return 0;

out:
	/* Unwind only the fully-constructed devices (indices < i). */
	while (i--) {
		if (mbd_dev[i].disk->queue)
			blk_cleanup_queue(mbd_dev[i].disk->queue);
		put_disk(mbd_dev[i].disk);
	}
	return err;
}
/*
 * looper_init - module init for the loop-file-backed block device.
 *
 * BUG FIX: when register_blkdev() failed, the already-created request
 * queue was leaked; the error paths now clean up the queue as well.
 *
 * Returns 0 on success, -1 when no filename was given, -ENOMEM otherwise.
 */
static int __init looper_init(void)
{
	if (filename == NULL) {
		printk(KERN_WARNING "looper: no filename defined");
		return -1;
	}
	/*
	 * Set up our internal device.
	 */
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL)
		return -ENOMEM;
	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(looper_request, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_logical_block_size(Queue, logical_block_size);
	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "looper");
	if (major_num <= 0) {
		printk(KERN_WARNING "looper: unable to get major number\n");
		goto out_cleanup_queue;
	}
	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (!Device.gd)
		goto out_unregister;
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &looper_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "looper0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	return 0;

out_unregister:
	unregister_blkdev(major_num, "looper");
out_cleanup_queue:
	blk_cleanup_queue(Queue);
out:
	vfree(Device.data);
	return -ENOMEM;
}
static void setup_blk_device(struct ramdisk_dev* dev) { mutex_init(&dev->mutex); init_waitqueue_head(&dev->waitqueue); /* * Get some memory. */ memset (dev, 0, sizeof (struct ramdisk_dev)); dev->size = NSECTORS*HARDSECT_SIZE; dev->data = vmalloc(dev->size); if (dev->data == NULL) { printk (KERN_NOTICE "vmalloc failure.\n"); return; } spin_lock_init(&dev->lock); /* * The timer which "invalidates" the device. */ init_timer(&dev->timer); dev->timer.data = (unsigned long) dev; dev->timer.function = ramdisk_invalidate; dev->queue = blk_init_queue(ramdisk_request, &dev->lock); if (dev->queue == NULL) goto out_vfree; blk_queue_logical_block_size(dev->queue, HARDSECT_SIZE); dev->queue->queuedata = dev; /* * And the gendisk structure. */ dev->gd = alloc_disk(BLK_MINORS); if (! dev->gd) { printk (KERN_NOTICE "alloc_disk failure\n"); goto out_vfree; } dev->gd->major = blk_major; dev->gd->first_minor = BLK_MINORS; dev->gd->fops = &ramdisk_ops; dev->gd->queue = dev->queue; dev->gd->private_data = dev; snprintf (dev->gd->disk_name, 32, "ramdisk%c", 'a'); set_capacity(dev->gd, NSECTORS*(HARDSECT_SIZE/KERNEL_SECTOR_SIZE)); // set_capacity(dev->gd, 0); add_disk(dev->gd); return; out_vfree: if (dev->data) vfree(dev->data); }
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 *
 * BUG FIX: zv_lock is the queue lock handed to blk_init_queue(); the
 * original initialised it only after the queue was created. It is now
 * initialised first, before any use.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	if (zv == NULL)
		goto out;

	/* Must be ready before blk_init_queue() adopts it as the queue lock. */
	spin_lock_init(&zv->zv_lock);

	zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
	if (zv->zv_queue == NULL)
		goto out_kmem;

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	list_link_init(&zv->zv_next);

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return zv;

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));
out:
	return NULL;
}
/*
 * radimo_init - module init for the radimo RAM disk (2.4-era interfaces).
 *
 * BUG FIX: when register_blkdev() failed, the vmalloc'ed backing store
 * was leaked; it is now released on that path.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init radimo_init(void)
{
	int res;

	/* block size must be a multiple of sector size */
	if (radimo_soft & ((1 << RADIMO_HARDS_BITS)-1)) {
		MSG(RADIMO_ERROR, "Block size not a multiple of sector size\n");
		return -EINVAL;
	}

	/* allocate room for data (radimo_size is in KiB) */
	radimo_storage = (char *) vmalloc(1024*radimo_size);
	if (radimo_storage == NULL) {
		MSG(RADIMO_ERROR, "Not enough memory. Try a smaller size.\n");
		return -ENOMEM;
	}
	memset(radimo_storage, 0, 1024*radimo_size);

	/* register block device */
	res = register_blkdev(RADIMO_MAJOR, "radimo", &radimo_fops);
	if (res) {
		MSG(RADIMO_ERROR, "couldn't register block device\n");
		vfree(radimo_storage);
		radimo_storage = NULL;
		return res;
	}

	/* for media change */
	radimo_changed = 0;
	init_timer(&radimo_timer);

	/* set hard- and soft blocksize */
	hardsect_size[RADIMO_MAJOR] = &radimo_hard;
	blksize_size[RADIMO_MAJOR] = &radimo_soft;
	blk_size[RADIMO_MAJOR] = &radimo_size;

	/* define our request function */
	/* Here's another instace where kernel versions really matter. The
	   request queue interface changed in the 2.4 series kernels */
#if LINUX_VERSION_CODE < 0x20320
	blk_dev[RADIMO_MAJOR].request_fn = &radimo_request;
#else
	blk_init_queue(BLK_DEFAULT_QUEUE(RADIMO_MAJOR), radimo_request);
#endif
	read_ahead[RADIMO_MAJOR] = radimo_readahead;

	MSG(RADIMO_INFO, "loaded\n");
	MSG(RADIMO_INFO, "sector size of %d, block size of %d, total size = %dKb\n",
	    radimo_hard, radimo_soft, radimo_size);
	return 0;
}
/*
 * virtualblockdevice_init - module init: register the major, build the
 * request queue and gendisk, and publish the virtual disk.
 *
 * Returns 0 on success or a negative errno; every failure unwinds the
 * steps completed so far.
 */
static int __init virtualblockdevice_init(void)
{
	int ret;

	printk(KERN_ALERT "VirtualBlockDevice: Entry virtualblockdevice_init !\n");

	/* Register the block major (dynamic when the module param is 0). */
	ret = register_blkdev(virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME);
	if (ret < 0) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to register block device: virtualblockdevice ! Major: %d\tErrno: %d !\n", virtualblockdevice_major, ret);
		goto err_register;
	}
	virtualblockdevice_major = ret;
	printk(KERN_ALERT "VirtualBlockDevice: Success to register block device: virtualblockdevice ! Major: %d !\n", virtualblockdevice_major);

	/* Request queue. */
	virtualblockdevice_queue = blk_init_queue(virtualblockdevice_do_request, NULL);
	if (!virtualblockdevice_queue) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to init request_queue !\n");
		ret = -ENOMEM;
		goto err_queue;
	}
	printk(KERN_ALERT "VirtualBlockDevice: Success to init request_queue !\n");

	/* Gendisk. */
	virtualblockdevice_disk = alloc_disk(1);
	if (!virtualblockdevice_disk) {
		printk(KERN_ALERT "VirtualBlockDevice: Failure to allocate gendisk !\n");
		ret = -ENOMEM;
		goto err_disk;
	}
	printk(KERN_ALERT "VirtualBlockDevice: Success to allocate gendisk !\n");

	/* Describe the disk and attach the queue. */
	strcpy(virtualblockdevice_disk->disk_name, VIRTUALBLOCKDEVICE_NAME);
	virtualblockdevice_disk->major = virtualblockdevice_major;
	virtualblockdevice_disk->first_minor = virtualblockdevice_minor;
	virtualblockdevice_disk->fops = &virtualblockdevice_fops;
	virtualblockdevice_disk->queue = virtualblockdevice_queue;
	set_capacity(virtualblockdevice_disk, (VIRTUALBLOCKDEVICE_DISK_CAPACITY >> 9));

	add_disk(virtualblockdevice_disk);
	return 0;

err_disk:
	blk_cleanup_queue(virtualblockdevice_queue);
err_queue:
	unregister_blkdev(virtualblockdevice_major, VIRTUALBLOCKDEVICE_NAME);
err_register:
	return ret;
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	/* Use the host's DMA mask as the bounce limit when one is set. */
	if (host->dev->dma_mask && *host->dev->dma_mask)
		limit = *host->dev->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	/* Propagate the host controller's transfer limits to the queue. */
	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_sectors);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	/* Scatterlist sized for the largest request the host accepts. */
	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup;
	}

	init_completion(&mq->thread_complete);
	init_waitqueue_head(&mq->thread_wq);
	init_MUTEX(&mq->thread_sem);

	/* Spawn the per-queue worker. On success, wait for it to signal
	 * start-up, then re-arm the completion (reused for its exit).
	 * On failure, fall through to release sg and the queue. */
	ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
	if (ret >= 0) {
		wait_for_completion(&mq->thread_complete);
		init_completion(&mq->thread_complete);
		ret = 0;
		goto out;
	}

cleanup:
	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);
out:
	return ret;
}
/* * Set up our internal device. */ static void setup_device(struct sbull_dev *dev, int which) { /* * Get some memory. */ memset (dev, 0, sizeof (struct sbull_dev)); dev->size = nsectors*hardsect_size; dev->data = vmalloc(dev->size); if (dev->data == NULL) { printk (KERN_NOTICE "vmalloc failure.\n"); return; } spin_lock_init(&dev->lock); /* * The timer which "invalidates" the device. */ init_timer(&dev->timer); dev->timer.data = (unsigned long) dev; dev->timer.function = sbull_invalidate; dev->queue = blk_init_queue(sbull_request, &dev->lock); if (dev->queue == NULL) goto out_vfree; blk_queue_logical_block_size(dev->queue, hardsect_size); dev->queue->queuedata = dev; /* * And the gendisk structure. */ dev->gd = alloc_disk(SBULL_MINORS); if (! dev->gd) { printk (KERN_NOTICE "alloc_disk failure\n"); goto out_vfree; } dev->gd->major = sbull_major; dev->gd->first_minor = which*SBULL_MINORS; dev->gd->fops = &sbull_ops; dev->gd->queue = dev->queue; dev->gd->private_data = dev; snprintf (dev->gd->disk_name, 32, "sbull%c", which + 'a'); set_capacity(dev->gd, nsectors*(hardsect_size/KERNEL_SECTOR_SIZE)); add_disk(dev->gd); return; out_vfree: if (dev->data) vfree(dev->data); }
/*
 * chargement - module init: allocate the backing store, register the
 * major, create the request queue and gendisk, and publish the disk.
 *
 * Returns 0 on success or a negative errno; failures unwind everything
 * acquired so far via a goto chain.
 */
static int __init chargement (void)
{
	int rc;

	if (nb_sect_exemple <= 0 || lg_sect_exemple <= 0)
		return -EINVAL;

	data_exemple = vmalloc(nb_sect_exemple * lg_sect_exemple);
	if (data_exemple == NULL)
		return -ENOMEM;

	rc = register_blkdev(majeur_exemple, nom_exemple);
	if (rc < 0)
		goto err_free_data;
	/* Dynamic major requested: keep the value the kernel assigned. */
	if (majeur_exemple == 0)
		majeur_exemple = rc;

	spin_lock_init(&spinlock_exemple);
	request_queue_exemple = blk_init_queue(request_exemple, &spinlock_exemple);
	if (request_queue_exemple == NULL) {
		rc = -ENOMEM;
		goto err_unregister;
	}

	gendisk_exemple = alloc_disk(NB_MINEURS);
	if (gendisk_exemple == NULL) {
		rc = -ENOMEM;
		goto err_cleanup_queue;
	}

	gendisk_exemple->major = majeur_exemple;
	gendisk_exemple->first_minor = 0;
	gendisk_exemple->fops = &devops_exemple;
	gendisk_exemple->queue = request_queue_exemple;
	snprintf(gendisk_exemple->disk_name, 32, nom_exemple);
	/* Capacity must always be expressed in 512-byte sectors,
	 * hence the conversion below. */
	set_capacity(gendisk_exemple, nb_sect_exemple * lg_sect_exemple / 512);

	add_disk(gendisk_exemple);
	return 0;

err_cleanup_queue:
	blk_cleanup_queue(request_queue_exemple);
err_unregister:
	unregister_blkdev(majeur_exemple, nom_exemple);
err_free_data:
	vfree(data_exemple);
	return rc;
}
/*
 * stheno_module_init - module init for the stheno block device.
 *
 * BUG FIX: the original passed the result of kthread_create() straight to
 * wake_up_process() without an IS_ERR() check — on failure that is an
 * ERR_PTR and would oops. The thread is now validated and every earlier
 * step is unwound on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init stheno_module_init( void )
{
	stheno_major = register_blkdev( 0, MODNAME );
	if( stheno_major <= 0 ){
		printk( KERN_WARNING "register_blkdev failed\n" );
		return stheno_major;
	}

	spin_lock_init( &stheno_lock );
	stheno_queue = blk_init_queue( stheno_request, &stheno_lock );
	if( ! stheno_queue ){
		printk( KERN_WARNING "blk_init_queue failed\n" );
		unregister_blkdev( stheno_major, MODNAME );
		return -ENOMEM;
	}
	blk_queue_logical_block_size( stheno_queue, SECT_SIZE );
	blk_queue_max_sectors( stheno_queue, MAX_SECTORS );
	blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_ANY );

	stheno_gd = alloc_disk( MINOR_COUNT );
	if( ! stheno_gd ){
		printk( KERN_WARNING "alloc_disk failed\n" );
		blk_cleanup_queue( stheno_queue );
		unregister_blkdev( stheno_major, MODNAME );
		return -ENOMEM;
	}
	sprintf( stheno_gd->disk_name, "%s", MODNAME );
	stheno_gd->queue = stheno_queue;
	stheno_gd->major = stheno_major;
	stheno_gd->first_minor = 0;
	stheno_gd->fops = &stheno_fops;
	set_capacity( stheno_gd, SECT_NUM );

	sema_init( &stheno_sem, 1 );
	init_waitqueue_head( &stheno_process_q );

	stheno_thread = kthread_create( stheno_do_request, 0, "sthenod" );
	if( IS_ERR( stheno_thread ) ){
		printk( KERN_WARNING "kthread_create failed\n" );
		put_disk( stheno_gd );
		blk_cleanup_queue( stheno_queue );
		unregister_blkdev( stheno_major, MODNAME );
		return PTR_ERR( stheno_thread );
	}
	wake_up_process( stheno_thread );

	add_disk( stheno_gd );

	printk( KERN_INFO "stheno is loaded\n" );
	printk( KERN_INFO "major = %d\n", stheno_major );
	return 0;
}
/*
 * sbd_init - module init for the encrypted simple block device.
 *
 * BUG FIXES:
 *  - crypto_alloc_cipher() returns an ERR_PTR on failure and was never
 *    checked; the handle is now validated with IS_ERR().
 *  - the request queue and the cipher were leaked on the later error
 *    paths; everything acquired is now released on failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init sbd_init(void)
{
	crypt_cipher = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(crypt_cipher)) {
		printk(KERN_WARNING "sbd: unable to allocate cipher\n");
		return PTR_ERR(crypt_cipher);
	}

	/* Set up our internal device. */
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL) {
		crypto_free_cipher(crypt_cipher);
		return -ENOMEM;
	}

	/* Get a request queue. */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_logical_block_size(Queue, logical_block_size);

	/* Get registered. */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out_cleanup_queue;
	}

	/* And the gendisk structure. */
	Device.gd = alloc_disk(16);
	if (!Device.gd)
		goto out_unregister;
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	return 0;

out_unregister:
	unregister_blkdev(major_num, "sbd");
out_cleanup_queue:
	blk_cleanup_queue(Queue);
out:
	vfree(Device.data);
	crypto_free_cipher(crypt_cipher);
	return -ENOMEM;
}
/* * Initializes the block layer interfaces. */ static int sd_init_blk_dev(struct sd_host *host) { struct gendisk *disk; struct request_queue *queue; int channel; int retval; channel = to_channel(exi_get_exi_channel(host->exi_device)); /* queue */ retval = -ENOMEM; spin_lock_init(&host->queue_lock); queue = blk_init_queue(sd_request_func, &host->queue_lock); if (!queue) { sd_printk(KERN_ERR, "error initializing queue\n"); goto err_blk_init_queue; } blk_queue_dma_alignment(queue, EXI_DMA_ALIGN); blk_queue_max_phys_segments(queue, 1); blk_queue_max_hw_segments(queue, 1); blk_queue_max_sectors(queue, 8); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue); queue->queuedata = host; host->queue = queue; /* disk */ disk = alloc_disk(1 << MMC_SHIFT); if (!disk) { sd_printk(KERN_ERR, "error allocating disk\n"); goto err_alloc_disk; } disk->major = SD_MAJOR; disk->first_minor = channel << MMC_SHIFT; disk->fops = &sd_fops; sprintf(disk->disk_name, "%s%c", SD_NAME, 'a' + channel); disk->private_data = host; disk->queue = host->queue; host->disk = disk; retval = 0; goto out; err_alloc_disk: blk_cleanup_queue(host->queue); host->queue = NULL; err_blk_init_queue: out: return retval; }
/*
 * simpleblockinit - module init for the simple RAM-backed block device.
 *
 * BUG FIX: when register_blkdev() failed, the already-created request
 * queue was leaked; the error paths now clean up the queue too.
 *
 * Returns 0 on success or -ENOMEM on any failure.
 */
static int __init simpleblockinit(void)
{
	/*
	 * Set up our internal device.
	 */
	Device.size = nsectors*hardsect_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL)
		return -ENOMEM;
	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(simpleblockrequest, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_hardsect_size(Queue, hardsect_size);
	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num <= 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out_cleanup_queue;
	}
	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (! Device.gd)
		goto out_unregister;
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &simpleblockops;
	Device.gd->private_data = &Device;
	strcpy (Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors*(hardsect_size/KERNEL_SECTOR_SIZE));
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	return 0;

out_unregister:
	unregister_blkdev(major_num, "sbd");
out_cleanup_queue:
	blk_cleanup_queue(Queue);
out:
	vfree(Device.data);
	return -ENOMEM;
}
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 bounce_limit = BLK_BOUNCE_HIGH;
	int err;

	/* Honour the host device's DMA mask as the bounce limit, if set. */
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		bounce_limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (mq->queue == NULL)
		return -ENOMEM;

	/* Mirror the host controller's transfer limits onto the queue. */
	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, bounce_limit);
	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	/* Scatterlist sized for the largest request the host accepts. */
	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (mq->sg == NULL) {
		err = -ENOMEM;
		goto cleanup_queue;
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		err = PTR_ERR(mq->thread);
		goto free_sg;
	}

	return 0;

free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return err;
}
static int __init my_init (void) { disk_size = diskmb * 1024 * 1024; spin_lock_init (&lock); if (!(my_dev = vmalloc (disk_size))) return -ENOMEM; if (!(my_request_queue = blk_init_queue (my_request, &lock))) { vfree (my_dev); return -ENOMEM; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) blk_queue_hardsect_size (my_request_queue, sector_size); #else blk_queue_logical_block_size (my_request_queue, sector_size); #endif mybdrv_ma_no = register_blkdev (mybdrv_ma_no, MY_DEVICE_NAME); if (mybdrv_ma_no < 0) { printk (KERN_ERR "Failed registering mybdrv, returned %d\n", mybdrv_ma_no); vfree (my_dev); return mybdrv_ma_no; } if (!(my_gd = alloc_disk (16))) { unregister_blkdev (mybdrv_ma_no, MY_DEVICE_NAME); vfree (my_dev); return -ENOMEM; } my_gd->major = mybdrv_ma_no; my_gd->first_minor = 0; my_gd->fops = &mybdrv_fops; strcpy (my_gd->disk_name, MY_DEVICE_NAME); my_gd->queue = my_request_queue; set_capacity (my_gd, disk_size / sector_size); add_disk (my_gd); printk (KERN_INFO "device successfully registered, Major No. = %d\n", mybdrv_ma_no); printk (KERN_INFO "Capacity of ram disk is: %d MB\n", diskmb); return 0; }
/*
 * cyasblkdev_init_queue - initialise the block request queue for @bq.
 * @bq:   blkdev queue state to fill in
 * @lock: spinlock protecting the request queue
 *
 * Creates the request queue, applies the HAL's transfer limits, and
 * spawns the queue worker thread, waiting until it has started.
 *
 * BUG FIX: when kernel_thread() failed, the function returned the error
 * with the request queue still allocated; the queue is now cleaned up.
 *
 * Returns 0 on success or a negative errno.
 */
int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock)
{
	int ret;

	DBGPRN_FUNC_NAME;

	/* 1st param is a function that wakes up the queue thread */
	bq->queue = blk_init_queue(cyasblkdev_request, lock);
	if (!bq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(bq->queue, cyasblkdev_prep_request);
	blk_queue_bounce_limit(bq->queue, BLK_BOUNCE_ANY);
	blk_queue_max_hw_sectors(bq->queue, Q_MAX_SECTORS);

	/* The HAL/driver can merge scattered segments and handle them
	 * simultaneously, so allow up to Q_MAX_SGS segments. */
	blk_queue_max_segments(bq->queue, Q_MAX_SGS);

	/* should be < then HAL can handle */
	blk_queue_max_segment_size(bq->queue, 512*Q_MAX_SECTORS);

	bq->queue->queuedata = bq;
	bq->req = NULL;

	init_completion(&bq->thread_complete);
	init_waitqueue_head(&bq->thread_wq);
	sema_init(&bq->thread_sem, 1);

	ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
	if (ret < 0) {
		blk_cleanup_queue(bq->queue);
		bq->queue = NULL;
		return ret;
	}

	/* wait until the thread is spawned */
	wait_for_completion(&bq->thread_complete);

	/* reinitialize the completion */
	init_completion(&bq->thread_complete);

	return 0;
}
/*
 * swim3_attach - macio probe callback: set up one SWIM3 floppy drive,
 * its request queue and gendisk, and register the disk.
 */
static int swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
	struct gendisk *disk;
	int index, rc;

	/* NOTE(review): floppy_count is incremented even when a later step
	 * fails, so a failed probe permanently consumes a drive slot. */
	index = floppy_count++;
	if (index >= MAX_FLOPPIES)
		return -ENXIO;

	/* Add the drive */
	rc = swim3_add_device(mdev, index);
	if (rc)
		return rc;

	/* Now register that disk. Same comment about failure handling */
	disk = disks[index] = alloc_disk(1);
	if (disk == NULL)
		return -ENOMEM;

	disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
	if (disk->queue == NULL) {
		put_disk(disk);
		return -ENOMEM;
	}
	blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
	disk->queue->queuedata = &floppy_states[index];

	/* The major is only registered once, on the first drive. */
	if (index == 0) {
		/* If we failed, there isn't much we can do as the driver is still
		 * too dumb to remove the device, just bail out
		 *
		 * NOTE(review): returning 0 here reports success to the caller
		 * while leaking the disk and queue; presumably deliberate
		 * best-effort given the comment above — confirm before
		 * changing. */
		if (register_blkdev(FLOPPY_MAJOR, "fd"))
			return 0;
	}

	disk->major = FLOPPY_MAJOR;
	disk->first_minor = index;
	disk->fops = &floppy_fops;
	disk->private_data = &floppy_states[index];
	disk->flags |= GENHD_FL_REMOVABLE;
	sprintf(disk->disk_name, "fd%d", index);
	/* 2880 x 512-byte sectors = 1.44MB. */
	set_capacity(disk, 2880);
	add_disk(disk);
	return 0;
}