static struct card_blk_data *card_blk_alloc(struct memory_card *card)
{
	struct card_blk_data *card_data;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, CARD_NUM_MINORS);

	if(card->card_type == CARD_INAND)
		devidx = CARD_INAND_START_MINOR>>CARD_SHIFT;
	
	if (devidx >= CARD_NUM_MINORS)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	card_data = kmalloc(sizeof(struct card_blk_data), GFP_KERNEL);
	if (!card_data) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}

	memset(card_data, 0, sizeof(struct card_blk_data));

	card_data->block_bits = 9;

	card_data->disk = alloc_disk(1 << CARD_SHIFT);
	if (card_data->disk == NULL) {
		ret = -ENOMEM;
		kfree(card_data);
		return ERR_PTR(ret);
	}

	spin_lock_init(&card_data->lock);
	card_data->usage = 1;

	ret = card_init_queue(&card_data->queue, card, &card_data->lock);
	if (ret) {
		put_disk(card_data->disk);
		kfree(card_data);
		return ERR_PTR(ret);
	}

	card_data->queue.prep_fn = card_blk_prep_rq;
	card_data->queue.issue_fn = card_blk_issue_rq;
	card_data->queue.data = card_data;

	card_data->disk->major = major;
	card_data->disk->minors = 1 << CARD_SHIFT;
	card_data->disk->first_minor = devidx << CARD_SHIFT;
	card_data->disk->fops = &card_ops;
	card_data->disk->private_data = card_data;
	card_data->disk->queue = card_data->queue.queue;
	card_data->disk->driverfs_dev = &card->dev;

	sprintf(card_data->disk->disk_name, "cardblk%s", card->name);

	blk_queue_logical_block_size(card_data->queue.queue, 1 << card_data->block_bits);

	set_capacity(card_data->disk, card->capacity);

	return card_data;
}
/*
 * Checks if media is still valid.
 */
static int sd_revalidate_disk(struct gendisk *disk)
{
	struct sd_host *host = disk->private_data;
	int retval = 0;

	/* report missing medium for zombies */
	if (!host) {
		retval = -ENOMEDIUM;
		goto out;
	}

	/* the block layer likes to call us multiple times... */
	if (!sd_media_changed(host->disk))
		goto out;

	/* get the card into a known status */
	retval = sd_welcome_card(host);
	if (retval < 0 || sd_card_is_bad(host)) {
		retval = -ENOMEDIUM;
		goto out;
	}

	/* inform the block layer about various sizes */
	blk_queue_logical_block_size(host->queue, 1 << KERNEL_SECTOR_SHIFT);
	set_capacity(host->disk, host->card.csd.capacity /*<< (host->card.csd.read_blkbits - KERNEL_SECTOR_SHIFT)*/);

	clear_bit(__SD_MEDIA_CHANGED, &host->flags);

out:
	return retval;
}
static int __init ndas_init(void)
{
	int retval = 0;
	struct ndas_dev *dev;

	func();

	retval = register_blkdev(0, "myndas");
	if (retval <= 0) {
		printk(KERN_ERR "ndas: failed to register device\n");
		return retval;
	} else {
		major_number = retval;
		printk(KERN_INFO "ndas: register device major number %d\n", major_number);
	}

	/* init block device */
	dev = kmalloc(sizeof(struct ndas_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "ndas: failed to allocate memory for device\n");
		goto err1;
	}
	memset(dev, 0, sizeof(struct ndas_dev));
	spin_lock_init(&dev->lock);
	Device = dev;

	/* init queue */
	dev->queue = blk_init_queue(ndas_request, &dev->lock);
	if (dev->queue == NULL) {
		printk(KERN_ERR "ndas: failed to allocate memory for queue\n");
		goto err2;
	}
	blk_queue_logical_block_size(dev->queue, HARDSECT_SIZE);
	dev->queue->queuedata = dev;

	/* gendisk structure */
	dev->gd = alloc_disk(NDAS_MINORS);
	if (dev->gd == NULL) {
		printk(KERN_ERR "ndas: failed to allocate memory for gendisk\n");
		goto err3;
	}
	dev->gd->major = major_number;
	dev->gd->first_minor = 0;
	dev->gd->fops = &blk_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	set_capacity(dev->gd, NSECTOR * (HARDSECT_SIZE / KERNEL_SECTOR_SIZE));
	snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "myndas");
	add_disk(dev->gd);
	return 0;
err3:
	blk_cleanup_queue(dev->queue);
err2:
	kfree(dev);
err1:
	Device = NULL;
	unregister_blkdev(major_number, "ndas");
	return -ENOMEM;
}
/**
 * add_last_partition : expose the card's last partition as a full block
 * device; refer to the last entry of inand_partition_info[] in board-****.c.
 * A usage sketch follows this function.
 * @card: inand_card_lp
 * @offset: start offset of the last partition on the card
 * @size: capacity of the last partition, in 512-byte sectors (passed to set_capacity())
 */
int add_last_partition(struct memory_card *card, uint64_t offset, uint64_t size)
{
      struct card_blk_data *card_data;
      int ret;

      card_data = kmalloc(sizeof(struct card_blk_data), GFP_KERNEL);
      if (!card_data) {
            ret = -ENOMEM;
            return ret;
      }

      memset(card_data, 0, sizeof(struct card_blk_data));

      if(card->state & CARD_STATE_READONLY)
            card_data->read_only = 1;

      card_data->block_bits = 9;

      card_data->disk = alloc_disk(1 << CARD_SHIFT);
      if (card_data->disk == NULL) {
            ret = -ENOMEM;
            kfree(card_data);
            return ret;
      }

      spin_lock_init(&card_data->lock);
      card_data->usage = 1;

      ret = card_init_queue(&card_data->queue, card, &card_data->lock);
      if (ret) {
            put_disk(card_data->disk);
            kfree(card_data);
            return ret;
      }

      card->part_offset=offset;
      card_data->queue.prep_fn = card_blk_prep_rq;
      card_data->queue.issue_fn = card_blk_issue_rq;
      card_data->queue.data = card_data;

      card_data->disk->major = INAND_LAST_PART_MAJOR;
      card_data->disk->minors = 1 << CARD_SHIFT;
      card_data->disk->first_minor = 0;
      card_data->disk->fops = &card_ops;
      card_data->disk->private_data = card_data;
      card_data->disk->queue = card_data->queue.queue;
      card_data->disk->driverfs_dev = &card->dev;

      sprintf(card_data->disk->disk_name, "cardblk%s", card->name);

      blk_queue_logical_block_size(card_data->queue.queue, 1 << card_data->block_bits);

      set_capacity(card_data->disk, size);
      card_set_drvdata(card, card_data);
      add_disk(card_data->disk);
      return 0;
}
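/*
 * Minimal usage sketch for add_last_partition() above.  The wrapper name and
 * the LAST_PART_OFFSET / LAST_PART_SIZE macros are hypothetical placeholders:
 * size feeds set_capacity(), so it is in 512-byte sectors, while offset uses
 * whatever unit card->part_offset expects.  inand_card_lp is the card pointer
 * named in the kerneldoc above.
 */
static int __init inand_expose_last_partition(void)
{
	uint64_t offset = LAST_PART_OFFSET;	/* start of the last partition */
	uint64_t size = LAST_PART_SIZE;		/* capacity of the last partition */

	return add_last_partition(inand_card_lp, offset, size);
}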
static void setup_blk_device(struct ramdisk_dev* dev)
{
    /*
     * Get some memory.
     */
    memset (dev, 0, sizeof (struct ramdisk_dev));
    dev->size = NSECTORS*HARDSECT_SIZE;
    dev->data = vmalloc(dev->size);
    if (dev->data == NULL) {
        printk (KERN_NOTICE "vmalloc failure.\n");
        return;
    }

    mutex_init(&dev->mutex);
    init_waitqueue_head(&dev->waitqueue);
    spin_lock_init(&dev->lock);
    
    /*
     * The timer which "invalidates" the device.
     */
    init_timer(&dev->timer);
    dev->timer.data = (unsigned long) dev;
    dev->timer.function = ramdisk_invalidate;
    
    dev->queue = blk_init_queue(ramdisk_request, &dev->lock);
    if (dev->queue == NULL)
        goto out_vfree;

    blk_queue_logical_block_size(dev->queue, HARDSECT_SIZE);
    dev->queue->queuedata = dev;
    /*
     * And the gendisk structure.
     */
    dev->gd = alloc_disk(BLK_MINORS);
    if (! dev->gd) {
        printk (KERN_NOTICE "alloc_disk failure\n");
        goto out_cleanup_queue;
    }
    dev->gd->major = blk_major;
    dev->gd->first_minor = 0;
    dev->gd->fops = &ramdisk_ops;
    dev->gd->queue = dev->queue;
    dev->gd->private_data = dev;
    snprintf (dev->gd->disk_name, 32, "ramdisk%c", 'a');
    set_capacity(dev->gd, NSECTORS*(HARDSECT_SIZE/KERNEL_SECTOR_SIZE));
//    set_capacity(dev->gd, 0);
    add_disk(dev->gd);
    return;

  out_cleanup_queue:
    blk_cleanup_queue(dev->queue);
  out_vfree:
    if (dev->data)
        vfree(dev->data);
}
static int __init looper_init(void) {
  
  if (filename == NULL) {
    printk(KERN_WARNING "looper: no filename defined\n");
    return -EINVAL;
  }

  /*
   * Set up our internal device.
   */
  Device.size = nsectors * logical_block_size;
  spin_lock_init(&Device.lock);
  Device.data = vmalloc(Device.size);
  if (Device.data == NULL)
    return -ENOMEM;
  /*
   * Get a request queue.
   */
  Queue = blk_init_queue(looper_request, &Device.lock);
  if (Queue == NULL)
    goto out;
  blk_queue_logical_block_size(Queue, logical_block_size);
  /*
   * Get registered.
   */
  major_num = register_blkdev(major_num, "looper");
  if (major_num <= 0) {
    printk(KERN_WARNING "looper: unable to get major number\n");
    goto out;
  }
  /*
   * And the gendisk structure.
   */
  Device.gd = alloc_disk(16);
  if (!Device.gd)
    goto out_unregister;
  Device.gd->major = major_num;
  Device.gd->first_minor = 0;
  Device.gd->fops = &looper_ops;
  Device.gd->private_data = &Device;
  strcpy(Device.gd->disk_name, "looper0");
  set_capacity(Device.gd, nsectors);
  Device.gd->queue = Queue;
  add_disk(Device.gd);
  
  return 0;
  
 out_unregister:
  unregister_blkdev(major_num, "looper");
 out:
  vfree(Device.data);
  return -ENOMEM;
}
/*
* Set up our internal device.
*/
static void setup_device(struct sbull_dev *dev, int which)
{
	/*
	* Get some memory.
	*/
	memset (dev, 0, sizeof (struct sbull_dev));
	dev->size = nsectors*hardsect_size;
	dev->data = vmalloc(dev->size);
	if (dev->data == NULL) {
	   printk (KERN_NOTICE "vmalloc failure.\n");
	   return;
	}
	spin_lock_init(&dev->lock);

	/*
	* The timer which "invalidates" the device.
	*/
	init_timer(&dev->timer);
	dev->timer.data = (unsigned long) dev;
	dev->timer.function = sbull_invalidate;


	dev->queue = blk_init_queue(sbull_request, &dev->lock);
	if (dev->queue == NULL)
		goto out_vfree;


	blk_queue_logical_block_size(dev->queue, hardsect_size);
	dev->queue->queuedata = dev;
	/*
	* And the gendisk structure.
	*/
	dev->gd = alloc_disk(SBULL_MINORS);
	if (! dev->gd) {
	   printk (KERN_NOTICE "alloc_disk failure\n");
	   goto out_vfree;
	}
	dev->gd->major = sbull_major;
	dev->gd->first_minor = which*SBULL_MINORS;
	dev->gd->fops = &sbull_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf (dev->gd->disk_name, 32, "sbull%c", which + 'a');
	set_capacity(dev->gd, nsectors*(hardsect_size/KERNEL_SECTOR_SIZE));
	add_disk(dev->gd);
	return;

	out_vfree:
	if (dev->data)
	   vfree(dev->data);
}
static int __init stheno_module_init( void )
{
    stheno_major = register_blkdev( 0, MODNAME );
    if( stheno_major <= 0 ){
	printk( KERN_WARNING "register_blkdev failed\n" );
        return stheno_major;
    }

    spin_lock_init( &stheno_lock );
    stheno_queue = blk_init_queue( stheno_request, &stheno_lock );
    if( ! stheno_queue ){
        printk( KERN_WARNING "blk_init_queue failed\n" );
        unregister_blkdev( stheno_major, MODNAME );
        return -ENOMEM;
    }
    blk_queue_logical_block_size( stheno_queue, SECT_SIZE );
    blk_queue_max_sectors( stheno_queue, MAX_SECTORS );
    blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_ANY );

    stheno_gd = alloc_disk( MINOR_COUNT );
    if( ! stheno_gd ){
        printk( KERN_WARNING "alloc_disk failed\n" );
        blk_cleanup_queue( stheno_queue );
        unregister_blkdev( stheno_major, MODNAME );
        return -ENOMEM;
    }
    sprintf( stheno_gd->disk_name, "%s", MODNAME );
    stheno_gd->queue = stheno_queue;
    stheno_gd->major = stheno_major;
    stheno_gd->first_minor = 0;
    stheno_gd->fops = &stheno_fops;
    set_capacity( stheno_gd, SECT_NUM );

    sema_init(&stheno_sem, 1);
    init_waitqueue_head(&stheno_process_q);

    stheno_thread = kthread_create(stheno_do_request, 0, "sthenod");
    if( IS_ERR( stheno_thread ) ){
        printk( KERN_WARNING "kthread_create failed\n" );
        put_disk( stheno_gd );
        blk_cleanup_queue( stheno_queue );
        unregister_blkdev( stheno_major, MODNAME );
        return PTR_ERR( stheno_thread );
    }

    wake_up_process(stheno_thread);
    add_disk( stheno_gd );

    printk( KERN_INFO "stheno is loaded\n" );
    printk( KERN_INFO "major = %d\n", stheno_major );

    return 0;
}
static int __init sbd_init(void) {
	
	crypt_cipher = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(crypt_cipher))
		return PTR_ERR(crypt_cipher);
	
	/*  Set up our internal device. */
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL) {
		return -ENOMEM;
	}
	
	/* Get a request queue. */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL) {
		goto out;
	}
	blk_queue_logical_block_size(Queue, logical_block_size);
	
	/* Get registered. */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out;
	}
	
	/* And the gendisk structure. */
	Device.gd = alloc_disk(16);
	if (!Device.gd) {
		goto out_unregister;
	}
	
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);
	
	return 0;
	
out_unregister:
	unregister_blkdev(major_num, "sbd");
out:
	vfree(Device.data);
	return -ENOMEM;
}
static int __init my_init (void)
{
    disk_size = diskmb * 1024 * 1024;
    spin_lock_init (&lock);

    if (!(my_dev = vmalloc (disk_size)))
        return -ENOMEM;

    if (!(my_request_queue = blk_init_queue (my_request, &lock))) {
        vfree (my_dev);
        return -ENOMEM;
    }

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)
    blk_queue_hardsect_size (my_request_queue, sector_size);
#else
    blk_queue_logical_block_size (my_request_queue, sector_size);
#endif

    mybdrv_ma_no = register_blkdev (mybdrv_ma_no, MY_DEVICE_NAME);
    if (mybdrv_ma_no < 0) {
        printk (KERN_ERR "Failed registering mybdrv, returned %d\n",
                mybdrv_ma_no);
        blk_cleanup_queue (my_request_queue);
        vfree (my_dev);
        return mybdrv_ma_no;
    }

    if (!(my_gd = alloc_disk (16))) {
        unregister_blkdev (mybdrv_ma_no, MY_DEVICE_NAME);
        blk_cleanup_queue (my_request_queue);
        vfree (my_dev);
        return -ENOMEM;
    }

    my_gd->major = mybdrv_ma_no;
    my_gd->first_minor = 0;
    my_gd->fops = &mybdrv_fops;
    strcpy (my_gd->disk_name, MY_DEVICE_NAME);
    my_gd->queue = my_request_queue;
    set_capacity (my_gd, disk_size / sector_size);
    add_disk (my_gd);

    printk (KERN_INFO "device successfully registered, Major No. = %d\n",
            mybdrv_ma_no);
    printk (KERN_INFO "Capacity of ram disk is: %d MB\n", diskmb);

    return 0;
}
static int __init stackbd_init(void)
{
	/* Set up our internal device */
	spin_lock_init(&stackbd.lock);

	/* blk_alloc_queue() instead of blk_init_queue() so it won't set up the
     * queue for requests.
     */
	if (!(stackbd.queue = blk_alloc_queue(GFP_KERNEL))) {
		printk("stackbd: alloc_queue failed\n");
		return -EFAULT;
	}

	blk_queue_make_request(stackbd.queue, stackbd_make_request);
	blk_queue_logical_block_size(stackbd.queue, LOGICAL_BLOCK_SIZE);

	/* Get registered */
	if ((major_num = register_blkdev(major_num, STACKBD_NAME)) < 0) {
		printk("stackbd: unable to get major number\n");
		goto error_after_alloc_queue;
	}

	/* Gendisk structure */
	if (!(stackbd.gd = alloc_disk(16)))
		goto error_after_register_blkdev;
	stackbd.gd->major = major_num;
	stackbd.gd->first_minor = 0;
	stackbd.gd->fops = &stackbd_ops;
	stackbd.gd->private_data = &stackbd;
	strcpy(stackbd.gd->disk_name, STACKBD_NAME_0);
	stackbd.gd->queue = stackbd.queue;
	add_disk(stackbd.gd);

    printk("stackbd: init done\n");

	return 0;

error_after_register_blkdev:
	unregister_blkdev(major_num, STACKBD_NAME);
error_after_alloc_queue:
	blk_cleanup_queue(stackbd.queue);

	return -EFAULT;
}
int nbdx_register_block_device(struct nbdx_file *nbdx_file)
{
	sector_t size = nbdx_file->stbuf.st_size;
	int page_size = PAGE_SIZE;
	int err = 0;

	pr_debug("%s called\n", __func__);

	nbdx_file->major = nbdx_major;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
	nbdx_mq_reg.nr_hw_queues = submit_queues;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_mq_reg, nbdx_file);
#else
	nbdx_file->tag_set.ops = &nbdx_mq_ops;
	nbdx_file->tag_set.nr_hw_queues = submit_queues;
	nbdx_file->tag_set.queue_depth = NBDX_QUEUE_DEPTH;
	nbdx_file->tag_set.numa_node = NUMA_NO_NODE;
	nbdx_file->tag_set.cmd_size	= sizeof(struct raio_io_u);
	nbdx_file->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	nbdx_file->tag_set.driver_data = nbdx_file;

	err = blk_mq_alloc_tag_set(&nbdx_file->tag_set);
	if (err)
		goto out;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_file->tag_set);
#endif
	if (IS_ERR(nbdx_file->queue)) {
		pr_err("%s: Failed to allocate blk queue ret=%ld\n",
		       __func__, PTR_ERR(nbdx_file->queue));
		err = PTR_ERR(nbdx_file->queue);
		goto blk_mq_init;
	}

	nbdx_file->queue->queuedata = nbdx_file;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nbdx_file->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nbdx_file->queue);

	nbdx_file->disk = alloc_disk_node(1, NUMA_NO_NODE);
	if (!nbdx_file->disk) {
		pr_err("%s: Failed to allocate disk node\n", __func__);
		err = -ENOMEM;
		goto alloc_disk;
	}

	nbdx_file->disk->major = nbdx_file->major;
	nbdx_file->disk->first_minor = nbdx_file->index;
	nbdx_file->disk->fops = &nbdx_ops;
	nbdx_file->disk->queue = nbdx_file->queue;
	nbdx_file->disk->private_data = nbdx_file;
	blk_queue_logical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	blk_queue_physical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	sector_div(page_size, NBDX_SECT_SIZE);
	blk_queue_max_hw_sectors(nbdx_file->queue, page_size * MAX_SGL_LEN);
	sector_div(size, NBDX_SECT_SIZE);
	set_capacity(nbdx_file->disk, size);
	sscanf(nbdx_file->dev_name, "%s", nbdx_file->disk->disk_name);
	add_disk(nbdx_file->disk);
	goto out;

alloc_disk:
	blk_cleanup_queue(nbdx_file->queue);
blk_mq_init:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	blk_mq_free_tag_set(&nbdx_file->tag_set);
#endif
out:
	return err;
}
static int __init vbd_init(void) {
	
  int read_latency_usec = read_latency * 1000;
  int write_latency_usec = write_latency * 1000;
  int read_confd_limit = (read_latency_usec * error_limit) / 100;
  int write_confd_limit = (write_latency_usec * error_limit) / 100;

  /*
   * Assign the delay parameters to the device.
   * The confd_limit values give the amounts to subtract from and add to
   * the latency to get the lower and upper bounds of the confidence limit.
   */
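  /*
   * Worked example with hypothetical module parameters: read_latency = 10 (ms)
   * and error_limit = 20 (%) give read_latency_usec = 10000 and
   * read_confd_limit = 2000, so the read confidence window is 8000..12000 us.
   */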
  device.r_lower_limit = read_latency_usec - read_confd_limit;
  device.r_upper_limit = read_latency_usec + read_confd_limit;
  device.w_lower_limit = write_latency_usec - write_confd_limit;
  device.w_upper_limit = write_latency_usec + write_confd_limit;
  device.r_confd_freq = 0;
  device.r_error_freq = 0;
  device.w_confd_freq = 0;
  device.w_error_freq = 0;


  /*
   * Allocate some memory for the device
   */
  device.size = nsectors * logical_block_size;
  spin_lock_init(&device.lock);
  device.data = vmalloc(device.size);

  /*
   * if the kernel can't allocate space to this device
   * then exit with -ENOMEM
   */
  if(device.data == NULL)
	return -ENOMEM;

  /*
   * Initialize procfs entry for vbd
   */
  device.procfs_file = create_proc_read_entry(MODULE_NAME, 0444, NULL,
	proc_read_vbd_stats, NULL);

  if(device.procfs_file == NULL)
	goto out;

  vbd_queue = blk_init_queue(vbd_request, &device.lock);

  /* if the queue is not allocated then release the device */
  if(vbd_queue == NULL)
	goto out;

  /*
   * Let the kernel know the queue for this device and the logical block
   * size that it operates on
   */
  blk_queue_logical_block_size(vbd_queue, logical_block_size);

  /* Register the device */
  major_num = register_blkdev(major_num, MODULE_NAME);

  /* if the device is unable to get a major number then release the device */
  if(major_num < 0) {
	printk(KERN_WARNING "vbd: unable to get a major number\n");
	goto out;
  }

  device.gd = alloc_disk(16);
  if(!device.gd)
	goto out_unregister;

  /* Populate our device structure */
  device.gd->major = major_num;
  device.gd->first_minor = 0;
  device.gd->fops = &vbd_ops;
  device.gd->private_data = &device;
  strcpy(device.gd->disk_name, MODULE_NAME);
  set_capacity(device.gd, nsectors);
  device.gd->queue = vbd_queue;
  add_disk(device.gd);

  return 0;

 out_unregister:
  unregister_blkdev(major_num, MODULE_NAME);
 out:
  vfree(device.data);
  return -ENOMEM;

}
static int __init stheno_module_init( void )
{
    int retval;

    print_info( "stheno_module_init was called.\n" );

    init_waitqueue_head( &stheno_wait_q );

    wake_lock_init( &stheno_wakelock, WAKE_LOCK_SUSPEND, STHENO_NAME );

    stheno_major = register_blkdev( 0, STHENO_NAME );
    if( stheno_major <= 0 ){
        print_error( "stheno register_blkdev failed.\n" );
        retval = -EBUSY;
        goto error;
    }

    spin_lock_init( &stheno_lock );
    stheno_queue = blk_init_queue( stheno_request, &stheno_lock );
    if( stheno_queue == NULL ){
        print_error( "stheno blk_init_queue failed.\n" );
        retval = -ENOMEM;
        goto error;
    }

    /*blk_queue_hardsect_size( stheno_queue, SECTOR_SIZE );*/
    /*blk_queue_max_sectors( stheno_queue, MAX_SECTORS );*/
    blk_queue_logical_block_size( stheno_queue, SECTOR_SIZE );
    blk_queue_max_hw_sectors( stheno_queue, MAX_SECTORS );
#if defined( STHENO_BLK_BOUNCE_ANY )
    blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_ANY );
#else
    blk_queue_bounce_limit( stheno_queue, BLK_BOUNCE_HIGH ); /* default */
#endif

    stheno_gd = alloc_disk( STHENO_MINOR_COUNT );
    if( stheno_gd == NULL ){
        print_error( "stheno alloc_disk failed.\n" );
        retval = -ENOMEM;
        goto error;
    }

    stheno_gd->major = stheno_major;
    stheno_gd->first_minor = 0;
    stheno_gd->fops = &stheno_fops;
    stheno_gd->queue = stheno_queue;
    /*stheno_gd->flags = GENHD_FL_REMOVABLE;*/
    /*stheno_gd->private_data = NULL;*/
    snprintf( stheno_gd->disk_name, DISK_NAME_LEN, "%s", STHENO_NAME );
    set_capacity( stheno_gd, AMOUNT_OF_SECTORS );

    stheno_thread = kthread_create( stheno_request_thread, 0, STHENO_THREAD_NAME );
    if( IS_ERR( stheno_thread ) ){
        print_error( "stheno kthread_create failed.\n" );
        retval = -EBUSY;
        goto error;
    }
    wake_up_process( stheno_thread );

    add_disk( stheno_gd );

    print_debug( "stheno major = %d\n", stheno_major );
    return 0;
error:
    if( stheno_gd != NULL ) del_gendisk( stheno_gd );
    if( stheno_queue != NULL ) blk_cleanup_queue( stheno_queue );
    if( stheno_major > 0 ) unregister_blkdev( stheno_major, STHENO_NAME );
    return retval;
}
static int __init hd_init(void)
{
	int drive;

	if (register_blkdev(HD_MAJOR, "hd"))
		return -1;

	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
	if (!hd_queue) {
		unregister_blkdev(HD_MAJOR, "hd");
		return -ENOMEM;
	}

	blk_queue_max_hw_sectors(hd_queue, 255);
	init_timer(&device_timer);
	device_timer.function = hd_times_out;
	blk_queue_logical_block_size(hd_queue, 512);

	if (!NR_HD) {
		/*
		 * We don't know anything about the drive.  This means
		 * that you *MUST* specify the drive parameters to the
		 * kernel yourself.
		 *
		 * If we were on an i386, we used to read this info from
		 * the BIOS or CMOS.  This doesn't work all that well,
		 * since this assumes that this is a primary or secondary
		 * drive, and if we're using this legacy driver, it's
		 * probably an auxiliary controller added to recover
		 * legacy data off an ST-506 drive.  Either way, it's
		 * definitely safest to have the user explicitly specify
		 * the information.
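		 *
		 * For example (hypothetical geometry): hd=1024,16,63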
		 */
		printk("hd: no drives specified - use hd=cyl,head,sectors"
			" on kernel command line\n");
		goto out;
	}

	for (drive = 0 ; drive < NR_HD ; drive++) {
		struct gendisk *disk = alloc_disk(64);
		struct hd_i_struct *p = &hd_info[drive];
		if (!disk)
			goto Enomem;
		disk->major = HD_MAJOR;
		disk->first_minor = drive << 6;
		disk->fops = &hd_fops;
		sprintf(disk->disk_name, "hd%c", 'a'+drive);
		disk->private_data = p;
		set_capacity(disk, p->head * p->sect * p->cyl);
		disk->queue = hd_queue;
		p->unit = drive;
		hd_gendisk[drive] = disk;
		printk("%s: %luMB, CHS=%d/%d/%d\n",
			disk->disk_name, (unsigned long)get_capacity(disk)/2048,
			p->cyl, p->head, p->sect);
	}

	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
		printk("hd: unable to get IRQ%d for the hard disk driver\n",
			HD_IRQ);
		goto out1;
	}
	if (!request_region(HD_DATA, 8, "hd")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
		goto out2;
	}
	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
		goto out3;
	}

	/* Let them fly */
	for (drive = 0; drive < NR_HD; drive++)
		add_disk(hd_gendisk[drive]);

	return 0;

out3:
	release_region(HD_DATA, 8);
out2:
	free_irq(HD_IRQ, NULL);
out1:
	for (drive = 0; drive < NR_HD; drive++)
		put_disk(hd_gendisk[drive]);
	NR_HD = 0;
out:
	del_timer(&device_timer);
	unregister_blkdev(HD_MAJOR, "hd");
	blk_cleanup_queue(hd_queue);
	return -1;
Enomem:
	while (drive--)
		put_disk(hd_gendisk[drive]);
	goto out;
}
/*
    Create system device file for the enabled slot.
*/
ndas_error_t slot_enable(int s)
{
    ndas_error_t ret = NDAS_ERROR_INTERNAL;
    int got;
    struct ndas_slot* slot = NDAS_GET_SLOT_DEV(s); 
    dbgl_blk(3, "ing s#=%d slot=%p",s, slot);
    got = try_module_get(THIS_MODULE);
    MOD_INC_USE_COUNT;
    
    if ( slot == NULL)
        goto out1;
    
    if ( slot->enabled ) {
        dbgl_blk(1, "already enabled");
        ret = NDAS_OK;
        goto out2;
    }
    ret = ndas_query_slot(s, &slot->info);
    if ( !NDAS_SUCCESS(ret) ) {
        dbgl_blk(1, "fail ndas_query_slot");
        goto out2;
    }
    dbgl_blk(1, "mode=%d", slot->info.mode);
    
    slot->enabled = 1;
    
#if LINUX_VERSION_25_ABOVE

    slot->disk = NULL;
    spin_lock_init(&slot->lock);
    slot->queue = blk_init_queue(
        nblk_request_proc, 
        &slot->lock
    );
	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))    
	    blk_queue_max_phys_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	    blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);	//renamed in 2.6.34	
	    //blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //removed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
	    blk_queue_logical_block_size(slot->queue, slot->info.sector_size);
	#else
	    blk_queue_hardsect_size(slot->queue, slot->info.sector_size);
	#endif

	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))
	    blk_queue_max_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_hw_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS); //renamed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
	    // Set ordered queue property.
		#if 0
		    blk_queue_ordered(slot->queue, QUEUE_ORDERED_TAG_FLUSH, nblk_prepare_flush);
		#endif
	#endif

    slot->disk = alloc_disk(NR_PARTITION);
    if ( slot->disk == NULL ) {
        slot->enabled = 0;
        dbgl_blk(1, "fail alloc disk");
        goto out2;
    }

    slot->disk->major = NDAS_BLK_MAJOR;
    slot->disk->first_minor = (s - NDAS_FIRST_SLOT_NR) << PARTN_BITS;
    slot->disk->fops = &ndas_fops;
    slot->disk->queue = slot->queue;
    slot->disk->private_data = (void*) (long)s;
    slot->queue_flags = 0;

    dbgl_blk(1, "mode=%d", slot->info.mode);
    if ( slot->info.mode == NDAS_DISK_MODE_SINGLE || 
        slot->info.mode == NDAS_DISK_MODE_ATAPI ||
        slot->info.mode == NDAS_DISK_MODE_MEDIAJUKE) 
    {
        char short_serial[NDAS_SERIAL_SHORT_LENGTH + 1];
        if (strlen(slot->info.ndas_serial) > 8) {
            /* Extended serial number is too long for a sysfs object name. Use the last 8 digits only */
            strncpy(
                short_serial,
                slot->info.ndas_serial + ( NDAS_SERIAL_EXTEND_LENGTH - NDAS_SERIAL_SHORT_LENGTH),
                8);
        } else {
            strncpy(short_serial, slot->info.ndas_serial, 8);
        }
        short_serial[8] =0;
        snprintf(slot->devname,
            sizeof(slot->devname)-1, 
            "ndas-%s-%d", short_serial, slot->info.unit
        );

        strcpy(slot->disk->disk_name, slot->devname);

	    dbgl_blk(1, "just set slot->disk->%s, slot->%s", slot->disk->disk_name, slot->devname );

	#if !LINUX_VERSION_DEVFS_REMOVED_COMPLETELY
	        strcpy(slot->disk->devfs_name, slot->devname);
	#endif
        set_capacity(slot->disk, slot->info.sectors);
	    dbgl_blk(1, "just set capacity slot->disk, slot->info.sectors:%llu", slot->info.sectors);

    } else {
        /* Other mode is not implemented */

    }
    
    if (slot->info.mode == NDAS_DISK_MODE_ATAPI) {
        slot->disk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
	    dbgl_blk(1, "just set slot->disk->flags");
	#if 0
	        kref_init(&slot->ndascd.kref);
	#endif
    }

    dbgl_blk(4, "adding disk: slot=%d, first_minor=%d, capacity=%llu", s, slot->disk->first_minor, slot->info.sectors);
    add_disk(slot->disk);
    dbgl_blk(1, "added disk: slot=%d", s);
   
		#ifndef NDAS_DONT_CARE_SCHEDULER
			#if LINUX_VERSION_AVOID_CFQ_SCHEDULER
				#if CONFIG_SYSFS
				    sal_assert(slot->queue->kobj.ktype);	
				    sal_assert(slot->queue->kobj.ktype->default_attrs);
				    {
				        struct queue_sysfs_entry {
				        	struct attribute attr;
				        	ssize_t (*show)(struct request_queue *, char *);
				        	ssize_t (*store)(struct request_queue *, const char *, size_t);
				        };
				        struct attribute *attr = slot->queue->kobj.ktype->default_attrs[4];
				        struct queue_sysfs_entry *entry = container_of(attr , struct queue_sysfs_entry, attr);
				        //dbgl_blk(1, "now to set the scheduler: slot-queue=%d, scheduler==%s, scheduler_len=%d", slot->queue, NDAS_QUEUE_SCHEDULER, strlen(NDAS_QUEUE_SCHEDULER));
				        entry->store(slot->queue,NDAS_QUEUE_SCHEDULER,strlen(NDAS_QUEUE_SCHEDULER)); 
				        
				    }
				#else
					#error "NDAS driver doesn't work well with CFQ scheduler of 2.6.13 or above kernel." \
				   "if you forcely want to use it, please specify compiler flags by " \
				   "export NDAS_EXTRA_CFLAGS=\"-DNDAS_DONT_CARE_SCHEDULER\" "\
				   "then compile the source again."
				#endif
			#endif
		#endif        
    printk("ndas: /dev/%s enabled\n" , 
            slot->devname);
#else 
    /* < LINUX_VERSION_25_ABOVE */
    dbgl_blk(4, "blksize=%d", DEFAULT_ND_BLKSIZE);
    dbgl_blk(4, "size=%lld", slot->info.sectors);
    dbgl_blk(1, "hardsectsize=%d", slot->info.sector_size);
    ndas_ops_set_blk_size(
        s, 
        DEFAULT_ND_BLKSIZE, 
        slot->info.sectors,
        slot->info.sector_size, 
        DEFAULT_ND_MAX_SECTORS
    );
#ifdef NDAS_DEVFS    
    printk("ndas: /dev/nd/disc%d enabled\n" , 
            s - NDAS_FIRST_SLOT_NR);
#else
    printk("ndas: /dev/nd%c enabled\n" , 
            s + 'a' - NDAS_FIRST_SLOT_NR);
#endif

#endif
    
    //up(&slot->mutex);
 #ifdef NDAS_MSHARE 
    if(NDAS_GET_SLOT_DEV(s)->info.mode == NDAS_DISK_MODE_MEDIAJUKE)
    {
  	    ndas_CheckFormat(s);
    }
 #endif
#if !LINUX_VERSION_25_ABOVE
    ndas_ops_read_partition(s);
#endif
    dbgl_blk(3, "ed");
    return NDAS_OK;
out2:    
    //up(&slot->mutex);
out1:    
    if ( got ) module_put(THIS_MODULE);
    MOD_DEC_USE_COUNT;
    return ret;
}
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device registers
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and the stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_logical_block_size(ace->queue, 512);

	/*
	 * Initialize the gendisk structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* Configure the bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* Write a known pattern to the bus mode register */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* Read it back to determine the register byte order */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure the version register looks sane */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put the controller into a known state */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Hook up the interrupt handler, if an IRQ was provided */
	if (ace->irq) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failed to get the IRQ; fall back to polled operation */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = 0;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the gendisk available */
	add_disk(ace->gd);

	return 0;

err_read:
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		 (unsigned long long) ace->physaddr);
	return -ENOMEM;
}
static int __init sbd_init(void) {
	/*
	 * Set up our internal device.
	 */

	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 16);
	if (IS_ERR(tfm)){
		printk(KERN_ERR "alg: cipher: Failed to load transform\n");
		return PTR_ERR(tfm);
	}
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL)
		return -ENOMEM;
	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_logical_block_size(Queue, logical_block_size);
	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out;
	}
	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (!Device.gd)
		goto out_unregister;
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	ret = device_register(&rd_root_dev);
	if (ret < 0)
		goto out_unregister;

	ret = device_create_file(&rd_root_dev, &dev_attr_key);
	if (ret < 0) {
		device_unregister(&rd_root_dev);
		goto out_unregister;
	}

	return 0;

out_unregister:
	unregister_blkdev(major_num, "sbd");
out:
	vfree(Device.data);
	crypto_free_cipher(tfm);
	return -ENOMEM;
}
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
/**
 * axon_ram_probe - probe() method for platform driver
 * @device: see platform_driver method
 */
static int axon_ram_probe(struct platform_device *device)
{
	static int axon_ram_bank_id = -1;
	struct axon_ram_bank *bank;
	struct resource resource;
	int rc = 0;

	axon_ram_bank_id++;

	dev_info(&device->dev, "Found memory controller on %s\n",
			device->dev.of_node->full_name);

	bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
	if (bank == NULL) {
		dev_err(&device->dev, "Out of memory\n");
		rc = -ENOMEM;
		goto failed;
	}

	device->dev.platform_data = bank;

	bank->device = device;

	if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) {
		dev_err(&device->dev, "Cannot access device tree\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->size = resource_size(&resource);

	if (bank->size == 0) {
		dev_err(&device->dev, "No DDR2 memory found for %s%d\n",
				AXON_RAM_DEVICE_NAME, axon_ram_bank_id);
		rc = -ENODEV;
		goto failed;
	}

	dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20);

	bank->ph_addr = resource.start;
	bank->io_addr = (unsigned long) ioremap_prot(
			bank->ph_addr, bank->size, _PAGE_NO_CACHE);
	if (bank->io_addr == 0) {
		dev_err(&device->dev, "ioremap() failed\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK);
	if (bank->disk == NULL) {
		dev_err(&device->dev, "Cannot register disk\n");
		rc = -EFAULT;
		goto failed;
	}

	bank->disk->major = azfs_major;
	bank->disk->first_minor = azfs_minor;
	bank->disk->fops = &axon_ram_devops;
	bank->disk->private_data = bank;
	bank->disk->driverfs_dev = &device->dev;

	sprintf(bank->disk->disk_name, "%s%d",
			AXON_RAM_DEVICE_NAME, axon_ram_bank_id);

	bank->disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (bank->disk->queue == NULL) {
		dev_err(&device->dev, "Cannot register disk queue\n");
		rc = -EFAULT;
		goto failed;
	}

	set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
	blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
	blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
	add_disk(bank->disk);

	bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0);
	if (bank->irq_id == NO_IRQ) {
		dev_err(&device->dev, "Cannot access ECC interrupt ID\n");
		rc = -EFAULT;
		goto failed;
	}

	rc = request_irq(bank->irq_id, axon_ram_irq_handler,
			AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot register ECC interrupt handler\n");
		bank->irq_id = NO_IRQ;
		rc = -EFAULT;
		goto failed;
	}

	rc = device_create_file(&device->dev, &dev_attr_ecc);
	if (rc != 0) {
		dev_err(&device->dev, "Cannot create sysfs file\n");
		rc = -EFAULT;
		goto failed;
	}

	azfs_minor += bank->disk->minors;

	return 0;

failed:
	if (bank != NULL) {
		if (bank->irq_id != NO_IRQ)
			free_irq(bank->irq_id, device);
		if (bank->disk != NULL) {
			if (bank->disk->major > 0)
				unregister_blkdev(bank->disk->major,
						bank->disk->disk_name);
			del_gendisk(bank->disk);
		}
		device->dev.platform_data = NULL;
		if (bank->io_addr != 0)
			iounmap((void __iomem *) bank->io_addr);
		kfree(bank);
	}

	return rc;
}
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
/**
* @brief 	Card initial function.
* @param 	work[in]: Work structure.
* @return 	None.
*/
static void gp_sdcard_work_init(struct work_struct *work)
{
	gpSDInfo_t *sd = container_of(work, gpSDInfo_t, init);
	int pin_handle;
	int ret = 0, i = 0;
	int pin_id;

	if(sd->device_id == 0)
		pin_id = GP_PIN_SD0;
	else if(sd->device_id == 1)
		pin_id = GP_PIN_SD1;
	else
		pin_id = GP_PIN_SD2;

	pin_handle = gp_board_pin_func_request( pin_id, GP_BOARD_WAIT_FOREVER);
	if(pin_handle<0)
	{
		DERROR("[%d]: can't get pin handle\n", sd->device_id);
		goto init_work_end;
	}
    /* ----- chris: Set Pin state for SD before power on ----- */
    sd->sd_func->set_power(1);
	/* ----- chris: delay 250ms after card power on ----- */
	msleep(250);
	/* ----- Initial SD card ----- */
	ret = gp_sdcard_cardinit(sd);
	if (ret != 0)
	{
		DERROR("[%d]: initial fail\n",sd->device_id);
		gp_board_pin_func_release(pin_handle);
		goto init_work_end;
	}
	gp_board_pin_func_release(pin_handle);

	if(sd->present==1)
	{
		if(sd->card_type == SDIO)
		{
			sd->pin_handle = gp_board_pin_func_request(pin_id, GP_BOARD_WAIT_FOREVER);
			if(sd->pin_handle<0)
			{
				DERROR("[%d]: can't get pin handle\n", sd->device_id);
				goto init_work_end;
			}
			DEBUG("SDIO card detected\n");
			gp_sdio_insert_device(sd->device_id, sd->RCA);
		}
		else
		{
			unsigned int cnt =0;
			/* ----- Wait 30 second for all process close handle ----- */
			while((sd->users)&&cnt<120)
			{
				msleep(250);
				cnt++;
			}
			if(sd->users)
			{
				DERROR("Some handle do not free\n");
			}
			if(sd->status)
			{
				gp_sdcard_blk_put(sd);
				sd->status = 0;
			}
			sd->handle_dma = gp_apbdma0_request(1000);
			if(sd->handle_dma==0)
				goto init_work_end;
			sd->queue = blk_init_queue(gp_sdcard_request, &sd->lock);
			if(sd->queue==NULL)
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_queue;
			}
			blk_queue_ordered(sd->queue, QUEUE_ORDERED_DRAIN, NULL);
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sd->queue);
			blk_queue_logical_block_size(sd->queue, 512);
			blk_queue_max_sectors(sd->queue, SD_MAX_SECTORS );
			blk_queue_max_phys_segments(sd->queue, SD_MAX_PHY_SEGMENTS);
			blk_queue_max_hw_segments(sd->queue, SD_MAX_HW_SEGMENTS);
			blk_queue_max_segment_size(sd->queue, SD_MAX_PHY_SEGMENTS_SIZE);
			/* ----- Initial scatter list ----- */
			sd->sg = kmalloc(sizeof(struct scatterlist) *SD_MAX_PHY_SEGMENTS, GFP_KERNEL);
			if (!sd->sg)
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_thread;
			}
			sg_init_table(sd->sg, SD_MAX_PHY_SEGMENTS);
			init_MUTEX(&sd->thread_sem);
			/* ----- Enable thread ----- */
			sd->thread = kthread_run(gp_sdcard_queue_thread, sd, "sd-qd");
			if (IS_ERR(sd->thread))
			{
				goto fail_thread;
			}
			sd->queue->queuedata = sd;
			/* ----- Check SD card for GP special header ----- */
			if(gp_sdcard_parse_header(sd)<0)
			{
				goto fail_gd;
			}
			/* ----- Setup gendisk structure ----- */
			sd->gd = alloc_disk(SD_MINORS);
			if (sd->gd==NULL)
			{
				DERROR("NO MEMORY: gendisk\n");
				blk_cleanup_queue(sd->queue);
				goto fail_gd;
			}
			/* ----- Set gendisk structure ----- */
			sd->gd->major = sd_major;
			sd->gd->first_minor = sd->device_id*SD_MINORS;
			sd->gd->fops = &gp_sdcard_ops;
			sd->gd->queue = sd->queue;
			sd->gd->private_data = sd;
			snprintf (sd->gd->disk_name, 32, "sdcard%c", sd->device_id + 'a');
			/* ----- Set GP partition ----- */
			if(sd->partition.activity)
			{
				set_capacity(sd->gd,0);
				add_disk(sd->gd);
				for(i=0;i<MAX_SD_PART;i++)
				{
					if(sd->partition.capacity[i]==0)
						continue;
					gp_add_partition(sd->gd,i+1,sd->partition.offset[i],sd->partition.capacity[i],ADDPART_FLAG_WHOLEDISK);
				}
			}
			/* ----- Normal Setting ----- */
			else
			{
				set_capacity(sd->gd,sd->capacity);
				add_disk(sd->gd);
			}
		}
		//DEBUG("Initial success\n");
		goto init_work_end;
	}
	else
	{
		DERROR("Initial fail\n");
		goto init_work_end;
	}
fail_gd:
	/* ----- Then terminate our worker thread ----- */
	kthread_stop(sd->thread);
	sd->thread = NULL;
fail_thread:
	if (sd->sg)
		kfree(sd->sg);
	sd->sg = NULL;
	blk_cleanup_queue (sd->queue);
	sd->queue = NULL;
fail_queue:
	if(sd->handle_dma)
		gp_apbdma0_release(sd->handle_dma);
	sd->handle_dma = 0;
	/* ----- For re-initialize ----- */
	sd->present = 0;
init_work_end:
	sd->timer.expires = jiffies + SD_CD_POLL;
	add_timer(&sd->timer);
}
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		return -ENOMEM;

	spin_lock_init(&nullb->lock);

	if (setup_queues(nullb))
		goto err;

	if (queue_mode == NULL_Q_MQ) {
		null_mq_reg.numa_node = home_node;
		null_mq_reg.queue_depth = hw_queue_depth;

		if (use_per_node_hctx) {
			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
			null_mq_reg.ops->free_hctx = null_free_hctx;

			null_mq_reg.nr_hw_queues = nr_online_nodes;
		} else {
			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;

			null_mq_reg.nr_hw_queues = submit_queues;
		}

		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		blk_queue_make_request(nullb->q, null_queue_bio);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		if (nullb->q)
			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
	}

	if (!nullb->q)
		goto queue_fail;

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
queue_fail:
		if (queue_mode == NULL_Q_MQ)
			blk_mq_free_queue(nullb->q);
		else
			blk_cleanup_queue(nullb->q);
		cleanup_queues(nullb);
err:
		kfree(nullb);
		return -ENOMEM;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;
}
/* pdev is NULL for eisa */
static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
	struct request_queue *q;
	int j;

	/* 
	 * register block devices
	 * Find disks and fill in structs
	 * Get an interrupt, set the Q depth and get into /proc
	 */

	/* If this is successful it should ensure that we are the only */
	/* instance of the driver */
	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
		goto Enomem4;
	}
	hba[i]->access.set_intr_mask(hba[i], 0);
	if (request_irq(hba[i]->intr, do_ida_intr,
		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
	{
		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
				hba[i]->intr, hba[i]->devname);
		goto Enomem3;
	}
		
	for (j=0; j<NWD; j++) {
		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
		if (!ida_gendisk[i][j])
			goto Enomem2;
	}

	hba[i]->cmd_pool = pci_alloc_consistent(
		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
		&(hba[i]->cmd_pool_dhandle));
	hba[i]->cmd_pool_bits = kcalloc(
		DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
		GFP_KERNEL);

	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
			goto Enomem1;

	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
	printk(KERN_INFO "cpqarray: Finding drives on %s",
		hba[i]->devname);

	spin_lock_init(&hba[i]->lock);
	q = blk_init_queue(do_ida_request, &hba[i]->lock);
	if (!q)
		goto Enomem1;

	hba[i]->queue = q;
	q->queuedata = hba[i];

	getgeometry(i);
	start_fwbk(i);

	ida_procinit(i);

	if (pdev)
		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);

	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(q, SG_MAX);

	/* This is a driver limit and could be eliminated. */
	blk_queue_max_phys_segments(q, SG_MAX);
	
	init_timer(&hba[i]->timer);
	hba[i]->timer.expires = jiffies + IDA_TIMER;
	hba[i]->timer.data = (unsigned long)hba[i];
	hba[i]->timer.function = ida_timer;
	add_timer(&hba[i]->timer);

	/* Enable IRQ now that spinlock and rate limit timer are set up */
	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);

	for(j=0; j<NWD; j++) {
		struct gendisk *disk = ida_gendisk[i][j];
		drv_info_t *drv = &hba[i]->drv[j];
		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
		disk->major = COMPAQ_SMART2_MAJOR + i;
		disk->first_minor = j<<NWD_SHIFT;
		disk->fops = &ida_fops;
		if (j && !drv->nr_blks)
			continue;
		blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
		set_capacity(disk, drv->nr_blks);
		disk->queue = hba[i]->queue;
		disk->private_data = drv;
		add_disk(disk);
	}

	/* done ! */
	return(i);

Enomem1:
	nr_ctlr = i; 
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t), 
				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
Enomem2:
	while (j--) {
		put_disk(ida_gendisk[i][j]);
		ida_gendisk[i][j] = NULL;
	}
	free_irq(hba[i]->intr, hba[i]);
Enomem3:
	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
Enomem4:
	if (pdev)
		pci_set_drvdata(pdev, NULL);
	release_io_mem(hba[i]);
	free_hba(i);

	printk( KERN_ERR "cpqarray: out of memory\n");

	return -1;
}
static int __init sbd_init(void) {
	
	/*
	 * crypto_alloc_cipher("aes", 0, 0) allocates a single-block AES cipher
	 * handle (type and mask left at their defaults).  On error the returned
	 * pointer encodes the error code, so check it with IS_ERR() and extract
	 * it with PTR_ERR().  A usage sketch follows this function.
	 */
	crypto = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(crypto)) {
		printk(KERN_ERR "sbd: failed to allocate AES cipher\n");
		return PTR_ERR(crypto);
	}

	/*
	 * Set up our internal device.
	 */
	Device.size = nsectors * logical_block_size;
	spin_lock_init(&Device.lock);
	Device.data = vmalloc(Device.size);
	if (Device.data == NULL)
		return -ENOMEM;
	/*
	 * Get a request queue.
	 */
	Queue = blk_init_queue(sbd_request, &Device.lock);
	if (Queue == NULL)
		goto out;
	blk_queue_logical_block_size(Queue, logical_block_size);
	/*
	 * Get registered.
	 */
	major_num = register_blkdev(major_num, "sbd");
	if (major_num < 0) {
		printk(KERN_WARNING "sbd: unable to get major number\n");
		goto out;
	}
	/*
	 * And the gendisk structure.
	 */
	Device.gd = alloc_disk(16);
	if (!Device.gd)
		goto out_unregister;
	Device.gd->major = major_num;
	Device.gd->first_minor = 0;
	Device.gd->fops = &sbd_ops;
	Device.gd->private_data = &Device;
	strcpy(Device.gd->disk_name, "sbd0");
	set_capacity(Device.gd, nsectors);
	Device.gd->queue = Queue;
	add_disk(Device.gd);

	return 0;

out_unregister:
	unregister_blkdev(major_num, "sbd");
out:
	vfree(Device.data);
	return -ENOMEM;
}
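/*
 * Minimal sketch of driving the single-block cipher handle allocated in
 * sbd_init() above.  crypto_cipher_setkey() and crypto_cipher_encrypt_one()
 * are the standard kernel single-block cipher calls; the helper name and the
 * key/src/dst buffers are hypothetical, and AES transforms exactly one
 * 16-byte block per call.
 */
static int sbd_encrypt_block(u8 *dst, const u8 *src,
			     const u8 *key, unsigned int keylen)
{
	int err;

	err = crypto_cipher_setkey(crypto, key, keylen);	/* e.g. keylen = 16 */
	if (err)
		return err;

	crypto_cipher_encrypt_one(crypto, dst, src);	/* one 16-byte block */
	return 0;
}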
static int __devinit
dm3730logic_cf_alloc(struct platform_device *pdev, int id, unsigned long physaddr,
		unsigned long physize, int irq, int gpio, int bus_width)
{
	struct device *dev = &pdev->dev;
	struct dm3730logic_cf_data *cf_data = dev->platform_data;
	struct cf_device *cf;
	struct request_queue *rq;
	int rc;

	DPRINTK(DEBUG_CF_GENDISK, "%s: dev %p\n", __FUNCTION__, dev);

	if (!physaddr) {
		rc = -ENODEV;
		goto err_noreg;
	}

	/* Allocate and initialize the cf device structure */
	cf = kzalloc(sizeof(struct cf_device), GFP_KERNEL);
	if (!cf) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	platform_set_drvdata(pdev, cf);

	cf->dev = dev;
	cf->id = id;
	cf->physaddr = physaddr;
	cf->physize = physize;
	cf->irq = irq;
	cf->gpio_cd = cf_data->gpio_cd;
	cf->gpio_reset = cf_data->gpio_reset;
	cf->gpio_en = cf_data->gpio_en;
	cf->bus_width = bus_width;

	/* We fake it as ejected to start with */
	cf->ejected = 1;

	rq = blk_init_queue(cf_request, &cf->blk_lock);
	if (rq == NULL) {
		DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);
		rc = -ENOMEM;
		goto err_setup;
	}
	blk_queue_logical_block_size(rq, 512);

	// Limit requests to simple contiguous ones
	blk_queue_max_sectors(rq, 8);  //4KB
	blk_queue_max_phys_segments(rq, 1);
	blk_queue_max_hw_segments(rq, 1);

	cf->queue = rq;

	// The IRQ semaphore is locked and only in the IRQ is it released
	init_MUTEX_LOCKED(&cf->irq_sem);

	/* The RW semaphore to have only one call into either read/write
	 * at a time */
	init_MUTEX(&cf->rw_sem);

	init_completion(&cf->task_completion);

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	// Create the thread that sits and waits for an interrupt
	rc = kernel_thread(cf_thread, cf, CLONE_KERNEL);
	if (rc < 0) {
		printk("%s:%d thread create fail! %d\n", __FUNCTION__, __LINE__, rc);
		goto err_setup;
	} else {
		wait_for_completion(&cf->task_completion);
	}

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	/* Call the setup code */
	rc = dm3730logic_cf_setup(cf);
	if (rc)
		goto err_setup;

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	dev_set_drvdata(dev, cf);


	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(cf);
err_alloc:
err_noreg:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}
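
/*
 * Hedged sketch only -- the vendor's real cf_thread() is not shown in this
 * excerpt.  It illustrates the shape implied by the setup above: the thread
 * signals task_completion once it is running, then sleeps on irq_sem, which
 * the interrupt handler releases once per event.  daemonize() is assumed
 * because the thread is created with kernel_thread() on this kernel vintage.
 */
static int cf_thread_sketch(void *data)
{
	struct cf_device *cf = data;

	daemonize("dm3730logic_cf");
	complete(&cf->task_completion);	/* lets dm3730logic_cf_alloc() continue */

	while (!down_interruptible(&cf->irq_sem)) {
		/* one wakeup per interrupt: service the pending transfer here */
	}
	return 0;
}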
Beispiel #27
static int cyasblkdev_add_disks(int bus_num,
	struct cyasblkdev_blk_data *bd,
	int total_media_count,
	int devidx)
{
	int ret = 0;
	uint64_t disk_cap;
	int lcl_unit_no;
	cy_as_storage_query_unit_data unit_data = {0};

	#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s:query device: "
		"type:%d, removable:%d, writable:%d, "
		"blksize %d, units:%d, locked:%d, "
		"erase_sz:%d\n",
		__func__,
		dev_data.desc_p.type,
		dev_data.desc_p.removable,
		dev_data.desc_p.writeable,
		dev_data.desc_p.block_size,
		dev_data.desc_p.number_units,
		dev_data.desc_p.locked,
		dev_data.desc_p.erase_unit_size
		);
	#endif

	/*  make sure that device is not locked  */
	if (dev_data.desc_p.locked) {
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: device is locked\n", __func__);
		#endif
		ret = cy_as_storage_release(
			bd->dev_handle, bus_num, 0, 0, 0);
		if (ret != CY_AS_ERROR_SUCCESS) {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message("%s cannot release"
				" storage\n", __func__);
			#endif
			goto out;
		}
		goto out;
	}

	unit_data.device = 0;
	unit_data.unit   = 0;
	unit_data.bus    = bus_num;
	ret = cy_as_storage_query_unit(bd->dev_handle,
		&unit_data, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS) {
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: cannot query "
			"%d device unit - reason code %d\n",
			__func__, bus_num, ret);
		#endif
		goto out;
	}

	if (private_partition_bus == bus_num) {
		if (private_partition_size > 0) {
			ret = cy_as_storage_create_p_partition(
				bd->dev_handle, bus_num, 0,
				private_partition_size, 0, 0);
			if ((ret != CY_AS_ERROR_SUCCESS) &&
			(ret != CY_AS_ERROR_ALREADY_PARTITIONED)) {
			#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message("%s: cy_as_storage_"
				"create_p_partition after size > 0 check "
				"failed with error code %d\n",
				__func__, ret);
			#endif

				disk_cap = (uint64_t)
					(unit_data.desc_p.unit_size);
				lcl_unit_no = 0;

			} else if (ret == CY_AS_ERROR_ALREADY_PARTITIONED) {
				#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
				"%s: cy_as_storage_create_p_partition "
				"indicates memory already partitioned\n",
				__func__);
				#endif

				/*check to see that partition
				 * matches size */
				if (unit_data.desc_p.unit_size !=
					private_partition_size) {
					ret = cy_as_storage_remove_p_partition(
						bd->dev_handle,
						bus_num, 0, 0, 0);
					if (ret == CY_AS_ERROR_SUCCESS) {
						ret = cy_as_storage_create_p_partition(
							bd->dev_handle, bus_num, 0,
							private_partition_size, 0, 0);
						if (ret == CY_AS_ERROR_SUCCESS) {
							unit_data.bus = bus_num;
							unit_data.device = 0;
							unit_data.unit = 1;
						} else {
							#ifndef WESTBRIDGE_NDEBUG
							cy_as_hal_print_message(
							"%s: cy_as_storage_create_p_partition "
							"after removal unexpectedly failed "
							"with error %d\n", __func__, ret);
							#endif

							/* need to requery the
							 * bus: the delete
							 * succeeded but the
							 * create failed, so the
							 * disk properties have
							 * changed */
							unit_data.bus	= bus_num;
							unit_data.device = 0;
							unit_data.unit   = 0;
						}

						ret = cy_as_storage_query_unit(
						bd->dev_handle,
						&unit_data, 0, 0);
						if (ret != CY_AS_ERROR_SUCCESS) {
							#ifndef WESTBRIDGE_NDEBUG
							cy_as_hal_print_message(
							"%s: cannot query %d "
							"device unit - reason code %d\n",
							__func__, bus_num, ret);
							#endif
							goto out;
						} else {
							disk_cap = (uint64_t)
								(unit_data.desc_p.unit_size);
							lcl_unit_no =
								unit_data.unit;
						}
					} else {
					#ifndef WESTBRIDGE_NDEBUG
					cy_as_hal_print_message(
					"%s: cy_as_storage_remove_p_partition "
					"failed with error %d\n",
					__func__, ret);
					#endif

						unit_data.bus = bus_num;
						unit_data.device = 0;
						unit_data.unit = 1;

						ret = cy_as_storage_query_unit(
							bd->dev_handle, &unit_data, 0, 0);
						if (ret != CY_AS_ERROR_SUCCESS) {
						#ifndef WESTBRIDGE_NDEBUG
							cy_as_hal_print_message(
							"%s: cannot query %d "
							"device unit - reason "
							"code %d\n", __func__,
							bus_num, ret);
						#endif
							goto out;
						}

						disk_cap = (uint64_t)
							(unit_data.desc_p.unit_size);
						lcl_unit_no =
							unit_data.unit;
					}
				} else {
					#ifndef WESTBRIDGE_NDEBUG
					cy_as_hal_print_message("%s: partition "
						"exists and sizes equal\n",
						__func__);
					#endif

					/*partition already existed,
					 * need to query second unit*/
					unit_data.bus = bus_num;
					unit_data.device = 0;
					unit_data.unit = 1;

					ret = cy_as_storage_query_unit(
						bd->dev_handle, &unit_data, 0, 0);
					if (ret != CY_AS_ERROR_SUCCESS) {
					#ifndef WESTBRIDGE_NDEBUG
						cy_as_hal_print_message(
							"%s: cannot query %d "
							"device unit "
							"- reason code %d\n",
							__func__, bus_num, ret);
					#endif
						goto out;
					} else {
						disk_cap = (uint64_t)
						(unit_data.desc_p.unit_size);
						lcl_unit_no = unit_data.unit;
					}
				}
			} else {
				#ifndef WESTBRIDGE_NDEBUG
				cy_as_hal_print_message(
				"%s: cy_as_storage_create_p_partition "
				"created successfully\n", __func__);
				#endif

				disk_cap = (uint64_t)
				(unit_data.desc_p.unit_size -
				private_partition_size);

				lcl_unit_no = 1;
			}
		}
		else {
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
			"%s: invalid partition_size %d\n", __func__,
			private_partition_size);
			#endif

			disk_cap = (uint64_t)
				(unit_data.desc_p.unit_size);
			lcl_unit_no = 0;
		}
	} else {
		disk_cap = (uint64_t)
			(unit_data.desc_p.unit_size);
		lcl_unit_no = 0;
	}

	if ((bus_num == 0) ||
		(total_media_count == 1)) {
		sprintf(bd->user_disk_0->disk_name,
			"cyasblkdevblk%d", devidx);

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: disk unit_sz:%lu blk_sz:%d, "
			"start_blk:%lu, capacity:%llu\n",
			__func__, (unsigned long)
			unit_data.desc_p.unit_size,
			unit_data.desc_p.block_size,
			(unsigned long)
			unit_data.desc_p.start_block,
			(uint64_t)disk_cap
		);
		#endif

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: setting gendisk disk "
			"capacity to %d\n", __func__, (int) disk_cap);
		#endif

		/* initializing bd->queue */
		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message("%s: init bd->queue\n",
			__func__);
		#endif

		/* this will create a
		 * queue kernel thread */
		cyasblkdev_init_queue(
			&bd->queue, &bd->lock);

		bd->queue.prep_fn = cyasblkdev_blk_prep_rq;
		bd->queue.issue_fn = cyasblkdev_blk_issue_rq;
		bd->queue.data = bd;

		/* blk_size should always be a multiple of 512; set the queue to
		 * the maximum so that all accesses are aligned to the greatest
		 * multiple, and requests can be adjusted to smaller block sizes
		 * dynamically. */

		bd->user_disk_0_read_only = !dev_data.desc_p.writeable;
		bd->user_disk_0_blk_size = dev_data.desc_p.block_size;
		bd->user_disk_0_type = dev_data.desc_p.type;
		bd->user_disk_0_bus_num = bus_num;
		bd->user_disk_0->major = major;
		bd->user_disk_0->first_minor = devidx << CYASBLKDEV_SHIFT;
		bd->user_disk_0->minors = 8;
		bd->user_disk_0->fops = &cyasblkdev_bdops;
		bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE;
		bd->user_disk_0->private_data = bd;
		bd->user_disk_0->queue = bd->queue.queue;
		bd->dbgprn_flags = DBGPRN_RD_RQ;
		bd->user_disk_0_unit_no = lcl_unit_no;

		blk_queue_logical_block_size(bd->queue.queue,
			bd->user_disk_0_blk_size);

		set_capacity(bd->user_disk_0,
			disk_cap);

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: returned from set_capacity %d\n",
			__func__, (int) disk_cap);
		#endif

		/* need to start search from
		 * public partition beginning */
		if (vfat_search) {
			bd->user_disk_0_first_sector =
				cyasblkdev_get_vfat_offset(
					bd->user_disk_0_bus_num,
					bd->user_disk_0_unit_no);
		} else {
			bd->user_disk_0_first_sector = 0;
		}

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: set user_disk_0_first "
			"sector to %d\n", __func__,
			 bd->user_disk_0_first_sector);
		cy_as_hal_print_message(
			"%s: add_disk: disk->major=0x%x\n",
			__func__,
			bd->user_disk_0->major);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->first_minor=0x%x\n", __func__,
			bd->user_disk_0->first_minor);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->minors=0x%x\n", __func__,
			bd->user_disk_0->minors);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->disk_name=%s\n",
			__func__,
			bd->user_disk_0->disk_name);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->part_tbl=0x%x\n", __func__,
			(unsigned int)
			bd->user_disk_0->part_tbl);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->queue=0x%x\n", __func__,
			(unsigned int)
			bd->user_disk_0->queue);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->flags=0x%x\n",
			__func__, (unsigned int)
			bd->user_disk_0->flags);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->driverfs_dev=0x%x\n",
			__func__, (unsigned int)
			bd->user_disk_0->driverfs_dev);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->slave_dir=0x%x\n",
			__func__, (unsigned int)
			bd->user_disk_0->slave_dir);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->random=0x%x\n",
			__func__, (unsigned int)
			bd->user_disk_0->random);
		cy_as_hal_print_message(
			"%s: add_disk: "
			"disk->node_id=0x%x\n",
			__func__, (unsigned int)
			bd->user_disk_0->node_id);

		#endif

		add_disk(bd->user_disk_0);

	} else if ((bus_num == 1) &&
		(total_media_count == 2)) {
		bd->user_disk_1_read_only = !dev_data.desc_p.writeable;
		bd->user_disk_1_blk_size = dev_data.desc_p.block_size;
		bd->user_disk_1_type = dev_data.desc_p.type;
		bd->user_disk_1_bus_num = bus_num;
		bd->user_disk_1->major	= major;
		bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT;
		bd->user_disk_1->minors = 8;
		bd->user_disk_1->fops = &cyasblkdev_bdops;
		bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE;
		bd->user_disk_1->private_data = bd;
		bd->user_disk_1->queue = bd->queue.queue;
		bd->dbgprn_flags = DBGPRN_RD_RQ;
		bd->user_disk_1_unit_no = lcl_unit_no;

		sprintf(bd->user_disk_1->disk_name,
			"cyasblkdevblk%d", (devidx + 1));

		#ifndef WESTBRIDGE_NDEBUG
		cy_as_hal_print_message(
			"%s: disk unit_sz:%lu "
			"blk_sz:%d, "
			"start_blk:%lu, "
			"capacity:%llu\n",
			__func__,
			(unsigned long)
			unit_data.desc_p.unit_size,
			unit_data.desc_p.block_size,
			(unsigned long)
			unit_data.desc_p.start_block,
			(uint64_t)disk_cap
		);
		#endif

		/* blk_size should always be a multiple of 512; set the queue to
		 * the maximum so that all accesses are aligned to the greatest
		 * multiple, and requests can be adjusted to smaller block sizes
		 * dynamically. */
		if (bd->user_disk_0_blk_size >
		bd->user_disk_1_blk_size) {
			blk_queue_logical_block_size(bd->queue.queue,
				bd->user_disk_0_blk_size);
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
			"%s: set hard sect_sz:%d\n",
			__func__,
			bd->user_disk_0_blk_size);
			#endif
		} else {
			blk_queue_logical_block_size(bd->queue.queue,
				bd->user_disk_1_blk_size);
			#ifndef WESTBRIDGE_NDEBUG
			cy_as_hal_print_message(
			"%s: set hard sect_sz:%d\n",
			__func__,
			bd->user_disk_1_blk_size);
			#endif
		}

		set_capacity(bd->user_disk_1, disk_cap);
		if (vfat_search) {
			bd->user_disk_1_first_sector =
				cyasblkdev_get_vfat_offset(
					bd->user_disk_1_bus_num,
					bd->user_disk_1_unit_no);
		} else {
			bd->user_disk_1_first_sector
				= 0;
		}

		add_disk(bd->user_disk_1);
	}

	if (lcl_unit_no > 0) {
		if (bd->system_disk == NULL) {
			bd->system_disk =
				alloc_disk(8);

			if (bd->system_disk == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			disk_cap = (uint64_t)
				(private_partition_size);

			/* set properties of
			 * system disk */
			bd->system_disk_read_only = !dev_data.desc_p.writeable;
			bd->system_disk_blk_size = dev_data.desc_p.block_size;
			bd->system_disk_bus_num = bus_num;
			bd->system_disk->major = major;
			bd->system_disk->first_minor =
				(devidx + 2) << CYASBLKDEV_SHIFT;
			bd->system_disk->minors = 8;
			bd->system_disk->fops = &cyasblkdev_bdops;
			bd->system_disk->events = DISK_EVENT_MEDIA_CHANGE;
			bd->system_disk->private_data = bd;
			bd->system_disk->queue = bd->queue.queue;
			/* don't search for vfat
			 * with system disk */
			bd->system_disk_first_sector = 0;
			sprintf(
				bd->system_disk->disk_name,
				"cyasblkdevblk%d", (devidx + 2));

			set_capacity(bd->system_disk,
				disk_cap);

			add_disk(bd->system_disk);
		}
		#ifndef WESTBRIDGE_NDEBUG
		else {
			cy_as_hal_print_message(
				"%s: system disk already allocated %d\n",
				__func__, bus_num);
		}
		#endif
	}
out:
	return ret;
}
/**
* @brief	Card initialization work function.
* @param	work[in]: Work structure.
* @return	None.
*/
static void gp_sdcard_work_init(struct work_struct *work)
{
	gpSDInfo_t* sd = container_of(work, gpSDInfo_t,init);
	int pin_handle;
	pin_handle = gp_board_pin_func_request((sd->device_id==0)?GP_PIN_SD0:GP_PIN_SD1, GP_BOARD_WAIT_FOREVER);
	if(pin_handle<0)
	{
		DERROR("SD%d: can't get pin handle\n", sd->device_id);
		goto init_work_end;
	}
	/* ----- Initial SD module (controller) ----- */
	gpHalSDInit(sd->device_id);
	/* ----- Initial SD card ----- */
	gp_sdcard_cardinit(sd);
	gp_board_pin_func_release(pin_handle);	
	if(sd->present==1)
	{
		if(sd->card_type == SDIO)
		{
			sd->pin_handle = gp_board_pin_func_request((sd->device_id==0)?GP_PIN_SD0:GP_PIN_SD1, GP_BOARD_WAIT_FOREVER);
			if(sd->pin_handle<0)
			{
				DERROR("SD%d: can't get pin handle\n", sd->device_id);
				goto init_work_end;
			}
			DEBUG("SDIO card detected\n");
			gp_sdio_insert_device(sd->device_id, sd->RCA);	
		}
		else
		{
			sd->queue = blk_init_queue(gp_sdcard_request, &sd->lock);
			if(sd->queue==NULL)
			{
				DERROR("NO MEMORY: queue\n");
				goto init_work_end;
			} 	 
			blk_queue_ordered(sd->queue, QUEUE_ORDERED_DRAIN, NULL);
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sd->queue);
			blk_queue_logical_block_size(sd->queue, 512);
			blk_queue_max_sectors(sd->queue, SD_MAX_SECTORS );
			blk_queue_max_phys_segments(sd->queue, SD_MAX_PHY_SEGMENTS);
			blk_queue_max_hw_segments(sd->queue, SD_MAX_HW_SEGMENTS);
			blk_queue_max_segment_size(sd->queue, SD_MAX_PHY_SEGMENTS_SIZE);
			/* ----- Initial scatter list ----- */
			sd->sg = kmalloc(sizeof(struct scatterlist) *SD_MAX_PHY_SEGMENTS, GFP_KERNEL);
			if (!sd->sg) 
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_thread;
			}
			sg_init_table(sd->sg, SD_MAX_PHY_SEGMENTS);
		
			init_MUTEX(&sd->thread_sem);
			/* ----- Enable thread ----- */
			sd->thread = kthread_run(gp_sdcard_queue_thread, sd, "sd-qd");
			if (IS_ERR(sd->thread)) 
			{
				goto fail_thread;
			}
			sd->queue->queuedata = sd;
			/* ----- Setup gendisk structure ----- */
			sd->gd = alloc_disk(SD_MINORS);
			if (sd->gd == NULL)
			{
				DERROR("NO MEMORY: gendisk\n");
				/* queue is released on the fail_thread path below */
				goto fail_gd;
			}
			/* ----- Set gendisk structure ----- */
			sd->gd->major = sd_major;
			sd->gd->first_minor = sd->device_id*SD_MINORS;
			sd->gd->fops = &gp_sdcard_ops;
			sd->gd->queue = sd->queue;
			sd->gd->private_data = sd;
			snprintf (sd->gd->disk_name, 32, "sdcard%c", sd->device_id + 'a');
			set_capacity(sd->gd,sd->capacity);
			add_disk(sd->gd);
		}
		goto init_work_end;
	}
	else
	{
		DERROR("Initial fail\n");
		goto init_work_end;
	}
fail_gd:
	/* ----- Then terminate our worker thread ----- */
	kthread_stop(sd->thread);
fail_thread:
	if (sd->sg)
		kfree(sd->sg);
	sd->sg = NULL;
	blk_cleanup_queue (sd->queue);	
init_work_end:	
	sd->timer.expires = jiffies + SD_CD_POLL;
	add_timer(&sd->timer);
}
Beispiel #29
/* --------------------------------------------------------------------
 * SystemACE device setup/teardown code
 */
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_logical_block_size(ace->queue, 512);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* 0x0101 should work regardless of endianness */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianness */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Now we can hook up the irq handler */
	if (ace->irq != NO_IRQ) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = NO_IRQ;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);

	return 0;

err_read:
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		 (unsigned long long) ace->physaddr);
	return -ENOMEM;
}
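
/*
 * Hedged sketch of the matching teardown, mirroring the error-unwind order in
 * ace_setup() above; the in-tree driver has its own teardown routine, so this
 * is only an illustration of the implied release order.
 */
static void ace_teardown_sketch(struct ace_device *ace)
{
	del_gendisk(ace->gd);
	put_disk(ace->gd);

	blk_cleanup_queue(ace->queue);

	tasklet_kill(&ace->fsm_tasklet);
	del_timer_sync(&ace->stall_timer);

	if (ace->irq != NO_IRQ)
		free_irq(ace->irq, ace);

	iounmap(ace->baseaddr);
}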
Beispiel #30
int block_init(void)
{
  printk(KERN_INFO "initializing block device module\n");

  /* lock shared with the request queue */
  spin_lock_init(&lock);

  /* allocate the RAM backing store for the disk */
  size = NSECTORS * LOGICAL_BLOCK_SIZE;
  data = vmalloc(size);
  if (data == NULL) {
    printk(KERN_INFO "block_init: could not vmalloc a block of size %lu\n", size);
    return -ENOMEM;
  }

  /* set up the request queue served by block_request() */
  queue = blk_init_queue(block_request, &lock);
  if (queue == NULL) {
    printk(KERN_INFO "block_init: could not initialize blk queue\n");
    block_cleanup();
    return -ENOMEM;
  }

  /* advertise our logical block size to the block layer */
  blk_queue_logical_block_size(queue, LOGICAL_BLOCK_SIZE);

  /* register the block device and record its major number */
  major_number = register_blkdev(major_number, DEVICE_NAME);
  if (major_number < 0) {
    printk(KERN_INFO "block_init: could not register blk device, major number=%d\n", major_number);
    block_cleanup();
    return -ENOMEM;
  }


  gdisk = alloc_disk(16);
  if (!gdisk) {
    printk(KERN_INFO "block_init: could not alloc gdisk\n");
    block_cleanup();
    return -ENOMEM;
  }

  /* fill in the gendisk fields */
  gdisk->major = major_number;
  gdisk->first_minor = 0;
  gdisk->fops = &block_ops;
  gdisk->private_data = &data;
  gdisk->queue = queue;
  strcpy(gdisk->disk_name, DISK_NAME);

  /* add the disk with zero capacity first; the real size is set once ready */
  set_capacity(gdisk, 0);
  add_disk(gdisk);
  printk(KERN_INFO "block_init: added gendisk\n");

  set_capacity(gdisk, NSECTORS);
  printk(KERN_INFO "block_init: set capacity on gendisk to %d sectors\n", NSECTORS);

  return 0;
}
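
/*
 * Hedged sketch of the request function wired up above; the module's real
 * block_request() is not shown in this excerpt.  It assumes the classic
 * single-queue API of this era, a simple RAM-backed transfer against the
 * vmalloc()'d 'data' buffer (treated as u8 *), and the old req->buffer
 * pointer to the current segment.
 */
static void block_request_sketch(struct request_queue *q)
{
  struct request *req = blk_fetch_request(q);

  while (req != NULL) {
    unsigned long offset = blk_rq_pos(req) << 9;          /* 512-byte sectors */
    unsigned long nbytes = blk_rq_cur_sectors(req) << 9;
    int err = 0;

    if (req->cmd_type != REQ_TYPE_FS) {
      err = -EIO;                              /* skip non-filesystem requests */
    } else if (offset + nbytes > size) {
      err = -EIO;                              /* request past end of device */
    } else if (rq_data_dir(req) == WRITE) {
      memcpy((u8 *)data + offset, req->buffer, nbytes);
    } else {
      memcpy(req->buffer, (u8 *)data + offset, nbytes);
    }

    /* finish the current chunk; fetch the next request when this one is done */
    if (!__blk_end_request_cur(req, err))
      req = blk_fetch_request(q);
  }
}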