Example #1
static void ide_floppy_setup(ide_drive_t *drive)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	u16 *id = drive->id;

	drive->pc_callback	 = ide_floppy_callback;

	/*
	 * We used to check revisions here. At this point however I'm giving up.
	 * Just assume they are all broken, it's easier.
	 *
	 * The actual reason for the workarounds was likely a driver bug after
	 * all rather than a firmware bug, and the workaround below used to hide
	 * it. It should be fixed as of version 1.9, but to be on the safe side
	 * we'll leave the limitation below for the 2.2.x tree.
	 */
	if (!strncmp((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI", 20)) {
		drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
		/* This value will be visible in the /proc/ide/hdx/settings */
		drive->pc_delay = IDEFLOPPY_PC_DELAY;
		blk_queue_max_sectors(drive->queue, 64);
	}

	/*
	 * Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
	 * nasty clicking noises without it, so please don't remove this.
	 */
	if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
		blk_queue_max_sectors(drive->queue, 64);
		drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
		/* IOMEGA Clik! drives do not support lock/unlock commands */
		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
	}

	(void) ide_floppy_get_capacity(drive);

	ide_proc_register_driver(drive, floppy->driver);

	drive->dev_flags |= IDE_DFLAG_ATTACH;
}
Example #2
int ata_scsi_slave_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;

	blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);

	if (sdev->id < ATA_MAX_DEVICES) {
		struct ata_port *ap;
		struct ata_device *dev;

		ap = (struct ata_port *) &sdev->host->hostdata[0];
		dev = &ap->device[sdev->id];

		/* TODO: 2048 is an arbitrary number, not the
		 * hardware maximum.  This should be increased to
		 * 65534 when Jens Axboe's patch for dynamically
		 * determining max_sectors is merged.
		 */
		if (dev->flags & ATA_DFLAG_LBA48) {
			sdev->host->max_sectors = 2048;
			blk_queue_max_sectors(sdev->request_queue, 2048);
		}
	}

	return 0;	/* scsi layer doesn't check return value, sigh */
}
Example #3
File: scsiglue.c Project: cilynx/dd-wrt
/* Input routine for the sysfs max_sectors file */
static ssize_t store_max_sectors(struct device *dev, struct device_attribute *attr, const char *buf,
		size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	unsigned short ms;

	if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
		blk_queue_max_sectors(sdev->request_queue, ms);
		return strlen(buf);
	}
	return -EINVAL;	
}
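To exercise this store hook, a user-space program (or a shell redirect) writes a sector count into the matching sysfs attribute. The following minimal sketch assumes the attribute is exposed under a SCSI device directory such as /sys/bus/scsi/devices/0:0:0:0/; the exact path depends on the host and must be adjusted.

/* Hypothetical user-space sketch: writes a new max_sectors value into the
 * sysfs file handled by store_max_sectors() above.  The device path is an
 * assumption; substitute the real SCSI device directory. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/scsi/devices/0:0:0:0/max_sectors", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%u\n", 64);	/* cap transfers at 64 sectors (32 KB) */
	fclose(f);
	return 0;
}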
Example #4
static void ide_floppy_setup(ide_drive_t *drive)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	u16 *id = drive->id;

	drive->pc_callback	 = ide_floppy_callback;

	/*
	 * We used to check revisions here. At this point however I'm giving up.
	 * Just assume they are all broken, it's easier.
	 *
	 * The actual reason for the workarounds was likely a driver bug after
	 * all rather than a firmware bug, and the workaround below used to hide
	 * it. It should be fixed as of version 1.9, but to be on the safe side
	 * we'll leave the limitation below for the 2.2.x tree.
	 */
	if (!strncmp((char *)&id[ATA_ID_PROD], "IOMEGA ZIP 100 ATAPI", 20)) {
		drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE;
		/* This value will be visible in the /proc/ide/hdx/settings */
		drive->pc_delay = IDEFLOPPY_PC_DELAY;
		blk_queue_max_sectors(drive->queue, 64);
	}

	/*
	 * Guess what? The IOMEGA Clik! drive also needs the above fix. It makes
	 * nasty clicking noises without it, so please don't remove this.
	 */
	if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) {
		blk_queue_max_sectors(drive->queue, 64);
		drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE;
		/* IOMEGA Clik! drives do not support lock/unlock commands */
		drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
	}

	(void) ide_floppy_get_capacity(drive);

	ide_proc_register_driver(drive, floppy->driver);

	drive->dev_flags |= IDE_DFLAG_ATTACH;
}
Example #5
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (host->dev->dma_mask && *host->dev->dma_mask)
		limit = *host->dev->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_sectors);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup;
	}

	init_completion(&mq->thread_complete);
	init_waitqueue_head(&mq->thread_wq);
	init_MUTEX(&mq->thread_sem);

	ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
	if (ret >= 0) {
		wait_for_completion(&mq->thread_complete);
		init_completion(&mq->thread_complete);
		ret = 0;
		goto out;
	}

 cleanup:
	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);
 out:
	return ret;
}
Example #6
static int __init stheno_module_init(void)
{
    stheno_major = register_blkdev(0, MODNAME);
    if (stheno_major <= 0) {
        printk(KERN_WARNING "register_blkdev failed\n");
        return stheno_major;
    }

    spin_lock_init(&stheno_lock);
    stheno_queue = blk_init_queue(stheno_request, &stheno_lock);
    if (!stheno_queue) {
        printk(KERN_WARNING "blk_init_queue failed\n");
        unregister_blkdev(stheno_major, MODNAME);
        return -ENOMEM;
    }
    blk_queue_logical_block_size(stheno_queue, SECT_SIZE);
    blk_queue_max_sectors(stheno_queue, MAX_SECTORS);
    blk_queue_bounce_limit(stheno_queue, BLK_BOUNCE_ANY);

    stheno_gd = alloc_disk(MINOR_COUNT);
    if (!stheno_gd) {
        printk(KERN_WARNING "alloc_disk failed\n");
        blk_cleanup_queue(stheno_queue);
        unregister_blkdev(stheno_major, MODNAME);
        return -ENOMEM;
    }
    sprintf(stheno_gd->disk_name, "%s", MODNAME);
    stheno_gd->queue = stheno_queue;
    stheno_gd->major = stheno_major;
    stheno_gd->first_minor = 0;
    stheno_gd->fops = &stheno_fops;
    set_capacity(stheno_gd, SECT_NUM);

    sema_init(&stheno_sem, 1);
    init_waitqueue_head(&stheno_process_q);

    stheno_thread = kthread_create(stheno_do_request, NULL, "sthenod");
    wake_up_process(stheno_thread);

    add_disk(stheno_gd);

    printk(KERN_INFO "stheno is loaded\n");
    printk(KERN_INFO "major = %d\n", stheno_major);

    return 0;
}
Example #7
/*
 * Initializes the block layer interfaces.
 */
static int sd_init_blk_dev(struct sd_host *host)
{
	struct gendisk *disk;
	struct request_queue *queue;
	int channel;
	int retval;

	channel = to_channel(exi_get_exi_channel(host->exi_device));

	/* queue */
	retval = -ENOMEM;
	spin_lock_init(&host->queue_lock);
	queue = blk_init_queue(sd_request_func, &host->queue_lock);
	if (!queue) {
		sd_printk(KERN_ERR, "error initializing queue\n");
		goto err_blk_init_queue;
	}
	blk_queue_dma_alignment(queue, EXI_DMA_ALIGN);
	blk_queue_max_phys_segments(queue, 1);
	blk_queue_max_hw_segments(queue, 1);
	blk_queue_max_sectors(queue, 8);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
	queue->queuedata = host;
	host->queue = queue;

	/* disk */
	disk = alloc_disk(1 << MMC_SHIFT);
	if (!disk) {
		sd_printk(KERN_ERR, "error allocating disk\n");
		goto err_alloc_disk;
	}
	disk->major = SD_MAJOR;
	disk->first_minor = channel << MMC_SHIFT;
	disk->fops = &sd_fops;
	sprintf(disk->disk_name, "%s%c", SD_NAME, 'a' + channel);
	disk->private_data = host;
	disk->queue = host->queue;
	host->disk = disk;

	retval = 0;
	goto out;

err_alloc_disk:
	blk_cleanup_queue(host->queue);
	host->queue = NULL;
err_blk_init_queue:
out:
	return retval;
}
Example #8
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    blk_queue_bounce_limit(mq->queue, limit);
    blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
    blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
    blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
    blk_queue_max_segment_size(mq->queue, host->max_seg_size);

    mq->queue->queuedata = mq;
    mq->req = NULL;

    mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
                     GFP_KERNEL);
    if (!mq->sg) {
        ret = -ENOMEM;
        goto cleanup_queue;
    }

    init_MUTEX(&mq->thread_sem);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_sg;
    }

    return 0;

free_sg:
    kfree(mq->sg);
    mq->sg = NULL;
cleanup_queue:
    blk_cleanup_queue(mq->queue);
    return ret;
}
Example #9
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);

	/* Many devices have trouble transferring more than 32KB at a time,
	 * while others have trouble with more than 64K. At this time we
	 * are limiting both to 32K (64 sectors).
	 */
	if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) {
		unsigned int max_sectors = 64;

		if (us->fflags & US_FL_MAX_SECTORS_MIN)
			max_sectors = PAGE_CACHE_SIZE >> 9;
		if (sdev->request_queue->max_sectors > max_sectors)
			blk_queue_max_sectors(sdev->request_queue,
					      max_sectors);
	} else if (sdev->type == TYPE_TAPE) {
Example #10
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);

}
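dm_table_set_restrictions() pokes most queue limit fields directly and only routes max_sectors through its helper. For comparison, here is a hedged sketch of the same limits applied entirely through the blk_queue_* helpers of the same kernel era (an illustration, not the actual dm code; the function name is invented):

static void set_restrictions_via_helpers(struct request_queue *q,
					 struct dm_table *t)
{
	/* each helper mirrors one direct field assignment above */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
	blk_queue_hardsect_size(q, t->limits.hardsect_size);
	blk_queue_max_segment_size(q, t->limits.max_segment_size);
	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
	/* the no_cluster flag has no helper in this era and would still
	 * need the direct queue_flags manipulation shown above */
}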
Example #11
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);
	struct usb_device *udev = us->pusb_dev; /* 2010/10/06, modified by Panasonic (SAV) */

	/* Many devices have trouble transferring more than 32KB at a time,
	 * while others have trouble with more than 64K. At this time we
	 * are limiting both to 32K (64 sectors).
	 */
	if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) {
		unsigned int max_sectors = 64;

		if (us->fflags & US_FL_MAX_SECTORS_MIN)
			max_sectors = PAGE_CACHE_SIZE >> 9;
		if (sdev->request_queue->max_sectors > max_sectors)
			blk_queue_max_sectors(sdev->request_queue,
					      max_sectors);
/* 2010/10/06, modified by Panasonic (SAV) ---> */
    } else if (udev->bus->max_sectors) {
Example #12
File: pd.c Project: fgeraci/cs518-sched
int pd_init (void)

{
	int i;
	request_queue_t *q;

	if (disable) return -1;
        if (devfs_register_blkdev(MAJOR_NR,name,&pd_fops)) {
                printk("%s: unable to get major number %d\n",
                        name,major);
                return -1;
        }
	q = BLK_DEFAULT_QUEUE(MAJOR_NR);
	blk_init_queue(q, DEVICE_REQUEST, &pd_lock);
	blk_queue_max_sectors(q, cluster);
        read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
        
	pd_gendisk.major = major;
	pd_gendisk.major_name = name;
	add_gendisk(&pd_gendisk);

	for(i=0;i<PD_DEVS;i++) pd_blocksizes[i] = 1024;
	blksize_size[MAJOR_NR] = pd_blocksizes;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
		name,name,PD_VERSION,major,cluster,nice);
	pd_init_units();
	pd_valid = 0;
	pd_gendisk.nr_real = pd_detect();
	pd_valid = 1;

#ifdef MODULE
        if (!pd_gendisk.nr_real) {
		cleanup_module();
		return -1;
	}
#endif
        return 0;
}
Example #13
/**
 * @brief	Card initialization function.
 * @param	work[in]: Work structure.
 * @return	None.
 */
static void gp_sdcard_work_init(struct work_struct *work)
{
	gpSDInfo_t* sd = container_of(work, gpSDInfo_t,init);
	int pin_handle;
	int ret = 0,i=0;
	int pin_id;

	if(sd->device_id == 0)
		pin_id = GP_PIN_SD0;
	else if(sd->device_id == 1)
		pin_id = GP_PIN_SD1;
	else
		pin_id = GP_PIN_SD2;

	pin_handle = gp_board_pin_func_request( pin_id, GP_BOARD_WAIT_FOREVER);
	if(pin_handle<0)
	{
		DERROR("[%d]: can't get pin handle\n", sd->device_id);
		goto init_work_end;
	}
    /* ----- chris: Set Pin state for SD before power on ----- */
    sd->sd_func->set_power(1);
	/* ----- chris: delay 250ms after card power on ----- */
	msleep(250);
	/* ----- Initial SD card ----- */
	ret = gp_sdcard_cardinit(sd);
	if (ret != 0)
	{
		DERROR("[%d]: initial fail\n",sd->device_id);
		gp_board_pin_func_release(pin_handle);
		goto init_work_end;
	}
	gp_board_pin_func_release(pin_handle);

	if(sd->present==1)
	{
		if(sd->card_type == SDIO)
		{
			sd->pin_handle = gp_board_pin_func_request(pin_id, GP_BOARD_WAIT_FOREVER);
			if(sd->pin_handle<0)
			{
				DERROR("[%d]: can't get pin handle\n", sd->device_id);
				goto init_work_end;
			}
			DEBUG("SDIO card detected\n");
			gp_sdio_insert_device(sd->device_id, sd->RCA);
		}
		else
		{
			unsigned int cnt =0;
			/* ----- Wait 30 second for all process close handle ----- */
			while((sd->users)&&cnt<120)
			{
				msleep(250);
				cnt++;
			}
			if(sd->users)
			{
				DERROR("Some handle do not free\n");
			}
			if(sd->status)
			{
				gp_sdcard_blk_put(sd);
				sd->status = 0;
			}
			sd->handle_dma = gp_apbdma0_request(1000);
			if(sd->handle_dma==0)
				goto init_work_end;
			sd->queue = blk_init_queue(gp_sdcard_request, &sd->lock);
			if(sd->queue==NULL)
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_queue;
			}
			blk_queue_ordered(sd->queue, QUEUE_ORDERED_DRAIN, NULL);
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sd->queue);
			blk_queue_logical_block_size(sd->queue, 512);
			blk_queue_max_sectors(sd->queue, SD_MAX_SECTORS );
			blk_queue_max_phys_segments(sd->queue, SD_MAX_PHY_SEGMENTS);
			blk_queue_max_hw_segments(sd->queue, SD_MAX_HW_SEGMENTS);
			blk_queue_max_segment_size(sd->queue, SD_MAX_PHY_SEGMENTS_SIZE);
			/* ----- Initial scatter list ----- */
			sd->sg = kmalloc(sizeof(struct scatterlist) *SD_MAX_PHY_SEGMENTS, GFP_KERNEL);
			if (!sd->sg)
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_thread;
			}
			sg_init_table(sd->sg, SD_MAX_PHY_SEGMENTS);
			init_MUTEX(&sd->thread_sem);
			/* ----- Enable thread ----- */
			sd->thread = kthread_run(gp_sdcard_queue_thread, sd, "sd-qd");
			if (IS_ERR(sd->thread))
			{
				goto fail_thread;
			}
			sd->queue->queuedata = sd;
			/* ----- Check SD card for GP special header ----- */
			if(gp_sdcard_parse_header(sd)<0)
			{
				goto fail_gd;
			}
			/* ----- Setup gendisk structure ----- */
			sd->gd = alloc_disk(SD_MINORS);
			if (sd->gd==NULL)
			{
				DERROR("NO MEMORY: gendisk\n");
				blk_cleanup_queue(sd->queue);
				goto fail_gd;
			}
			/* ----- Set gendisk structure ----- */
			sd->gd->major = sd_major;
			sd->gd->first_minor = sd->device_id*SD_MINORS;
			sd->gd->fops = &gp_sdcard_ops;
			sd->gd->queue = sd->queue;
			sd->gd->private_data = sd;
			snprintf (sd->gd->disk_name, 32, "sdcard%c", sd->device_id + 'a');
			/* ----- Set GP partition ----- */
			if(sd->partition.activity)
			{
				set_capacity(sd->gd,0);
				add_disk(sd->gd);
				for(i=0;i<MAX_SD_PART;i++)
				{
					if(sd->partition.capacity[i]==0)
						continue;
					gp_add_partition(sd->gd,i+1,sd->partition.offset[i],sd->partition.capacity[i],ADDPART_FLAG_WHOLEDISK);
				}
			}
			/* ----- Normal Setting ----- */
			else
			{
				set_capacity(sd->gd,sd->capacity);
				add_disk(sd->gd);
			}
		}
		//DEBUG("Initial success\n");
		goto init_work_end;
	}
	else
	{
		DERROR("Initial fail\n");
		goto init_work_end;
	}
fail_gd:
	/* ----- Then terminate our worker thread ----- */
	kthread_stop(sd->thread);
	sd->thread = NULL;
fail_thread:
	if (sd->sg)
		kfree(sd->sg);
	sd->sg = NULL;
	blk_cleanup_queue (sd->queue);
	sd->queue = NULL;
fail_queue:
	if(sd->handle_dma)
		gp_apbdma0_release(sd->handle_dma);
	sd->handle_dma = 0;
	/* ----- For re-initialize ----- */
	sd->present = 0;
init_work_end:
	sd->timer.expires = jiffies + SD_CD_POLL;
	add_timer(&sd->timer);
}
Example #14
File: queue.c Project: Tigrouzen/k1099
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;
#if 0
	if (!mq) {
#else
    // With the USB cable plugged in (charging state), plugging or unplugging
    // the card occasionally hung the system, leaving mq->thread NULL here.
    // Modified by xbw.
    if (!mq || !mq->thread) {
#endif	
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_ANY; /* was BLK_BOUNCE_HIGH */
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = sg_virt(dst);
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = sg_virt(src);
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
Example #15
File: scsiglue.c Project: cilynx/dd-wrt
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);

	/* Scatter-gather buffers (all but the last) must have a length
	 * divisible by the bulk maxpacket size.  Otherwise a data packet
	 * would end up being short, causing a premature end to the data
	 * transfer.  Since high-speed bulk pipes have a maxpacket size
	 * of 512, we'll use that as the scsi device queue's DMA alignment
	 * mask.  Guaranteeing proper alignment of the first buffer will
	 * have the desired effect because, except at the beginning and
	 * the end, scatter-gather buffers follow page boundaries. */
	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));

	/* Many devices have trouble transferring more than 32KB at a time,
	 * while others have trouble with more than 64K. At this time we
	 * are limiting both to 32K (64 sectors).
	 */
	if ((us->flags & US_FL_MAX_SECTORS_64) &&
			sdev->request_queue->max_sectors > 64)
		blk_queue_max_sectors(sdev->request_queue, 64);

	/* We can't put these settings in slave_alloc() because that gets
	 * called before the device type is known.  Consequently these
	 * settings can't be overridden via the scsi devinfo mechanism. */
	if (sdev->type == TYPE_DISK) {

		/* Disk-type devices use MODE SENSE(6) if the protocol
		 * (SubClass) is Transparent SCSI, otherwise they use
		 * MODE SENSE(10). */
		if (us->subclass != US_SC_SCSI)
			sdev->use_10_for_ms = 1;

		/* Many disks only accept MODE SENSE transfer lengths of
		 * 192 bytes (that's what Windows uses). */
		sdev->use_192_bytes_for_3f = 1;

		/* Some devices don't like MODE SENSE with page=0x3f,
		 * which is the command used for checking if a device
		 * is write-protected.  Now that we tell the sd driver
		 * to do a 192-byte transfer with this command the
		 * majority of devices work fine, but a few still can't
		 * handle it.  The sd driver will simply assume those
		 * devices are write-enabled. */
		if (us->flags & US_FL_NO_WP_DETECT)
			sdev->skip_ms_page_3f = 1;

		/* A number of devices have problems with MODE SENSE for
		 * page x08, so we will skip it. */
		sdev->skip_ms_page_8 = 1;

		/* Some disks return the total number of blocks in response
		 * to READ CAPACITY rather than the highest block number.
		 * If this device makes that mistake, tell the sd driver. */
		if (us->flags & US_FL_FIX_CAPACITY)
			sdev->fix_capacity = 1;

		/* A few disks have two indistinguishable versions, one of
		 * which reports the correct capacity and the other does not.
		 * The sd driver has to guess which is the case. */
		if (us->flags & US_FL_CAPACITY_HEURISTICS)
			sdev->guess_capacity = 1;

		/* Some devices report a SCSI revision level above 2 but are
		 * unable to handle the REPORT LUNS command (for which
		 * support is mandatory at level 3).  Since we already have
		 * a Get-Max-LUN request, we won't lose much by setting the
		 * revision level down to 2.  The only devices that would be
		 * affected are those with sparse LUNs. */
		if (sdev->scsi_level > SCSI_2)
			sdev->sdev_target->scsi_level =
					sdev->scsi_level = SCSI_2;

		/* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
		 * Hardware Error) when any low-level error occurs,
		 * recoverable or not.  Setting this flag tells the SCSI
		 * midlayer to retry such commands, which frequently will
		 * succeed and fix the error.  The worst this can lead to
		 * is an occasional series of retries that will all fail. */
		sdev->retry_hwerror = 1;

	} else {

		/* Non-disk-type devices don't need to blacklist any pages
		 * or to force 192-byte transfer lengths for MODE SENSE.
		 * But they do need to use MODE SENSE(10). */
		sdev->use_10_for_ms = 1;
	}

	/* The CB and CBI transports have no way to pass LUN values
	 * other than the bits in the second byte of a CDB.  But those
	 * bits don't get set to the LUN value if the device reports
	 * scsi_level == 0 (UNKNOWN).  Hence such devices must necessarily
	 * be single-LUN.
	 */
	if ((us->protocol == US_PR_CB || us->protocol == US_PR_CBI) &&
			sdev->scsi_level == SCSI_UNKNOWN)
		us->max_lun = 0;

	/* Some devices choke when they receive a PREVENT-ALLOW MEDIUM
	 * REMOVAL command, so suppress those commands. */
	if (us->flags & US_FL_NOT_LOCKABLE)
		sdev->lockable = 0;

	/* this is to satisfy the compiler, though I don't think the
	 * return code is ever checked anywhere. */
	return 0;
}
Example #16
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);

	/* Scatter-gather buffers (all but the last) must have a length
	 * divisible by the bulk maxpacket size.  Otherwise a data packet
	 * would end up being short, causing a premature end to the data
	 * transfer.  Since high-speed bulk pipes have a maxpacket size
	 * of 512, we'll use that as the scsi device queue's DMA alignment
	 * mask.  Guaranteeing proper alignment of the first buffer will
	 * have the desired effect because, except at the beginning and
	 * the end, scatter-gather buffers follow page boundaries. */
	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));

	/* Set the SCSI level to at least 2.  We'll leave it at 3 if that's
	 * what is originally reported.  We need this to avoid confusing
	 * the SCSI layer with devices that report 0 or 1, but need 10-byte
	 * commands (ala ATAPI devices behind certain bridges, or devices
	 * which simply have broken INQUIRY data).
	 *
	 * NOTE: This means /dev/sg programs (ala cdrecord) will get the
	 * actual information.  This seems to be the preference for
	 * programs like that.
	 *
	 * NOTE: This also means that /proc/scsi/scsi and sysfs may report
	 * the actual value or the modified one, depending on where the
	 * data comes from.
	 */
	if (sdev->scsi_level < SCSI_2)
		sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;

	/* According to the technical support people at Genesys Logic,
	 * devices using their chips have problems transferring more than
	 * 32 KB at a time.  In practice people have found that 64 KB
	 * works okay and that's what Windows does.  But we'll be
	 * conservative; people can always use the sysfs interface to
	 * increase max_sectors. */
	if (le16_to_cpu(us->pusb_dev->descriptor.idVendor) == USB_VENDOR_ID_GENESYS &&
			sdev->request_queue->max_sectors > 64)
		blk_queue_max_sectors(sdev->request_queue, 64);

	/* We can't put these settings in slave_alloc() because that gets
	 * called before the device type is known.  Consequently these
	 * settings can't be overridden via the scsi devinfo mechanism. */
	if (sdev->type == TYPE_DISK) {

		/* Disk-type devices use MODE SENSE(6) if the protocol
		 * (SubClass) is Transparent SCSI, otherwise they use
		 * MODE SENSE(10). */
		if (us->subclass != US_SC_SCSI)
			sdev->use_10_for_ms = 1;

		/* Many disks only accept MODE SENSE transfer lengths of
		 * 192 bytes (that's what Windows uses). */
		sdev->use_192_bytes_for_3f = 1;

		/* Some devices don't like MODE SENSE with page=0x3f,
		 * which is the command used for checking if a device
		 * is write-protected.  Now that we tell the sd driver
		 * to do a 192-byte transfer with this command the
		 * majority of devices work fine, but a few still can't
		 * handle it.  The sd driver will simply assume those
		 * devices are write-enabled. */
		if (us->flags & US_FL_NO_WP_DETECT)
			sdev->skip_ms_page_3f = 1;

		/* A number of devices have problems with MODE SENSE for
		 * page x08, so we will skip it. */
		sdev->skip_ms_page_8 = 1;

		/* Some disks return the total number of blocks in response
		 * to READ CAPACITY rather than the highest block number.
		 * If this device makes that mistake, tell the sd driver. */
		if (us->flags & US_FL_FIX_CAPACITY)
			sdev->fix_capacity = 1;

		/* Some devices report a SCSI revision level above 2 but are
		 * unable to handle the REPORT LUNS command (for which
		 * support is mandatory at level 3).  Since we already have
		 * a Get-Max-LUN request, we won't lose much by setting the
		 * revision level down to 2.  The only devices that would be
		 * affected are those with sparse LUNs. */
		sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;

		/* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
		 * Hardware Error) when any low-level error occurs,
		 * recoverable or not.  Setting this flag tells the SCSI
		 * midlayer to retry such commands, which frequently will
		 * succeed and fix the error.  The worst this can lead to
		 * is an occasional series of retries that will all fail. */
		sdev->retry_hwerror = 1;

	} else {

		/* Non-disk-type devices don't need to blacklist any pages
		 * or to force 192-byte transfer lengths for MODE SENSE.
		 * But they do need to use MODE SENSE(10). */
		sdev->use_10_for_ms = 1;
	}

	/* Some devices choke when they receive a PREVENT-ALLOW MEDIUM
	 * REMOVAL command, so suppress those commands. */
	if (us->flags & US_FL_NOT_LOCKABLE)
		sdev->lockable = 0;

	/* this is to satisfy the compiler, though I don't think the
	 * return code is ever checked anywhere. */
	return 0;
}
Example #17
/*
    Create system device file for the enabled slot.
*/
ndas_error_t slot_enable(int s)
{
    ndas_error_t ret = NDAS_ERROR_INTERNAL;
    int got;
    struct ndas_slot* slot = NDAS_GET_SLOT_DEV(s); 
    dbgl_blk(3, "ing s#=%d slot=%p",s, slot);
    got = try_module_get(THIS_MODULE);
    MOD_INC_USE_COUNT;
    
    if ( slot == NULL)
        goto out1;
    
    if ( slot->enabled ) {
        dbgl_blk(1, "already enabled");
        ret = NDAS_OK;
        goto out2;
    }
    ret = ndas_query_slot(s, &slot->info);
    if ( !NDAS_SUCCESS(ret) ) {
        dbgl_blk(1, "fail ndas_query_slot");
        goto out2;
    }
    dbgl_blk(1, "mode=%d", slot->info.mode);
    
    slot->enabled = 1;
    
#if LINUX_VERSION_25_ABOVE

    slot->disk = NULL;
    spin_lock_init(&slot->lock);
    slot->queue = blk_init_queue(
        nblk_request_proc, 
        &slot->lock
    );
	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))    
	    blk_queue_max_phys_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	    blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);	//renamed in 2.6.34	
	    //blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //removed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
	    blk_queue_logical_block_size(slot->queue, slot->info.sector_size);
	#else
	    blk_queue_hardsect_size(slot->queue, slot->info.sector_size);
	#endif

	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))
	    blk_queue_max_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_hw_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS); //renamed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
	    // Set ordered queue property.
		#if 0
		    blk_queue_ordered(slot->queue, QUEUE_ORDERED_TAG_FLUSH, nblk_prepare_flush);
		#endif
	#endif

    slot->disk = alloc_disk(NR_PARTITION);
    if ( slot->disk == NULL ) {
        slot->enabled = 0;
        dbgl_blk(1, "fail alloc disk");
        goto out2;
    }

    slot->disk->major = NDAS_BLK_MAJOR;
    slot->disk->first_minor = (s - NDAS_FIRST_SLOT_NR) << PARTN_BITS;
    slot->disk->fops = &ndas_fops;
    slot->disk->queue = slot->queue;
    slot->disk->private_data = (void*) (long)s;
    slot->queue_flags = 0;

    dbgl_blk(1, "mode=%d", slot->info.mode);
    if ( slot->info.mode == NDAS_DISK_MODE_SINGLE || 
        slot->info.mode == NDAS_DISK_MODE_ATAPI ||
        slot->info.mode == NDAS_DISK_MODE_MEDIAJUKE) 
    {
        char short_serial[NDAS_SERIAL_SHORT_LENGTH + 1];
        if (strlen(slot->info.ndas_serial) > 8) {
            /* Extended serial number is too long as sysfs object name. Use last 8 digit only */
            strncpy(
                short_serial,
                slot->info.ndas_serial + ( NDAS_SERIAL_EXTEND_LENGTH - NDAS_SERIAL_SHORT_LENGTH),
                8);
        } else {
            strncpy(short_serial, slot->info.ndas_serial, 8);
        }
        short_serial[8] =0;
        snprintf(slot->devname,
            sizeof(slot->devname)-1, 
            "ndas-%s-%d", short_serial, slot->info.unit
        );

        strcpy(slot->disk->disk_name, slot->devname);

	    dbgl_blk(1, "just set slot->disk->%s, slot->%s", slot->disk->disk_name, slot->devname );

	#if !LINUX_VERSION_DEVFS_REMOVED_COMPLETELY
	        strcpy(slot->disk->devfs_name, slot->devname);
	#endif
        set_capacity(slot->disk, slot->info.sectors);
	    dbgl_blk(1, "just set capacity slot->disk, slot->info.sectors:%llu", slot->info.sectors);

    } else {
        /* Other mode is not implemented */

    }
    
    if (slot->info.mode == NDAS_DISK_MODE_ATAPI) {
        slot->disk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
	    dbgl_blk(1, "just set slot->disk->flags");
	#if 0
	        kref_init(&slot->ndascd.kref);
	#endif
    }

    dbgl_blk(4, "adding disk: slot=%d, first_minor=%d, capacity=%llu", s, slot->disk->first_minor, slot->info.sectors);
    add_disk(slot->disk);
    dbgl_blk(1, "added disk: slot=%d", s);
   
		#ifndef NDAS_DONT_CARE_SCHEDULER
			#if LINUX_VERSION_AVOID_CFQ_SCHEDULER
				#if CONFIG_SYSFS
				    sal_assert(slot->queue->kobj.ktype);	
				    sal_assert(slot->queue->kobj.ktype->default_attrs);
				    {
				        struct queue_sysfs_entry {
				        	struct attribute attr;
				        	ssize_t (*show)(struct request_queue *, char *);
				        	ssize_t (*store)(struct request_queue *, const char *, size_t);
				        };
				        struct attribute *attr = slot->queue->kobj.ktype->default_attrs[4];
				        struct queue_sysfs_entry *entry = container_of(attr , struct queue_sysfs_entry, attr);
				        //dbgl_blk(1, "now to set the scheduler: slot-queue=%d, scheduler==%s, scheduler_len=%d", slot->queue, NDAS_QUEUE_SCHEDULER, strlen(NDAS_QUEUE_SCHEDULER));
				        entry->store(slot->queue,NDAS_QUEUE_SCHEDULER,strlen(NDAS_QUEUE_SCHEDULER)); 
				        
				    }
				#else
					#error "NDAS driver doesn't work well with CFQ scheduler of 2.6.13 or above kernel." \
				   "if you forcely want to use it, please specify compiler flags by " \
				   "export NDAS_EXTRA_CFLAGS=\"-DNDAS_DONT_CARE_SCHEDULER\" "\
				   "then compile the source again."
				#endif
			#endif
		#endif        
    printk("ndas: /dev/%s enabled\n" , 
            slot->devname);
#else 
    /* < LINUX_VERSION_25_ABOVE */
    dbgl_blk(4, "blksize=%d", DEFAULT_ND_BLKSIZE);
    dbgl_blk(4, "size=%lld", slot->info.sectors);
    dbgl_blk(1, "hardsectsize=%d", slot->info.sector_size);
    ndas_ops_set_blk_size(
        s, 
        DEFAULT_ND_BLKSIZE, 
        slot->info.sectors,
        slot->info.sector_size, 
        DEFAULT_ND_MAX_SECTORS
    );
#ifdef NDAS_DEVFS    
    printk("ndas: /dev/nd/disc%d enabled\n" , 
            s - NDAS_FIRST_SLOT_NR);
#else
    printk("ndas: /dev/nd%c enabled\n" , 
            s + 'a' - NDAS_FIRST_SLOT_NR);
#endif

#endif
    
    //up(&slot->mutex);
 #ifdef NDAS_MSHARE 
    if(NDAS_GET_SLOT_DEV(s)->info.mode == NDAS_DISK_MODE_MEDIAJUKE)
    {
  	    ndas_CheckFormat(s);
    }
 #endif
#if !LINUX_VERSION_25_ABOVE
    ndas_ops_read_partition(s);
#endif
    dbgl_blk(3, "ed");
    return NDAS_OK;
out2:    
    //up(&slot->mutex);
out1:    
    if ( got ) module_put(THIS_MODULE);
    MOD_DEC_USE_COUNT;
    return ret;
}
Example #18
static int __devinit
dm3730logic_cf_alloc(struct platform_device *pdev, int id, unsigned long physaddr,
		unsigned long physize, int irq, int gpio, int bus_width)
{
	struct device *dev = &pdev->dev;
	struct dm3730logic_cf_data *cf_data = dev->platform_data;
	struct cf_device *cf;
	struct request_queue *rq;
	int rc;

	DPRINTK(DEBUG_CF_GENDISK, "%s: dev %p\n", __FUNCTION__, dev);

	if (!physaddr) {
		rc = -ENODEV;
		goto err_noreg;
	}

	/* Allocate and initialize the cf device structure */
	cf = kzalloc(sizeof(struct cf_device), GFP_KERNEL);
	if (!cf) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	platform_set_drvdata(pdev, cf);

	cf->dev = dev;
	cf->id = id;
	cf->physaddr = physaddr;
	cf->physize = physize;
	cf->irq = irq;
	cf->gpio_cd = cf_data->gpio_cd;
	cf->gpio_reset = cf_data->gpio_reset;
	cf->gpio_en = cf_data->gpio_en;
	cf->bus_width = bus_width;

	/* We fake it as ejected to start with */
	cf->ejected = 1;

	rq = blk_init_queue(cf_request, &cf->blk_lock);
	if (rq == NULL) {
		DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);
		return -ENOMEM;
	}
	blk_queue_logical_block_size(rq, 512);

	// Limit requests to simple contiguous ones
	blk_queue_max_sectors(rq, 8);  //4KB
	blk_queue_max_phys_segments(rq, 1);
	blk_queue_max_hw_segments(rq, 1);

	cf->queue = rq;

	// The IRQ semaphore is locked and only in the IRQ is it released
	init_MUTEX_LOCKED(&cf->irq_sem);

	/* The RW semaphore to have only one call into either read/write
	 * at a time */
	init_MUTEX(&cf->rw_sem);

	init_completion(&cf->task_completion);

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	// Create the thread that sits and waits for an interrupt
	rc = kernel_thread(cf_thread, cf, CLONE_KERNEL);
	if (rc < 0) {
		printk("%s:%d thread create fail! %d\n", __FUNCTION__, __LINE__, rc);
		goto err_setup;
	} else {
		wait_for_completion(&cf->task_completion);
	}

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	/* Call the setup code */
	rc = dm3730logic_cf_setup(cf);
	if (rc)
		goto err_setup;

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	dev_set_drvdata(dev, cf);


	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(cf);
err_alloc:
err_noreg:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}
Example #19
int td_linux_block_create(struct td_osdev *dev)
{
	int rc;
	struct request_queue *queue;
	unsigned bio_sector_size = dev->block_params.bio_sector_size;
	unsigned hw_sector_size = dev->block_params.hw_sector_size;

	/* very simple sector size support */
	if (!bio_sector_size || bio_sector_size & 511 || bio_sector_size > 4096) {
		td_os_err(dev, "bio sector size of %u is not supported\n", bio_sector_size);
		return -EINVAL;
	}

	/* MetaData is reported here */
	if (hw_sector_size == 520)
		hw_sector_size = 512;
	if (!hw_sector_size || hw_sector_size & 511 || hw_sector_size > 4096) {
		td_os_err(dev, "hw sector size of %u is not supported\n", hw_sector_size);
		return -EINVAL;
	}

	td_os_notice(dev, " - Set capacity to %llu (%u bytes/sector)\n",
		dev->block_params.capacity, dev->block_params.hw_sector_size);

	/* create a new bio queue */
	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		td_os_err(dev, "Error allocating disk queue.\n");
		rc = -ENOMEM;
		goto error_alloc_queue;
	}

#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
#endif
	
	switch (dev->type) {
	case TD_OSDEV_DEVICE:
		blk_queue_make_request(queue, td_device_make_request);
		dev->_bio_error = td_device_bio_error;
		break;
	case TD_OSDEV_RAID:
		blk_queue_make_request(queue, td_raid_make_request);
		dev->_bio_error = td_raid_bio_error;
		break;
		
	default:
		td_os_err(dev, "Unknown OS type, cannot register block request handler\n");
		rc = -EINVAL;
		goto error_config_queue;
	}
	queue->queuedata = dev;

#if defined QUEUE_FLAG_PLUGGED 
	queue->unplug_fn = td_device_queue_unplug;
#endif

	/* configure queue ordering */

	/* in QUEUE_ORDERED_DRAIN we will get BARRIERS after the queue has
	 * been drained. */
#if defined KABI__blk_queue_ordered

#if KABI__blk_queue_ordered == 2
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN);
#elif KABI__blk_queue_ordered == 3
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN, NULL);
#else
#error unhandled value of KABI__blk_queue_ordered
#endif

#elif defined KABI__blk_queue_flush
	/*
	 * blk_queue_ordered was replaced with blk_queue_flush 
	 * The default implementation is QUEUE_ORDERED_DRAIN
	 */
	blk_queue_flush(queue, 0);
#else
#error undefined KABI__blk_queue_flush or KABI__blk_queue_ordered
#endif

	/* max out the throttling */
#ifdef KABI__blk_queue_max_hw_sectors
	blk_queue_max_hw_sectors(queue, dev->block_params.bio_max_bytes/512);
#elif defined KABI__blk_queue_max_sectors
	blk_queue_max_sectors(queue, dev->block_params.bio_max_bytes/512);
#else
	td_os_err(dev, "No kernel API for maximum sectors\n");
#endif

#if defined KABI__blk_queue_max_segments
	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
#elif defined KABI__blk_queue_max_phys_segments
	blk_queue_max_phys_segments(queue, MAX_SEGMENT_SIZE);
	blk_queue_max_hw_segments(queue, MAX_SEGMENT_SIZE);
#else
	td_os_err(dev, "No kernel API for maximum segments\n");
#endif

	blk_queue_max_segment_size(queue, dev->block_params.bio_max_bytes);

	blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);

	/* setup paged based access */
	td_os_info(dev, "Set queue physical block size to %u\n", hw_sector_size);
#ifdef KABI__blk_queue_physical_block_size
	blk_queue_physical_block_size(queue, hw_sector_size);
#elif defined KABI__blk_queue_hardsect_size
	blk_queue_hardsect_size(queue, hw_sector_size);
#else
	td_os_err(dev, "No kernel API for physical sector size\n");
#endif

#ifdef KABI__blk_queue_logical_block_size
	td_os_info(dev, "Set queue logical block size to %u\n", bio_sector_size);
	blk_queue_logical_block_size(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for logical block size\n");
#endif
#ifdef KABI__blk_queue_io_min
	td_os_info(dev, "Set queue io_min to %u\n", bio_sector_size);
	blk_queue_io_min(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for minimum IO size\n");
#endif
#ifdef KABI__blk_queue_io_opt
	td_os_info(dev, "Set queue io_opt to %u\n", dev->block_params.bio_max_bytes);
	blk_queue_io_opt(queue,  dev->block_params.bio_max_bytes);
#else
	td_os_err(dev, "No kernel API for optimal IO size\n");
#endif

#if 0
	if (dev->block_params.discard)
	{
		int did_something = 0;
#if defined KABI__blk_queue_discard_granularity
		queue->limits.discard_granularity = bio_sector_size;
		did_something++;
#endif
#ifdef KABI__blk_queue_max_discard_sectors
		/* 0xFFFF (max sector size of chunk on trim) * 64  * # SSD */
		blk_queue_max_discard_sectors(queue, TD_MAX_DISCARD_LBA_COUNT * 2);
		did_something++;
#endif
#ifdef KABI__blk_queue_discard_zeroes_data
		queue->limits.discard_zeroes_data = 1;
		did_something++;
#endif
#ifdef KABI__queue_flag_set_unlocked
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
		did_something++;
#endif
		/* Maybe some day.. But not today. 
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, queue);
		*/
		if (did_something)
			td_os_info(dev, "Enabling discard support\n");
		else
			td_os_notice(dev, "No kernel API for discard support\n");
	} else {
		td_os_info(dev, "No DISCARD support enabled\n");
	}
#else
	/* bug 7444 */
	if (dev->block_params.discard)
		td_os_info(dev, "Device supports DISCARD but is currently being forced disabled\n");
#endif

	/*  assign */
	dev->queue = queue;

	return 0;

error_config_queue:
	/* dev->queue is not yet assigned on this path; clean up the local queue */
	blk_cleanup_queue(queue);
	dev->queue = NULL;

error_alloc_queue:
	return rc;
}
Example #20
static int slave_configure(struct scsi_device *sdev)
{
	struct us_data *us = host_to_us(sdev->host);

	/* Scatter-gather buffers (all but the last) must have a length
	 * divisible by the bulk maxpacket size.  Otherwise a data packet
	 * would end up being short, causing a premature end to the data
	 * transfer.  Since high-speed bulk pipes have a maxpacket size
	 * of 512, we'll use that as the scsi device queue's DMA alignment
	 * mask.  Guaranteeing proper alignment of the first buffer will
	 * have the desired effect because, except at the beginning and
	 * the end, scatter-gather buffers follow page boundaries. */
	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));

	/* According to the technical support people at Genesys Logic,
	 * devices using their chips have problems transferring more than
	 * 32 KB at a time.  In practice people have found that 64 KB
	 * works okay and that's what Windows does.  But we'll be
	 * conservative; people can always use the sysfs interface to
	 * increase max_sectors. */
	if (us->pusb_dev->descriptor.idVendor == USB_VENDOR_ID_GENESYS &&
			sdev->request_queue->max_sectors > 64)
		blk_queue_max_sectors(sdev->request_queue, 64);

	/* We can't put these settings in slave_alloc() because that gets
	 * called before the device type is known.  Consequently these
	 * settings can't be overridden via the scsi devinfo mechanism. */
	if (sdev->type == TYPE_DISK) {

		/* Disk-type devices use MODE SENSE(6) if the protocol
		 * (SubClass) is Transparent SCSI, otherwise they use
		 * MODE SENSE(10). */
		if (us->subclass != US_SC_SCSI)
			sdev->use_10_for_ms = 1;

		/* Many disks only accept MODE SENSE transfer lengths of
		 * 192 bytes (that's what Windows uses). */
		sdev->use_192_bytes_for_3f = 1;

		/* A number of devices have problems with MODE SENSE for
		 * page x08, so we will skip it. */
		sdev->skip_ms_page_8 = 1;

#ifndef CONFIG_USB_STORAGE_RW_DETECT
		/* Some devices may not like MODE SENSE with page=0x3f.
		 * Now that we're using 192-byte transfers this may no
		 * longer be a problem.  So this will be a configuration
		 * option. */
		sdev->skip_ms_page_3f = 1;
#endif

	} else {

		/* Non-disk-type devices don't need to blacklist any pages
		 * or to force 192-byte transfer lengths for MODE SENSE.
		 * But they do need to use MODE SENSE(10). */
		sdev->use_10_for_ms = 1;
	}

	/* this is to satisfy the compiler, though I don't think the
	 * return code is ever checked anywhere. */
	return 0;
}
Example #21
/* Compile-time probe (e.g. for a kABI check): confirms that
 * blk_queue_max_sectors() exists with this signature; never executed. */
static inline void dummy(void){
	struct request_queue *q = NULL;
	blk_queue_max_sectors(q, 0);
}
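A stub like this is typically fed to the compiler by a build-time probe: if it compiles, the kernel still exports blk_queue_max_sectors(), and the build defines a feature macro that driver code can test, the way Example #19 tests KABI__blk_queue_max_sectors. A minimal sketch of such a compatibility wrapper follows (the macro name is borrowed from Example #19 and is an assumption of that build system):

#include <linux/blkdev.h>

static inline void set_queue_max_sectors(struct request_queue *q,
					 unsigned int max_sectors)
{
#ifdef KABI__blk_queue_max_sectors
	blk_queue_max_sectors(q, max_sectors);		/* pre-2.6.34 name */
#else
	blk_queue_max_hw_sectors(q, max_sectors);	/* renamed in 2.6.34 */
#endif
}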
Example #22
File: hd.c Project: 3sOx/asuswrt-merlin
static int __init hd_init(void)
{
	int drive;

	if (register_blkdev(MAJOR_NR,"hd"))
		return -1;

	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
	if (!hd_queue) {
		unregister_blkdev(MAJOR_NR,"hd");
		return -ENOMEM;
	}

	blk_queue_max_sectors(hd_queue, 255);
	init_timer(&device_timer);
	device_timer.function = hd_times_out;
	blk_queue_hardsect_size(hd_queue, 512);

#ifdef __i386__
	if (!NR_HD) {
		extern struct drive_info drive_info;
		unsigned char *BIOS = (unsigned char *) &drive_info;
		unsigned long flags;
		int cmos_disks;

		for (drive=0 ; drive<2 ; drive++) {
			hd_info[drive].cyl = *(unsigned short *) BIOS;
			hd_info[drive].head = *(2+BIOS);
			hd_info[drive].wpcom = *(unsigned short *) (5+BIOS);
			hd_info[drive].ctl = *(8+BIOS);
			hd_info[drive].lzone = *(unsigned short *) (12+BIOS);
			hd_info[drive].sect = *(14+BIOS);
#ifdef does_not_work_for_everybody_with_scsi_but_helps_ibm_vp
			if (hd_info[drive].cyl && NR_HD == drive)
				NR_HD++;
#endif
			BIOS += 16;
		}

	/*
		We query CMOS about hard disks : it could be that 
		we have a SCSI/ESDI/etc controller that is BIOS
		compatible with ST-506, and thus showing up in our
		BIOS table, but not register compatible, and therefore
		not present in CMOS.

		Furthermore, we will assume that our ST-506 drives
		<if any> are the primary drives in the system, and 
		the ones reflected as drive 1 or 2.

		The first drive is stored in the high nibble of CMOS
		byte 0x12, the second in the low nibble.  This will be
		either a 4 bit drive type or 0xf indicating use byte 0x19 
		for an 8 bit type, drive 1, 0x1a for drive 2 in CMOS.

		Needless to say, a non-zero value means we have 
		an AT controller hard disk for that drive.

		Currently the rtc_lock is a bit academic since this
		driver is non-modular, but someday... ?         Paul G.
	*/

		spin_lock_irqsave(&rtc_lock, flags);
		cmos_disks = CMOS_READ(0x12);
		spin_unlock_irqrestore(&rtc_lock, flags);

		if (cmos_disks & 0xf0) {
			if (cmos_disks & 0x0f)
				NR_HD = 2;
			else
				NR_HD = 1;
		}
	}
#endif /* __i386__ */
#ifdef __arm__
	if (!NR_HD) {
		/* We don't know anything about the drive.  This means
		 * that you *MUST* specify the drive parameters to the
		 * kernel yourself.
		 */
		printk("hd: no drives specified - use hd=cyl,head,sectors"
			" on kernel command line\n");
	}
#endif
	if (!NR_HD)
		goto out;

	for (drive=0 ; drive < NR_HD ; drive++) {
		struct gendisk *disk = alloc_disk(64);
		struct hd_i_struct *p = &hd_info[drive];
		if (!disk)
			goto Enomem;
		disk->major = MAJOR_NR;
		disk->first_minor = drive << 6;
		disk->fops = &hd_fops;
		sprintf(disk->disk_name, "hd%c", 'a'+drive);
		disk->private_data = p;
		set_capacity(disk, p->head * p->sect * p->cyl);
		disk->queue = hd_queue;
		p->unit = drive;
		hd_gendisk[drive] = disk;
		printk ("%s: %luMB, CHS=%d/%d/%d\n",
			disk->disk_name, (unsigned long)get_capacity(disk)/2048,
			p->cyl, p->head, p->sect);
	}

	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
		printk("hd: unable to get IRQ%d for the hard disk driver\n",
			HD_IRQ);
		goto out1;
	}
	if (!request_region(HD_DATA, 8, "hd")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
		goto out2;
	}
	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
		goto out3;
	}

	/* Let them fly */
	for(drive=0; drive < NR_HD; drive++)
		add_disk(hd_gendisk[drive]);

	return 0;

out3:
	release_region(HD_DATA, 8);
out2:
	free_irq(HD_IRQ, NULL);
out1:
	for (drive = 0; drive < NR_HD; drive++)
		put_disk(hd_gendisk[drive]);
	NR_HD = 0;
out:
	del_timer(&device_timer);
	unregister_blkdev(MAJOR_NR,"hd");
	blk_cleanup_queue(hd_queue);
	return -1;
Enomem:
	while (drive--)
		put_disk(hd_gendisk[drive]);
	goto out;
}
Example #23
/**
 * @brief	Card initialization function.
 * @param	work[in]: Work structure.
 * @return	None.
 */
static void gp_sdcard_work_init(struct work_struct *work)
{
	gpSDInfo_t* sd = container_of(work, gpSDInfo_t,init);
	int pin_handle;
	pin_handle = gp_board_pin_func_request((sd->device_id==0)?GP_PIN_SD0:GP_PIN_SD1, GP_BOARD_WAIT_FOREVER);
	if(pin_handle<0)
	{
		DERROR("SD%d: can't get pin handle\n", sd->device_id);
		goto init_work_end;
	}
	/* ----- Initial SD module (controller) ----- */
	gpHalSDInit(sd->device_id);
	/* ----- Initial SD card ----- */
	gp_sdcard_cardinit(sd);
	gp_board_pin_func_release(pin_handle);	
	if(sd->present==1)
	{
		if(sd->card_type == SDIO)
		{
			sd->pin_handle = gp_board_pin_func_request((sd->device_id==0)?GP_PIN_SD0:GP_PIN_SD1, GP_BOARD_WAIT_FOREVER);
			if(sd->pin_handle<0)
			{
				DERROR("SD%d: can't get pin handle\n", sd->device_id);
				goto init_work_end;
			}
			DEBUG("SDIO card detected\n");
			gp_sdio_insert_device(sd->device_id, sd->RCA);	
		}
		else
		{
			sd->queue = blk_init_queue(gp_sdcard_request, &sd->lock);
			if(sd->queue==NULL)
			{
				DERROR("NO MEMORY: queue\n");
				goto init_work_end;
			} 	 
			blk_queue_ordered(sd->queue, QUEUE_ORDERED_DRAIN, NULL);
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sd->queue);
			blk_queue_logical_block_size(sd->queue, 512);
			blk_queue_max_sectors(sd->queue, SD_MAX_SECTORS );
			blk_queue_max_phys_segments(sd->queue, SD_MAX_PHY_SEGMENTS);
			blk_queue_max_hw_segments(sd->queue, SD_MAX_HW_SEGMENTS);
			blk_queue_max_segment_size(sd->queue, SD_MAX_PHY_SEGMENTS_SIZE);
			/* ----- Initial scatter list ----- */
			sd->sg = kmalloc(sizeof(struct scatterlist) *SD_MAX_PHY_SEGMENTS, GFP_KERNEL);
			if (!sd->sg) 
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_thread;
			}
			sg_init_table(sd->sg, SD_MAX_PHY_SEGMENTS);
		
			init_MUTEX(&sd->thread_sem);
			/* ----- Enable thread ----- */
			sd->thread = kthread_run(gp_sdcard_queue_thread, sd, "sd-qd");
			if (IS_ERR(sd->thread)) 
			{
				goto fail_thread;
			}
			sd->queue->queuedata = sd;
			/* ----- Setup gendisk structure ----- */
			sd->gd = alloc_disk(SD_MINORS);
			if (sd->gd==NULL) 
			{
				DERROR("NO MEMORY: gendisk\n");
				blk_cleanup_queue(sd->queue);	
				goto fail_gd;	
			}
			/* ----- Set gendisk structure ----- */
			sd->gd->major = sd_major;
			sd->gd->first_minor = sd->device_id*SD_MINORS;
			sd->gd->fops = &gp_sdcard_ops;
			sd->gd->queue = sd->queue;
			sd->gd->private_data = sd;
			snprintf (sd->gd->disk_name, 32, "sdcard%c", sd->device_id + 'a');
			set_capacity(sd->gd,sd->capacity);
			add_disk(sd->gd);
		}
		goto init_work_end;
	}
	else
	{
		DERROR("Initial fail\n");
		goto init_work_end;
	}
fail_gd:
	/* ----- Then terminate our worker thread ----- */
	kthread_stop(sd->thread);
fail_thread:
	if (sd->sg)
		kfree(sd->sg);
	sd->sg = NULL;
	blk_cleanup_queue (sd->queue);	
init_work_end:	
	sd->timer.expires = jiffies + SD_CD_POLL;
	add_timer(&sd->timer);
}
Example #24
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #25
File: hd.c Project: 10x-Amin/x10_Th_kernel
static int __init hd_init(void)
{
	int drive;

	if (register_blkdev(MAJOR_NR, "hd"))
		return -1;

	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
	if (!hd_queue) {
		unregister_blkdev(MAJOR_NR, "hd");
		return -ENOMEM;
	}

	blk_queue_max_sectors(hd_queue, 255);
	init_timer(&device_timer);
	device_timer.function = hd_times_out;
	blk_queue_hardsect_size(hd_queue, 512);

	if (!NR_HD) {
		/*
		 * We don't know anything about the drive.  This means
		 * that you *MUST* specify the drive parameters to the
		 * kernel yourself.
		 *
		 * If we were on an i386, we used to read this info from
		 * the BIOS or CMOS.  This doesn't work all that well,
		 * since this assumes that this is a primary or secondary
		 * drive, and if we're using this legacy driver, it's
		 * probably an auxiliary controller added to recover
		 * legacy data off an ST-506 drive.  Either way, it's
		 * definitely safest to have the user explicitly specify
		 * the information.
		 */
		printk("hd: no drives specified - use hd=cyl,head,sectors"
			" on kernel command line\n");
		goto out;
	}

	for (drive = 0 ; drive < NR_HD ; drive++) {
		struct gendisk *disk = alloc_disk(64);
		struct hd_i_struct *p = &hd_info[drive];
		if (!disk)
			goto Enomem;
		disk->major = MAJOR_NR;
		disk->first_minor = drive << 6;
		disk->fops = &hd_fops;
		sprintf(disk->disk_name, "hd%c", 'a'+drive);
		disk->private_data = p;
		set_capacity(disk, p->head * p->sect * p->cyl);
		disk->queue = hd_queue;
		p->unit = drive;
		hd_gendisk[drive] = disk;
		printk("%s: %luMB, CHS=%d/%d/%d\n",
			disk->disk_name, (unsigned long)get_capacity(disk)/2048,
			p->cyl, p->head, p->sect);
	}

	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
		printk("hd: unable to get IRQ%d for the hard disk driver\n",
			HD_IRQ);
		goto out1;
	}
	if (!request_region(HD_DATA, 8, "hd")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
		goto out2;
	}
	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
		goto out3;
	}

	/* Let them fly */
	for (drive = 0; drive < NR_HD; drive++)
		add_disk(hd_gendisk[drive]);

	return 0;

out3:
	release_region(HD_DATA, 8);
out2:
	free_irq(HD_IRQ, NULL);
out1:
	for (drive = 0; drive < NR_HD; drive++)
		put_disk(hd_gendisk[drive]);
	NR_HD = 0;
out:
	del_timer(&device_timer);
	unregister_blkdev(MAJOR_NR, "hd");
	blk_cleanup_queue(hd_queue);
	return -1;
Enomem:
	while (drive--)
		put_disk(hd_gendisk[drive]);
	goto out;
}