Example #1
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (host->dev->dma_mask && *host->dev->dma_mask)
		limit = *host->dev->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_sectors(mq->queue, host->max_sectors);
	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	mq->queue->queuedata = mq;
	mq->req = NULL;

	mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
			 GFP_KERNEL);
	if (!mq->sg) {
		ret = -ENOMEM;
		goto cleanup;
	}

	init_completion(&mq->thread_complete);
	init_waitqueue_head(&mq->thread_wq);
	init_MUTEX(&mq->thread_sem);

	ret = kernel_thread(mmc_queue_thread, mq, CLONE_KERNEL);
	if (ret >= 0) {
		wait_for_completion(&mq->thread_complete);
		init_completion(&mq->thread_complete);
		ret = 0;
		goto out;
	}

 cleanup:
	kfree(mq->sg);
	mq->sg = NULL;

	blk_cleanup_queue(mq->queue);
 out:
	return ret;
}
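For orientation, here is a minimal, hypothetical sketch of how a block driver of this era might call mmc_init_queue() from its probe path. The mmc_blk_data structure, its fields and example_blk_probe() are illustrative assumptions; only mmc_init_queue() itself comes from the example above.

/*
 * Hypothetical caller, loosely modelled on an MMC block driver probe path.
 * The mmc_blk_data structure and example_blk_probe() are assumptions made
 * for illustration; only mmc_init_queue() is taken from the example above.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
};

static int example_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md;
	int ret;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	spin_lock_init(&md->lock);

	/* Attach a request queue (and its worker thread) to the card. */
	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret) {
		kfree(md);
		return ret;
	}

	/* ... allocate the gendisk, set the capacity and call add_disk() here ... */
	return 0;
}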
/*
 * Initializes the block layer interfaces.
 */
static int sd_init_blk_dev(struct sd_host *host)
{
	struct gendisk *disk;
	struct request_queue *queue;
	int channel;
	int retval;

	channel = to_channel(exi_get_exi_channel(host->exi_device));

	/* queue */
	retval = -ENOMEM;
	spin_lock_init(&host->queue_lock);
	queue = blk_init_queue(sd_request_func, &host->queue_lock);
	if (!queue) {
		sd_printk(KERN_ERR, "error initializing queue\n");
		goto err_blk_init_queue;
	}
	blk_queue_dma_alignment(queue, EXI_DMA_ALIGN);
	blk_queue_max_phys_segments(queue, 1);
	blk_queue_max_hw_segments(queue, 1);
	blk_queue_max_sectors(queue, 8);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
	queue->queuedata = host;
	host->queue = queue;

	/* disk */
	disk = alloc_disk(1 << MMC_SHIFT);
	if (!disk) {
		sd_printk(KERN_ERR, "error allocating disk\n");
		goto err_alloc_disk;
	}
	disk->major = SD_MAJOR;
	disk->first_minor = channel << MMC_SHIFT;
	disk->fops = &sd_fops;
	sprintf(disk->disk_name, "%s%c", SD_NAME, 'a' + channel);
	disk->private_data = host;
	disk->queue = host->queue;
	host->disk = disk;

	retval = 0;
	goto out;

err_alloc_disk:
	blk_cleanup_queue(host->queue);
	host->queue = NULL;
err_blk_init_queue:
out:
	return retval;
}
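A hedged sketch of the registration step that would typically follow sd_init_blk_dev(): publishing the capacity and making the disk visible with add_disk(). sd_get_num_sectors() is a hypothetical placeholder for however this driver learns the media size; it is not part of the example above.

/*
 * Hypothetical follow-up to sd_init_blk_dev().  sd_get_num_sectors() is a
 * placeholder helper, not taken from the example above.
 */
static void example_sd_register_disk(struct sd_host *host)
{
	/* publish the media size (in 512-byte sectors) and expose the disk */
	set_capacity(host->disk, sd_get_num_sectors(host));
	add_disk(host->disk);
}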
Example #3
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
    struct mmc_host *host = card->host;
    u64 limit = BLK_BOUNCE_HIGH;
    int ret;

    if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
        limit = *mmc_dev(host)->dma_mask;

    mq->card = card;
    mq->queue = blk_init_queue(mmc_request, lock);
    if (!mq->queue)
        return -ENOMEM;

    blk_queue_prep_rq(mq->queue, mmc_prep_request);
    blk_queue_bounce_limit(mq->queue, limit);
    blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
    blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
    blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
    blk_queue_max_segment_size(mq->queue, host->max_seg_size);

    mq->queue->queuedata = mq;
    mq->req = NULL;

    mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
                     GFP_KERNEL);
    if (!mq->sg) {
        ret = -ENOMEM;
        goto cleanup_queue;
    }

    init_MUTEX(&mq->thread_sem);

    mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
    if (IS_ERR(mq->thread)) {
        ret = PTR_ERR(mq->thread);
        goto free_sg;
    }

    return 0;

free_sg:
    kfree(mq->sg);
    mq->sg = NULL;
cleanup_queue:
    blk_cleanup_queue(mq->queue);
    return ret;
}
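The examples above hand the queue to a worker started with kthread_run(mmc_queue_thread, ...). As a rough, hedged sketch of the common worker-loop pattern of that kernel generation (not quoted from these examples), it looks roughly like this; mq->issue_fn is an assumed per-queue callback that actually issues the request to the host.

/*
 * Sketch of a request worker of this era, under stated assumptions:
 * mq->issue_fn is assumed to be the per-queue callback that performs the
 * transfer.  Details will differ from the real mmc_queue_thread().
 */
static int example_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		/* fetch the next request (if any) under the queue lock */
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/* drop the semaphore so the queue can be suspended, then sleep */
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);	/* assumed per-queue issue callback */
	} while (1);
	up(&mq->thread_sem);

	return 0;
}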
Example #4
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
/* pdev is NULL for eisa */
static int cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
	request_queue_t *q;
	int j;

	/* 
	 * register block devices
	 * Find disks and fill in structs
	 * Get an interrupt, set the Q depth and get into /proc
	 */

	/* If this is successful it should ensure that we are the only */
	/* instance of the driver */
	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
		goto Enomem4;
	}
	hba[i]->access.set_intr_mask(hba[i], 0);
	if (request_irq(hba[i]->intr, do_ida_intr,
		SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
		hba[i]->devname, hba[i]))
	{
		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
				hba[i]->intr, hba[i]->devname);
		goto Enomem3;
	}
		
	for (j=0; j<NWD; j++) {
		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
		if (!ida_gendisk[i][j])
			goto Enomem2;
	}

	hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
		&(hba[i]->cmd_pool_dhandle));
	hba[i]->cmd_pool_bits = kmalloc(
		((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
		GFP_KERNEL);

	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
			goto Enomem1;

	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
	memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
	printk(KERN_INFO "cpqarray: Finding drives on %s",
		hba[i]->devname);

	spin_lock_init(&hba[i]->lock);
	q = blk_init_queue(do_ida_request, &hba[i]->lock);
	if (!q)
		goto Enomem1;

	hba[i]->queue = q;
	q->queuedata = hba[i];

	getgeometry(i);
	start_fwbk(i);

	ida_procinit(i);

	if (pdev)
		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);

	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(q, SG_MAX);

	/* This is a driver limit and could be eliminated. */
	blk_queue_max_phys_segments(q, SG_MAX);
	
	init_timer(&hba[i]->timer);
	hba[i]->timer.expires = jiffies + IDA_TIMER;
	hba[i]->timer.data = (unsigned long)hba[i];
	hba[i]->timer.function = ida_timer;
	add_timer(&hba[i]->timer);

	/* Enable IRQ now that spinlock and rate limit timer are set up */
	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);

	for(j=0; j<NWD; j++) {
		struct gendisk *disk = ida_gendisk[i][j];
		drv_info_t *drv = &hba[i]->drv[j];
		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
		disk->major = COMPAQ_SMART2_MAJOR + i;
		disk->first_minor = j<<NWD_SHIFT;
		disk->fops = &ida_fops;
		if (j && !drv->nr_blks)
			continue;
		blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
		set_capacity(disk, drv->nr_blks);
		disk->queue = hba[i]->queue;
		disk->private_data = drv;
		add_disk(disk);
	}

	/* done ! */
	return(i);

Enomem1:
	nr_ctlr = i; 
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t), 
				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
Enomem2:
	while (j--) {
		put_disk(ida_gendisk[i][j]);
		ida_gendisk[i][j] = NULL;
	}
	free_irq(hba[i]->intr, hba[i]);
Enomem3:
	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
Enomem4:
	if (pdev)
		pci_set_drvdata(pdev, NULL);
	release_io_mem(hba[i]);
	free_hba(i);

	printk( KERN_ERR "cpqarray: out of memory");

	return -1;
}
Example #6
/*
    Create system device file for the enabled slot.
*/
ndas_error_t slot_enable(int s)
{
    ndas_error_t ret = NDAS_ERROR_INTERNAL;
    int got;
    struct ndas_slot* slot = NDAS_GET_SLOT_DEV(s); 
    dbgl_blk(3, "ing s#=%d slot=%p",s, slot);
    got = try_module_get(THIS_MODULE);
    MOD_INC_USE_COUNT;
    
    if ( slot == NULL)
        goto out1;
    
    if ( slot->enabled ) {
        dbgl_blk(1, "already enabled");
        ret = NDAS_OK;
        goto out2;
    }
    ret = ndas_query_slot(s, &slot->info);
    if ( !NDAS_SUCCESS(ret) ) {
        dbgl_blk(1, "fail ndas_query_slot");
        goto out2;
    }
    dbgl_blk(1, "mode=%d", slot->info.mode);
    
    slot->enabled = 1;
    
#if LINUX_VERSION_25_ABOVE

    slot->disk = NULL;
    spin_lock_init(&slot->lock);
    slot->queue = blk_init_queue(
        nblk_request_proc, 
        &slot->lock
    );
	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))    
	    blk_queue_max_phys_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	    blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT);	//renamed in 2.6.34	
	    //blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //removed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
	    blk_queue_logical_block_size(slot->queue, slot->info.sector_size);
	#else
	    blk_queue_hardsect_size(slot->queue, slot->info.sector_size);
	#endif

	#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33))
	    blk_queue_max_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS);
	#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33))
	    blk_queue_max_hw_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS); //renamed in 2.6.34
	#endif

	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
	    // Set ordered queue property.
		#if 0
		    blk_queue_ordered(slot->queue, QUEUE_ORDERED_TAG_FLUSH, nblk_prepare_flush);
		#endif
	#endif

    slot->disk = alloc_disk(NR_PARTITION);
    if ( slot->disk == NULL ) {
        slot->enabled = 0;
        dbgl_blk(1, "fail alloc disk");
        goto out2;
    }

    slot->disk->major = NDAS_BLK_MAJOR;
    slot->disk->first_minor = (s - NDAS_FIRST_SLOT_NR) << PARTN_BITS;
    slot->disk->fops = &ndas_fops;
    slot->disk->queue = slot->queue;
    slot->disk->private_data = (void*) (long)s;
    slot->queue_flags = 0;

    dbgl_blk(1, "mode=%d", slot->info.mode);
    if ( slot->info.mode == NDAS_DISK_MODE_SINGLE || 
        slot->info.mode == NDAS_DISK_MODE_ATAPI ||
        slot->info.mode == NDAS_DISK_MODE_MEDIAJUKE) 
    {
        char short_serial[NDAS_SERIAL_SHORT_LENGTH + 1];
        if (strlen(slot->info.ndas_serial) > 8) {
            /* Extended serial number is too long for a sysfs object name. Use the last 8 digits only */
            strncpy(
                short_serial,
                slot->info.ndas_serial + ( NDAS_SERIAL_EXTEND_LENGTH - NDAS_SERIAL_SHORT_LENGTH),
                8);
        } else {
            strncpy(short_serial, slot->info.ndas_serial, 8);
        }
        short_serial[8] =0;
        snprintf(slot->devname,
            sizeof(slot->devname)-1, 
            "ndas-%s-%d", short_serial, slot->info.unit
        );

        strcpy(slot->disk->disk_name, slot->devname);

	    dbgl_blk(1, "just set slot->disk->%s, slot->%s", slot->disk->disk_name, slot->devname );

	#if !LINUX_VERSION_DEVFS_REMOVED_COMPLETELY
	        strcpy(slot->disk->devfs_name, slot->devname);
	#endif
        set_capacity(slot->disk, slot->info.sectors);
	    dbgl_blk(1, "just set capacity slot->disk, slot->info.sectors:%llu", slot->info.sectors);

    } else {
        /* Other modes are not implemented */

    }
    
    if (slot->info.mode == NDAS_DISK_MODE_ATAPI) {
        slot->disk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
	    dbgl_blk(1, "just set slot->disk->flags");
	#if 0
	        kref_init(&slot->ndascd.kref);
	#endif
    }

    dbgl_blk(4, "adding disk: slot=%d, first_minor=%d, capacity=%llu", s, slot->disk->first_minor, slot->info.sectors);
    add_disk(slot->disk);
    dbgl_blk(1, "added disk: slot=%d", s);
   
		#ifndef NDAS_DONT_CARE_SCHEDULER
			#if LINUX_VERSION_AVOID_CFQ_SCHEDULER
				#if CONFIG_SYSFS
				    sal_assert(slot->queue->kobj.ktype);	
				    sal_assert(slot->queue->kobj.ktype->default_attrs);
				    {
				        struct queue_sysfs_entry {
				        	struct attribute attr;
				        	ssize_t (*show)(struct request_queue *, char *);
				        	ssize_t (*store)(struct request_queue *, const char *, size_t);
				        };
				        struct attribute *attr = slot->queue->kobj.ktype->default_attrs[4];
				        struct queue_sysfs_entry *entry = container_of(attr , struct queue_sysfs_entry, attr);
				        //dbgl_blk(1, "now to set the scheduler: slot-queue=%d, scheduler==%s, scheduler_len=%d", slot->queue, NDAS_QUEUE_SCHEDULER, strlen(NDAS_QUEUE_SCHEDULER));
				        entry->store(slot->queue,NDAS_QUEUE_SCHEDULER,strlen(NDAS_QUEUE_SCHEDULER)); 
				        
				    }
				#else
					#error "NDAS driver doesn't work well with CFQ scheduler of 2.6.13 or above kernel." \
				   "if you forcely want to use it, please specify compiler flags by " \
				   "export NDAS_EXTRA_CFLAGS=\"-DNDAS_DONT_CARE_SCHEDULER\" "\
				   "then compile the source again."
				#endif
			#endif
		#endif        
    printk("ndas: /dev/%s enabled\n" , 
            slot->devname);
#else 
    /* < LINUX_VERSION_25_ABOVE */
    dbgl_blk(4, "blksize=%d", DEFAULT_ND_BLKSIZE);
    dbgl_blk(4, "size=%lld", slot->info.sectors);
    dbgl_blk(1, "hardsectsize=%d", slot->info.sector_size);
    ndas_ops_set_blk_size(
        s, 
        DEFAULT_ND_BLKSIZE, 
        slot->info.sectors,
        slot->info.sector_size, 
        DEFAULT_ND_MAX_SECTORS
    );
#ifdef NDAS_DEVFS    
    printk("ndas: /dev/nd/disc%d enabled\n" , 
            s - NDAS_FIRST_SLOT_NR);
#else
    printk("ndas: /dev/nd%c enabled\n" , 
            s + 'a' - NDAS_FIRST_SLOT_NR);
#endif

#endif
    
    //up(&slot->mutex);
 #ifdef NDAS_MSHARE 
    if(NDAS_GET_SLOT_DEV(s)->info.mode == NDAS_DISK_MODE_MEDIAJUKE)
    {
  	    ndas_CheckFormat(s);
    }
 #endif
#if !LINUX_VERSION_25_ABOVE
    ndas_ops_read_partition(s);
#endif
    dbgl_blk(3, "ed");
    return NDAS_OK;
out2:    
    //up(&slot->mutex);
out1:    
    if ( got ) module_put(THIS_MODULE);
    MOD_DEC_USE_COUNT;
    return ret;
}
Example #7
static int __devinit
dm3730logic_cf_alloc(struct platform_device *pdev, int id, unsigned long physaddr,
		unsigned long physize, int irq, int gpio, int bus_width)
{
	struct device *dev = &pdev->dev;
	struct dm3730logic_cf_data *cf_data = dev->platform_data;
	struct cf_device *cf;
	struct request_queue *rq;
	int rc;

	DPRINTK(DEBUG_CF_GENDISK, "%s: dev %p\n", __FUNCTION__, dev);

	if (!physaddr) {
		rc = -ENODEV;
		goto err_noreg;
	}

	/* Allocate and initialize the cf device structure */
	cf = kzalloc(sizeof(struct cf_device), GFP_KERNEL);
	if (!cf) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	platform_set_drvdata(pdev, cf);

	cf->dev = dev;
	cf->id = id;
	cf->physaddr = physaddr;
	cf->physize = physize;
	cf->irq = irq;
	cf->gpio_cd = cf_data->gpio_cd;
	cf->gpio_reset = cf_data->gpio_reset;
	cf->gpio_en = cf_data->gpio_en;
	cf->bus_width = bus_width;

	/* We fake it as ejected to start with */
	cf->ejected = 1;

	rq = blk_init_queue(cf_request, &cf->blk_lock);
	if (rq == NULL) {
		DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);
		rc = -ENOMEM;
		goto err_setup;	/* free cf and clear drvdata instead of leaking them */
	}
	blk_queue_logical_block_size(rq, 512);

	// Limit requests to simple contiguous ones
	blk_queue_max_sectors(rq, 8);  //4KB
	blk_queue_max_phys_segments(rq, 1);
	blk_queue_max_hw_segments(rq, 1);

	cf->queue = rq;

	// The IRQ semaphore starts locked; it is only released from the IRQ handler
	init_MUTEX_LOCKED(&cf->irq_sem);

	/* The RW semaphore to have only one call into either read/write
	 * at a time */
	init_MUTEX(&cf->rw_sem);

	init_completion(&cf->task_completion);

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	// Create the thread that sits and waits for an interrupt
	rc = kernel_thread(cf_thread, cf, CLONE_KERNEL);
	if (rc < 0) {
		printk("%s:%d thread create fail! %d\n", __FUNCTION__, __LINE__, rc);
		goto err_setup;
	} else {
		wait_for_completion(&cf->task_completion);
	}

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	/* Call the setup code */
	rc = dm3730logic_cf_setup(cf);
	if (rc)
		goto err_setup;

	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	dev_set_drvdata(dev, cf);


	DPRINTK(DEBUG_CF_TRACE, "%s:%d\n", __FUNCTION__, __LINE__);

	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(cf);
err_alloc:
err_noreg:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}
Example #8
/* --------------------------------------------------------------------
 * SystemACE device setup/teardown code
 */
static int __devinit ace_setup(struct ace_device *ace)
{
    u16 version;
    u16 val;
    int rc;

    dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
    dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
            (unsigned long long)ace->physaddr, ace->irq);

    spin_lock_init(&ace->lock);
    init_completion(&ace->id_completion);

    /*
     * Map the device
     */
    ace->baseaddr = ioremap(ace->physaddr, 0x80);
    if (!ace->baseaddr)
        goto err_ioremap;

    /*
     * Initialize the state machine tasklet and stall timer
     */
    tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
    setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

    /*
     * Initialize the request queue
     */
    ace->queue = blk_init_queue(ace_request, &ace->lock);
    if (ace->queue == NULL)
        goto err_blk_initq;
    blk_queue_logical_block_size(ace->queue, 512);
#ifdef CONFIG_PPC_256K_PAGES
    blk_queue_max_phys_segments(ace->queue, 1);
    blk_queue_max_hw_segments(ace->queue, 1);
#endif

    /*
     * Allocate and initialize GD structure
     */
    ace->gd = alloc_disk(ACE_NUM_MINORS);
    if (!ace->gd)
        goto err_alloc_disk;

    ace->gd->major = ace_major;
    ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
    ace->gd->fops = &ace_fops;
    ace->gd->queue = ace->queue;
    ace->gd->private_data = ace;
    snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

    /* set bus width */
    if (ace->bus_width == ACE_BUS_WIDTH_16) {
        /* 0x0101 should work regardless of endianness */
        ace_out_le16(ace, ACE_BUSMODE, 0x0101);

        /* read it back to determine endianness */
        if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
            ace->reg_ops = &ace_reg_le16_ops;
        else
            ace->reg_ops = &ace_reg_be16_ops;
    } else {
        ace_out_8(ace, ACE_BUSMODE, 0x00);
        ace->reg_ops = &ace_reg_8_ops;
    }

    /* Make sure version register is sane */
    version = ace_in(ace, ACE_VERSION);
    if ((version == 0) || (version == 0xFFFF))
        goto err_read;

    /* Put sysace in a sane state by clearing most control reg bits */
    ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
            ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

    /* Now we can hook up the irq handler */
    if (ace->irq != NO_IRQ) {
        rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
        if (rc) {
            /* Failure - fall back to polled mode */
            dev_err(ace->dev, "request_irq failed\n");
            ace->irq = NO_IRQ;
        }
    }

    /* Enable interrupts */
    val = ace_in(ace, ACE_CTRL);
    val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
    ace_out(ace, ACE_CTRL, val);

    /* Print the identification */
    dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
             (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
    dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
            (unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

    ace->media_change = 1;
    ace_revalidate_disk(ace->gd);

    /* Make the sysace device 'live' */
    add_disk(ace->gd);

    return 0;

err_read:
    put_disk(ace->gd);
err_alloc_disk:
    blk_cleanup_queue(ace->queue);
err_blk_initq:
    iounmap(ace->baseaddr);
err_ioremap:
    dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
             (unsigned long long) ace->physaddr);
    return -ENOMEM;
}
/**
* @brief 	Card initial function.
* @param 	work[in]: Work structure.
* @return 	None.
*/ 
static void gp_sdcard_work_init(struct work_struct *work)
{
	gpSDInfo_t* sd = container_of(work, gpSDInfo_t,init);
	int pin_handle;
	pin_handle = gp_board_pin_func_request((sd->device_id==0)?GP_PIN_SD0:GP_PIN_SD1, GP_BOARD_WAIT_FOREVER);
	if(pin_handle<0)
	{
		DERROR("SD%d: can't get pin handle\n", sd->device_id);
		goto init_work_end;
	}
	/* ----- Initial SD module (controller) ----- */
	gpHalSDInit(sd->device_id);
	/* ----- Initial SD card ----- */
	gp_sdcard_cardinit(sd);
	gp_board_pin_func_release(pin_handle);	
	if(sd->present==1)
	{
		if(sd->card_type == SDIO)
		{
			sd->pin_handle = gp_board_pin_func_request((sd->device_id==0)?GP_PIN_SD0:GP_PIN_SD1, GP_BOARD_WAIT_FOREVER);
			if(sd->pin_handle<0)
			{
				DERROR("SD%d: can't get pin handle\n", sd->device_id);
				goto init_work_end;
			}
			DEBUG("SDIO card detected\n");
			gp_sdio_insert_device(sd->device_id, sd->RCA);	
		}
		else
		{
			sd->queue = blk_init_queue(gp_sdcard_request, &sd->lock);
			if(sd->queue==NULL)
			{
				DERROR("NO MEMORY: queue\n");
				goto init_work_end;
			} 	 
			blk_queue_ordered(sd->queue, QUEUE_ORDERED_DRAIN, NULL);
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sd->queue);
			blk_queue_logical_block_size(sd->queue, 512);
			blk_queue_max_sectors(sd->queue, SD_MAX_SECTORS );
			blk_queue_max_phys_segments(sd->queue, SD_MAX_PHY_SEGMENTS);
			blk_queue_max_hw_segments(sd->queue, SD_MAX_HW_SEGMENTS);
			blk_queue_max_segment_size(sd->queue, SD_MAX_PHY_SEGMENTS_SIZE);
			/* ----- Initial scatter list ----- */
			sd->sg = kmalloc(sizeof(struct scatterlist) *SD_MAX_PHY_SEGMENTS, GFP_KERNEL);
			if (!sd->sg) 
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_thread;
			}
			sg_init_table(sd->sg, SD_MAX_PHY_SEGMENTS);
		
			init_MUTEX(&sd->thread_sem);
			/* ----- Enable thread ----- */
			sd->thread = kthread_run(gp_sdcard_queue_thread, sd, "sd-qd");
			if (IS_ERR(sd->thread)) 
			{
				goto fail_thread;
			}
			sd->queue->queuedata = sd;
			/* ----- Setup gendisk structure ----- */
			sd->gd = alloc_disk(SD_MINORS);
			if (sd->gd==NULL)
			{
				DERROR("NO MEMORY: gendisk\n");
				/* fail_thread below cleans up the queue; avoid a double blk_cleanup_queue */
				goto fail_gd;
			}
			/* ----- Set gendisk structure ----- */
			sd->gd->major = sd_major;
			sd->gd->first_minor = sd->device_id*SD_MINORS;
			sd->gd->fops = &gp_sdcard_ops;
			sd->gd->queue = sd->queue;
			sd->gd->private_data = sd;
			snprintf (sd->gd->disk_name, 32, "sdcard%c", sd->device_id + 'a');
			set_capacity(sd->gd,sd->capacity);
			add_disk(sd->gd);
		}
		goto init_work_end;
	}
	else
	{
		DERROR("Initial fail\n");
		goto init_work_end;
	}
fail_gd:
	/* ----- Then terminate our worker thread ----- */
	kthread_stop(sd->thread);
fail_thread:
	if (sd->sg)
		kfree(sd->sg);
	sd->sg = NULL;
	blk_cleanup_queue (sd->queue);	
init_work_end:	
	sd->timer.expires = jiffies + SD_CD_POLL;
	add_timer(&sd->timer);
}
Example #10
/**
* @brief 	Card initial function.
* @param 	work[in]: Work structure.
* @return 	None.
*/
static void gp_sdcard_work_init(struct work_struct *work)
{
	gpSDInfo_t* sd = container_of(work, gpSDInfo_t,init);
	int pin_handle;
	int ret = 0,i=0;
	int pin_id;

	if(sd->device_id == 0)
		pin_id = GP_PIN_SD0;
	else if(sd->device_id == 1)
		pin_id = GP_PIN_SD1;
	else
		pin_id = GP_PIN_SD2;

	pin_handle = gp_board_pin_func_request( pin_id, GP_BOARD_WAIT_FOREVER);
	if(pin_handle<0)
	{
		DERROR("[%d]: can't get pin handle\n", sd->device_id);
		goto init_work_end;
	}
    /* ----- chris: Set Pin state for SD before power on ----- */
    sd->sd_func->set_power(1);
	/* ----- chris: delay 250ms after card power on ----- */
	msleep(250);
	/* ----- Initial SD card ----- */
	ret = gp_sdcard_cardinit(sd);
	if (ret != 0)
	{
		DERROR("[%d]: initial fail\n",sd->device_id);
		gp_board_pin_func_release(pin_handle);
		goto init_work_end;
	}
	gp_board_pin_func_release(pin_handle);

	if(sd->present==1)
	{
		if(sd->card_type == SDIO)
		{
			sd->pin_handle = gp_board_pin_func_request(pin_id, GP_BOARD_WAIT_FOREVER);
			if(sd->pin_handle<0)
			{
				DERROR("[%d]: can't get pin handle\n", sd->device_id);
				goto init_work_end;
			}
			DEBUG("SDIO card detected\n");
			gp_sdio_insert_device(sd->device_id, sd->RCA);
		}
		else
		{
			unsigned int cnt =0;
			/* ----- Wait 30 seconds for all processes to close their handles ----- */
			while((sd->users)&&cnt<120)
			{
				msleep(250);
				cnt++;
			}
			if(sd->users)
			{
				DERROR("Some handle do not free\n");
			}
			if(sd->status)
			{
				gp_sdcard_blk_put(sd);
				sd->status = 0;
			}
			sd->handle_dma = gp_apbdma0_request(1000);
			if(sd->handle_dma==0)
				goto init_work_end;
			sd->queue = blk_init_queue(gp_sdcard_request, &sd->lock);
			if(sd->queue==NULL)
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_queue;
			}
			blk_queue_ordered(sd->queue, QUEUE_ORDERED_DRAIN, NULL);
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sd->queue);
			blk_queue_logical_block_size(sd->queue, 512);
			blk_queue_max_sectors(sd->queue, SD_MAX_SECTORS );
			blk_queue_max_phys_segments(sd->queue, SD_MAX_PHY_SEGMENTS);
			blk_queue_max_hw_segments(sd->queue, SD_MAX_HW_SEGMENTS);
			blk_queue_max_segment_size(sd->queue, SD_MAX_PHY_SEGMENTS_SIZE);
			/* ----- Initial scatter list ----- */
			sd->sg = kmalloc(sizeof(struct scatterlist) *SD_MAX_PHY_SEGMENTS, GFP_KERNEL);
			if (!sd->sg)
			{
				DERROR("NO MEMORY: queue\n");
				goto fail_thread;
			}
			sg_init_table(sd->sg, SD_MAX_PHY_SEGMENTS);
			init_MUTEX(&sd->thread_sem);
			/* ----- Enable thread ----- */
			sd->thread = kthread_run(gp_sdcard_queue_thread, sd, "sd-qd");
			if (IS_ERR(sd->thread))
			{
				goto fail_thread;
			}
			sd->queue->queuedata = sd;
			/* ----- Check SD card for GP special header ----- */
			if(gp_sdcard_parse_header(sd)<0)
			{
				goto fail_gd;
			}
			/* ----- Setup gendisk structure ----- */
			sd->gd = alloc_disk(SD_MINORS);
			if (sd->gd==NULL)
			{
				DERROR("NO MEMORY: gendisk\n");
				/* fail_thread below cleans up the queue; avoid a double blk_cleanup_queue */
				goto fail_gd;
			}
			/* ----- Set gendisk structure ----- */
			sd->gd->major = sd_major;
			sd->gd->first_minor = sd->device_id*SD_MINORS;
			sd->gd->fops = &gp_sdcard_ops;
			sd->gd->queue = sd->queue;
			sd->gd->private_data = sd;
			snprintf (sd->gd->disk_name, 32, "sdcard%c", sd->device_id + 'a');
			/* ----- Set GP partition ----- */
			if(sd->partition.activity)
			{
				set_capacity(sd->gd,0);
				add_disk(sd->gd);
				for(i=0;i<MAX_SD_PART;i++)
				{
					if(sd->partition.capacity[i]==0)
						continue;
					gp_add_partition(sd->gd,i+1,sd->partition.offset[i],sd->partition.capacity[i],ADDPART_FLAG_WHOLEDISK);
				}
			}
			/* ----- Normal Setting ----- */
			else
			{
				set_capacity(sd->gd,sd->capacity);
				add_disk(sd->gd);
			}
		}
		//DEBUG("Initial success\n");
		goto init_work_end;
	}
	else
	{
		DERROR("Initial fail\n");
		goto init_work_end;
	}
fail_gd:
	/* ----- Then terminate our worker thread ----- */
	kthread_stop(sd->thread);
	sd->thread = NULL;
fail_thread:
	if (sd->sg)
		kfree(sd->sg);
	sd->sg = NULL;
	blk_cleanup_queue (sd->queue);
	sd->queue = NULL;
fail_queue:
	if(sd->handle_dma)
		gp_apbdma0_release(sd->handle_dma);
	sd->handle_dma = 0;
	/* ----- For re-initialize ----- */
	sd->present = 0;
init_work_end:
	sd->timer.expires = jiffies + SD_CD_POLL;
	add_timer(&sd->timer);
}
Example #11
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;
#if 0
	if (!mq) {
#else
	/*
	 * With the USB cable plugged in (charging), removing and re-inserting
	 * the card occasionally hung the system: mq->thread was observed to be
	 * NULL.  Modified by xbw.
	 */
	if (!mq || !mq->thread) {
#endif	
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = __blk_end_request(req, -EIO,
							blk_rq_cur_bytes(req));
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_ANY;	/* was BLK_BOUNCE_HIGH */
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
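A hedged sketch of how a block driver's power-management hooks could pair mmc_queue_suspend() and mmc_queue_resume(); the mmc_blk_data structure and the mmc_get_drvdata() accessor are assumptions used for illustration, not taken from the code above.

/*
 * Sketch only.  mmc_blk_data and mmc_get_drvdata() are assumptions used
 * for illustration; the pairing of suspend and resume is the point.
 */
static int example_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md)
		mmc_queue_suspend(&md->queue);	/* stop the queue, wait for the worker */
	return 0;
}

static int example_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md)
		mmc_queue_resume(&md->queue);	/* let the worker run and restart the queue */
	return 0;
}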

static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = sg_virt(dst);
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = sg_virt(src);
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}
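copy_sg() above shuttles data between two scatterlists of possibly different granularity. Below is a hedged sketch of the kind of bounce-buffer pre/post hooks that would use it; mq->bounce_sg_len is assumed to hold the number of entries currently mapped into mq->bounce_sg for the request.

/*
 * Sketch, not the reference implementation: copy request data into the
 * single-entry bounce scatterlist before a write, and back out after a
 * read.  mq->bounce_sg_len is an assumed field tracking how many entries
 * of mq->bounce_sg are mapped for the current request.
 */
static void example_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	/* gather the request segments into the contiguous bounce buffer */
	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}

static void example_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	/* scatter the bounce buffer back into the request segments */
	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}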
Example #12
int td_linux_block_create(struct td_osdev *dev)
{
	int rc;
	struct request_queue *queue;
	unsigned bio_sector_size = dev->block_params.bio_sector_size;
	unsigned hw_sector_size = dev->block_params.hw_sector_size;

	/* very simple sector size support */
	if (!bio_sector_size || bio_sector_size & 511 || bio_sector_size > 4096) {
		td_os_err(dev, "bio sector size of %u is not supported\n", bio_sector_size);
		return -EINVAL;
	}

	/* MetaData is reported here */
	if (hw_sector_size == 520)
		hw_sector_size = 512;
	if (!hw_sector_size || hw_sector_size & 511 || hw_sector_size > 4096) {
		td_os_err(dev, "hw sector size of %u is not supported\n", hw_sector_size);
		return -EINVAL;
	}

	td_os_notice(dev, " - Set capacity to %llu (%u bytes/sector)\n",
		dev->block_params.capacity, dev->block_params.hw_sector_size);

	/* create a new bio queue */
	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		td_os_err(dev, "Error allocating disk queue.\n");
		rc = -ENOMEM;
		goto error_alloc_queue;
	}

#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
#endif
	
	switch (dev->type) {
	case TD_OSDEV_DEVICE:
		blk_queue_make_request(queue, td_device_make_request);
		dev->_bio_error = td_device_bio_error;
		break;
	case TD_OSDEV_RAID:
		blk_queue_make_request(queue, td_raid_make_request);
		dev->_bio_error = td_raid_bio_error;
		break;
		
	default:
		td_os_err(dev, "Unknown OS type, cannot register block request handler\n");
		rc = -EINVAL;
		goto error_config_queue;
	}
	queue->queuedata = dev;

#if defined QUEUE_FLAG_PLUGGED 
	queue->unplug_fn = td_device_queue_unplug;
#endif

	/* configure queue ordering */

	/* in QUEUE_ORDERED_DRAIN we will get BARRIERS after the queue has
	 * been drained. */
#if defined KABI__blk_queue_ordered

#if KABI__blk_queue_ordered == 2
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN);
#elif KABI__blk_queue_ordered == 3
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN, NULL);
#else
#error unhandled value of KABI__blk_queue_ordered
#endif

#elif defined KABI__blk_queue_flush
	/*
	 * blk_queue_ordered was replaced with blk_queue_flush 
	 * The default implementation is QUEUE_ORDERED_DRAIN
	 */
	blk_queue_flush(queue, 0);
#else
#error undefined KABI__blk_queue_flush or KABI__blk_queue_ordered
#endif

	/* max out the throttling */
#ifdef KABI__blk_queue_max_hw_sectors
	blk_queue_max_hw_sectors(queue, dev->block_params.bio_max_bytes/512);
#elif defined KABI__blk_queue_max_sectors
	blk_queue_max_sectors(queue, dev->block_params.bio_max_bytes/512);
#else
	td_os_err(dev, "No kernel API for maximum sectors\n");
#endif

#if defined KABI__blk_queue_max_segments
	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
#elif defined KABI__blk_queue_max_phys_segments
	blk_queue_max_phys_segments(queue, MAX_SEGMENT_SIZE);
	blk_queue_max_hw_segments(queue, MAX_SEGMENT_SIZE);
#else
	td_os_err(dev, "No kernel API for maximum segments\n");
#endif

	blk_queue_max_segment_size(queue, dev->block_params.bio_max_bytes);

	blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);

	/* setup paged based access */
	td_os_info(dev, "Set queue physical block size to %u\n", hw_sector_size);
#ifdef KABI__blk_queue_physical_block_size
	blk_queue_physical_block_size(queue, hw_sector_size);
#elif defined KABI__blk_queue_hardsect_size
	blk_queue_hardsect_size(queue, hw_sector_size);
#else
	td_os_err(dev, "No kernel API for physical sector size\n");
#endif

#ifdef KABI__blk_queue_logical_block_size
	td_os_info(dev, "Set queue logical block size to %u\n", bio_sector_size);
	blk_queue_logical_block_size(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for logical block size\n");
#endif
#ifdef KABI__blk_queue_io_min
	td_os_info(dev, "Set queue io_min to %u\n", bio_sector_size);
	blk_queue_io_min(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for minimum IO size\n");
#endif
#ifdef KABI__blk_queue_io_opt
	td_os_info(dev, "Set queue io_opt to %u\n", dev->block_params.bio_max_bytes);
	blk_queue_io_opt(queue,  dev->block_params.bio_max_bytes);
#else
	td_os_err(dev, "No kernel API for optimal IO size\n");
#endif

#if 0
	if (dev->block_params.discard)
	{
		int did_something = 0;
#if defined KABI__blk_queue_discard_granularity
		queue->limits.discard_granularity = bio_sector_size;
		did_something++;
#endif
#ifdef KABI__blk_queue_max_discard_sectors
		/* 0xFFFF (max sector size of chunk on trim) * 64  * # SSD */
		blk_queue_max_discard_sectors(queue, TD_MAX_DISCARD_LBA_COUNT * 2);
		did_something++;
#endif
#ifdef KABI__blk_queue_discard_zeroes_data
		queue->limits.discard_zeroes_data = 1;
		did_something++;
#endif
#ifdef KABI__queue_flag_set_unlocked
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
		did_something++;
#endif
		/* Maybe some day.. But not today. 
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, queue);
		*/
		if (did_something)
			td_os_info(dev, "Enabling discard support\n");
		else
			td_os_notice(dev, "No kernel API for discard support\n");
	} else {
		td_os_info(dev, "No DISCARD support enabled\n");
	}
#else
	/* bug 7444 */
	if (dev->block_params.discard)
		td_os_info(dev, "Device supports DISCARD but is currently being forced disabled\n");
#endif

	/*  assign */
	dev->queue = queue;

	return 0;

error_config_queue:
	blk_cleanup_queue(queue);

error_alloc_queue:
	return rc;
}