static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); mutex_init(&mq->complete_lock); init_waitqueue_head(&mq->wait); }
int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t *lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = 0;

	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);

	if (!cq->bounce_buf) {
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
				 GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
	}

	/* Change the card I/O scheduler from CFQ to deadline. */
	cq->queue->queuedata = cq;
	elevator_exit(cq->queue->elevator);
	cq->queue->elevator = NULL;
	ret = elevator_init(cq->queue, "deadline");
	if (ret) {
		printk(KERN_ERR "[card_init_queue] elevator_init deadline fail\n");
		blk_cleanup_queue(cq->queue);
		return ret;
	}

	sema_init(&cq->thread_sem, 1);

	cq->thread = kthread_run(card_queue_thread, cq, "%s_queue", card->name);
	if (IS_ERR(cq->thread)) {
		ret = PTR_ERR(cq->thread);
		kfree(cq->sg);
		cq->sg = NULL;
		blk_cleanup_queue(cq->queue);
		return ret;
	}

	cq->nb.notifier_call = card_reboot_notifier;
	register_reboot_notifier(&cq->nb);

	return ret;
}
/* * Alloc bounce buf for read/write numbers of pages in one request */ static int card_init_bounce_buf(struct card_queue *cq, struct memory_card *card) { int ret=0; struct card_host *host = card->host; unsigned int bouncesz; bouncesz = CARD_QUEUE_BOUNCESZ; if (bouncesz > host->max_req_size) bouncesz = host->max_req_size; if (bouncesz >= PAGE_CACHE_SIZE) { //cq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); cq->bounce_buf = host->dma_buf; if (!cq->bounce_buf) { printk(KERN_WARNING "%s: unable to " "allocate bounce buffer\n", card->name); } } if (cq->bounce_buf) { blk_queue_bounce_limit(cq->queue, BLK_BOUNCE_HIGH); blk_queue_max_hw_sectors(cq->queue, bouncesz / 512); blk_queue_physical_block_size(cq->queue, bouncesz); blk_queue_max_segments(cq->queue, bouncesz / PAGE_CACHE_SIZE); blk_queue_max_segment_size(cq->queue, bouncesz); cq->queue->queuedata = cq; cq->req = NULL; cq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL); if (!cq->sg) { ret = -ENOMEM; blk_cleanup_queue(cq->queue); return ret; } sg_init_table(cq->sg, 1); cq->bounce_sg = kmalloc(sizeof(struct scatterlist) * bouncesz / PAGE_CACHE_SIZE, GFP_KERNEL); if (!cq->bounce_sg) { ret = -ENOMEM; kfree(cq->sg); cq->sg = NULL; blk_cleanup_queue(cq->queue); return ret; } sg_init_table(cq->bounce_sg, bouncesz / PAGE_CACHE_SIZE); } return 0; }
/* * Initializes the block layer interfaces. */ static int sd_init_blk_dev(struct sd_host *host) { struct gendisk *disk; struct request_queue *queue; int channel; int retval; channel = to_channel(exi_get_exi_channel(host->exi_device)); /* queue */ retval = -ENOMEM; spin_lock_init(&host->queue_lock); queue = blk_init_queue(sd_request_func, &host->queue_lock); if (!queue) { sd_printk(KERN_ERR, "error initializing queue\n"); goto err_blk_init_queue; } blk_queue_dma_alignment(queue, EXI_DMA_ALIGN); blk_queue_max_segments(queue, 1); blk_queue_max_hw_sectors(queue, 8); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue); queue->queuedata = host; host->queue = queue; /* disk */ disk = alloc_disk(1 << MMC_SHIFT); if (!disk) { sd_printk(KERN_ERR, "error allocating disk\n"); goto err_alloc_disk; } disk->major = SD_MAJOR; disk->first_minor = channel << MMC_SHIFT; disk->fops = &sd_fops; sprintf(disk->disk_name, "%s%c", SD_NAME, 'a' + channel); disk->private_data = host; disk->queue = host->queue; host->disk = disk; retval = 0; goto out; err_alloc_disk: blk_cleanup_queue(host->queue); host->queue = NULL; err_blk_init_queue: out: return retval; }
int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock)
{
	int ret;

	DBGPRN_FUNC_NAME;

	/* The first argument is the function that wakes up the queue thread. */
	bq->queue = blk_init_queue(cyasblkdev_request, lock);
	if (!bq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(bq->queue, cyasblkdev_prep_request);

	blk_queue_bounce_limit(bq->queue, BLK_BOUNCE_ANY);
	blk_queue_max_hw_sectors(bq->queue, Q_MAX_SECTORS);

	/*
	 * The HAL/driver can merge scattered segments and handle them in one
	 * pass, so allow up to Q_MAX_SGS segments per request.  This must be
	 * smaller than what the HAL can handle.
	 */
	blk_queue_max_segments(bq->queue, Q_MAX_SGS);
	blk_queue_max_segment_size(bq->queue, 512 * Q_MAX_SECTORS);

	bq->queue->queuedata = bq;
	bq->req = NULL;

	init_completion(&bq->thread_complete);
	init_waitqueue_head(&bq->thread_wq);
	sema_init(&bq->thread_sem, 1);

	ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
	if (ret >= 0) {
		/* Wait until the thread is spawned. */
		wait_for_completion(&bq->thread_complete);

		/* Reinitialize the completion for later use. */
		init_completion(&bq->thread_complete);
		ret = 0;
	}

	return ret;
}
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); /* Initialize thread_sem even if it is not used */ sema_init(&mq->thread_sem, 1); }
/** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue * @lock: queue lock * * Initialise a MMC card request queue. */ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; int ret; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = *mmc_dev(host)->dma_mask; mq->card = card; mq->queue = blk_init_queue(mmc_request, lock); if (!mq->queue) return -ENOMEM; mq->queue->queuedata = mq; mq->req = NULL; blk_queue_prep_rq(mq->queue, mmc_prep_request); blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); #ifdef CONFIG_MMC_BLOCK_BOUNCE if (host->max_hw_segs == 1) { unsigned int bouncesz; bouncesz = MMC_QUEUE_BOUNCESZ; if (bouncesz > host->max_req_size) bouncesz = host->max_req_size; if (bouncesz > host->max_seg_size) bouncesz = host->max_seg_size; if (bouncesz > (host->max_blk_count * 512)) bouncesz = host->max_blk_count * 512; if (bouncesz > 512) { mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); if (!mq->bounce_buf) { printk(KERN_WARNING "%s: unable to " "allocate bounce buffer\n", mmc_card_name(card)); } } if (mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); blk_queue_max_segments(mq->queue, bouncesz / 512); blk_queue_max_segment_size(mq->queue, bouncesz); mq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, 1); mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * bouncesz / 512, GFP_KERNEL); if (!mq->bounce_sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->bounce_sg, bouncesz / 512); } } #endif if (!mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_hw_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs, GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, host->max_phys_segs); } init_MUTEX(&mq->thread_sem); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd"); if (IS_ERR(mq->thread)) { ret = PTR_ERR(mq->thread); goto free_bounce_sg; } return 0; free_bounce_sg: if (mq->bounce_sg) kfree(mq->bounce_sg); mq->bounce_sg = NULL; cleanup_queue: if (mq->sg) kfree(mq->sg); mq->sg = NULL; if (mq->bounce_buf) kfree(mq->bounce_buf); mq->bounce_buf = NULL; blk_cleanup_queue(mq->queue); return ret; }
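/*
 * Illustrative, self-contained sketch (not part of any driver above) of the
 * arithmetic the CONFIG_MMC_BLOCK_BOUNCE path and the non-bounce fallback use
 * to size the queue.  The MMC_QUEUE_BOUNCESZ value and the sample host caps
 * below are assumptions for demonstration only.
 */
#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ	65536	/* assumed default; drivers define their own */

struct host_caps {
	unsigned int max_req_size;	/* bytes per request */
	unsigned int max_seg_size;	/* bytes per segment */
	unsigned int max_blk_count;	/* 512-byte blocks per request */
};

/* Clamp the bounce buffer the same way the bounce path above does. */
static unsigned int bounce_size(const struct host_caps *h)
{
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	if (bouncesz > h->max_req_size)
		bouncesz = h->max_req_size;
	if (bouncesz > h->max_seg_size)
		bouncesz = h->max_seg_size;
	if (bouncesz > h->max_blk_count * 512)
		bouncesz = h->max_blk_count * 512;

	return bouncesz;	/* only used by the driver when > 512 */
}

int main(void)
{
	struct host_caps h = { .max_req_size = 524288, .max_seg_size = 65536,
			       .max_blk_count = 65535 };
	unsigned int bouncesz = bounce_size(&h);
	unsigned int max_hw;

	/* Bounce path: sector and segment limits both derive from bouncesz. */
	printf("bouncesz=%u -> max_hw_sectors=%u, max_segments=%u\n",
	       bouncesz, bouncesz / 512, bouncesz / 512);

	/* Non-bounce path: min(max_blk_count, max_req_size / 512). */
	max_hw = h.max_blk_count < h.max_req_size / 512 ?
		 h.max_blk_count : h.max_req_size / 512;
	printf("non-bounce max_hw_sectors=%u\n", max_hw);
	return 0;
}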
static int
__zvol_create_minor(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	unsigned minor = 0;
	int error = 0;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv) {
		error = EEXIST;
		goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
	if (error)
		goto out_doi;

	/* Make sure we have the key loaded if we need one. */
	error = dsl_crypto_key_inherit(name);
	if (error != 0 && error != EEXIST)
		goto out_dmu_objset_disown;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = zvol_find_minor(&minor);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = EAGAIN;
		goto out_dmu_objset_disown;
	}

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
	blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
#ifdef HAVE_BLK_QUEUE_DISCARD
	blk_queue_max_discard_sectors(zv->zv_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#endif
#ifdef HAVE_BLK_QUEUE_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif

	if (zil_replay_disable)
		zil_destroy(dmu_objset_zil(os), B_FALSE);
	else
		zil_replay(os, zv, zvol_replay_vector);

	/*
	 * Clear the zvol's reference to the objset only on the success path;
	 * zv is NULL when the error paths below are taken.
	 */
	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, zvol_tag);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
out:
	if (error == 0) {
		zvol_insert(zv);
		add_disk(zv->zv_disk);
	}

	return (error);
}
static int htifblk_probe(struct device *dev) { static unsigned int index = 0; static const char prefix[] = " size="; struct htif_device *htif_dev; struct htifblk_device *htifblk_dev; struct gendisk *disk; struct request_queue *queue; const char *str; u64 size; int ret; dev_info(dev, "detected disk\n"); htif_dev = to_htif_dev(dev); str = strstr(htif_dev->id, prefix); if (unlikely(str == NULL || kstrtou64(str + sizeof(prefix) - 1, 10, &size))) { dev_err(dev, "error determining size of disk\n"); return -ENODEV; } if (unlikely(size & (SECTOR_SIZE - 1))) { dev_warn(dev, "disk size not a multiple of sector size:" " %llu\n", size); } ret = -ENOMEM; htifblk_dev = devm_kzalloc(dev, sizeof(struct htifblk_device), GFP_KERNEL); if (unlikely(htifblk_dev == NULL)) goto out; htifblk_dev->size = size; htifblk_dev->dev = htif_dev; htifblk_dev->tag = index; spin_lock_init(&htifblk_dev->lock); disk = alloc_disk(1); if (unlikely(disk == NULL)) goto out; queue = blk_init_queue(htifblk_request, &htifblk_dev->lock); if (unlikely(queue == NULL)) goto out_put_disk; queue->queuedata = htifblk_dev; blk_queue_max_segments(queue, 1); blk_queue_dma_alignment(queue, HTIF_ALIGN - 1); disk->queue = queue; disk->major = major; disk->minors = 1; disk->first_minor = 0; disk->fops = &htifblk_fops; set_capacity(disk, size >> SECTOR_SIZE_SHIFT); snprintf(disk->disk_name, DISK_NAME_LEN - 1, "htifblk%u", index++); htifblk_dev->disk = disk; add_disk(disk); dev_info(dev, "added %s\n", disk->disk_name); ret = htif_request_irq(htif_dev, htifblk_isr); if (unlikely(ret)) goto out_del_disk; dev_set_drvdata(dev, htifblk_dev); return 0; out_del_disk: del_gendisk(disk); blk_cleanup_queue(disk->queue); out_put_disk: put_disk(disk); out: return ret; }
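/*
 * Illustrative, self-contained sketch of the size checks in htifblk_probe():
 * warn when the byte size is not sector-aligned and convert bytes to the
 * 512-byte sectors expected by set_capacity().  SECTOR_SIZE = 512 and
 * SECTOR_SIZE_SHIFT = 9 are assumed here, matching the shift used above.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE		512
#define SECTOR_SIZE_SHIFT	9

int main(void)
{
	uint64_t size = 1048577;	/* bytes parsed from the HTIF id string */

	/* Same test as the probe: any of the low 9 bits set means misaligned. */
	if (size & (SECTOR_SIZE - 1))
		printf("warning: size %llu is not a multiple of %u\n",
		       (unsigned long long)size, SECTOR_SIZE);

	/* set_capacity() takes 512-byte sectors, hence the shift by 9. */
	printf("capacity = %llu sectors\n",
	       (unsigned long long)(size >> SECTOR_SIZE_SHIFT));
	return 0;
}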
/** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue * @lock: queue lock * @subname: partition subname * * Initialise a MMC card request queue. */ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock, const char *subname) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; int ret; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = *mmc_dev(host)->dma_mask; mq->card = card; mq->queue = blk_init_queue(mmc_request, lock); if (!mq->queue) return -ENOMEM; mq->queue->queuedata = mq; mq->req = NULL; blk_queue_prep_rq(mq->queue, mmc_prep_request); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); if (mmc_can_erase(card)) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); mq->queue->limits.max_discard_sectors = UINT_MAX; if (card->erased_byte == 0) mq->queue->limits.discard_zeroes_data = 1; mq->queue->limits.discard_granularity = card->pref_erase << 9; if (mmc_can_secure_erase_trim(card)) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, mq->queue); } #ifdef CONFIG_MMC_BLOCK_BOUNCE if (host->max_segs == 1) { unsigned int bouncesz; bouncesz = MMC_QUEUE_BOUNCESZ; if (bouncesz > host->max_req_size) bouncesz = host->max_req_size; if (bouncesz > host->max_seg_size) bouncesz = host->max_seg_size; if (bouncesz > (host->max_blk_count * 512)) bouncesz = host->max_blk_count * 512; if (bouncesz > 512) { mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); if (!mq->bounce_buf) { printk(KERN_WARNING "%s: unable to " "allocate bounce buffer\n", mmc_card_name(card)); } } if (mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); blk_queue_max_segments(mq->queue, bouncesz / 512); blk_queue_max_segment_size(mq->queue, bouncesz); mq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, 1); mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * bouncesz / 512, GFP_KERNEL); if (!mq->bounce_sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->bounce_sg, bouncesz / 512); } } #endif if (!mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_segs, GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, host->max_segs); } sema_init(&mq->thread_sem, 1); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", host->index, subname ? subname : ""); if (IS_ERR(mq->thread)) { ret = PTR_ERR(mq->thread); goto free_bounce_sg; } return 0; free_bounce_sg: if (mq->bounce_sg) kfree(mq->bounce_sg); mq->bounce_sg = NULL; cleanup_queue: if (mq->sg) kfree(mq->sg); mq->sg = NULL; if (mq->bounce_buf) kfree(mq->bounce_buf); mq->bounce_buf = NULL; blk_cleanup_queue(mq->queue); return ret; }
/* Create system device file for the enabled slot. */ ndas_error_t slot_enable(int s) { ndas_error_t ret = NDAS_ERROR_INTERNAL; int got; struct ndas_slot* slot = NDAS_GET_SLOT_DEV(s); dbgl_blk(3, "ing s#=%d slot=%p",s, slot); got = try_module_get(THIS_MODULE); MOD_INC_USE_COUNT; if ( slot == NULL) goto out1; if ( slot->enabled ) { dbgl_blk(1, "already enabled"); ret = NDAS_OK; goto out2; } ret = ndas_query_slot(s, &slot->info); if ( !NDAS_SUCCESS(ret) ) { dbgl_blk(1, "fail ndas_query_slot"); goto out2; } dbgl_blk(1, "mode=%d", slot->info.mode); slot->enabled = 1; #if LINUX_VERSION_25_ABOVE slot->disk = NULL; spin_lock_init(&slot->lock); slot->queue = blk_init_queue( nblk_request_proc, &slot->lock ); #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33)) blk_queue_max_phys_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33)) blk_queue_max_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //renamed in 2.6.34 //blk_queue_max_hw_segments(slot->queue, ND_BLK_MAX_REQ_SEGMENT); //removed in 2.6.34 #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)) blk_queue_logical_block_size(slot->queue, slot->info.sector_size); #else blk_queue_hardsect_size(slot->queue, slot->info.sector_size); #endif #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,33)) blk_queue_max_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS); #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,33)) blk_queue_max_hw_sectors(slot->queue, DEFAULT_ND_MAX_SECTORS); //renamed in 2.6.34 #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)) // Set ordered queue property. #if 0 blk_queue_ordered(slot->queue, QUEUE_ORDERED_TAG_FLUSH, nblk_prepare_flush); #endif #endif slot->disk = alloc_disk(NR_PARTITION); if ( slot->disk == NULL ) { slot->enabled = 0; dbgl_blk(1, "fail alloc disk"); goto out2; } slot->disk->major = NDAS_BLK_MAJOR; slot->disk->first_minor = (s - NDAS_FIRST_SLOT_NR) << PARTN_BITS; slot->disk->fops = &ndas_fops; slot->disk->queue = slot->queue; slot->disk->private_data = (void*) (long)s; slot->queue_flags = 0; dbgl_blk(1, "mode=%d", slot->info.mode); if ( slot->info.mode == NDAS_DISK_MODE_SINGLE || slot->info.mode == NDAS_DISK_MODE_ATAPI || slot->info.mode == NDAS_DISK_MODE_MEDIAJUKE) { char short_serial[NDAS_SERIAL_SHORT_LENGTH + 1]; if (strlen(slot->info.ndas_serial) > 8) { /* Extended serial number is too long as sysfs object name. 
Use last 8 digit only */ strncpy( short_serial, slot->info.ndas_serial + ( NDAS_SERIAL_EXTEND_LENGTH - NDAS_SERIAL_SHORT_LENGTH), 8); } else { strncpy(short_serial, slot->info.ndas_serial, 8); } short_serial[8] =0; snprintf(slot->devname, sizeof(slot->devname)-1, "ndas-%s-%d", short_serial, slot->info.unit ); strcpy(slot->disk->disk_name, slot->devname); dbgl_blk(1, "just set slot->disk->%s, slot->%s", slot->disk->disk_name, slot->devname ); #if !LINUX_VERSION_DEVFS_REMOVED_COMPLETELY strcpy(slot->disk->devfs_name, slot->devname); #endif set_capacity(slot->disk, slot->info.sectors); dbgl_blk(1, "just set capacity slot->disk, slot->info.sectors:%llu", slot->info.sectors); } else { /* Other mode is not implemented */ } if (slot->info.mode == NDAS_DISK_MODE_ATAPI) { slot->disk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE; dbgl_blk(1, "just set slot->disk->flags"); #if 0 kref_init(&slot->ndascd.kref); #endif } dbgl_blk(4, "adding disk: slot=%d, first_minor=%d, capacity=%llu", s, slot->disk->first_minor, slot->info.sectors); add_disk(slot->disk); dbgl_blk(1, "added disk: slot=%d", s); #ifndef NDAS_DONT_CARE_SCHEDULER #if LINUX_VERSION_AVOID_CFQ_SCHEDULER #if CONFIG_SYSFS sal_assert(slot->queue->kobj.ktype); sal_assert(slot->queue->kobj.ktype->default_attrs); { struct queue_sysfs_entry { struct attribute attr; ssize_t (*show)(struct request_queue *, char *); ssize_t (*store)(struct request_queue *, const char *, size_t); }; struct attribute *attr = slot->queue->kobj.ktype->default_attrs[4]; struct queue_sysfs_entry *entry = container_of(attr , struct queue_sysfs_entry, attr); //dbgl_blk(1, "now to set the scheduler: slot-queue=%d, scheduler==%s, scheduler_len=%d", slot->queue, NDAS_QUEUE_SCHEDULER, strlen(NDAS_QUEUE_SCHEDULER)); entry->store(slot->queue,NDAS_QUEUE_SCHEDULER,strlen(NDAS_QUEUE_SCHEDULER)); } #else #error "NDAS driver doesn't work well with CFQ scheduler of 2.6.13 or above kernel." \ "if you forcely want to use it, please specify compiler flags by " \ "export NDAS_EXTRA_CFLAGS=\"-DNDAS_DONT_CARE_SCHEDULER\" "\ "then compile the source again." #endif #endif #endif printk("ndas: /dev/%s enabled\n" , slot->devname); #else /* < LINUX_VERSION_25_ABOVE */ dbgl_blk(4, "blksize=%d", DEFAULT_ND_BLKSIZE); dbgl_blk(4, "size=%lld", slot->info.sectors); dbgl_blk(1, "hardsectsize=%d", slot->info.sector_size); ndas_ops_set_blk_size( s, DEFAULT_ND_BLKSIZE, slot->info.sectors, slot->info.sector_size, DEFAULT_ND_MAX_SECTORS ); #ifdef NDAS_DEVFS printk("ndas: /dev/nd/disc%d enabled\n" , s - NDAS_FIRST_SLOT_NR); #else printk("ndas: /dev/nd%c enabled\n" , s + 'a' - NDAS_FIRST_SLOT_NR); #endif #endif //up(&slot->mutex); #ifdef NDAS_MSHARE if(NDAS_GET_SLOT_DEV(s)->info.mode == NDAS_DISK_MODE_MEDIAJUKE) { ndas_CheckFormat(s); } #endif #if !LINUX_VERSION_25_ABOVE ndas_ops_read_partition(s); #endif dbgl_blk(3, "ed"); return NDAS_OK; out2: //up(&slot->mutex); out1: if ( got ) module_put(THIS_MODULE); MOD_DEC_USE_COUNT; return ret; }
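/*
 * Illustrative, self-contained sketch of the serial-number truncation in
 * slot_enable(): keep only the last 8 digits of an extended serial when
 * building the sysfs/device name.  NDAS_SERIAL_EXTEND_LENGTH = 16 is an
 * assumed value for demonstration; the driver defines the real one.
 */
#include <stdio.h>
#include <string.h>

#define NDAS_SERIAL_SHORT_LENGTH	8
#define NDAS_SERIAL_EXTEND_LENGTH	16	/* assumed extended length */

static void ndas_short_serial(const char *serial,
			      char short_serial[NDAS_SERIAL_SHORT_LENGTH + 1])
{
	if (strlen(serial) > 8)
		strncpy(short_serial,
			serial + (NDAS_SERIAL_EXTEND_LENGTH - NDAS_SERIAL_SHORT_LENGTH),
			8);
	else
		strncpy(short_serial, serial, 8);
	short_serial[8] = '\0';
}

int main(void)
{
	char buf[NDAS_SERIAL_SHORT_LENGTH + 1];
	char devname[32];

	ndas_short_serial("0123456789ABCDEF", buf);
	snprintf(devname, sizeof(devname), "ndas-%s-%d", buf, 0);
	printf("%s\n", devname);	/* prints: ndas-89ABCDEF-0 */
	return 0;
}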
/** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue * @lock: queue lock * * Initialise a MMC card request queue. */ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; int ret; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = *mmc_dev(host)->dma_mask; mq->card = card; mq->queue = blk_init_queue(mmc_request, lock); if (!mq->queue) return -ENOMEM; mq->queue->queuedata = mq; mq->req = NULL; blk_queue_prep_rq(mq->queue, mmc_prep_request); blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); /* Set max discard size, << 11 converts to megabytes in sectors */ blk_queue_max_discard_sectors(mq->queue, 16 << 11); if (card->csd.cmdclass & CCC_ERASE) queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); /* * Calculating a correct span is way to messy if this * assumption is broken, so remove the erase support */ if (unlikely(mmc_card_blockaddr(card) && (card->csd.erase_size % 512))) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mq->queue); #ifdef CONFIG_MMC_BLOCK_BOUNCE if (host->max_hw_segs == 1) { unsigned int bouncesz; bouncesz = MMC_QUEUE_BOUNCESZ; if (bouncesz > host->max_req_size) bouncesz = host->max_req_size; if (bouncesz > host->max_seg_size) bouncesz = host->max_seg_size; if (bouncesz > (host->max_blk_count * 512)) bouncesz = host->max_blk_count * 512; if (bouncesz > 512) { mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); if (!mq->bounce_buf) { printk(KERN_WARNING "%s: unable to " "allocate bounce buffer\n", mmc_card_name(card)); } } if (mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); blk_queue_max_segments(mq->queue, bouncesz / 512); blk_queue_max_segment_size(mq->queue, bouncesz); mq->sg = kmalloc(sizeof(struct scatterlist), GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, 1); mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * bouncesz / 512, GFP_KERNEL); if (!mq->bounce_sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->bounce_sg, bouncesz / 512); } } #endif if (!mq->bounce_buf) { blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_hw_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); mq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs, GFP_KERNEL); if (!mq->sg) { ret = -ENOMEM; goto cleanup_queue; } sg_init_table(mq->sg, host->max_phys_segs); } init_MUTEX(&mq->thread_sem); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd"); if (IS_ERR(mq->thread)) { ret = PTR_ERR(mq->thread); goto free_bounce_sg; } return 0; free_bounce_sg: if (mq->bounce_sg) kfree(mq->bounce_sg); mq->bounce_sg = NULL; cleanup_queue: if (mq->sg) kfree(mq->sg); mq->sg = NULL; if (mq->bounce_buf) kfree(mq->bounce_buf); mq->bounce_buf = NULL; blk_cleanup_queue(mq->queue); return ret; }
/** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue * @lock: queue lock * @subname: partition subname * * Initialise a MMC card request queue. */ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock, const char *subname) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; bool bounce = false; int ret = -ENOMEM; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; mq->card = card; mq->queue = blk_init_queue(mmc_request_fn, lock); if (!mq->queue) return -ENOMEM; mq->qdepth = 2; mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req), GFP_KERNEL); if (!mq->mqrq) goto blk_cleanup; mq->mqrq_cur = &mq->mqrq[0]; mq->mqrq_prev = &mq->mqrq[1]; mq->queue->queuedata = mq; blk_queue_prep_rq(mq->queue, mmc_prep_request); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); #ifdef CONFIG_MMC_BLOCK_BOUNCE if (host->max_segs == 1) { unsigned int bouncesz; bouncesz = MMC_QUEUE_BOUNCESZ; if (bouncesz > host->max_req_size) bouncesz = host->max_req_size; if (bouncesz > host->max_seg_size) bouncesz = host->max_seg_size; if (bouncesz > (host->max_blk_count * 512)) bouncesz = host->max_blk_count * 512; if (bouncesz > 512 && mmc_queue_alloc_bounce_bufs(mq, bouncesz)) { blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); blk_queue_max_segments(mq->queue, bouncesz / 512); blk_queue_max_segment_size(mq->queue, bouncesz); ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz); if (ret) goto cleanup_queue; bounce = true; } } #endif if (!bounce) { blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); ret = mmc_queue_alloc_sgs(mq, host->max_segs); if (ret) goto cleanup_queue; } sema_init(&mq->thread_sem, 1); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", host->index, subname ? subname : ""); if (IS_ERR(mq->thread)) { ret = PTR_ERR(mq->thread); goto cleanup_queue; } return 0; cleanup_queue: mmc_queue_reqs_free_bufs(mq); kfree(mq->mqrq); mq->mqrq = NULL; blk_cleanup: blk_cleanup_queue(mq->queue); return ret; }
int card_init_queue(struct card_queue *cq, struct memory_card *card,
		    spinlock_t *lock)
{
	struct card_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret = 0, card_queue_num;
	struct card_queue_list *cq_node_current;
	struct card_queue_list *cq_node_prev = NULL;

	if (host->parent->dma_mask && *host->parent->dma_mask)
		limit = *host->parent->dma_mask;

	cq->card = card;
	cq->queue = blk_init_queue(card_request, lock);
	if (!cq->queue)
		return -ENOMEM;

	blk_queue_prep_rq(cq->queue, card_prep_request);
	card_init_bounce_buf(cq, card);

	if (!cq->bounce_buf) {
		blk_queue_bounce_limit(cq->queue, limit);
		blk_queue_max_hw_sectors(cq->queue, host->max_sectors);
		blk_queue_max_segments(cq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(cq->queue, host->max_seg_size);

		cq->queue->queuedata = cq;
		cq->req = NULL;

		cq->sg = kmalloc(sizeof(struct scatterlist) * host->max_phys_segs,
				 GFP_KERNEL);
		if (!cq->sg) {
			ret = -ENOMEM;
			blk_cleanup_queue(cq->queue);
			return ret;
		}
	}

	if (card_queue_head == NULL) {
		card_queue_head = kmalloc(sizeof(struct card_queue_list),
					  GFP_KERNEL);
		if (card_queue_head == NULL)
			return -ENOMEM;

		card_queue_head->cq = cq;
		card_queue_head->cq_num = 0;
		card_queue_head->cq_flag = 0;
		card_queue_head->cq_next = NULL;

		init_completion(&card_thread_complete);
		init_waitqueue_head(&card_thread_wq);
		sema_init(&card_thread_sem, 1);

		host->queue_task = kthread_run(card_queue_thread, cq,
					       "card_queue");
		if (IS_ERR(host->queue_task))
			return PTR_ERR(host->queue_task);

		/* Wait until the queue thread is up before returning. */
		wait_for_completion(&card_thread_complete);
		init_completion(&card_thread_complete);
		return 0;
	}

	/* Append this queue at the tail of the existing list. */
	card_queue_num = 0;
	cq_node_current = card_queue_head;
	do {
		card_queue_num = cq_node_current->cq_num;
		cq_node_prev = cq_node_current;
		cq_node_current = cq_node_current->cq_next;
	} while (cq_node_current != NULL);

	cq_node_current = kmalloc(sizeof(struct card_queue_list), GFP_KERNEL);
	if (cq_node_current == NULL)
		return -ENOMEM;

	cq_node_prev->cq_next = cq_node_current;
	cq_node_current->cq = cq;
	cq_node_current->cq_next = NULL;
	cq_node_current->cq_num = ++card_queue_num;
	cq_node_current->cq_flag = 0;

	return ret;
}
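/*
 * Illustrative, self-contained sketch of the card_queue_list bookkeeping
 * above: walk to the tail, remember the last sequence number, then append a
 * new node numbered one higher.  The struct below is a simplified stand-in,
 * not the driver's definition.
 */
#include <stdio.h>
#include <stdlib.h>

struct cq_node {
	int cq_num;
	struct cq_node *cq_next;
};

static struct cq_node *cq_list_append(struct cq_node **head)
{
	struct cq_node *node, *prev = NULL, *cur = *head;
	int num = 0;

	while (cur) {
		num = cur->cq_num;
		prev = cur;
		cur = cur->cq_next;
	}

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;

	node->cq_num = prev ? num + 1 : 0;
	node->cq_next = NULL;
	if (prev)
		prev->cq_next = node;
	else
		*head = node;
	return node;
}

int main(void)
{
	struct cq_node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct cq_node *n = cq_list_append(&head);

		if (n)
			printf("appended node %d\n", n->cq_num);
	}
	return 0;
}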
/** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue * @lock: queue lock * @subname: partition subname * * Initialise a MMC card request queue. */ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock, const char *subname) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; int ret; struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = *mmc_dev(host)->dma_mask; mq->card = card; mq->queue = blk_init_queue(mmc_request, lock); if (!mq->queue) return -ENOMEM; memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur)); memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev)); mq->mqrq_cur = mqrq_cur; mq->mqrq_prev = mqrq_prev; mq->queue->queuedata = mq; blk_queue_prep_rq(mq->queue, mmc_prep_request); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); #ifdef CONFIG_MMC_BLOCK_BOUNCE if (host->max_segs == 1) { unsigned int bouncesz; if(!mmc_card_sd(card)) bouncesz = MMC_QUEUE_BOUNCESZ; else bouncesz = MMC_QUEUE_SD_BOUNCESZ; if (bouncesz > host->max_req_size) bouncesz = host->max_req_size; if (bouncesz > host->max_seg_size) bouncesz = host->max_seg_size; if (bouncesz > (host->max_blk_count * 512)) bouncesz = host->max_blk_count * 512; if (bouncesz > 512) { if(!mmc_card_sd(card)) mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); else mqrq_cur->bounce_buf = mmc_queue_cur_bounce_buf; if (!mqrq_cur->bounce_buf) { printk(KERN_WARNING "%s: unable to " "allocate bounce cur buffer\n", mmc_card_name(card)); } if(!mmc_card_sd(card)) mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); else mqrq_prev->bounce_buf = mmc_queue_prev_bounce_buf; if (!mqrq_prev->bounce_buf) { printk(KERN_WARNING "%s: unable to " "allocate bounce prev buffer\n", mmc_card_name(card)); kfree(mqrq_cur->bounce_buf); mqrq_cur->bounce_buf = NULL; } } if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) { blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); blk_queue_max_segments(mq->queue, bouncesz / 512); blk_queue_max_segment_size(mq->queue, bouncesz); mqrq_cur->sg = mmc_alloc_sg(1, &ret); if (ret) goto cleanup_queue; mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret); if (ret) goto cleanup_queue; mqrq_prev->sg = mmc_alloc_sg(1, &ret); if (ret) goto cleanup_queue; mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret); if (ret) goto cleanup_queue; } } #endif if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) { blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret); if (ret) goto cleanup_queue; mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret); if (ret) goto cleanup_queue; } sema_init(&mq->thread_sem, 1); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", host->index, subname ? 
subname : ""); if (IS_ERR(mq->thread)) { ret = PTR_ERR(mq->thread); goto free_bounce_sg; } return 0; free_bounce_sg: kfree(mqrq_cur->bounce_sg); mqrq_cur->bounce_sg = NULL; kfree(mqrq_prev->bounce_sg); mqrq_prev->bounce_sg = NULL; cleanup_queue: kfree(mqrq_cur->sg); mqrq_cur->sg = NULL; if(!mmc_card_sd(card)) kfree(mqrq_cur->bounce_buf); mqrq_cur->bounce_buf = NULL; kfree(mqrq_prev->sg); mqrq_prev->sg = NULL; if(!mmc_card_sd(card)) kfree(mqrq_prev->bounce_buf); mqrq_prev->bounce_buf = NULL; blk_cleanup_queue(mq->queue); return ret; }
/* * Create a block device minor node and setup the linkage between it * and the specified volume. Once this function returns the block * device is live and ready for use. */ static int zvol_create_minor_impl(const char *name) { zvol_state_t *zv; objset_t *os; dmu_object_info_t *doi; uint64_t volsize; uint64_t len; unsigned minor = 0; int error = 0; mutex_enter(&zvol_state_lock); zv = zvol_find_by_name(name); if (zv) { error = SET_ERROR(EEXIST); goto out; } doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP); error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os); if (error) goto out_doi; error = dmu_object_info(os, ZVOL_OBJ, doi); if (error) goto out_dmu_objset_disown; error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize); if (error) goto out_dmu_objset_disown; error = zvol_find_minor(&minor); if (error) goto out_dmu_objset_disown; zv = zvol_alloc(MKDEV(zvol_major, minor), name); if (zv == NULL) { error = SET_ERROR(EAGAIN); goto out_dmu_objset_disown; } if (dmu_objset_is_snapshot(os)) zv->zv_flags |= ZVOL_RDONLY; zv->zv_volblocksize = doi->doi_data_block_size; zv->zv_volsize = volsize; zv->zv_objset = os; set_capacity(zv->zv_disk, zv->zv_volsize >> 9); blk_queue_max_hw_sectors(zv->zv_queue, (DMU_MAX_ACCESS / 4) >> 9); blk_queue_max_segments(zv->zv_queue, UINT16_MAX); blk_queue_max_segment_size(zv->zv_queue, UINT_MAX); blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize); blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize); blk_queue_max_discard_sectors(zv->zv_queue, (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9); blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize); queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue); #ifdef QUEUE_FLAG_NONROT queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue); #endif #ifdef QUEUE_FLAG_ADD_RANDOM queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zv->zv_queue); #endif if (spa_writeable(dmu_objset_spa(os))) { if (zil_replay_disable) zil_destroy(dmu_objset_zil(os), B_FALSE); else zil_replay(os, zv, zvol_replay_vector); } /* * When udev detects the addition of the device it will immediately * invoke blkid(8) to determine the type of content on the device. * Prefetching the blocks commonly scanned by blkid(8) will speed * up this process. */ len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE); if (len > 0) { dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ); dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len, ZIO_PRIORITY_SYNC_READ); } zv->zv_objset = NULL; out_dmu_objset_disown: dmu_objset_disown(os, zvol_tag); out_doi: kmem_free(doi, sizeof (dmu_object_info_t)); out: if (error == 0) { zvol_insert(zv); /* * Drop the lock to prevent deadlock with sys_open() -> * zvol_open(), which first takes bd_disk->bd_mutex and then * takes zvol_state_lock, whereas this code path first takes * zvol_state_lock, and then takes bd_disk->bd_mutex. */ mutex_exit(&zvol_state_lock); add_disk(zv->zv_disk); } else { mutex_exit(&zvol_state_lock); } return (SET_ERROR(error)); }
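/*
 * Illustrative, self-contained sketch of the blkid prefetch sizing in
 * zvol_create_minor_impl(): clamp zvol_prefetch_bytes to [0, SPA_MAXBLOCKSIZE]
 * and warm that many bytes at both the head and the tail of the volume.  The
 * SPA_MAXBLOCKSIZE value and the sample volume size are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define SPA_MAXBLOCKSIZE	(128 * 1024)	/* assumed; newer pools allow more */

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	uint64_t zvol_prefetch_bytes = 131072;	/* module parameter in the driver */
	uint64_t volsize = 8ULL << 30;		/* 8 GiB volume */
	uint64_t len;

	/* Same clamp as the driver. */
	len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
	if (len > 0) {
		/* blkid(8) scans the head and tail of the device, so warm both. */
		printf("prefetch [0, %llu) and [%llu, %llu)\n",
		       (unsigned long long)len,
		       (unsigned long long)(volsize - len),
		       (unsigned long long)volsize);
	}
	return 0;
}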
int td_linux_block_create(struct td_osdev *dev)
{
	int rc;
	struct request_queue *queue;
	unsigned bio_sector_size = dev->block_params.bio_sector_size;
	unsigned hw_sector_size = dev->block_params.hw_sector_size;

	/* very simple sector size support */
	if (!bio_sector_size || bio_sector_size & 511 || bio_sector_size > 4096) {
		td_os_err(dev, "bio sector size of %u is not supported\n",
				bio_sector_size);
		return -EINVAL;
	}

	/* MetaData is reported here */
	if (hw_sector_size == 520)
		hw_sector_size = 512;

	if (!hw_sector_size || hw_sector_size & 511 || hw_sector_size > 4096) {
		td_os_err(dev, "hw sector size of %u is not supported\n",
				hw_sector_size);
		return -EINVAL;
	}

	td_os_notice(dev, " - Set capacity to %llu (%u bytes/sector)\n",
			dev->block_params.capacity,
			dev->block_params.hw_sector_size);

	/* create a new bio queue */
	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		td_os_err(dev, "Error allocating disk queue.\n");
		rc = -ENOMEM;
		goto error_alloc_queue;
	}

#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, queue);
#endif

	switch (dev->type) {
	case TD_OSDEV_DEVICE:
		blk_queue_make_request(queue, td_device_make_request);
		dev->_bio_error = td_device_bio_error;
		break;
	case TD_OSDEV_RAID:
		blk_queue_make_request(queue, td_raid_make_request);
		dev->_bio_error = td_raid_bio_error;
		break;
	default:
		td_os_err(dev, "Unknown OS type, cannot register block request handler\n");
		rc = -EINVAL;
		goto error_config_queue;
	}

	queue->queuedata = dev;

#if defined QUEUE_FLAG_PLUGGED
	queue->unplug_fn = td_device_queue_unplug;
#endif

	/*
	 * Configure queue ordering.  With QUEUE_ORDERED_DRAIN we will get
	 * BARRIERs after the queue has been drained.
	 */
#if defined KABI__blk_queue_ordered
#if KABI__blk_queue_ordered == 2
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN);
#elif KABI__blk_queue_ordered == 3
	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN, NULL);
#else
#error unhandled value of KABI__blk_queue_ordered
#endif
#elif defined KABI__blk_queue_flush
	/*
	 * blk_queue_ordered was replaced with blk_queue_flush.
	 * The default implementation is QUEUE_ORDERED_DRAIN.
	 */
	blk_queue_flush(queue, 0);
#else
#error undefined KABI__blk_queue_flush or KABI__blk_queue_ordered
#endif

	/* max out the throttling */
#ifdef KABI__blk_queue_max_hw_sectors
	blk_queue_max_hw_sectors(queue, dev->block_params.bio_max_bytes / 512);
#elif defined KABI__blk_queue_max_sectors
	blk_queue_max_sectors(queue, dev->block_params.bio_max_bytes / 512);
#else
	td_os_err(dev, "No kernel API for maximum sectors\n");
#endif

#if defined KABI__blk_queue_max_segments
	blk_queue_max_segments(queue, BLK_MAX_SEGMENTS);
#elif defined KABI__blk_queue_max_phys_segments
	blk_queue_max_phys_segments(queue, MAX_SEGMENT_SIZE);
	blk_queue_max_hw_segments(queue, MAX_SEGMENT_SIZE);
#else
	td_os_err(dev, "No kernel API for maximum segments\n");
#endif

	blk_queue_max_segment_size(queue, dev->block_params.bio_max_bytes);

	blk_queue_bounce_limit(queue, BLK_BOUNCE_ANY);

	/* setup paged based access */
	td_os_info(dev, "Set queue physical block size to %u\n", hw_sector_size);
#ifdef KABI__blk_queue_physical_block_size
	blk_queue_physical_block_size(queue, hw_sector_size);
#elif defined KABI__blk_queue_hardsect_size
	blk_queue_hardsect_size(queue, hw_sector_size);
#else
	td_os_err(dev, "No kernel API for physical sector size\n");
#endif

#ifdef KABI__blk_queue_logical_block_size
	td_os_info(dev, "Set queue logical block size to %u\n", bio_sector_size);
	blk_queue_logical_block_size(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for logical block size\n");
#endif

#ifdef KABI__blk_queue_io_min
	td_os_info(dev, "Set queue io_min to %u\n", bio_sector_size);
	blk_queue_io_min(queue, bio_sector_size);
#else
	td_os_err(dev, "No kernel API for minimum IO size\n");
#endif

#ifdef KABI__blk_queue_io_opt
	td_os_info(dev, "Set queue io_opt to %u\n",
			dev->block_params.bio_max_bytes);
	blk_queue_io_opt(queue, dev->block_params.bio_max_bytes);
#else
	td_os_err(dev, "No kernel API for optimal IO size\n");
#endif

#if 0
	if (dev->block_params.discard) {
		int did_something = 0;
#if defined KABI__blk_queue_discard_granularity
		queue->limits.discard_granularity = bio_sector_size;
		did_something++;
#endif
#ifdef KABI__blk_queue_max_discard_sectors
		/* 0xFFFF (max sector size of chunk on trim) * 64 * # SSD */
		blk_queue_max_discard_sectors(queue, TD_MAX_DISCARD_LBA_COUNT * 2);
		did_something++;
#endif
#ifdef KABI__blk_queue_discard_zeroes_data
		queue->limits.discard_zeroes_data = 1;
		did_something++;
#endif
#ifdef KABI__queue_flag_set_unlocked
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
		did_something++;
#endif
		/* Maybe some day.. But not today.
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, queue);
		*/
		if (did_something)
			td_os_info(dev, "Enabling discard support\n");
		else
			td_os_notice(dev, "No kernel API for discard support\n");
	} else {
		td_os_info(dev, "No DISCARD support enabled\n");
	}
#else
	/* bug 7444 */
	if (dev->block_params.discard)
		td_os_info(dev, "Device supports DISCARD but is currently being forced disabled\n");
#endif

	/* assign */
	dev->queue = queue;

	return 0;

error_config_queue:
	blk_cleanup_queue(queue);

error_alloc_queue:
	return rc;
}