static int stackbd_threadfn(void *data) { struct bio *bio; set_user_nice(current, -20); while (!kthread_should_stop()) { /* wake_up() is after adding bio to list. No need for condition */ wait_event_interruptible(req_event, kthread_should_stop() || !bio_list_empty(&stackbd.bio_list)); spin_lock_irq(&stackbd.lock); if (bio_list_empty(&stackbd.bio_list)) { spin_unlock_irq(&stackbd.lock); continue; } bio = bio_list_pop(&stackbd.bio_list); spin_unlock_irq(&stackbd.lock); stackbd_io_fn(bio); } return 0; }
static void punt_bios_to_rescuer(struct bio_set *bs) { struct bio_list punt, nopunt; struct bio *bio; /* * In order to guarantee forward progress we must punt only bios that * were allocated from this bio_set; otherwise, if there was a bio on * there for a stacking driver higher up in the stack, processing it * could require allocating bios from this bio_set, and doing that from * our own rescuer would be bad. * * Since bio lists are singly linked, pop them all instead of trying to * remove from the middle of the list: */ bio_list_init(&punt); bio_list_init(&nopunt); while ((bio = bio_list_pop(current->bio_list))) bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); *current->bio_list = nopunt; spin_lock(&bs->rescue_lock); bio_list_merge(&bs->rescue_list, &punt); spin_unlock(&bs->rescue_lock); queue_work(bs->rescue_workqueue, &bs->rescue_work); }
static void iblock_submit_bios(struct bio_list *list, int rw) { struct blk_plug plug; struct bio *bio; blk_start_plug(&plug); while ((bio = bio_list_pop(list))) submit_bio(rw, bio); blk_finish_plug(&plug); }
static void rrpc_requeue(struct work_struct *work) { struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue); struct bio_list bios; struct bio *bio; bio_list_init(&bios); spin_lock(&rrpc->bio_lock); bio_list_merge(&bios, &rrpc->requeue_bios); bio_list_init(&rrpc->requeue_bios); spin_unlock(&rrpc->bio_lock); while ((bio = bio_list_pop(&bios))) rrpc_make_rq(rrpc->disk->queue, bio); }
static void bio_alloc_rescue(struct work_struct *work) { struct bio_set *bs = container_of(work, struct bio_set, rescue_work); struct bio *bio; while (1) { spin_lock(&bs->rescue_lock); bio = bio_list_pop(&bs->rescue_list); spin_unlock(&bs->rescue_lock); if (!bio) break; generic_make_request(bio); } }
void dm_cell_error(struct dm_bio_prison *prison, struct dm_bio_prison_cell *cell) { struct bio_list bios; struct bio *bio; unsigned long flags; bio_list_init(&bios); spin_lock_irqsave(&prison->lock, flags); __cell_release(cell, &bios); spin_unlock_irqrestore(&prison->lock, flags); while ((bio = bio_list_pop(&bios))) bio_io_error(bio); }
/*
 * Execute a SCSI READ/WRITE for the iblock backend: map the command's
 * scatterlist into one or more bios and submit them to the backing
 * block device.
 *
 * Returns 0 on successful submission; -ENOSYS for an unsupported block
 * size, -ENOMEM on allocation failure (scsi_sense_reason is set on the
 * error paths that reach the initiator).
 */
static int iblock_execute_rw(struct se_cmd *cmd)
{
	struct scatterlist *sgl = cmd->t_data_sg;
	u32 sgl_nents = cmd->t_data_nents;
	enum dma_data_direction data_direction = cmd->data_direction;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	/*
	 * pending starts at 2: one count for the initial bio plus one extra
	 * reference -- presumably dropped by iblock_complete_cmd() below so
	 * the command cannot complete before all bios are submitted.
	 * NOTE(review): confirm against iblock_complete_cmd()'s decrement.
	 */
	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an endless
		 * loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			/*
			 * Current bio is full; flush a batch once we hit the
			 * per-task cap, then start a new bio at block_lba.
			 */
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	/* Only the not-yet-submitted bios remain on the list; drop them. */
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
	return -ENOMEM;
}
/*
 * Execute a SCSI WRITE_SAME for the iblock backend: replicate the single
 * block-sized S/G payload page across the whole target range by adding
 * the same page to a chain of bios, one entry per logical block.
 *
 * Returns 0 on successful submission, TCM_INVALID_CDB_FIELD for an
 * illegal payload layout, or TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE on
 * allocation failure.
 */
static sense_reason_t iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	sg = &cmd->t_data_sg[0];

	/* WRITE_SAME payload must be exactly one block in a single SGL entry. */
	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	/* One pending count per bio; incremented as more bios are chained. */
	atomic_set(&ibr->pending, 1);

	while (sectors) {
		/* When the current bio is full, start a new one at block_lba. */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	/* Nothing has been submitted yet; drop every allocated bio. */
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}