static void mmc_blk_put(struct mmc_blk_data *md)
{
    mutex_lock(&open_lock);
    md->usage--;
    if (md->usage == 0) {
        int devmaj = MAJOR(disk_devt(md->disk));
        int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;

        if (!devmaj)
            devidx = md->disk->first_minor >> MMC_SHIFT;

        blk_cleanup_queue(md->queue.queue);

        __clear_bit(devidx, dev_use);

        put_disk(md->disk);
        kfree(md);
    }
    mutex_unlock(&open_lock);
}
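For context, a minimal sketch of the get-side that pairs with this usage-count pattern under the same open_lock; the body below is an assumption about the usual mmc_blk_get() shape, not verbatim kernel source.

/* Hedged sketch of the paired get-side; shape assumed, not verbatim source. */
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
    struct mmc_blk_data *md;

    mutex_lock(&open_lock);
    md = disk->private_data;
    if (md && md->usage == 0)
        md = NULL;          /* already being torn down */
    if (md)
        md->usage++;        /* reference taken; mmc_blk_put() drops it */
    mutex_unlock(&open_lock);

    return md;
}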
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
    int r;
    struct dm_target *ti = tio->ti;
    struct mapped_device *md = tio->md;
    struct request *rq = tio->orig;
    struct request *clone = NULL;
    blk_status_t ret;

    r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
check_again:
    switch (r) {
    case DM_MAPIO_SUBMITTED:
        /* The target has taken the I/O to submit by itself later */
        break;
    case DM_MAPIO_REMAPPED:
        if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
            /* -ENOMEM */
            ti->type->release_clone_rq(clone);
            return DM_MAPIO_REQUEUE;
        }

        /* The target has remapped the I/O so dispatch it */
        trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                             blk_rq_pos(rq));
        ret = dm_dispatch_clone_request(clone, rq);
        if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
            blk_rq_unprep_clone(clone);
            tio->ti->type->release_clone_rq(clone);
            tio->clone = NULL;
            if (!rq->q->mq_ops)
                r = DM_MAPIO_DELAY_REQUEUE;
            else
                r = DM_MAPIO_REQUEUE;
            goto check_again;
        }
        break;
    case DM_MAPIO_REQUEUE:
        /* The target wants to requeue the I/O */
        break;
    case DM_MAPIO_DELAY_REQUEUE:
        /* The target wants to requeue the I/O after a delay */
        dm_requeue_original_request(tio, true);
        break;
    case DM_MAPIO_KILL:
        /* The target wants to complete the I/O */
        dm_kill_unmapped_request(rq, BLK_STS_IOERR);
        break;
    default:
        DMWARN("unimplemented target map return value: %d", r);
        BUG();
    }

    return r;
}
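A hedged sketch of how a request-based DM dispatcher might act on the DM_MAPIO_* values returned above; the function name example_queue_rq and its mapping onto blk_status_t codes are illustrative assumptions, not the kernel's dm_mq_queue_rq().

/* Illustrative caller only; name and return mapping are assumptions. */
static blk_status_t example_queue_rq(struct dm_rq_target_io *tio)
{
    switch (map_request(tio)) {
    case DM_MAPIO_REQUEUE:
        /* No resources to clone/dispatch now; let the block layer retry. */
        return BLK_STS_RESOURCE;
    default:
        /* Submitted, remapped, delayed, or killed: nothing more to do here. */
        return BLK_STS_OK;
    }
}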
static void mmc_blk_put(struct mmc_blk_data *md)
{
    mutex_lock(&open_lock);
    md->usage--;
    if (md->usage == 0) {
        int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;

        __clear_bit(devidx, dev_use);

        put_disk(md->disk);
        kfree(md);
    }
    mutex_unlock(&open_lock);
}
/*
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                       struct mapped_device *md)
{
    int r;
    struct dm_target *ti = tio->ti;
    struct request *clone = NULL;

    if (tio->clone) {
        clone = tio->clone;
        r = ti->type->map_rq(ti, clone, &tio->info);
    } else {
        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
        if (r < 0) {
            /* The target wants to complete the I/O */
            dm_kill_unmapped_request(rq, r);
            return r;
        }
        if (r != DM_MAPIO_REMAPPED)
            return r;
        if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
            /* -ENOMEM */
            ti->type->release_clone_rq(clone);
            return DM_MAPIO_REQUEUE;
        }
    }

    switch (r) {
    case DM_MAPIO_SUBMITTED:
        /* The target has taken the I/O to submit by itself later */
        break;
    case DM_MAPIO_REMAPPED:
        /* The target has remapped the I/O so dispatch it */
        trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                             blk_rq_pos(rq));
        dm_dispatch_clone_request(clone, rq);
        break;
    case DM_MAPIO_REQUEUE:
        /* The target wants to requeue the I/O */
        dm_requeue_original_request(md, tio->orig);
        break;
    default:
        if (r > 0) {
            DMWARN("unimplemented target map return value: %d", r);
            BUG();
        }

        /* The target wants to complete the I/O */
        dm_kill_unmapped_request(rq, r);
        return r;
    }

    return 0;
}
/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @q:     queue the io is for
 * @rq:    the source request
 * @dev:   target device
 * @from:  source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(struct request_queue *q,
                                   struct request *rq, dev_t dev,
                                   sector_t from)
{
    struct blk_trace *bt = q->blk_trace;
    struct blk_io_trace_remap r;

    if (likely(!bt))
        return;

    r.device_from = cpu_to_be32(dev);
    r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
    r.sector_from = cpu_to_be64(from);

    __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
                    rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
                    sizeof(r), &r);
}
static int dasd_devices_show(struct seq_file *m, void *v)
{
    struct dasd_device *device;
    struct dasd_block *block;
    char *substr;

    device = dasd_device_from_devindex((unsigned long) v - 1);
    if (IS_ERR(device))
        return 0;
    if (device->block)
        block = device->block;
    else {
        dasd_put_device(device);
        return 0;
    }
    /* Print device number. */
    seq_printf(m, "%s", dev_name(&device->cdev->dev));
    /* Print discipline string. */
    if (device != NULL && device->discipline != NULL)
        seq_printf(m, "(%s)", device->discipline->name);
    else
        seq_printf(m, "(none)");
    /* Print kdev. */
    if (block->gdp)
        seq_printf(m, " at (%3d:%6d)",
                   MAJOR(disk_devt(block->gdp)),
                   MINOR(disk_devt(block->gdp)));
    else
        seq_printf(m, " at (???:??????)");
    /* Print device name. */
    if (block->gdp)
        seq_printf(m, " is %-8s", block->gdp->disk_name);
    else
        seq_printf(m, " is ????????");
    /* Print devices features. */
    substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
    seq_printf(m, "%4s: ", substr);
    /* Print device status information. */
    switch ((device != NULL) ? device->state : -1) {
    case -1:
        seq_printf(m, "unknown");
        break;
    case DASD_STATE_NEW:
        seq_printf(m, "new");
        break;
    case DASD_STATE_KNOWN:
        seq_printf(m, "detected");
        break;
    case DASD_STATE_BASIC:
        seq_printf(m, "basic");
        break;
    case DASD_STATE_UNFMT:
        seq_printf(m, "unformatted");
        break;
    case DASD_STATE_READY:
    case DASD_STATE_ONLINE:
        seq_printf(m, "active ");
        if (dasd_check_blocksize(block->bp_block))
            seq_printf(m, "n/f ");
        else
            seq_printf(m, "at blocksize: %d, %ld blocks, %ld MB",
                       block->bp_block, block->blocks,
                       ((block->bp_block >> 9) * block->blocks) >> 11);
        break;
    default:
        seq_printf(m, "no stat");
        break;
    }
    dasd_put_device(device);
    if (dasd_probeonly)
        seq_printf(m, "(probeonly)");
    seq_printf(m, "\n");
    return 0;
}
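dasd_devices_show() is the ->show hook of a seq_file iterator; below is a minimal, hedged sketch of the usual wiring. The dasd_devices_start/next/stop helper names and the proc entry shown in the comment are assumptions about the surrounding file; only the seq_operations pattern itself is the point.

/*
 * Hedged wiring sketch: dasd_devices_show() plugged into the standard
 * seq_file iterator. The start/next/stop helper names are assumed.
 */
static const struct seq_operations dasd_devices_seq_ops = {
    .start  = dasd_devices_start,
    .next   = dasd_devices_next,
    .stop   = dasd_devices_stop,
    .show   = dasd_devices_show,
};

/* e.g. proc_create_seq("devices", 0444, dasd_proc_root_entry, &dasd_devices_seq_ops); */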