/* * Fully initialize a .request_fn request-based queue. */ int dm_old_init_request_queue(struct mapped_device *md) { /* Fully initialize the queue */ if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL)) return -EINVAL; /* disable dm_old_request_fn's merge heuristic by default */ md->seq_rq_merge_deadline_usecs = 0; dm_init_normal_md_queue(md); blk_queue_softirq_done(md->queue, dm_softirq_done); blk_queue_prep_rq(md->queue, dm_old_prep_fn); /* Initialize the request-based DM worker thread */ init_kthread_worker(&md->kworker); md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, "kdmwork-%s", dm_device_name(md)); elv_register_queue(md->queue); return 0; }
/*
 * Fully initialize a .request_fn request-based queue.
 *
 * Sizes the per-request payload (the dm_rq_target_io plus, for an
 * immutable target, its per-io data), initializes the allocated queue,
 * and starts the request-based DM worker kthread.
 *
 * Returns 0 on success, -EINVAL if the queue cannot be initialized,
 * or the error from kthread_run() if the worker cannot be created.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/*
	 * Fully initialize the queue.  cmd_size must be final before
	 * blk_init_allocated_queue() so the block layer allocates the
	 * extra per-request space.
	 */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		/* kthread_run() returns ERR_PTR() on failure */
		int error = PTR_ERR(md->kworker_task);

		/* don't leave an error pointer for later kthread_stop() */
		md->kworker_task = NULL;
		return error;
	}

	elv_register_queue(md->queue);

	return 0;
}
/*
 * Append one target to the table: look up its type, validate that it
 * abuts the previous target, run the target constructor, and record
 * the target's highest sector.  On any failure after the type has been
 * resolved, the type reference is dropped via the shared "bad" path.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int ret;
	int argc;
	char **argv;
	struct dm_target *tgt;

	ret = check_space(t);
	if (ret)
		return ret;

	/* Claim the next free slot and start from a clean target. */
	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/* Reject tables with holes: each target must adjoin its predecessor. */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		ret = -EINVAL;
		goto bad;
	}

	ret = dm_split_args(&argc, &argv, params);
	if (ret) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	/* The constructor consumes argv's contents; free the vector after. */
	ret = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (ret)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return ret;
}
/*
 * Create and activate the boot-time ("dm=") device-mapper device from
 * the already-parsed dm_setup_args: create the md, build and complete
 * its table, suspend, bind, resume, and finally export it through the
 * ioctl interface.
 *
 * Error handling relies on the cascading fall-through labels at the
 * bottom: each later failure point falls into every earlier cleanup
 * step, ending with a boot warning.  Runs only at __init time; never
 * returns an error -- failure is reported via printk.
 */
static void __init dm_setup_drive(void)
{
	struct mapped_device *md = NULL;
	struct dm_table *table = NULL;
	struct dm_setup_target *target;
	char *uuid = dm_setup_args.uuid;
	fmode_t fmode = FMODE_READ;

	/* Finish parsing the targets. */
	if (dm_setup_parse_targets(dm_setup_args.targets))
		goto parse_fail;

	if (dm_create(dm_setup_args.minor, &md)) {
		DMDEBUG("failed to create the device");
		goto dm_create_fail;
	}
	DMDEBUG("created device '%s'", dm_device_name(md));

	/* In addition to flagging the table below, the disk must be
	 * set explicitly ro/rw. */
	set_disk_ro(dm_disk(md), dm_setup_args.ro);

	if (!dm_setup_args.ro)
		fmode |= FMODE_WRITE;
	if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) {
		DMDEBUG("failed to create the table");
		goto dm_table_create_fail;
	}

	/* Add every parsed target; any one failing aborts the whole setup. */
	target = dm_setup_args.target;
	while (target) {
		DMINFO("adding target '%llu %llu %s %s'",
		       (unsigned long long) target->begin,
		       (unsigned long long) target->length, target->type,
		       target->params);
		if (dm_table_add_target(table, target->type, target->begin,
					target->length, target->params)) {
			DMDEBUG("failed to add the target to the table");
			goto add_target_fail;
		}
		target = target->next;
	}

	if (dm_table_complete(table)) {
		DMDEBUG("failed to complete the table");
		goto table_complete_fail;
	}

	/* Suspend the device so that we can bind it to the table. */
	if (dm_suspend(md, 0)) {
		DMDEBUG("failed to suspend the device pre-bind");
		goto suspend_fail;
	}

	/* Bind the table to the device. This is the only way to associate
	 * md->map with the table and set the disk capacity directly. */
	if (dm_swap_table(md, table)) { /* should return NULL. */
		DMDEBUG("failed to bind the device to the table");
		goto table_bind_fail;
	}

	/* Finally, resume and the device should be ready. */
	if (dm_resume(md)) {
		DMDEBUG("failed to resume the device");
		goto resume_fail;
	}

	/* Export the dm device via the ioctl interface */
	if (!strcmp(DM_NO_UUID, dm_setup_args.uuid))
		uuid = NULL;
	if (dm_ioctl_export(md, dm_setup_args.name, uuid)) {
		DMDEBUG("failed to export device with given name and uuid");
		goto export_fail;
	}
	printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor);

	dm_setup_cleanup();
	return;

/* Fall-through cleanup: later failures run all earlier cleanup steps. */
export_fail:
resume_fail:
table_bind_fail:
suspend_fail:
table_complete_fail:
add_target_fail:
	dm_table_put(table);
dm_table_create_fail:
	dm_put(md);
dm_create_fail:
	dm_setup_cleanup();
parse_fail:
	printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n",
	       dm_setup_args.minor, dm_setup_args.name);
}