/*
 * Populate the in-core count array from the on-disk space map by
 * reading the reference count of every block.
 *
 * Returns 0 on success, or a negative errno from the space map calls.
 */
static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
	int r;
	uint32_t ref_count;
	dm_block_t nr_blocks, b;

	r = dm_sm_get_nr_blocks(sm, &nr_blocks);
	if (r)
		return r;

	/* The shadow array must already be sized to match the space map. */
	BUG_ON(ca->nr != nr_blocks);

	DMWARN("Loading debug space map from disk. This may take some time");
	for (b = 0; b < nr_blocks; b++) {
		r = dm_sm_get_count(sm, b, &ref_count);
		if (r) {
			DMERR("load failed");
			return r;
		}

		ca_set_count(ca, b, ref_count);
	}
	DMWARN("Load complete");

	return 0;
}
/* * get_type * @type_name * * Attempt to retrieve the dm_dirty_log_type by name. If not already * available, attempt to load the appropriate module. * * Log modules are named "dm-log-" followed by the 'type_name'. * Modules may contain multiple types. * This function will first try the module "dm-log-<type_name>", * then truncate 'type_name' on the last '-' and try again. * * For example, if type_name was "clustered-disk", it would search * 'dm-log-clustered-disk' then 'dm-log-clustered'. * * Returns: dirty_log_type* on success, NULL on failure */ static struct dm_dirty_log_type *get_type(const char *type_name) { char *p, *type_name_dup; struct dm_dirty_log_type *log_type; if (!type_name) return NULL; log_type = _get_dirty_log_type(type_name); if (log_type) return log_type; type_name_dup = kstrdup(type_name, GFP_KERNEL); if (!type_name_dup) { DMWARN("No memory left to attempt log module load for \"%s\"", type_name); return NULL; } while (request_module("dm-log-%s", type_name_dup) || !(log_type = _get_dirty_log_type(type_name))) { p = strrchr(type_name_dup, '-'); if (!p) break; p[0] = '\0'; } if (!log_type) DMWARN("Module for logging type \"%s\" not found.", type_name); kfree(type_name_dup); return log_type; }
static int persistent_read_metadata(struct exception_store *store) { int r, new_snapshot; struct pstore *ps = get_info(store); /* * Read the snapshot header. */ r = read_header(ps, &new_snapshot); if (r) return r; /* * Do we need to setup a new snapshot ? */ if (new_snapshot) { r = write_header(ps); if (r) { DMWARN("write_header failed"); return r; } r = zero_area(ps, 0); if (r) { DMWARN("zero_area(0) failed"); return r; } } else { /* * Sanity checks. */ if (!ps->valid) { DMWARN("snapshot is marked invalid"); return -EINVAL; } if (ps->version != SNAPSHOT_DISK_VERSION) { DMWARN("unable to handle snapshot disk version %d", ps->version); return -EINVAL; } /* * Read the metadata. */ r = read_exceptions(ps); if (r) return r; } return 0; }
/*
 * Bring the on-disk mirror log back in sync with the in-core state on
 * resume: re-read the header (unless the log device is already marked
 * failed), adjust the clean bitmap for any grow/shrink of the region
 * count, rebuild the sync bitmap from the clean bits and write the
 * header back out.
 *
 * Returns the result of the final header write; a failed header READ
 * does not abort — it marks the log device failed and proceeds as if
 * the log were brand new (nr_regions = 0).
 */
static int disk_resume(struct dirty_log *log)
{
	int r = 0;
	unsigned i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/*
	 * Read the disk header, but only if we know it is good.
	 * Assume the worst in the event of failure.
	 */
	if (!lc->log_dev_failed && (r = read_header(lc))) {
		/*
		 * NOTE(review): r is always non-zero in this branch, so the
		 * "%s" below always prints "header" — the "bits" arm looks
		 * like dead text; confirm against the sibling write path.
		 */
		DMWARN("Read %s failed on mirror log device, %s.",
		       r ? "header" : "bits", lc->log_dev->name);
		fail_log_device(lc);
		/* Treat the log as new: forget the stale region count. */
		lc->header.nr_regions = 0;
	}

	/* set or clear any new bits -- device has grown */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);

	/* clear any old bits if device has shrunk */
	/* Only the unused tail of the final 32-bit word needs clearing. */
	for (i = lc->region_count; i % 32; i++)
		log_clear_bit(lc, lc->clean_bits, i);

	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
	lc->sync_search = 0;

	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;

	/* write out the log */
	if ((r = write_header(lc))) {
		DMWARN("Write header failed on mirror log device, %s.",
		       lc->log_dev->name);
		fail_log_device(lc);
	} else
		restore_log_device(lc);

	return r;
}
static int build_constructor_string(struct dm_target *ti, unsigned argc, char **argv, char **ctr_str) { int i, str_size; char *str = NULL; *ctr_str = NULL; /* * Determine overall size of the string. */ for (i = 0, str_size = 0; i < argc; i++) str_size += strlen(argv[i]) + 1; /* +1 for space between args */ str_size += 20; /* Max number of chars in a printed u64 number */ str = kzalloc(str_size, GFP_KERNEL); if (!str) { DMWARN("Unable to allocate memory for constructor string"); return -ENOMEM; } str_size = sprintf(str, "%llu", (unsigned long long)ti->len); for (i = 0; i < argc; i++) str_size += sprintf(str + str_size, " %s", argv[i]); *ctr_str = str; return str_size; }
static int read_header(struct log_c *log) { int r; r = rw_header(log, READ); if (r) return r; header_from_disk(&log->header, log->disk_header); /* New log required? */ if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) { log->header.magic = MIRROR_MAGIC; log->header.version = MIRROR_DISK_VERSION; log->header.nr_regions = 0; } /* Version 2 is like version 1 but always little endian on disk. */ #ifdef __LITTLE_ENDIAN if (log->header.version == 1) log->header.version = 2; #endif if (log->header.version != MIRROR_DISK_VERSION) { DMWARN("incompatible disk log version"); return -EINVAL; } return 0; }
/*
 * Module init: register the built-in "core" and "disk" dirty log types.
 *
 * Fails fast if the core type cannot be registered (previously a core
 * failure was silently discarded when the disk registration succeeded,
 * and the error path could unregister a core type that was never
 * registered).  On disk-type failure the core type is rolled back.
 */
static int __init dm_dirty_log_init(void)
{
	int r;

	r = dm_dirty_log_type_register(&_core_type);
	if (r) {
		DMWARN("couldn't register core log");
		return r;
	}

	r = dm_dirty_log_type_register(&_disk_type);
	if (r) {
		DMWARN("couldn't register disk type");
		/* Roll back the successful core registration. */
		dm_dirty_log_type_unregister(&_core_type);
	}

	return r;
}
static int read_header(struct pstore *ps, int *new_snapshot) { int r; struct disk_header *dh; r = chunk_io(ps, 0, READ); if (r) return r; dh = (struct disk_header *) ps->area; if (le32_to_cpu(dh->magic) == 0) { *new_snapshot = 1; } else if (le32_to_cpu(dh->magic) == SNAP_MAGIC) { *new_snapshot = 0; ps->valid = le32_to_cpu(dh->valid); ps->version = le32_to_cpu(dh->version); ps->chunk_size = le32_to_cpu(dh->chunk_size); } else { DMWARN("Invalid/corrupt snapshot"); r = -ENXIO; } return r; }
/*
 * Tear down a dm table: run each target's destructor, drop the target
 * type references, free the btree indexes and highs array, and warn if
 * any devices were never removed before freeing them.
 */
static void table_destroy(struct dm_table *t)
{
	unsigned int n;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (n = 0; n < t->num_targets; n++) {
		struct dm_target *target = &t->targets[n];

		if (target->type->dtr)
			target->type->dtr(target);

		dm_put_target_type(target->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (!list_empty(&t->devices)) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");
		free_devices(&t->devices);
	}

	kfree(t);
}
/*
 * Read and validate the mirror log header via a synchronous vm-mapped
 * I/O.  Re-initialises the in-core header when a new log is required
 * (non-default sync policy or bad magic).
 *
 * Returns 0 on success, -EINVAL for an incompatible on-disk version,
 * or the error from dm_io_sync_vm().
 */
static int read_header(struct log_c *log)
{
	int r;
	unsigned long ebits;	/* error bits from the io call; not inspected */

	r = dm_io_sync_vm(1, &log->header_location, READ,
			  log->disk_header, &ebits);
	if (r)
		return r;

	/* Convert on-disk (endian-specific) header to in-core layout. */
	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

	/* Version 2 is version 1 stored little-endian on disk, so a v1
	 * header read on a little-endian host is already in v2 format. */
#ifdef __LITTLE_ENDIAN
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
/*
 * Freeze the filesystem on the mapped device.
 *
 * Takes a bdev reference via bdget_disk() and deliberately KEEPS it
 * while the device is frozen (see the comment before the return);
 * unlock_fs is responsible for the matching bdput.  On success,
 * md->frozen_bdev and md->frozen_sb hold the locked state.
 *
 * Returns 0 on success, -ENOMEM if the bdev lookup fails, or the
 * error from freeze_bdev().
 */
static int lock_fs(struct mapped_device *md)
{
	int r = -ENOMEM;

	md->frozen_bdev = bdget_disk(md->disk, 0);
	if (!md->frozen_bdev) {
		DMWARN("bdget failed in lock_fs");
		goto out;
	}

	/* A leftover frozen_sb would mean lock/unlock got unbalanced. */
	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->frozen_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		goto out_bdput;
	}

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.  We'll bdput
	 * in unlock_fs */
	return 0;

out_bdput:
	bdput(md->frozen_bdev);
	md->frozen_sb = NULL;
	md->frozen_bdev = NULL;
out:
	return r;
}
/* * hp_sw_end_io - Completion handler for HP path activation. * @req: path activation request * @error: scsi-ml error * * Check sense data, free request structure, and notify dm that * pg initialization has completed. * * Context: scsi-ml softirq * */ static void hp_sw_end_io(struct request *req, int error) { struct dm_path *path = req->end_io_data; unsigned err_flags = 0; if (!error) { DMDEBUG("%s path activation command - success", path->dev->name); goto out; } if (hp_sw_error_is_retryable(req)) { DMDEBUG("%s path activation command - retry", path->dev->name); err_flags = MP_RETRY; goto out; } DMWARN("%s path activation fail - error=0x%x", path->dev->name, error); err_flags = MP_FAIL_PATH; out: req->end_io_data = NULL; __blk_put_request(req->q, req); dm_pg_init_complete(path, err_flags); }
/*
 * Finish processing a completed clone request.
 *
 * Gives the target's rq_end_io hook (if any, and only when the clone
 * was actually mapped) a chance to override the completion code, then
 * acts on the result: <= 0 completes the original, DM_ENDIO_INCOMPLETE
 * leaves it to the target, DM_ENDIO_REQUEUE requeues the original.
 * Any other value is a target bug.
 */
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	/* Device stopped honouring WRITE SAME mid-flight: disable it so
	 * the upper layers stop issuing such requests. */
	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio->md, tio->orig);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Add a target device to the table's device list, or just bump its
 * usage count if it is already present.
 *
 * @t:      the mapped table
 * @ti:     the target being constructed
 * @path:   device spec, either "<major>:<minor>" or a path like /dev/sdb
 * @start:  start offset of the target on the underlying device
 *          (analogous to a partition start)
 * @len:    length of this target within the dm device
 * @mode:   requested rw mode
 * @result: on success, the underlying device object
 *
 * Returns 0 on success or a negative errno.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	/* Extract the major and minor device numbers. */
	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		/* Resolve a path such as /dev/sdb to its <major:minor>. */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	/* Look the device up in the table's existing device list. */
	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		/* Open the device. */
		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		/* Record the name as "<major>:<minor>". */
		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		/* Insert the device into the table's device list. */
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		/* Already present but the mode needs widening. */
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	/* Check that the requested area does not run off the device. */
	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}
/*----------------------------------------------------------------
 * disk log constructor/destructor
 *
 * argv contains log_device region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
/*
 * Construct a disk-backed dirty log: acquire the log device, run the
 * shared core constructor for the in-core state, then set up the
 * on-disk header and bitset regions plus their bounce buffers.
 *
 * On any failure all partially-acquired resources (device reference,
 * core state, header buffer) are released.  Returns 0 or -errno.
 */
static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	int r;
	size_t size;
	struct log_c *lc;
	struct dm_dev *dev;

	if (argc < 2 || argc > 3) {
		DMWARN("wrong number of arguments to disk mirror log");
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
			  FMODE_READ | FMODE_WRITE, &dev);
	if (r)
		return r;

	/* Remaining args (region_size [no]sync) go to the core ctr. */
	r = core_ctr(log, ti, argc - 1, argv + 1);
	if (r) {
		dm_put_device(ti, dev);
		return r;
	}

	lc = (struct log_c *) log->context;
	lc->log_dev = dev;

	/* setup the disk header fields */
	lc->header_location.bdev = lc->log_dev->bdev;
	lc->header_location.sector = 0;
	lc->header_location.count = 1;

	/*
	 * We can't read less than this amount, even though we'll
	 * not be using most of this space.
	 */
	lc->disk_header = vmalloc(1 << SECTOR_SHIFT);
	if (!lc->disk_header)
		goto bad;

	/* setup the disk bitset fields */
	lc->bits_location.bdev = lc->log_dev->bdev;
	lc->bits_location.sector = LOG_OFFSET;

	/* Round the bitset buffer up to a whole number of sectors. */
	size = dm_round_up(lc->bitset_uint32_count * sizeof(uint32_t),
			   1 << SECTOR_SHIFT);
	lc->bits_location.count = size >> SECTOR_SHIFT;
	lc->disk_bits = vmalloc(size);
	if (!lc->disk_bits) {
		vfree(lc->disk_header);
		goto bad;
	}
	return 0;

bad:
	/* lc->log_dev == dev here, set just after core_ctr succeeded. */
	dm_put_device(ti, lc->log_dev);
	core_dtr(log);
	return -ENOMEM;
}
/*
 * Invalidate the persistent snapshot: clear the valid flag and push
 * the header to disk so the snapshot is not reused after a reload.
 * A failed header write is only logged; there is nothing else to do.
 */
static void persistent_drop(struct exception_store *store)
{
	struct pstore *ps = get_info(store);
	int r;

	ps->valid = 0;
	r = write_header(ps);
	if (r)
		DMWARN("write header failed");
}
/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
/*
 * @path may be either "<major>:<minor>" or a filesystem path; the
 * latter is resolved with lookup_device().  On success *result holds
 * the (possibly shared) dm_dev with its count incremented.
 * Returns 0 or a negative errno.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	if (!t)
		BUG();

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		/* MKDEV truncates; reject values that don't round-trip. */
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		/* First user of this device within the table. */
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev))) {
			kfree(dd);
			return r;
		}

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		/* Already open, but with a narrower mode: widen it. */
		r = upgrade_mode(dd, mode);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	/* Reject targets that extend past the end of the device. */
	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
check_again:
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			/*
			 * Dispatch was refused for resource reasons: undo the
			 * clone preparation and re-enter the switch as a
			 * (possibly delayed) requeue.
			 */
			blk_rq_unprep_clone(clone);
			tio->ti->type->release_clone_rq(clone);
			tio->clone = NULL;
			if (!rq->q->mq_ops)
				r = DM_MAPIO_DELAY_REQUEUE;
			else
				r = DM_MAPIO_REQUEUE;
			goto check_again;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
/*
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
		       struct mapped_device *md)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct request *clone = NULL;

	if (tio->clone) {
		/* Clone was prepared up front (old request_fn path). */
		clone = tio->clone;
		r = ti->type->map_rq(ti, clone, &tio->info);
	} else {
		/* Target allocates the clone itself (mq path). */
		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
		if (r < 0) {
			/* The target wants to complete the I/O */
			dm_kill_unmapped_request(rq, r);
			return r;
		}
		if (r != DM_MAPIO_REMAPPED)
			return r;
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}
	}

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(md, tio->orig);
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, r);
		return r;
	}

	return 0;
}
/*
 * Remove a dirty log type from the registry, unless something still
 * holds a reference to it (in which case only a warning is logged).
 * Always returns 0.
 */
int dm_unregister_dirty_log_type(struct dirty_log_type *type)
{
	spin_lock(&_lock);

	if (!type->use_count)
		list_del(&type->list);
	else
		DMWARN("Attempt to unregister a log type that is still in use");

	spin_unlock(&_lock);

	return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	/* Total sectors implied by the CHS geometry. */
	sector_t capacity = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > capacity) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
/*
 * Remove a dirty log type from the registry.  If the type is still in
 * use the removal is skipped and a warning names the offending type.
 * Always returns 0.
 */
int dm_unregister_dirty_log_type(struct dirty_log_type *type)
{
	spin_lock(&_lock);

	if (!type->use_count)
		list_del(&type->list);
	else
		DMWARN("Unregister failed: log type '%s' still in use",
		       type->name);

	spin_unlock(&_lock);

	return 0;
}
/*
 * Look up a dirty log type by name, loading its module on demand.
 * Returns the type, or NULL if it cannot be found or loaded.
 */
static struct dirty_log_type *get_type(const char *type_name)
{
	struct dirty_log_type *found = _get_type(type_name);

	if (found)
		return found;

	/* Logging type not found.  Try to find module. */
	if (load_log_module(type_name)) {
		DMWARN("Module for logging type \"%s\" not found.", type_name);
		return NULL;
	}

	/* Module loaded; the type should now be registered. */
	return _get_type(type_name);
}
/*
 * See if the device with a specific minor # is free.
 *
 * Returns 0 and claims the minor on success, -EBUSY if it is taken,
 * or -EINVAL if the minor is out of range.
 */
static int specific_minor(unsigned int minor)
{
	int rc;

	if (minor >= MAX_DEVICES) {
		DMWARN("request for a mapped_device beyond MAX_DEVICES (%d)",
		       MAX_DEVICES);
		return -EINVAL;
	}

	spin_lock(&_minor_lock);
	rc = test_and_set_bit(minor, _minor_bits) ? -EBUSY : 0;
	spin_unlock(&_minor_lock);

	return rc;
}
/*
 * Hand a cloned bio to the target's map function and act on the result:
 * dispatch it when remapped, or tear it down (and error/requeue the
 * original io) on failure.  Any other positive return is a target bug.
 */
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	/* Remember the pre-map sector for the remap trace below. */
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector,
				    clone->bi_sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		/* clone_endio won't run; bio_put needs the bio_set here. */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
/*
 * Thaw the filesystem frozen by the lock path, if it was locked.
 *
 * Returns 0 when the device was not locked or on successful thaw,
 * -ENOMEM if the bdev lookup fails.
 */
static int __unlock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	/* Nothing to do unless we were the ones who locked it. */
	if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __unlock_fs");
		return -ENOMEM;
	}

	thaw_bdev(bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	/*
	 * Two bdputs: one for the bdget_disk() above, and one for the
	 * reference the lock path retained while the device was frozen.
	 * NOTE(review): confirm the lock-side function does keep its
	 * bdev reference; otherwise the second bdput over-releases.
	 */
	bdput(bdev);
	bdput(bdev);

	return 0;
}
/*
 * Issue a zero-length flush write to the log device so previously
 * written header/bitset data is made durable.  The region has
 * count == 0: no payload, just the flush.
 */
static int flush_header(struct log_c *lc)
{
	struct dm_io_region null_location = {
		.bdev = lc->header_location.bdev,
		.sector = 0,
		.count = 0,
	};

	lc->io_req.bi_op = REQ_OP_WRITE;
	lc->io_req.bi_op_flags = WRITE_FLUSH;

	return dm_io(&lc->io_req, 1, &null_location, NULL);
}

/*
 * Read and validate the mirror log header from disk.
 *
 * Re-initialises the in-core header when a new log is required
 * (non-default sync policy or bad magic).  Returns 0 on success,
 * -EINVAL for an incompatible on-disk version, or the I/O error.
 */
static int read_header(struct log_c *log)
{
	int r;

	r = rw_header(log, REQ_OP_READ);
	if (r)
		return r;

	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

	/* Version 2 is version 1 stored little-endian on disk, so a v1
	 * header read on a little-endian host is already in v2 format. */
#ifdef __LITTLE_ENDIAN
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}
/*
 * Finish processing a completed clone request (blk_status_t flavour).
 *
 * Gives the target's rq_end_io hook (if any, and only when the clone
 * was actually mapped) a chance to override the DM_ENDIO_* decision,
 * disables WRITE SAME / WRITE ZEROES support when the device reports
 * it no longer honours them, then completes, requeues or leaves the
 * original request according to the decision.
 */
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	/* BLK_STS_TARGET with a zeroed limit means the device stopped
	 * supporting the op mid-flight: turn it off for future I/O. */
	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Called with the queue lock held.
 */
/*
 * Old request_fn prep hook: allocate the per-request target io and
 * stash it in rq->special.  Returns BLKPREP_OK on success,
 * BLKPREP_DEFER when allocation must be retried later, or
 * BLKPREP_KILL if the request was somehow already prepped.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct dm_rq_target_io *tio;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	/* GFP_ATOMIC: we hold the queue lock, so we cannot sleep. */
	tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
	if (!tio)
		return BLKPREP_DEFER;

	rq->special = tio;
	/* Don't prep this request again if it is requeued. */
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
static int emc_create(struct hw_handler *hwh, unsigned argc, char **argv) { struct emc_handler *h; unsigned hr, short_trespass; if (argc == 0) { /* No arguments: use defaults */ hr = 0; short_trespass = 0; } else if (argc != 2) { DMWARN("dm-emc hwhandler: incorrect number of arguments"); return -EINVAL; } else { if ((sscanf(argv[0], "%u", &short_trespass) != 1) || (short_trespass > 1)) { DMWARN("dm-emc: invalid trespass mode selected"); return -EINVAL; } if ((sscanf(argv[1], "%u", &hr) != 1) || (hr > 1)) { DMWARN("dm-emc: invalid honor reservation flag selected"); return -EINVAL; } } h = alloc_emc_handler(); if (!h) return -ENOMEM; hwh->context = h; if ((h->short_trespass = short_trespass)) DMWARN("dm-emc: short trespass command will be send"); else DMWARN("dm-emc: long trespass command will be send"); if ((h->hr = hr)) DMWARN("dm-emc: honor reservation bit will be set"); else DMWARN("dm-emc: honor reservation bit will not be set (default)"); return 0; }