static int out(struct sm_metadata *smm)
{
	int r = 0;

	/*
	 * If we're not recursing then very bad things are happening.
	 */
	if (!smm->recursion_count) {
		DMERR("lost track of recursion depth");
		return -ENOMEM;
	}

	if (smm->recursion_count == 1) {
		while (!brb_empty(&smm->uncommitted)) {
			struct block_op bop;

			r = brb_pop(&smm->uncommitted, &bop);
			if (r) {
				DMERR("bug in bop ring buffer");
				break;
			}

			r = commit_bop(smm, &bop);
			if (r)
				break;
		}
	}

	smm->recursion_count--;

	return r;
}
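For context, out() is the tail half of a pair; the matching entry helper just bumps the depth counter. A minimal sketch of that counterpart, assuming the same struct sm_metadata field:

/* Counterpart sketch: enter a recursion-tracked section (assumed pairing). */
static void in(struct sm_metadata *smm)
{
	smm->recursion_count++;
}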
Example #2
static struct bio *get_failover_bio(struct path *path, unsigned data_size)
{
	struct bio *bio;
	struct page *page;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio) {
		DMERR("dm-emc: get_failover_bio: bio_alloc() failed.");
		return NULL;
	}

	bio->bi_rw |= (1 << BIO_RW);
	bio->bi_bdev = path->dev->bdev;
	bio->bi_sector = 0;
	bio->bi_private = path;
	bio->bi_end_io = emc_endio;

	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		DMERR("dm-emc: get_failover_bio: alloc_page() failed.");
		bio_put(bio);
		return NULL;
	}

	if (bio_add_page(bio, page, data_size, 0) != data_size) {
		DMERR("dm-emc: get_failover_bio: alloc_page() failed.");
		__free_page(page);
		bio_put(bio);
		return NULL;
	}

	return bio;
}
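The bio built here is torn down by the free_bio() helper referenced later in emc_trespass_get() when request allocation fails. A minimal sketch, assuming the page sits in the bio's first io_vec slot:

/* Sketch: release the single page added above, then drop the bio. */
static void free_bio(struct bio *bio)
{
	__free_page(bio->bi_io_vec[0].bv_page);
	bio_put(bio);
}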
Example #3
static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}
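sb_check() only makes sense next to the writer that stamps blocknr and csum in the first place. A hedged sketch of that counterpart, mirroring the same checksum range and XOR seed used by the check:

/* Sketch: stamp location and checksum before the block hits disk. */
static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}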
Example #4
static struct request *emc_trespass_get(struct emc_handler *h,
					struct path *path)
{
	struct bio *bio;
	struct request *rq;
	unsigned char *page22;
	unsigned char long_trespass_pg[] = {
		0, 0, 0, 0,
		TRESPASS_PAGE,        /* Page code */
		0x09,                 /* Page length - 2 */
		h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
		0xff, 0xff,           /* Trespass target */
		0, 0, 0, 0, 0, 0      /* Reserved bytes / unknown */
		};
	unsigned char short_trespass_pg[] = {
		0, 0, 0, 0,
		TRESPASS_PAGE,        /* Page code */
		0x02,                 /* Page length - 2 */
		h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
		0xff,                 /* Trespass target */
		};
	unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
				sizeof(long_trespass_pg);

	/* get bio backing */
	if (data_size > PAGE_SIZE)
		/* this should never happen */
		return NULL;

	bio = get_failover_bio(path, data_size);
	if (!bio) {
		DMERR("dm-emc: emc_trespass_get: no bio");
		return NULL;
	}

	page22 = (unsigned char *)bio_data(bio);
	memset(page22, 0, data_size);

	memcpy(page22, h->short_trespass ?
		short_trespass_pg : long_trespass_pg, data_size);

	/* get request for block layer packet command */
	rq = get_failover_req(h, bio, path);
	if (!rq) {
		DMERR("dm-emc: emc_trespass_get: no rq");
		free_bio(bio);
		return NULL;
	}

	/* Prepare the command. */
	rq->cmd[0] = MODE_SELECT;
	rq->cmd[1] = 0x10;
	rq->cmd[4] = data_size;
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}
Example #5
static int write_metadata(struct log_writes_c *lc, void *entry,
			  size_t entrylen, void *data, size_t datalen,
			  sector_t sector)
{
	struct bio *bio;
	struct page *page;
	void *ptr;
	size_t ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio) {
		DMERR("Couldn't alloc log bio");
		goto error;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = lc->logdev->bdev;
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		DMERR("Couldn't alloc log page");
		bio_put(bio);
		goto error;
	}

	ptr = kmap_atomic(page);
	memcpy(ptr, entry, entrylen);
	if (datalen)
		memcpy(ptr + entrylen, data, datalen);
	memset(ptr + entrylen + datalen, 0,
	       lc->sectorsize - entrylen - datalen);
	kunmap_atomic(ptr);

	ret = bio_add_page(bio, page, lc->sectorsize, 0);
	if (ret != lc->sectorsize) {
		DMERR("Couldn't add page to the log block");
		goto error_bio;
	}
	submit_bio(bio);
	return 0;
error_bio:
	bio_put(bio);
	__free_page(page);
error:
	put_io_block(lc);
	return -1;
}
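write_metadata() hands ownership of the bio, the page, and one io-block reference to the completion path. A minimal sketch of the log_end_io() handler it wires up, assuming a bi_error-era kernel (the field names are assumptions):

/* Sketch: completion must free the page, drop the io-block ref and the bio. */
static void log_end_io(struct bio *bio)
{
	struct log_writes_c *lc = bio->bi_private;

	if (bio->bi_error)
		DMERR("Error writing log block, error=%d", bio->bi_error);

	bio_free_pages(bio);	/* frees the page added with bio_add_page() */
	put_io_block(lc);
	bio_put(bio);
}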
static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
	int r;
	uint32_t count;
	dm_block_t nr_blocks, i;

	r = dm_sm_get_nr_blocks(sm, &nr_blocks);
	if (r)
		return r;

	BUG_ON(ca->nr != nr_blocks);

	DMWARN("Loading debug space map from disk.  This may take some time");
	for (i = 0; i < nr_blocks; i++) {
		r = dm_sm_get_count(sm, i, &count);
		if (r) {
			DMERR("load failed");
			return r;
		}

		ca_set_count(ca, i, count);
	}
	DMWARN("Load complete");

	return 0;
}
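ca_set_count() is presumably a trivial store into the shadow array sized at ca->nr; a sketch (the counts field name is an assumption):

/* Sketch: record the reference count for block b in the shadow array. */
static void ca_set_count(struct count_array *ca, dm_block_t b, uint32_t count)
{
	ca->counts[b] = count;
}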
Example #7
static struct request *get_failover_req(struct emc_handler *h,
					struct bio *bio, struct dm_path *path)
{
	struct request *rq;
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	/* FIXME: Figure out why it fails with GFP_ATOMIC. */
	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (!rq) {
		DMERR("get_failover_req: blk_get_request failed");
		return NULL;
	}

	blk_rq_append_bio(q, rq, bio);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	rq->timeout = EMC_FAILOVER_TIMEOUT;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;

	return rq;
}
/* get_boot_dev is based on dm_get_device_by_uuid in dm_bootcache. */
static dev_t get_boot_dev(void)
{
	const char partuuid[] = "PARTUUID=";
	char uuid[2 * sizeof(partuuid) + 36];	/* Room for 2 PARTUUIDs */
	char *uuid_str;
	dev_t devt = 0;

	uuid_str = get_info_from_cmdline(" kern_guid=",
			&uuid[sizeof(partuuid) - 1],
			sizeof(uuid) - sizeof(partuuid));
	if (!uuid_str) {
		DMERR("Couldn't get uuid, try root dev");
		return 0;
	}

	if (strncmp(uuid_str, partuuid, strlen(partuuid)) != 0) {
		/* Not prefixed with "PARTUUID=", so add it */
		memcpy(uuid, partuuid, sizeof(partuuid) - 1);
		uuid_str = uuid;
	}
	devt = name_to_dev_t(uuid_str);
	if (!devt)
		goto found_nothing;
	return devt;

found_nothing:
	DMDEBUG("No matching partition for GUID: %s", uuid_str);
	return 0;
}
static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	int r = sm_metadata_new_block_(sm, b);

	if (r)
		DMERR("out of metadata space");

	return r;
}
Example #10
static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed,
			struct path *path)
{
	struct request *rq;
	struct request_queue *q = bdev_get_queue(path->dev->bdev);

	/*
	 * We can either blindly init the pg (then look at the sense),
	 * or we can send some commands to get the state here (then
	 * possibly send the fo cmnd), or we can also have the
	 * initial state passed into us and then get an update here.
	 */
	if (!q) {
		DMINFO("dm-emc: emc_pg_init: no queue");
		goto fail_path;
	}

	/* FIXME: The request should be pre-allocated. */
	rq = emc_trespass_get(hwh->context, path);
	if (!rq) {
		DMERR("dm-emc: emc_pg_init: no rq");
		goto fail_path;
	}

	DMINFO("dm-emc: emc_pg_init: sending switch-over command");
	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
	return;

fail_path:
	dm_pg_init_complete(path, MP_FAIL_PATH);
}
static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t count)
{
	DMERR("bootstrap doesn't support set_count");

	return -EINVAL;
}
static int out(struct sm_metadata *smm)
{
	int r = 0;

	/*
	 * If we're not recursing then very bad things are happening.
	 */
	if (!smm->recursion_count) {
		DMERR("lost track of recursion depth");
		return -ENOMEM;
	}

	if (smm->recursion_count == 1 && smm->nr_uncommitted) {
		while (smm->nr_uncommitted && !r) {
			smm->nr_uncommitted--;
			r = commit_bop(smm, smm->uncommitted +
				       smm->nr_uncommitted);
			if (r)
				break;
		}
	}

	smm->recursion_count--;

	return r;
}
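This older out() drains a fixed array rather than the ring buffer of the variant at the top of this listing; the matching producer would bounds-check against that array. A hedged sketch (MAX_RECURSIVE_ALLOCATIONS and the field layout are assumptions):

/* Sketch: queue a block op while recursing, failing when the array is full. */
static int add_bop(struct sm_metadata *smm, enum block_op_type type,
		   dm_block_t b)
{
	struct block_op *op;

	if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
		DMERR("too many recursive allocations");
		return -ENOMEM;
	}

	op = smm->uncommitted + smm->nr_uncommitted++;
	op->type = type;
	op->block = b;

	return 0;
}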
Example #13
static struct request *get_failover_req(struct emc_handler *h,
					struct bio *bio, struct path *path)
{
	struct request *rq;
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	/* FIXME: Figure out why it fails with GFP_ATOMIC. */
	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (!rq) {
		DMERR("dm-emc: get_failover_req: blk_get_request failed");
		return NULL;
	}

	rq->bio = rq->biotail = bio;
	blk_rq_bio_prep(q, rq, bio);

	rq->rq_disk = bdev->bd_contains->bd_disk;

	/* bio backed don't set data */
	rq->buffer = rq->data = NULL;
	/* rq data_len used for pc cmd's request_bufflen */
	rq->data_len = bio->bi_size;

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	memset(&rq->cmd, 0, BLK_MAX_CDB);

	rq->timeout = EMC_FAILOVER_TIMEOUT;
	rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE);

	return rq;
}
Example #14
static void __exit dm_zero_exit(void)
{
	int r = dm_unregister_target(&zero_target);

	if (r < 0)
		DMERR("zero: unregister failed %d", r);
}
void
flashcache_handle_read_write_error(struct flashcache_copy_job *job)
{
	struct kcached_job *io_error_job;
	struct cache_c *dmc = job->dmc;
	int set;
	struct cache_set *cache_set;
	int i, index;

	DMERR("flashcache: Disk writeback failed ! read/write error %lu", 
	      job->job_io_regions.disk.sector);	
	index = CACHE_ADDR_TO_INDEX(dmc, 
				    job->job_io_regions.cache[0].sector);
	set = index / dmc->assoc;
	cache_set = &dmc->cache_sets[set];
	for (i = 0 ; i < job->nr_writes ; i++) {
		index = CACHE_ADDR_TO_INDEX(dmc, 
					    job->job_io_regions.cache[i].sector);
		io_error_job = job->job_base[i];
		io_error_job->action = WRITEDISK;
		spin_lock_irq(&cache_set->set_spin_lock);
		VERIFY(dmc->cache[index].cache_state & (DISKWRITEINPROG | VALID | DIRTY));
		VERIFY(cache_set->clean_inprog > 0);
		cache_set->clean_inprog--;
		VERIFY(atomic_read(&dmc->clean_inprog) > 0);
		atomic_dec(&dmc->clean_inprog);
		spin_unlock_irq(&cache_set->set_spin_lock);
		io_error_job->error = -EIO;
		flashcache_do_pending(io_error_job);
	}
	free_flashcache_copy_job(dmc, job);
	flashcache_clean_set(dmc, set, 0); /* Kick off more cleanings */
	dmc->flashcache_stats.cleanings++;
}
Example #16
static void __exit dm_emc_exit(void)
{
	int r = dm_unregister_hw_handler(&emc_hwh);

	if (r < 0)
		DMERR("emc: unregister failed %d", r);
}
static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
				  size_t max)
{
	DMERR("bootstrap doesn't support copy_root");

	return -EINVAL;
}
Example #18
static void __exit hp_sw_exit(void)
{
	int r;

	r = dm_unregister_hw_handler(&hp_sw_hwh);
	if (r < 0)
		DMERR("unregister failed %d", r);
}
static int node_check(struct dm_block_validator *v,
		      struct dm_block *b,
		      size_t block_size)
{
	struct btree_node *n = dm_block_data(b);
	struct node_header *h = &n->header;
	size_t value_size;
	__le32 csum_disk;
	uint32_t flags;

	if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
		DMERR("node_check failed blocknr %llu wanted %llu",
		      le64_to_cpu(h->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags,
					       block_size - sizeof(__le32),
					       BTREE_CSUM_XOR));
	if (csum_disk != h->csum) {
		DMERR("node_check failed csum %u wanted %u",
		      le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
		return -EILSEQ;
	}

	value_size = le32_to_cpu(h->value_size);

	if (sizeof(struct node_header) +
	    (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
		DMERR("node_check failed: max_entries too large");
		return -EILSEQ;
	}

	if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
		DMERR("node_check failed, too many entries");
		return -EILSEQ;
	}

	flags = le32_to_cpu(h->flags);
	if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
		DMERR("node_check failed, node is neither INTERNAL or LEAF");
		return -EILSEQ;
	}

	return 0;
}
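As with the superblock validator above, node_check() pairs with a prepare_for_write hook that stamps the fields being verified. A sketch mirroring the same checksum range:

/* Sketch: fill in blocknr and csum so node_check() passes on read-back. */
static void node_prepare_for_write(struct dm_block_validator *v,
				   struct dm_block *b,
				   size_t block_size)
{
	struct btree_node *n = dm_block_data(b);
	struct node_header *h = &n->header;

	h->blocknr = cpu_to_le64(dm_block_location(b));
	h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
					     block_size - sizeof(__le32),
					     BTREE_CSUM_XOR));
}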
static void chromeos_invalidate_kernel_endio(struct bio *bio, int err)
{
	const char *mode = ((bio->bi_rw & REQ_WRITE) ? "write" : "read");
	if (err)
		chromeos_set_need_recovery();

	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
		DMERR("invalidate_kernel: %s not supported", mode);
		chromeos_set_need_recovery();
	} else if (!bio_flagged(bio, BIO_UPTODATE)) {
		DMERR("invalidate_kernel: %s not up to date", mode);
		chromeos_set_need_recovery();
	} else {
		DMERR("invalidate_kernel: partition header %s completed", mode);
	}

	complete(bio->bi_private);
}
Example #21
static int __init dm_zero_init(void)
{
	int r = dm_register_target(&zero_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}
Example #22
File: dm-rq.c  Project: harlanstars/linux
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
	if (err)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
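The error unwinding above implies the normal teardown path; a minimal sketch of the cleanup counterpart, assuming the same tag_set ownership:

/* Sketch: release the tag set allocated by dm_mq_init_request_queue(). */
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}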
static int __init smq_init(void)
{
	int r;

	r = dm_cache_policy_register(&smq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		return -ENOMEM;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (r) {
		DMERR("register failed (as default) %d", r);
		dm_cache_policy_unregister(&smq_policy_type);
		return -ENOMEM;
	}

	return 0;
}
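The matching module exit simply unregisters both policies; a sketch:

/* Sketch: undo both registrations from smq_init(). */
static void __exit smq_exit(void)
{
	dm_cache_policy_unregister(&smq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);
}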
static int __init dm_verity_chromeos_init(void)
{
	int r;

	r = dm_verity_register_error_notifier(&chromeos_nb);
	if (r < 0)
		DMERR("failed to register handler: %d", r);
	else
		DMINFO("dm-verity-chromeos registered");
	return r;
}
Example #25
static int __init dm_emc_init(void)
{
	int r = dm_register_hw_handler(&emc_hwh);

	if (r < 0)
		DMERR("emc: register failed %d", r);

	DMINFO("dm-emc version 0.0.3 loaded");

	return r;
}
Example #26
File: dm-btree.c  Project: janfj/dd-wrt
static int top_frame(struct del_stack *s, struct frame **f)
{
	if (s->top < 0) {
		DMERR("btree deletion stack empty");
		return -EINVAL;
	}

	*f = s->spine + s->top;

	return 0;
}
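top_frame() peeks without consuming; the consuming side would drop the block's reference and its lock before decrementing top. A hedged sketch (the dm_tm_* pairing is an assumption about this del_stack):

/* Sketch: retire the top frame, releasing its transaction-manager lock. */
static void pop_frame(struct del_stack *s)
{
	struct frame *f = s->spine + s->top--;

	dm_tm_dec(s->tm, dm_block_location(f->b));
	dm_tm_unlock(s->tm, f->b);
}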
Example #27
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
	int r = brb_push(&smm->uncommitted, type, b);

	if (r) {
		DMERR("too many recursive allocations");
		return -ENOMEM;
	}

	return 0;
}
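A hedged sketch of a typical caller: when the space map is mid-recursion the mutation is queued via add_bop(), otherwise it runs directly inside an in()/out() pair (sm_ll_inc() and the allocation_event argument are assumptions):

/* Sketch: defer the increment while recursing, apply it directly otherwise. */
static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	int r;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (smm->recursion_count)
		r = add_bop(smm, BOP_INC, b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, b, &ev);
		out(smm);
	}

	return r;
}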
static void __init dm_substitute_devices(char *str, size_t str_len)
{
	char *candidate = str;
	char *candidate_end = str;
	char old_char;
	size_t len = 0;
	dev_t dev;

	if (str_len < 3)
		return;

	while (str && *str) {
		candidate = strchr(str, '/');
		if (!candidate)
			break;

		/* Avoid embedded slashes */
		if (candidate != str && *(candidate - 1) != DM_FIELD_SEP) {
			str = strchr(candidate, DM_FIELD_SEP);
			continue;
		}

		len = get_dm_option(candidate, &candidate_end, DM_FIELD_SEP);
		str = skip_spaces(candidate_end);
		if (len < 3 || len > 37)  /* name_to_dev_t max; maj:mix min */
			continue;

		/* Temporarily terminate with a nul */
		candidate_end--;
		old_char = *candidate_end;
		*candidate_end = '\0';

		DMDEBUG("converting candidate device '%s' to dev_t", candidate);
		/* Use the boot-time specific device naming */
		dev = name_to_dev_t(candidate);
		*candidate_end = old_char;

		DMDEBUG(" -> %u", dev);
		/* No suitable replacement found */
		if (!dev)
			continue;

		/* Rewrite the /dev/path as a major:minor */
		len = snprintf(candidate, len, "%u:%u", MAJOR(dev), MINOR(dev));
		if (!len) {
			DMERR("error substituting device major/minor.");
			break;
		}
		candidate += len;
		/* Pad out with spaces (fixing our nul) */
		while (candidate < candidate_end)
			*(candidate++) = DM_FIELD_SEP;
	}
}
static char * __init dm_setup_parse_device_args(char *str)
{
	char *next = NULL;
	size_t len = 0;

	/* Grab the logical name of the device to be exported to udev */
	len = get_dm_option(str, &next, DM_FIELD_SEP);
	if (!len) {
		DMERR("failed to parse device name");
		goto parse_fail;
	}
	len = min(len + 1, sizeof(dm_setup_args.name));
	strlcpy(dm_setup_args.name, str, len);  /* includes nul */
	str = skip_spaces(next);

	/* Grab the UUID value or "none" */
	len = get_dm_option(str, &next, DM_FIELD_SEP);
	if (!len) {
		DMERR("failed to parse device uuid");
		goto parse_fail;
	}
	len = min(len + 1, sizeof(dm_setup_args.uuid));
	strlcpy(dm_setup_args.uuid, str, len);
	str = skip_spaces(next);

	/* Determine if the table/device will be read only or read-write */
	if (!strncmp("ro,", str, 3)) {
		dm_setup_args.ro = 1;
	} else if (!strncmp("rw,", str, 3)) {
		dm_setup_args.ro = 0;
	} else {
		DMERR("failed to parse table mode");
		goto parse_fail;
	}
	str = skip_spaces(str + 3);

	return str;

parse_fail:
	return NULL;
}
Example #30
File: dm.c  Project: wxlong/Test
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("devfs_unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}