/*
 * Allocate and initialise a new device-mapper table.
 *
 * @result:      on success, receives the new table (caller owns the
 *               initial holder reference).
 * @mode:        open mode for the table's devices.
 * @num_targets: requested target capacity; 0 means "use the default".
 *               Rounded up to a multiple of KEYS_PER_NODE so the btree
 *               index nodes are always full-width.
 * @md:          owning mapped_device.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	INIT_LIST_HEAD(&t->devices);
	/* The creator holds the first reference. */
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;

	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		/*
		 * Dead store "t = NULL" removed here: t is a local about
		 * to go out of scope, so nulling it after kfree() had no
		 * effect.
		 */
		kfree(t);
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}
/*----------------------------------------------------------------
 * disk log constructor/destructor
 *
 * argv contains log_device region_size followed optionally by [no]sync
 *--------------------------------------------------------------*/
static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	int r;
	size_t size;
	struct log_c *lc;
	struct dm_dev *dev;

	/* argv: <log_device> <region_size> [[no]sync] */
	if (argc < 2 || argc > 3) {
		DMWARN("wrong number of arguments to disk mirror log");
		return -EINVAL;
	}

	/* Take a reference on the log device for this table's lifetime. */
	r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
			  FMODE_READ | FMODE_WRITE, &dev);
	if (r)
		return r;

	/* The remaining arguments configure the in-core part of the log. */
	r = core_ctr(log, ti, argc - 1, argv + 1);
	if (r) {
		dm_put_device(ti, dev);
		return r;
	}

	lc = (struct log_c *) log->context;
	lc->log_dev = dev;

	/* setup the disk header fields */
	lc->header_location.bdev = lc->log_dev->bdev;
	lc->header_location.sector = 0;
	lc->header_location.count = 1;

	/*
	 * We can't read less than this amount, even though we'll
	 * not be using most of this space.
	 */
	lc->disk_header = vmalloc(1 << SECTOR_SHIFT);
	if (!lc->disk_header)
		goto bad;

	/* setup the disk bitset fields */
	lc->bits_location.bdev = lc->log_dev->bdev;
	lc->bits_location.sector = LOG_OFFSET;

	/* Round the bitset buffer up to whole sectors for disk I/O. */
	size = dm_round_up(lc->bitset_uint32_count * sizeof(uint32_t),
			   1 << SECTOR_SHIFT);
	lc->bits_location.count = size >> SECTOR_SHIFT;
	lc->disk_bits = vmalloc(size);
	if (!lc->disk_bits) {
		vfree(lc->disk_header);
		goto bad;
	}
	return 0;

	/*
	 * Unwind path: drop the device reference and let core_dtr() free
	 * everything core_ctr() allocated (the in-core bitsets).
	 */
 bad:
	dm_put_device(ti, lc->log_dev);
	core_dtr(log);
	return -ENOMEM;
}
/*
 * Work out the largest I/O (in sectors) that may start at @sector
 * without crossing the end of target @ti, and — when the target sets
 * split_io — without crossing the next split_io boundary either.
 */
static sector_t max_io_len(struct mapped_device *md, sector_t sector,
			   struct dm_target *ti)
{
	sector_t target_offset = sector - ti->begin;
	sector_t remaining = ti->len - target_offset;

	/* Clamp further if the target wants I/O split on a boundary. */
	if (ti->split_io) {
		sector_t to_boundary =
			dm_round_up(target_offset + 1, ti->split_io) -
			target_offset;

		if (remaining > to_boundary)
			remaining = to_boundary;
	}

	return remaining;
}
static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, unsigned int argc, char **argv, struct dm_dev *dev) { enum sync sync = DEFAULTSYNC; struct log_c *lc; uint32_t region_size; unsigned int region_count; size_t bitset_size, buf_size; int r; char dummy; if (argc < 1 || argc > 2) { DMWARN("wrong number of arguments to dirty region log"); return -EINVAL; } if (argc > 1) { if (!strcmp(argv[1], "sync")) sync = FORCESYNC; else if (!strcmp(argv[1], "nosync")) sync = NOSYNC; else { DMWARN("unrecognised sync argument to " "dirty region log: %s", argv[1]); return -EINVAL; } } if (sscanf(argv[0], "%u%c", ®ion_size, &dummy) != 1 || !_check_region_size(ti, region_size)) { DMWARN("invalid region size %s", argv[0]); return -EINVAL; } region_count = dm_sector_div_up(ti->len, region_size); lc = kmalloc(sizeof(*lc), GFP_KERNEL); if (!lc) { DMWARN("couldn't allocate core log"); return -ENOMEM; } lc->ti = ti; lc->touched_dirtied = 0; lc->touched_cleaned = 0; lc->flush_failed = 0; lc->region_size = region_size; lc->region_count = region_count; lc->sync = sync; /* * Work out how many "unsigned long"s we need to hold the bitset. */ bitset_size = dm_round_up(region_count, sizeof(*lc->clean_bits) << BYTE_SHIFT); bitset_size >>= BYTE_SHIFT; lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits); /* * Disk log? */ if (!dev) { lc->clean_bits = vmalloc(bitset_size); if (!lc->clean_bits) { DMWARN("couldn't allocate clean bitset"); kfree(lc); return -ENOMEM; } lc->disk_header = NULL; } else { lc->log_dev = dev; lc->log_dev_failed = 0; lc->log_dev_flush_failed = 0; lc->header_location.bdev = lc->log_dev->bdev; lc->header_location.sector = 0; /* * Buffer holds both header and bitset. */ buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size, bdev_logical_block_size(lc->header_location. 
bdev)); if (buf_size > i_size_read(dev->bdev->bd_inode)) { DMWARN("log device %s too small: need %llu bytes", dev->name, (unsigned long long)buf_size); kfree(lc); return -EINVAL; } lc->header_location.count = buf_size >> SECTOR_SHIFT; lc->io_req.mem.type = DM_IO_VMA; lc->io_req.notify.fn = NULL; lc->io_req.client = dm_io_client_create(); if (IS_ERR(lc->io_req.client)) { r = PTR_ERR(lc->io_req.client); DMWARN("couldn't allocate disk io client"); kfree(lc); return r; } lc->disk_header = vmalloc(buf_size); if (!lc->disk_header) { DMWARN("couldn't allocate disk log buffer"); dm_io_client_destroy(lc->io_req.client); kfree(lc); return -ENOMEM; } lc->io_req.mem.ptr.vma = lc->disk_header; lc->clean_bits = (void *)lc->disk_header + (LOG_OFFSET << SECTOR_SHIFT); } memset(lc->clean_bits, -1, bitset_size); lc->sync_bits = vmalloc(bitset_size); if (!lc->sync_bits) { DMWARN("couldn't allocate sync bitset"); if (!dev) vfree(lc->clean_bits); else dm_io_client_destroy(lc->io_req.client); vfree(lc->disk_header); kfree(lc); return -ENOMEM; } memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); lc->sync_count = (sync == NOSYNC) ? region_count : 0; lc->recovering_bits = vzalloc(bitset_size); if (!lc->recovering_bits) { DMWARN("couldn't allocate sync bitset"); vfree(lc->sync_bits); if (!dev) vfree(lc->clean_bits); else dm_io_client_destroy(lc->io_req.client); vfree(lc->disk_header); kfree(lc); return -ENOMEM; } lc->sync_search = 0; log->context = lc; return 0; }
/*----------------------------------------------------------------
 * disk log constructor/destructor
 *
 * argv contains 2 - 4 arguments:
 *	<log_device> <region_size> [[no]sync] [block_on_error]
 *--------------------------------------------------------------*/
static int disk_ctr(struct dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	int r;
	size_t size, bitset_size;
	struct log_c *lc;
	struct dm_dev *dev;
	uint32_t *clean_bits;

	if (argc < 2 || argc > 4) {
		DMWARN("wrong number of arguments to disk mirror log");
		return -EINVAL;
	}

	/* Take a reference on the log device for this table's lifetime. */
	r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
			  FMODE_READ | FMODE_WRITE, &dev);
	if (r)
		return r;

	/* The remaining arguments configure the in-core part of the log. */
	r = core_ctr(log, ti, argc - 1, argv + 1);
	if (r) {
		dm_put_device(ti, dev);
		return r;
	}

	lc = (struct log_c *) log->context;
	lc->log_dev = dev;
	lc->log_dev_failed = 0;

	/* setup the disk header fields */
	lc->header_location.bdev = lc->log_dev->bdev;
	lc->header_location.sector = 0;

	/* Include both the header and the bitset in one buffer. */
	bitset_size = lc->bitset_uint32_count * sizeof(uint32_t);
	/* Round up to the device's hardware sector size for disk I/O. */
	size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
			   ti->limits.hardsect_size);

	if (size > dev->bdev->bd_inode->i_size) {
		DMWARN("log device %s too small: need %llu bytes",
		       dev->name, (unsigned long long)size);
		r = -EINVAL;
		goto bad;
	}

	lc->header_location.count = size >> SECTOR_SHIFT;

	lc->disk_header = vmalloc(size);
	if (!lc->disk_header) {
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * Deallocate the clean_bits buffer that was allocated in core_ctr()
	 * and point it at the appropriate place in the disk_header buffer.
	 */
	clean_bits = lc->clean_bits;
	lc->clean_bits = (void *)lc->disk_header + (LOG_OFFSET << SECTOR_SHIFT);
	/* Preserve the dirty state core_ctr() initialised before repointing. */
	memcpy(lc->clean_bits, clean_bits, bitset_size);
	vfree(clean_bits);

	lc->io_req.mem.type = DM_IO_VMA;
	lc->io_req.client = dm_io_client_create(dm_div_up(size, PAGE_SIZE));
	if (IS_ERR(lc->io_req.client)) {
		r = PTR_ERR(lc->io_req.client);
		DMWARN("couldn't allocate disk io client");
		vfree(lc->disk_header);
		goto bad;
	}

	return 0;

	/*
	 * Unwind: drop the device ref; core_dtr() frees the rest of the
	 * core context.  NOTE(review): after the repointing above,
	 * clean_bits lives inside disk_header — presumably core_dtr()
	 * handles that case; verify against its definition.
	 */
bad:
	dm_put_device(ti, lc->log_dev);
	core_dtr(log);
	return r;
}
static int core_ctr(struct dirty_log *log, struct dm_target *ti, unsigned int argc, char **argv) { enum sync sync = DEFAULTSYNC; int failure_response = DMLOG_IOERR_IGNORE; struct log_c *lc; uint32_t region_size; unsigned int region_count; size_t bitset_size; unsigned i; if (argc < 1 || argc > 3) { DMWARN("wrong number of arguments to mirror log"); return -EINVAL; } for (i = 1; i < argc; i++) { if (!strcmp(argv[i], "sync")) sync = FORCESYNC; else if (!strcmp(argv[i], "nosync")) sync = NOSYNC; else if (!strcmp(argv[i], "block_on_error")) failure_response = DMLOG_IOERR_BLOCK; else { DMWARN("unrecognised sync argument to mirror log: %s", argv[i]); return -EINVAL; } } if (sscanf(argv[0], "%u", ®ion_size) != 1) { DMWARN("invalid region size string"); return -EINVAL; } region_count = dm_sector_div_up(ti->len, region_size); lc = kmalloc(sizeof(*lc), GFP_KERNEL); if (!lc) { DMWARN("couldn't allocate core log"); return -ENOMEM; } lc->ti = ti; lc->touched = 0; lc->region_size = region_size; lc->region_count = region_count; lc->sync = sync; lc->failure_response = failure_response; /* * Work out how many "unsigned long"s we need to hold the bitset. */ bitset_size = dm_round_up(region_count, sizeof(uint32_t) << BYTE_SHIFT); bitset_size >>= BYTE_SHIFT; lc->bitset_uint32_count = bitset_size / sizeof(uint32_t); lc->clean_bits = vmalloc(bitset_size); if (!lc->clean_bits) { DMWARN("couldn't allocate clean bitset"); kfree(lc); return -ENOMEM; } memset(lc->clean_bits, -1, bitset_size); lc->sync_bits = vmalloc(bitset_size); if (!lc->sync_bits) { DMWARN("couldn't allocate sync bitset"); vfree(lc->clean_bits); kfree(lc); return -ENOMEM; } memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); lc->sync_count = (sync == NOSYNC) ? 
region_count : 0; lc->recovering_bits = vmalloc(bitset_size); if (!lc->recovering_bits) { DMWARN("couldn't allocate recovering bitset"); vfree(lc->sync_bits); vfree(lc->clean_bits); kfree(lc); return -ENOMEM; } memset(lc->recovering_bits, 0, bitset_size); lc->sync_search = 0; log->context = lc; return 0; }
static int create_log_context(struct dirty_log *log, struct dm_target *ti, unsigned int argc, char **argv, struct dm_dev *dev) { enum sync sync = DEFAULTSYNC; struct log_c *lc; uint32_t region_size; unsigned int region_count; size_t bitset_size, buf_size; if (argc < 1 || argc > 2) { DMWARN("wrong number of arguments to mirror log"); return -EINVAL; } if (argc > 1) { if (!strcmp(argv[1], "sync")) sync = FORCESYNC; else if (!strcmp(argv[1], "nosync")) sync = NOSYNC; else { DMWARN("unrecognised sync argument to mirror log: %s", argv[1]); return -EINVAL; } } if (sscanf(argv[0], "%u", ®ion_size) != 1) { DMWARN("invalid region size string"); return -EINVAL; } region_count = dm_sector_div_up(ti->len, region_size); lc = kmalloc(sizeof(*lc), GFP_KERNEL); if (!lc) { DMWARN("couldn't allocate core log"); return -ENOMEM; } lc->ti = ti; lc->touched = 0; lc->region_size = region_size; lc->region_count = region_count; lc->sync = sync; /* * Work out how many "unsigned long"s we need to hold the bitset. */ bitset_size = dm_round_up(region_count, sizeof(*lc->clean_bits) << BYTE_SHIFT); bitset_size >>= BYTE_SHIFT; lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits); /* * Disk log? */ if (!dev) { lc->clean_bits = vmalloc(bitset_size); if (!lc->clean_bits) { DMWARN("couldn't allocate clean bitset"); kfree(lc); return -ENOMEM; } lc->disk_header = NULL; } else { lc->log_dev = dev; lc->header_location.bdev = lc->log_dev->bdev; lc->header_location.sector = 0; /* * Buffer holds both header and bitset. 
*/ buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size, ti->limits.hardsect_size); lc->header_location.count = buf_size >> SECTOR_SHIFT; lc->disk_header = vmalloc(buf_size); if (!lc->disk_header) { DMWARN("couldn't allocate disk log buffer"); kfree(lc); return -ENOMEM; } lc->clean_bits = (void *)lc->disk_header + (LOG_OFFSET << SECTOR_SHIFT); } memset(lc->clean_bits, -1, bitset_size); lc->sync_bits = vmalloc(bitset_size); if (!lc->sync_bits) { DMWARN("couldn't allocate sync bitset"); if (!dev) vfree(lc->clean_bits); vfree(lc->disk_header); kfree(lc); return -ENOMEM; } memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); lc->sync_count = (sync == NOSYNC) ? region_count : 0; lc->recovering_bits = vmalloc(bitset_size); if (!lc->recovering_bits) { DMWARN("couldn't allocate sync bitset"); vfree(lc->sync_bits); if (!dev) vfree(lc->clean_bits); vfree(lc->disk_header); kfree(lc); return -ENOMEM; } memset(lc->recovering_bits, 0, bitset_size); lc->sync_search = 0; log->context = lc; return 0; }