/*
 * Undo _lock_mem(): munlock the process's memory and release the
 * resources taken when the locks were established.
 *
 * Order matters: the maps are munlocked first (while _maps_buffer is
 * still valid), then the /proc/self/maps snapshot is torn down, and
 * only at the end is the raised scheduling priority dropped and the
 * reserved memory released.
 */
static void _unlock_mem(struct cmd_context *cmd)
{
	size_t unlock_mstats;

	log_very_verbose("Unlocking memory");

	/* Walk the recorded maps and munlock them; on failure just log
	 * the stack trace and carry on with the teardown. */
	if (!_memlock_maps(cmd, LVM_MUNLOCK, &unlock_mstats))
		stack;

	if (!_use_mlockall) {
		/* Per-map mode: undo the mmap protection changes and drop
		 * the cached copy of /proc/self/maps. */
		_restore_mmap();
		if (close(_maps_fd))
			log_sys_error("close", _procselfmaps);
		dm_free(_maps_buffer);
		_maps_buffer = NULL;
		/* Sanity check: more memory was munlocked now than was
		 * mlocked originally, i.e. mappings grew while locked. */
		if (_mstats < unlock_mstats) {
			if ((_mstats + lvm_getpagesize()) < unlock_mstats)
				/* Difference exceeds one page: genuine overuse
				 * of the reserved memory pool. */
				log_error(INTERNAL_ERROR "Reserved memory (%ld) not enough: used %ld. Increase activation/reserved_memory?",
					  (long)_mstats, (long)unlock_mstats);
			else
				/* FIXME Believed due to incorrect use of yes_no_prompt while locks held */
				log_debug_mem("Suppressed internal error: Maps lock %ld < unlock %ld, a one-page difference.",
					      (long)_mstats, (long)unlock_mstats);
		}
	}

	/* Restore the scheduling priority saved when memory was locked. */
	if (setpriority(PRIO_PROCESS, 0, _priority))
		log_error("setpriority %u failed: %s", _priority,
			  strerror(errno));

	/* Give back the memory reserved for the critical section. */
	_release_memory();
}
static int _aligned_io(struct device_area *where, void *buffer, int should_write) { void *bounce; unsigned int block_size = 0; uintptr_t mask; struct device_area widened; if (!(where->dev->flags & DEV_REGULAR) && !_get_block_size(where->dev, &block_size)) return_0; if (!block_size) block_size = lvm_getpagesize(); _widen_region(block_size, where, &widened); /* Do we need to use a bounce buffer? */ mask = block_size - 1; if (!memcmp(where, &widened, sizeof(widened)) && !((uintptr_t) buffer & mask)) return _io(where, buffer, should_write); /* Allocate a bounce buffer with an extra block */ if (!(bounce = alloca((size_t) widened.size + block_size))) { log_error("Bounce buffer alloca failed"); return 0; } /* * Realign start of bounce buffer (using the extra sector) */ if (((uintptr_t) bounce) & mask) bounce = (void *) ((((uintptr_t) bounce) + mask) & ~mask); /* channel the io through the bounce buffer */ if (!_io(&widened, bounce, 0)) { if (!should_write) return_0; /* FIXME pre-extend the file */ memset(bounce, '\n', widened.size); } if (should_write) { memcpy(bounce + (where->start - widened.start), buffer, (size_t) where->size); /* ... then we write */ return _io(&widened, bounce, 1); } memcpy(buffer, bounce + (where->start - widened.start), (size_t) where->size); return 1; }
/*
 * Write one word into each page of [mem, mem + size) so every page is
 * faulted in and backed by real memory (stopping a word short of the
 * end so the final store stays in bounds).
 */
static void _touch_memory(void *mem, size_t size)
{
	const size_t page = lvm_getpagesize();
	char *limit = (char *) mem + size - sizeof(long);
	char *p;

	for (p = mem; p < limit; p += page)
		*(long *) p = 1;
}
/*
 * Read a config tree from an open device, either by mmap()ing a regular
 * file or by reading (possibly two wrapped extents of) a circular
 * buffer, optionally verifying a checksum before parsing.
 *
 * Returns 1 on success, 0 on failure.
 *
 * Fix: a failed mmap() previously jumped to 'out', whose mmap branch
 * unconditionally called munmap() on the (caddr_t)-1 failure value —
 * munmap of a garbage address plus a second spurious error.  Return
 * directly instead: nothing has been mapped or allocated at that point.
 */
int read_config_fd(struct config_tree *cft, struct device *dev,
		   off_t offset, size_t size, off_t offset2, size_t size2,
		   checksum_fn_t checksum_fn, uint32_t checksum)
{
	struct cs *c = (struct cs *) cft;
	struct parser *p;
	int r = 0;
	int use_mmap = 1;
	off_t mmap_offset = 0;
	char *buf = NULL;

	if (!(p = dm_pool_alloc(c->mem, sizeof(*p))))
		return_0;
	p->mem = c->mem;

	/* Only use mmap with regular files */
	if (!(dev->flags & DEV_REGULAR) || size2)
		use_mmap = 0;

	if (use_mmap) {
		/* mmap needs a page-aligned file offset; keep the excess. */
		mmap_offset = offset % lvm_getpagesize();
		/* memory map the file */
		p->fb = mmap((caddr_t) 0, size + mmap_offset, PROT_READ,
			     MAP_PRIVATE, dev_fd(dev), offset - mmap_offset);
		if (p->fb == (caddr_t) (-1)) {
			log_sys_error("mmap", dev_name(dev));
			/* Nothing mapped/allocated yet; must not reach the
			 * 'out' munmap path with an invalid address. */
			return 0;
		}
		p->fb = p->fb + mmap_offset;
	} else {
		if (!(buf = dm_malloc(size + size2)))
			return_0;
		/* Read both extents of the (possibly wrapped) buffer. */
		if (!dev_read_circular(dev, (uint64_t) offset, size,
				       (uint64_t) offset2, size2, buf))
			goto out;
		p->fb = buf;
	}

	/* Checksum covers both extents, chained through checksum_fn. */
	if (checksum_fn && checksum !=
	    (checksum_fn(checksum_fn(INITIAL_CRC, (const uint8_t *)p->fb, size),
			 (const uint8_t *)(p->fb + size), size2))) {
		log_error("%s: Checksum error", dev_name(dev));
		goto out;
	}

	p->fe = p->fb + size + size2;

	if (!_parse_config_file(p, cft))
		goto_out;

	r = 1;

out:
	if (!use_mmap)
		dm_free(buf);
	else {
		/* unmap the file */
		if (munmap((char *) (p->fb - mmap_offset),
			   size + mmap_offset)) {
			log_sys_error("munmap", dev_name(dev));
			r = 0;
		}
	}

	return r;
}
/*
 * Change the read-ahead setting of an LV from the --readahead argument,
 * then commit the new metadata to disk and to the kernel.
 *
 * Returns 1 on success, 0 on failure (including "no change needed").
 *
 * The write sequence is order-critical: vg_write (new metadata on
 * disk, uncommitted) -> suspend_lv -> vg_commit -> resume_lv, with
 * vg_revert / resume on the corresponding failure paths.
 */
static int lvchange_readahead(struct cmd_context *cmd,
			      struct logical_volume *lv)
{
	unsigned read_ahead = 0;
	/* Page size expressed in 512-byte sectors. */
	unsigned pagesize = (unsigned) lvm_getpagesize() >> SECTOR_SHIFT;
	int r = 0;

	read_ahead = arg_uint_value(cmd, readahead_ARG, 0);

	/* Restricted metadata formats can only store a limited range. */
	if (read_ahead != DM_READ_AHEAD_AUTO &&
	    (lv->vg->fid->fmt->features & FMT_RESTRICTED_READAHEAD) &&
	    (read_ahead < 2 || read_ahead > 120)) {
		log_error("Metadata only supports readahead values between 2 and 120.");
		return 0;
	}

	/* Round explicit sector counts to a whole number of pages
	 * (rounding up to one page minimum, otherwise down). */
	if (read_ahead != DM_READ_AHEAD_AUTO &&
	    read_ahead != DM_READ_AHEAD_NONE && read_ahead % pagesize) {
		if (read_ahead < pagesize)
			read_ahead = pagesize;
		else
			read_ahead = (read_ahead / pagesize) * pagesize;
		log_warn("WARNING: Overriding readahead to %u sectors, a multiple "
			 "of %uK page size.", read_ahead, pagesize >> 1);
	}

	/* Nothing to do: current value already matches the request. */
	if (lv->read_ahead == read_ahead) {
		if (read_ahead == DM_READ_AHEAD_AUTO)
			log_error("Read ahead is already auto for \"%s\"",
				  lv->name);
		else
			log_error("Read ahead is already %u for \"%s\"",
				  read_ahead, lv->name);
		return 0;
	}

	lv->read_ahead = read_ahead;

	log_verbose("Setting read ahead to %u for \"%s\"", read_ahead,
		    lv->name);

	log_very_verbose("Updating logical volume \"%s\" on disk(s)", lv->name);
	/* Write the updated metadata (not yet committed). */
	if (!vg_write(lv->vg))
		return_0;

	/* Suspend before committing so the device sees a clean switch. */
	if (!suspend_lv(cmd, lv)) {
		log_error("Failed to lock %s", lv->name);
		vg_revert(lv->vg);
		goto out;
	}

	if (!vg_commit(lv->vg)) {
		/* Commit failed: try to resume so the LV is not left
		 * suspended. */
		if (!resume_lv(cmd, lv))
			stack;
		goto_out;
	}

	log_very_verbose("Updating permissions for \"%s\" in kernel", lv->name);
	if (!resume_lv(cmd, lv)) {
		log_error("Problem reactivating %s", lv->name);
		goto out;
	}

	r = 1;
out:
	/* Back up the (possibly updated) metadata either way. */
	backup(lv->vg);
	return r;
}
return_0; lp->thin = 0; } return 1; } /* * Generic mirror parameter checks. * FIXME: Should eventually be moved into lvm library. */ static int _validate_mirror_params(const struct cmd_context *cmd __attribute__((unused)), const struct lvcreate_params *lp) { int pagesize = lvm_getpagesize(); if (lp->region_size & (lp->region_size - 1)) { log_error("Region size (%" PRIu32 ") must be a power of 2", lp->region_size); return 0; } if (lp->region_size % (pagesize >> SECTOR_SHIFT)) { log_error("Region size (%" PRIu32 ") must be a multiple of " "machine memory page size (%d)", lp->region_size, pagesize >> SECTOR_SHIFT); return 0; } if (!lp->region_size) {
/*
 * Read a config tree from an open device, either by mmap()ing a regular
 * file or by reading (possibly two wrapped extents of) a circular
 * buffer, optionally verifying a checksum before parsing.
 *
 * Only valid for CONFIG_FILE / CONFIG_PROFILE config sources.
 * Returns 1 on success, 0 on failure.
 *
 * Fix: a failed mmap() previously jumped to 'out', whose mmap branch
 * unconditionally called munmap() on the (caddr_t)-1 failure value —
 * munmap of a garbage address plus a second spurious error.  Return
 * directly instead: nothing has been mapped or allocated at that point.
 */
int config_file_read_fd(struct dm_config_tree *cft, struct device *dev,
			off_t offset, size_t size, off_t offset2, size_t size2,
			checksum_fn_t checksum_fn, uint32_t checksum)
{
	char *fb, *fe;
	int r = 0;
	int use_mmap = 1;
	off_t mmap_offset = 0;
	char *buf = NULL;
	struct config_source *cs = dm_config_get_custom(cft);

	if ((cs->type != CONFIG_FILE) && (cs->type != CONFIG_PROFILE)) {
		log_error(INTERNAL_ERROR "config_file_read_fd: expected file or profile config source, "
			  "found %s config source.", _config_source_names[cs->type]);
		return 0;
	}

	/* Only use mmap with regular files */
	if (!(dev->flags & DEV_REGULAR) || size2)
		use_mmap = 0;

	if (use_mmap) {
		/* mmap needs a page-aligned file offset; keep the excess. */
		mmap_offset = offset % lvm_getpagesize();
		/* memory map the file */
		fb = mmap((caddr_t) 0, size + mmap_offset, PROT_READ,
			  MAP_PRIVATE, dev_fd(dev), offset - mmap_offset);
		if (fb == (caddr_t) (-1)) {
			log_sys_error("mmap", dev_name(dev));
			/* Nothing mapped/allocated yet; must not reach the
			 * 'out' munmap path with an invalid address. */
			return 0;
		}
		fb = fb + mmap_offset;
	} else {
		if (!(buf = dm_malloc(size + size2))) {
			log_error("Failed to allocate circular buffer.");
			return 0;
		}
		/* Read both extents of the (possibly wrapped) buffer. */
		if (!dev_read_circular(dev, (uint64_t) offset, size,
				       (uint64_t) offset2, size2, buf))
			goto out;
		fb = buf;
	}

	/* Checksum covers both extents, chained through checksum_fn. */
	if (checksum_fn && checksum !=
	    (checksum_fn(checksum_fn(INITIAL_CRC, (const uint8_t *)fb, size),
			 (const uint8_t *)(fb + size), size2))) {
		log_error("%s: Checksum error", dev_name(dev));
		goto out;
	}

	fe = fb + size + size2;

	if (!dm_config_parse(cft, fb, fe))
		goto_out;

	r = 1;

out:
	if (!use_mmap)
		dm_free(buf);
	else {
		/* unmap the file */
		if (munmap(fb - mmap_offset, size + mmap_offset)) {
			log_sys_error("munmap", dev_name(dev));
			r = 0;
		}
	}

	return r;
}