/*
 * Serialise one config node through a caller-supplied putline callback.
 * A scratch pool backs the per-line formatting and is torn down before
 * returning.  Returns 1 on success, 0 on failure.
 */
int write_config_node(const struct config_node *cn, putline_fn putline,
		      void *baton)
{
	int written;
	struct output_line outline;

	outline.fp = NULL;
	if (!(outline.mem = dm_pool_create("config_line", 1024)))
		return_0;
	outline.putline = putline;
	outline.putline_baton = baton;

	/* Destroy the scratch pool on both paths before reporting. */
	written = _write_config(cn, 0, &outline, 0);
	dm_pool_destroy(outline.mem);

	if (!written)
		return_0;

	return 1;
}
/* Checks that freed chunks are marked NOACCESS */
static void check_free2()
{
	/*
	 * 900 gets rounded up to a 1024-byte chunk; asking for 1024
	 * outright would have been rounded up to 2048.
	 */
	struct dm_pool *p = dm_pool_create("", 900);
	char *first, *second;

	assert(p);

	first = dm_pool_alloc(p, 123);
	assert(first);
	first = dm_pool_alloc(p, 1024);
	assert(first);

	second = dm_pool_alloc(p, 123);
	assert(second);
	second[0] = 'A';	/* should work fine */

	dm_pool_free(p, first);

	/*
	 * so now the first chunk is active, the second chunk has become
	 * the free one.
	 */
	second[0] = 'B';	/* should prompt an invalid write error */

	dm_pool_destroy(p);
}
/*
 * public interface
 */
/*
 * Allocate a fresh config tree backed by its own pool.  The optional
 * filename is duplicated into the pool.  Returns NULL on any
 * allocation failure (the pool is destroyed on the error paths).
 */
struct config_tree *create_config_tree(const char *filename, int keep_open)
{
	struct cs *c;
	struct dm_pool *mem = dm_pool_create("config", 10 * 1024);

	if (!mem) {
		log_error("Failed to allocate config pool.");
		return 0;
	}

	if (!(c = dm_pool_zalloc(mem, sizeof(*c)))) {
		log_error("Failed to allocate config tree.");
		dm_pool_destroy(mem);
		return 0;
	}

	c->mem = mem;
	c->cft.root = (struct config_node *) NULL;
	c->timestamp = 0;
	c->exists = 0;
	c->keep_open = keep_open;
	c->dev = 0;

	/*
	 * Fail outright if the filename copy cannot be allocated instead
	 * of silently handing back a tree with a NULL filename.
	 */
	if (filename && !(c->filename = dm_pool_strdup(c->mem, filename))) {
		log_error("Failed to duplicate config filename.");
		dm_pool_destroy(mem);
		return 0;
	}

	return &c->cft;
}
/* * dev_manager implementation. */ struct dev_manager *dev_manager_create(struct cmd_context *cmd, const char *vg_name) { struct dm_pool *mem; struct dev_manager *dm; if (!(mem = dm_pool_create("dev_manager", 16 * 1024))) return_NULL; if (!(dm = dm_pool_zalloc(mem, sizeof(*dm)))) goto_bad; dm->cmd = cmd; dm->mem = mem; if (!(dm->vg_name = dm_pool_strdup(dm->mem, vg_name))) goto_bad; dm->target_state = NULL; dm_udev_set_sync_support(cmd->current_settings.udev_sync); return dm; bad: dm_pool_destroy(mem); return NULL; }
/*
 * Looking at the code I'm not sure allocations that are near the chunk
 * size are working.  So this test is trying to exhibit a specific problem.
 */
static void check_allocation_near_chunk_size()
{
	int i;
	char *data;
	struct dm_pool *p = dm_pool_create("", 900);

	assert(p);

	/*
	 * allocate a lot and then free everything so we know there
	 * is a spare chunk.
	 */
	for (i = 0; i < 1000; i++) {
		data = dm_pool_alloc(p, 37);
		/* Check the pointer BEFORE writing through it (the old
		 * code memset() the block first and asserted after). */
		assert(data);
		memset(data, 0, 37);
	}

	dm_pool_empty(p);

	/* now we allocate something close to the chunk size ... */
	data = dm_pool_alloc(p, 1020);
	assert(data);
	memset(data, 0, 1020);

	dm_pool_destroy(p);
}
/*
 * Release a config tree: close any device it still holds open, then
 * free the pool that backs every allocation in the tree.
 */
void destroy_config_tree(struct config_tree *cft)
{
	struct cs *c = (struct cs *) cft;

	if (c->dev)
		dev_close(c->dev);

	dm_pool_destroy(c->mem);
}
int main(int argc, char **argv) { struct io_space *ios; struct list_head *pvs, *tmp; struct dm_pool *mem; init_log(stderr); init_debug(_LOG_INFO); if (!dev_cache_init()) { fprintf(stderr, "init of dev-cache failed\n"); exit(1); } if (!dev_cache_add_dir("/dev/loop")) { fprintf(stderr, "couldn't add /dev to dir-cache\n"); exit(1); } if (!(mem = dm_pool_create(10 * 1024))) { fprintf(stderr, "couldn't create pool\n"); exit(1); } ios = create_lvm1_format("/dev", mem, NULL); if (!ios) { fprintf(stderr, "failed to create io_space for format1\n"); exit(1); } pvs = ios->get_pvs(ios); if (!pvs) { fprintf(stderr, "couldn't read vg %s\n", argv[1]); exit(1); } list_for_each(tmp, pvs) { struct pv_list *pvl = list_entry(tmp, struct pv_list, list); dump_pv(&pvl->pv, stdout); } ios->destroy(ios); dm_pool_destroy(mem); dev_cache_exit(); dump_memory(); fin_log(); return 0; }
/*
 * Dump a config tree (or, with argc > 0, just the named nodes) to a file.
 * When file is NULL the fp stays NULL and the name is reported as
 * "stdout"; presumably _write_config() handles the NULL fp case — TODO
 * confirm against its definition.  Returns 1 on success, 0 on any error.
 */
int write_config_file(struct config_tree *cft, const char *file,
		      int argc, char **argv)
{
	const struct config_node *cn;
	int r = 1;
	struct output_line outline;
	outline.fp = NULL;
	outline.putline = NULL;

	if (!file)
		file = "stdout";
	else if (!(outline.fp = fopen(file, "w"))) {
		log_sys_error("open", file);
		return 0;
	}

	if (!(outline.mem = dm_pool_create("config_line", 1024))) {
		r = 0;
		goto_out;	/* fp (if open) is still closed at out: */
	}

	log_verbose("Dumping configuration to %s", file);
	if (!argc) {
		/* No node names given: write the whole tree. */
		if (!_write_config(cft->root, 0, &outline, 0)) {
			log_error("Failure while writing to %s", file);
			r = 0;
		}
	} else
		/* Write each requested node; keep going after a failure
		 * so every missing/broken node gets reported. */
		while (argc--) {
			if ((cn = find_config_node(cft->root, *argv))) {
				if (!_write_config(cn, 1, &outline, 0)) {
					log_error("Failure while writing to %s", file);
					r = 0;
				}
			} else {
				log_error("Configuration node %s not found", *argv);
				r = 0;
			}
			argv++;
		}

	dm_pool_destroy(outline.mem);

      out:
	/* fclose flushes buffered output, so its failure is a real
	 * write failure and must drop the return code. */
	if (outline.fp && lvm_fclose(outline.fp, file)) {
		stack;
		r = 0;
	}

	return r;
}
/* we shouldn't get any errors from this one */
static void check_object_growth()
{
	int i, ok;
	struct dm_pool *p = dm_pool_create("", 32);
	char data[100];
	void *obj;

	assert(p);
	memset(data, 0, sizeof(data));

	/* Check each step's return value so a growth failure is caught
	 * here rather than showing up as an unrelated valgrind error.
	 * (Calls stay outside assert() so NDEBUG cannot elide them.) */
	ok = dm_pool_begin_object(p, 43);
	assert(ok);
	for (i = 1; i < 100; i++) {
		ok = dm_pool_grow_object(p, data, i);
		assert(ok);
	}
	obj = dm_pool_end_object(p);
	assert(obj);	/* was assigned but never used before */

	dm_pool_destroy(p);
}
/*
 * Allocate an empty config tree.  The tree structure itself lives
 * inside its own pool, so dm_pool_destroy(cft->mem) frees everything.
 * Returns NULL on allocation failure.
 */
struct dm_config_tree *dm_config_create(void)
{
	struct dm_config_tree *cft;
	struct dm_pool *mem;

	if (!(mem = dm_pool_create("config", 10 * 1024))) {
		log_error("Failed to allocate config pool.");
		return 0;
	}

	if (!(cft = dm_pool_zalloc(mem, sizeof(*cft)))) {
		log_error("Failed to allocate config tree.");
		dm_pool_destroy(mem);
		return 0;
	}

	cft->mem = mem;

	return cft;
}
/*
 * Report the metadata usage of an LV as a percentage.  For cache LVs
 * and cache pools this is the dirty usage from the cache status (whose
 * pool is freed before returning); for thin pools it comes from
 * lv_thin_pool_percent().  Anything else yields DM_PERCENT_INVALID.
 */
static dm_percent_t _metadata_percent(const struct logical_volume *lv)
{
	struct lv_status_cache *status;
	dm_percent_t percent;

	if (lv_is_cache(lv) || lv_is_cache_pool(lv)) {
		if (!lv_cache_status(lv, &status)) {
			stack;
			return DM_PERCENT_INVALID;
		}
		/* Copy the value out before destroying the status pool. */
		percent = status->dirty_usage;
		dm_pool_destroy(status->mem);
		return percent;
	}

	if (lv_is_thin_pool(lv) && lv_thin_pool_percent(lv, 1, &percent))
		return percent;

	return DM_PERCENT_INVALID;
}
/* * FIXME: Quick hack. We can use caching to * prevent a total re-read, even so vg_number * causes the tools to check *every* pv. Yuck. * Put in separate file so it wouldn't contaminate * other code. */ int get_free_vg_number(struct format_instance *fid, struct dev_filter *filter, const char *candidate_vg, int *result) { struct list all_pvs; struct disk_list *dl; struct dm_pool *mem = dm_pool_create("lvm1 vg_number", 10 * 1024); int numbers[MAX_VG], i, r = 0; list_init(&all_pvs); if (!mem) { stack; return 0; } if (!read_pvs_in_vg(fid->fmt, NULL, filter, mem, &all_pvs)) { stack; goto out; } memset(numbers, 0, sizeof(numbers)); list_iterate_items(dl, struct disk_list, &all_pvs) { if (!*dl->pvd.vg_name || !strcmp((char *)dl->pvd.vg_name, candidate_vg)) continue; numbers[dl->vgd.vg_number] = 1; } for (i = 0; i < MAX_VG; i++) { if (!numbers[i]) { r = 1; *result = i; break; } } out: dm_pool_destroy(mem); return r; }
/*
 * Allocate COUNT small blocks, free one from the middle, and check
 * whether the later blocks are (wrongly) still accessible — valgrind
 * should flag the second access as an error.
 */
static void check_free()
{
	int n;
	char *chunk[COUNT];
	struct dm_pool *pool = dm_pool_create("blah", 1024);

	for (n = 0; n < COUNT; n++)
		chunk[n] = dm_pool_alloc(pool, 37);

	/* check we can access the last block */
	chunk[COUNT - 1][0] = 'E';
	if (chunk[COUNT - 1][0] == 'E')
		printf("first branch worked (as expected)\n");

	dm_pool_free(pool, chunk[5]);

	/* freeing chunk[5] should have invalidated everything after it */
	if (chunk[COUNT - 1][0] == 'E')
		printf("second branch worked (valgrind should have flagged this as an error)\n");

	dm_pool_destroy(pool);
}
static void check_alignment()
{
	/*
	 * Pool always tries to allocate blocks with particular alignment.
	 * So there are potentially small gaps between allocations. This
	 * test checks that valgrind is spotting illegal accesses to these
	 * gaps.
	 */
	struct dm_pool *p = dm_pool_create("blah", 1024);
	char *data1, *data2;
	char buffer[16];

	assert(p);

	data1 = dm_pool_alloc_aligned(p, 1, 4);
	assert(data1);

	data2 = dm_pool_alloc_aligned(p, 1, 4);
	assert(data2);	/* was a copy-paste bug: re-checked data1 */

	/* Touch the alignment gap after data1's single byte. */
	snprintf(buffer, sizeof(buffer), "%c", *(data1 + 1));	/* invalid read size 1 */
	dm_pool_destroy(p);
}
/* Tear down the module-wide pool used by dmstatus; always succeeds. */
int dmstatus_fini(void)
{
	dm_pool_destroy(_mem);

	return 0;
}
/*
 * Free a dev_manager.  The structure itself was allocated from its own
 * pool, so destroying the pool releases everything in one step.
 */
void dev_manager_destroy(struct dev_manager *dm)
{
	dm_pool_destroy(dm->mem);
}
/*
 * Destroy a config tree.  The tree lives inside its own pool, so
 * freeing the pool frees the tree and every node in it.
 */
void dm_config_destroy(struct dm_config_tree *cft)
{
	dm_pool_destroy(cft->mem);
}
/*
 * Serialise a config node through a putline callback and/or an output
 * spec.  A scratch pool backs the line formatting.  only_one limits the
 * dump to the node itself.  Returns 1 on success, 0 on failure.
 */
static int _write_node(const struct dm_config_node *cn, int only_one,
		       dm_putline_fn putline,
		       const struct dm_config_node_out_spec *out_spec,
		       void *baton)
{
	struct config_output out = {
		.mem = dm_pool_create("config_output", 1024),
		.putline = putline,
		.spec = out_spec,
		.baton = baton
	};

	if (!out.mem)
		return_0;

	if (!_write_config(cn, only_one, &out, 0)) {
		dm_pool_destroy(out.mem);
		return_0;
	}
	dm_pool_destroy(out.mem);
	return 1;
}

/* Write just the one node through a putline callback. */
int dm_config_write_one_node(const struct dm_config_node *cn,
			     dm_putline_fn putline, void *baton)
{
	return _write_node(cn, 1, putline, NULL, baton);
}

/* Write the node and its siblings through a putline callback. */
int dm_config_write_node(const struct dm_config_node *cn,
			 dm_putline_fn putline, void *baton)
{
	return _write_node(cn, 0, putline, NULL, baton);
}

/* Write just the one node through an output spec. */
int dm_config_write_one_node_out(const struct dm_config_node *cn,
				 const struct dm_config_node_out_spec *out_spec,
				 void *baton)
{
	return _write_node(cn, 1, NULL, out_spec, baton);
}

/* Write the node and its siblings through an output spec. */
int dm_config_write_node_out(const struct dm_config_node *cn,
			     const struct dm_config_node_out_spec *out_spec,
			     void *baton)
{
	return _write_node(cn, 0, NULL, out_spec, baton);
}

/*
 * parser
 */
/*
 * Duplicate the current string token with the surrounding quotes
 * stripped.  Returns a newly allocated copy, or NULL on a parse or
 * allocation error.
 */
static char *_dup_string_tok(struct parser *p)
{
	char *str;

	p->tb++, p->te--;	/* strip "'s */

	/* An empty string "" leaves te == tb; te < tb means the token
	 * was too short to be a quoted string at all. */
	if (p->te < p->tb) {
		log_error("Parse error at byte %" PRIptrdiff_t " (line %d): "
			  "expected a string token.",
			  p->tb - p->fb + 1, p->line);
		return NULL;
	}

	if (!(str = _dup_tok(p)))
		return_NULL;

	/* Restore te so the caller's token bounds stay consistent. */
	p->te++;

	return str;
}
/* Release the module-level config pool; always reports success. */
int config_fini(void)
{
	dm_pool_destroy(mem);

	return 0;
}
/* Destructor callback: the opaque argument is the pool to destroy. */
static void _mem_exit(void *mem)
{
	dm_pool_destroy(mem);
}
/* Release the module-level string pool; always reports success. */
int string_fini(void)
{
	dm_pool_destroy(mem);

	return 0;
}