struct dm_list *tag_list_copy(struct dm_pool *p, struct dm_list *tag_list)
{
        struct dm_list *list;
        lvm_str_list_t *lsl;
        struct str_list *sl;

        if (!(list = dm_pool_zalloc(p, sizeof(*list)))) {
                log_errno(ENOMEM, "Memory allocation fail for dm_list.");
                return NULL;
        }
        dm_list_init(list);

        dm_list_iterate_items(sl, tag_list) {
                if (!(lsl = dm_pool_zalloc(p, sizeof(*lsl)))) {
                        log_errno(ENOMEM, "Memory allocation fail for lvm_str_list.");
                        return NULL;
                }
                if (!(lsl->str = dm_pool_strdup(p, sl->str))) {
                        log_errno(ENOMEM, "Memory allocation fail for lvm_str_list->str.");
                        return NULL;
                }
                dm_list_add(list, &lsl->list);
        }

        return list;
}
int dm_get_status_thin(struct dm_pool *mem, const char *params,
                       struct dm_status_thin **status)
{
        struct dm_status_thin *s;

        if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin)))) {
                log_error("Failed to allocate thin status structure.");
                return 0;
        }

        if (strchr(params, '-')) {
                s->mapped_sectors = 0;
                s->highest_mapped_sector = 0;
        } else if (sscanf(params, FMTu64 " " FMTu64,
                          &s->mapped_sectors,
                          &s->highest_mapped_sector) != 2) {
                dm_pool_free(mem, s);
                log_error("Failed to parse thin params: %s.", params);
                return 0;
        }

        *status = s;

        return 1;
}
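/*
 * Usage sketch (not part of the original source): shows how a caller might
 * decode a thin target status line with dm_get_status_thin().  The params
 * string normally comes from dm_get_next_target() after a DM_DEVICE_STATUS
 * ioctl and looks like "<mapped_sectors> <highest_mapped_sector>", or "-"
 * when nothing has been provisioned yet.  The helper name and the use of
 * log_print below are illustrative assumptions only.
 */
static void _example_print_thin_status(struct dm_pool *mem, const char *params)
{
        struct dm_status_thin *ts = NULL;

        if (!dm_get_status_thin(mem, params, &ts))
                return;

        /* ts was zero-allocated from mem; the pool owns the memory. */
        log_print("thin: " FMTu64 " sectors mapped, highest mapped sector " FMTu64 ".",
                  ts->mapped_sectors, ts->highest_mapped_sector);
}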
/*
 * public interface
 */
struct dm_config_tree *config_file_open(const char *filename, int keep_open)
{
        struct dm_config_tree *cft = dm_config_create();
        struct config_file *cf;

        if (!cft)
                return NULL;

        cf = dm_pool_zalloc(cft->mem, sizeof(struct config_file));
        if (!cf)
                goto fail;

        cf->timestamp = 0;
        cf->exists = 0;
        cf->keep_open = keep_open;
        dm_config_set_custom(cft, cf);

        if (filename &&
            !(cf->filename = dm_pool_strdup(cft->mem, filename))) {
                log_error("Failed to duplicate filename.");
                goto fail;
        }

        return cft;

fail:
        dm_config_destroy(cft);
        return NULL;
}
/*
 * public interface
 */
struct config_tree *create_config_tree(const char *filename, int keep_open)
{
        struct cs *c;
        struct dm_pool *mem = dm_pool_create("config", 10 * 1024);

        if (!mem) {
                log_error("Failed to allocate config pool.");
                return 0;
        }

        if (!(c = dm_pool_zalloc(mem, sizeof(*c)))) {
                log_error("Failed to allocate config tree.");
                dm_pool_destroy(mem);
                return 0;
        }

        c->mem = mem;
        c->cft.root = (struct config_node *) NULL;
        c->timestamp = 0;
        c->exists = 0;
        c->keep_open = keep_open;
        c->dev = 0;

        if (filename)
                c->filename = dm_pool_strdup(c->mem, filename);

        return &c->cft;
}
/*
 * dev_manager implementation.
 */
struct dev_manager *dev_manager_create(struct cmd_context *cmd,
                                       const char *vg_name)
{
        struct dm_pool *mem;
        struct dev_manager *dm;

        if (!(mem = dm_pool_create("dev_manager", 16 * 1024)))
                return_NULL;

        if (!(dm = dm_pool_zalloc(mem, sizeof(*dm))))
                goto_bad;

        dm->cmd = cmd;
        dm->mem = mem;

        if (!(dm->vg_name = dm_pool_strdup(dm->mem, vg_name)))
                goto_bad;

        dm->target_state = NULL;

        dm_udev_set_sync_support(cmd->current_settings.udev_sync);

        return dm;

bad:
        dm_pool_destroy(mem);
        return NULL;
}
int override_config_tree_from_string(struct cmd_context *cmd,
                                     const char *config_settings)
{
        struct dm_config_tree *cft_new;
        struct config_source *cs = dm_config_get_custom(cmd->cft);

        /*
         * Follow this sequence:
         * CONFIG_STRING -> CONFIG_PROFILE -> CONFIG_FILE/CONFIG_MERGED_FILES
         */

        if (cs->type == CONFIG_STRING) {
                log_error(INTERNAL_ERROR "override_config_tree_from_string: "
                          "config cascade already contains a string config.");
                return 0;
        }

        if (!(cft_new = dm_config_from_string(config_settings))) {
                log_error("Failed to set overridden configuration entries.");
                return 0;
        }

        if (!(cs = dm_pool_zalloc(cft_new->mem, sizeof(struct config_source)))) {
                log_error("Failed to allocate config source.");
                dm_config_destroy(cft_new);
                return 0;
        }

        cs->type = CONFIG_STRING;
        dm_config_set_custom(cft_new, cs);

        cmd->cft = dm_config_insert_cascaded_tree(cft_new, cmd->cft);

        return 1;
}
struct rx_node *rx_parse_tok(struct dm_pool *mem,
                             const char *begin, const char *end)
{
        struct rx_node *r;
        struct parse_sp *ps = dm_pool_zalloc(mem, sizeof(*ps));

        if (!ps)
                return_NULL;

        ps->mem = mem;
        if (!(ps->charset = dm_bitset_create(mem, 256))) {
                log_error("Regex charset allocation failed");
                dm_pool_free(mem, ps);
                return NULL;
        }
        ps->cursor = begin;
        ps->rx_end = end;
        _rx_get_token(ps);      /* load the first token */

        if (!(r = _or_term(ps))) {
                log_error("Parse error in regex");
                dm_pool_free(mem, ps);
                return NULL;
        }

        if (!(r = _optimise(mem, r))) {
                log_error("Regex optimisation error");
                dm_pool_free(mem, ps);
                return NULL;
        }

        return r;
}
/* Must be called after pvs are imported */
static struct user_subpool *_build_usp(struct dm_list *pls, struct dm_pool *mem,
                                       int *sps)
{
        struct pool_list *pl;
        struct user_subpool *usp = NULL, *cur_sp = NULL;
        struct user_device *cur_dev = NULL;

        /*
         * FIXME: Need to do some checks here - I'm tempted to add a
         * user_pool structure and build the entire thing to check against.
         */
        dm_list_iterate_items(pl, pls) {
                *sps = pl->pd.pl_subpools;

                if (!usp && (!(usp = dm_pool_zalloc(mem, sizeof(*usp) * (*sps))))) {
                        log_error("Unable to allocate %d subpool structures",
                                  *sps);
                        return 0;
                }

                if (cur_sp != &usp[pl->pd.pl_sp_id]) {
                        cur_sp = &usp[pl->pd.pl_sp_id];

                        cur_sp->id = pl->pd.pl_sp_id;
                        cur_sp->striping = pl->pd.pl_striping;
                        cur_sp->num_devs = pl->pd.pl_sp_devs;
                        cur_sp->type = pl->pd.pl_sp_type;
                        cur_sp->initialized = 1;
                }

                if (!cur_sp->devs &&
                    (!(cur_sp->devs =
                       dm_pool_zalloc(mem, sizeof(*usp->devs) * pl->pd.pl_sp_devs)))) {
                        log_error("Unable to allocate %d pool_device "
                                  "structures", pl->pd.pl_sp_devs);
                        return 0;
                }

                cur_dev = &cur_sp->devs[pl->pd.pl_sp_devid];
                cur_dev->sp_id = cur_sp->id;
                cur_dev->devid = pl->pd.pl_sp_id;
                cur_dev->blocks = pl->pd.pl_blocks;
                cur_dev->pv = pl->pv;
                cur_dev->initialized = 1;
        }

        return usp;
}
static struct node *_tree_node(struct dm_pool *mem, unsigned int k)
{
        struct node *n = dm_pool_zalloc(mem, sizeof(*n));

        if (n)
                n->k = k;

        return n;
}
struct ttree *ttree_create(struct dm_pool *mem, unsigned int klen)
{
        struct ttree *tt;

        if (!(tt = dm_pool_zalloc(mem, sizeof(*tt)))) {
                stack;
                return NULL;
        }

        tt->klen = klen;
        tt->mem = mem;

        return tt;
}
/*
 * public interface
 */
struct dm_config_tree *config_open(config_source_t source,
                                   const char *filename,
                                   int keep_open)
{
        struct dm_config_tree *cft = dm_config_create();
        struct config_source *cs;
        struct config_file *cf;

        if (!cft)
                return NULL;

        if (!(cs = dm_pool_zalloc(cft->mem, sizeof(struct config_source)))) {
                log_error("Failed to allocate config source.");
                goto fail;
        }

        if ((source == CONFIG_FILE) || (source == CONFIG_PROFILE)) {
                if (!(cf = dm_pool_zalloc(cft->mem, sizeof(struct config_file)))) {
                        log_error("Failed to allocate config file.");
                        goto fail;
                }

                cf->keep_open = keep_open;
                if (filename &&
                    !(cf->filename = dm_pool_strdup(cft->mem, filename))) {
                        log_error("Failed to duplicate filename.");
                        goto fail;
                }

                cs->source.file = cf;
        }

        cs->type = source;
        dm_config_set_custom(cft, cs);

        return cft;

fail:
        dm_config_destroy(cft);
        return NULL;
}
static struct rx_node *_node(struct dm_pool *mem, int type,
                             struct rx_node *l, struct rx_node *r)
{
        struct rx_node *n = dm_pool_zalloc(mem, sizeof(*n));

        if (n) {
                if (!(n->charset = dm_bitset_create(mem, 256))) {
                        dm_pool_free(mem, n);
                        return NULL;
                }

                n->type = type;
                n->left = l;
                n->right = r;
        }

        return n;
}
struct dm_config_tree *dm_config_create(void)
{
        struct dm_config_tree *cft;
        struct dm_pool *mem = dm_pool_create("config", 10 * 1024);

        if (!mem) {
                log_error("Failed to allocate config pool.");
                return 0;
        }

        if (!(cft = dm_pool_zalloc(mem, sizeof(*cft)))) {
                log_error("Failed to allocate config tree.");
                dm_pool_destroy(mem);
                return 0;
        }

        cft->mem = mem;

        return cft;
}
int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,
                            struct dm_status_thin_pool **status)
{
        struct dm_status_thin_pool *s;

        if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin_pool)))) {
                log_error("Failed to allocate thin_pool status structure.");
                return 0;
        }

        if (!parse_thin_pool_status(params, s)) {
                dm_pool_free(mem, s);
                return_0;
        }

        *status = s;

        return 1;
}
dm_bitset_t dm_bitset_create(struct dm_pool *mem, unsigned num_bits)
{
        /* One extra int stores num_bits in bs[0], one covers rounding up. */
        unsigned n = (num_bits / DM_BITS_PER_INT) + 2;
        size_t size = sizeof(int) * n;
        dm_bitset_t bs;

        if (mem)
                bs = dm_pool_zalloc(mem, size);
        else
                bs = dm_malloc(size);

        if (!bs)
                return NULL;

        *bs = num_bits;

        /* Pool allocation is already zeroed; clear only the malloc'd case. */
        if (!mem)
                dm_bit_clear_all(bs);

        return bs;
}
static struct replicator_site *_get_site(struct logical_volume *replicator,
                                         const char *key)
{
        struct dm_pool *mem = replicator->vg->vgmem;
        struct replicator_site *rsite;

        dm_list_iterate_items(rsite, &replicator->rsites)
                if (strcasecmp(rsite->name, key) == 0)
                        return rsite;

        if (!(rsite = dm_pool_zalloc(mem, sizeof(*rsite))))
                return_NULL;

        if (!(rsite->name = dm_pool_strdup(mem, key)))
                return_NULL;

        rsite->replicator = replicator;
        dm_list_init(&rsite->rdevices);
        dm_list_add(&replicator->rsites, &rsite->list);

        return rsite;
}
int dm_get_status_snapshot(struct dm_pool *mem, const char *params,
                           struct dm_status_snapshot **status)
{
        struct dm_status_snapshot *s;
        int r;

        if (!params) {
                log_error("Failed to parse invalid snapshot params.");
                return 0;
        }

        if (!(s = dm_pool_zalloc(mem, sizeof(*s)))) {
                log_error("Failed to allocate snapshot status structure.");
                return 0;
        }

        r = sscanf(params, FMTu64 "/" FMTu64 " " FMTu64,
                   &s->used_sectors, &s->total_sectors,
                   &s->metadata_sectors);

        if (r == 3 || r == 2)
                s->has_metadata_sectors = (r == 3);
        else if (!strcmp(params, "Invalid"))
                s->invalid = 1;
        else if (!strcmp(params, "Merge failed"))
                s->merge_failed = 1;
        else if (!strcmp(params, "Overflow"))
                s->overflow = 1;
        else {
                dm_pool_free(mem, s);
                log_error("Failed to parse snapshot params: %s.", params);
                return 0;
        }

        *status = s;

        return 1;
}
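/*
 * Usage sketch (not part of the original source): illustrates the snapshot
 * status strings dm_get_status_snapshot() accepts.  A healthy snapshot
 * reports "<used>/<total> <metadata>" sectors (older kernels omit the
 * metadata field), while "Invalid", "Merge failed" and "Overflow" are
 * reported verbatim.  The helper name below is hypothetical.
 */
static int _example_snapshot_full(struct dm_pool *mem, const char *params)
{
        struct dm_status_snapshot *ss = NULL;

        if (!dm_get_status_snapshot(mem, params, &ss))
                return -1;

        /* Treat error states as "full" so the caller reacts to them too. */
        if (ss->invalid || ss->merge_failed || ss->overflow)
                return 1;

        /* e.g. params "544/1048576 272" -> 544 of 1048576 sectors used */
        return ss->total_sectors && (ss->used_sectors >= ss->total_sectors);
}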
static struct pv_segment *_alloc_pv_segment(struct dm_pool *mem,
                                            struct physical_volume *pv,
                                            uint32_t pe, uint32_t len,
                                            struct lv_segment *lvseg,
                                            uint32_t lv_area)
{
        struct pv_segment *peg;

        if (!(peg = dm_pool_zalloc(mem, sizeof(*peg)))) {
                log_error("pv_segment allocation failed");
                return NULL;
        }

        peg->pv = pv;
        peg->pe = pe;
        peg->len = len;
        peg->lvseg = lvseg;
        peg->lv_area = lv_area;

        list_init(&peg->list);

        return peg;
}
static struct config_node *_create_node(struct dm_pool *mem)
{
        return dm_pool_zalloc(mem, sizeof(struct config_node));
}
/*
 * memory management
 */
static struct dm_config_value *_create_value(struct dm_pool *mem)
{
        return dm_pool_zalloc(mem, sizeof(struct dm_config_value));
}
static struct disk_list *__read_disk(const struct format_type *fmt,
                                     struct device *dev, struct dm_pool *mem,
                                     const char *vg_name)
{
        struct disk_list *dl = dm_pool_zalloc(mem, sizeof(*dl));
        const char *name = dev_name(dev);

        if (!dl)
                return_NULL;

        dl->dev = dev;
        dl->mem = mem;
        dm_list_init(&dl->uuids);
        dm_list_init(&dl->lvds);

        if (!_read_pvd(dev, &dl->pvd))
                goto_bad;

        /*
         * is it an orphan ?
         */
        if (!*dl->pvd.vg_name) {
                log_very_verbose("%s is not a member of any format1 VG", name);

                __update_lvmcache(fmt, dl, dev, fmt->orphan_vg_name, 0);
                return (vg_name) ? NULL : dl;
        }

        if (!read_vgd(dl->dev, &dl->vgd, &dl->pvd)) {
                log_error("Failed to read VG data from PV (%s)", name);
                __update_lvmcache(fmt, dl, dev, fmt->orphan_vg_name, 0);
                goto bad;
        }

        if (vg_name && strcmp(vg_name, (char *)dl->pvd.vg_name)) {
                log_very_verbose("%s is not a member of the VG %s",
                                 name, vg_name);
                __update_lvmcache(fmt, dl, dev, fmt->orphan_vg_name, 0);
                goto bad;
        }

        __update_lvmcache(fmt, dl, dev, (char *)dl->vgd.vg_uuid,
                          dl->vgd.vg_status & VG_EXPORTED);

        if (!_read_uuids(dl)) {
                log_error("Failed to read PV uuid list from %s", name);
                goto bad;
        }

        if (!_read_lvs(dl)) {
                log_error("Failed to read LVs from %s", name);
                goto bad;
        }

        if (!_read_extents(dl)) {
                log_error("Failed to read extents from %s", name);
                goto bad;
        }

        log_very_verbose("Found %s in %sVG %s", name,
                         (dl->vgd.vg_status & VG_EXPORTED) ? "exported " : "",
                         dl->pvd.vg_name);

        return dl;

bad:
        dm_pool_free(dl->mem, dl);
        return NULL;
}
struct volume_group *text_read_metadata(struct format_instance *fid,
                                        const char *file,
                                        struct cached_vg_fmtdata **vg_fmtdata,
                                        unsigned *use_previous_vg,
                                        struct device *dev, int primary_mda,
                                        off_t offset, uint32_t size,
                                        off_t offset2, uint32_t size2,
                                        checksum_fn_t checksum_fn,
                                        uint32_t checksum,
                                        time_t *when, char **desc)
{
        struct volume_group *vg = NULL;
        struct dm_config_tree *cft;
        struct text_vg_version_ops **vsn;
        int skip_parse;

        /*
         * This struct holds the checksum and size of the VG metadata
         * that was read from a previous device.  When we read the VG
         * metadata from this device, we can skip parsing it into a
         * cft (saving time) if the checksum of the metadata buffer
         * we read from this device matches the size/checksum saved in
         * the mda_header/rlocn struct on this device, and matches the
         * size/checksum from the previous device.
         */
        if (vg_fmtdata && !*vg_fmtdata &&
            !(*vg_fmtdata = dm_pool_zalloc(fid->mem, sizeof(**vg_fmtdata)))) {
                log_error("Failed to allocate VG fmtdata for text format.");
                return NULL;
        }

        _init_text_import();

        *desc = NULL;
        *when = 0;

        if (!(cft = config_open(CONFIG_FILE_SPECIAL, file, 0)))
                return_NULL;

        /* Does the metadata match the already-cached VG? */
        skip_parse = vg_fmtdata &&
                     ((*vg_fmtdata)->cached_mda_checksum == checksum) &&
                     ((*vg_fmtdata)->cached_mda_size == (size + size2));

        if (dev) {
                log_debug_metadata("Reading metadata from %s at %llu size %d (+%d)",
                                   dev_name(dev), (unsigned long long)offset,
                                   size, size2);

                if (!config_file_read_fd(cft, dev, MDA_CONTENT_REASON(primary_mda),
                                         offset, size, offset2, size2,
                                         checksum_fn, checksum,
                                         skip_parse, 1)) {
                        /* FIXME: handle errors */
                        log_error("Couldn't read volume group metadata from %s.",
                                  dev_name(dev));
                        goto out;
                }
        } else {
                if (!config_file_read(cft)) {
                        log_error("Couldn't read volume group metadata from file.");
                        goto out;
                }
        }

        if (skip_parse) {
                if (use_previous_vg)
                        *use_previous_vg = 1;
                log_debug_metadata("Skipped parsing metadata on %s", dev_name(dev));
                goto out;
        }

        /*
         * Find a set of version functions that can read this file
         */
        for (vsn = &_text_vsn_list[0]; *vsn; vsn++) {
                if (!(*vsn)->check_version(cft))
                        continue;

                if (!(vg = (*vsn)->read_vg(fid, cft, 0)))
                        goto_out;

                (*vsn)->read_desc(vg->vgmem, cft, when, desc);
                break;
        }

        if (vg && vg_fmtdata && *vg_fmtdata) {
                (*vg_fmtdata)->cached_mda_size = (size + size2);
                (*vg_fmtdata)->cached_mda_checksum = checksum;
        }

        if (use_previous_vg)
                *use_previous_vg = 0;

out:
        config_destroy(cft);
        return vg;
}
/*
 * Various RAID status versions include:
 * Versions < 1.5.0 (4 fields):
 *   <raid_type> <#devs> <health_str> <sync_ratio>
 * Versions 1.5.0+ (6 fields):
 *   <raid_type> <#devs> <health_str> <sync_ratio> <sync_action> <mismatch_cnt>
 */
int dm_get_status_raid(struct dm_pool *mem, const char *params,
                       struct dm_status_raid **status)
{
        int i;
        const char *pp, *p;
        struct dm_status_raid *s;

        if (!params || !(p = strchr(params, ' '))) {
                log_error("Failed to parse invalid raid params.");
                return 0;
        }
        p++;

        /* second field holds the device count */
        if (sscanf(p, "%d", &i) != 1)
                return_0;

        if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_raid))))
                return_0;

        if (!(s->raid_type = dm_pool_zalloc(mem, p - params)))
                goto_bad;       /* memory is freed when pool is destroyed */

        if (!(s->dev_health = dm_pool_zalloc(mem, i + 1)))
                goto_bad;

        if (sscanf(params, "%s %u %s %" PRIu64 "/%" PRIu64,
                   s->raid_type,
                   &s->dev_count,
                   s->dev_health,
                   &s->insync_regions,
                   &s->total_regions) != 5) {
                log_error("Failed to parse raid params: %s", params);
                goto bad;
        }

        *status = s;

        /*
         * All pre-1.5.0 version parameters are read.  Now we check
         * for additional 1.5.0+ parameters.
         *
         * Note that 'sync_action' will be NULL (and mismatch_count
         * will be 0) if the kernel returns a pre-1.5.0 status.
         */
        for (p = params, i = 0; i < 4; i++, p++)
                if (!(p = strchr(p, ' ')))
                        return 1;       /* return pre-1.5.0 status */

        pp = p;
        if (!(p = strchr(p, ' '))) {
                log_error(INTERNAL_ERROR "Bad RAID status received.");
                goto bad;
        }
        p++;

        if (!(s->sync_action = dm_pool_zalloc(mem, p - pp)))
                goto_bad;

        if (sscanf(pp, "%s %" PRIu64, s->sync_action, &s->mismatch_count) != 2) {
                log_error("Failed to parse raid params: %s", params);
                goto bad;
        }

        return 1;

bad:
        dm_pool_free(mem, s);

        return 0;
}
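/*
 * Usage sketch (not part of the original source): example status lines for
 * the two layouts described above and how a caller might act on the parsed
 * struct dm_status_raid.  Field values are illustrative only:
 *   pre-1.5.0: "raid1 2 AA 425984/425984"
 *   1.5.0+   : "raid1 2 AA 425984/425984 idle 0"
 * The helper name below is hypothetical.
 */
static void _example_check_raid_sync(struct dm_pool *mem, const char *params)
{
        struct dm_status_raid *sr = NULL;

        if (!dm_get_status_raid(mem, params, &sr))
                return;

        if (sr->insync_regions < sr->total_regions)
                log_error("RAID not in sync: %" PRIu64 "/%" PRIu64 " regions.",
                          sr->insync_regions, sr->total_regions);

        /* sync_action stays NULL when the kernel reports the pre-1.5.0 form */
        if (sr->sync_action && sr->mismatch_count)
                log_error("RAID reports %" PRIu64 " mismatches during %s.",
                          sr->mismatch_count, sr->sync_action);
}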
int dm_get_status_mirror(struct dm_pool *mem, const char *params,
                         struct dm_status_mirror **status)
{
        struct dm_status_mirror *s;
        const char *p, *pos = params;
        unsigned num_devs, argc, i;
        int used;

        if (!(s = dm_pool_zalloc(mem, sizeof(*s)))) {
                log_error("Failed to alloc mem pool to parse mirror status.");
                return 0;
        }

        if (sscanf(pos, "%u %n", &num_devs, &used) != 1)
                goto_out;
        pos += used;

        if (num_devs > DM_MIRROR_MAX_IMAGES) {
                log_error(INTERNAL_ERROR "More than " DM_TO_STRING(DM_MIRROR_MAX_IMAGES)
                          " reported in mirror status.");
                goto out;
        }

        if (!(s->devs = dm_pool_alloc(mem, num_devs * sizeof(*(s->devs))))) {
                log_error("Allocation of devs failed.");
                goto out;
        }

        for (i = 0; i < num_devs; ++i, pos += used)
                if (sscanf(pos, "%u:%u %n",
                           &(s->devs[i].major), &(s->devs[i].minor), &used) != 2)
                        goto_out;

        if (sscanf(pos, FMTu64 "/" FMTu64 "%n",
                   &s->insync_regions, &s->total_regions, &used) != 2)
                goto_out;
        pos += used;

        if (sscanf(pos, "%u %n", &argc, &used) != 1)
                goto_out;
        pos += used;

        for (i = 0; i < num_devs ; ++i)
                s->devs[i].health = pos[i];

        if (!(pos = _advance_to_next_word(pos, argc)))
                goto_out;

        if (sscanf(pos, "%u %n", &argc, &used) != 1)
                goto_out;
        pos += used;

        if (argc == 1) {
                /* core, cluster-core */
                if (!(s->log_type = dm_pool_strdup(mem, pos))) {
                        log_error("Allocation of log type string failed.");
                        goto out;
                }
        } else {
                if (!(p = _advance_to_next_word(pos, 1)))
                        goto_out;

                /* disk, cluster-disk */
                if (!(s->log_type = dm_pool_strndup(mem, pos, p - pos - 1))) {
                        log_error("Allocation of log type string failed.");
                        goto out;
                }
                pos = p;

                if ((argc > 2) && !strcmp(s->log_type, "disk")) {
                        s->log_count = argc - 2;
                        if (!(s->logs = dm_pool_alloc(mem, s->log_count * sizeof(*(s->logs))))) {
                                log_error("Allocation of logs failed.");
                                goto out;
                        }

                        for (i = 0; i < s->log_count; ++i, pos += used)
                                if (sscanf(pos, "%u:%u %n",
                                           &s->logs[i].major, &s->logs[i].minor, &used) != 2)
                                        goto_out;

                        for (i = 0; i < s->log_count; ++i)
                                s->logs[i].health = pos[i];
                }
        }

        s->dev_count = num_devs;
        *status = s;

        return 1;
out:
        log_error("Failed to parse mirror status %s.", params);
        dm_pool_free(mem, s);
        *status = NULL;

        return 0;
}
/*
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <cache block size> <#used cache blocks>/<#total cache blocks>
 * <#read hits> <#read misses> <#write hits> <#write misses>
 * <#demotions> <#promotions> <#dirty> <#features> <features>*
 * <#core args> <core args>* <policy name> <#policy args> <policy args>*
 *
 * metadata block size    : Fixed block size for each metadata block in
 *                          sectors
 * #used metadata blocks  : Number of metadata blocks used
 * #total metadata blocks : Total number of metadata blocks
 * cache block size       : Configurable block size for the cache device
 *                          in sectors
 * #used cache blocks     : Number of blocks resident in the cache
 * #total cache blocks    : Total number of cache blocks
 * #read hits             : Number of times a READ bio has been mapped
 *                          to the cache
 * #read misses           : Number of times a READ bio has been mapped
 *                          to the origin
 * #write hits            : Number of times a WRITE bio has been mapped
 *                          to the cache
 * #write misses          : Number of times a WRITE bio has been
 *                          mapped to the origin
 * #demotions             : Number of times a block has been removed
 *                          from the cache
 * #promotions            : Number of times a block has been moved to
 *                          the cache
 * #dirty                 : Number of blocks in the cache that differ
 *                          from the origin
 * #feature args          : Number of feature args to follow
 * feature args           : 'writethrough' (optional)
 * #core args             : Number of core arguments (must be even)
 * core args              : Key/value pairs for tuning the core
 *                          e.g. migration_threshold
 * policy name            : Name of the policy
 * #policy args           : Number of policy arguments to follow (must be even)
 * policy args            : Key/value pairs
 *                          e.g. sequential_threshold
 */
int dm_get_status_cache(struct dm_pool *mem, const char *params,
                        struct dm_status_cache **status)
{
        int i, feature_argc;
        char *str;
        const char *p, *pp;
        struct dm_status_cache *s;

        if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_cache))))
                return_0;

        /* Read in args that have definitive placement */
        if (sscanf(params,
                   " %" PRIu32
                   " %" PRIu64 "/%" PRIu64
                   " %" PRIu32
                   " %" PRIu64 "/%" PRIu64
                   " %" PRIu64 " %" PRIu64
                   " %" PRIu64 " %" PRIu64
                   " %" PRIu64 " %" PRIu64
                   " %" PRIu64
                   " %d",
                   &s->metadata_block_size,
                   &s->metadata_used_blocks, &s->metadata_total_blocks,
                   &s->block_size, /* AKA, chunk_size */
                   &s->used_blocks, &s->total_blocks,
                   &s->read_hits, &s->read_misses,
                   &s->write_hits, &s->write_misses,
                   &s->demotions, &s->promotions,
                   &s->dirty_blocks,
                   &feature_argc) != 14)
                goto bad;

        /* Now jump to "features" section */
        if (!(p = _advance_to_next_word(params, 12)))
                goto bad;

        /* Read in features */
        for (i = 0; i < feature_argc; i++) {
                if (!strncmp(p, "writethrough ", 13))
                        s->feature_flags |= DM_CACHE_FEATURE_WRITETHROUGH;
                else if (!strncmp(p, "writeback ", 10))
                        s->feature_flags |= DM_CACHE_FEATURE_WRITEBACK;
                else
                        log_error("Unknown feature in status: %s", params);

                if (!(p = _advance_to_next_word(p, 1)))
                        goto bad;
        }

        /* Read in core_args. */
        if (sscanf(p, "%d ", &s->core_argc) != 1)
                goto bad;
        if (s->core_argc &&
            (!(s->core_argv = dm_pool_zalloc(mem, sizeof(char *) * s->core_argc)) ||
             !(p = _advance_to_next_word(p, 1)) ||
             !(str = dm_pool_strdup(mem, p)) ||
             !(p = _advance_to_next_word(p, s->core_argc)) ||
             (dm_split_words(str, s->core_argc, 0, s->core_argv) != s->core_argc)))
                goto bad;

        /* Read in policy args */
        pp = p;
        if (!(p = _advance_to_next_word(p, 1)) ||
            !(s->policy_name = dm_pool_zalloc(mem, (p - pp))))
                goto bad;
        if (sscanf(pp, "%s %d", s->policy_name, &s->policy_argc) != 2)
                goto bad;
        if (s->policy_argc &&
            (!(s->policy_argv = dm_pool_zalloc(mem, sizeof(char *) * s->policy_argc)) ||
             !(p = _advance_to_next_word(p, 1)) ||
             !(str = dm_pool_strdup(mem, p)) ||
             (dm_split_words(str, s->policy_argc, 0, s->policy_argv) != s->policy_argc)))
                goto bad;

        *status = s;

        return 1;

bad:
        log_error("Failed to parse cache params: %s", params);
        dm_pool_free(mem, s);
        *status = NULL;

        return 0;
}
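/*
 * Usage sketch (not part of the original source): a cache target status
 * line following the layout documented above might look like
 *   "8 72/4096 128 332/16384 1024 178 2048 56 0 0 14 1 writethrough 2 migration_threshold 2048 smq 0"
 * (all values illustrative).  The fraction fields become used/total block
 * counts in struct dm_status_cache; the helper below, whose name is
 * hypothetical, turns them into a rough percentage.
 */
static int _example_cache_used_percent(struct dm_pool *mem, const char *params,
                                       uint64_t *percent)
{
        struct dm_status_cache *cs = NULL;

        if (!dm_get_status_cache(mem, params, &cs))
                return 0;

        /* Integer percentage of cache blocks currently resident. */
        *percent = cs->total_blocks ? (cs->used_blocks * 100 / cs->total_blocks) : 0;

        return 1;
}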