static int detach_metadata_devices(struct lv_segment *seg, struct dm_list *list) { uint32_t s; uint32_t num_meta_lvs; struct cmd_context *cmd = seg->lv->vg->cmd; struct lv_list *lvl; num_meta_lvs = seg_is_raid(seg) ? seg->area_count : !!seg->log_lv; if (!num_meta_lvs) return_0; if (!(lvl = dm_pool_alloc(cmd->mem, sizeof(*lvl) * num_meta_lvs))) return_0; if (seg_is_raid(seg)) { for (s = 0; s < seg->area_count; s++) { if (!seg_metalv(seg, s)) return_0; /* Trap this future possibility */ lvl[s].lv = seg_metalv(seg, s); lv_set_visible(lvl[s].lv); dm_list_add(list, &lvl[s].list); } return 1; } lvl[0].lv = detach_mirror_log(seg); dm_list_add(list, &lvl[0].list); return 1; }
struct dm_list *tag_list_copy(struct dm_pool *p, struct dm_list *tag_list) { struct dm_list *list; lvm_str_list_t *lsl; struct str_list *sl; if (!(list = dm_pool_zalloc(p, sizeof(*list)))) { log_errno(ENOMEM, "Memory allocation fail for dm_list."); return NULL; } dm_list_init(list); dm_list_iterate_items(sl, tag_list) { if (!(lsl = dm_pool_zalloc(p, sizeof(*lsl)))) { log_errno(ENOMEM, "Memory allocation fail for lvm_lv_list."); return NULL; } if (!(lsl->str = dm_pool_strdup(p, sl->str))) { log_errno(ENOMEM, "Memory allocation fail for lvm_lv_list->str."); return NULL; } dm_list_add(list, &lsl->list); } return list; }
static int _read_lvs(struct disk_list *data) { unsigned int i, lvs_read = 0; uint64_t pos; struct lvd_list *ll; struct vg_disk *vgd = &data->vgd; for (i = 0; (i < vgd->lv_max) && (lvs_read < vgd->lv_cur); i++) { pos = data->pvd.lv_on_disk.base + (i * sizeof(struct lv_disk)); ll = dm_pool_alloc(data->mem, sizeof(*ll)); if (!ll) return_0; if (!_read_lvd(data->dev, pos, &ll->lvd)) return_0; if (!_check_lvd(&ll->lvd)) continue; lvs_read++; dm_list_add(&data->lvds, &ll->list); } return 1; }
static int _read_uuids(struct disk_list *data) { unsigned num_read = 0; struct uuid_list *ul; char buffer[NAME_LEN] __attribute__((aligned(8))); uint64_t pos = data->pvd.pv_uuidlist_on_disk.base; uint64_t end = pos + data->pvd.pv_uuidlist_on_disk.size; while (pos < end && num_read < data->vgd.pv_cur) { if (!dev_read(data->dev, pos, sizeof(buffer), buffer)) return_0; if (!(ul = dm_pool_alloc(data->mem, sizeof(*ul)))) return_0; memcpy(ul->uuid, buffer, NAME_LEN); ul->uuid[NAME_LEN - 1] = '\0'; dm_list_add(&data->uuids, &ul->list); pos += NAME_LEN; num_read++; } return 1; }
/*
 * Append @dev to the internal allow-list (_allow_devs).
 * The list node is pool-allocated from @mem.
 * Returns 1 on success, 0 on allocation failure.
 */
int internal_filter_allow(struct dm_pool *mem, struct device *dev)
{
	struct device_list *entry = dm_pool_alloc(mem, sizeof(*entry));

	if (!entry) {
		log_error("device_list element allocation failed");
		return 0;
	}

	entry->dev = dev;
	dm_list_add(&_allow_devs, &entry->list);

	return 1;
}
/*
 * Take a control block from the free list, bind @context to it and
 * move it onto the allocated list.  Returns NULL when the free list
 * is exhausted.
 */
static struct control_block *_cb_alloc(struct cb_set *cbs, void *context)
{
	struct control_block *cb = NULL;

	if (!dm_list_empty(&cbs->free)) {
		cb = dm_list_item(_list_pop(&cbs->free), struct control_block);
		cb->context = context;
		dm_list_add(&cbs->allocated, &cb->list);
	}

	return cb;
}
/*
 * Register the regex test suite on @all_tests.
 * Aborts the process if the suite itself cannot be allocated.
 */
void regex_tests(struct dm_list *all_tests)
{
	struct test_suite *ts = test_suite_create(_mem_init, _mem_exit);

	if (!ts) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}

	/* T() registers one named test case with the suite. */
	T("fingerprints", "not sure", test_fingerprints);
	T("matching", "test the matcher with a variety of regexes", test_matching);
	T("kabi-query", "test the matcher with some specific patterns", test_kabi_query);

	dm_list_add(all_tests, &ts->list);
}
struct dm_pool *dm_pool_create(const char *name, size_t chunk_hint) { size_t new_size = 1024; struct dm_pool *p = dm_zalloc(sizeof(*p)); if (!p) { log_error("Couldn't create memory pool %s (size %" PRIsize_t ")", name, sizeof(*p)); return 0; } p->name = name; /* round chunk_hint up to the next power of 2 */ p->chunk_size = chunk_hint + sizeof(struct chunk); while (new_size < p->chunk_size) new_size <<= 1; p->chunk_size = new_size; dm_list_add(&_dm_pools, &p->list); return p; }
/*
 * Wire an allocated snapshot segment to its origin and COW LVs.
 *
 * The COW LV is hidden and back-linked to the segment, the origin's
 * snapshot count is bumped, and the segment is queued on the origin's
 * snapshot_segs list.  No allocation or I/O happens here.
 */
void init_snapshot_seg(struct lv_segment *seg, struct logical_volume *origin,
		       struct logical_volume *cow, uint32_t chunk_size)
{
	seg->chunk_size = chunk_size;
	seg->origin = origin;
	seg->cow = cow;

	/* The COW device is internal once attached to a snapshot. */
	lv_set_hidden(cow);
	cow->snapshot = seg;

	origin->origin_count++;

	/* FIXME Assumes an invisible origin belongs to a sparse device */
	if (!lv_is_visible(origin))
		origin->status |= VIRTUAL_ORIGIN;

	seg->lv->status |= (SNAPSHOT | VIRTUAL);

	dm_list_add(&origin->snapshot_segs, &seg->origin_list);
}
static struct lv_segment *_alloc_snapshot_seg(struct logical_volume *lv) { struct lv_segment *seg; const struct segment_type *segtype; segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_SNAPSHOT); if (!segtype) { log_error("Failed to find snapshot segtype"); return NULL; } if (!(seg = alloc_lv_segment(segtype, lv, 0, lv->le_count, 0, 0, 0, NULL, 0, lv->le_count, 0, 0, 0, 0, NULL))) { log_error("Couldn't allocate new snapshot segment."); return NULL; } dm_list_add(&lv->segments, &seg->list); return seg; }
/*
 * Look up a replicator site by name (case-insensitive), creating and
 * registering a new zeroed entry when none exists yet.
 * Returns the site, or NULL (via return_NULL) on allocation failure.
 */
static struct replicator_site *_get_site(struct logical_volume *replicator,
					 const char *key)
{
	struct dm_pool *mem = replicator->vg->vgmem;
	struct replicator_site *site;

	dm_list_iterate_items(site, &replicator->rsites)
		if (!strcasecmp(site->name, key))
			return site;

	/* No match - build a fresh site record. */
	if (!(site = dm_pool_zalloc(mem, sizeof(*site))))
		return_NULL;

	if (!(site->name = dm_pool_strdup(mem, key)))
		return_NULL;

	site->replicator = replicator;
	dm_list_init(&site->rdevices);
	dm_list_add(&replicator->rsites, &site->list);

	return site;
}
/*
 * Allocate a control-block set with @nr entries, all initially on the
 * free list.  Returns NULL on allocation failure (no partial state is
 * leaked: the set is freed if the vector allocation fails).
 */
static struct cb_set *_cb_set_create(unsigned nr)
{
	unsigned i;	/* Fixed: was 'int', compared against unsigned nr */
	struct cb_set *cbs = malloc(sizeof(*cbs));

	if (!cbs)
		return NULL;

	cbs->vec = malloc(nr * sizeof(*cbs->vec));
	if (!cbs->vec) {
		free(cbs);
		return NULL;
	}

	dm_list_init(&cbs->free);
	dm_list_init(&cbs->allocated);

	for (i = 0; i < nr; i++)
		dm_list_add(&cbs->free, &cbs->vec[i].list);

	return cbs;
}
/*
 * Open (or re-open) @dev with the requested @flags.
 *
 * If the device is already open with compatible access (RW/EXCL), the
 * open count is simply bumped.  Otherwise the old fd is closed and the
 * device re-opened.  O_DIRECT is probed once per device when supported;
 * on regular files it is never forced.  On success the device is added
 * to the global _open_devices list and 1 is returned; 0 on any failure.
 */
int dev_open_flags(struct device *dev, int flags, int direct, int quiet)
{
	struct stat buf;
	const char *name;
	int need_excl = 0, need_rw = 0;

	if ((flags & O_ACCMODE) == O_RDWR)
		need_rw = 1;

	if ((flags & O_EXCL))
		need_excl = 1;

	if (dev->fd >= 0) {
		/* Existing open already satisfies the request - reuse it. */
		if (((dev->flags & DEV_OPENED_RW) || !need_rw) &&
		    ((dev->flags & DEV_OPENED_EXCL) || !need_excl)) {
			dev->open_count++;
			return 1;
		}

		if (dev->open_count && !need_excl) {
			/* FIXME Ensure we never get here */
			log_error(INTERNAL_ERROR "%s already opened read-only", dev_name(dev));
			dev->open_count++;
		}

		/* Incompatible mode: drop the old fd before re-opening. */
		dev_close_immediate(dev);
	}

	if (critical_section())
		/* FIXME Make this log_error */
		log_verbose("dev_open(%s) called while suspended", dev_name(dev));

	if (dev->flags & DEV_REGULAR)
		name = dev_name(dev);
	else if (!(name = dev_name_confirmed(dev, quiet)))
		return_0;

	/* For real devices, verify the node still refers to the same rdev. */
	if (!(dev->flags & DEV_REGULAR)) {
		if (stat(name, &buf) < 0) {
			log_sys_error("%s: stat failed", name);
			return 0;
		}
		if (buf.st_rdev != dev->dev) {
			log_error("%s: device changed", name);
			return 0;
		}
	}

#ifdef O_DIRECT_SUPPORT
	if (direct) {
		/* Optimistically try O_DIRECT until it has been probed. */
		if (!(dev->flags & DEV_O_DIRECT_TESTED))
			dev->flags |= DEV_O_DIRECT;

		if ((dev->flags & DEV_O_DIRECT))
			flags |= O_DIRECT;
	}
#endif

#ifdef O_NOATIME
	/* Don't update atime on device inodes */
	if (!(dev->flags & DEV_REGULAR))
		flags |= O_NOATIME;
#endif

	if ((dev->fd = open(name, flags, 0777)) < 0) {
#ifdef O_DIRECT_SUPPORT
		/* First failure may be O_DIRECT: retry once without it. */
		if (direct && !(dev->flags & DEV_O_DIRECT_TESTED)) {
			flags &= ~O_DIRECT;
			if ((dev->fd = open(name, flags, 0777)) >= 0) {
				dev->flags &= ~DEV_O_DIRECT;
				log_debug("%s: Not using O_DIRECT", name);
				goto opened;
			}
		}
#endif
		if (quiet)
			log_sys_debug("open", name);
		else
			log_sys_error("open", name);
		return 0;
	}

#ifdef O_DIRECT_SUPPORT
      opened:
	if (direct)
		dev->flags |= DEV_O_DIRECT_TESTED;
#endif

	dev->open_count++;
	dev->flags &= ~DEV_ACCESSED_W;

	if (need_rw)
		dev->flags |= DEV_OPENED_RW;
	else
		dev->flags &= ~DEV_OPENED_RW;

	if (need_excl)
		dev->flags |= DEV_OPENED_EXCL;
	else
		dev->flags &= ~DEV_OPENED_EXCL;

	/* Re-check identity via the open fd to catch races with rename. */
	if (!(dev->flags & DEV_REGULAR) &&
	    ((fstat(dev->fd, &buf) < 0) || (buf.st_rdev != dev->dev))) {
		log_error("%s: fstat failed: Has device name changed?", name);
		dev_close_immediate(dev);
		return 0;
	}

#ifndef O_DIRECT_SUPPORT
	if (!(dev->flags & DEV_REGULAR))
		dev_flush(dev);
#endif

	if ((flags & O_CREAT) && !(flags & O_TRUNC))
		dev->end = lseek(dev->fd, (off_t) 0, SEEK_END);

	dm_list_add(&_open_devices, &dev->open_list);

	log_debug("Opened %s %s%s%s", dev_name(dev),
		  dev->flags & DEV_OPENED_RW ? "RW" : "RO",
		  dev->flags & DEV_OPENED_EXCL ? " O_EXCL" : "",
		  dev->flags & DEV_O_DIRECT ? " O_DIRECT" : "");

	return 1;
}
/*
 * Remove an element from existing list and insert before 'head'.
 *
 * 'elem' must currently be linked into some list (dm_list_del is
 * called on it unconditionally); after the call it is linked into
 * the list containing 'head'.
 */
void dm_list_move(struct dm_list *head, struct dm_list *elem)
{
	dm_list_del(elem);
	dm_list_add(head, elem);
}
/*
 * Register the bcache-utils test suites (async and sync variants)
 * on @all_tests.
 *
 * NOTE(review): _async_tests()/_sync_tests() are assumed to return a
 * suite object with an embedded 'list' node and to abort internally
 * on allocation failure - their definitions are not visible here;
 * confirm they cannot return NULL before relying on that.
 */
void bcache_utils_tests(struct dm_list *all_tests)
{
	dm_list_add(all_tests, &_async_tests()->list);
	dm_list_add(all_tests, &_sync_tests()->list);
}