/*
 * Register a command in the command table.
 *
 * The trailing varargs are a list of argument identifiers (ints),
 * terminated by the first negative value.  They are copied into a
 * heap-allocated array and handed to _create_new_command() together
 * with the name, handler, description and usage string.
 * Exits the process on allocation failure.
 */
static void _register_command(const char *name, command_fn fn, const char *desc,
			      const char *usagestr, ...)
{
	va_list ap;
	int *args;
	int count = 0, idx;

	/* First pass: count arguments up to the negative terminator. */
	va_start(ap, usagestr);
	while (va_arg(ap, int) >= 0)
		count++;
	va_end(ap);

	/* Allocate space for them. */
	if (!(args = dm_malloc(sizeof(*args) * count))) {
		log_fatal("Out of memory.");
		exit(ECMD_FAILED);
	}

	/* Second pass: copy the arguments into the array. */
	va_start(ap, usagestr);
	for (idx = 0; idx < count; idx++)
		args[idx] = va_arg(ap, int);
	va_end(ap);

	/* Enter the command in the register. */
	_create_new_command(name, fn, desc, usagestr, count, args);
}
/*
 * Allocate and populate a struct target describing one table line.
 * Duplicates the type and params strings; on any allocation failure
 * all partial state is released and NULL is returned.
 * On success the caller owns the returned object.
 */
struct target *create_target(uint64_t start, uint64_t len,
			     const char *type, const char *params)
{
	struct target *t;

	if (!(t = dm_malloc(sizeof(*t)))) {
		log_error("create_target: malloc(%" PRIsize_t ") failed",
			  sizeof(*t));
		return NULL;
	}

	memset(t, 0, sizeof(*t));

	if (!(t->params = dm_strdup(params))) {
		log_error("create_target: strdup(params) failed");
		goto bad;
	}

	if (!(t->type = dm_strdup(type))) {
		log_error("create_target: strdup(type) failed");
		goto bad;
	}

	t->start = start;
	t->length = len;

	return t;

      bad:
	/* params may hold sensitive table data, so it is zeroed on free. */
	_dm_zfree_string(t->params);
	dm_free(t->type);
	dm_free(t);

	return NULL;
}
/* * daemon_read * @fifos * @msg * * Read message from daemon. * * Returns: 0 on failure, 1 on success */ static int _daemon_read(struct dm_event_fifos *fifos, struct dm_event_daemon_message *msg) { unsigned bytes = 0; int ret, i; fd_set fds; struct timeval tval = { 0, 0 }; size_t size = 2 * sizeof(uint32_t); /* status + size */ char *buf = alloca(size); int header = 1; while (bytes < size) { for (i = 0, ret = 0; (i < 20) && (ret < 1); i++) { /* Watch daemon read FIFO for input. */ FD_ZERO(&fds); FD_SET(fifos->server, &fds); tval.tv_sec = 1; ret = select(fifos->server + 1, &fds, NULL, NULL, &tval); if (ret < 0 && errno != EINTR) { log_error("Unable to read from event server"); return 0; } } if (ret < 1) { log_error("Unable to read from event server."); return 0; } ret = read(fifos->server, buf + bytes, size); if (ret < 0) { if ((errno == EINTR) || (errno == EAGAIN)) continue; else { log_error("Unable to read from event server."); return 0; } } bytes += ret; if (bytes == 2 * sizeof(uint32_t) && header) { msg->cmd = ntohl(*((uint32_t *)buf)); msg->size = ntohl(*((uint32_t *)buf + 1)); buf = msg->data = dm_malloc(msg->size); size = msg->size; bytes = 0; header = 0; } } if (bytes != size) { if (msg->data) dm_free(msg->data); msg->data = NULL; } return bytes == size; }
/*
 * Allocate an uninitialised timestamp object.
 * Returns NULL on allocation failure (recording the error stack).
 */
struct dm_timestamp *dm_timestamp_alloc(void)
{
	struct dm_timestamp *ts = dm_malloc(sizeof(*ts));

	if (!ts)
		stack;

	return ts;
}
/*
 * Return a newly allocated, NUL-terminated copy of the LV name.
 * The caller is responsible for freeing the result.
 * Returns NULL on allocation failure (previously the unchecked
 * allocation led straight into a NULL dereference in strncpy).
 */
char *lvm_lv_get_name(const lv_t lv)
{
	char *name;

	if (!(name = dm_malloc(NAME_LEN + 1)))
		return NULL;

	strncpy(name, (const char *) lv->name, NAME_LEN);
	name[NAME_LEN] = '\0';	/* strncpy does not guarantee termination */

	return name;
}
/*
 * Allocate a hash node with the key copied into its trailing buffer.
 * Returns NULL if allocation fails.
 */
static struct dm_hash_node *_create_node(const char *str, unsigned len)
{
	struct dm_hash_node *n;

	if (!(n = dm_malloc(sizeof(*n) + len)))
		return NULL;

	memcpy(n->key, str, len);
	n->keylen = len;

	return n;
}
/*
 * Like asprintf(3): format into a freshly allocated buffer stored in
 * *result.  Returns the number of characters written plus the
 * terminating NUL (i.e. n + 1), or -1 on allocation failure, in which
 * case *result is left NULL.  The caller frees *result.
 */
int dm_asprintf(char **result, const char *format, ...)
{
	int i, n, size = 16;
	va_list ap;
	char *buf = dm_malloc(size);

	*result = 0;

	if (!buf)
		return -1;

	/* Retry with a larger buffer until the formatted output fits. */
	for (i = 0;; i++) {
		va_start(ap, format);
		n = vsnprintf(buf, size, format, ap);
		va_end(ap);

		if (0 <= n && n < size)
			break;

		dm_free(buf);
		/* Up to glibc 2.0.6 returns -1 */
		/* (older vsnprintf gives no length hint, so double instead) */
		size = (n < 0) ? size * 2 : n + 1;
		if (!(buf = dm_malloc(size)))
			return -1;
	}

	if (i > 1) {
		/* Reallocating more then once? */
		/* Shrink the possibly oversized buffer to the exact length. */
		if (!(*result = dm_strdup(buf))) {
			dm_free(buf);
			return -1;
		}
		dm_free(buf);
	} else
		*result = buf;

	return n + 1;
}
/*
 * Allocate a new event handler with every field cleared.
 * Returns NULL on allocation failure.
 */
struct dm_event_handler *dm_event_handler_create(void)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_malloc(sizeof(*dmevh))))
		return NULL;

	dmevh->dso = NULL;
	dmevh->dev_name = NULL;
	dmevh->uuid = NULL;
	dmevh->major = 0;
	dmevh->minor = 0;
	dmevh->mask = 0;
	dmevh->timeout = 0;

	return dmevh;
}
struct timestamp *get_timestamp(void) { struct timestamp *ts = NULL; if (!(ts = dm_malloc(sizeof(*ts)))) return_NULL; if (gettimeofday(&ts->t, NULL)) { log_sys_error("gettimeofday", "get_timestamp"); return NULL; } return ts; }
struct timestamp *get_timestamp(void) { struct timestamp *ts = NULL; if (!(ts = dm_malloc(sizeof(*ts)))) return_NULL; if (clock_gettime(CLOCK_MONOTONIC, &ts->t)) { log_sys_error("clock_gettime", "get_timestamp"); return NULL; } return ts; }
/*
 * Allocate a zero-initialised dso_data carrying a copy of the DSO name.
 * Returns NULL on allocation failure.
 */
static struct dso_data *_alloc_dso_data(struct message_data *data)
{
	struct dso_data *ret;

	if (!(ret = dm_malloc(sizeof(*ret))))
		return NULL;

	memset(ret, 0, sizeof(*ret));

	if (!(ret->dso_name = dm_strdup(data->dso_name))) {
		dm_free(ret);
		return NULL;
	}

	return ret;
}
/*
 * Store "cylinders heads sectors start" as the task's geometry string.
 * The buffer is sized exactly for the four fields plus separators.
 *
 * Returns 1 on success, 0 on failure.
 */
int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders,
			 const char *heads, const char *sectors,
			 const char *start)
{
	size_t len = strlen(cylinders) + 1 + strlen(heads) + 1 +
		     strlen(sectors) + 1 + strlen(start) + 1;

	if (!(dmt->geometry = dm_malloc(len))) {
		log_error("dm_task_set_geometry: dm_malloc failed");
		return 0;
	}

	/* snprintf bounds the write to the buffer we just sized. */
	if (snprintf(dmt->geometry, len, "%s %s %s %s",
		     cylinders, heads, sectors, start) < 0) {
		log_error("dm_task_set_geometry: snprintf failed");
		return 0;
	}

	return 1;
}
/*
 * Allocate a zeroed label bound to the given labeller and let the
 * labeller initialise it.  Returns NULL on allocation failure.
 */
struct label *label_create(struct labeller *labeller)
{
	struct label *label;

	if (!(label = dm_malloc(sizeof(*label)))) {
		/* Fixed typo: was "allocaction" */
		log_error("label allocation failed");
		return NULL;
	}

	memset(label, 0, sizeof(*label));

	label->labeller = labeller;

	labeller->ops->initialise_label(labeller, label);

	return label;
}
/*
 * Allocate a labeller list entry, storing the name in the character
 * buffer that trails the struct.  Returns NULL on allocation failure.
 */
static struct labeller_i *_alloc_li(const char *name, struct labeller *l)
{
	struct labeller_i *li;
	size_t len = sizeof(*li) + strlen(name) + 1;

	if (!(li = dm_malloc(len))) {
		log_error("Couldn't allocate memory for labeller list object.");
		return NULL;
	}

	li->l = l;
	strcpy(li->name, name);

	return li;
}
/*
 * Create a memory pool.  chunk_hint (plus the per-chunk header) is
 * rounded up to the next power of two and becomes the pool's chunk
 * size.  Returns NULL on allocation failure.
 */
struct dm_pool *dm_pool_create(const char *name, size_t chunk_hint)
{
	struct dm_pool *p;
	size_t new_size = 1024;

	if (!(p = dm_malloc(sizeof(*p)))) {
		log_error("Couldn't create memory pool %s (size %" PRIsize_t
			  ")", name, sizeof(*p));
		return 0;
	}

	memset(p, 0, sizeof(*p));

	/* round chunk_hint up to the next power of 2 */
	p->chunk_size = chunk_hint + sizeof(struct chunk);
	while (new_size < p->chunk_size)
		new_size <<= 1;
	p->chunk_size = new_size;

	return p;
}
char *alloc_printed_tags(struct dm_list *tags) { struct str_list *sl; int first = 1; size_t size = 0; char *buffer, *buf; dm_list_iterate_items(sl, tags) /* '"' + tag + '"' + ',' + ' ' */ size += strlen(sl->str) + 4; /* '[' + ']' + '\0' */ size += 3; if (!(buffer = buf = dm_malloc(size))) { log_error("Could not allocate memory for tag list buffer."); return NULL; } if (!emit_to_buffer(&buf, &size, "[")) goto_bad; dm_list_iterate_items(sl, tags) { if (!first) { if (!emit_to_buffer(&buf, &size, ", ")) goto_bad; } else first = 0; if (!emit_to_buffer(&buf, &size, "\"%s\"", sl->str)) goto_bad; } if (!emit_to_buffer(&buf, &size, "]")) goto_bad; return buffer; bad: dm_free(buffer); return_NULL; }
/*
 * Create a bitset holding num_bits bits, allocated from the pool when
 * one is supplied, otherwise from the heap.  Word 0 stores the bit
 * count; the remaining words hold the bits, which start cleared.
 * Returns NULL on allocation failure.
 */
dm_bitset_t dm_bitset_create(struct dm_pool *mem, unsigned num_bits)
{
	unsigned n = (num_bits / DM_BITS_PER_INT) + 2;
	size_t size = sizeof(int) * n;
	dm_bitset_t bs = mem ? dm_pool_zalloc(mem, size) : dm_malloc(size);

	if (!bs)
		return NULL;

	*bs = num_bits;

	/* Pool allocations are already zeroed; heap memory must be cleared. */
	if (!mem)
		dm_bit_clear_all(bs);

	return bs;
}
/* Allocate the status structure for a monitoring thread. */
static struct thread_status *_alloc_thread_status(struct message_data *data,
						  struct dso_data *dso_data)
{
	struct thread_status *ret;

	if (!(ret = dm_malloc(sizeof(*ret))))
		return NULL;

	memset(ret, 0, sizeof(*ret));

	if (!(ret->device.uuid = dm_strdup(data->device_uuid))) {
		dm_free(ret);
		return NULL;
	}

	/* Some fields already zeroed by memset are set again for clarity. */
	ret->current_task = NULL;
	ret->device.name = NULL;
	ret->device.major = ret->device.minor = 0;
	ret->dso_data = dso_data;
	ret->events = data->events.field;
	ret->timeout = data->timeout.secs;
	list_init(&ret->timeout_list);

	return ret;
}
/*
 * Perform device I/O, routing the request through an aligned bounce
 * buffer when the caller's region or buffer is not aligned to the
 * device block size.  Returns 1 on success, 0 on failure.
 */
static int _aligned_io(struct device_area *where, char *buffer,
		       int should_write)
{
	char *bounce, *bounce_buf;
	unsigned int block_size = 0;
	uintptr_t mask;
	struct device_area widened;
	int r = 0;

	if (!(where->dev->flags & DEV_REGULAR) &&
	    !_get_block_size(where->dev, &block_size))
		return_0;

	if (!block_size)
		block_size = lvm_getpagesize();

	/* Widen the request to whole device blocks. */
	_widen_region(block_size, where, &widened);

	/* Do we need to use a bounce buffer? */
	mask = block_size - 1;
	if (!memcmp(where, &widened, sizeof(widened)) &&
	    !((uintptr_t) buffer & mask))
		/* Already block-aligned: perform the I/O directly. */
		return _io(where, buffer, should_write);

	/* Allocate a bounce buffer with an extra block */
	if (!(bounce_buf = bounce =
	      dm_malloc((size_t) widened.size + block_size))) {
		log_error("Bounce buffer malloc failed");
		return 0;
	}

	/*
	 * Realign start of bounce buffer (using the extra sector)
	 */
	if (((uintptr_t) bounce) & mask)
		bounce = (char *) ((((uintptr_t) bounce) + mask) & ~mask);

	/* channel the io through the bounce buffer */
	/* Pre-read the widened region so untouched bytes are preserved. */
	if (!_io(&widened, bounce, 0)) {
		if (!should_write)
			goto_out;
		/* FIXME pre-extend the file */
		memset(bounce, '\n', widened.size);
	}

	if (should_write) {
		/* Merge the caller's data into the widened block image... */
		memcpy(bounce + (where->start - widened.start), buffer,
		       (size_t) where->size);

		/* ... then we write */
		if (!(r = _io(&widened, bounce, 1)))
			stack;
		goto out;
	}

	/* Read: copy just the requested sub-range back to the caller. */
	memcpy(buffer, bounce + (where->start - widened.start),
	       (size_t) where->size);

	r = 1;

      out:
	/* Free the original (possibly unaligned) allocation pointer. */
	dm_free(bounce_buf);
	return r;
}
/*
 * Read (and checksum-verify) a config tree from a device, either by
 * mmap()ing a regular file or by reading up to two (possibly wrapped)
 * regions into a temporary buffer.  Returns 1 on success, 0 on failure.
 */
int read_config_fd(struct config_tree *cft, struct device *dev,
		   off_t offset, size_t size, off_t offset2, size_t size2,
		   checksum_fn_t checksum_fn, uint32_t checksum)
{
	struct cs *c = (struct cs *) cft;
	struct parser *p;
	int r = 0;
	int use_mmap = 1;
	off_t mmap_offset = 0;
	char *buf = NULL;

	if (!(p = dm_pool_alloc(c->mem, sizeof(*p))))
		return_0;
	p->mem = c->mem;

	/* Only use mmap with regular files */
	if (!(dev->flags & DEV_REGULAR) || size2)
		use_mmap = 0;

	if (use_mmap) {
		mmap_offset = offset % lvm_getpagesize();
		/* memory map the file */
		p->fb = mmap((caddr_t) 0, size + mmap_offset, PROT_READ,
			     MAP_PRIVATE, dev_fd(dev), offset - mmap_offset);
		if (p->fb == (caddr_t) (-1)) {
			log_sys_error("mmap", dev_name(dev));
			/* Nothing was mapped: must not fall through to the
			 * munmap in the common exit path (was 'goto out'). */
			return 0;
		}
		p->fb = p->fb + mmap_offset;
	} else {
		if (!(buf = dm_malloc(size + size2)))
			return_0;
		if (!dev_read_circular(dev, (uint64_t) offset, size,
				       (uint64_t) offset2, size2, buf))
			goto out;
		p->fb = buf;
	}

	/* The checksum covers both regions (the second may be empty). */
	if (checksum_fn && checksum !=
	    (checksum_fn(checksum_fn(INITIAL_CRC, (const uint8_t *) p->fb,
				     size),
			 (const uint8_t *) (p->fb + size), size2))) {
		log_error("%s: Checksum error", dev_name(dev));
		goto out;
	}

	p->fe = p->fb + size + size2;

	if (!_parse_config_file(p, cft))
		goto_out;

	r = 1;

      out:
	if (!use_mmap)
		dm_free(buf);
	else {
		/* unmap the file */
		if (munmap((char *) (p->fb - mmap_offset),
			   size + mmap_offset)) {
			log_sys_error("munmap", dev_name(dev));
			r = 0;
		}
	}

	return r;
}
int _get_partition_type(struct dev_mgr *dm, struct device *d) { int pv_handle = -1; struct device *primary; ssize_t read_ret; ssize_t bytes_read = 0; char *buffer; unsigned short *s_buffer; struct partition *part; loff_t offset = 0; loff_t extended_offset = 0; int part_sought; int part_found = 0; int first_partition = 1; int extended_partition = 0; int p; if (!(primary = dev_primary(dm, d))) { log_error ("Failed to find main device containing partition %s", d->name); return 0; } if (!(buffer = dm_malloc(SECTOR_SIZE))) { log_error("Failed to allocate partition table buffer"); return 0; } /* Get partition table */ if ((pv_handle = open(primary->name, O_RDONLY)) < 0) { log_error("%s: open failed: %s", primary->name, strerror(errno)); return 0; } s_buffer = (unsigned short *) buffer; part = (struct partition *) (buffer + 0x1be); part_sought = MINOR_PART(dm, d); do { bytes_read = 0; if (llseek(pv_handle, offset * SECTOR_SIZE, SEEK_SET) == -1) { log_error("%s: llseek failed: %s", primary->name, strerror(errno)); return 0; } while ((bytes_read < SECTOR_SIZE) && (read_ret = read(pv_handle, buffer + bytes_read, SECTOR_SIZE - bytes_read)) != -1) bytes_read += read_ret; if (read_ret == -1) { log_error("%s: read failed: %s", primary->name, strerror(errno)); return 0; } if (s_buffer[255] == 0xAA55) { if (is_whole_disk(dm, d)) return -1; } else return 0; extended_partition = 0; /* Loop through primary partitions */ for (p = 0; p < 4; p++) { if (part[p].sys_ind == DOS_EXTENDED_PARTITION || part[p].sys_ind == LINUX_EXTENDED_PARTITION || part[p].sys_ind == WIN98_EXTENDED_PARTITION) { extended_partition = 1; offset = extended_offset + part[p].start_sect; if (extended_offset == 0) extended_offset = part[p].start_sect; if (first_partition == 1) part_found++; } else if (first_partition == 1) { if (p == part_sought) { if (part[p].sys_ind == 0) { /* missing primary? 
*/ return 0; } } else part_found++; } else if (!part[p].sys_ind) part_found++; if (part_sought == part_found) return part[p].sys_ind; } first_partition = 0; } while (extended_partition == 1); return 0; }
/*
 * Build a freshly allocated struct dm_ioctl (DMI_SIZE bytes) from the
 * values stored in the proplib dictionary, then dispatch the requested
 * command to fill in the payload and target count.
 * Returns NULL on allocation failure; the caller owns the result.
 */
struct dm_ioctl* nbsd_dm_dict_to_dmi(prop_dictionary_t dm_dict, const int cmd)
{
	struct dm_ioctl *dmi;
	prop_array_t ver;
	size_t i;
	int r;
	char *name, *uuid;
	uint32_t major, minor;

	name = NULL;
	uuid = NULL;
	minor = 0;

	/* Resolve the device-mapper block major for this system. */
	nbsd_get_dm_major(&major, DM_BLOCK_MAJOR);

	if (!(dmi = dm_malloc(DMI_SIZE)))
		return NULL;

	memset(dmi, 0, DMI_SIZE);

	/* Copy scalar fields straight out of the dictionary. */
	prop_dictionary_get_int32(dm_dict, DM_IOCTL_OPEN, &dmi->open_count);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_EVENT, &dmi->event_nr);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &dmi->flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_TARGET_COUNT,
				   &dmi->target_count);

	/* Encode a dev_t only when a minor number was supplied. */
	if (prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor))
		dmi->dev = MKDEV(major, minor);
	else
		dmi->dev = 0;

	/* Copy name and uuid to dm_ioctl. */
	if (prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME,
					       (const char **) &name)) {
		strlcpy(dmi->name, name, DM_NAME_LEN);
	} else
		dmi->name[0] = '\0';

	if (prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID,
					       (const char **) &uuid)) {
		strlcpy(dmi->uuid, uuid, DM_UUID_LEN);
	} else
		dmi->uuid[0] = '\0';

	/* dmi parsing values, size of dmi block and offset to data. */
	dmi->data_size = DMI_SIZE;
	dmi->data_start = sizeof(struct dm_ioctl);

	/* Get kernel version from dm_dict. */
	ver = prop_dictionary_get(dm_dict, DM_IOCTL_VERSION);

	for (i = 0; i < 3; i++)
		prop_array_get_uint32(ver, i, &dmi->version[i]);

	/* Dispatch: each handler fills dmi and returns the target count
	 * (negative on error, in which case the count is left as-is). */
	switch (cmd) {
	case DM_LIST_VERSIONS:
		r = dm_list_versions(dm_dict, dmi);
		if (r >= 0)
			dmi->target_count = r;
		break;

	case DM_LIST_DEVICES:
		r = dm_list_devices(dm_dict, dmi);
		if (r >= 0)
			dmi->target_count = r;
		break;

	case DM_TABLE_STATUS:
		r = dm_table_status(dm_dict, dmi);
		if (r >= 0)
			dmi->target_count = r;
		break;

	case DM_TABLE_DEPS:
		r = dm_dev_deps(dm_dict, dmi);
		if (r >= 0)
			dmi->target_count = r;
		break;
	}

	return dmi;
}
static struct dm_ioctl_v1 *_flatten_v1(struct dm_task *dmt) { const size_t min_size = 16 * 1024; const int (*version)[3]; struct dm_ioctl_v1 *dmi; struct target *t; size_t len = sizeof(struct dm_ioctl_v1); void *b, *e; int count = 0; for (t = dmt->head; t; t = t->next) { len += sizeof(struct dm_target_spec_v1); len += strlen(t->params) + 1 + ALIGNMENT_V1; count++; } if (count && dmt->newname) { log_error("targets and newname are incompatible"); return NULL; } if (dmt->newname) len += strlen(dmt->newname) + 1; /* * Give len a minimum size so that we have space to store * dependencies or status information. */ if (len < min_size) len = min_size; if (!(dmi = dm_malloc(len))) return NULL; memset(dmi, 0, len); version = &_cmd_data_v1[dmt->type].version; dmi->version[0] = (*version)[0]; dmi->version[1] = (*version)[1]; dmi->version[2] = (*version)[2]; dmi->data_size = len; dmi->data_start = sizeof(struct dm_ioctl_v1); if (dmt->dev_name) strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name)); if (dmt->type == DM_DEVICE_SUSPEND) dmi->flags |= DM_SUSPEND_FLAG; if (dmt->read_only) dmi->flags |= DM_READONLY_FLAG; if (dmt->minor >= 0) { if (dmt->major <= 0) { log_error("Missing major number for persistent device"); return NULL; } dmi->flags |= DM_PERSISTENT_DEV_FLAG; dmi->dev = MKDEV(dmt->major, dmt->minor); } if (dmt->uuid) strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid)); dmi->target_count = count; b = (void *) (dmi + 1); e = (void *) ((char *) dmi + len); for (t = dmt->head; t; t = t->next) if (!(b = _add_target_v1(t, b, e))) { log_error("Ran out of memory building ioctl parameter"); goto bad; } if (dmt->newname) strcpy(b, dmt->newname); return dmi; bad: _dm_zfree_dmi_v1(dmi); return NULL; }
/*
 * Serialise a dm_task into a single v4 dm_ioctl buffer: the header
 * followed by one dm_target_spec + params string per target, or the
 * payload for a rename, target message or geometry request (these are
 * all mutually exclusive).  repeat_count doubles the buffer for each
 * previous attempt that ran out of space.
 * Returns NULL on error; on success the caller frees the result.
 */
static struct dm_ioctl *_flatten(struct dm_task *dmt, unsigned repeat_count)
{
	const size_t min_size = 16 * 1024;
	const int (*version)[3];
	struct dm_ioctl *dmi;
	struct target *t;
	struct dm_target_msg *tmsg;
	size_t len = sizeof(struct dm_ioctl);
	void *b, *e;
	int count = 0;

	for (t = dmt->head; t; t = t->next) {
		len += sizeof(struct dm_target_spec);
		len += strlen(t->params) + 1 + ALIGNMENT;
		count++;
	}

	/* Reject mutually exclusive request combinations up front. */
	if (count && (dmt->sector || dmt->message)) {
		log_error("targets and message are incompatible");
		return NULL;
	}

	if (count && dmt->newname) {
		log_error("targets and rename are incompatible");
		return NULL;
	}

	if (count && dmt->geometry) {
		log_error("targets and geometry are incompatible");
		return NULL;
	}

	if (dmt->newname && (dmt->sector || dmt->message)) {
		log_error("message and rename are incompatible");
		return NULL;
	}

	if (dmt->newname && dmt->geometry) {
		log_error("geometry and rename are incompatible");
		return NULL;
	}

	if (dmt->geometry && (dmt->sector || dmt->message)) {
		log_error("geometry and message are incompatible");
		return NULL;
	}

	if (dmt->sector && !dmt->message) {
		log_error("message is required with sector");
		return NULL;
	}

	if (dmt->newname)
		len += strlen(dmt->newname) + 1;

	if (dmt->message)
		len += sizeof(struct dm_target_msg) + strlen(dmt->message) + 1;

	if (dmt->geometry)
		len += strlen(dmt->geometry) + 1;

	/*
	 * Give len a minimum size so that we have space to store
	 * dependencies or status information.
	 */
	if (len < min_size)
		len = min_size;

	/* Increase buffer size if repeating because buffer was too small */
	while (repeat_count--)
		len *= 2;

	if (!(dmi = dm_malloc(len)))
		return NULL;

	memset(dmi, 0, len);

	version = &_cmd_data_v4[dmt->type].version;

	dmi->version[0] = (*version)[0];
	dmi->version[1] = (*version)[1];
	dmi->version[2] = (*version)[2];

	dmi->data_size = len;
	dmi->data_start = sizeof(struct dm_ioctl);

	if (dmt->minor >= 0) {
		if (dmt->major <= 0) {
			log_error("Missing major number for persistent device.");
			goto bad;
		}

		/* Substitute the configured default major if required. */
		if (!_dm_multiple_major_support &&
		    dmt->allow_default_major_fallback &&
		    dmt->major != _dm_device_major) {
			log_verbose("Overriding major number of %" PRIu32
				    " with %" PRIu32 " for persistent device.",
				    dmt->major, _dm_device_major);
			dmt->major = _dm_device_major;
		}

		dmi->flags |= DM_PERSISTENT_DEV_FLAG;
		dmi->dev = MKDEV(dmt->major, dmt->minor);
	}

	/* Does driver support device number referencing? */
	if (_dm_version_minor < 3 && !dmt->dev_name && !dmt->uuid && dmi->dev) {
		if (!_lookup_dev_name(dmi->dev, dmi->name, sizeof(dmi->name))) {
			log_error("Unable to find name for device (%" PRIu32
				  ":%" PRIu32 ")", dmt->major, dmt->minor);
			goto bad;
		}
		log_verbose("device (%" PRIu32 ":%" PRIu32 ") is %s "
			    "for compatibility with old kernel",
			    dmt->major, dmt->minor, dmi->name);
	}

	/* FIXME Until resume ioctl supplies name, use dev_name for readahead */
	if (dmt->dev_name && (dmt->type != DM_DEVICE_RESUME ||
			      dmt->minor < 0 || dmt->major < 0))
		strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));

	if (dmt->uuid)
		strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid));

	if (dmt->type == DM_DEVICE_SUSPEND)
		dmi->flags |= DM_SUSPEND_FLAG;
	if (dmt->no_flush)
		dmi->flags |= DM_NOFLUSH_FLAG;
	if (dmt->read_only)
		dmi->flags |= DM_READONLY_FLAG;
	if (dmt->skip_lockfs)
		dmi->flags |= DM_SKIP_LOCKFS_FLAG;

	if (dmt->query_inactive_table) {
		if (_dm_version_minor < 16)
			log_warn("WARNING: Inactive table query unsupported "
				 "by kernel. It will use live table.");
		dmi->flags |= DM_QUERY_INACTIVE_TABLE_FLAG;
	}

	if (dmt->new_uuid) {
		if (_dm_version_minor < 19) {
			log_error("WARNING: Setting UUID unsupported by "
				  "kernel. Aborting operation.");
			goto bad;
		}
		dmi->flags |= DM_UUID_FLAG;
	}

	dmi->target_count = count;
	dmi->event_nr = dmt->event_nr;

	/* Append targets / rename / message / geometry after the header. */
	b = (void *) (dmi + 1);
	e = (void *) ((char *) dmi + len);

	for (t = dmt->head; t; t = t->next)
		if (!(b = _add_target(t, b, e))) {
			log_error("Ran out of memory building ioctl parameter");
			goto bad;
		}

	if (dmt->newname)
		strcpy(b, dmt->newname);

	if (dmt->message) {
		tmsg = (struct dm_target_msg *) b;
		tmsg->sector = dmt->sector;
		strcpy(tmsg->message, dmt->message);
	}

	if (dmt->geometry)
		strcpy(b, dmt->geometry);

	return dmi;

      bad:
	_dm_zfree_dmi(dmi);
	return NULL;
}
/* Real locking */ static int _lock_resource(const char *resource, int mode, int flags, int *lockid) { /* DLM table of allowed transition states */ static const int _dlm_table[6][6] = { /* Mode NL CR CW PR PW EX */ /* NL */ { 1, 1, 1, 1, 1, 1}, /* CR */ { 1, 1, 1, 1, 1, 0}, /* CW */ { 1, 1, 1, 0, 0, 0}, /* PR */ { 1, 1, 0, 1, 0, 0}, /* PW */ { 1, 1, 0, 0, 0, 0}, /* EX */ { 1, 0, 0, 0, 0, 0} }; struct lock *lck = NULL, *lckt; struct dm_list *head; DEBUGLOG("Locking resource %s, flags=0x%02x (%s%s%s), mode=%s (%d)\n", resource, flags, (flags & LCKF_NOQUEUE) ? "NOQUEUE" : "", ((flags & (LCKF_NOQUEUE | LCKF_CONVERT)) == (LCKF_NOQUEUE | LCKF_CONVERT)) ? "|" : "", (flags & LCKF_CONVERT) ? "CONVERT" : "", _get_mode(mode), mode); mode &= LCK_TYPE_MASK; pthread_mutex_lock(&_lock_mutex); retry: pthread_cond_broadcast(&_lock_cond); /* to wakeup waiters */ if (!(head = dm_hash_lookup(_locks, resource))) { if (flags & LCKF_CONVERT) { /* In real DLM, lock is identified only by lockid, resource is not used */ DEBUGLOG("Unlocked resource %s cannot be converted\n", resource); goto_bad; } /* Add new locked resource */ if (!(head = dm_malloc(sizeof(struct dm_list))) || !dm_hash_insert(_locks, resource, head)) { dm_free(head); goto_bad; } dm_list_init(head); } else /* Update/convert locked resource */ dm_list_iterate_items(lck, head) { /* Check is all locks are compatible with requested lock */ if (flags & LCKF_CONVERT) { if (lck->lockid != *lockid) continue; DEBUGLOG("Converting resource %s lockid=%d mode:%s -> %s...\n", resource, lck->lockid, _get_mode(lck->mode), _get_mode(mode)); dm_list_iterate_items(lckt, head) { if ((lckt->lockid != *lockid) && !_dlm_table[mode][lckt->mode]) { if (!(flags & LCKF_NOQUEUE) && /* TODO: Real dlm uses here conversion queues */ !pthread_cond_wait(&_lock_cond, &_lock_mutex) && _locks) /* End of the game? 
*/ goto retry; goto bad; } } lck->mode = mode; /* Lock is now converted */ goto out; } else if (!_dlm_table[mode][lck->mode]) { DEBUGLOG("Resource %s already locked lockid=%d, mode:%s\n", resource, lck->lockid, _get_mode(lck->mode)); if (!(flags & LCKF_NOQUEUE) && !pthread_cond_wait(&_lock_cond, &_lock_mutex) && _locks) { /* End of the game? */ DEBUGLOG("Resource %s retrying lock in mode:%s...\n", resource, _get_mode(mode)); goto retry; } goto bad; } }
/*
 * Send a message to a (or all) node(s) in the cluster and wait for
 * replies.  On success *response points to a dm_malloc'd array of
 * *num lvm_response_t entries (each with its own dm_malloc'd
 * response string); the caller frees them.
 * Returns the status from _send_request (0/-1 on failure).
 */
static int _cluster_request(char cmd, const char *node, void *data, int len,
			    lvm_response_t **response, int *num)
{
	char outbuf[sizeof(struct clvm_header) + len + strlen(node) + 1];
	char *inptr;
	char *retbuf = NULL;
	int status;
	int i;
	int num_responses = 0;
	struct clvm_header *head = (struct clvm_header *) outbuf;
	lvm_response_t *rarray;

	*num = 0;

	if (_clvmd_sock == -1)
		_clvmd_sock = _open_local_sock();

	if (_clvmd_sock == -1)
		return 0;

	_build_header(head, cmd, node, len);
	memcpy(head->node + strlen(head->node) + 1, data, len);

	status = _send_request(outbuf, sizeof(struct clvm_header) +
			       strlen(head->node) + len, &retbuf);
	if (!status)
		goto out;

	/* Count the number of responses we got */
	head = (struct clvm_header *) retbuf;
	inptr = head->args;
	while (inptr[0]) {
		num_responses++;
		inptr += strlen(inptr) + 1;	/* node name */
		inptr += sizeof(int);		/* status word */
		inptr += strlen(inptr) + 1;	/* response text */
	}

	/*
	 * Allocate response array.
	 * With an extra pair of INTs on the front to sanity
	 * check the pointer when we are given it back to free
	 */
	*response = dm_malloc(sizeof(lvm_response_t) * num_responses +
			      sizeof(int) * 2);
	if (!*response) {
		errno = ENOMEM;
		status = 0;
		goto out;
	}

	rarray = *response;

	/* Unpack the response into an lvm_response_t array */
	inptr = head->args;
	i = 0;
	while (inptr[0]) {
		strcpy(rarray[i].node, inptr);
		inptr += strlen(inptr) + 1;

		memcpy(&rarray[i].status, inptr, sizeof(int));
		inptr += sizeof(int);

		rarray[i].response = dm_malloc(strlen(inptr) + 1);
		if (rarray[i].response == NULL) {
			/* Free up everything else and return error */
			int j;

			/* Was freeing rarray[i].response i times; free each
			 * previously unpacked entry (index j) instead. */
			for (j = 0; j < i; j++)
				dm_free(rarray[j].response);
			/* The array came from dm_malloc, so release it with
			 * dm_free, not free. */
			dm_free(*response);
			*response = NULL;
			errno = ENOMEM;
			status = -1;
			goto out;
		}

		strcpy(rarray[i].response, inptr);
		rarray[i].len = strlen(inptr);
		inptr += strlen(inptr) + 1;
		i++;
	}

	*num = num_responses;
	*response = rarray;

      out:
	if (retbuf)
		dm_free(retbuf);

	return status;
}
/*
 * Send a request and return the status.
 * Writes inlen bytes to the clvmd socket, reads the fixed-size reply
 * header, then reads the variable-length argument payload into a
 * freshly dm_malloc'd *retbuf (caller frees).
 * Returns 1 on success, 0 on failure (errno set where possible).
 */
static int _send_request(const char *inbuf, int inlen, char **retbuf)
{
	char outbuf[PIPE_BUF];
	struct clvm_header *outheader = (struct clvm_header *) outbuf;
	int len;
	int off;
	int buflen;
	int err;

	/* Send it to CLVMD */
      rewrite:
	if ((err = write(_clvmd_sock, inbuf, inlen)) != inlen) {
		if (err == -1 && errno == EINTR)
			goto rewrite;	/* retry interrupted writes */
		fprintf(stderr, "Error writing data to clvmd: %s",
			strerror(errno));
		return 0;
	}

	/* Get the response */
      reread:
	if ((len = read(_clvmd_sock, outbuf,
			sizeof(struct clvm_header))) < 0) {
		if (errno == EINTR)
			goto reread;	/* retry interrupted reads */
		fprintf(stderr, "Error reading data from clvmd: %s",
			strerror(errno));
		return 0;
	}

	if (len == 0) {
		fprintf(stderr, "EOF reading CLVMD");
		errno = ENOTCONN;
		return 0;
	}

	/* Allocate buffer */
	/* NOTE(review): assumes the header read above was not short;
	 * a partial header would make buflen/off arithmetic below wrong
	 * — confirm short reads cannot occur on this local socket. */
	buflen = len + outheader->arglen;
	*retbuf = dm_malloc(buflen);
	if (!*retbuf) {
		errno = ENOMEM;
		return 0;
	}

	/* Copy the header */
	memcpy(*retbuf, outbuf, len);
	outheader = (struct clvm_header *) *retbuf;

	/* Read the returned values */
	off = 1;		/* we've already read the first byte */
	while (off <= outheader->arglen && len > 0) {
		len = read(_clvmd_sock, outheader->args + off,
			   buflen - off - offsetof(struct clvm_header, args));
		if (len > 0)
			off += len;
	}

	/* Was it an error ? */
	if (outheader->status != 0) {
		errno = outheader->status;

		/* Only return an error here if there are no node-specific
		   errors present in the message that might have more detail */
		if (!(outheader->flags & CLVMD_FLAG_NODEERRS)) {
			fprintf(stderr, "cluster request failed: %s\n",
				strerror(errno));
			return 0;
		}
	}

	return 1;
}
/*
 * Read (and checksum-verify) a config tree from a device, either by
 * mmap()ing a regular file or by reading up to two (possibly wrapped)
 * regions into a temporary buffer.  Only file/profile config sources
 * are accepted.  Returns 1 on success, 0 on failure.
 */
int config_file_read_fd(struct dm_config_tree *cft, struct device *dev,
			off_t offset, size_t size, off_t offset2,
			size_t size2, checksum_fn_t checksum_fn,
			uint32_t checksum)
{
	char *fb, *fe;
	int r = 0;
	int use_mmap = 1;
	off_t mmap_offset = 0;
	char *buf = NULL;
	struct config_source *cs = dm_config_get_custom(cft);

	if ((cs->type != CONFIG_FILE) && (cs->type != CONFIG_PROFILE)) {
		log_error(INTERNAL_ERROR
			  "config_file_read_fd: expected file or profile config source, "
			  "found %s config source.",
			  _config_source_names[cs->type]);
		return 0;
	}

	/* Only use mmap with regular files */
	if (!(dev->flags & DEV_REGULAR) || size2)
		use_mmap = 0;

	if (use_mmap) {
		mmap_offset = offset % lvm_getpagesize();
		/* memory map the file */
		fb = mmap((caddr_t) 0, size + mmap_offset, PROT_READ,
			  MAP_PRIVATE, dev_fd(dev), offset - mmap_offset);
		if (fb == (caddr_t) (-1)) {
			log_sys_error("mmap", dev_name(dev));
			/* Nothing was mapped: must not fall through to the
			 * munmap in the common exit path (was 'goto out'). */
			return 0;
		}
		fb = fb + mmap_offset;
	} else {
		if (!(buf = dm_malloc(size + size2))) {
			log_error("Failed to allocate circular buffer.");
			return 0;
		}
		if (!dev_read_circular(dev, (uint64_t) offset, size,
				       (uint64_t) offset2, size2, buf))
			goto out;
		fb = buf;
	}

	/* The checksum covers both regions (the second may be empty). */
	if (checksum_fn && checksum !=
	    (checksum_fn(checksum_fn(INITIAL_CRC, (const uint8_t *) fb, size),
			 (const uint8_t *) (fb + size), size2))) {
		log_error("%s: Checksum error", dev_name(dev));
		goto out;
	}

	fe = fb + size + size2;

	if (!dm_config_parse(cft, fb, fe))
		goto_out;

	r = 1;

      out:
	if (!use_mmap)
		dm_free(buf);
	else {
		/* unmap the file */
		if (munmap(fb - mmap_offset, size + mmap_offset)) {
			log_sys_error("munmap", dev_name(dev));
			r = 0;
		}
	}

	return r;
}