/*
 * Insert 'length' bytes from 'data' into a dynamic buffer at 'offset',
 * shifting any existing bytes at or after 'offset' towards the end.
 * Grows the buffer (with spare headroom) when there is not enough room.
 * 'offset' may equal the current size, which makes this an append.
 */
void duk_hbuffer_insert_bytes(duk_hthread *thr, duk_hbuffer_dynamic *buf, size_t offset, duk_uint8_t *data, size_t length) {
	char *base;

	/* XXX: allow inserts with offset > curr_size? i.e., insert zeroes automatically? */

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(buf != NULL);
	DUK_ASSERT(DUK_HBUFFER_HAS_DYNAMIC(buf));
	DUK_ASSERT(offset >= 0);  /* unsigned, so always true */
	DUK_ASSERT(offset <= DUK_HBUFFER_GET_SIZE(buf));  /* equality is OK (= append) */
	DUK_ASSERT(data != NULL);
	DUK_ASSERT(length >= 0);  /* unsigned, so always true */

	/* Nothing to do for an empty insert. */
	if (length == 0) {
		return;
	}

	/* Make sure there is enough spare before touching the data;
	 * the resize preserves the current buffer contents.
	 */
	if (DUK_HBUFFER_DYNAMIC_GET_SPARE_SIZE(buf) < length) {
		duk_hbuffer_resize(thr,
		                   buf,
		                   DUK_HBUFFER_GET_SIZE(buf),
		                   add_spare(DUK_HBUFFER_GET_SIZE(buf) + length));
	}
	DUK_ASSERT(DUK_HBUFFER_DYNAMIC_GET_SPARE_SIZE(buf) >= length);

	base = (char *) DUK_HBUFFER_DYNAMIC_GET_CURR_DATA_PTR(buf);

	if (offset < DUK_HBUFFER_GET_SIZE(buf)) {
		/* Not an append: shift the tail upwards to open a hole
		 * of 'length' bytes at 'offset'.  Regions overlap, so
		 * memmove (not memcpy) is required here.
		 */
		DUK_ASSERT(DUK_HBUFFER_GET_SIZE(buf) - offset > 0);  /* not a zero byte memmove */
		DUK_MEMMOVE((void *) (base + offset + length),
		            (void *) (base + offset),
		            DUK_HBUFFER_GET_SIZE(buf) - offset);
	}

	/* Caller's data cannot overlap the hole, plain copy is fine. */
	DUK_MEMCPY((void *) (base + offset), data, length);

	buf->size += length;
}
static int create_volume(int ac, char **av) { struct mfi_config_data *config; struct mfi_array *ar; struct mfi_ld_config *ld; struct config_id_state state; size_t config_size; char *p, *cfg_arrays, *cfg_volumes; int error, fd, i, raid_type; int narrays, nvolumes, arrays_per_volume; struct array_info *arrays; long stripe_size; #ifdef DEBUG int dump; #endif int ch, verbose; /* * Backwards compat. Map 'create volume' to 'create' and * 'create spare' to 'add'. */ if (ac > 1) { if (strcmp(av[1], "volume") == 0) { av++; ac--; } else if (strcmp(av[1], "spare") == 0) { av++; ac--; return (add_spare(ac, av)); } } if (ac < 2) { warnx("create volume: volume type required"); return (EINVAL); } bzero(&state, sizeof(state)); config = NULL; arrays = NULL; narrays = 0; error = 0; fd = mfi_open(mfi_unit); if (fd < 0) { error = errno; warn("mfi_open"); return (error); } if (!mfi_reconfig_supported()) { warnx("The current mfi(4) driver does not support " "configuration changes."); error = EOPNOTSUPP; goto error; } /* Lookup the RAID type first. */ raid_type = -1; for (i = 0; raid_type_table[i].name != NULL; i++) if (strcasecmp(raid_type_table[i].name, av[1]) == 0) { raid_type = raid_type_table[i].raid_type; break; } if (raid_type == -1) { warnx("Unknown or unsupported volume type %s", av[1]); error = EINVAL; goto error; } /* Parse any options. */ optind = 2; #ifdef DEBUG dump = 0; #endif verbose = 0; stripe_size = 64 * 1024; while ((ch = getopt(ac, av, "ds:v")) != -1) { switch (ch) { #ifdef DEBUG case 'd': dump = 1; break; #endif case 's': stripe_size = dehumanize(optarg); if ((stripe_size < 512) || (!powerof2(stripe_size))) stripe_size = 64 * 1024; break; case 'v': verbose = 1; break; case '?': default: error = EINVAL; goto error; } } ac -= optind; av += optind; /* Parse all the arrays. 
*/ narrays = ac; if (narrays == 0) { warnx("At least one drive list is required"); error = EINVAL; goto error; } switch (raid_type) { case RT_RAID0: case RT_RAID1: case RT_RAID5: case RT_RAID6: case RT_CONCAT: if (narrays != 1) { warnx("Only one drive list can be specified"); error = EINVAL; goto error; } break; case RT_RAID10: case RT_RAID50: case RT_RAID60: if (narrays < 1) { warnx("RAID10, RAID50, and RAID60 require at least " "two drive lists"); error = EINVAL; goto error; } if (narrays > MFI_MAX_SPAN_DEPTH) { warnx("Volume spans more than %d arrays", MFI_MAX_SPAN_DEPTH); error = EINVAL; goto error; } break; } arrays = calloc(narrays, sizeof(*arrays)); if (arrays == NULL) { warnx("malloc failed"); error = ENOMEM; goto error; } for (i = 0; i < narrays; i++) { error = parse_array(fd, raid_type, av[i], &arrays[i]); if (error) goto error; } switch (raid_type) { case RT_RAID10: case RT_RAID50: case RT_RAID60: for (i = 1; i < narrays; i++) { if (arrays[i].drive_count != arrays[0].drive_count) { warnx("All arrays must contain the same " "number of drives"); error = EINVAL; goto error; } } break; } /* * Fetch the current config and build sorted lists of existing * array and volume identifiers. 
*/ if (mfi_config_read(fd, &config) < 0) { error = errno; warn("Failed to read configuration"); goto error; } p = (char *)config->array; state.array_ref = 0xffff; state.target_id = 0xff; state.array_count = config->array_count; if (config->array_count > 0) { state.arrays = calloc(config->array_count, sizeof(int)); if (state.arrays == NULL) { warnx("malloc failed"); error = ENOMEM; goto error; } for (i = 0; i < config->array_count; i++) { ar = (struct mfi_array *)p; state.arrays[i] = ar->array_ref; p += config->array_size; } qsort(state.arrays, config->array_count, sizeof(int), compare_int); } else state.arrays = NULL; state.log_drv_count = config->log_drv_count; if (config->log_drv_count) { state.volumes = calloc(config->log_drv_count, sizeof(int)); if (state.volumes == NULL) { warnx("malloc failed"); error = ENOMEM; goto error; } for (i = 0; i < config->log_drv_count; i++) { ld = (struct mfi_ld_config *)p; state.volumes[i] = ld->properties.ld.v.target_id; p += config->log_drv_size; } qsort(state.volumes, config->log_drv_count, sizeof(int), compare_int); } else state.volumes = NULL; free(config); /* Determine the size of the configuration we will build. */ switch (raid_type) { case RT_RAID0: case RT_RAID1: case RT_RAID5: case RT_RAID6: case RT_CONCAT: case RT_JBOD: /* Each volume spans a single array. */ nvolumes = narrays; break; case RT_RAID10: case RT_RAID50: case RT_RAID60: /* A single volume spans multiple arrays. */ nvolumes = 1; break; default: /* Pacify gcc. 
*/ abort(); } config_size = sizeof(struct mfi_config_data) + sizeof(struct mfi_ld_config) * nvolumes + MFI_ARRAY_SIZE * narrays; config = calloc(1, config_size); if (config == NULL) { warnx("malloc failed"); error = ENOMEM; goto error; } config->size = config_size; config->array_count = narrays; config->array_size = MFI_ARRAY_SIZE; /* XXX: Firmware hardcode */ config->log_drv_count = nvolumes; config->log_drv_size = sizeof(struct mfi_ld_config); config->spares_count = 0; config->spares_size = 40; /* XXX: Firmware hardcode */ cfg_arrays = (char *)config->array; cfg_volumes = cfg_arrays + config->array_size * narrays; /* Build the arrays. */ for (i = 0; i < narrays; i++) { build_array(fd, cfg_arrays, &arrays[i], &state, verbose); cfg_arrays += config->array_size; } /* Now build the volume(s). */ arrays_per_volume = narrays / nvolumes; for (i = 0; i < nvolumes; i++) { build_volume(cfg_volumes, arrays_per_volume, &arrays[i * arrays_per_volume], raid_type, stripe_size, &state, verbose); cfg_volumes += config->log_drv_size; } #ifdef DEBUG if (dump) dump_config(fd, config); #endif /* Send the new config to the controller. */ if (mfi_dcmd_command(fd, MFI_DCMD_CFG_ADD, config, config_size, NULL, 0, NULL) < 0) { error = errno; warn("Failed to add volume"); /* FALLTHROUGH */ } error: /* Clean up. */ free(config); free(state.volumes); free(state.arrays); for (i = 0; i < narrays; i++) free(arrays[i].drives); free(arrays); close(fd); return (error); }
/*
 * Insert a copy of the buffer's own bytes [src_offset, src_offset+length)
 * into the same dynamic buffer at dst_offset, shifting the tail upwards.
 * Both offsets refer to the buffer state BEFORE the insert; the source
 * slice may straddle dst_offset, which requires a two-piece copy below.
 */
void duk_hbuffer_insert_slice(duk_hthread *thr, duk_hbuffer_dynamic *buf, size_t dst_offset, size_t src_offset, size_t length) {
	char *p;
	size_t src_end_offset;  /* source end (exclusive) in initial buffer */
	size_t len;

	DUK_ASSERT(thr != NULL);
	DUK_ASSERT(buf != NULL);
	DUK_ASSERT(DUK_HBUFFER_HAS_DYNAMIC(buf));
	DUK_ASSERT(dst_offset >= 0);                                    /* always true */
	DUK_ASSERT(dst_offset <= DUK_HBUFFER_GET_SIZE(buf));            /* allow equality */
	DUK_ASSERT(src_offset >= 0);                                    /* always true */
	DUK_ASSERT(src_offset <= DUK_HBUFFER_GET_SIZE(buf));            /* allow equality */
	DUK_ASSERT(length >= 0);                                        /* always true */
	DUK_ASSERT(src_offset + length <= DUK_HBUFFER_GET_SIZE(buf));   /* allow equality */

	/* Zero-length insert is a no-op. */
	if (length == 0) {
		return;
	}

	/* Grow the buffer first if there is not enough spare; resize
	 * preserves current contents but may move the allocation, so the
	 * data pointer is (re)fetched only after this point.
	 */
	if (DUK_HBUFFER_DYNAMIC_GET_SPARE_SIZE(buf) < length) {
		duk_hbuffer_resize(thr, buf, DUK_HBUFFER_GET_SIZE(buf), add_spare(DUK_HBUFFER_GET_SIZE(buf) + length));
	}
	DUK_ASSERT(DUK_HBUFFER_DYNAMIC_GET_SPARE_SIZE(buf) >= length);

	p = (char *) DUK_HBUFFER_DYNAMIC_GET_CURR_DATA_PTR(buf);

	/*
	 * src_offset and dst_offset refer to the state of the buffer
	 * before any changes are made.  This must be taken into account
	 * when moving data around; in particular, the source data may
	 * "straddle" the dst_offset, so the insert may need to be handled
	 * in two pieces.
	 */

	src_end_offset = src_offset + length;

	/* create a hole for the insert */
	len = DUK_HBUFFER_GET_SIZE(buf) - dst_offset;
	if (len > 0) {
		/* Overlapping regions: everything at or after dst_offset
		 * shifts up by 'length' bytes, hence memmove. */
		DUK_MEMMOVE(p + dst_offset + length,
		            p + dst_offset,
		            len);
	}

	if (src_offset < dst_offset) {
		if (src_end_offset <= dst_offset) {
			/* entire source is before 'dst_offset' and was not
			 * moved by the memmove above; single plain copy. */
			DUK_MEMCPY(p + dst_offset,
			           p + src_offset,
			           length);
		} else {
			/* part of the source is before 'dst_offset'; straddles.
			 * The first 'len' source bytes are still in place; the
			 * remaining (length - len) bytes were shifted up by
			 * 'length' by the hole memmove and are read from
			 * their new location. */
			len = dst_offset - src_offset;
			DUK_ASSERT(len >= 1 && len < length);
			DUK_ASSERT(length - len >= 1);
			DUK_MEMCPY(p + dst_offset,
			           p + src_offset,
			           len);
			DUK_MEMCPY(p + dst_offset + len,
			           p + src_offset + length + len,  /* take above memmove() into account */
			           length - len);
		}
	} else {
		/* entire source is after 'dst_offset': the whole slice was
		 * shifted up by 'length', so read from the adjusted offset.
		 * No overlap with the destination hole in this branch. */
		DUK_MEMCPY(p + dst_offset,
		           p + src_offset + length,  /* take above memmove() into account */
		           length);
	}

	buf->size += length;
}