/**
 * Load all certificates of the given type found in a directory
 */
static void load_certs(load_ctx_t *ctx, char *type_str, char *dir)
{
	enumerator_t *enumerator;
	certificate_type_t type;
	x509_flag_t flag;
	struct stat st;
	chunk_t *map;
	char *path;

	/* resolve the textual type into certificate type and X.509 flag */
	vici_cert_info_from_str(type_str, &type, &flag);

	enumerator = enumerator_create_directory(dir);
	if (!enumerator)
	{
		return;
	}
	while (enumerator->enumerate(enumerator, NULL, &path, &st))
	{
		/* only regular files are considered, anything else is ignored */
		if (!S_ISREG(st.st_mode))
		{
			continue;
		}
		map = chunk_map(path, FALSE);
		if (!map)
		{
			fprintf(stderr, "mapping '%s' failed: %s, skipped\n", path, strerror(errno));
			continue;
		}
		load_cert(ctx, path, type, flag, *map);
		chunk_unmap(map);
	}
	enumerator->destroy(enumerator);
}
/**
 * Add a vici certificate blob value given by its file path
 *
 * Relative paths are resolved against SWANCTL_X509CADIR. The file is
 * memory-mapped and its contents added under the given key.
 *
 * @param req	vici request message to add the blob to
 * @param key	key to add the blob under
 * @param value	absolute path, or filename relative to SWANCTL_X509CADIR
 * @return		TRUE if the file was mapped and added successfully
 */
static bool add_file_key_value(vici_req_t *req, char *key, char *value)
{
	chunk_t *map;
	char *path, buf[PATH_MAX];
	int written;

	if (path_absolute(value))
	{
		path = value;
	}
	else
	{
		path = buf;
		written = snprintf(path, PATH_MAX, "%s%s%s", SWANCTL_X509CADIR,
						   DIRECTORY_SEPARATOR, value);
		/* a silently truncated path could map an unintended file, so
		 * treat truncation (or encoding errors) as a hard failure */
		if (written < 0 || written >= PATH_MAX)
		{
			fprintf(stderr, "path for ca certificate '%s' too long\n", value);
			return FALSE;
		}
	}
	map = chunk_map(path, FALSE);
	if (map)
	{
		vici_add_key_value(req, key, map->ptr, map->len);
		chunk_unmap(map);
		return TRUE;
	}
	else
	{
		fprintf(stderr, "loading ca certificate '%s' failed: %s\n",
				path, strerror(errno));
		return FALSE;
	}
}
/**
 * Load all certificates of a given type found in a directory
 */
static void load_certs(vici_conn_t *conn, command_format_options_t format,
					   char *type, char *dir)
{
	enumerator_t *enumerator;
	struct stat st;
	chunk_t *map;
	char *path;

	enumerator = enumerator_create_directory(dir);
	if (!enumerator)
	{
		return;
	}
	while (enumerator->enumerate(enumerator, NULL, &path, &st))
	{
		/* skip anything that is not a regular file */
		if (!S_ISREG(st.st_mode))
		{
			continue;
		}
		map = chunk_map(path, FALSE);
		if (!map)
		{
			fprintf(stderr, "mapping '%s' failed: %s, skipped\n", path, strerror(errno));
			continue;
		}
		load_cert(conn, format, path, type, *map);
		chunk_unmap(map);
	}
	enumerator->destroy(enumerator);
}
END_TEST

/*******************************************************************************
 * test for chunk_map and friends
 */

START_TEST(test_chunk_map)
{
	chunk_t *map, contents = chunk_from_chars(0x01,0x02,0x03,0x04,0x05);
	char *path = "/tmp/strongswan-chunk-map-test";

	/* create the test file with known contents to map below */
	ck_assert(chunk_write(contents, path, 022, TRUE));

	/* read-only mapping must reflect the file contents */
	map = chunk_map(path, FALSE);
	ck_assert(map != NULL);
	ck_assert_msg(chunk_equals(*map, contents), "%B", map);
	/* altering mapped chunk should not hurt; unmap tracks the mapping
	 * internally, not via the (clobbered) chunk value */
	*map = chunk_empty;
	ck_assert(chunk_unmap(map));

	/* writable mapping: changes through map->ptr must reach the file */
	map = chunk_map(path, TRUE);
	ck_assert(map != NULL);
	ck_assert_msg(chunk_equals(*map, contents), "%B", map);
	map->ptr[0] = 0x06;
	ck_assert(chunk_unmap(map));

	/* verify the write above was flushed to the file */
	contents.ptr[0] = 0x06;
	map = chunk_map(path, FALSE);
	ck_assert(map != NULL);
	ck_assert_msg(chunk_equals(*map, contents), "%B", map);
	ck_assert(chunk_unmap(map));

	unlink(path);
}
/**
 * Load a credential of the given type from a file
 */
static void *load_from_file(char *file, credential_type_t type, int subtype,
							identification_t *subject, x509_flag_t flags)
{
	void *cred = NULL;
	chunk_t *map;

	map = chunk_map(file, FALSE);
	if (map)
	{
		cred = load_from_blob(*map, type, subtype, subject, flags);
		chunk_unmap(map);
	}
	else
	{
		DBG1(DBG_LIB, " opening '%s' failed: %s", file, strerror(errno));
	}
	return cred;
}
/*
 * Allocate a chunk-aligned region of `size` bytes from the DSS (sbrk heap),
 * aligned to `alignment`.  Returns NULL if the DSS is exhausted, disabled,
 * or if arithmetic would wrap around.  On success, any chunk-aligned padding
 * needed to satisfy `alignment` is handed back via chunk_unmap() for reuse.
 * NOTE(review): `*zero` appears to be read-only here (request to zero the
 * memory); some callers may expect it to be set on return — confirm contract.
 */
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(config_dss);
	/* both size and alignment must be nonzero multiples of the chunk size */
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	/* dss_prev == (void *)-1 means a previous sbrk() failed: DSS unusable */
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			/* guard against address-space wrap-around in the sums above */
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			/* sbrk returned the break we computed against: no race, ours */
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				/* recycle the alignment padding for later allocations */
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		/* retry unless sbrk itself failed (returned (void *)-1) */
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}