/**
 * Encode an RSA public key in DNSKEY format (RFC 3110)
 *
 * The encoding is: exponent length prefix (1 octet, or 0x00 followed by a
 * 2-octet length), exponent, modulus — the whole thing base64 encoded.
 */
static bool build_pub(chunk_t *encoding, va_list args)
{
	chunk_t n, e, pubkey;
	size_t exp_len;
	u_char *pos;

	if (cred_encoding_args(args, CRED_PART_RSA_MODULUS, &n,
						   CRED_PART_RSA_PUB_EXP, &e, CRED_PART_END))
	{
		/* remove leading zeros in exponent and modulus, but keep at least
		 * one octet: the original unbounded loops would read past the end
		 * of an all-zero chunk */
		while (e.len > 1 && *e.ptr == 0)
		{
			e = chunk_skip(e, 1);
		}
		while (n.len > 1 && *n.ptr == 0)
		{
			n = chunk_skip(n, 1);
		}

		if (e.len < 256)
		{
			/* exponent length fits into a single octet */
			exp_len = 1;
			pubkey = chunk_alloc(exp_len + e.len + n.len);
			pubkey.ptr[0] = (char)e.len;
		}
		else if (e.len < 65536)
		{
			/* exponent length fits into two octets preceded by zero octet */
			exp_len = 3;
			pubkey = chunk_alloc(exp_len + e.len + n.len);
			pubkey.ptr[0] = 0x00;
			htoun16(pubkey.ptr + 1, e.len);
		}
		else
		{
			/* exponent length is too large */
			return FALSE;
		}

		/* copy exponent and modulus and convert to base64 format */
		pos = pubkey.ptr + exp_len;
		memcpy(pos, e.ptr, e.len);
		pos += e.len;
		memcpy(pos, n.ptr, n.len);
		*encoding = chunk_to_base64(pubkey, NULL);
		chunk_free(&pubkey);
		return TRUE;
	}
	return FALSE;
}
/*
 * Described in header
 */
bool botan_get_privkey_encoding(botan_privkey_t key, cred_encoding_type_t type,
								chunk_t *encoding)
{
	uint32_t format = BOTAN_PRIVKEY_EXPORT_FLAG_DER;

	switch (type)
	{
		case PRIVKEY_PEM:
			format = BOTAN_PRIVKEY_EXPORT_FLAG_PEM;
			/* fall-through */
		case PRIVKEY_ASN1_DER:
			/* probe for the required buffer size: Botan reports it via
			 * BOTAN_FFI_ERROR_INSUFFICIENT_BUFFER_SPACE and writes the
			 * length back through the pointer */
			encoding->len = 0;
			if (botan_privkey_export(key, NULL, &encoding->len, format)
				!= BOTAN_FFI_ERROR_INSUFFICIENT_BUFFER_SPACE)
			{
				return FALSE;
			}
			*encoding = chunk_alloc(encoding->len);
			/* second call does the actual export; non-zero means failure */
			if (botan_privkey_export(key, encoding->ptr, &encoding->len,
									 format))
			{
				chunk_free(encoding);
				return FALSE;
			}
			return TRUE;
		default:
			return FALSE;
	}
}
/*
 * Defined in header.
 */
chunk_t asn1_build_known_oid(int n)
{
	chunk_t oid;
	int i;

	if (n < 0 || n >= OID_MAX)
	{
		return chunk_empty;
	}

	/* the encoded OID body needs level+1 octets: one per ancestor level
	 * plus one for the entry itself */
	i = oid_names[n].level + 1;
	oid = chunk_alloc(2 + i);
	oid.ptr[0] = ASN1_OID;
	oid.ptr[1] = i;

	/* walk backwards through the oid_names table and fill the body
	 * back-to-front: entries at a level >= the octets still needed are
	 * siblings/descendants and are skipped; the first entry at each
	 * shallower level is the ancestor whose octet is emitted */
	do
	{
		if (oid_names[n].level >= i)
		{
			n--;
			continue;
		}
		oid.ptr[--i + 2] = oid_names[n--].octet;
	}
	while (i > 0);

	return oid;
}
/*
 * Resize a chunk from old_size to new_size bytes, preserving the first
 * old_size bytes. Returns the (possibly moved) pointer, or 0 on failure.
 */
void *
chunk_realloc (void *ptr, size_t old_size, size_t new_size)
{
#if USE_MMAP
#ifdef MREMAP_MAYMOVE
  /* requires _GNU_SOURCE */
  void *ptr2 = mremap (ptr, old_size, new_size, MREMAP_MAYMOVE);
  if (ptr2 == MAP_FAILED)
    return 0;
  return ptr2;
#else
  /* no mremap(): map a fresh region, copy the payload, unmap the old one */
  void *ptr2 = chunk_alloc (new_size, 0);
  if (!ptr2)
    return ptr2;
  /* TODO: prepopulate old_size pages instead of faulting them in */
  memcpy (ptr2, ptr, old_size);
  munmap (ptr, old_size);
  return ptr2;
#endif
#else
  return realloc (ptr, new_size);
#endif
}
/*
 * Allocate a chunk for a huge allocation, recycling a cached chunk when
 * possible. On success, *out_arena is set to the arena that owns the chunk
 * (whose mutex the caller then holds — NOTE(review): assumes the caller
 * entered with arena->mutex held; confirm against callers).
 */
static void *huge_chunk_alloc(struct thread_cache *cache, size_t size, size_t alignment,
                              struct arena **out_arena) {
    struct arena *arena = get_arena(cache);
    void *chunk = chunk_recycle(&arena->chunks, NULL, size, alignment);
    if (chunk) {
        /* recycled chunks are not committed; commit now, or put the chunk
         * back on failure */
        if (unlikely(memory_commit(chunk, size))) {
            chunk_free(&arena->chunks, chunk, size);
            return NULL;
        }
    } else {
        if (unlikely(!(chunk = chunk_alloc(NULL, size, alignment)))) {
            return NULL;
        }
        // Work around the possibility of holes created by huge_move_expand (see below).
        /* a freshly mapped chunk may fall into another arena's address
         * range: hand the lock over to the owning arena */
        struct arena *chunk_arena = get_huge_arena(chunk);
        if (chunk_arena != arena) {
            mutex_unlock(&arena->mutex);
            if (chunk_arena) {
                mutex_lock(&chunk_arena->mutex);
            }
            arena = chunk_arena;
        }
    }
    *out_arena = arena;
    return chunk;
}
/*
 * Allocate one block of allocator->block_size bytes from the pooled chunks,
 * creating a new chunk when none has a free block. Thread-safe via the
 * allocator lock. Returns NULL only on out-of-memory.
 */
void* allocator_alloc(allocator_t *allocator)
{
	pthread_mutex_lock(&allocator->lock);
	/* only apply DLLIST_ELEMENT (container_of-style pointer arithmetic) to
	 * a non-NULL link: the original computed it from NULL before the NULL
	 * check, which is undefined behavior */
	chunk_t *chunk = allocator->alloc_chunk ?
			DLLIST_ELEMENT(allocator->alloc_chunk, chunk_t, link) : NULL;
	if (!chunk || chunk_empty(chunk)) {
		dllist_link *l = allocator->chunks.head;
		for (; ; l = l->next) {
			if (!l) {
				/* no chunk has free blocks left: create a new one */
				chunk_t *c = malloc(sizeof(chunk_t));
				if (!c) {
					/* propagate OOM instead of crashing in chunk_init() */
					pthread_mutex_unlock(&allocator->lock);
					return NULL;
				}
				chunk_init(c, allocator->default_nr_blocks, allocator->block_size);
				allocator->alloc_chunk = &c->link;
				dllist_iat(&allocator->chunks, &c->link);
				allocator->chunks_count++;
				break;
			}
			chunk_t *tmp = DLLIST_ELEMENT(l, chunk_t, link);
			if (!chunk_empty(tmp)) {
				/* cache the first chunk with spare capacity */
				allocator->alloc_chunk = l;
				break;
			}
		}
	}
	chunk_t *tmp = DLLIST_ELEMENT(allocator->alloc_chunk, chunk_t, link);
	void *rv = chunk_alloc(tmp, allocator->block_size);
	pthread_mutex_unlock(&allocator->lock);
	return rv;
}
/*
 * Described in header
 */
bool botan_get_encoding(botan_pubkey_t pubkey, cred_encoding_type_t type,
						chunk_t *encoding)
{
	bool success = TRUE;

	/* probe for the required DER buffer size first (Botan signals it via
	 * BOTAN_FFI_ERROR_INSUFFICIENT_BUFFER_SPACE) */
	encoding->len = 0;
	if (botan_pubkey_export(pubkey, NULL, &encoding->len,
							BOTAN_PRIVKEY_EXPORT_FLAG_DER)
		!= BOTAN_FFI_ERROR_INSUFFICIENT_BUFFER_SPACE)
	{
		return FALSE;
	}

	*encoding = chunk_alloc(encoding->len);
	if (botan_pubkey_export(pubkey, encoding->ptr, &encoding->len,
							BOTAN_PRIVKEY_EXPORT_FLAG_DER))
	{
		chunk_free(encoding);
		return FALSE;
	}

	if (type != PUBKEY_SPKI_ASN1_DER)
	{
		/* re-encode the SPKI DER blob into the requested format, freeing
		 * the intermediate encoding afterwards */
		chunk_t asn1_encoding = *encoding;

		success = lib->encoding->encode(lib->encoding, type, NULL, encoding,
										CRED_PART_ECDSA_PUB_ASN1_DER,
										asn1_encoding, CRED_PART_END);
		chunk_free(&asn1_encoding);
	}
	return success;
}
/*
 * Described in header
 */
bool botan_dh_key_derivation(botan_privkey_t key, chunk_t pub, chunk_t *secret)
{
	botan_pk_op_ka_t op;
	bool success = FALSE;

	/* "Raw" = plain DH shared secret, no KDF applied */
	if (botan_pk_op_key_agreement_create(&op, key, "Raw", 0))
	{
		return FALSE;
	}
	if (!botan_pk_op_key_agreement_size(op, &secret->len))
	{
		*secret = chunk_alloc(secret->len);
		if (!botan_pk_op_key_agreement(op, secret->ptr, &secret->len,
									   pub.ptr, pub.len, NULL, 0))
		{
			success = TRUE;
		}
		else
		{
			/* wipe the partially written secret on failure */
			chunk_clear(secret);
		}
	}
	botan_pk_op_key_agreement_destroy(op);
	return success;
}
/* wrap the given data in a chunk and append it to the container's tail */
ssize_t container_add_tail_data(container_t *container, data_t *data,
                                chunk_flags_t flags)
{ // {{{
	container_chunk_t *new_chunk = chunk_alloc(data, flags);

	if (new_chunk == NULL)
	{
		return -ENOMEM;
	}
	container_add_tail_any(container, new_chunk);
	return 0;
} // }}}
/*
 * Safe to use from a single thread.
 *
 * Create a buffer file writing to output_fname; returns NULL on
 * allocation failure.
 */
BufferFile *
buffer_file_new (StackMap *sm, const char *output_fname)
{
  /* calloc takes (nmemb, size); the original had the arguments swapped,
   * which happens to allocate the same number of bytes but reads wrong */
  BufferFile *b = calloc (1, sizeof (BufferFile));
  if (!b)
    return NULL;
  b->sm = sm;
  b->dest = output_fname;
  b->cur = chunk_alloc (b->sm, b->dest);
  return b;
}
/*
 * Allocate an aligned huge region of at least `size` bytes, rounded up to a
 * chunk multiple and tracked by an extent node in the global huge tree.
 * Returns NULL on size overflow or OOM.
 */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */
	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed,
	    chunk_dss_prec_get());
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	if (config_stats) {
		stats_cactive_add(csize);
		huge_nmalloc++;
		huge_allocated += csize;
	}
	malloc_mutex_unlock(&huge_mtx);

	/* debug fills: junk when requested, or zero only if the chunk did not
	 * already come back zeroed */
	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}
/*
 * Described in header.
 */
bool rng_allocate_bytes_not_zero(rng_t *rng, size_t len, chunk_t *chunk,
								 bool all)
{
	*chunk = chunk_alloc(len);
	if (rng_get_bytes_not_zero(rng, len, chunk->ptr, all))
	{
		return TRUE;
	}
	/* wipe and release the buffer if the RNG failed */
	chunk_clear(chunk);
	return FALSE;
}
/*
 * Described in header
 */
bool botan_get_fingerprint(botan_pubkey_t pubkey, void *cache,
						   cred_encoding_type_t type, chunk_t *fp)
{
	hasher_t *hasher;
	chunk_t key;

	/* return a previously computed fingerprint, if cached */
	if (cache &&
		lib->encoding->get_cache(lib->encoding, type, cache, fp))
	{
		return TRUE;
	}

	switch (type)
	{
		case KEYID_PUBKEY_SHA1:
			/* subjectPublicKey -> use botan_pubkey_fingerprint() */
			*fp = chunk_alloc(HASH_SIZE_SHA1);
			if (botan_pubkey_fingerprint(pubkey, "SHA-1", fp->ptr, &fp->len))
			{
				chunk_free(fp);
				return FALSE;
			}
			break;
		case KEYID_PUBKEY_INFO_SHA1:
			/* subjectPublicKeyInfo -> use botan_pubkey_export(), then hash */
			if (!botan_get_encoding(pubkey, PUBKEY_SPKI_ASN1_DER, &key))
			{
				return FALSE;
			}
			hasher = lib->crypto->create_hasher(lib->crypto, HASH_SHA1);
			if (!hasher || !hasher->allocate_hash(hasher, key, fp))
			{
				DBG1(DBG_LIB, "SHA1 hash algorithm not supported, "
					 "fingerprinting failed");
				DESTROY_IF(hasher);
				chunk_free(&key);
				return FALSE;
			}
			hasher->destroy(hasher);
			chunk_free(&key);
			break;
		default:
			return FALSE;
	}

	/* store the fingerprint for subsequent lookups */
	if (cache)
	{
		lib->encoding->cache(lib->encoding, type, cache, *fp);
	}
	return TRUE;
}
/*
 * Append len bytes of str to the buffer file, spilling into a fresh chunk
 * whenever the current chunk's payload fills up.
 * NOTE(review): assumes chunk_alloc() retires/flushes the full chunk and
 * returns an empty one — confirm against its definition.
 */
void
buffer_file_append (BufferFile *file, const char *str, size_t len)
{
  do {
    /* copy as much as still fits into the current chunk */
    unsigned long to_write = MIN (CHUNK_PAYLOAD - file->cur->length, len);

    memcpy (file->cur->data + file->cur->length, str, to_write);
    str += to_write;
    len -= to_write;
    file->cur->length += to_write;

    /* chunk full: continue in a new one */
    if (file->cur->length >= CHUNK_PAYLOAD)
      file->cur = chunk_alloc (file->sm, file->dest);
  } while (len > 0);
}
/* wrap data in a chunk and prepend it, making it the new list head */
ssize_t list_t_unshift(list_t *list, data_t *data)
{ // {{{
	list_chunk_t *new_head = chunk_alloc(data);

	if (new_head == NULL)
		return -ENOMEM;

	new_head->cnext = list->head;
	list->head = new_head;
	return 0;
} // }}}
/*
 * Allocate a huge region of at least `size` bytes, rounded up to a chunk
 * multiple and tracked by an extent node. Returns NULL on size overflow
 * or OOM.
 */
void *
huge_malloc(size_t size, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;

	/* Allocate one or more contiguous chunks for this request. */
	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	ret = chunk_alloc(csize, false, &zero);
	if (ret == NULL) {
		base_node_dealloc(node);
		return (NULL);
	}

	/* Insert node into huge. */
	node->addr = ret;
	node->size = csize;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
	stats_cactive_add(csize);
	huge_nmalloc++;
	huge_allocated += csize;
#endif
	malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
	/* zero may have been set by chunk_alloc() if the memory came back
	 * already zeroed, in which case no fill is needed */
	if (zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero)
			memset(ret, 0, csize);
	}
#endif

	return (ret);
}
/*
 * Reserve `size` uninitialized bytes at the top of the obstack, growing or
 * replacing the current chunk when there is not enough room.
 */
void
sml_obstack_blank(sml_obstack_t **obstack, size_t size)
{
	sml_obstack_t *chunk = *obstack;

	if (chunk == NULL || (size_t)(chunk->end - chunk->free) < size) {
		/* base == free means no object is currently growing in this chunk,
		 * so a fresh chunk suffices; otherwise the partially built object
		 * must be carried over via realloc */
		if (chunk == NULL || chunk->base == chunk->free)
			chunk_alloc(obstack, size);
		else
			chunk_realloc(obstack, size);
		chunk = *obstack;
	}
	chunk->free += size;
}
/*
 * Refill the free list for block size n: fetch a batch of blocks from the
 * memory pool, hand the first block to the caller and thread the remaining
 * ones onto the matching free list. Returns NULL when the pool is exhausted.
 */
void *refill(size_t n)
{
	obj **my_free_list = NULL;
	obj *result = NULL, *cur_obj = NULL, *next_obj = NULL;
	int i = 0;
	/* by default, fetch 16 blocks from the memory pool */
	size_t nobjs = 16;
	/* nobjs is updated to the number of blocks actually obtained */
	char *chunk = chunk_alloc(n, &nobjs);

	if(!chunk) {
		return NULL;
	}
	/* the pool is running low and returned only one block:
	 * hand it straight to the user */
	if(nobjs == 1) {
		return chunk;
	}
	/* otherwise prepare to add the new nodes to the free list */
	my_free_list = free_list + freelist_index(n);
	/* build the free list within the chunk's contiguous space */
	result = (obj *)chunk; /* this first block is returned to the user */
	*my_free_list = next_obj = (obj *)(chunk + n);
	/* more than two contiguous free blocks: record the run in the
	 * per-size state table */
	if (nobjs > 2) {
		mem_state_t* p_state = free_list_state+ freelist_index(n);
		p_state->cntn_unit_num = nobjs-1;
		p_state->cntn_chunk_num++;
		p_state->cntn_chunk_head = *my_free_list;
		p_state->pre_cntn_head = *my_free_list;
	}
	/* link the remaining nobjs-1 blocks into a singly linked list */
	for(i = 1; ; i++) {
		cur_obj = next_obj;
		next_obj = (obj *)((char *)next_obj + n);
		/* last block terminates the list */
		if(nobjs - 1 == i) {
			cur_obj->free_list_link = NULL;
			break;
		}
		else {
			cur_obj->free_list_link = next_obj;
		}
	}
	return result;
}
/*
 * Described in header
 */
bool botan_get_signature(botan_privkey_t key, const char *scheme,
						 chunk_t data, chunk_t *signature)
{
	botan_pk_op_sign_t sign_op;
	botan_rng_t rng;

	if (!scheme || !signature)
	{
		return FALSE;
	}

	if (botan_pk_op_sign_create(&sign_op, key, scheme, 0))
	{
		return FALSE;
	}

	if (botan_pk_op_sign_update(sign_op, data.ptr, data.len))
	{
		botan_pk_op_sign_destroy(sign_op);
		return FALSE;
	}

	/* query the maximum signature length before allocating the buffer */
	signature->len = 0;
	if (botan_pk_op_sign_output_length(sign_op, &signature->len))
	{
		botan_pk_op_sign_destroy(sign_op);
		return FALSE;
	}

	/* a random source is required for probabilistic signature schemes */
	if (botan_rng_init(&rng, "user"))
	{
		botan_pk_op_sign_destroy(sign_op);
		return FALSE;
	}

	*signature = chunk_alloc(signature->len);
	if (botan_pk_op_sign_finish(sign_op, rng, signature->ptr,
								&signature->len))
	{
		chunk_free(signature);
		botan_rng_destroy(rng);
		botan_pk_op_sign_destroy(sign_op);
		return FALSE;
	}

	botan_rng_destroy(rng);
	botan_pk_op_sign_destroy(sign_op);
	return TRUE;
}
/*
 * Wrap data in a chunk and append it at the tail of the list.
 * Returns 0 on success, -ENOMEM if the chunk cannot be allocated.
 */
ssize_t list_t_push (list_t *list, data_t *data){ // {{{
	list_chunk_t *chunk;
	list_chunk_t *curr;

	if( (chunk = chunk_alloc(data)) == NULL)
		return -ENOMEM;

	/* explicitly terminate the new tail: list_t_unshift() sets ->cnext by
	 * hand, suggesting chunk_alloc() does not zero it — an uninitialized
	 * cnext here would leave a dangling tail pointer */
	chunk->cnext = NULL;

	for(curr = list->head; curr; curr = curr->cnext){
		if(curr->cnext == NULL){
			curr->cnext = chunk;
			return 0;
		}
	}
	/* empty list: the new chunk becomes the head */
	list->head = chunk;
	return 0;
} // }}}
/*
 * Map the chunk backing jemalloc's internal base allocations.
 * Returns true on failure, false on success (jemalloc convention).
 */
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (base_pages == NULL)
		return (true);
	/* bump-allocation cursor and end-of-region sentinel */
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}
/*
 * Copy everything readable from input_fd into the buffer file, spilling into
 * fresh chunks as they fill up. Stops on EOF or on a read error (reported
 * via perror).
 */
void
buffer_file_dump (BufferFile *file, int input_fd)
{
  for (;;) {
    unsigned long space = CHUNK_PAYLOAD - file->cur->length;
    /* read() returns ssize_t; the original stored it in an unsigned long,
     * making the `< 0` error check always false and silently ignoring
     * read errors */
    ssize_t n = read (input_fd, file->cur->data + file->cur->length, space);

    if (n < 0) {
      perror ("read error");
      break;
    } else if (n == 0) {
      /* EOF */
      break;
    }

    file->cur->length += n;
    /* chunk full: continue in a new one */
    if (file->cur->length >= CHUNK_PAYLOAD)
      file->cur = chunk_alloc (file->sm, file->dest);
  }
}
/*
 * Map parity position `parity_pos` to block `file_pos` of `file`, either by
 * extending the chunk that already covers the previous file block, or by
 * inserting a fresh one-block chunk into the parity and file trees.
 */
void fs_allocate(struct snapraid_disk* disk, block_off_t parity_pos, struct snapraid_file* file, block_off_t file_pos)
{
	struct snapraid_chunk* chunk;
	struct snapraid_chunk* parity_chunk;
	struct snapraid_chunk* file_chunk;

	if (file_pos > 0) {
		/* search an existing chunk for the previous file_pos */
		chunk = fs_file2chunk_get_ts(disk, &disk->fs_last, file, file_pos - 1);
		if (chunk != 0 && parity_pos == chunk->parity_pos + chunk->count) {
			/* ensure that we are extending the chunk at the end */
			if (file_pos != chunk->file_pos + chunk->count) {
				/* LCOV_EXCL_START */
				log_fatal("Internal inconsistency when allocating file '%s' at position '%u' in the middle of chunk '%u:%u' in disk '%s'\n", file->sub, file_pos, chunk->file_pos, chunk->count, disk->name);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}

			/* extend the existing chunk */
			++chunk->count;
			return;
		}
	}

	/* a chunk doesn't exist, and we have to create a new one */
	chunk = chunk_alloc(parity_pos, file, file_pos, 1);

	/* insert the chunk in the trees */
	parity_chunk = tommy_tree_insert(&disk->fs_parity, &chunk->parity_node, chunk);
	file_chunk = tommy_tree_insert(&disk->fs_file, &chunk->file_node, chunk);

	/* both inserts must have stored the new chunk; anything else means a
	 * duplicate key already existed in a tree */
	if (parity_chunk != chunk || file_chunk != chunk) {
		/* LCOV_EXCL_START */
		log_fatal("Internal inconsistency when allocating file '%s' at position '%u' for existing chunk '%u:%u' in disk '%s'\n", file->sub, file_pos, chunk->file_pos, chunk->count, disk->name);
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	/* store the last accessed chunk */
	disk->fs_last = chunk;
}
/*
 * Map the single chunk backing jemalloc's internal base allocations.
 * Aborts if called more than once. Returns true on failure, false on
 * success (jemalloc convention).
 */
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	if (base_pages != NULL) {
		/* TODO: remove this implementation restriction */
		malloc_write("<jemalloc>: Internal allocation limit reached\n");
		abort();
	}

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, true, &zero);
	if (base_pages == NULL)
		return (true);
	/* bump-allocation cursor and end-of-region sentinel */
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}
END_TEST

/*******************************************************************************
 * clear
 */

START_TEST(test_chunk_clear)
{
	chunk_t chunk;
	u_char *ptr;
	int i;
	bool cleared = TRUE;

	/* clearing and freeing an empty chunk must be a safe no-op */
	chunk = chunk_empty;
	chunk_clear(&chunk);
	chunk_free(&chunk);

	/* fill a chunk with a known pattern, then clear it */
	chunk = chunk_alloc(64);
	ptr = chunk.ptr;
	for (i = 0; i < 64; i++)
	{
		chunk.ptr[i] = i;
	}
	chunk_clear(&chunk);
	/* check memory area of freed chunk. We can't use ck_assert() for this
	 * test directly, as it might allocate data at the freed area. */
	for (i = 0; i < 64; i++)
	{
		if (ptr[i] != 0 && ptr[i] == i)
		{
			cleared = FALSE;
			break;
		}
	}
	assert_chunk_empty(chunk);
	ck_assert(cleared);
}
/**
 * Determine the type of the attribute and its value
 *
 * Parses `value` according to *value_type into *blob (caller frees), then
 * resolves `name` either against the known keyword table or as a numeric
 * attribute type in 1..32767. On success *type/*type_ip6 and possibly
 * *value_type are updated; on failure an error is printed and any partial
 * blob is released.
 */
static bool parse_attributes(char *name, char *value, value_type_t *value_type,
							 configuration_attribute_type_t *type,
							 configuration_attribute_type_t *type_ip6,
							 chunk_t *blob)
{
	host_t *addr = NULL, *mask = NULL;
	chunk_t addr_chunk, mask_chunk, blob_next;
	char *text = "", *pos_addr, *pos_mask, *pos_next, *endptr;
	int i;

	switch (*value_type)
	{
		case VALUE_STRING:
			*blob = chunk_create(value, strlen(value));
			*blob = chunk_clone(*blob);
			break;
		case VALUE_HEX:
			*blob = chunk_from_hex(chunk_create(value, strlen(value)), NULL);
			break;
		case VALUE_ADDR:
			addr = host_create_from_string(value, 0);
			if (addr == NULL)
			{
				fprintf(stderr, "invalid IP address: '%s'.\n", value);
				return FALSE;
			}
			addr_chunk = addr->get_address(addr);
			*blob = chunk_clone(addr_chunk);
			break;
		case VALUE_SUBNET:
			/* comma-separated list of IPv4 CIDR subnets, encoded as
			 * consecutive address/mask pairs of UNITY_NETWORK_LEN bytes */
			*blob = chunk_empty;
			pos_next = value;
			do
			{
				pos_addr = pos_next;
				pos_next = strchr(pos_next, ',');
				if (pos_next)
				{
					*pos_next = '\0';
					pos_next += 1;
				}
				pos_mask = strchr(pos_addr, '/');
				if (pos_mask == NULL)
				{
					fprintf(stderr, "invalid IPv4 subnet: '%s'.\n", pos_addr);
					free(blob->ptr);
					return FALSE;
				}
				*pos_mask = '\0';
				pos_mask += 1;
				addr = host_create_from_string(pos_addr, 0);
				mask = host_create_from_string(pos_mask, 0);
				if (addr == NULL || addr->get_family(addr) != AF_INET ||
					mask == NULL || mask->get_family(addr) != AF_INET)
				{
					fprintf(stderr, "invalid IPv4 subnet: '%s/%s'.\n",
							pos_addr, pos_mask);
					DESTROY_IF(addr);
					DESTROY_IF(mask);
					free(blob->ptr);
					return FALSE;
				}
				addr_chunk = addr->get_address(addr);
				mask_chunk = mask->get_address(mask);
				/* grow the blob by one zero-initialized address/mask pair */
				blob_next = chunk_alloc(blob->len + UNITY_NETWORK_LEN);
				memcpy(blob_next.ptr, blob->ptr, blob->len);
				pos_addr = blob_next.ptr + blob->len;
				memset(pos_addr, 0x00, UNITY_NETWORK_LEN);
				memcpy(pos_addr, addr_chunk.ptr, 4);
				memcpy(pos_addr + 4, mask_chunk.ptr, 4);
				addr->destroy(addr);
				addr = NULL;
				mask->destroy(mask);
				chunk_free(blob);
				*blob = blob_next;
			}
			while (pos_next);
			break;
		case VALUE_NONE:
			*blob = chunk_empty;
			break;
	}

	/* init the attribute type */
	*type = 0;
	*type_ip6 = 0;

	/* try to resolve the attribute name against the known keyword table */
	for (i = 0; i < countof(attr_info); i++)
	{
		if (strcaseeq(name, attr_info[i].keyword))
		{
			*type = attr_info[i].type;
			*type_ip6 = attr_info[i].type_ip6;

			if (*value_type == VALUE_NONE)
			{
				*value_type = attr_info[i].value_type;
				return TRUE;
			}
			/* a HEX value is accepted for any keyword; anything else must
			 * match the type the keyword expects */
			if (*value_type != attr_info[i].value_type &&
				*value_type != VALUE_HEX)
			{
				switch (attr_info[i].value_type)
				{
					case VALUE_STRING:
						text = "a string";
						break;
					case VALUE_HEX:
						text = "a hex";
						break;
					case VALUE_ADDR:
						text = "an IP address";
						break;
					case VALUE_SUBNET:
						text = "a subnet";
						break;
					case VALUE_NONE:
						text = "no";
						break;
				}
				fprintf(stderr, "the %s attribute requires %s value.\n",
						name, text);
				DESTROY_IF(addr);
				free(blob->ptr);
				return FALSE;
			}
			if (*value_type == VALUE_ADDR)
			{
				/* select the IPv4 or IPv6 attribute type by address family */
				*type = (addr->get_family(addr) == AF_INET) ?
							attr_info[i].type : attr_info[i].type_ip6;
				addr->destroy(addr);
			}
			else if (*value_type == VALUE_HEX)
			{
				*value_type = attr_info[i].value_type;

				if (*value_type == VALUE_ADDR)
				{
					/* infer the family from the raw length: 16 bytes is
					 * IPv6, 4 bytes is IPv4, anything else is invalid */
					if (blob->len == 16)
					{
						*type = attr_info[i].type_ip6;
					}
					else if (blob->len != 4)
					{
						fprintf(stderr, "the %s attribute requires "
								"a valid IP address.\n", name);
						free(blob->ptr);
						return FALSE;
					}
				}
			}
			return TRUE;
		}
	}

	/* clean up */
	DESTROY_IF(addr);

	/* is the attribute type numeric? */
	*type = strtol(name, &endptr, 10);
	if (*endptr != '\0')
	{
		fprintf(stderr, "the %s attribute is not recognized.\n", name);
		free(blob->ptr);
		return FALSE;
	}
	if (*type < 1 || *type > 32767)
	{
		fprintf(stderr, "the attribute type must lie in the range 1..32767.\n");
		free(blob->ptr);
		return FALSE;
	}
	if (*value_type == VALUE_NONE)
	{
		*value_type = VALUE_HEX;
	}
	return TRUE;
}
/**
 * See header.
 *
 * Select the PEM label and ASN.1 DER payload for the requested credential
 * encoding type (directly from args, or by re-encoding components), then
 * emit "-----BEGIN label-----", the base64 body, and the trailer into a
 * freshly allocated chunk.
 */
bool pem_encoder_encode(cred_encoding_type_t type, chunk_t *encoding,
						va_list args)
{
	chunk_t asn1;
	char *label;
	u_char *pos;
	size_t len, written, pem_chars, pem_lines;
	chunk_t n, e, d, p, q, exp1, exp2, coeff, to_free = chunk_empty;

	switch (type)
	{
		case PUBKEY_PEM:
			label ="PUBLIC KEY";
			/* direct PKCS#1 PEM encoding */
			if (cred_encoding_args(args, CRED_PART_RSA_PUB_ASN1_DER,
								   &asn1, CRED_PART_END) ||
				cred_encoding_args(args, CRED_PART_ECDSA_PUB_ASN1_DER,
								   &asn1, CRED_PART_END) ||
				cred_encoding_args(args, CRED_PART_EDDSA_PUB_ASN1_DER,
								   &asn1, CRED_PART_END) ||
				cred_encoding_args(args, CRED_PART_BLISS_PUB_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				break;
			}
			/* indirect PEM encoding from components */
			if (cred_encoding_args(args, CRED_PART_RSA_MODULUS, &n,
								   CRED_PART_RSA_PUB_EXP, &e, CRED_PART_END))
			{
				if (lib->encoding->encode(lib->encoding, PUBKEY_SPKI_ASN1_DER,
										  NULL, &asn1,
										  CRED_PART_RSA_MODULUS, n,
										  CRED_PART_RSA_PUB_EXP, e,
										  CRED_PART_END))
				{
					/* the generated DER blob is ours to free afterwards */
					to_free = asn1;
					break;
				}
			}
			return FALSE;
		case PRIVKEY_PEM:
			label ="RSA PRIVATE KEY";
			/* direct PKCS#1 PEM encoding */
			if (cred_encoding_args(args, CRED_PART_RSA_PRIV_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				break;
			}
			/* indirect PEM encoding from components */
			if (cred_encoding_args(args, CRED_PART_RSA_MODULUS, &n,
								   CRED_PART_RSA_PUB_EXP, &e,
								   CRED_PART_RSA_PRIV_EXP, &d,
								   CRED_PART_RSA_PRIME1, &p,
								   CRED_PART_RSA_PRIME2, &q,
								   CRED_PART_RSA_EXP1, &exp1,
								   CRED_PART_RSA_EXP2, &exp2,
								   CRED_PART_RSA_COEFF, &coeff,
								   CRED_PART_END))
			{
				if (lib->encoding->encode(lib->encoding, PRIVKEY_ASN1_DER,
										  NULL, &asn1,
										  CRED_PART_RSA_MODULUS, n,
										  CRED_PART_RSA_PUB_EXP, e,
										  CRED_PART_RSA_PRIV_EXP, d,
										  CRED_PART_RSA_PRIME1, p,
										  CRED_PART_RSA_PRIME2, q,
										  CRED_PART_RSA_EXP1, exp1,
										  CRED_PART_RSA_EXP2, exp2,
										  CRED_PART_RSA_COEFF, coeff,
										  CRED_PART_END))
				{
					to_free = asn1;
					break;
				}
			}
			if (cred_encoding_args(args, CRED_PART_ECDSA_PRIV_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				label ="EC PRIVATE KEY";
				break;
			}
			if (cred_encoding_args(args, CRED_PART_BLISS_PRIV_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				label ="BLISS PRIVATE KEY";
				break;
			}
			if (cred_encoding_args(args, CRED_PART_EDDSA_PRIV_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				label ="PRIVATE KEY";
				break;
			}
			return FALSE;
		case CERT_PEM:
			if (cred_encoding_args(args, CRED_PART_X509_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				/* PEM encode x509 certificate */
				label = "CERTIFICATE";
				break;
			}
			if (cred_encoding_args(args, CRED_PART_X509_CRL_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				/* PEM encode CRL */
				label = "X509 CRL";
				break;
			}
			if (cred_encoding_args(args, CRED_PART_PKCS10_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				/* PEM encode PKCS10 certificate request */
				label = "CERTIFICATE REQUEST";
				break;
			}
			if (cred_encoding_args(args, CRED_PART_X509_AC_ASN1_DER,
								   &asn1, CRED_PART_END))
			{
				label = "ATTRIBUTE CERTIFICATE";
				break;
			}
		default:
			return FALSE;
	}

	/* compute and allocate maximum size of PEM object */
	pem_chars = 4 * ((asn1.len + 2) / 3);
	pem_lines = (asn1.len + BYTES_PER_LINE - 1) / BYTES_PER_LINE;
	*encoding = chunk_alloc(5 + 2*(6 + strlen(label) + 6) + 3 +
							pem_chars + pem_lines);
	pos = encoding->ptr;
	len = encoding->len;

	/* write PEM header */
	written = snprintf(pos, len, "-----BEGIN %s-----\n", label);
	pos += written;
	len -= written;

	/* write PEM body */
	while (pem_lines--)
	{
		chunk_t asn1_line, pem_line;

		asn1_line = chunk_create(asn1.ptr, min(asn1.len, BYTES_PER_LINE));
		asn1.ptr += asn1_line.len;
		asn1.len -= asn1_line.len;
		pem_line = chunk_to_base64(asn1_line, pos);
		pos += pem_line.len;
		len -= pem_line.len;
		*pos = '\n';
		pos++;
		len--;
	}
	chunk_clear(&to_free);

	/* write PEM trailer */
	written = snprintf(pos, len, "-----END %s-----", label);
	pos += written;
	len -= written;

	/* replace termination null character with newline */
	*pos = '\n';
	pos++;
	len--;

	/* compute effective length of PEM object */
	encoding->len = pos - encoding->ptr;
	return TRUE;
}
/*
 * Remove the mapping for parity position `parity_pos`: delete the owning
 * chunk when it holds a single block, shrink it when the position lies at
 * either border, or split it in two when it lies in the middle.
 */
void fs_deallocate(struct snapraid_disk* disk, block_off_t parity_pos)
{
	struct snapraid_chunk* chunk;
	struct snapraid_chunk* second_chunk;
	struct snapraid_chunk* parity_chunk;
	struct snapraid_chunk* file_chunk;
	block_off_t first_count, second_count;

	chunk = fs_par2chunk_get_ts(disk, &disk->fs_last, parity_pos);
	if (!chunk) {
		/* LCOV_EXCL_START */
		log_fatal("Internal inconsistency when deallocating parity position '%u' for not existing chunk in disk '%s'\n", parity_pos, disk->name);
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	/* if it's the only block of the chunk, delete it */
	if (chunk->count == 1) {
		/* remove from the trees */
		tommy_tree_remove(&disk->fs_parity, chunk);
		tommy_tree_remove(&disk->fs_file, chunk);

		/* deallocate */
		chunk_free(chunk);

		/* clear the last accessed chunk */
		disk->fs_last = 0;
		return;
	}

	/* if it's at the start of the chunk, shrink the chunk */
	if (parity_pos == chunk->parity_pos) {
		++chunk->parity_pos;
		++chunk->file_pos;
		--chunk->count;
		return;
	}

	/* if it's at the end of the chunk, shrink the chunk */
	if (parity_pos == chunk->parity_pos + chunk->count - 1) {
		--chunk->count;
		return;
	}

	/* otherwise it's in the middle: split into a first part (kept in the
	 * existing chunk) and a second part (new chunk), skipping parity_pos */
	first_count = parity_pos - chunk->parity_pos;
	second_count = chunk->count - first_count - 1;

	/* adjust the first chunk */
	chunk->count = first_count;

	/* allocate the second chunk */
	second_chunk = chunk_alloc(chunk->parity_pos + first_count + 1, chunk->file, chunk->file_pos + first_count + 1, second_count);

	/* insert the chunk in the trees */
	parity_chunk = tommy_tree_insert(&disk->fs_parity, &second_chunk->parity_node, second_chunk);
	file_chunk = tommy_tree_insert(&disk->fs_file, &second_chunk->file_node, second_chunk);

	/* both inserts must have stored the new chunk (no duplicate keys) */
	if (parity_chunk != second_chunk || file_chunk != second_chunk) {
		/* LCOV_EXCL_START */
		log_fatal("Internal inconsistency when deallocating parity position '%u' for splitting chunk '%u:%u' in disk '%s'\n", parity_pos, second_chunk->file_pos, second_chunk->count, disk->name);
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	/* store the last accessed chunk */
	disk->fs_last = second_chunk;
}
/*
 * Run a single AES-GCM test vector (encrypt or decrypt) against the
 * registered AEAD implementation. Returns FALSE when the algorithm, key
 * length, ICV or IV length is unsupported, or when an operation fails.
 */
static bool do_test_gcm(test_vector_t *test)
{
	encryption_algorithm_t alg;
	chunk_t key, iv;
	aead_t *aead;
	size_t saltlen, ivlen;

	/* map the configured ICV length (in bits) to the GCM variant */
	switch (ctx.icvlen / 8)
	{
		case 8:
			alg = ENCR_AES_GCM_ICV8;
			break;
		case 12:
			alg = ENCR_AES_GCM_ICV12;
			break;
		case 16:
			alg = ENCR_AES_GCM_ICV16;
			break;
		default:
			DBG1(DBG_APP, "unsupported ICV length: %d", ctx.icvlen);
			return FALSE;
	}

	aead = lib->crypto->create_aead(lib->crypto, alg, test->key.len, 4);
	if (!aead)
	{
		DBG1(DBG_APP, "algorithm %N or key length (%d bits) not supported",
			 encryption_algorithm_names, alg, test->key.len * 8);
		return FALSE;
	}

	/* our API is quite RFC 4106 specific, that is, part of the IV is provided
	 * at the end of the key. */
	saltlen = aead->get_key_size(aead) - test->key.len;
	ivlen = aead->get_iv_size(aead);
	if (ctx.ivlen / 8 != saltlen + ivlen)
	{
		DBG1(DBG_APP, "unsupported IV length: %d", ctx.ivlen);
		aead->destroy(aead);
		return FALSE;
	}

	if (!test->external_iv)
	{
		rng_t *rng;

		/* the IV consists of saltlen random bytes (usually additional keymat)
		 * followed by a counter, zero here */
		test->iv = chunk_alloc(saltlen + ivlen);
		memset(test->iv.ptr, 0, test->iv.len);
		rng = lib->crypto->create_rng(lib->crypto, RNG_STRONG);
		if (!rng || !rng->get_bytes(rng, saltlen, test->iv.ptr))
		{
			DBG1(DBG_APP, "failed to generate IV");
			DESTROY_IF(rng);
			aead->destroy(aead);
			return FALSE;
		}
		rng->destroy(rng);
	}

	/* key material handed to the AEAD = cipher key || salt (the first
	 * saltlen bytes of the IV) */
	key = chunk_alloca(test->key.len + saltlen);
	memcpy(key.ptr, test->key.ptr, test->key.len);
	memcpy(key.ptr + test->key.len, test->iv.ptr, saltlen);

	/* per-message IV = remainder of the IV after the salt */
	iv = chunk_alloca(ivlen);
	memcpy(iv.ptr, test->iv.ptr + saltlen, iv.len);

	if (!aead->set_key(aead, key))
	{
		DBG1(DBG_APP, "failed to set key");
		aead->destroy(aead);
		return FALSE;
	}

	if (ctx.decrypt)
	{
		/* the ICV is expected to follow the cipher text */
		chunk_t cipher = chunk_cata("cc", test->cipher, test->icv);

		/* store whether the ICV verification was successful */
		test->success = aead->decrypt(aead, cipher, test->aad, iv,
									  &test->plain);
	}
	else
	{
		if (!aead->encrypt(aead, test->plain, test->aad, iv, &test->cipher))
		{
			DBG1(DBG_APP, "encryption failed");
			aead->destroy(aead);
			return FALSE;
		}
		/* copy ICV from the end of the cipher text */
		test->icv = chunk_alloc(ctx.icvlen / 8);
		test->cipher.len -= test->icv.len;
		memcpy(test->icv.ptr, test->cipher.ptr + test->cipher.len,
			   test->icv.len);
	}
	aead->destroy(aead);
	return TRUE;
}
/* copy a raw byte sequence of `len` bytes into a freshly allocated chunk */
void sequence_to_chunk(const byte_t * const first, const uint32_t len,
					   chunk_t * const chunk)
{
	chunk_t out = chunk_alloc(len);

	memcpy(out.ptr, first, len);
	*chunk = out;
}