/**
 * Allocate and initialize a heap cf_vector holding elements of value_len
 * bytes, with initial capacity init_sz. Returns NULL if any allocation
 * fails. VECTOR_FLAG_INITZERO zeroes the initial storage;
 * VECTOR_FLAG_BIGLOCK initializes the vector's big lock.
 */
cf_vector *
cf_vector_create(uint32_t value_len, uint32_t init_sz, uint flags)
{
	cf_vector *v = cf_malloc(sizeof(cf_vector));

	if (! v) {
		return NULL;
	}

	v->value_len = value_len;
	v->flags = flags;
	v->alloc_len = init_sz;
	v->len = 0;
	v->stack_struct = false;
	v->stack_vector = false;

	if (init_sz != 0) {
		v->vector = cf_malloc(init_sz * value_len);

		if (! v->vector) {
			cf_free(v);
			return NULL;
		}
	}
	else {
		v->vector = NULL;
	}

	if ((flags & VECTOR_FLAG_INITZERO) != 0 && v->vector != NULL) {
		memset(v->vector, 0, init_sz * value_len);
	}

	if ((flags & VECTOR_FLAG_BIGLOCK) != 0) {
		pthread_mutex_init(&v->LOCK, NULL);
	}

	return v;
}
/**
 * Deep-copy src into dest. The flat fields are copied wholesale via struct
 * assignment; then any owned string buffer (freeme set) and any attached
 * icol_t are duplicated so dest owns independent copies.
 */
void
ai_objClone(ai_obj *dest, ai_obj *src)
{
	*dest = *src;

	if (src->freeme) {
		dest->s = cf_malloc(src->len);
		memcpy(dest->s, src->s, src->len);
		dest->freeme = 1;
	}

	if (src->ic) {
		dest->ic = (icol_t *)cf_malloc(sizeof(icol_t));
		cloneIC(dest->ic, src->ic);
	}
}
/**
 * Render a bytes value as a space-separated hex string ("ab cd ef").
 * Returns a heap string the caller must free, or NULL if the value is
 * NULL/empty or allocation fails.
 */
char *
as_bytes_val_tostring(const as_val *v)
{
	as_bytes *bytes = as_bytes_fromval(v);

	if (bytes == NULL || bytes->value == NULL || bytes->size == 0) {
		return NULL;
	}

	uint8_t *src = bytes->value;
	uint32_t n = bytes->size;

	// 3 chars per byte ("xx "), generously rounded up.
	size_t cap = (4 * n) + 3;
	char *out = (char *)cf_malloc(cap);

	if (out == NULL) {
		return NULL;
	}

	char *w = out;

	for (uint32_t i = 0; i < n; i++) {
		*w++ = hex_chars[src[i] >> 4];
		*w++ = hex_chars[src[i] & 0xf];
		*w++ = ' ';
	}

	// Replace the trailing space with the terminator.
	*(w - 1) = '\0';

	return out;
}
/**
 * Ensure the bytes buffer can hold `capacity` bytes.
 *
 * If `resize` is true and `capacity` exceeds the current capacity, the
 * buffer is grown: a heap-backed buffer is grown via cf_realloc(); a
 * stack-backed buffer is promoted to a heap buffer via cf_malloc() and its
 * contents copied over. If `resize` is false and the capacity is not
 * sufficient, false is returned.
 *
 * ~~~~~~~~~~{.c}
 * as_bytes_ensure(&bytes, 100, true);
 * ~~~~~~~~~~
 *
 * @param bytes    The bytes to ensure the capacity of.
 * @param capacity The total number of bytes to ensure bytes can handle.
 * @param resize   If true and capacity is not sufficient, resize the buffer.
 *
 * @return On success, true. Otherwise an error occurred.
 */
bool
as_bytes_ensure(as_bytes *bytes, uint32_t capacity, bool resize)
{
	if (capacity <= bytes->capacity) {
		return true;
	}

	if (! resize) {
		return false;
	}

	uint8_t *buffer;

	if (bytes->free) {
		// Heap-backed: grow (possibly moving) via realloc.
		buffer = cf_realloc(bytes->value, capacity);

		if (buffer == NULL) {
			return false;
		}
	}
	else {
		// Stack-backed: promote to a heap buffer and copy the payload.
		buffer = cf_malloc(capacity);

		if (buffer == NULL) {
			return false;
		}

		memcpy(buffer, bytes->value, bytes->size);
	}

	bytes->free = true;
	bytes->value = buffer;
	bytes->capacity = capacity;

	return true;
}
/**
 * Initialize an as_operations with room for nops entries. If nops is 0 or
 * the entry allocation fails, the binops list is left empty (and not marked
 * for freeing). Returns ops, or NULL if ops is NULL.
 */
static as_operations *
as_operations_default(as_operations *ops, bool free, uint16_t nops)
{
	if (ops == NULL) {
		return NULL;
	}

	ops->_free = free;
	ops->gen = 0;
	ops->ttl = 0;

	as_binop *entries = (nops > 0) ?
			(as_binop *)cf_malloc(sizeof(as_binop) * nops) : NULL;

	if (entries != NULL) {
		ops->binops._free = true;
		ops->binops.capacity = nops;
	}
	else {
		ops->binops._free = false;
		ops->binops.capacity = 0;
	}

	ops->binops.size = 0;
	ops->binops.entries = entries;

	return ops;
}
// Set a blob particle's type, size and data, returning the (possibly
// reallocated) particle.
//
// When data_in_memory is true, an existing particle of a different size is
// resized: shrinking reallocs in place; growing frees the old particle and
// allocates fresh (old contents are fully overwritten below anyway).
//
// NOTE(review): if cf_realloc() fails on the shrink path, pb becomes NULL
// and a fresh cf_malloc() is attempted, leaking the original particle; and
// if cf_malloc() itself fails, the writes below dereference NULL -
// presumably the server allocator aborts on OOM, verify.
//
// When data_in_memory is false, the caller-supplied particle p must already
// be large enough to hold sz bytes.
as_particle *as_particle_set_blob(as_particle *p, as_particle_type type, void *data, uint32_t sz, bool data_in_memory)
{
	as_particle_blob *pb = (as_particle_blob *)p;

	if (data_in_memory) {
		if (pb && (sz != pb->sz)) {
			if (sz > pb->sz) {
				// Growing - old contents not needed, avoid realloc's copy.
				cf_free(pb);
				pb = 0;
			}
			else {
				// Shrinking - realloc keeps the header intact.
				pb = cf_realloc(pb, sizeof(as_particle_blob) + sz);
			}
		}

		if (! pb) {
			pb = cf_malloc(sizeof(as_particle_blob) + sz);
		}
	}

	pb->type = type;
	pb->sz = sz;
	memcpy(pb->data, data, sz);

	return((as_particle *)pb);
}
// Set a string particle's size and data, returning the (possibly
// reallocated) particle.
//
// NOTE(review): unlike as_particle_set_blob(), the `type` parameter is
// ignored and the particle type is hard-wired to AS_PARTICLE_TYPE_STRING -
// presumably intentional for this string-specific setter; verify.
//
// When data_in_memory is true, an existing particle of a different size is
// resized: shrinking reallocs in place; growing frees and reallocates (old
// contents are fully overwritten below). If cf_realloc() fails, ps becomes
// NULL and a fresh cf_malloc() is attempted, leaking the original particle;
// if cf_malloc() fails, the writes below dereference NULL - presumably the
// server allocator aborts on OOM, verify.
//
// When data_in_memory is false, the caller-supplied particle p must already
// be large enough to hold sz bytes.
as_particle *as_particle_set_string(as_particle *p, as_particle_type type, void *data, uint32_t sz, bool data_in_memory)
{
	as_particle_string *ps = (as_particle_string *)p;

	if (data_in_memory) {
		if (ps && (sz != ps->sz)) {
			if (sz > ps->sz) {
				// Growing - old contents not needed, avoid realloc's copy.
				cf_free(ps);
				ps = 0;
			}
			else {
				// Shrinking - realloc keeps the header intact.
				ps = cf_realloc(ps, sizeof(as_particle_string) + sz);
			}
		}

		if (! ps) {
			ps = cf_malloc(sizeof(as_particle_string) + sz);
		}
	}

	ps->type = AS_PARTICLE_TYPE_STRING;
	ps->sz = sz;
	memcpy(ps->data, data, sz);

	return((as_particle *)ps);
}
/**
 * Grow a vector's storage to new_sz elements, honoring VECTOR_FLAG_BIGRESIZE
 * (minimum 50 elements) and VECTOR_FLAG_INITZERO (zero the newly added
 * region). Stack-resident storage is promoted to the heap with its elements
 * copied. Returns 0 on success, -1 on allocation failure (in which case the
 * original storage is left intact).
 */
static int
cf_vector_resize(cf_vector *v, uint32_t new_sz)
{
	if (v->flags & VECTOR_FLAG_BIGRESIZE) {
		if (new_sz < 50) {
			new_sz = 50;
		}
	}
	else if (new_sz == 0) {
		new_sz = 2;
	}

	uint8_t *mem;

	if (v->vector == NULL || v->stack_vector) {
		mem = cf_malloc(new_sz * v->value_len);

		if (! mem) {
			return -1;
		}

		if (v->stack_vector) {
			// Promote: copy the stack-resident elements to the heap.
			memcpy(mem, v->vector, v->alloc_len * v->value_len);
			v->stack_vector = false;
		}
	}
	else {
		mem = cf_realloc(v->vector, new_sz * v->value_len);

		if (! mem) {
			return -1;
		}
	}

	v->vector = mem;

	if (v->flags & VECTOR_FLAG_INITZERO) {
		// Zero only the freshly added tail.
		memset(v->vector + (v->alloc_len * v->value_len), 0,
				(new_sz - v->alloc_len) * v->value_len);
	}

	v->alloc_len = new_sz;

	return 0;
}
as_event_loop* as_event_create_loops(uint32_t capacity) { as_event_send_buffer_size = as_pipe_get_send_buffer_size(); as_event_recv_buffer_size = as_pipe_get_recv_buffer_size(); as_event_loops = cf_malloc(sizeof(as_event_loop) * capacity); if (! as_event_loops) { return 0; } as_event_loop_capacity = capacity; as_event_threads_created = true; for (uint32_t i = 0; i < capacity; i++) { as_event_loop* event_loop = &as_event_loops[i]; event_loop->loop = 0; pthread_mutex_init(&event_loop->lock, 0); event_loop->thread = 0; event_loop->index = i; as_queue_init(&event_loop->pipe_cb_queue, sizeof(as_queued_pipe_cb), AS_EVENT_QUEUE_INITIAL_CAPACITY); event_loop->pipe_cb_calling = false; if (! as_event_create_loop(event_loop)) { as_event_close_loops(); return 0; } as_event_loop_size++; } return as_event_loops; }
/**
 * Render a pair as "(<a>, <b>)". Returns a heap-allocated string the caller
 * must free, or NULL on any failure.
 *
 * Fixes: the previous version called strlen() on the results of
 * as_val_tostring() without checking them for NULL (undefined behavior),
 * and leaked both element strings when the output allocation failed.
 */
char *as_pair_val_tostring(const as_val * v)
{
	as_pair * p = as_pair_fromval(v);
	if ( p == NULL ) return NULL;

	char * a = as_val_tostring(p->_1);
	char * b = as_val_tostring(p->_2);

	if ( !a || !b ) {
		// Element stringification failed - release whichever succeeded.
		if ( a ) cf_free(a);
		if ( b ) cf_free(b);
		return NULL;
	}

	size_t al = strlen(a);
	size_t bl = strlen(b);

	// '(' + a + ", " + b + ')' + NUL
	size_t l = al + bl + 5;

	char * str = (char *) cf_malloc(l);
	if ( !str ) {
		cf_free(a);
		cf_free(b);
		return NULL;
	}

	str[0] = '(';
	memcpy(str + 1, a, al);
	memcpy(str + 1 + al, ", ", 2);
	memcpy(str + 1 + al + 2, b, bl);
	str[1 + al + 2 + bl] = ')';
	str[1 + al + 2 + bl + 1] = '\0';

	cf_free(a);
	cf_free(b);
	return str;
}
// Parse a "query roles" response buffer into a vector of as_role pointers.
//
// Record layout (per entry, as read here): p[1] is a result code - any
// non-zero code aborts parsing and is returned directly as the status;
// p[3] is the field count. After HEADER_REMAINING header bytes, each field
// is a big-endian 32-bit length, a 1-byte field id, then the payload (the
// length counts the id byte, hence the len-- after reading it).
//
// NOTE(review): cf_malloc() is used unchecked below - presumably the
// project allocator aborts on OOM; verify.
static as_status
as_parse_roles(as_error* err, uint8_t* buffer, size_t size, as_vector* /*<as_role*>*/ roles)
{
	uint8_t* p = buffer;
	uint8_t* end = buffer + size;
	as_role* role;
	char role_name[AS_ROLE_SIZE];
	int len;
	int sz;
	uint8_t id;
	uint8_t field_count;
	uint8_t result;

	while (p < end) {
		result = p[1];

		if (result != 0) {
			// Server-reported error code doubles as the returned status.
			return result;
		}

		field_count = p[3];
		p += HEADER_REMAINING;

		role_name[0] = 0;
		role = 0;

		for (uint8_t b = 0; b < field_count; b++) {
			// Field length includes the id byte that follows it.
			len = cf_swap_from_be32(*(int*)p);
			p += 4;
			id = *p++;
			len--;

			if (id == ROLE) {
				// Truncate the name to fit the fixed-size buffer.
				sz = (len <= (AS_ROLE_SIZE-1))? len : (AS_ROLE_SIZE-1);
				memcpy(role_name, p, sz);
				role_name[sz] = 0;
				p += len;
			}
			else if (id == PRIVILEGES) {
				// Allocates and fills *role.
				p = as_parse_privileges(p, &role);
			}
			else {
				p += len;
			}
		}

		if (role_name[0] == 0 && role == 0) {
			// Empty record - nothing to append.
			continue;
		}

		if (! role) {
			// Role had a name but no privileges field.
			role = cf_malloc(sizeof(as_role));
			role->privileges_size = 0;
		}

		strcpy(role->name, role_name);
		as_vector_append(roles, &role);
	}
	return AEROSPIKE_OK;
}
/**
 * Allocate and initialize a result holder for a single test run.
 * Returns NULL if allocation fails.
 *
 * Fix: check the allocation before dereferencing it, consistent with
 * as_timer_new() / aerospike_new() elsewhere in this codebase.
 */
atf_test_result * atf_test_result_new(atf_test * test) {
	atf_test_result * res = (atf_test_result *) cf_malloc(sizeof(atf_test_result));
	if ( !res ) return res;
	res->test = test;
	res->success = true;
	res->message[0] = '\0';
	return res;
}
/**
 * Allocate and initialize a result holder for a test suite run.
 * Returns NULL if allocation fails.
 *
 * Fix: check the allocation before dereferencing it, consistent with
 * as_timer_new() / aerospike_new() elsewhere in this codebase.
 */
atf_suite_result * atf_suite_result_new(atf_suite * suite) {
	atf_suite_result * res = (atf_suite_result *) cf_malloc(sizeof(atf_suite_result));
	if ( !res ) return res;
	res->suite = suite;
	res->size = 0;
	res->success = 0;
	return res;
}
// // Make sure the buf has enough bytes for whatever you're up to. int cf_buf_builder_reserve_internal(cf_buf_builder **bb_r, size_t sz) { cf_buf_builder *bb = *bb_r; // see if we need more space size_t new_sz = cf_dyn_buf_get_newsize(bb->alloc_sz, bb->used_sz, sz); if (new_sz > bb->alloc_sz) { if (bb->alloc_sz - bb->used_sz < MAX_BACKOFF) { bb = cf_realloc(bb, new_sz); if (!bb) return(-1); } else { // Only possible if buffer was reset. Avoids potential expensive // copy within realloc. cf_buf_builder *_t = cf_malloc(new_sz); if (!_t) return(-1); memcpy(_t->buf, bb->buf, bb->used_sz); _t->used_sz = bb->used_sz; cf_free(bb); bb = _t; } bb->alloc_sz = new_sz - sizeof(cf_buf_builder); *bb_r = bb; } return(0); }
/**
 * Allocate and initialize a cluster node. Seeds the address list with
 * `addr`, creates the connection queue, and returns the node with
 * ref_count 1, or NULL if allocation fails.
 */
as_node *
as_node_create(as_cluster *cluster, const char *name, struct sockaddr_in *addr)
{
	as_node *node = cf_malloc(sizeof(as_node));

	if (node == NULL) {
		return NULL;
	}

	node->ref_count = 1;
	node->partition_generation = 0xFFFFFFFF;
	node->cluster = cluster;
	strcpy(node->name, name);
	node->address_index = 0;

	as_vector_init(&node->addresses, sizeof(as_address), 2);
	as_node_add_address(node, addr);

	node->conn_q = cf_queue_create(sizeof(int), true);
	// node->conn_q_asyncfd = cf_queue_create(sizeof(int), true);
	// node->asyncwork_q = cf_queue_create(sizeof(cl_async_work*), true);

	node->info_fd = -1;
	node->friends = 0;
	node->failures = 0;
	node->index = 0;
	node->active = true;

	return node;
}
// Parse a "query users" response buffer into a vector of as_user pointers.
// Mirrors as_parse_roles().
//
// Record layout (per entry, as read here): p[1] is a result code - any
// non-zero code aborts parsing and is returned directly as the status;
// p[3] is the field count. After HEADER_REMAINING header bytes, each field
// is a big-endian 32-bit length, a 1-byte field id, then the payload (the
// length counts the id byte, hence the len-- after reading it).
//
// NOTE(review): cf_malloc() is used unchecked below - presumably the
// project allocator aborts on OOM; verify.
static as_status
as_parse_users(as_error* err, uint8_t* buffer, size_t size, as_vector* /*<as_user*>*/ users)
{
	uint8_t* p = buffer;
	uint8_t* end = buffer + size;
	as_user* user;
	char user_name[AS_USER_SIZE];
	int len;
	int sz;
	uint8_t id;
	uint8_t field_count;
	uint8_t result;

	while (p < end) {
		result = p[1];

		if (result != 0) {
			// Server-reported error code doubles as the returned status.
			return result;
		}

		field_count = p[3];
		p += HEADER_REMAINING;

		user_name[0] = 0;
		user = 0;

		for (uint8_t b = 0; b < field_count; b++) {
			// Field length includes the id byte that follows it.
			len = cf_swap_from_be32(*(int*)p);
			p += 4;
			id = *p++;
			len--;

			if (id == USER) {
				// Truncate the name to fit the fixed-size buffer.
				sz = (len <= (AS_USER_SIZE-1))? len : (AS_USER_SIZE-1);
				memcpy(user_name, p, sz);
				user_name[sz] = 0;
				p += len;
			}
			else if (id == ROLES) {
				// Allocates and fills *user.
				p = as_parse_users_roles(p, &user);
			}
			else {
				p += len;
			}
		}

		if (user_name[0] == 0 && user == 0) {
			// Empty record - nothing to append.
			continue;
		}

		if (! user) {
			// User had a name but no roles field.
			user = cf_malloc(sizeof(as_user));
			user->roles_size = 0;
		}

		strcpy(user->name, user_name);
		as_vector_append(users, &user);
	}
	return 0;
}
/*
 * Append a record digest to the recl digest list. Digests are packed into
 * fixed-size dig_arr_t chunks; a new chunk is allocated when the list is
 * empty or the tail chunk is full.
 *
 * Return 0 in case of success
 *        -1 in case of failure
 */
static int
btree_addsinglerec(as_sindex_metadata *imd, cf_digest *dig, cf_ll *recl, uint64_t *n_bdigs)
{
	// Skip digests whose partition is not active on this node.
	if (!as_sindex_partition_isactive(imd->si->ns, dig)) {
		return 0;
	}

	bool create = (cf_ll_size(recl) == 0) ? true : false;
	dig_arr_t *dt;

	if (!create) {
		cf_ll_element * ele = cf_ll_get_tail(recl);
		dt = ((ll_recl_element*)ele)->dig_arr;

		if (dt->num == NUM_DIGS_PER_ARR) {
			// Tail chunk is full - start a new one.
			create = true;
		}
	}

	if (create) {
		dt = getDigestArray();

		if (!dt) {
			return -1;
		}

		// NOTE(review): cf_malloc() unchecked - presumably the server
		// allocator aborts on OOM; verify.
		ll_recl_element * node;
		node = cf_malloc(sizeof(ll_recl_element));
		node->dig_arr = dt;
		cf_ll_append(recl, (cf_ll_element *)node);
	}

	memcpy(&dt->digs[dt->num], dig, CF_DIGEST_KEY_SZ);
	dt->num++;
	*n_bdigs = *n_bdigs + 1;
	return 0;
}
/** * Heap allocate and initialize a timer */ as_timer * as_timer_new(void * source, const as_timer_hooks * hooks) { as_timer * timer = (as_timer *) cf_malloc(sizeof(as_timer)); if (!timer) return timer; timer->source = source; timer->hooks = hooks; return timer; }
/**
 * Creates a new aerospike object on the heap, applying defaults from
 * `config`.
 * @returns a new aerospike object, or NULL if allocation fails
 */
aerospike *
aerospike_new(as_config *config)
{
	aerospike *as = (aerospike *)cf_malloc(sizeof(aerospike));

	return as != NULL ? aerospike_defaults(as, true, config) : NULL;
}
// Capture the record's pickled form into the rw_request.
//
// New protocol: the pickle was built during the storage write and ownership
// is handed over directly. Old protocol (destination nodes present): build
// the pickle here and also stash the set name and an owned copy of the key.
void
pickle_all(as_storage_rd* rd, rw_request* rw)
{
	if (rd->keep_pickle) {
		// New protocol - take over the pickle built by the storage layer.
		rw->pickle = rd->pickle;
		rw->pickle_sz = rd->pickle_sz;
		return;
	}
	// else - new protocol with no destination node(s), or old protocol.

	if (rw->n_dest_nodes == 0) {
		return;
	}
	// else - old protocol with destination node(s).

	// TODO - old pickle - remove in "six months".
	rw->is_old_pickle = true;
	rw->pickle = as_record_pickle(rd, &rw->pickle_sz);

	rw->set_name = rd->set_name;
	rw->set_name_len = rd->set_name_len;

	if (rd->key) {
		// Copy the key - presumably rd's buffer does not outlive the
		// request; verify. NOTE(review): cf_malloc() unchecked -
		// presumably the server allocator aborts on OOM.
		rw->key = cf_malloc(rd->key_size);
		rw->key_size = rd->key_size;
		memcpy(rw->key, rd->key, rd->key_size);
	}
}
/*
 * Internal function: udf__aerospike_get_particle_buf
 *
 * Parameters:
 * 		r 		-- udf_record_bin for which particle buf is requested
 * 		type	-- bin type
 * 		pbytes	-- current space required
 *
 * Return value:
 * 		NULL on failure
 * 	 	valid buf pointer success
 *
 * Description:
 * 		The function find space on preallocated particle_data for requested size.
 * 		In case it is found it tries to allocate space for bin independently.
 * 		Return back the pointer to the offset on preallocated particle_data or newly
 * 		allocated space.
 *
 * 		Return NULL if both fails
 *
 * Note: ubin->particle_buf will be set if new per bin memory is allocated.
 *
 * Callers:
 * 		udf_aerospike_setbin
 */
uint8_t *
udf__aerospike_get_particle_buf(udf_record *urecord, udf_record_bin *ubin, uint32_t pbytes)
{
	// Anything larger than a write block cannot be stored - reject up front.
	if (pbytes > urecord->rd->ns->storage_write_block_size) {
		cf_warning(AS_UDF, "udf__aerospike_get_particle_buf: Invalid Operation [Bin %s data too big size=%u]... Fail", ubin->name, pbytes);
		return NULL;
	}

	// Per-bin fallback allocations are a full write block, so repeated
	// updates to the same bin will fit without reallocating.
	uint32_t alloc_size = pbytes == 0 ? 0 : urecord->rd->ns->storage_write_block_size;
	uint8_t *buf = NULL;

	if (ubin->particle_buf) {
		// Bin already owns a dedicated buffer - reuse it.
		buf = ubin->particle_buf;
	}
	else {
		// Disable dynamic shifting from the flat allocater to dynamic
		// allocation.
		if ((urecord->cur_particle_data + pbytes) < urecord->end_particle_data) {
			// Carve the request out of the shared preallocated region.
			buf = urecord->cur_particle_data;
			urecord->cur_particle_data += pbytes;
		}
		else if (alloc_size) {
			// If there is no space in preallocated buffer then go
			// ahead and allocate space per bin. This may happen
			// if user keeps doing lot of execute update exhausting
			// the buffer. After this point the record size check will
			// trip instead of at the code when bin value is set.
			ubin->particle_buf = cf_malloc(alloc_size);

			if (ubin->particle_buf) {
				buf = ubin->particle_buf;
			}
		}
	}
	return buf;
}
/**
 * Collect matching index entries for table `tmatch` into `indl`.
 * When prtl is set, every entry is appended; otherwise only completed
 * indexes are taken, and unique-constrained ones are prepended (UNIQ can
 * fail, so it must be checked first). Returns the number of matches.
 */
int
_match_index(int tmatch, cf_ll *indl, bool prtl)
{
	r_tbl_t *rt = &Tbl[tmatch];

	if (! rt->ilist) {
		return 0;
	}

	int matches = 0;
	cf_ll_iterator *iter = cf_ll_getIterator(rt->ilist, true);
	cf_ll_element *ele;

	while ((ele = cf_ll_getNext(iter)) != NULL) {
		int imatch = ((ll_ai_match_element *)ele)->match;
		r_ind_t *ri = &Index[imatch];

		if (! prtl && ! ri->done) {
			continue;
		}

		ll_ai_match_element *node = cf_malloc(sizeof(ll_ai_match_element));
		node->match = imatch;

		if (! prtl && UNIQ(ri->cnstr)) {
			// UNIQ can fail, must be first in the list.
			cf_ll_prepend(indl, (cf_ll_element *)node);
		}
		else {
			cf_ll_append(indl, (cf_ll_element *)node);
		}

		matches++;
	}

	cf_ll_releaseIterator(iter);

	return matches;
}
/* * Internal function which adds digests to the defrag_list * Mallocs the nodes of defrag_list * Returns : * -1 : Error * number of digests found : success * */ static long build_defrag_list_from_nbtr(as_namespace *ns, ai_obj *acol, bt *nbtr, ulong nofst, ulong *limit, uint64_t * tot_found, cf_ll *gc_list) { int error = -1; btEntry *nbe; // STEP 1: go thru a portion of the nbtr and find to-be-deleted-PKs // TODO: a range query may be smarter then using the Xth Iterator btSIter *nbi = (nofst ? btGetFullXthIter(nbtr, nofst, 1, NULL, 0) : btGetFullRangeIter(nbtr, 1, NULL)); if (!nbi) { return error; } long found = 0; long processed = 0; while ((nbe = btRangeNext(nbi, 1))) { ai_obj *akey = nbe->key; int ret = as_sindex_can_defrag_record(ns, (cf_digest *) (&akey->y)); if (ret == AS_SINDEX_GC_SKIP_ITERATION) { *limit = 0; break; } else if (ret == AS_SINDEX_GC_OK) { bool create = (cf_ll_size(gc_list) == 0) ? true : false; objs_to_defrag_arr *dt; if (!create) { cf_ll_element * ele = cf_ll_get_tail(gc_list); dt = ((ll_sindex_gc_element*)ele)->objs_to_defrag; if (dt->num == SINDEX_GC_NUM_OBJS_PER_ARR) { create = true; } } if (create) { dt = as_sindex_gc_get_defrag_arr(); if (!dt) { *tot_found += found; return -1; } ll_sindex_gc_element * node; node = cf_malloc(sizeof(ll_sindex_gc_element)); node->objs_to_defrag = dt; cf_ll_append(gc_list, (cf_ll_element *)node); } cloneDigestFromai_obj(&(dt->acol_digs[dt->num].dig), akey); ai_objClone(&(dt->acol_digs[dt->num].acol), acol); dt->num += 1; found++; } processed++; (*limit)--; if (*limit == 0) break; } btReleaseRangeIterator(nbi); *tot_found += found; return processed; }
/**
 * Initialize a dyn buf backed by a freshly heap-allocated buffer of `sz`
 * bytes, with nothing used yet.
 */
void
cf_dyn_buf_init_heap(cf_dyn_buf *db, size_t sz)
{
	db->is_stack = false;
	db->alloc_sz = sz;
	db->used_sz = 0;
	db->buf = cf_malloc(sz);
}
as_vector* as_vector_create(uint32_t item_size, uint32_t capacity) { as_vector* vector = cf_malloc(sizeof(as_vector)); as_vector_init(vector, item_size, capacity); vector->flags = FLAGS_HEAP | FLAGS_CREATED; return vector; }
static ai_arr * ai_arr_new() { ai_arr *arr = cf_malloc(sizeof(ai_arr) + (INIT_CAPACITY * CF_DIGEST_KEY_SZ)); arr->capacity = INIT_CAPACITY; arr->used = 0; return arr; }
/**
 * Render an as_integer as a decimal string. Returns a heap-allocated string
 * the caller must free, or NULL if allocation fails.
 *
 * Fixes: the cf_malloc() result was used (memset/sprintf) without a NULL
 * check, unlike the other *_val_tostring helpers; snprintf bounds the write
 * and NUL-terminates, making the leading memset unnecessary.
 */
char * as_integer_val_tostring(const as_val * v)
{
	as_integer * i = (as_integer *) v;
	char * str = (char *) cf_malloc(32);

	if ( !str ) {
		return NULL;
	}

	snprintf(str, 32, "%" PRId64, i->value);
	return str;
}
/*
 * create_slot   : Creates slot and initializes variable
 * create_chunk  : Creates chunk and initializes
 * expand_chunk  : Expands chunk by single unit
 *
 * Note: The idea of having a extra level indirection for chunk and slot
 *       instead of single array is because the address values of tr/rd/r_ref
 *       extra is stored and used. Realloc may end up moving these to
 *       different memory location, invalidating stored values.
 *
 * Return value:
 *  Slot functions
 *      valid slot pointer in case of success
 *      NULL in case of failure
 *  Chunk functions
 *      0 in case of success
 *      -1 in case of failure
 */
ldt_slot *
create_slot()
{
	// cf_malloc already yields NULL on failure, which is this function's
	// failure signal - return its result directly.
	return cf_malloc(sizeof(ldt_slot) * LDT_SLOT_CHUNK_SIZE);
}
/**
 * Allocate a buf builder with a 1024-byte total footprint; the usable
 * payload is that minus the header. Returns NULL on allocation failure.
 */
cf_buf_builder *
cf_buf_builder_create()
{
	const size_t total_sz = 1024;
	cf_buf_builder *bb = cf_malloc(total_sz);

	if (! bb) {
		return NULL;
	}

	bb->alloc_sz = total_sz - sizeof(cf_buf_builder);
	bb->used_sz = 0;

	return bb;
}
/**
 * Copy the vector's contents into a freshly allocated flat array.
 * `*size` receives the element count; the caller owns the returned buffer.
 *
 * Fix: the byte length was previously computed as a uint32_t * uint32_t
 * product, which could wrap before being widened to size_t; cast first so
 * the multiplication happens in size_t.
 */
void* as_vector_to_array(as_vector* vector, uint32_t* size)
{
	size_t len = (size_t)vector->size * vector->item_size;
	void* array = cf_malloc(len);
	memcpy(array, vector->list, len);
	*size = vector->size;
	return array;
}