/**
 * Create a new cache for extended keys.
 *
 * @param db the database which owns the cache; memory comes from the
 *        allocator of db's environment
 * @return the new cache, or NULL if the allocation failed (the caller
 *         treats NULL as HAM_OUT_OF_MEMORY)
 */
extkey_cache_t *
extkey_cache_new(ham_db_t *db)
{
    extkey_cache_t *c;
    ham_size_t memsize;

    /* header plus one bucket pointer per hash slot; use an unsigned
     * size type instead of a plain int for the allocation size */
    memsize=sizeof(extkey_cache_t)+EXTKEY_CACHE_BUCKETSIZE*sizeof(extkey_t *);
    c=(extkey_cache_t *)allocator_calloc(env_get_allocator(db_get_env(db)),
                    memsize);
    if (!c) {
        return (0);
    }

    extkey_cache_set_db(c, db);
    extkey_cache_set_bucketsize(c, EXTKEY_CACHE_BUCKETSIZE);

    return (c);
}
/**
 * Smoke test for the allocator API: repeatedly allocates, grows,
 * frees and zero-allocates buffers of increasing size.
 *
 * Fixes over the original: the realloc result is captured in a
 * temporary (the original `ptr = allocator_realloc(..., ptr, ...)`
 * would leak the old block on failure), and allocation results are
 * checked before use.
 */
int
main(int argc, char* argv[])
{
    int i;
    int n = 100;
    Allocator* allocator = allocator_normal_create();

    if (!allocator)
        return 1;

    for (i = 0; i < n; i++) {
        /* plain allocation followed by a grow-by-4 reallocation */
        char* ptr = allocator_alloc(allocator, i);
        char* tmp = allocator_realloc(allocator, ptr, i+4);
        if (!tmp) {
            /* realloc failed - the original block is still valid */
            allocator_free(allocator, ptr);
        }
        else {
            allocator_free(allocator, tmp);
        }

        /* zero-initialized allocation of (i+4)*4 bytes */
        ptr = allocator_calloc(allocator, i+4, 4);
        allocator_free(allocator, ptr);
    }

    allocator_destroy(allocator);
    return 0;
}
/**
 * Allocate and initialize a new page cache for an environment.
 *
 * @param env the environment which owns the cache
 * @param max_size the cache capacity, in bytes
 * @return the new cache, or NULL if the allocation failed
 */
ham_cache_t *
cache_new(ham_env_t *env, ham_size_t max_size)
{
    ham_cache_t *cache;
    ham_size_t total;
    ham_size_t buckets=CACHE_BUCKET_SIZE;

    ham_assert(buckets, (0));
    ham_assert(max_size, (0));

    /* the struct already embeds one bucket pointer, hence buckets-1 */
    total=sizeof(ham_cache_t)+(buckets-1)*sizeof(void *);

    cache=allocator_calloc(env_get_allocator(env), total);
    if (!cache)
        return (0);

    cache_set_env(cache, env);
    cache_set_capacity(cache, max_size);
    cache_set_bucketsize(cache, buckets);
    cache->_timeslot = 777; /* a reasonable start value; value is related
                             * to the increments applied to active cache
                             * pages */
    return (cache);
}
/**
 * Create a cursor on the remote server.
 *
 * Sends a cursor-create request over the wire, then allocates the local
 * ham_cursor_t and stores the remote handle from the reply in it.
 *
 * Fix: the original leaked 'reply' when the local cursor allocation
 * failed (HAM_OUT_OF_MEMORY path).
 *
 * @param db     the (remote) database
 * @param txn    optional transaction; may be NULL
 * @param flags  flags forwarded to the server
 * @param cursor receives the newly allocated cursor
 * @return 0 on success or a ham_status_t error code
 */
static ham_status_t
_remote_cursor_create(ham_db_t *db, ham_txn_t *txn, ham_u32_t flags,
                ham_cursor_t **cursor)
{
    ham_env_t *env=db_get_env(db);
    ham_status_t st;
    proto_wrapper_t *request, *reply;

    request=proto_init_cursor_create_request(db_get_remote_handle(db),
                        txn ? txn_get_remote_handle(txn) : 0, flags);

    st=_perform_request(env, env_get_curl(env), request, &reply);
    proto_delete(request);
    if (st) {
        if (reply)
            proto_delete(reply);
        return (st);
    }

    ham_assert(reply!=0, (""));
    ham_assert(proto_has_cursor_create_reply(reply)!=0, (""));

    st=proto_cursor_create_reply_get_status(reply);
    if (st) {
        proto_delete(reply);
        return (st);
    }

    *cursor=(ham_cursor_t *)allocator_calloc(env_get_allocator(env),
                        sizeof(ham_cursor_t));
    if (!(*cursor)) {
        /* bugfix: don't leak the reply on out-of-memory */
        proto_delete(reply);
        return (HAM_OUT_OF_MEMORY);
    }

    cursor_set_remote_handle(*cursor,
                proto_cursor_create_reply_get_cursor_handle(reply));
    proto_delete(reply);

    return (st);
}
/**
 * Create a btree backend for @a db.
 *
 * Allocates the ham_btree_t structure and wires up its function table
 * (create/open/close/flush/insert/find/erase/...); the integrity-check
 * functions are only installed when HAM_ENABLE_INTERNAL is defined.
 * Ownership of the backend passes to the caller via @a backend_ref.
 *
 * @param backend_ref receives the new backend; set to 0 on failure
 * @param db the database which will use this backend
 * @param flags not inspected here; kept for the factory signature
 * @return HAM_SUCCESS, or HAM_OUT_OF_MEMORY if allocation failed
 */
ham_status_t
btree_create(ham_backend_t **backend_ref, ham_db_t *db, ham_u32_t flags)
{
    ham_btree_t *btree;

    *backend_ref = 0;

    btree = (ham_btree_t *)allocator_calloc(
                env_get_allocator(db_get_env(db)), sizeof(*btree));
    if (!btree) {
        return HAM_OUT_OF_MEMORY;
    }

    /* initialize the backend - install the dispatch table */
    btree->_db=db;
    btree->_fun_create=my_fun_create;
    btree->_fun_open=my_fun_open;
    btree->_fun_close=my_fun_close;
    btree->_fun_flush=my_fun_flush;
    btree->_fun_delete=my_fun_delete;
    btree->_fun_find=btree_find;
    btree->_fun_insert=btree_insert;
    btree->_fun_erase=btree_erase;
    btree->_fun_enumerate=btree_enumerate;
#ifdef HAM_ENABLE_INTERNAL
    btree->_fun_check_integrity=btree_check_integrity;
    btree->_fun_calc_keycount_per_page=my_fun_calc_keycount_per_page;
#else
    btree->_fun_check_integrity=0;
    btree->_fun_calc_keycount_per_page=0;
#endif
    btree->_fun_cursor_create = my_fun_cursor_create;
    btree->_fun_close_cursors = my_fun_close_cursors;
    btree->_fun_uncouple_all_cursors=my_fun_uncouple_all_cursors;
    btree->_fun_free_page_extkeys=my_fun_free_page_extkeys;

    *backend_ref = (ham_backend_t *)btree;
    return HAM_SUCCESS;
}
/**
 * Clone a remote cursor.
 *
 * Sends a cursor-clone request for @a src, then allocates the local
 * ham_cursor_t and stores the new remote handle from the reply in it.
 *
 * Fix: the original leaked 'reply' when the local cursor allocation
 * failed (HAM_OUT_OF_MEMORY path).
 *
 * @param src  the cursor to clone
 * @param dest receives the newly allocated clone
 * @return 0 on success or a ham_status_t error code
 */
static ham_status_t
_remote_cursor_clone(ham_cursor_t *src, ham_cursor_t **dest)
{
    ham_env_t *env=db_get_env(cursor_get_db(src));
    ham_status_t st;
    proto_wrapper_t *request, *reply;

    request=proto_init_cursor_clone_request(cursor_get_remote_handle(src));

    st=_perform_request(env, env_get_curl(env), request, &reply);
    proto_delete(request);
    if (st) {
        if (reply)
            proto_delete(reply);
        return (st);
    }

    ham_assert(reply!=0, (""));
    ham_assert(proto_has_cursor_clone_reply(reply)!=0, (""));

    st=proto_cursor_clone_reply_get_status(reply);
    if (st) {
        proto_delete(reply);
        return (st);
    }

    *dest=(ham_cursor_t *)allocator_calloc(env_get_allocator(env),
                        sizeof(ham_cursor_t));
    if (!(*dest)) {
        /* bugfix: don't leak the reply on out-of-memory */
        proto_delete(reply);
        return (HAM_OUT_OF_MEMORY);
    }

    cursor_set_remote_handle(*dest,
                proto_cursor_clone_reply_get_cursor_handle(reply));
    proto_delete(reply);

    return (st);
}
/** @fn DataBuffer_t::DataBuffer_t(size_t allocSize )
 * Constructor: allocates the backing store for the buffer.
 *
 * Without DVP_USE_ION the memory comes from the process heap
 * (zero-initialized via calloc); with DVP_USE_ION it is obtained from
 * the ION allocator as a single 1D uncached tiled allocation.
 *
 * @param size_t allocSize - required size in bytes.
 */
DataBuffer_t::DataBuffer_t(size_t allocSize )
{
#ifndef DVP_USE_ION
    /* plain zero-initialized heap allocation */
    pData = calloc(1, allocSize);
    if ( pData)
        nBuffSize = allocSize;
    /* NOTE(review): nBuffSize is left untouched when calloc fails -
     * presumably it is default-initialized elsewhere; verify */
    bytesWritten = 0;
#else // DVP_USE_ION
    mutex_init(&mBuffLock);
    pAllocator = allocator_init();
    if( pAllocator )
    {
        /* describe a 1-dimensional byte buffer: 1 byte per "pixel",
         * allocSize wide, single row/plane */
        dims[0].img.bpp = 1;
        dims[0].img.width = allocSize;
        dims[0].img.height = 1;
        dims[0].dim.x = allocSize;
        dims[0].dim.y = 1;
        dims[0].dim.z = 1;
        memset((void*)handles, 0, sizeof(handles));
        if( false_e == allocator_calloc( pAllocator,
                ALLOCATOR_MEMORY_TYPE_TILED_1D_UNCACHED,
                1, 1, dims, &pData, handles, &strides ) )
        {
            /* allocation failed - make the failure visible via pData */
            pData = NULL;
        }
        if ( pData )
            nBuffSize = allocSize;
        else
            nBuffSize = 0;
        bytesWritten = 0;
    }
    /* NOTE(review): when allocator_init() fails, no member besides the
     * mutex is initialized on this path - confirm callers check
     * pData/pAllocator before use */
#endif // DVP_USE_ION
}
/**
 * Allocate space in storage for, and write, the content referenced by
 * record->data (length record->size), prefixed by a blob_t header.
 *
 * Conditions will apply whether the data is written through cache or
 * direct to device: small blobs go through the page cache, large ones
 * use direct device I/O.
 *
 * Partial writes (HAM_PARTIAL) are handled in this function: gaps
 * before and after the partial range are filled with zeroes.
 *
 * @param env    the environment (supplies allocator, device, pagesize)
 * @param db     the database on whose behalf the blob is allocated
 * @param record the record to store; with HAM_PARTIAL only the range
 *               [partial_offset, partial_offset+partial_size) of it
 * @param flags  HAM_PARTIAL is the only flag inspected here
 * @param blobid receives the blob id (a storage address, or - for
 *               in-memory databases - the memory address of the blob)
 * @return 0 on success, HAM_OUT_OF_MEMORY, or a device/freelist error
 */
ham_status_t
blob_allocate(ham_env_t *env, ham_db_t *db, ham_record_t *record,
        ham_u32_t flags, ham_offset_t *blobid)
{
    ham_status_t st;
    ham_page_t *page=0;
    ham_offset_t addr;
    blob_t hdr;
    ham_u8_t *chunk_data[2];
    ham_size_t alloc_size;
    ham_size_t chunk_size[2];
    ham_device_t *device=env_get_device(env);
    ham_bool_t freshly_created = HAM_FALSE;

    *blobid=0;

    /*
     * PARTIAL WRITE
     *
     * if offset+partial_size equals the full record size, then we won't
     * have any gaps. In this case we just write the full record and ignore
     * the partial parameters.
     */
    if (flags&HAM_PARTIAL) {
        if (record->partial_offset==0
                && record->partial_offset+record->partial_size==record->size)
            flags&=~HAM_PARTIAL;
    }

    /*
     * in-memory-database: the blobid is actually a pointer to the memory
     * buffer, in which the blob (with the blob-header) is stored
     */
    if (env_get_rt_flags(env)&HAM_IN_MEMORY_DB) {
        blob_t *hdr;    /* deliberately shadows the outer 'hdr' */
        ham_u8_t *p=(ham_u8_t *)allocator_alloc(env_get_allocator(env),
                record->size+sizeof(blob_t));
        if (!p) {
            return HAM_OUT_OF_MEMORY;
        }

        /* initialize the header */
        hdr=(blob_t *)p;
        memset(hdr, 0, sizeof(*hdr));
        blob_set_self(hdr, (ham_offset_t)PTR_TO_U64(p));
        blob_set_alloc_size(hdr, record->size+sizeof(blob_t));
        blob_set_size(hdr, record->size);

        /* do we have gaps? if yes, fill them with zeroes */
        if (flags&HAM_PARTIAL) {
            ham_u8_t *s=p+sizeof(blob_t);
            if (record->partial_offset)
                memset(s, 0, record->partial_offset);
            memcpy(s+record->partial_offset,
                    record->data, record->partial_size);
            if (record->partial_offset+record->partial_size<record->size)
                memset(s+record->partial_offset+record->partial_size,
                        0, record->size-(record->partial_offset
                            +record->partial_size));
        }
        else {
            memcpy(p+sizeof(blob_t), record->data, record->size);
        }

        *blobid=(ham_offset_t)PTR_TO_U64(p);
        return (0);
    }

    memset(&hdr, 0, sizeof(hdr));

    /*
     * blobs are CHUNKSIZE-allocated: round header+payload up to the
     * next multiple of DB_CHUNKSIZE
     */
    alloc_size=sizeof(blob_t)+record->size;
    alloc_size += DB_CHUNKSIZE - 1;
    alloc_size -= alloc_size % DB_CHUNKSIZE;

    /*
     * check if we have space in the freelist
     */
    st = freel_alloc_area(&addr, env, db, alloc_size);
    if (!addr) {
        if (st)
            return st;

        /*
         * if the blob is small AND if logging is disabled: load the page
         * through the cache
         */
        if (__blob_from_cache(env, alloc_size)) {
            st = db_alloc_page(&page, db, PAGE_TYPE_BLOB,
                    PAGE_IGNORE_FREELIST);
            ham_assert(st ? page == NULL : 1, (0));
            ham_assert(!st ? page != NULL : 1, (0));
            if (st)
                return st;
            /* blob pages don't have a page header */
            page_set_npers_flags(page,
                    page_get_npers_flags(page)|PAGE_NPERS_NO_HEADER);
            addr=page_get_self(page);
            /* move the remaining space to the freelist */
            (void)freel_mark_free(env, db, addr+alloc_size,
                    env_get_pagesize(env)-alloc_size, HAM_FALSE);
            blob_set_alloc_size(&hdr, alloc_size);
        }
        else {
            /*
             * otherwise use direct IO to allocate the space
             */
            ham_size_t aligned=alloc_size;
            aligned += env_get_pagesize(env) - 1;
            aligned -= aligned % env_get_pagesize(env);

            st=device->alloc(device, aligned, &addr);
            if (st)
                return (st);

            /* if aligned!=size, and the remaining chunk is large enough:
             * move it to the freelist */
            {
                ham_size_t diff=aligned-alloc_size;
                if (diff > SMALLEST_CHUNK_SIZE) {
                    (void)freel_mark_free(env, db, addr+alloc_size,
                            diff, HAM_FALSE);
                    blob_set_alloc_size(&hdr, aligned-diff);
                }
                else {
                    blob_set_alloc_size(&hdr, aligned);
                }
            }
            freshly_created = HAM_TRUE;
        }

        ham_assert(HAM_SUCCESS == freel_check_area_is_allocated(env, db,
                    addr, alloc_size), (0));
    }
    else {
        /* freelist hit: alloc_size is already chunk-aligned */
        ham_assert(!st, (0));
        blob_set_alloc_size(&hdr, alloc_size);
    }

    blob_set_size(&hdr, record->size);
    blob_set_self(&hdr, addr);

    /*
     * PARTIAL WRITE
     *
     * are there gaps at the beginning? If yes, then we'll fill with zeros
     */
    if ((flags&HAM_PARTIAL) && (record->partial_offset)) {
        ham_u8_t *ptr;
        ham_size_t gapsize=record->partial_offset;

        /* zero-filled scratch buffer of at most one pagesize */
        ptr=allocator_calloc(env_get_allocator(env),
                gapsize > env_get_pagesize(env)
                    ? env_get_pagesize(env)
                    : gapsize);
        if (!ptr)
            return (HAM_OUT_OF_MEMORY);

        /*
         * first: write the header
         */
        chunk_data[0]=(ham_u8_t *)&hdr;
        chunk_size[0]=sizeof(hdr);
        st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created,
                chunk_data, chunk_size, 1);
        if (st)
            return (st);
        /* NOTE(review): 'ptr' leaks on this and the later error returns
         * in this branch, and after an error-break from the loop below
         * the remaining-gap write still executes - confirm and fix */

        addr+=sizeof(hdr);

        /* now fill the gap; if the gap is bigger than a pagesize we'll
         * split the gap into smaller chunks */
        while (gapsize>=env_get_pagesize(env)) {
            chunk_data[0]=ptr;
            chunk_size[0]=env_get_pagesize(env);
            st=__write_chunks(env, page, addr, HAM_TRUE,
                    freshly_created, chunk_data, chunk_size, 1);
            if (st)
                break;
            gapsize-=env_get_pagesize(env);
            addr+=env_get_pagesize(env);
        }

        /* fill the remaining gap */
        if (gapsize) {
            chunk_data[0]=ptr;
            chunk_size[0]=gapsize;

            st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created,
                    chunk_data, chunk_size, 1);
            if (st)
                return (st);
            addr+=gapsize;
        }
        allocator_free(env_get_allocator(env), ptr);

        /* now write the "real" data */
        chunk_data[0]=(ham_u8_t *)record->data;
        chunk_size[0]=record->partial_size;

        st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created,
                chunk_data, chunk_size, 1);
        if (st)
            return (st);
        addr+=record->partial_size;
    }
    else {
        /*
         * not writing partially: write header and data, then we're done
         */
        chunk_data[0]=(ham_u8_t *)&hdr;
        chunk_size[0]=sizeof(hdr);
        chunk_data[1]=(ham_u8_t *)record->data;
        chunk_size[1]=(flags&HAM_PARTIAL)
                        ? record->partial_size
                        : record->size;

        st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created,
                chunk_data, chunk_size, 2);
        if (st)
            return (st);
        addr+=sizeof(hdr)+
            ((flags&HAM_PARTIAL) ? record->partial_size : record->size);
    }

    /*
     * store the blobid; it will be returned to the caller
     */
    *blobid=blob_get_self(&hdr);

    /*
     * PARTIAL WRITES:
     *
     * if we have gaps at the end of the blob: just append more chunks to
     * fill these gaps. Since they can be pretty large we split them into
     * smaller chunks if necessary.
     */
    if (flags&HAM_PARTIAL) {
        if (record->partial_offset+record->partial_size < record->size) {
            ham_u8_t *ptr;
            ham_size_t gapsize=record->size
                    - (record->partial_offset+record->partial_size);

            /* now fill the gap; if the gap is bigger than a pagesize we'll
             * split the gap into smaller chunks
             *
             * we split this loop in two - the outer loop will allocate the
             * memory buffer, thus saving some allocations */
            while (gapsize>env_get_pagesize(env)) {
                /* deliberately shadows the outer 'ptr' */
                ham_u8_t *ptr=allocator_calloc(env_get_allocator(env),
                        env_get_pagesize(env));
                if (!ptr)
                    return (HAM_OUT_OF_MEMORY);
                while (gapsize>env_get_pagesize(env)) {
                    chunk_data[0]=ptr;
                    chunk_size[0]=env_get_pagesize(env);
                    st=__write_chunks(env, page, addr, HAM_TRUE,
                            freshly_created, chunk_data, chunk_size, 1);
                    if (st)
                        break;
                    gapsize-=env_get_pagesize(env);
                    addr+=env_get_pagesize(env);
                }
                allocator_free(env_get_allocator(env), ptr);
                if (st)
                    return (st);
            }

            /* now write the remainder, which is less than a pagesize */
            /* NOTE(review): off-by-one - the loop above exits while
             * gapsize can still EQUAL the pagesize (condition is '>'),
             * so this assert can fire spuriously; the write below
             * handles gapsize==pagesize correctly */
            ham_assert(gapsize<env_get_pagesize(env), (""));
            chunk_size[0]=gapsize;
            ptr=chunk_data[0]=allocator_calloc(env_get_allocator(env),
                    gapsize);
            if (!ptr)
                return (HAM_OUT_OF_MEMORY);

            st=__write_chunks(env, page, addr, HAM_TRUE, freshly_created,
                    chunk_data, chunk_size, 1);
            allocator_free(env_get_allocator(env), ptr);
            if (st)
                return (st);
        }
    }

    return (0);
}
/**
 * Insert a record into a duplicate table, creating the table if
 * @a table_id is 0, growing it when full, and writing it back to disk.
 *
 * Fixes over the original:
 *  - 'position' is unsigned (ham_size_t), so the old 'position<0' check
 *    after __get_sorted_position() was always false and negative error
 *    codes were silently used as (huge) positions; the check is now done
 *    on the signed representation, and the table is freed on that path.
 *  - the old table is no longer leaked when the resize allocation fails.
 *
 * @param db           the database
 * @param table_id     blobid of an existing duplicate table, or 0 to
 *                     create a new one (then num_entries must be 2 and
 *                     entries[0] is the already-existing first duplicate)
 * @param record       the record; only consulted for sorted inserts
 * @param position     requested position (for OVERWRITE/INSERT_BEFORE/...)
 * @param flags        HAM_OVERWRITE or one of HAM_DUPLICATE_INSERT_*
 * @param entries      the entry (entries) to insert
 * @param num_entries  number of entries passed in; see above
 * @param rid          receives the (possibly new) blobid of the table
 * @param new_position optional; receives the final insert position
 * @return 0 on success or a ham_status_t error code
 */
ham_status_t
blob_duplicate_insert(ham_db_t *db, ham_offset_t table_id,
        ham_record_t *record, ham_size_t position, ham_u32_t flags,
        dupe_entry_t *entries, ham_size_t num_entries,
        ham_offset_t *rid, ham_size_t *new_position)
{
    ham_status_t st=0;
    dupe_table_t *table=0;
    ham_bool_t alloc_table=0;
    ham_bool_t resize=0;
    ham_page_t *page=0;
    ham_env_t *env=db_get_env(db);

    /*
     * create a new duplicate table if none existed, and insert
     * the first entry
     */
    if (!table_id) {
        ham_assert(num_entries==2, (""));
        /* allocates space for 8 (!) entries */
        table=allocator_calloc(env_get_allocator(env),
                sizeof(dupe_table_t)+7*sizeof(dupe_entry_t));
        if (!table)
            return HAM_OUT_OF_MEMORY;
        dupe_table_set_capacity(table, 8);
        dupe_table_set_count(table, 1);
        memcpy(dupe_table_get_entry(table, 0), &entries[0],
                        sizeof(entries[0]));

        /* skip the first entry */
        entries++;
        num_entries--;
        alloc_table=1;
    }
    else {
        /*
         * otherwise load the existing table
         */
        st=__get_duplicate_table(&table, &page, env, table_id);
        ham_assert(st ? table == NULL : 1, (0));
        ham_assert(st ? page == NULL : 1, (0));
        if (!table)
            return st ? st : HAM_INTERNAL_ERROR;
        if (!page && !(env_get_rt_flags(env)&HAM_IN_MEMORY_DB))
            alloc_table=1;
    }

    if (page)
        if ((st=ham_log_add_page_before(page)))
            return (st);

    ham_assert(num_entries==1, (""));

    /*
     * resize the table, if necessary
     */
    if (!(flags & HAM_OVERWRITE)
            && dupe_table_get_count(table)+1>=dupe_table_get_capacity(table)) {
        dupe_table_t *old=table;
        ham_size_t new_cap=dupe_table_get_capacity(table);

        /* grow by 8 entries while small, by a third afterwards */
        if (new_cap < 3*8)
            new_cap += 8;
        else
            new_cap += new_cap/3;

        table=allocator_calloc(env_get_allocator(env), sizeof(dupe_table_t)+
                        (new_cap-1)*sizeof(dupe_entry_t));
        if (!table) {
            /* bugfix: don't leak the old table on out-of-memory */
            if (alloc_table)
                allocator_free(env_get_allocator(env), old);
            return (HAM_OUT_OF_MEMORY);
        }
        dupe_table_set_capacity(table, new_cap);
        dupe_table_set_count(table, dupe_table_get_count(old));
        memcpy(dupe_table_get_entry(table, 0), dupe_table_get_entry(old, 0),
                        dupe_table_get_count(old)*sizeof(dupe_entry_t));
        if (alloc_table)
            allocator_free(env_get_allocator(env), old);
        alloc_table=1;
        resize=1;
    }

    /*
     * insert sorted, unsorted or overwrite the entry at the requested position
     */
    if (flags&HAM_OVERWRITE) {
        dupe_entry_t *e=dupe_table_get_entry(table, position);

        /* release the old blob unless it was stored inline */
        if (!(dupe_entry_get_flags(e)&(KEY_BLOB_SIZE_SMALL
                                    |KEY_BLOB_SIZE_TINY
                                    |KEY_BLOB_SIZE_EMPTY))) {
            (void)blob_free(env, db, dupe_entry_get_rid(e), 0);
        }

        memcpy(dupe_table_get_entry(table, position),
                        &entries[0], sizeof(entries[0]));
    }
    else {
        if (db_get_rt_flags(db)&HAM_SORT_DUPLICATES) {
            if (page)
                page_add_ref(page);
            position=__get_sorted_position(db, table, record, flags);
            if (page)
                page_release_ref(page);
            /* bugfix: 'position' is unsigned, so a plain 'position<0'
             * was always false; reinterpret as signed to detect error
             * codes, and free the table before bailing out */
            if ((ham_s32_t)position<0) {
                if (alloc_table)
                    allocator_free(env_get_allocator(env), table);
                return ((ham_status_t)position);
            }
        }
        else if (flags&HAM_DUPLICATE_INSERT_BEFORE) {
            /* do nothing, insert at the current position */
        }
        else if (flags&HAM_DUPLICATE_INSERT_AFTER) {
            position++;
            if (position > dupe_table_get_count(table))
                position=dupe_table_get_count(table);
        }
        else if (flags&HAM_DUPLICATE_INSERT_FIRST) {
            position=0;
        }
        else if (flags&HAM_DUPLICATE_INSERT_LAST) {
            position=dupe_table_get_count(table);
        }
        else {
            position=dupe_table_get_count(table);
        }

        /* shift the tail one slot to the right to make room */
        if (position != dupe_table_get_count(table)) {
            memmove(dupe_table_get_entry(table, position+1),
                        dupe_table_get_entry(table, position),
                        sizeof(entries[0])
                            *(dupe_table_get_count(table)-position));
        }

        memcpy(dupe_table_get_entry(table, position),
                        &entries[0], sizeof(entries[0]));

        dupe_table_set_count(table, dupe_table_get_count(table)+1);
    }

    /*
     * write the table back to disk and return the blobid of the table
     */
    if ((table_id && !page) || resize) {
        ham_record_t rec={0};
        rec.data=(ham_u8_t *)table;
        rec.size=sizeof(dupe_table_t)
                    +(dupe_table_get_capacity(table)-1)*sizeof(dupe_entry_t);
        st=blob_overwrite(env, db, table_id, &rec, 0, rid);
    }
    else if (!table_id) {
        ham_record_t rec={0};
        rec.data=(ham_u8_t *)table;
        rec.size=sizeof(dupe_table_t)
                    +(dupe_table_get_capacity(table)-1)*sizeof(dupe_entry_t);
        st=blob_allocate(env, db, &rec, 0, rid);
    }
    else if (table_id && page) {
        page_set_dirty(page, env);
    }
    else {
        ham_assert(!"shouldn't be here", (0));
    }

    if (alloc_table)
        allocator_free(env_get_allocator(env), table);

    if (new_position)
        *new_position=position;

    return (st);
}