/* Remove the block descriptor at virtual index block_vid: shift every
 * following descriptor down one slot, truncate the now-duplicated last
 * slot, then rebuild the summary tree.
 * Returns 0 on success, a chain error code or -1 on failure. */
static int tree_delete_block(tree_t *tree, unsigned int block_vid){ // {{{
	ssize_t                q_ret;
	
	/* shift descriptors [block_vid+1 ..] one slot towards the front */
	hash_t req_move[] = {
		{ HK(action),      DATA_UINT32T( ACTION_MOVE )                          },
		{ HK(offset_from), DATA_OFFT ( (block_vid + 1) * sizeof(block_info) )   },
		{ HK(offset_to),   DATA_OFFT ( (block_vid    ) * sizeof(block_info) )   },
		hash_end
	};
	q_ret = chain_next_query(tree->chain, req_move);
	if(q_ret != 0)
		return q_ret;
	
	/* drop the trailing slot left over after the shift */
	hash_t req_delete[] = {
		{ HK(action), DATA_UINT32T( ACTION_DELETE )                             },
		{ HK(offset), DATA_OFFT ( (tree->blocks_count - 1) * sizeof(block_info) ) },
		{ HK(size),   DATA_SIZET( sizeof(block_info) )                          },
		hash_end
	};
	q_ret = chain_next_query(tree->chain, req_delete);
	if(q_ret != 0)
		return q_ret;
	
	if(tree_reinit(tree) != 0)
		return -1;
	
	// TODO replace recalc with faster procedure which recalc only changed items instead of all
	if(tree_recalc(tree) != 0)
		return -1;
	
	return 0;
} // }}}
/* Set handler for lists. When HK(insert) is present, first shift every item
 * at or after HK(offset) up by one position (open-ended ACTION_MOVE), then
 * pass the original request down the chain.
 * Returns a negative error code, or -EEXIST when the chain succeeded. */
static ssize_t lists_set(backend_t *backend, request_t *request){
	ssize_t                rc;
	off_t                  src_off, dst_off;
	
	if(hash_find(request, HK(insert)) != NULL){
		// on insert we move all items from 'key' to 'key'+1
		// recommended use of 'blocks' backend as under-lying backend to improve performance
		hash_data_copy(rc, TYPE_OFFT, src_off, request, HK(offset));
		if(rc != 0)
			return warning("no offset supplied");
		
		dst_off = src_off + 1;
		
		hash_t shift_request[] = {
			{ HK(action),      DATA_UINT32T(ACTION_MOVE) },
			{ HK(offset_from), DATA_PTR_OFFT(&src_off)   },
			{ HK(offset_to),   DATA_PTR_OFFT(&dst_off)   },
			{ HK(size),        DATA_VOID                 },
			hash_next(request)
		};
		rc = backend_pass(backend, shift_request);
		if(rc < 0)
			return rc;
	}
	
	rc = backend_pass(backend, request);
	return (rc < 0) ? rc : -EEXIST;
}
/* Change the recorded size of block block_vid to new_size and propagate the
 * size difference into every level of the summary table.
 * Returns 0 on success, -1 on chain query failure.
 *
 * Fixes vs. previous version:
 *  - delta is now signed: the old `unsigned int delta` was ZERO-extended when
 *    added to the size_t table entries, so shrinking a block corrupted the
 *    table wherever size_t is wider than unsigned int (typical 64-bit).
 *  - the write query result is now checked (the read already was). */
static int tree_resize_block(tree_t *tree, unsigned int block_vid, unsigned int new_size){ // {{{
	ssize_t                delta;  // signed: a block may shrink as well as grow
	ssize_t                ret;
	block_info             block;
	unsigned int           j;
	buffer_t               req_buffer;
	
	buffer_init_from_bare(&req_buffer, &block, sizeof(block));
	
	/* read block_info */
	hash_t    req_read[] = {
		{ HK(action), DATA_UINT32T( ACTION_READ )                  },
		{ HK(offset), DATA_OFFT ( block_vid * sizeof(block_info) ) },
		{ HK(size),   DATA_SIZET( sizeof(block_info) )             },
		{ HK(buffer), DATA_BUFFERT(&req_buffer)                    },
		hash_end
	};
	if( (ret = chain_next_query(tree->chain, req_read)) <= 0){
		buffer_destroy(&req_buffer);
		return -1;
	}
	
	/* fix block_info; compute delta as a signed quantity so a shrink
	 * sign-extends correctly into the size_t additions below */
	delta      = (ssize_t)new_size - (ssize_t)block.size;
	block.size = new_size;
	
	/* write block info back; a failed write must not update the table */
	hash_t    req_write[] = {
		{ HK(action), DATA_UINT32T( ACTION_WRITE )                 },
		{ HK(offset), DATA_OFFT ( block_vid * sizeof(block_info) ) },
		{ HK(size),   DATA_SIZET( sizeof(block_info) )             },
		{ HK(buffer), DATA_BUFFERT(&req_buffer)                    },
		hash_end
	};
	if( (ret = chain_next_query(tree->chain, req_write)) <= 0){
		buffer_destroy(&req_buffer);
		return -1;
	}
	
	// TODO lock
	for(j=0; j < tree->nlevels; j++){
		tree->table[ tree->tof[j] + (block_vid / tree->lss[j]) ] += delta;
	}
	// TODO unlock
	
	buffer_destroy(&req_buffer);
	return 0;
} // }}}
/* Insert a new block descriptor at virtual index block_vid, pointing at real
 * offset block_off with the given size: shift descriptors [block_vid ..] up
 * one slot, write the new descriptor into the vacated slot, then rebuild the
 * tree. Returns 0 on success, -1 on failure. */
static int tree_insert(tree_t *tree, unsigned int block_vid, unsigned int block_off, unsigned int size){ // {{{
	block_info             block;
	ssize_t                ret;
	buffer_t               req_buffer;
	
	buffer_init_from_bare(&req_buffer, &block, sizeof(block));
	
	block.real_block_off = block_off;
	block.size           = size;
	
	/* make room: shift descriptors [block_vid ..] one slot up.
	 * NOTE(review): the move result is overwritten below without being
	 * checked — presumably a move on an empty tail region fails harmlessly
	 * (e.g. inserting at the end), but confirm this is intentional. */
	hash_t    req_move[] = {
		{ HK(action),      DATA_UINT32T(ACTION_MOVE)                        },
		{ HK(offset_from), DATA_OFFT ((block_vid    ) * sizeof(block_info)) },
		{ HK(offset_to),   DATA_OFFT ((block_vid + 1) * sizeof(block_info)) },
		hash_end
	};
	ret = chain_next_query(tree->chain, req_move);
	
	/* write the new descriptor into the vacated slot */
	hash_t    req_write[] = {
		{ HK(action), DATA_UINT32T( ACTION_WRITE )                 },
		{ HK(offset), DATA_OFFT ( block_vid * sizeof(block_info) ) },
		{ HK(size),   DATA_SIZET( sizeof(block_info) )             },
		{ HK(buffer), DATA_BUFFERT(&req_buffer)                    },
		hash_end
	};
	if( (ret = chain_next_query(tree->chain, req_write)) <= 0)
		goto cleanup;
	
	/* from here on any failure maps to -1 */
	ret = -1;
	if(tree_reinit(tree) != 0)
		goto cleanup;
	
	// TODO replace recalc with faster procedure which recalc only changed items instead of all
	if(tree_recalc(tree) != 0)
		goto cleanup;
	
	ret = 0;
cleanup:
	buffer_destroy(&req_buffer);
	return ret;
} // }}}
/* Read the block_info descriptor for virtual index block_vid into *block.
 * Returns 0 on success, -1 on chain query failure.
 *
 * Fix vs. previous version: the failure path returned without calling
 * buffer_destroy(), leaking req_buffer; the buffer is now destroyed on
 * every exit path. */
static int tree_get_block(tree_t *tree, unsigned int block_vid, block_info *block){ // {{{
	int                    ret = 0;
	buffer_t               req_buffer;
	
	buffer_init_from_bare(&req_buffer, block, sizeof(block_info));
	
	hash_t    req_read[] = {
		{ HK(action), DATA_UINT32T( ACTION_READ )                  },
		{ HK(offset), DATA_OFFT ( block_vid * sizeof(block_info) ) },
		{ HK(size),   DATA_SIZET( sizeof(block_info) )             },
		{ HK(buffer), DATA_BUFFERT(&req_buffer)                    },
		hash_end
	};
	if(chain_next_query(tree->chain, req_read) <= 0)
		ret = -1;
	
	buffer_destroy(&req_buffer);
	return ret;
} // }}}
/* Lookup handler: when forced, or when the request does not already carry the
 * configured output key, query the configured side backend for the value and
 * attach the result (or DATA_VOID on ENOENT) under that key before passing
 * the request on. Otherwise the request is passed through untouched.
 * Returns a negative error, or -EEXIST when the downstream pass succeeded. */
static ssize_t lookup_handler(backend_t *backend, request_t *request){ // {{{
	ssize_t                ret;
	data_t                *d;
	data_t                 d_output;
	data_t                 d_void           = DATA_VOID;
	lookup_userdata       *userdata         = (lookup_userdata *)backend->userdata;
	
	if( userdata->force_query != 0 || hash_find(request, userdata->output) == NULL ){
		/* allocate a holder of the configured output type for the query
		 * result (100 bytes — NOTE(review): looks like an arbitrary cap,
		 * confirm it is large enough for all output types) */
		d_output.type = userdata->output_type;
		
		fastcall_alloc r_alloc = { { 3, ACTION_ALLOC }, 100 };
		if(data_query(&d_output, &r_alloc) < 0)
			return -ENOMEM;
		
		request_t r_query[] = {
			{ HK(action),       DATA_UINT32T(ACTION_READ) },
			{ userdata->output, d_output                  },
			hash_next(request)
		};
		switch( (ret = backend_query(userdata->backend_index, r_query)) ){
			case 0:       d = hash_data_find(r_query, userdata->output); break;
			case -ENOENT: d = &d_void; break;   // not found: attach a void value
			default:      goto free;            // real error: free holder, return ret
		};
		
		/* forward with the looked-up (or void) value attached */
		request_t r_next[] = {
			{ userdata->output, *d },
			hash_next(request)
		};
		ret = backend_pass(backend, r_next);
free:;
		/* always release the allocated holder */
		fastcall_alloc r_free = { { 2, ACTION_FREE } };
		data_query(&d_output, &r_free);
		
		return (ret < 0) ? ret : -EEXIST;
	}
	return ( (ret = backend_pass(backend, request)) < 0 ) ? ret : -EEXIST;
} // }}}
/* Delete handler for lists: remove HK(size) items starting at HK(offset) by
 * moving everything beyond the removed range down over it (open-ended MOVE).
 * Returns a negative error code, or -EEXIST when the chain succeeded. */
static ssize_t lists_delete(backend_t *backend, request_t *request){
	ssize_t                rc;
	off_t                  move_from, move_to;
	size_t                 del_size;
	
	hash_data_copy(rc, TYPE_SIZET, del_size, request, HK(size));
	if(rc != 0)
		return warning("no size supplied");
	
	hash_data_copy(rc, TYPE_OFFT, move_to, request, HK(offset));
	if(rc != 0)
		return warning("no offset supplied");
	
	/* source of the move starts just past the range being deleted */
	move_from = move_to + del_size;
	
	hash_t collapse_request[] = {
		{ HK(action),      DATA_UINT32T(ACTION_MOVE) },
		{ HK(offset_from), DATA_PTR_OFFT(&move_from) },
		{ HK(offset_to),   DATA_PTR_OFFT(&move_to)   },
		{ HK(size),        DATA_VOID                 },
		hash_next(request)
	};
	rc = backend_pass(backend, collapse_request);
	return (rc < 0) ? rc : -EEXIST;
}
/* Hash the configured input key with 32-bit MurmurHash2 and forward the
 * request with the hash attached under the configured output key. A missing
 * input key is either passed through (fatal == 0) or reported as an error. */
static ssize_t murmur2_32_handler(machine_t *machine, request_t *request){ // {{{
	murmur_userdata       *userdata = (murmur_userdata *)machine->userdata;
	data_t                *key;
	uint32_t               hash;
	
	if( (key = hash_data_find(request, userdata->input)) == NULL){
		if(userdata->fatal == 0)
			return machine_pass(machine, request);
		
		return error("input key not supplied");
	}
	
	hash = MurmurHash2(key, 0);
	
	request_t r_next[] = {
		{ userdata->output, DATA_UINT32T(hash) },
		hash_next(request)
	};
	return machine_pass(machine, r_next);
} // }}}
/* size-tree {{ */
/* Re-read the number of blocks from the underlying chain and, when the number
 * of tree levels changed, reallocate the per-level summary structures
 * (lss = level chunk sizes, tof = table offsets, table = summed sizes).
 * Returns 0 on success, -1 on failure.
 *
 * Fix vs. previous version: the old lss/tof/table were freed before the new
 * allocations, so a malloc failure left tree->lss/tof/table dangling and
 * tree->nlevels stale — a later call could then early-return 0 against freed
 * memory. The pointers are now NULLed and nlevels zeroed at free time. */
static int tree_reinit(tree_t *tree){ // {{{
	unsigned int           i;
	size_t                 ls;
	unsigned int           nlevels;
	size_t                 table_size;
	size_t                *table;
	size_t                *lss;
	off_t                 *tof;
	buffer_t               req_buffer;
	
	buffer_init_from_bare(&req_buffer, &tree->blocks_count, sizeof(tree->blocks_count));
	
	hash_t    req_count[] = {
		{ HK(action), DATA_UINT32T(ACTION_COUNT) },
		{ HK(buffer), DATA_BUFFERT(&req_buffer)  },
		hash_end
	};
	if(chain_next_query(tree->chain, req_count) <= 0){
		buffer_destroy(&req_buffer);
		return -1;
	}
	buffer_destroy(&req_buffer);
	
	/* COUNT returns a byte count; convert to number of descriptors */
	tree->blocks_count /= sizeof(block_info);
	
	nlevels = log_any(tree->blocks_count, tree->elements_per_level) + 1;
	if(nlevels == tree->nlevels)
		return 0;
	
	// TODO lock
	
	/* remove old data; NULL everything so a failure below cannot leave
	 * dangling pointers behind, and zero nlevels so the next call rebuilds */
	free(tree->lss);   tree->lss   = NULL;
	free(tree->tof);   tree->tof   = NULL;
	free(tree->table); tree->table = NULL;
	tree->nlevels = 0;
	
	if( (lss = malloc(sizeof(size_t) * nlevels)) == NULL)
		goto free;
	if( (tof = malloc(sizeof(off_t)  * nlevels)) == NULL)
		goto free1;
	
	/* build table offsets (top level first) and per-level chunk sizes */
	table_size = 0;
	ls         = 1;
	tof[0]     = 0;
	for(i=1; i <= nlevels; i++){
		if(i != nlevels)
			tof[i] = tof[i - 1] + ls;
		
		table_size += ls;
		ls         *= tree->elements_per_level;
		lss[nlevels - i] = ls;
	}
	table_size *= sizeof(size_t);
	
	if( (table = malloc(table_size)) == NULL )
		goto free2;
	
	tree->table   = table;
	tree->nlevels = nlevels;
	tree->lss     = lss;   // LevelSizeSizes
	tree->tof     = tof;   // TableOFfsets
	
	// TODO unlock
	return 0;
	
free2:
	free(tof);
free1:
	free(lss);
free:
	// TODO unlock
	return -1;
} // }}}
/* Translate a logical byte offset into the owning block: walk the summary
 * table from the top level down, narrowing to a group of elements_per_level
 * children at each step, then scan the final group's block_info records from
 * the chain. On success writes the block index to *block_vid and the real
 * (physical) offset to *real_offset and returns 0; returns -1 when offset is
 * out of range or a chain read fails. */
static int tree_get(tree_t *tree, off_t offset, unsigned int *block_vid, off_t *real_offset){ // {{{
	unsigned int i,j,ret;
	off_t        level_off;
	unsigned int ptr;
	size_t       chunk_size;
	block_info   block;
	
	if(offset >= tree->table[0]) // out of range: table[0] holds the total size
		return -1;
	
	ret       = -1;
	level_off = 0;
	ptr       = 0;
	// TODO lock
	/* descend the summary levels; level_off accumulates the bytes skipped
	 * to the left of the chosen child at each level */
	for(i=1; i < tree->nlevels; i++){
		for(j=0; j < tree->elements_per_level; j++, ptr++){
			chunk_size = tree->table[ tree->tof[i] + ptr];
			
			if(offset < level_off + chunk_size)
				break;
			
			level_off += chunk_size;
		}
		/* descend: first child index of the chosen element */
		ptr *= tree->elements_per_level;
	}
	// last level
	buffer_t  req_buffer;
	buffer_init_from_bare(&req_buffer, &block, sizeof(block));
	
	/* read block_info records one by one until the offset falls inside one */
	for(j=0; j < tree->elements_per_level; j++, ptr++){
		hash_t    req_read[] = {
			{ HK(action), DATA_UINT32T( ACTION_READ )            },
			{ HK(offset), DATA_OFFT ( ptr * sizeof(block_info) ) },
			{ HK(size),   DATA_SIZET( sizeof(block_info) )       },
			{ HK(buffer), DATA_BUFFERT(&req_buffer)              },
			hash_end
		};
		if(chain_next_query(tree->chain, req_read) <= 0)
			break;
		
		if(offset < level_off + block.size){
			*block_vid   = ptr;
			*real_offset = block.real_block_off + (offset - level_off);
			ret = 0;
			break;
		}
		level_off += block.size;
	}
	buffer_destroy(&req_buffer);
	// TODO unlock
	return ret;
} // }}}
/* Rebuild the whole summary table from scratch: stream every block_info
 * record from the chain in batches of read_per_calc, accumulating a running
 * size sum per level (calcs[j]) and flushing it into the table each time a
 * level's group boundary (lss[j]) is crossed. Returns 0 on success, -1 when
 * a chain read failed before all blocks were processed. */
static int tree_recalc(tree_t *tree){ // {{{
	int            i,j;
	ssize_t        ret_size;
	unsigned int   block_size;
	unsigned int   read_size;
	buffer_t      *buffer;
	size_t        *calcs;
	size_t         blocks_left;
	unsigned int   ptr          = 0;
	unsigned int   nlevels      = tree->nlevels;
	
	if( (blocks_left = tree->blocks_count) == 0)
		return 0;
	
	/* one running sum per level
	 * NOTE(review): calloc/buffer_alloc results are not checked — confirm
	 * allocation failure is acceptable to crash on here */
	calcs  = calloc(sizeof(size_t), tree->nlevels);
	buffer = buffer_alloc();
	
	while(blocks_left > 0){
		read_size = ( (blocks_left > tree->read_per_calc) ? tree->read_per_calc : blocks_left );
		
		hash_t    req_read[] = {
			{ HK(action), DATA_UINT32T(ACTION_READ)                },
			{ HK(offset), DATA_OFFT (ptr * sizeof(block_info))     },
			{ HK(size),   DATA_SIZET(read_size * sizeof(block_info)) },
			{ HK(buffer), DATA_BUFFERT(buffer)                     },
			hash_end
		};
		if( (ret_size = chain_next_query(tree->chain, req_read)) <= 0)
			break;
		
		/* buffer_process iterates the buffer's chunks; it provides `chunk`
		 * (data pointer) and `size` (chunk byte count) to the body */
		buffer_process(buffer, ret_size, 0,
			do {
				for(i=0; i < size; i += sizeof(block_info), ptr += 1){
					block_size = ((block_info *)(chunk + i))->size;
					
					for(j=0; j < nlevels; j++){
						calcs[j] += block_size;
						
						/* group boundary on level j: flush the sum */
						if( (ptr % tree->lss[j]) == tree->lss[j] - 1){
							tree->table[ tree->tof[j] + (ptr / tree->lss[j]) ] = calcs[j];
							calcs[j] = 0;
						}
					}
				}
			}while(0)
		);
		blocks_left -= ret_size / sizeof(block_info);
	}
	
	// flush the partial (non-boundary-aligned) sums of each level
	for(j=0; j < nlevels; j++){
		tree->table[ tree->tof[j] + (ptr / tree->lss[j]) ] = calcs[j];
	}
	
	buffer_free(buffer);
	free(calcs);
	return (blocks_left == 0) ? 0 : -1;
} // }}}
START_TEST (test_structs){ hash_t structure[] = { { HK(key1), DATA_HASHT(hash_end) }, { HK(key2), DATA_HASHT(hash_end) }, { HK(key3), DATA_HASHT( { HK(default), DATA_UINT32T(0) }, hash_end )}, { HK(key4), DATA_HASHT(hash_end) }, hash_end }; request_t values[] = { { HK(key4), DATA_STRING("hello") }, { HK(key1), DATA_UINT32T(100) }, { HK(key2), DATA_OFFT(10) }, { HK(key3), DATA_UINT32T(0) }, hash_end }; ssize_t ret; char test[100] = {0}; char orig[] = "\x64\x00\x00\x00" "\x0A\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00" "hello\x00"; data_t test_data = DATA_RAW(test, 100); ret = struct_pack(structure, values, &test_data); fail_unless(ret > 0, "struct_pack failed");