void sock_free(Sock *s)
{
    close(s->fd);
    cb_free(s->sendbuf);
    cb_free(s->recvbuf);
    free(s);
}
int x25_unregister(struct x25_cs *x25)
{
    int rc = X25_BADTOKEN;

    if (!x25)
        goto out;

    struct x25_cs_internal *x25_int = x25_get_internal(x25);

    x25_stop_timers(x25);
    x25->callbacks->del_timer(x25_int->RestartTimer.timer_ptr);
    x25->callbacks->del_timer(x25_int->CallTimer.timer_ptr);
    x25->callbacks->del_timer(x25_int->ResetTimer.timer_ptr);
    x25->callbacks->del_timer(x25_int->ClearTimer.timer_ptr);
    x25->callbacks->del_timer(x25_int->AckTimer.timer_ptr);
    x25->callbacks->del_timer(x25_int->DataTimer.timer_ptr);

    cb_free(&x25_int->ack_queue);
    cb_free(&x25_int->write_queue);
    cb_free(&x25_int->interrupt_in_queue);
    cb_free(&x25_int->interrupt_out_queue);
    cb_free(&x25->link.queue);

    x25_mem_free(x25_int);
    x25_mem_free(x25);
    rc = X25_OK;
out:
    return rc;
}
void sorted_list_remove(void *list, const void *elem)
{
    sorted_list_t *l = (sorted_list_t *) list;
    sorted_list_node_t *n = l->head;
    sorted_list_node_t *prev = NULL;
    int cmp;

    while (n != NULL) {
        cmp = l->cmp_fun(n->element, elem);
        if (cmp == 0) {
            if (prev == NULL) {
                cb_assert(n == l->head);
                l->head = n->next;
            } else {
                prev->next = n->next;
            }
            l->length -= 1;
            cb_free(n->element);
            cb_free(n);
            break;
        } else if (cmp > 0) {
            return;
        } else {
            prev = n;
            n = n->next;
        }
    }
}
void wi_private_free(wi_private_t my)
{
    if (my) {
        cb_free(my->in);
        cb_free(my->partial);
        memset(my, 0, sizeof(struct wi_private));
        free(my);
    }
}
int sorted_list_add(void *list, const void *elem, size_t elem_size)
{
    sorted_list_t *l = (sorted_list_t *) list;
    sorted_list_node_t *n = l->head;
    sorted_list_node_t *prev = NULL;
    sorted_list_node_t *new_node;
    int cmp = 0;

    new_node = (sorted_list_node_t *) cb_malloc(sizeof(sorted_list_node_t));
    if (new_node == NULL) {
        return -1;
    }
    new_node->element = cb_malloc(elem_size);
    if (new_node->element == NULL) {
        cb_free(new_node);
        return -1;
    }
    memcpy(new_node->element, elem, elem_size);

    if (l->head == NULL) {
        new_node->next = NULL;
        l->head = new_node;
        l->length += 1;
        return 0;
    }

    while (n != NULL) {
        cmp = l->cmp_fun(n->element, elem);
        if (cmp >= 0) {
            break;
        }
        prev = n;
        n = n->next;
    }

    if (prev != NULL) {
        prev->next = new_node;
    } else {
        l->head = new_node;
    }

    if (cmp == 0) {
        /* Equal element already present: replace the existing node. */
        new_node->next = n->next;
        cb_free(n->element);
        cb_free(n);
    } else {
        l->length += 1;
        new_node->next = n;
    }

    return 0;
}
void sorted_list_free(void *list)
{
    sorted_list_t *l = (sorted_list_t *) list;
    sorted_list_node_t *n = NULL;

    if (l != NULL) {
        while (l->head != NULL) {
            n = l->head;
            l->head = l->head->next;
            cb_free(n->element);
            cb_free(n);
        }
        cb_free(list);
    }
}
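/*
 * Usage sketch for the sorted_list API above (illustrative only).
 * It exercises sorted_list_add(), sorted_list_remove() and sorted_list_free()
 * with integer elements. The sorted_list_create() constructor and its
 * signature are assumptions; only the add/remove/free routines appear in
 * this section.
 */
static int example_int_cmp(const void *a, const void *b)
{
    int x = *(const int *) a;
    int y = *(const int *) b;
    return (x > y) - (x < y);
}

static void sorted_list_usage_example(void)
{
    /* Hypothetical constructor; assumed to take the comparison callback. */
    void *list = sorted_list_create(example_int_cmp);
    int values[] = { 42, 7, 19, 7 };
    size_t i;

    for (i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
        /* Adding a duplicate (the second 7) replaces the existing node. */
        if (sorted_list_add(list, &values[i], sizeof(int)) != 0) {
            sorted_list_free(list);  /* allocation failure */
            return;
        }
    }

    sorted_list_remove(list, &values[0]);  /* removes 42 */
    sorted_list_free(list);                /* frees remaining nodes and the list */
}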
/*
 * Complete an asynchronous IO request.
 */
void _sysio_ioctx_complete(struct ioctx *ioctx)
{
    struct ioctx_callback *entry;

    /* update IO stats */
    _SYSIO_UPDACCT(ioctx->ioctx_write, ioctx->ioctx_cc);

    /*
     * Run the call-back queue.
     */
    while ((entry = ioctx->ioctx_cbq.tqh_first)) {
        TAILQ_REMOVE(&ioctx->ioctx_cbq, entry, iocb_next);
        (*entry->iocb_f)(ioctx, entry->iocb_data);
        cb_free(entry);
    }

    /*
     * Unlink from the file record's outstanding request queue.
     */
    LIST_REMOVE(ioctx, ioctx_link);

    if (ioctx->ioctx_fast)
        return;

    I_RELE(ioctx->ioctx_ino);
    free(ioctx);
}
void cleanup_module()
{
    nf_unregister_hook(&nfho);
    if (bufferPointer != NULL)
        cb_free(&cb);
    printk(KERN_INFO "kernel module unloaded.\n");
}
void cb_release (cb_info_t *cb)
{
    if (!cb)
        return;
    pthread_mutex_lock (&list_lock);
    cb->refcnt--;
    if ((!cb->active && cb->refcnt == 1) || cb->refcnt <= 0) {
        cb_free (cb, NULL);
    }
    pthread_mutex_unlock (&list_lock);
}
void dl_free(dl_t self)
{
    if (self) {
        dl_private_t my = self->private_state;
        if (my) {
            cb_free(my->in);
            ht_free(my->device_num_to_device_id);
            memset(my, 0, sizeof(struct dl_private));
            free(my);
        }
        memset(self, 0, sizeof(struct dl_struct));
        free(self);
    }
}
    list_for_each_safe(cur_i, cur_n, cur_list)                          \
    {                                                                    \
        found = 0;                                                       \
        cur_el = list_entry(cur_i, struct struct_name, member);          \
        list_for_each_safe(new_i, new_n, new_list)                       \
        {                                                                \
            new_el = list_entry(new_i, struct struct_name, member);      \
                                                                         \
            if (!cb_cmp(cur_el, new_el))                                 \
            {                                                            \
                /* This element is in both lists, so it is not
                 * interesting; we remove it from new_list. */           \
                found = 1;                                               \
                list_del(new_i);                                         \
                if (cb_free)                                             \
                    cb_free(new_el);                                     \
                else                                                     \
                    free(new_el);                                        \
            }                                                            \
        }                                                                \
/** close a message queue */
amqd_t amq_close(amqd_t mqdes)
{
    struct impl_t *q = (struct impl_t *) mqdes;

    q->use_count--;
    if (q->use_count == 0) {
        /* unlink */
        if (queues == q) {
            queues = q->next;
            if (queues)
                queues->prev = NULL;  /* new head has no predecessor */
        } else {
            if (q->prev)
                q->prev->next = q->next;
            if (q->next)
                q->next->prev = q->prev;
        }
        /* free memory */
        cb_free(&q->cb);
        free(q->name);
        free(q->tmp);
        free(q);
    }
    return 0;  /* success */
}
static int read_record(FILE *f, void **buffer, void *ctx)
{
    int *rec = (int *) cb_malloc(sizeof(int));
    (void) ctx;

    if (rec == NULL) {
        return FILE_MERGER_ERROR_ALLOC;
    }

    if (fread(rec, sizeof(int), 1, f) != 1) {
        cb_free(rec);
        if (feof(f)) {
            return 0;
        } else {
            return FILE_MERGER_ERROR_FILE_READ;
        }
    }

    *buffer = rec;

    return sizeof(int);
}
void test_values()
{
    char value_bin[] = {
        0,10,0,0,4,54,49,53,53,0,0,4,54,49,53,52
    };
    char id_btree_value_bin[] = {
        0,67,0,0,2,0,14,91,49,50,51,44,34,102,111,111,98,97,114,
        34,93,0,4,45,51,50,49,1,0,1,0,7,91,53,44,54,44,55,93
    };
    view_btree_value_t *v;
    view_id_btree_value_t *id_btree_v;
    view_btree_value_t *v2;
    view_id_btree_value_t *id_btree_v2;
    char *v_bin2 = NULL;
    size_t v_bin2_size = 0;
    char *id_btree_v_bin2 = NULL;
    size_t id_btree_v_bin2_size = 0;
    char *v_bin3 = NULL;
    size_t v_bin3_size = 0;
    char *id_btree_v_bin3 = NULL;
    size_t id_btree_v_bin3_size = 0;

    fprintf(stderr, "Decoding a view btree value ...\n");
    v = test_view_btree_value_decoding(value_bin, sizeof(value_bin));

    fprintf(stderr, "Decoding a view id btree value ...\n");
    id_btree_v = test_view_id_btree_value_decoding(id_btree_value_bin,
                                                   sizeof(id_btree_value_bin));

    fprintf(stderr, "Encoding the previously decoded view btree value ...\n");
    test_view_btree_value_encoding(v, &v_bin2, &v_bin2_size);
    cb_assert(v_bin2_size == sizeof(value_bin));
    cb_assert(memcmp(v_bin2, value_bin, v_bin2_size) == 0);

    fprintf(stderr, "Encoding the previously decoded view id btree value ...\n");
    test_view_id_btree_value_encoding(id_btree_v, &id_btree_v_bin2, &id_btree_v_bin2_size);
    cb_assert(id_btree_v_bin2_size == sizeof(id_btree_value_bin));
    cb_assert(memcmp(id_btree_v_bin2, id_btree_value_bin, id_btree_v_bin2_size) == 0);

    fprintf(stderr, "Decoding the previously encoded view btree value ...\n");
    v2 = test_view_btree_value_decoding(v_bin2, v_bin2_size);

    fprintf(stderr, "Decoding the previously encoded view id btree value ...\n");
    id_btree_v2 = test_view_id_btree_value_decoding(id_btree_v_bin2, id_btree_v_bin2_size);

    fprintf(stderr, "Encoding the previously decoded view btree value ...\n");
    test_view_btree_value_encoding(v2, &v_bin3, &v_bin3_size);
    cb_assert(v_bin3_size == sizeof(value_bin));
    cb_assert(memcmp(v_bin3, value_bin, v_bin3_size) == 0);

    fprintf(stderr, "Encoding the previously decoded view id btree value ...\n");
    test_view_id_btree_value_encoding(id_btree_v2, &id_btree_v_bin3, &id_btree_v_bin3_size);
    cb_assert(id_btree_v_bin3_size == sizeof(id_btree_value_bin));
    cb_assert(memcmp(id_btree_v_bin3, id_btree_value_bin, id_btree_v_bin3_size) == 0);

    free_view_btree_value(v);
    free_view_btree_value(v2);
    cb_free(v_bin2);
    cb_free(v_bin3);
    free_view_id_btree_value(id_btree_v);
    free_view_id_btree_value(id_btree_v2);
    cb_free(id_btree_v_bin2);
    cb_free(id_btree_v_bin3);
}
static void free_record(void *rec, void *ctx)
{
    (void) ctx;
    cb_free(rec);
}
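/*
 * Illustrative driver for read_record()/free_record() above (not part of the
 * original test code; requires <stdio.h>). It writes a few ints to a
 * temporary file and reads them back one record at a time. In the original
 * sources these two functions are passed as callbacks to the file merger
 * rather than called directly.
 */
static int record_io_example(void)
{
    int values[] = { 3, 1, 2 };
    void *rec = NULL;
    int ret;
    FILE *f = tmpfile();

    if (f == NULL) {
        return -1;
    }
    if (fwrite(values, sizeof(int), 3, f) != 3) {
        fclose(f);
        return -1;
    }
    rewind(f);

    /* read_record() returns sizeof(int) per decoded record and 0 on EOF;
     * anything else is a FILE_MERGER_ERROR_* code. */
    while ((ret = read_record(f, &rec, NULL)) == sizeof(int)) {
        printf("read %d\n", *(int *) rec);
        free_record(rec, NULL);
    }

    fclose(f);
    return (ret == 0) ? 0 : -1;
}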
/* Write a node using enough items from the values list to create a node
 * with uncompressed size of at least mr_quota */
static couchstore_error_t flush_spatial_partial(couchfile_modify_result *res,
                                                size_t mr_quota)
{
    char *dst;
    couchstore_error_t errcode = COUCHSTORE_SUCCESS;
    int itmcount = 0;
    char *nodebuf = NULL;
    sized_buf writebuf;
    char reducebuf[MAX_REDUCTION_SIZE];
    size_t reducesize = 0;
    uint64_t subtreesize = 0;
    cs_off_t diskpos;
    size_t disk_size;
    nodelist *i, *pel;
    node_pointer *ptr;

    if (res->values_end == res->values || !res->modified) {
        /* Empty */
        return COUCHSTORE_SUCCESS;
    }

    /* nodebuf/writebuf is very short-lived and can be large, so use regular
     * malloc heap for it: */
    nodebuf = (char *) cb_malloc(res->node_len + 1);
    if (nodebuf == NULL) {
        return COUCHSTORE_ERROR_ALLOC_FAIL;
    }
    writebuf.buf = nodebuf;

    dst = nodebuf;
    *(dst++) = (char) res->node_type;

    i = res->values->next;
    /* We don't care that we've reached mr_quota if we haven't written out
     * at least two items and we're not writing a leaf node. */
    while (i != NULL &&
           (mr_quota > 0 || (itmcount < 2 && res->node_type == KP_NODE))) {
        dst = (char *) write_kv(dst, i->key, i->data);
        if (i->pointer) {
            subtreesize += i->pointer->subtreesize;
        }
        mr_quota -= i->key.size + i->data.size + sizeof(raw_kv_length);
        i = i->next;
        res->count--;
        itmcount++;
    }

    writebuf.size = dst - nodebuf;

    errcode = (couchstore_error_t) db_write_buf_compressed(
            res->rq->file, &writebuf, &diskpos, &disk_size);
    cb_free(nodebuf);  /* here endeth the nodebuf. */
    if (errcode != COUCHSTORE_SUCCESS) {
        return errcode;
    }

    /* Store the enclosing MBB in the reducebuf */
    if (res->node_type == KV_NODE && res->rq->reduce) {
        errcode = res->rq->reduce(reducebuf, &reducesize, res->values->next,
                                  itmcount, res->rq->user_reduce_ctx);
        if (errcode != COUCHSTORE_SUCCESS) {
            return errcode;
        }
        cb_assert(reducesize <= sizeof(reducebuf));
    }

    if (res->node_type == KP_NODE && res->rq->rereduce) {
        errcode = res->rq->rereduce(reducebuf, &reducesize, res->values->next,
                                    itmcount, res->rq->user_reduce_ctx);
        if (errcode != COUCHSTORE_SUCCESS) {
            return errcode;
        }
        cb_assert(reducesize <= sizeof(reducebuf));
    }

    /* `reducesize` one time for the key, one time for the actual reduce */
    ptr = (node_pointer *) arena_alloc(res->arena,
                                       sizeof(node_pointer) + 2 * reducesize);
    if (ptr == NULL) {
        return COUCHSTORE_ERROR_ALLOC_FAIL;
    }

    ptr->key.buf = ((char *) ptr) + sizeof(node_pointer);
    ptr->reduce_value.buf = ((char *) ptr) + sizeof(node_pointer) + reducesize;
    ptr->key.size = reducesize;
    ptr->reduce_value.size = reducesize;

    /* Store the enclosing MBB that was calculated in the reduce function
     * as the key. The reduce also stores it as it is the "Original MBB"
     * used in the RR*-tree algorithm */
    memcpy(ptr->key.buf, reducebuf, reducesize);
    memcpy(ptr->reduce_value.buf, reducebuf, reducesize);

    ptr->subtreesize = subtreesize + disk_size;
    ptr->pointer = diskpos;

    pel = encode_pointer(res->arena, ptr);
    if (pel == NULL) {
        return COUCHSTORE_ERROR_ALLOC_FAIL;
    }

    res->pointers_end->next = pel;
    res->pointers_end = pel;

    res->node_len -= (writebuf.size - 1);

    res->values->next = i;
    if (i == NULL) {
        res->values_end = res->values;
    }

    return COUCHSTORE_SUCCESS;
}
/**
 * Interface (extern): Computes the k nearest neighbors for a given set of test points
 * stored in *Xtest and stores the results in two arrays *distances and *indices.
 *
 * @param *Xtest Pointer to the set of query/test points (stored as FLOAT_TYPE)
 * @param nXtest The number of query points
 * @param dXtest The dimension of each query point
 * @param *distances The distances array (FLOAT_TYPE) used to store the computed distances
 * @param ndistances The number of query points
 * @param ddistances The number of distance values for each query point
 * @param *indices Pointer to array storing the indices of the k nearest neighbors for each query point
 * @param nindices The number of query points
 * @param dindices The number of indices computed for each query point
 * @param *tree_record Pointer to struct storing all relevant information for model
 * @param *params Pointer to struct containing all relevant parameters
 */
void neighbors_extern(FLOAT_TYPE *Xtest, INT_TYPE nXtest, INT_TYPE dXtest,
                      FLOAT_TYPE *distances, INT_TYPE ndistances, INT_TYPE ddistances,
                      INT_TYPE *indices, INT_TYPE nindices, INT_TYPE dindices,
                      TREE_RECORD *tree_record, TREE_PARAMETERS *params)
{
    START_MY_TIMER(tree_record->timers + 1);

    UINT_TYPE i, j;

    tree_record->find_leaf_idx_calls = 0;
    tree_record->empty_all_buffers_calls = 0;
    tree_record->Xtest = Xtest;
    tree_record->nXtest = nXtest;
    tree_record->dist_mins_global = distances;
    tree_record->idx_mins_global = indices;

    long device_mem_bytes = tree_record->device_infos.device_mem_bytes;
    double test_mem_bytes = get_test_tmp_mem_device_bytes(tree_record, params);
    PRINT(params)("Memory needed for test patterns: %f (GB)\n", test_mem_bytes / MEM_GB);
    if (test_mem_bytes > device_mem_bytes * params->allowed_test_mem_percent) {
        PRINT(params)("Too much memory used for test patterns and temporary data!\n");
        FREE_OPENCL_DEVICES(tree_record, params);
        exit(EXIT_FAILURE);
    }

    double total_device_bytes = get_total_mem_device_bytes(tree_record, params);
    PRINT(params)("Total memory needed on device: %f (GB)\n", total_device_bytes / MEM_GB);

    START_MY_TIMER(tree_record->timers + 4);

    /* ------------------------------------- OPENCL -------------------------------------- */
    INIT_ARRAYS(tree_record, params);
    /* ------------------------------------- OPENCL -------------------------------------- */

    // initialize leaf buffer for test queries (circular buffers)
    tree_record->buffers = (circular_buffer **) malloc(tree_record->n_leaves * sizeof(circular_buffer *));
    for (i = 0; i < tree_record->n_leaves; i++) {
        tree_record->buffers[i] = (circular_buffer *) malloc(sizeof(circular_buffer));
        cb_init(tree_record->buffers[i], tree_record->leaves_initial_buffer_sizes);
    }
    tree_record->buffer_full_warning = 0;

    // initialize queue "input" (we can have at most number_test_patterns in there)
    cb_init(&(tree_record->queue_reinsert), tree_record->nXtest);

    /* ------------------------------------- OPENCL -------------------------------------- */
    START_MY_TIMER(tree_record->timers + 3);
    ALLOCATE_MEMORY_OPENCL_DEVICES(tree_record, params);
    STOP_MY_TIMER(tree_record->timers + 3);
    /* ------------------------------------- OPENCL -------------------------------------- */

    UINT_TYPE iter = 0;
    UINT_TYPE test_printed = 0;

    // allocate space for the indices added in each round; we cannot have more than original test patterns ...
    INT_TYPE *all_next_indices = (INT_TYPE *) malloc(
            tree_record->approx_number_of_avail_buffer_slots * sizeof(INT_TYPE));

    // allocate space for all return values (by FIND_LEAF_IDX_BATCH)
    tree_record->leaf_indices_batch_ret_vals = (INT_TYPE *) malloc(
            tree_record->approx_number_of_avail_buffer_slots * sizeof(INT_TYPE));

    UINT_TYPE num_elts_added;
    tree_record->current_test_index = 0;
    INT_TYPE reinsert_counter = 0;

    PRINT(params)("Starting Querying process via buffer tree...\n");
    STOP_MY_TIMER(tree_record->timers + 4);
    START_MY_TIMER(tree_record->timers + 2);

    do {
        iter++;

        // try to get elements from both queues until buffers are full
        // (each buffer is either empty or has at least space for leaves_buffer_sizes_threshold elements)
        num_elts_added = 0;

        // add enough elements to the buffers ("batch filling")
        while (num_elts_added < tree_record->approx_number_of_avail_buffer_slots &&
               (tree_record->current_test_index < tree_record->nXtest ||
                !cb_is_empty(&(tree_record->queue_reinsert)))) {
            // we remove indices from both queues here (add one element from each queue, if not empty)
            if (!cb_is_empty(&(tree_record->queue_reinsert))) {
                cb_read(&(tree_record->queue_reinsert), all_next_indices + num_elts_added);
            } else {
                all_next_indices[num_elts_added] = tree_record->current_test_index;
                tree_record->current_test_index++;
            }
            num_elts_added++;
        }

        /* ------------------------------------- OPENCL -------------------------------------- */
        FIND_LEAF_IDX_BATCH(all_next_indices, num_elts_added,
                            tree_record->leaf_indices_batch_ret_vals, tree_record, params);
        /* ------------------------------------- OPENCL -------------------------------------- */

        // we have added num_elts_added indices to the all_next_indices array
        for (j = 0; j < num_elts_added; j++) {
            INT_TYPE leaf_idx = tree_record->leaf_indices_batch_ret_vals[j];

            // if not done: add the index to the appropriate buffer
            if (leaf_idx != -1) {
                // enlarge buffer if needed
                if (cb_is_full(tree_record->buffers[leaf_idx])) {
                    PRINT(params)("Increasing buffer size ...\n");
                    tree_record->buffers[leaf_idx] = cb_double_size(tree_record->buffers[leaf_idx]);
                }
                // add next_indices[j] to buffer leaf_idx
                cb_write(tree_record->buffers[leaf_idx], all_next_indices + j);
                if (cb_get_number_items(tree_record->buffers[leaf_idx]) >=
                        tree_record->leaves_buffer_sizes_threshold) {
                    tree_record->buffer_full_warning = 1;
                }
            }
            // else: traversal of test pattern has reached root: done!
        }

        /* ------------------------------------- OPENCL -------------------------------------- */
        PROCESS_ALL_BUFFERS(tree_record, params);
        /* ------------------------------------- OPENCL -------------------------------------- */

        if (tree_record->current_test_index == tree_record->nXtest && !test_printed) {
            PRINT(params)("All query indices are in the buffer tree now (buffers or reinsert queue)...\n");
            test_printed = 1;
        }

    } while (tree_record->current_test_index < tree_record->nXtest ||
             !cb_is_empty(&(tree_record->queue_reinsert)));

    STOP_MY_TIMER(tree_record->timers + 2);
    START_MY_TIMER(tree_record->timers + 5);

    /* ------------------------------------- OPENCL -------------------------------------- */
    GET_DISTANCES_AND_INDICES(tree_record, params);
    /* ------------------------------------- OPENCL -------------------------------------- */

    // free space generated by testing
    for (i = 0; i < tree_record->n_leaves; i++) {
        cb_free(tree_record->buffers[i]);
    }

    STOP_MY_TIMER(tree_record->timers + 5);
    STOP_MY_TIMER(tree_record->timers + 1);

    PRINT(params)("Buffer full indices (overhead)=%i\n", reinsert_counter);
    PRINT(params)("\nNumber of iterations in while loop: \t\t\t\t\t\t\t%i\n", iter);
    PRINT(params)("Number of empty_all_buffers calls: \t\t\t\t\t\t\t%i\n", tree_record->empty_all_buffers_calls);
    PRINT(params)("Number of find_leaf_idx_calls: \t\t\t\t\t\t\t\t%i\n\n", tree_record->find_leaf_idx_calls);
    PRINT(params)("Elapsed total time for querying: \t\t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 1));
    PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");
    PRINT(params)("(Overhead) Elapsed time for BEFORE WHILE: \t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 4));
    PRINT(params)("(Overhead) -> ALLOCATE_MEMORY_OPENCL_DEVICES: \t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 3));
    PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");
    PRINT(params)("Elapsed time in while-loop: \t\t\t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 2));
    PRINT(params)("(I) Elapsed time for PROCESS_ALL_BUFFERS: \t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 12));
    PRINT(params)("(I.A) Function: retrieve_indices_from_buffers_gpu: \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 11));
    PRINT(params)("(I.B) Do brute-force (do_brute.../process_buffers_...chunks_gpu : \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 18));
    PRINT(params)("(I.B.1) -> Elapsed time for clEnqueueWriteBuffer (INTERLEAVED): \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 19));
    PRINT(params)("(I.B.1) -> Elapsed time for memcpy (INTERLEAVED): \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 21));
    PRINT(params)("(I.B.1) -> Elapsed time for waiting for chunk (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 22));
    PRINT(params)("(I.B.2) -> Number of copy calls: %i\n", tree_record->counters[0]);

    if (!training_chunks_inactive(tree_record, params)) {
        PRINT(params)("(I.B.4) -> Overhead distributing indices to chunks (in seconds): \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 23));
        PRINT(params)("(I.B.5) -> Processing of whole chunk (all three phases, in seconds): \t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 24));
        PRINT(params)("(I.B.6) -> Processing of chunk before brute (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 25));
        PRINT(params)("(I.B.7) -> Processing of chunk after brute (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 26));
        PRINT(params)("(I.B.8) -> Processing of chunk after brute, buffer release (in seconds): \t%2.10f\n", GET_MY_TIMER(tree_record->timers + 27));
        PRINT(params)("(I.B.9) -> Number of release buffer calls: %i\n", tree_record->counters[0]);
    }

    if (USE_GPU) {
        PRINT(params)("(I.B.3) -> Elapsed time for TEST_SUBSET (in seconds): \t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 13));
        PRINT(params)("(I.B.4) -> Elapsed time for NN Search (in seconds): \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 14));
        PRINT(params)("(I.B.5) -> Elapsed time for UPDATE (in seconds): \t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 15));
        PRINT(params)("(I.B.6) -> Elapsed time for OVERHEAD (in seconds): \t\t\t\t%2.10f\n",
                      GET_MY_TIMER(tree_record->timers + 12) - GET_MY_TIMER(tree_record->timers + 14) -
                      GET_MY_TIMER(tree_record->timers + 15) - GET_MY_TIMER(tree_record->timers + 13));
    }

    PRINT(params)("(II) FIND_LEAF_IDX_BATCH : \t\t\t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 16));
    PRINT(params)("(III) Elapsed time for final brute-force step : \t\t\t\t%2.10f\n\n", GET_MY_TIMER(tree_record->timers + 20));
    PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");
    PRINT(params)("(DIFF) While - PROCESS_ALL_BUFFERS - FIND_LEAF_IDX_BATCH: \t\t\t%2.10f\n",
                  GET_MY_TIMER(tree_record->timers + 2) - GET_MY_TIMER(tree_record->timers + 12) -
                  GET_MY_TIMER(tree_record->timers + 16));
    PRINT(params)("(Overhead) Elapsed time for AFTER WHILE : \t\t\t\t\t%2.10f\n", GET_MY_TIMER(tree_record->timers + 5));
    PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n\n");
    PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");
    PRINT(params)("QUERY RUNTIME: %2.10f ", GET_MY_TIMER(tree_record->timers + 1));
    PRINT(params)("PROCESS_ALL_BUFFERS: %2.10f ", GET_MY_TIMER(tree_record->timers + 12));
    PRINT(params)("FIND_LEAF_IDX_BATCH: %2.10f ", GET_MY_TIMER(tree_record->timers + 16));
    PRINT(params)("WHILE_OVERHEAD: %2.10f ",
                  GET_MY_TIMER(tree_record->timers + 2) - GET_MY_TIMER(tree_record->timers + 12) -
                  GET_MY_TIMER(tree_record->timers + 16));
    PRINT(params)("\n");
    PRINT(params)("-----------------------------------------------------------------------------------------------------------------------------\n");

    // free all allocated memory related to querying
    for (i = 0; i < tree_record->n_leaves; i++) {
        free(tree_record->buffers[i]);
    }
    free(tree_record->buffers);

    // free arrays
    free(tree_record->all_stacks);
    free(tree_record->all_depths);
    free(tree_record->all_idxs);
    free(all_next_indices);
    free(tree_record->leaf_indices_batch_ret_vals);
}
void sorted_list_free_iterator(void *iterator)
{
    cb_free(iterator);
}
/*
 * Free callback entry.
 */
void _sysio_ioctx_cb_free(struct ioctx_callback *cb)
{
    cb_free(cb);
}