/* Smoke test: insert 1, 2, 3 into an empty tree and print the values
 * reached by walking the right spine (1 < 2 < 3, so each insert goes right). */
int main() {
    bst tree;
    tree.root = NULL;

    bst_add(&tree, 1);
    printf("%d\n", tree.root->value);

    bst_add(&tree, 2);
    node *cursor = tree.root->right;
    printf("%d\n", cursor->value);

    bst_add(&tree, 3);
    cursor = cursor->right;
    /* printf("%d\n", cursor->value); */
}
/* Returns a deep copy of 'bst': every pair is duplicated via
 * index_copy/data_copy, so the result shares no nodes with the input.
 * An empty (NULL) input yields an empty tree. */
bst_t bst_copy(bst_t bst) {
    bst_t copy = bst_empty();
    if (bst == NULL) {
        return copy;
    }
    /* Duplicate the root pair, then recurse on both subtrees. */
    index_t idx = index_copy(pair_fst(bst->pair));
    data_t dat = data_copy(pair_snd(bst->pair));
    copy = bst_add(copy, idx, dat);
    copy->left = bst_copy(bst->left);
    copy->right = bst_copy(bst->right);
    return copy;
}
/* Inserts ('index', 'data') into 'bst' and returns the (possibly new) root.
 * If 'index' is already present the tree is left unchanged.
 *
 * BUG FIX: the old postcondition asserted bst_length(bst) == length + 1
 * unconditionally, which aborts whenever the index already exists (the EQ
 * branch inserts nothing) — including when a duplicate is found deeper in
 * a recursive call. The postcondition now accepts "unchanged" as well. */
bst_t bst_add(bst_t bst, index_t index, data_t data) {
    /* PRE (intended, currently disabled):
     * assert(bst_search(index, bst) == NULL); */
    unsigned int length = bst_length(bst);
    switch (bst_type(bst)) {
    case isNull:
        bst = bst_empty();
        bst->pair = pair_from_index_data(index, data);
        break;
    case isEmpty:
        bst->pair = pair_from_index_data(index, data);
        break;
    case isNotEmpty:
        switch (index_compare(index, bst)) {
        case EQ:
            /* Duplicate index: nothing to insert. */
            break;
        case LT:
            bst->izq = bst_add(bst->izq, index, data);
            break;
        case GT:
            bst->der = bst_add(bst->der, index, data);
            break;
        }
        break;
    }
    /* POST: exactly one element added, or none if 'index' was a duplicate
     * (possibly detected in a deeper recursive call). */
    assert(bst_length(bst) == length + 1 || bst_length(bst) == length);
    return (bst);
}
/* Inserts ('index', 'data') into 'bst' and returns the (possibly new) root.
 * A duplicate index leaves the tree unchanged.
 *
 * BUG FIXES:
 *  - The old postcondition asserted prev_length + 1 == bst_length(bst)
 *    unconditionally, which aborts on a duplicate key: when 'index' equals
 *    the node's key, neither branch is taken and nothing is inserted (the
 *    driver elsewhere in this file inserts key 1 twice, so this path is
 *    reachable). The assert now also accepts an unchanged length.
 *  - calloc() was used without a NULL check; allocation failure is now
 *    caught before the new node is dereferenced. */
bst_t bst_add(bst_t bst, index_t index, data_t data) {
    unsigned int prev_length = bst_length(bst);
    if (bst != NULL) {
        if (index_is_less_than(index, pair_fst(bst->pair))) {
            bst->left = bst_add(bst->left, index, data);
        } else if (!index_is_equal(index, pair_fst(bst->pair))) {
            bst->right = bst_add(bst->right, index, data);
        }
        /* else: duplicate index — tree is left as-is. */
    } else {
        bst_t add = calloc(1, sizeof(struct _tree_node_t));
        assert(add != NULL); /* out-of-memory is fatal here */
        add->pair = pair_from_index_data(index, data);
        add->left = NULL;
        add->right = NULL;
        bst = add;
    }
    /* POST: length grew by one, or stayed the same on a duplicate
     * (possibly detected in a deeper recursive call). */
    assert(prev_length + 1 == bst_length(bst) || prev_length == bst_length(bst));
    return bst;
}
/* Driver: build a tree from the leading digits of pi (3,1,4,1,5,9,2,6)
 * — note key 1 is inserted twice — then traverse it with 'dump'. */
int main() {
    node_t *tree = 0;
    static const int keys[] = { 3, 1, 4, 1, 5, 9, 2, 6 };
    int i;
    for (i = 0; i < (int)(sizeof keys / sizeof keys[0]); ++i) {
        bst_add(&tree, keys[i]);
    }
    node_linked_search(tree, dump, 0);
    return 0;
}
/* Adds the pair ('word', 'def') to 'dict' and returns it.
 * PRE: all arguments non-NULL and 'word' not already defined. */
dict_t dict_add(dict_t dict, word_t word, def_t def) {
    assert(dict != NULL);
    assert(word != NULL);
    assert(def != NULL);
    assert(!dict_exists(dict, word));
    index_t key = index_from_string(word);
    data_t value = data_from_string(def);
    dict->length += 1;
    dict->data = bst_add(dict->data, key, value);
    return dict;
}
/* Adds the pair ('word', 'def') to 'dict'.
 * PRE: non-NULL arguments and 'word' not yet in the dictionary.
 * POST: the result holds every element of 'dict' plus ('word', 'def'). */
dict_t dict_add(dict_t dict, word_t word, def_t def) {
    assert(dict != NULL && word != NULL && def != NULL && !dict_exists(dict, word));
    dict->length = dict->length + 1;
    dict->data = bst_add(dict->data,
                         index_from_string(word),
                         data_from_string(def));
    return (dict);
}
void *test(void *data) { DDPRINT("starting test\n",NULL); //get the per-thread data thread_data_t *d = (thread_data_t *)data; //scale percentages of the various operations to the range 0..255 //this saves us a floating point operation during the benchmark //e.g instead of random()%100 to determine the next operation we will do, we can simply do random()&256 //this saves time on some platfroms uint32_t read_thresh = 256 * finds / 100; //uint32_t write_thresh = 256 * (finds + inserts) / 100; //place the thread on the apropriate cpu set_cpu(d->id); //initialize the custom memeory allocator for this thread (we do not use malloc due to concurrency bottleneck issues) ssalloc_init(); // ssalloc_align(); bst_init_local(d->id); //for fine-grain latency measurements, we need to get the lenght of a getticks() function call, which is also counted //by default when we do getticks(); //code... getticks(); PF_START and PF_STOP use this when fine grain measurements are enabled PF_CORRECTION; uint32_t rand_max; //seed the custom random number generator seeds = seed_rand(); rand_max = max_key; uint32_t op; skey_t key; int i; int last = -1; DDPRINT("staring initial insert\n",NULL); DDPRINT("number of inserts: %u up to %u\n",d->num_add,rand_max); //before starting the test, we insert a number of elements in the data structure //we do this at each thread to avoid the situation where the entire data structure //resides in the same memory node // int num_elem = 0; // if (num_threads == 1){ // num_elem = max_key/2; // } else{ // num_elem = max_key/4; // } // pthread_mutex_lock(d->init_lock); // fprintf(stderr, "Starting critical section %d\n", d->id); for (i=0;i<max_key/4;++i) { key = my_random(&seeds[0],&seeds[1],&seeds[2]) & rand_max; DDPRINT("key is %u\n",key); //we make sure the insert was effective (as opposed to just updating an existing entry) if (d->id < 2) { if (bst_add(key,root, d->id)!=TRUE) { i--; } } } // fprintf(stderr, "Exiting critical section %d\n", d->id); // 
pthread_mutex_unlock(d->init_lock); DDPRINT("added initial data\n",NULL); bool_t res; /* Init of local data if necessary */ ticks t1,t2; /* Wait on barrier */ // fprintf(stderr, "Waiting on barrier; thread %d\n", d->id); barrier_cross(d->barrier); //start the test while (*running) { //generate a key (node that rand_max is expected to be a power of 2) key = my_random(&seeds[0],&seeds[1],&seeds[2]) & rand_max; //generate the operation op = my_random(&seeds[0],&seeds[1],&seeds[2]) & 0xff; if (op < read_thresh) { //do a find operation //PF_START and PF_STOP can be used to do latency measurements of the operation //to enable them, DO_TIMINGS must be defined at compile time, otherwise they do nothing //PF_START(2); bst_contains(key,root, d->id); //PF_STOP(2); } else if (last == -1) { //do a write operation if (bst_add(key,root, d->id)) { d->num_insert++; last=1; } } else { //do a delete operation if (bst_remove(key,root, d->id)) { d->num_remove++; last=-1; } } d->num_operations++; //memory barrier to ensure no unwanted reporderings are happening //MEM_BARRIER; } //summary of the fine grain measurements if enabled PF_PRINT; return NULL; }