/*
 * Timing smoke test: insert 100 random keys into an AVL tree, then look up
 * 100 random keys, printing the average per-operation latency (nanoseconds
 * and microseconds) plus a count of failed operations.
 *
 * Fixes vs. original:
 *  - avl_alloc() result is checked before dereferencing (was a potential
 *    NULL dereference on allocation failure).
 *  - the uint64_t average is cast to unsigned long to match the "%lu"
 *    format specifier (passing uint64_t directly is undefined behavior on
 *    platforms where unsigned long is 32 bits).
 */
static void test_2() {
	struct avl_node *root = NULL;
	struct timespec a;
	struct timespec b;
	uint64_t d = 0;
	int i = 0;
	int rv;
	int error = 0;

	srand(time(0));

	/* insert: time each avl_insert() and accumulate the deltas */
	for (i = 0; i < 100; i++) {
		struct avl_node *tmp = avl_alloc(0);
		if (!tmp) {
			fprintf(stderr, "'avl_alloc()' failed\n");
			return;
		}
		tmp->key = rand() % 1000000000;
		time_now(&a);
		rv = avl_insert(&root, tmp);
		time_now(&b);
		if (!rv)
			error++;  /* duplicate key or insert failure */
		d += time_diff(&a, &b);
	}
	/* average */
	d = d / i;
	fprintf(stdout, "insertion: %lu nano/s, %f micro/s, errors: %d\n",
		(unsigned long) d, (float) d / 1000, error);

	/* lookup: random keys, so misses ("errors") are expected */
	for (i = 0, d = 0, error = 0; i < 100; i++) {
		struct avl_node *tmp;
		size_t key;

		key = rand() % 1000000000;
		time_now(&a);
		tmp = avl_lookup(root, key);
		time_now(&b);
		if (!tmp)
			error++;
		d += time_diff(&a, &b);
	}
	/* average */
	d = d / i;
	fprintf(stdout, "lookup: %lu nano/s, %f micro/s, errors: %d\n",
		(unsigned long) d, (float) d / 1000, error);

	avl_dump_to_file(root, __func__);
}
/* this may replace root, which is why we pass
 * in a AvlTree * */
void avlInsert(AvlTree *t, tw_event *key)
{
	int dir;

	/* Empty subtree: create a fresh leaf holding this event. */
	if (*t == AVL_EMPTY) {
		*t = avl_alloc();
		if (*t == NULL) {
			tw_error(TW_LOC, "Out of AVL tree nodes!");
		}
		(*t)->child[0] = AVL_EMPTY;
		(*t)->child[1] = AVL_EMPTY;
		(*t)->key = key;
		(*t)->height = 1;
		return;
	}

	/*
	 * Ordering is lexicographic over (recv_ts, event_id, send_pe):
	 * fall through to the next field only on a tie in the previous one.
	 */
	if (key->recv_ts != (*t)->key->recv_ts) {
		dir = key->recv_ts > (*t)->key->recv_ts;
	} else if (key->event_id != (*t)->key->event_id) {
		dir = key->event_id > (*t)->key->event_id;
	} else {
		/* Full tie on (recv_ts, event_id): break on send_pe.  A tie
		 * on all three fields shouldn't happen, but is tolerated
		 * (the duplicate goes into child[0]). */
		if (key->send_pe == (*t)->key->send_pe) {
			tw_printf(TW_LOC, "The events are identical!!!\n");
		}
		dir = key->send_pe > (*t)->key->send_pe;
	}

	avlInsert(&(*t)->child[dir], key);
	avlRebalance(t);
}
/*
 * Allocate an m_buf head from the mbuf head cache plus a data area of at
 * least `size` bytes (aligned via MBUF_DATA_ALIGN) with room for the
 * shared-info block appended after the data.
 *
 * Returns the initialized m_buf, or NULL if either allocation fails
 * (the head is released again when the data allocation fails).
 */
struct m_buf *__alloc_mbuf(unsigned int size, gfp_t gfp_mask)
{
	struct kmem_cache *cache = mbuf_head_cache;
	struct mbuf_shared_info *shinfo;
	struct m_buf *mbuf;
	u8 *data;

	/* Get the HEAD */
	mbuf = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
	if (!mbuf)
		return NULL;

	/* Get the DATA. Size must match mbuf_add_mtu(). */
	size = MBUF_DATA_ALIGN(size);
	data = avl_alloc(size + sizeof(struct mbuf_shared_info),
			 smp_processor_id() % NTA_NR_CPUS, gfp_mask);
	if (!data) {
		kmem_cache_free(cache, mbuf);
		return NULL;
	}

	/* Zero everything up to (not including) truesize, then set up the
	 * reference count and the head/data/tail/end pointers. */
	memset(mbuf, 0, offsetof(struct m_buf, truesize));
	mbuf->truesize = size + sizeof(struct m_buf);
	atomic_set(&mbuf->users, 1);
	mbuf->head = data;
	mbuf->data = data;
	mbuf->tail = data;
	mbuf->end = data + size;

	/* make sure we initialize shinfo sequentially */
	shinfo = mbuf_shinfo(mbuf);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	return mbuf;
}
static void test_1() { avl_node *root = NULL; int i, rv; int input[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; /* int input[] = { 100, 20, 150, 6, 26, 27 }; */ /* int input[] = { 100, 20, 150, 6, 26, 25 }; */ /* int input[] = { 3769, 4163, 3465, 4143, 4396, 4011 }; */ /* int input[] = { 3769, 4163, 3465, 4143, 4396, 4144 }; */ for (i = 0; i < ARRAY_SIZE(input); i++) { struct avl_node *n; n = avl_alloc(0); n->key = input[i]; rv = avl_insert(&root, n); if (rv == 0) fprintf(stdout, "'avl_insert()' error, %ld\n", n->key); } for (i = 0; i < ARRAY_SIZE(input); i++) { struct avl_node *n; n = avl_lookup(root, input[i]); if (n) { struct avl_node *l = n->link[0]; struct avl_node *r = n->link[1]; fprintf(stdout, "-> %ld { %ld, %ld } %d\n", n->key, l ? l->key:-99, r ? r->key:-99, n->bf); } } avl_dump_to_file(root, __func__); }