/*
 * Allocate data_size bytes from the pool using best-fit search over the
 * free-block red-black tree (keyed by block data size).
 *
 * Returns a pointer to the block's data area, or NULL when no free block
 * is large enough.
 */
void *fms_mem_alloc(fms_mem_pool *mem_pool, fms_u32 data_size)
{
    struct rb_node *pos = mem_pool->free_blocks.rb_node;
    fms_mem_block *block = NULL;
    struct rb_node *best_fit = NULL;
    fms_u32 block_data_size = 0;

    /* Round the request up to a 4-byte boundary; unaligned sizes were
     * observed to fault downstream (original note suggested tracing). */
    data_size = mem_align_ptr(data_size, 4);

    /* Best-fit descent: remember the smallest block >= data_size. */
    while (pos) {
        block = rb_entry(pos, fms_mem_block, rb_node);
        block_data_size = block->data_size;
        if (data_size < block->data_size) {
            best_fit = pos;
            pos = pos->rb_left;
        } else if (data_size > block->data_size) {
            pos = pos->rb_right;
        } else {
            best_fit = pos;        /* exact size match */
            break;
        }
    }

    if (best_fit == NULL) {
        /* Pool has no block large enough. BUG FIX: previously only
         * printed a message and fell through, dereferencing NULL in
         * rb_entry()/rb_erase() below. */
        printf("mem_pool no space\n");
        return NULL;
    }

    if (NULL == pos) {
        /* No exact match: best_fit is the smallest block strictly larger
         * than data_size (implicit: block->data_size > data_size). */
        block = rb_entry(best_fit, fms_mem_block, rb_node);
        block_data_size = block->data_size;
        if (data_size + sizeof(fms_mem_block) > block->data_size) {
            /* Remainder too small to hold a header: hand out whole block. */
        } else {
            block->data_size = data_size;   /* trim to requested size */
        }
    }

    rb_erase(best_fit, &mem_pool->free_blocks);
    mem_block_insert_allocated(mem_pool, block);
    block->free = FMS_FALSE;

    if (block->data_size != block_data_size) {
        /* The block was trimmed: carve the tail into a new free block. */
        fms_mem_block *new_block =
            (fms_mem_block *)((fms_u8 *)block->data + block->data_size);
        new_block->data_size = block_data_size - block->data_size
                               - sizeof(fms_mem_block);
        /* Note: the split-off block intentionally keeps the same id. */
        new_block->id = mem_pool->block_id;
        new_block->free = FMS_TRUE;
        list_add(&new_block->entry, &block->entry);
        mem_block_insert_free(mem_pool, new_block);
    }

    return (void *)block->data;
}
void insert(int val){ struct rb_node **p; struct rb_node *parent = NULL; tree_entry_t *__rq; // Do the allocation before we start the main critical section tree_entry_t *new_node = kmalloc(sizeof(tree_entry_t), 1); new_node->val = val; splock(&gKrbtreeLock); p = &root.rb_node; while (*p) { parent = *p; __rq = rb_entry(parent, tree_entry_t, rb_node); if (val < __rq->val) p = &(*p)->rb_left; else if (val > __rq->val) p = &(*p)->rb_right; else { spunlock(&gKrbtreeLock); // Someone else put this in already kfree(new_node); return; } } rb_link_node(&new_node->rb_node, parent, p); rb_insert_color(&new_node->rb_node, &root); spunlock(&gKrbtreeLock); return; }
/*
 * Record a directory block (ino, blkno, blkcount) in the dirblocks tree,
 * keyed by e_blkno. Returns 0 or OCFS2_ET_NO_MEMORY.
 * NOTE(review): the descent has no equal-key branch, so the caller must
 * never add the same blkno twice — a duplicate would spin here forever.
 */
errcode_t o2fsck_add_dir_block(o2fsck_dirblocks *db, uint64_t ino, uint64_t blkno, uint64_t blkcount)
{
	struct rb_node **link = &db->db_root.rb_node;
	struct rb_node *parent = NULL;
	o2fsck_dirblock_entry *entry, *cur;

	entry = calloc(1, sizeof(*entry));
	if (!entry)
		return OCFS2_ET_NO_MEMORY;

	entry->e_ino = ino;
	entry->e_blkno = blkno;
	entry->e_blkcount = blkcount;

	while (*link) {
		parent = *link;
		cur = rb_entry(parent, o2fsck_dirblock_entry, e_node);
		if (entry->e_blkno < cur->e_blkno)
			link = &(*link)->rb_left;
		else if (entry->e_blkno > cur->e_blkno)
			link = &(*link)->rb_right;
	}

	rb_link_node(&entry->e_node, parent, link);
	rb_insert_color(&entry->e_node, &db->db_root);
	return 0;
}
// 查询或添加结点 // link,结点不存在时是否挂到树上 static lts_sjson_obj_node_t *__lts_sjson_search(lts_rb_root_t *root, lts_sjson_obj_node_t *obj_node, int link) { lts_sjson_obj_node_t *s; lts_rb_node_t *parent, **iter; parent = NULL; iter = &root->rb_node; while (*iter) { int balance; parent = *iter; s = rb_entry(parent, lts_sjson_obj_node_t, tnode); balance = lts_str_compare(&obj_node->key, &s->key); if (balance < 0) { iter = &(parent->rb_left); } else if (balance > 0) { iter = &(parent->rb_right); } else { return s; } } if (link) { rb_link_node(&obj_node->tnode, parent, iter); rb_insert_color(&obj_node->tnode, root); } return obj_node; }
static memory_node_t *memory_engine_lookup_shm_node_for_cache( struct rb_root *shm_root, const uint phyaddress, const uint size) { struct rb_node *n = shm_root->rb_node; memory_node_t *tmp_node; while (n) { tmp_node = rb_entry(n, memory_node_t, __rb_node); if (phyaddress < tmp_node->m_phyaddress) n = n->rb_left; else if (phyaddress > tmp_node->m_phyaddress) if ((phyaddress + size) <= (MEMNODE_ALIGN_ADDR(tmp_node) + MEMNODE_ALIGN_SIZE(tmp_node))) { return tmp_node; } else { n = n->rb_right; } else { if (size <= MEMNODE_ALIGN_SIZE(tmp_node)) { return tmp_node; } else { return NULL; } } } return NULL; }
static int forward(struct disc_net *net, int ifindex, void *pkt, int len) { struct pppoe_serv_t *n; struct tree *t = &net->tree[ifindex & HASH_BITS]; struct rb_node **p = &t->root.rb_node, *parent = NULL; int r = 0; struct ethhdr *ethhdr = (struct ethhdr *)(pkt + 4); pthread_mutex_lock(&t->lock); while (*p) { parent = *p; n = rb_entry(parent, typeof(*n), node); if (ifindex < n->ifindex) p = &(*p)->rb_left; else if (ifindex > n->ifindex) p = &(*p)->rb_right; else { if (!memcmp(ethhdr->h_dest, bc_addr, ETH_ALEN) || !memcmp(ethhdr->h_dest, n->hwaddr, ETH_ALEN)) { *(int *)pkt = len; triton_context_call(&n->ctx, (triton_event_func)pppoe_serv_read, pkt); r = 1; } break; } } pthread_mutex_unlock(&t->lock); return r; }
void remove (int val){ struct rb_node *n; tree_entry_t *rq; splock(&gKrbtreeLock); n = root.rb_node; while (n) { rq = rb_entry(n, tree_entry_t, rb_node); if (val < rq->val) n = n->rb_left; else if (val > rq->val) n = n->rb_right; else { rb_erase(n, &root); spunlock(&gKrbtreeLock); kfree(rq); // Yes, this is out of the lock. It should give us a ballpark successful_deletes++; return; } } spunlock(&gKrbtreeLock); return; }
/*
 * Go through the dirblocks pre-filling them. We try to coalesce adjacent
 * ones. Don't care to return errors, because it's a cache pre-fill.
 *
 * State machine per iteration: while io_blkno is set we are building a
 * contiguous run [io_blkno, next_blkno); an entry that extends the run
 * just bumps the counters, anything else flushes the run with one
 * ocfs2_read_blocks() call and starts a new run at the current entry.
 * Returns the number of blocks successfully read into pre_cache_buf.
 */
static int try_to_cache(ocfs2_filesys *fs, struct rb_node *node, char *pre_cache_buf, int pre_cache_blocks)
{
	int cached_blocks = 0;
	o2fsck_dirblock_entry *dbe;
	uint64_t io_blkno = 0, next_blkno = 0;
	int count = 0;
	errcode_t err;
	uint64_t blocks_seen = 0;

	o2fsck_reset_blocks_cached();
	for (; node; node = rb_next(node)) {
		blocks_seen++;
		dbe = rb_entry(node, o2fsck_dirblock_entry, e_node);

		/* A run is in progress: extend it or flush it. */
		if (io_blkno) {
			assert(count);
			assert(next_blkno > io_blkno);

			/* Adjacent block and still room in the buffer: extend. */
			if ((next_blkno == dbe->e_blkno) && (count < pre_cache_blocks)) {
				count++;
				next_blkno++;
				continue;
			}

			/* Run too small to bother caching: give up entirely. */
			if (!o2fsck_worth_caching(count)) {
				io_blkno = 0;
				break;
			}

			/* Flush the completed run into the pre-cache buffer. */
			err = ocfs2_read_blocks(fs, io_blkno, count, pre_cache_buf);
			io_blkno = 0;
			next_blkno = 0;
			if (err)
				break;
			cached_blocks += count;
			count = 0;
		}

		/* Start a new run at this entry. */
		assert(!io_blkno);
		io_blkno = dbe->e_blkno;
		next_blkno = io_blkno + 1;
		count = 1;
	}

	/* Catch the last pre-fill buffer */
	if (io_blkno && o2fsck_worth_caching(count)) {
		assert(count);
		err = ocfs2_read_blocks(fs, io_blkno, count, pre_cache_buf);
		if (!err)
			cached_blocks += count;
	}

	return cached_blocks;
}
/*
 * In-order dump of the subtree's sid values into grid `a`.
 * *row tracks depth (incremented on entry, restored on exit);
 * *col advances by one for each node visited.
 */
void _display(struct rb_node *temp, int (*a)[10], int *row, int *col)
{
	if (!temp)
		return;
	(*row)++;
	_display(temp->rb_left, a, row, col);
	a[*row][(*col)++] = rb_entry(temp, SAWON, tree)->sid;
	_display(temp->rb_right, a, row, col);
	(*row)--;
}
/*
 * Tear down an icount: release the single-ref bitmap, drain every node
 * from the multiple-ref tree, then free the container itself.
 */
void o2fsck_icount_free(o2fsck_icount *icount)
{
	struct rb_node *first;
	icount_node *entry;

	ocfs2_bitmap_free(icount->ic_single_bm);
	for (;;) {
		first = rb_first(&icount->ic_multiple_tree);
		if (first == NULL)
			break;
		entry = rb_entry(first, icount_node, in_node);
		rb_erase(first, &icount->ic_multiple_tree);
		free(entry);
	}
	free(icount);
}
/*
 * Empty the kllds rbtree: release each entry's value and return the
 * entry to its slab cache. The write lock is held for the whole drain.
 */
static void kllds_remove_rbtree(void)
{
	struct rb_node *first;
	kllds_entry *entry;

	write_lock(&kllds_rwlck);
	for (;;) {
		first = rb_first(&kllds_rbtree);
		if (!first)
			break;
		entry = rb_entry(first, kllds_entry, rb);
		rb_erase(first, &kllds_rbtree);
		kfree(entry->val);
		kmem_cache_free(tmp_kllds_cache, entry);
	}
	write_unlock(&kllds_rwlck);
}
/*
 * Register serv in its net's per-ifindex tree and return the net's
 * handler fd. Returns -1 when the net cannot be found/created or when
 * the ifindex is already registered.
 */
int pppoe_disc_start(struct pppoe_serv_t *serv)
{
	struct disc_net *net = find_net(serv->net);
	struct rb_node **p, *parent = NULL;
	struct tree *t;
	int ifindex = serv->ifindex, i;
	struct pppoe_serv_t *n;

	if (!net) {
		/* Re-check under nets_lock so two concurrent starters don't
		 * both create a disc_net for the same namespace. */
		pthread_mutex_lock(&nets_lock);
		net = find_net(serv->net);
		if (!net)
			net = init_net(serv->net);
		pthread_mutex_unlock(&nets_lock);
		if (!net)
			return -1;
	}

	/* Trees are sharded by the low bits of the ifindex. */
	t = &net->tree[ifindex & HASH_BITS];
	pthread_mutex_lock(&t->lock);
	p = &t->root.rb_node;
	while (*p) {
		parent = *p;
		n = rb_entry(parent, typeof(*n), node);
		i = n->ifindex;
		if (ifindex < i)
			p = &(*p)->rb_left;
		else if (ifindex > i)
			p = &(*p)->rb_right;
		else {
			/* Duplicate registration is a caller bug. */
			pthread_mutex_unlock(&t->lock);
			log_error("pppoe: disc: attempt to add duplicate ifindex\n");
			return -1;
		}
	}
	rb_link_node(&serv->node, parent, p);
	rb_insert_color(&serv->node, &t->root);
	pthread_mutex_unlock(&t->lock);

	return net->hnd.fd;
}
static memory_node_t *memory_engine_lookup_shm_node( struct rb_root *shm_root, const uint phyaddress) { struct rb_node *n = shm_root->rb_node; memory_node_t *tmp_node; while (n) { tmp_node = rb_entry(n, memory_node_t, __rb_node); if (phyaddress < tmp_node->m_phyaddress) n = n->rb_left; else if (phyaddress > tmp_node->m_phyaddress) n = n->rb_right; else return tmp_node; } return NULL; }
/*
 * Drop the cached ACK matching the syn frame currently being served,
 * without sending a reply. No-op when no matching cache entry exists.
 */
void su_peer_reply_ignore_act(su_peer_t *psar)
{
	rb_key_cache_t key;
	struct rb_node *hit;
	cache_t *victim;

	/* Build the cache key from the frame under service. */
	memcpy(&key.destaddr, &psar->nowsynframe->srcaddr, sizeof(SAUN));
	key.destlen = psar->nowsynframe->srclen;
	key.seq = psar->nowsynframe->recvhdr.seq;
	key.sid = psar->nowsynframe->recvhdr.sid;

	hit = rb_search(&psar->rbackcache, &key);
	if (hit == NULL)
		return;

	victim = rb_entry(hit, cache_t, rbn);
	list_remove(&victim->frame.node);
	rb_erase(&victim->rbn, &psar->rbackcache);
	free(victim);
}
/* Return the entry whose key equals search_key, or NULL when absent. */
static inline t_llds_entry *t_llds_search_by_key(struct rb_root *tn_root, uint64_t search_key)
{
	struct rb_node *pos = tn_root->rb_node;

	while (pos) {
		t_llds_entry *entry = rb_entry(pos, t_llds_entry, rb);

		if (search_key == entry->key)
			return entry;
		pos = (search_key < entry->key) ? pos->rb_left : pos->rb_right;
	}
	return NULL;
}
/*
 * Insert shm_node into the tree ordered by m_phyaddress.
 * Equal addresses are placed in the right subtree.
 */
static void memory_engine_insert_shm_node(struct rb_root *shm_root, memory_node_t *shm_node)
{
	struct rb_node **link = &shm_root->rb_node;
	struct rb_node *parent = NULL;
	memory_node_t *cur;

	while (*link) {
		parent = *link;
		cur = rb_entry(parent, memory_node_t, __rb_node);
		link = (shm_node->m_phyaddress < cur->m_phyaddress)
			? &parent->rb_left
			: &parent->rb_right;
	}
	rb_link_node(&shm_node->__rb_node, parent, link);
	rb_insert_color(&shm_node->__rb_node, shm_root);
}
/*
 * Insert shm_node into the physical-address tree, ordered by
 * m_phyaddress. Equal addresses go to the right subtree.
 */
static void MV_SHM_insert_phyaddress_node(struct rb_root *shm_root, shm_address_t *shm_node)
{
	struct rb_node **link = &shm_root->rb_node;
	struct rb_node *parent = NULL;
	shm_address_t *cur;

	while (*link) {
		parent = *link;
		cur = rb_entry(parent, shm_address_t, phys_node);
		link = (shm_node->m_phyaddress < cur->m_phyaddress)
			? &parent->rb_left
			: &parent->rb_right;
	}
	rb_link_node(&shm_node->phys_node, parent, link);
	rb_insert_color(&shm_node->phys_node, shm_root);
}
/* XXX this is currently fragile in that it requires that the caller make
 * sure that the node doesn't already exist in the tree — a duplicate
 * in_blkno would never advance the descent loop below. */
static void icount_insert(o2fsck_icount *icount, icount_node *in)
{
	struct rb_node **link = &icount->ic_multiple_tree.rb_node;
	struct rb_node *parent = NULL;
	icount_node *cur;

	while (*link) {
		parent = *link;
		cur = rb_entry(parent, icount_node, in_node);
		if (in->in_blkno < cur->in_blkno)
			link = &(*link)->rb_left;
		else if (in->in_blkno > cur->in_blkno)
			link = &(*link)->rb_right;
	}
	rb_link_node(&in->in_node, parent, link);
	rb_insert_color(&in->in_node, &icount->ic_multiple_tree);
}
/* Insert ins_tn keyed by ins_tn->key; duplicates are silently dropped. */
static void t_llds_insert_by_key(struct rb_root *ver_root, t_llds_entry *ins_tn)
{
	struct rb_node **link = &ver_root->rb_node;
	struct rb_node *parent = NULL;

	while (*link != NULL) {
		t_llds_entry *cur;

		parent = *link;
		cur = rb_entry(parent, t_llds_entry, rb);
		if (ins_tn->key < cur->key)
			link = &parent->rb_left;
		else if (ins_tn->key > cur->key)
			link = &parent->rb_right;
		else
			return;   /* key already present: drop the insert */
	}
	rb_link_node(&ins_tn->rb, parent, link);
	rb_insert_color(&ins_tn->rb, ver_root);
}
/*
 * Find the node with in_blkno == blkno. On a miss, optionally report the
 * smallest node greater than blkno through *next (left untouched when no
 * such node exists). Returns NULL on a miss.
 */
static icount_node *icount_search(o2fsck_icount *icount, uint64_t blkno, icount_node **next)
{
	struct rb_node *pos = icount->ic_multiple_tree.rb_node;
	icount_node *successor = NULL;

	while (pos) {
		icount_node *cur = rb_entry(pos, icount_node, in_node);

		if (blkno < cur->in_blkno) {
			successor = cur;   /* best "next larger" seen so far */
			pos = pos->rb_left;
		} else if (blkno > cur->in_blkno) {
			pos = pos->rb_right;
		} else {
			return cur;
		}
	}
	if (next && successor)
		*next = successor;
	return NULL;
}
/*
 * Invoke func on every dirblock entry in e_blkno order, pre-filling a
 * read cache (up to one megabyte) ahead of the iteration when caching
 * is deemed worthwhile. Cache failures are non-fatal.
 */
void o2fsck_dir_block_iterate(o2fsck_state *ost, dirblock_iterator func, void *priv_data)
{
	o2fsck_dirblocks *db = &ost->ost_dirblocks;
	ocfs2_filesys *fs = ost->ost_fs;
	o2fsck_dirblock_entry *dbe;
	struct rb_node *node;
	unsigned ret;
	errcode_t err;
	char *pre_cache_buf = NULL;
	int pre_cache_blocks = ocfs2_blocks_in_bytes(fs, 1024 * 1024);
	int cached_blocks = 0;

	o2fsck_reset_blocks_cached();
	if (o2fsck_worth_caching(1)) {
		err = ocfs2_malloc_blocks(fs->fs_io, pre_cache_blocks,
					  &pre_cache_buf);
		if (err)
			/* Keep going uncached; this is only an optimization. */
			verbosef("Unable to allocate dirblock pre-cache "
				 "buffer, %s\n", "ignoring");
	}

	for (node = rb_first(&db->db_root); node; node = rb_next(node)) {
		/* Refill the pre-cache once the previous fill is consumed. */
		if (!cached_blocks && pre_cache_buf)
			cached_blocks = try_to_cache(fs, node, pre_cache_buf,
						     pre_cache_blocks);

		dbe = rb_entry(node, o2fsck_dirblock_entry, e_node);
		ret = func(dbe, priv_data);
		if (ret & OCFS2_DIRENT_ABORT)
			break;

		/* One cached block is consumed per visited entry. */
		if (cached_blocks)
			cached_blocks--;
	}

	if (pre_cache_buf)
		ocfs2_free(&pre_cache_buf);
}
//--------------------------------------------------------------------- SAWON* insert_data(struct rb_root *root, int sid, struct rb_node *node) { struct rb_node **p = &root->rb_node; struct rb_node * parent = NULL; SAWON *s; while(*p) { parent = *p; s = rb_entry(parent, SAWON, tree); if(sid < s->sid) p = &(*p)->rb_left; else if(sid > s->sid) p = &(*p)->rb_right; else return s; } rb_link_node(node, parent, p); rb_insert_color(node, root); return NULL; }
static shm_address_t *MV_SHM_lookup_phyaddress_node( struct rb_root *shm_root, const uint address) { struct rb_node *n = shm_root->rb_node; shm_address_t *tmp_node; while (n) { tmp_node = rb_entry(n, shm_address_t, phys_node); if (address < tmp_node->m_phyaddress) { n = n->rb_left; } else if (address > tmp_node->m_phyaddress) { if (address < (tmp_node->m_phyaddress + tmp_node->m_size) ) { return tmp_node; } else { n = n->rb_right; } } else { return tmp_node; } } return NULL; }
static void prepare_pmu_trigger(struct rb_root *root) { struct rb_node *node = rb_first(root); struct uftrace_filter *entry; while (node) { entry = rb_entry(node, typeof(*entry), node); if (entry->trigger.flags & TRIGGER_FL_READ) { if (entry->trigger.read & TRIGGER_READ_PMU_CYCLE) if (prepare_pmu_event(EVENT_ID_READ_PMU_CYCLE) < 0) break; if (entry->trigger.read & TRIGGER_READ_PMU_CACHE) if (prepare_pmu_event(EVENT_ID_READ_PMU_CACHE) < 0) break; if (entry->trigger.read & TRIGGER_READ_PMU_BRANCH) if (prepare_pmu_event(EVENT_ID_READ_PMU_BRANCH) < 0) break; } node = rb_next(node); } }
static void notify_down(struct disc_net *net, int ifindex) { struct pppoe_serv_t *n; struct tree *t = &net->tree[ifindex & HASH_BITS]; struct rb_node **p = &t->root.rb_node, *parent = NULL; pthread_mutex_lock(&t->lock); while (*p) { parent = *p; n = rb_entry(parent, typeof(*n), node); if (ifindex < n->ifindex) p = &(*p)->rb_left; else if (ifindex > n->ifindex) p = &(*p)->rb_right; else { triton_context_call(&n->ctx, (triton_event_func)_server_stop, n); break; } } pthread_mutex_unlock(&t->lock); }
static __always_inline kllds_entry *kllds_search_by_key(struct rb_root *tn_root, uint64_t search_key) { struct rb_node *next; kllds_entry *tn = NULL; read_lock(&kllds_rwlck); next = tn_root->rb_node; while(next) { tn = rb_entry(next, kllds_entry, rb); if(search_key < tn->key) { next = next->rb_left; } else if (search_key > tn->key) { next = next->rb_right; } else { read_unlock(&kllds_rwlck); return tn; } } read_unlock(&kllds_rwlck); return NULL; }
/*
 * Save (or update) the reply for a reliable request in the ACK cache so
 * retransmitted requests can be answered from cache.
 *
 * outbuff == NULL && outbytes == 0: only mark the existing cache entry
 * as "empty reply" (len = 0); fails with ENOKEY if no entry exists.
 * Otherwise: allocate a new cache node carrying the reply payload and
 * swap it for the placeholder node in the tree and the ack list.
 *
 * Caller must hold the cache lock. Returns 0 on success, -1 with errno
 * set (ENOKEY / ENOBUFS) on failure.
 */
static inline int reliable_ack___save (su_serv_t *psvr, const frames_t *frame, const void *outbuff, int outbytes)
{
    /* Construct search key */
    rb_key_cache_t key;
    memcpy(&key.destaddr, &frame->srcaddr, sizeof(SAUN));
    key.destlen = frame->srclen;
    key.seq = frame->recvhdr.seq;
    key.sid = frame->recvhdr.sid;

    struct rb_node *cachenode;
    cache_t *cache;

    /* If is no reply content, only replace len value, don't replace node
     * If have a content, must allocating and replacing new node */
    if (outbuff == 0 && outbytes == 0) {
        if ((cachenode = rb_search(&psvr->rbackcache, &key))) {
            cache = rb_entry(cachenode, cache_t, rbn);
            cache->frame.len = 0;
#if defined SU_DEBUG_LIST || defined SU_DEBUG_RBTREE
            pthread_t tid = pthread_self();
            char ipbuff[INET6_ADDRSTRLEN];
            int port;
            su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
            log_msg("serv %x %x time %u key(%s:%d:%u:%u) " ColorRed "+ACK cache %p" ColorEnd,
                    psvr, tid, cache->ts, ipbuff, port, cache->frame.recvhdr.sid,
                    cache->frame.recvhdr.seq, cache);
#endif
            return 0;
        }
        errno = ENOKEY;
        return -1;
    }

    cache_t * newack;
    /* One allocation holds the node header plus the reply payload. */
    newack = calloc(1, sizeof(cache_t) + outbytes);
    if (newack == 0) {
        errno = ENOBUFS;
        return -1;
    }

    /* Construct a new node */
    memcpy(&newack->frame, frame, sizeof(frames_t));
    memcpy(newack->frame.data, outbuff, outbytes);
    newack->frame.len = outbytes;

    /* Find and replace the hold node */
    if ((cachenode = rb_search(&psvr->rbackcache, &key))) {
        rb_replace_node(cachenode, &newack->rbn, &psvr->rbackcache);
        cache = rb_entry(cachenode, cache_t, rbn);
        /* Keep the original timestamp so expiry order is preserved. */
        newack->ts = cache->ts;
        list_remove(&cache->frame.node);
        list_append(&psvr->lsackcache, &newack->frame.node);
#if defined SU_DEBUG_LIST || defined SU_DEBUG_RBTREE
        pthread_t tid = pthread_self();
        char ipbuff[INET6_ADDRSTRLEN];
        int port;
        su_get_ip_port_f(&newack->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
        log_msg("serv %x %x time %u key(%s:%d:%u:%u) " ColorRed "+ACK cache %p Swap %p" ColorEnd,
                psvr, tid, newack->ts, ipbuff, port, frame->recvhdr.sid,
                frame->recvhdr.seq, cache, newack);
#endif
        free(cache);
        return 0;
    }
    /* No placeholder to replace: the request was never held. */
    free(newack);
    errno = ENOKEY;
    return -1;
}
/*
 * Server worker thread: waits for syn frames on psvr->synrecvls and
 * dispatches each to request_handle(). For SU_RELIABLE frames it first
 * consults the ACK cache: a hit with a stored reply is retransmitted
 * directly from cache; a hit still being processed (len == -1) is
 * dropped; a miss places a hold so the eventual reply can be cached.
 * On condvar timeout it opportunistically expires old cache entries.
 * Runs until psvr->run is cleared.
 */
static void *thread_request_handle(void *v)
{
    int ret;
    struct list *synnode;
    struct timespec abstime = {0};
    frames_t *frame;
    su_serv_t *psvr = (su_serv_t*)v;
    pthread_t tid __attribute__((unused)) = pthread_self();

    for (; psvr->run;) {
        pthread_mutex_lock(&psvr->lock);
        /* Wait (with a 1s timeout) until the syn list is non-empty. */
        while ((synnode = psvr->synrecvls.next) == &psvr->synrecvls) {
            maketimeout_seconds(&abstime, 1);
            ret = pthread_cond_timedwait(&psvr->syncond, &psvr->lock, &abstime);
            if (!psvr->run) {
                pthread_mutex_unlock(&psvr->lock);
                goto quit;
            }
            if ( ret == ETIMEDOUT ) {
                /* Idle tick: expire stale ACK cache entries. */
                pthread_mutex_lock(&psvr->cachelock);
                reliable_ack_unsave(psvr);
                pthread_mutex_unlock(&psvr->cachelock);
            }
        }
        list_remove(synnode);
        pthread_mutex_unlock(&psvr->lock);

        /* have request datagram */
        frame = container_of(synnode, frames_t, node);

        /* Build the ACK-cache key from the frame's source and ids. */
        rb_key_cache_t key;
        memcpy(&key.destaddr, &frame->srcaddr, sizeof(SAUN));
        key.destlen = frame->srclen;
        key.seq = frame->recvhdr.seq;
        key.sid = frame->recvhdr.sid;

        struct rb_node *cachenode;
        cache_t *cache;

        pthread_mutex_lock(&psvr->cachelock);
        reliable_ack_unsave(psvr);
        if (frame->recvhdr.type == SU_RELIABLE) {
            if ( (cachenode = rb_search(&psvr->rbackcache, &key))) {
                cache = rb_entry(cachenode, cache_t, rbn);
                /* len == -1 marks a request still being processed:
                 * drop this duplicate, the reply will come later. */
                if (cache->frame.len == -1) {
#ifdef SU_DEBUG_RBTREE
                    char ipbuff[INET6_ADDRSTRLEN];
                    int port;
                    su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                    log_msg("serv %x %x time %u key(%s:%d:%u:%u)" ColorRed " 0ACK cache %p" ColorEnd,
                            psvr, tid, cache->ts, ipbuff, port,
                            cache->frame.recvhdr.sid, cache->frame.recvhdr.seq, cache);
#endif
                    pthread_mutex_unlock(&psvr->cachelock);
                    free(frame);
                    continue;
                }
#ifdef SU_DEBUG_RBTREE
                char ipbuff[INET6_ADDRSTRLEN];
                int port;
                su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                log_msg("serv %x %x time %u key(%s:%d:%u:%u)" ColorRed " @ACK cache %p" ColorEnd,
                        psvr, tid, cache->ts, ipbuff, port,
                        cache->frame.recvhdr.sid, cache->frame.recvhdr.seq, cache);
#endif
                /* Cached reply exists: retransmit header + payload. */
                struct iovec iovsend[2] = {{0}};
                struct msghdr msgsend = {0}; /* assumed init to 0 */

                frame->recvhdr.act = SU_ACK;
                msgsend.msg_name = (void*)&cache->frame.srcaddr;
                msgsend.msg_namelen = cache->frame.srclen;
                msgsend.msg_iov = &iovsend[0];
                msgsend.msg_iovlen = 2;

                iovsend[0].iov_base = &frame->recvhdr;
                iovsend[0].iov_len = sizeof(suhdr_t);
                iovsend[1].iov_base = (void*)cache->frame.data; /* get the cache results */
                iovsend[1].iov_len = cache->frame.len;

                /* resend from cache */
                if (sendmsg(psvr->fd, &msgsend, 0) != sizeof(suhdr_t) + cache->frame.len) {
                    char ipbuff[INET6_ADDRSTRLEN];
                    int port;
                    su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                    ERR_RET("retransmit sendmsg %s:%d:%u:%u:%u error", ipbuff, port,
                            frame->recvhdr.seq, frame->recvhdr.ts, frame->recvhdr.sid);
                }
#ifdef SU_DEBUG_PEER_RESEND
                else {
                    char ipbuff[INET6_ADDRSTRLEN];
                    int port;
                    su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                    log_msg("retransmit sendmsg %s:%d:%u:%u:%u", ipbuff, port,
                            frame->recvhdr.seq, frame->recvhdr.ts, frame->recvhdr.sid);
                }
#endif
                pthread_mutex_unlock(&psvr->cachelock);
                free(frame);
                continue;
            } else {
                /* First time we see this request: hold a cache slot so
                 * the reply can be saved once it is produced. */
                if (reliable_ack___hold(psvr, frame) < 0) {
                    err_ret("reliable_ack___hold error");
                    pthread_mutex_unlock(&psvr->cachelock);
                    free(frame);
                    continue;
                }
            }
        }
        pthread_mutex_unlock(&psvr->cachelock);

        request_handle(psvr, frame);

#if defined SU_DEBUG_PEER_RECV || defined SU_DEBUG_LIST
        log_msg("serv %x %x delete syn "ColorRed"%p"ColorEnd" seq %d datagram len %d",
                psvr, tid, frame, frame->recvhdr.seq, frame->len);
#endif
        free(frame);
    }

quit:
    pthread_exit(0);
}
int main(int argc, char *argv[]) { rb_tree_t rb_tree; rb_node_t *node; unsigned int key_pool[KEY_POOL_CAPS]; unsigned int key; for (int i = 0; i < KEY_POOL_CAPS; ++i) { add_key: key = arc4random() % 3000; for (int j = i - 1; j >= 0; j--) { if (key == key_pool[j]) { goto add_key; } } key_pool[i] = key; printf("Add %u into key pool.\n", key_pool[i]); } rb_tree_init(&rb_tree, data_compare, data_destroy); redo: for (int i = 0; i < KEY_POOL_CAPS; ++i) { data_node_t *data = (data_node_t *)malloc(sizeof(*data)); data->key = key_pool[i]; if (rb_insert(&(data->rb_node), &rb_tree)) { printf("Key %u already exist.\n", data->key); } else { printf("Add %u into tree.\n", data->key); } } printf("INIT: ************\n"); printf("Min:%u\n", rb_entry(rb_first(&rb_tree), data_node_t, rb_node)->key); printf("Max:%u\n", rb_entry(rb_last(&rb_tree), data_node_t, rb_node)->key); printf("Trav with rb_next:\n"); node = rb_first(&rb_tree); while (node) { printf("%u, ",rb_entry(node, data_node_t, rb_node)->key); node = rb_next(node); } printf("\n"); printf("Trav with rb_prev:\n"); node = rb_last(&rb_tree); while (node) { printf("%u, ",rb_entry(node, data_node_t, rb_node)->key); node = rb_prev(node); } printf("\n"); printf("ERASE: ************\n"); data_node_t search_node; for (int i = 0; i < 5; i++) { unsigned int j = arc4random() % KEY_POOL_CAPS; search_node.key = key_pool[j]; rb_node_t *to_remove = rb_find(&(search_node.rb_node), &rb_tree); if (to_remove) { printf("Remove %u from tree.\n", search_node.key); rb_remove(to_remove, &rb_tree); free(rb_entry(to_remove, data_node_t, rb_node)); } else { printf("Key %u not in tree.\n", search_node.key); } } printf("PRINT: ************\n"); printf("Min:%u\n", rb_entry(rb_first(&rb_tree), data_node_t, rb_node)->key); printf("Max:%u\n", rb_entry(rb_last(&rb_tree), data_node_t, rb_node)->key); printf("Trav with rb_next:\n"); node = rb_first(&rb_tree); while (node) { printf("%u, ",rb_entry(node, data_node_t, rb_node)->key); node = rb_next(node); } 
printf("\n"); printf("Trav with rb_prev:\n"); node = rb_last(&rb_tree); while (node) { printf("%u, ",rb_entry(node, data_node_t, rb_node)->key); node = rb_prev(node); } printf("\n"); rb_clear(&rb_tree); printf("PRINT2: ************\n"); node = rb_first(&rb_tree); if (node) { printf("Min:%u\n", rb_entry(node, data_node_t, rb_node)->key); } node = rb_last(&rb_tree); if (node) { printf("Max:%u\n", rb_entry(node, data_node_t, rb_node)->key); } printf("Trav with rb_next:\n"); node = rb_first(&rb_tree); while (node) { printf("%u, ",rb_entry(node, data_node_t, rb_node)->key); node = rb_next(node); } printf("\n"); printf("Trav with rb_prev:\n"); node = rb_last(&rb_tree); while (node) { printf("%u, ",rb_entry(node, data_node_t, rb_node)->key); node = rb_prev(node); } printf("\n"); goto redo; return 0; }