/*
 * vmap_insert --
 *	Allocate a new vmap entry describing [start, start + len) with the
 *	given type, link it into the global vmap red-black tree and add its
 *	size to the running total.  Allocation failure is fatal (assert).
 *	Returns the newly inserted entry.
 */
static struct vme *
vmap_insert(vaddr_t start, size_t len, uint8_t type)
{
	struct vme *entry;

	entry = malloc(sizeof(*entry));
	assert(entry != NULL);

	entry->addr = start;
	entry->size = len;
	entry->type = type;

	rb_tree_insert_node(&vmap_rbtree, (void *)entry);
	vmap_size += entry->size;

	return entry;
}
/*
 * addFDE --
 *	Record an FDE covering the pc range [pcStart, pcEnd] in the shared
 *	segment tree.  Returns true when a new Range node was inserted;
 *	returns false when allocation fails or when an equivalent range is
 *	already present (in which case the temporary node is freed).
 */
bool addFDE(pint_t pcStart, pint_t pcEnd, pint_t fde) {
	/* Allocate and initialize outside the lock: no reason to hold the
	 * write lock across malloc, and it keeps a single unlock path. */
	Range *n = (Range *)malloc(sizeof(*n));
	if (n == NULL)
		return false;	/* BUG FIX: original dereferenced NULL on OOM */
	n->hdr_base = fde;
	n->hdr_start = 0;
	n->hdr_entries = 0;
	n->first_pc = pcStart;
	n->last_pc = pcEnd;
	n->data_base = 0;
	n->ehframe_base = 0;

	pthread_rwlock_wrlock(&fdeTreeLock);
	/* rb_tree_insert_node returns the inserted node, or the existing
	 * colliding node when an equal key is already in the tree. */
	bool inserted = (rb_tree_insert_node(&segmentTree, n) == n);
	pthread_rwlock_unlock(&fdeTreeLock);

	if (!inserted)
		free(n);	/* duplicate range: discard our node */
	return inserted;
}
/*
 * rb_tree_insert --
 *	Convenience wrapper around rb_tree_insert_node(): wraps 'value' in a
 *	freshly created (malloc'ated) node and inserts that node into the
 *	tree.  Propagates rb_tree_insert_node()'s return value unchanged.
 */
int
rb_tree_insert(struct rb_tree *self, void *value)
{
	return rb_tree_insert_node(self, rb_node_create(value));
}
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * Returns 0 with *npp set on success, or an errno value.  The returned
 * vnode is locked (LK_EXCLUSIVE | lkflags via vget, or VOP_LOCK on the
 * freshly created node).
 */
int nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	/* Key used to search the per-mount rb-tree of nfsnodes. */
	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	/* Fast path: look for an existing node under the read lock. */
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		/*
		 * Take the vnode interlock BEFORE dropping the tree lock so
		 * the vnode cannot be reclaimed between the lookup and vget.
		 */
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;	/* vnode died under us; retry lookup */
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	/* Not found: create a new vnode/nfsnode pair. */
	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	/* Large file handles get their own allocation; small ones are stored
	 * inline in the nfsnode. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	/*
	 * Re-check under the write lock: another thread may have inserted
	 * the same file handle while we slept in the allocations above.
	 * If so, tear down everything we built and retry the lookup.
	 */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;	/* lost the race; use the winner's node */
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	/* Lock the new vnode before publishing it in the tree. */
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);
	*npp = np;
	return (0);
}
/*
 * chfs_add_tmp_dnode_to_tree -
 * adds a temporary node to the temporary tree
 *
 * Merges 'newtd' into the offset-keyed tree rii->tdi_root, resolving
 * version conflicts and full overlaps against existing entries: an
 * obsoleted node (ours or an existing one) is killed via chfs_kill_td().
 * Finally the overlap flags of neighbouring entries are updated.
 * Always returns 0.
 */
int chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp, struct chfs_readinode_info *rii, struct chfs_tmp_dnode *newtd)
{
	/* One past the last byte covered by the new node. */
	uint64_t end_ofs = newtd->node->ofs + newtd->node->size;
	struct chfs_tmp_dnode_info *this;
	struct rb_node *node, *prev_node;
	struct chfs_tmp_dnode_info *newtdi;

	/* Find the entry at the new node's offset, then back up to the
	 * leftmost entry of the overlapping run (entries flagged
	 * 'overlapped' may extend past their left neighbours). */
	node = rb_tree_find_node(&rii->tdi_root, &newtd->node->ofs);
	if (node) {
		this = (struct chfs_tmp_dnode_info *)node;
		while (this->tmpnode->overlapped) {
			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				/* No left neighbour: the flag was stale. */
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
			this = (struct chfs_tmp_dnode_info *)node;
		}
	}
	/* Walk right over every entry that can overlap [ofs, end_ofs). */
	while (node) {
		this = (struct chfs_tmp_dnode_info *)node;
		if (this->tmpnode->node->ofs > end_ofs)
			break;	/* past the new node's range; done scanning */
		struct chfs_tmp_dnode *tmp_td = this->tmpnode;
		while (tmp_td) {
			if (tmp_td->version == newtd->version) {
				/* This is a new version of an old node. */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					/* Existing copy checks out: drop ours. */
					dbg("calling kill td 0\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				} else {
					/* Existing copy is bad: replace it. */
					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					chfs_kill_td(chmp, tmp_td);
					chfs_add_tmp_dnode_to_tdi(this, newtd);
					return 0;
				}
			}
			if (tmp_td->version < newtd->version && tmp_td->node->ofs >= newtd->node->ofs && tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
				/* New node entirely overlaps 'this' */
				if (chfs_check_td_node(chmp, newtd)) {
					/* ... but the new node is bad: drop it. */
					dbg("calling kill td 2\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
				while (tmp_td && tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
					/* Pick the successor before possibly freeing
					 * the current tdi out from under us. */
					struct rb_node *next = rb_tree_iterate(&rii->tdi_root, this, RB_DIR_RIGHT);
					struct chfs_tmp_dnode_info *next_tdi = (struct chfs_tmp_dnode_info *)next;
					struct chfs_tmp_dnode *next_td = NULL;
					if (tmp_td->next) {
						next_td = tmp_td->next;
					} else if (next_tdi) {
						next_td = next_tdi->tmpnode;
					}
					if (tmp_td->version < newtd->version) {
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_kill_td(chmp, tmp_td);
						if (!this->tmpnode) {
							/* tdi emptied: unlink and free it. */
							rb_tree_remove_node(&rii->tdi_root, this);
							chfs_kill_tdi(chmp, this);
							this = next_tdi;
						}
					}
					tmp_td = next_td;
				}
				/* NOTE(review): 'continue' restarts the inner scan with
				 * the advanced tmp_td/this — presumably intentional to
				 * keep killing overlapped victims; confirm against the
				 * upstream chfs sources. */
				continue;
			}
			if (tmp_td->version > newtd->version && tmp_td->node->ofs <= newtd->node->ofs && tmp_td->node->ofs + tmp_td->node->size >= end_ofs) {
				/* New node entirely overlapped by 'this' */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					/* 'this' is good and newer: drop the new node. */
					dbg("this version: %llu\n", (unsigned long long)tmp_td->version);
					dbg("this ofs: %llu, size: %u\n", (unsigned long long)tmp_td->node->ofs, tmp_td->node->size);
					dbg("calling kill td 4\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... but 'this' was bad. Replace it... */
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_kill_td(chmp, tmp_td);
				if (!this->tmpnode) {
					rb_tree_remove_node(&rii->tdi_root, this);
					chfs_kill_tdi(chmp, this);
				}
				/* NOTE(review): newtd is killed here and then inserted
				 * fresh below via break — verify this matches the
				 * intended replacement semantics. */
				dbg("calling kill td 5\n");
				chfs_kill_td(chmp, newtd);
				break;
			}
			tmp_td = tmp_td->next;
		}
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	}
	/* No conflict resolved the new node away: wrap it in a tdi. */
	newtdi = chfs_alloc_tmp_dnode_info();
	chfs_add_tmp_dnode_to_tdi(newtdi, newtd);
	/* We neither completely obsoleted nor were completely obsoleted by an earlier node. Insert into the tree */
	struct chfs_tmp_dnode_info *tmp_tdi = rb_tree_insert_node(&rii->tdi_root, newtdi);
	if (tmp_tdi != newtdi) {
		/* An entry with the same key already exists: chain newtd onto
		 * it and discard the tdi we just allocated. */
		chfs_remove_tmp_dnode_from_tdi(newtdi, newtd);
		chfs_add_tmp_dnode_to_tdi(tmp_tdi, newtd);
		chfs_kill_tdi(chmp, newtdi);
	}
	/* If there's anything behind that overlaps us, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
	if (node) {
		while (1) {
			this = (struct chfs_tmp_dnode_info *)node;
			if (this->tmpnode->node->ofs + this->tmpnode->node->size > newtd->node->ofs) {
				newtd->overlapped = 1;
			}
			if (!this->tmpnode->overlapped)
				break;	/* run of overlapping predecessors ends here */
			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
		}
	}
	/* If the new node overlaps anything ahead, note it */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	this = (struct chfs_tmp_dnode_info *)node;
	while (this && this->tmpnode->node->ofs < end_ofs) {
		this->tmpnode->overlapped = 1;
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
		this = (struct chfs_tmp_dnode_info *)node;
	}
	return 0;
}