/* RTreeInsert: insert rectangle r with payload data into the R-tree rooted
 * at *n, at the given tree level (0 == leaf).  If the root splits, a new
 * root is grown (tree becomes one level taller) and *n is updated in place.
 * Returns 1 when the root was split, 0 otherwise.
 *
 * Fixes vs. original: removed the deprecated `register` storage specifiers
 * (removed in C++17, a no-op hint in modern C) and the stale commented-out
 * prototype / `rtp->root = newroot` dead code. */
int RTreeInsert(RTree_t * rtp, Rect_t * r, void *data, Node_t ** n, int level)
{
    int i;
    Node_t *newroot;
    Node_t *newnode = 0;
    Branch_t b;
    int result = 0;

    assert(r && n);
    assert(level >= 0 && level <= (*n)->level);
    /* every dimension's lower bound must not exceed its upper bound */
    for (i = 0; i < NUMDIMS; i++)
	assert(r->boundary[i] <= r->boundary[NUMDIMS + i]);

#ifdef RTDEBUG
    fprintf(stderr, "RTreeInsert level=%d\n", level);
#endif

    /* bookkeeping: re-inserts during deletion are counted separately */
    if (rtp->StatFlag) {
	if (rtp->Deleting)
	    rtp->ReInsertCount++;
	else
	    rtp->InsertCount++;
    }
    if (!rtp->Deleting)
	rtp->RectCount++;

    if (RTreeInsert2(rtp, r, data, *n, &newnode, level)) {	/* root was split */
	if (rtp->StatFlag) {
	    if (rtp->Deleting)
		rtp->DeTouchCount++;
	    else
		rtp->InTouchCount++;
	}
	/* grow a new root, make tree taller */
	newroot = RTreeNewNode(rtp);
	rtp->NonLeafCount++;
	newroot->level = (*n)->level + 1;
	/* old root becomes the first child of the new root */
	b.rect = NodeCover(*n);
	b.child = *n;
	AddBranch(rtp, &b, newroot, NULL);
	/* the split-off node becomes the second child */
	b.rect = NodeCover(newnode);
	b.child = newnode;
	AddBranch(rtp, &b, newroot, NULL);
	*n = newroot;
	rtp->EntryCount += 2;
	result = 1;
    }
    return result;
}
RTREE_TEMPLATE
bool RTREE_QUAL::InsertRectRec(const Branch& a_branch, Node* a_node,
                               Node** a_newNode, int a_level)
{
  ASSERT(a_node && a_newNode);
  ASSERT(a_level >= 0 && a_level <= a_node->m_level);

  // Reached the target level: add the record here, splitting if necessary.
  // Data records always arrive with a_level == 0 (leaf).
  if (a_node->m_level == a_level)
  {
    return AddBranch(&a_branch, a_node, a_newNode);
  }

  // Being below the target level should be impossible.
  if (a_node->m_level < a_level)
  {
    ASSERT(0);
    return false;
  }

  // Still above the insertion level: pick the best-fitting subtree and
  // recurse into it.
  Node* splitNode;
  const int index = PickBranch(&a_branch.m_rect, a_node);
  const bool childWasSplit =
      InsertRectRec(a_branch, a_node->m_branch[index].m_child, &splitNode, a_level);

  if (!childWasSplit)
  {
    // No split below: simply widen the child's bounding box so it covers
    // the newly inserted record.
    a_node->m_branch[index].m_rect =
        CombineRect(&a_branch.m_rect, &(a_node->m_branch[index].m_rect));
    return false;
  }

  // The child split into two nodes, so its cover must be recomputed, and
  // the freshly created sibling has to be registered in this node as well.
  // That AddBranch may in turn split this node (reported via a_newNode).
  a_node->m_branch[index].m_rect = NodeCover(a_node->m_branch[index].m_child);
  Branch branch;
  branch.m_child = splitNode;
  branch.m_rect = NodeCover(splitNode);
  return AddBranch(&branch, a_node, a_newNode);
}
RTREE_TEMPLATE
bool RTREE_QUAL::InsertRect(const Branch& a_branch, Node** a_root, int a_level)
{
  ASSERT(a_root);
  ASSERT(a_level >= 0 && a_level <= (*a_root)->m_level);
#ifdef _DEBUG
  // Sanity check: the rectangle must be well-formed on every axis.
  for (int axis = 0; axis < NUMDIMS; ++axis)
  {
    ASSERT(a_branch.m_rect.m_min[axis] <= a_branch.m_rect.m_max[axis]);
  }
#endif //_DEBUG

  Node* splitNode;
  const bool rootWasSplit = InsertRectRec(a_branch, *a_root, &splitNode, a_level);
  if (!rootWasSplit)
  {
    return false;
  }

  // The root split: grow the tree one level taller with a brand-new root
  // that holds the old root and the split-off node as its two children.
  Node* newRoot = AllocNode();
  newRoot->m_level = (*a_root)->m_level + 1;

  Branch branch;

  // Old root becomes the first child of the new root.
  branch.m_rect = NodeCover(*a_root);
  branch.m_child = *a_root;
  AddBranch(&branch, newRoot, NULL);

  // The node produced by the split becomes the second child.
  branch.m_rect = NodeCover(splitNode);
  branch.m_child = splitNode;
  AddBranch(&branch, newRoot, NULL);

  // Publish the new root back to the caller.
  *a_root = newRoot;
  return true;
}
// Materialises a '/'-separated path as a chain of tree items beneath 'root'
// and returns the item for the final path segment.  Recurses on the prefix
// before the last '/', then creates the trailing segment as its child.
wxTreeItemId AnimationSelector::AddBranch(const wxTreeItemId& root, const wxString& name)
{
    const wxString::size_type slash = name.find_last_of(wxT('/'));

    // No separator: 'name' is a single segment directly under 'root'.
    if (slash == wxString::npos)
    {
        return CreateChildItem(root, name);
    }

    // Build (or reuse) the parent chain for everything before the last '/',
    // then hang the final segment off it.
    const wxTreeItemId parent = AddBranch(root, name.substr(0, slash));
    return CreateChildItem(parent, name.substr(slash + 1, wxString::npos));
}
RTREE_TEMPLATE
void RTREE_QUAL::LoadNodes(Node* a_nodeA, Node* a_nodeB, PartitionVars* a_parVars)
{
  ASSERT(a_nodeA);
  ASSERT(a_nodeB);
  ASSERT(a_parVars);

  // Distribute every buffered branch into node A or node B according to
  // the partition computed earlier.
  for (int index = 0; index < a_parVars->m_total; ++index)
  {
    ASSERT(a_parVars->m_partition[index] == 0 || a_parVars->m_partition[index] == 1);

    Node* target = (a_parVars->m_partition[index] == 1) ? a_nodeB : a_nodeA;

    // The partitioning guarantees neither node overflows, so this
    // AddBranch can never cause a further split.
    bool nodeWasSplit = AddBranch(&a_parVars->m_branchBuf[index], target, NULL);
    ASSERT(!nodeWasSplit);
  }
}
// Recursively inserts the rectangle 'mbr' into the subtree rooted at 'node'.
// When the insertion produces a new node at this level, 'new_node' receives
// it and true is returned so the caller can register it one level up.
bool RtreeInsert(const Rec *mbr, RtreeNode *node, RtreeNode* &new_node)
{
    //assert(level == node->level);
    if (!node->level) {
        // Leaf level reached: the record is placed into a freshly allocated
        // leaf node which is handed upward via 'new_node'.
        // NOTE(review): this always allocates a new node instead of trying to
        // add the record to 'node' itself — confirm this is intended and that
        // the caller merges/limits these single-entry leaves.
        new_node = new RtreeNode();
        new_node->level = 0;
        new_node->branch[new_node->count++].mbr = *mbr;
        return true;
    }

    // Internal node: descend into the subtree selected by ChooseBranch,
    // then refresh that branch's cover to include the inserted rectangle.
    int chosen = ChooseBranch(mbr, node);
    bool res = RtreeInsert(mbr, node->branch[chosen].child, new_node);
    node->branch[chosen].mbr = CoverRec(node->branch[chosen].child);
    if (res) {
        // The recursion produced a new node: add it as a branch of 'node'.
        // AddBranch may split 'node' in turn; the split result is propagated
        // upward through the same 'new_node' reference.
        RtreeBranch nbra;
        nbra.mbr = CoverRec(new_node);
        nbra.child = new_node;
        return AddBranch(&nbra, node, &new_node);
    }
    return false;
}
// Performs aggressive dead-code elimination on a single function.
// Three phases: (1) seed a worklist with instructions that have observable
// side effects (stores to non-local variables, live branches, unsafe-to-delete
// opcodes); (2) compute the closure of live instructions by following operands
// and structured control-flow headers/merges; (3) queue everything not marked
// live for deletion, re-branching around deleted structured constructs.
// Returns true if anything was queued for deletion (function modified).
bool AggressiveDCEPass::AggressiveDCE(Function* func) {
  // Mark function parameters as live.
  AddToWorklist(&func->DefInst());
  func->ForEachParam(
      [this](const Instruction* param) {
        AddToWorklist(const_cast<Instruction*>(param));
      },
      false);

  // Compute map from block to controlling conditional branch
  std::list<BasicBlock*> structuredOrder;
  cfg()->ComputeStructuredOrder(func, &*func->begin(), &structuredOrder);
  ComputeBlock2HeaderMaps(structuredOrder);
  bool modified = false;
  // Add instructions with external side effects to worklist. Also add branches
  // EXCEPT those immediately contained in an "if" selection construct or a loop
  // or continue construct.
  // TODO(greg-lunarg): Handle Frexp, Modf more optimally
  call_in_func_ = false;
  func_is_entry_point_ = false;
  private_stores_.clear();
  // Stacks to keep track of when we are inside an if- or loop-construct.
  // When immediately inside an if- or loop-construct, we do not initially
  // mark branches live. All other branches must be marked live.
  std::stack<bool> assume_branches_live;
  std::stack<uint32_t> currentMergeBlockId;
  // Push sentinel values on stack for when outside of any control flow.
  assume_branches_live.push(true);
  currentMergeBlockId.push(0);
  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end(); ++bi) {
    // If exiting if or loop, update stacks
    if ((*bi)->id() == currentMergeBlockId.top()) {
      assume_branches_live.pop();
      currentMergeBlockId.pop();
    }
    for (auto ii = (*bi)->begin(); ii != (*bi)->end(); ++ii) {
      SpvOp op = ii->opcode();
      switch (op) {
        case SpvOpStore: {
          uint32_t varId;
          (void)GetPtr(&*ii, &varId);
          // Mark stores as live if their variable is not function scope
          // and is not private scope. Remember private stores for possible
          // later inclusion. We cannot call IsLocalVar at this point because
          // private_like_local_ has not been set yet.
          if (IsVarOfStorage(varId, SpvStorageClassPrivate) ||
              IsVarOfStorage(varId, SpvStorageClassWorkgroup))
            private_stores_.push_back(&*ii);
          else if (!IsVarOfStorage(varId, SpvStorageClassFunction))
            AddToWorklist(&*ii);
        } break;
        case SpvOpCopyMemory:
        case SpvOpCopyMemorySized: {
          // Treated like a store through the target address.
          uint32_t varId;
          (void)GetPtr(ii->GetSingleWordInOperand(kCopyMemoryTargetAddrInIdx),
                       &varId);
          if (IsVarOfStorage(varId, SpvStorageClassPrivate) ||
              IsVarOfStorage(varId, SpvStorageClassWorkgroup))
            private_stores_.push_back(&*ii);
          else if (!IsVarOfStorage(varId, SpvStorageClassFunction))
            AddToWorklist(&*ii);
        } break;
        case SpvOpLoopMerge: {
          // Entering a loop construct: its branches are not assumed live.
          assume_branches_live.push(false);
          currentMergeBlockId.push(
              ii->GetSingleWordInOperand(kLoopMergeMergeBlockIdInIdx));
        } break;
        case SpvOpSelectionMerge: {
          // Entering an if/switch construct: branches not assumed live.
          assume_branches_live.push(false);
          currentMergeBlockId.push(
              ii->GetSingleWordInOperand(kSelectionMergeMergeBlockIdInIdx));
        } break;
        case SpvOpSwitch:
        case SpvOpBranch:
        case SpvOpBranchConditional:
        case SpvOpUnreachable: {
          // Only branches outside any if/loop construct are seeded as live.
          if (assume_branches_live.top()) {
            AddToWorklist(&*ii);
          }
        } break;
        default: {
          // Function calls, atomics, function params, function returns, etc.
          // TODO(greg-lunarg): function calls live only if write to non-local
          if (!ii->IsOpcodeSafeToDelete()) {
            AddToWorklist(&*ii);
          }
          // Remember function calls
          if (op == SpvOpFunctionCall) call_in_func_ = true;
        } break;
      }
    }
  }
  // See if current function is an entry point
  for (auto& ei : get_module()->entry_points()) {
    if (ei.GetSingleWordInOperand(kEntryPointFunctionIdInIdx) ==
        func->result_id()) {
      func_is_entry_point_ = true;
      break;
    }
  }
  // If the current function is an entry point and has no function calls,
  // we can optimize private variables as locals
  private_like_local_ = func_is_entry_point_ && !call_in_func_;
  // If privates are not like local, add their stores to worklist
  if (!private_like_local_)
    for (auto& ps : private_stores_) AddToWorklist(ps);
  // Perform closure on live instruction set.
  while (!worklist_.empty()) {
    Instruction* liveInst = worklist_.front();
    // Add all operand instructions if not already live
    liveInst->ForEachInId([&liveInst, this](const uint32_t* iid) {
      Instruction* inInst = get_def_use_mgr()->GetDef(*iid);
      // Do not add label if an operand of a branch. This is not needed
      // as part of live code discovery and can create false live code,
      // for example, the branch to a header of a loop.
      if (inInst->opcode() == SpvOpLabel && liveInst->IsBranch()) return;
      AddToWorklist(inInst);
    });
    // The result type of a live instruction is live too.
    if (liveInst->type_id() != 0) {
      AddToWorklist(get_def_use_mgr()->GetDef(liveInst->type_id()));
    }
    // If in a structured if or loop construct, add the controlling
    // conditional branch and its merge.
    BasicBlock* blk = context()->get_instr_block(liveInst);
    Instruction* branchInst = block2headerBranch_[blk];
    if (branchInst != nullptr) {
      AddToWorklist(branchInst);
      Instruction* mergeInst = branch2merge_[branchInst];
      AddToWorklist(mergeInst);
    }
    // If the block is a header, add the next outermost controlling
    // conditional branch and its merge.
    Instruction* nextBranchInst = header2nextHeaderBranch_[blk];
    if (nextBranchInst != nullptr) {
      AddToWorklist(nextBranchInst);
      Instruction* mergeInst = branch2merge_[nextBranchInst];
      AddToWorklist(mergeInst);
    }
    // If local load, add all variable's stores if variable not already live
    if (liveInst->opcode() == SpvOpLoad || liveInst->IsAtomicWithLoad()) {
      uint32_t varId;
      (void)GetPtr(liveInst, &varId);
      if (varId != 0) {
        ProcessLoad(varId);
      }
      // Process memory copies like loads
    } else if (liveInst->opcode() == SpvOpCopyMemory ||
               liveInst->opcode() == SpvOpCopyMemorySized) {
      uint32_t varId;
      (void)GetPtr(liveInst->GetSingleWordInOperand(kCopyMemorySourceAddrInIdx),
                   &varId);
      if (varId != 0) {
        ProcessLoad(varId);
      }
      // If merge, add other branches that are part of its control structure
    } else if (liveInst->opcode() == SpvOpLoopMerge ||
               liveInst->opcode() == SpvOpSelectionMerge) {
      AddBreaksAndContinuesToWorklist(liveInst);
      // If function call, treat as if it loads from all pointer arguments
    } else if (liveInst->opcode() == SpvOpFunctionCall) {
      liveInst->ForEachInId([this](const uint32_t* iid) {
        // Skip non-ptr args
        if (!IsPtr(*iid)) return;
        uint32_t varId;
        (void)GetPtr(*iid, &varId);
        ProcessLoad(varId);
      });
      // If function parameter, treat as if it's result id is loaded from
    } else if (liveInst->opcode() == SpvOpFunctionParameter) {
      ProcessLoad(liveInst->result_id());
      // We treat an OpImageTexelPointer as a load of the pointer, and
      // that value is manipulated to get the result.
    } else if (liveInst->opcode() == SpvOpImageTexelPointer) {
      uint32_t varId;
      (void)GetPtr(liveInst, &varId);
      if (varId != 0) {
        ProcessLoad(varId);
      }
    }
    worklist_.pop();
  }
  // Kill dead instructions and remember dead blocks
  for (auto bi = structuredOrder.begin(); bi != structuredOrder.end();) {
    uint32_t mergeBlockId = 0;
    (*bi)->ForEachInst([this, &modified, &mergeBlockId](Instruction* inst) {
      if (!IsDead(inst)) return;
      // Labels are never deleted here; dead blocks keep their labels.
      if (inst->opcode() == SpvOpLabel) return;
      // If dead instruction is selection merge, remember merge block
      // for new branch at end of block
      if (inst->opcode() == SpvOpSelectionMerge ||
          inst->opcode() == SpvOpLoopMerge)
        mergeBlockId = inst->GetSingleWordInOperand(0);
      to_kill_.push_back(inst);
      modified = true;
    });
    // If a structured if or loop was deleted, add a branch to its merge
    // block, and traverse to the merge block and continue processing there.
    // We know the block still exists because the label is not deleted.
    if (mergeBlockId != 0) {
      AddBranch(mergeBlockId, *bi);
      // Skip forward to the merge block (it must appear later in the
      // structured order, so this loop terminates before end()).
      for (++bi; (*bi)->id() != mergeBlockId; ++bi) {
      }

      auto merge_terminator = (*bi)->terminator();
      if (merge_terminator->opcode() == SpvOpUnreachable) {
        // The merge was unreachable. This is undefined behaviour so just
        // return (or return an undef). Then mark the new return as live.
        auto func_ret_type_inst = get_def_use_mgr()->GetDef(func->type_id());
        if (func_ret_type_inst->opcode() == SpvOpTypeVoid) {
          merge_terminator->SetOpcode(SpvOpReturn);
        } else {
          // Find an undef for the return value and make sure it gets kept by
          // the pass.
          auto undef_id = Type2Undef(func->type_id());
          auto undef = get_def_use_mgr()->GetDef(undef_id);
          live_insts_.Set(undef->unique_id());
          merge_terminator->SetOpcode(SpvOpReturnValue);
          merge_terminator->SetInOperands({{SPV_OPERAND_TYPE_ID, {undef_id}}});
          get_def_use_mgr()->AnalyzeInstUse(merge_terminator);
        }
        live_insts_.Set(merge_terminator->unique_id());
      }
    } else {
      ++bi;
    }
  }
  return modified;
}