// Returns the single child element called `name`.
// It is an error (logged, then thrown) for the child to be absent or ambiguous.
xml::Node xml::Node::getChildByName(const std::string& name) const
{
    const NodeList matches = getChildrenByName(name);
    const auto matchCount = matches.size();
    if (matchCount != 1)
    {
        LOG_ERROR("Expected one node with name " << name << " but found " << matchCount);
        THROW_INVALID_ARG("Child not found");
    }
    return matches[0];
}
// Annotates every node in `nl` in order. When `marktailpos` is set the last
// node is flagged as being in tail position; when `marksingletype` is set
// every node is flagged as requiring a single type.
void Annotator::annotateNodeList(NodeList& nl, bool marktailpos, bool marksingletype)
{
    const size_t count = nl.size();
    for (size_t idx = 0; idx < count; ++idx)
    {
        const bool isLastNode = (idx + 1 == count);
        if (marktailpos && isLastNode)
            nl[idx]->setIsInTailPos(true);
        if (marksingletype)
            nl[idx]->setIsSingleTypeRequired(true);
        annotateNode(nl[idx]);
    }
}
// Apply recursive call void DominatedActionSequenceDetection::searchNode(TreeNode* node, int seqLength, std::vector<bool>& isSequenceUsed) { assert(num_sequences(seqLength) == isSequenceUsed.size()); // 1. get actionList for the node getUsedSequenceList(node, seqLength, isSequenceUsed); // 2. recursively call this for child nodes NodeList childList = sortNodeList(node->v_children); for (int i = 0; i < childList.size(); ++i) { searchNode(childList[i], seqLength, isSequenceUsed); } }
// Generate a new level of nodes bool DataScaling::generateNextLevel(int level) { cout << "Level:" << level << "\t"; // Get the number of nodes on the previous level NodeList* previousNodeList = this->nodes[level - 1]; int previousSize = previousNodeList->size(); // Set the number of new nodes to generate int numNewNodes = this->nodesPerLevel; // Create new random nodes NodeList* currentNodeList = this->nodes[level]; for (int count = 0; count < numNewNodes; ++count) { // Create a new node int newId = this->nodeCount + count; // nodeCount is total number of nodes in all previous levels Node* newNode = new Node(newId); currentNodeList->push_back(newNode); // Randomly select the degree for this node int nodeDegree = (rand() % this->degree) + 1; // For each degree (as specified on the command line) for (int degree = 0; degree < nodeDegree; ++degree) { // Select a random node from the previous level int oldId = rand() % previousSize; // Check if there's already an edge to the selected node if (!newNode->findEdge(oldId)) { Node* oldNode = previousNodeList->at(oldId); // Add an edge between the new node and the old node newNode->addEdge(oldId); oldNode->addEdge(newId); this->edgeCount += 2; // Randomly add some of the old node's words into the new node int numWords = this->wordsPerNode / nodeDegree; newNode->addPartialWordList(oldNode, numWords); } } } // Update the total number of nodes so far this->nodeCount += numNewNodes; cout << "nodeCount:" << this->nodeCount << endl; return(true); }
void Document::copyNodes(const NodeList& nodeList) { if (!isValid() || _xmlDoc->children == NULL) { return; // is not Valid, place an assertion here? } // Copy the child nodes one by one for (std::size_t i = 0; i < nodeList.size(); i++) { // Copy the node xmlNodePtr node = xmlCopyNode(nodeList[i].getNodePtr(), 1); // Add this node to the top level node of this document xmlAddChild(xmlDocGetRootElement(_xmlDoc), node); } }
// Walks every use of the register defined by a transfer instruction
// (`tfrDefR`) and records, per user instruction, whether it can be rewritten
// to fold the transfer away. `SizeInc` accumulates the net instruction-count
// change; the return value says whether at least one use is replaceable.
bool HexagonOptAddrMode::analyzeUses(unsigned tfrDefR, const NodeList &UNodeList,
                                     InstrEvalMap &InstrEvalResult, short &SizeInc) {
  bool KeepTfr = false;
  bool HasRepInstr = false;
  InstrEvalResult.clear();

  for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
    bool CanBeReplaced = false;
    NodeAddr<UseNode *> UN = *I;
    NodeAddr<StmtNode *> SN = UN.Addr->getOwner(*DFG);
    MachineInstr *MI = SN.Addr->getCode();
    const MCInstrDesc &MID = MI->getDesc();
    if ((MID.mayLoad() || MID.mayStore())) {
      // A memory access is only replaceable if an addressing form exists
      // that can absorb the transfer; otherwise the tfr must be kept.
      if (!hasRepForm(MI, tfrDefR)) {
        KeepTfr = true;
        continue;
      }
      SizeInc++;
      CanBeReplaced = true;
    } else if (MI->getOpcode() == Hexagon::S2_addasl_rrri) {
      // An addasl user may be removable entirely when all of its own real
      // uses are valid candidates and allow its removal.
      NodeList AddaslUseList;

      DEBUG(dbgs() << "\nGetting ReachedUses for === " << *MI << "\n");
      getAllRealUses(SN, AddaslUseList);
      // Process phi nodes.
      if (allValidCandidates(SN, AddaslUseList) &&
          canRemoveAddasl(SN, MI, AddaslUseList)) {
        SizeInc += AddaslUseList.size();
        SizeInc -= 1; // Reduce size by 1 as addasl itself can be removed.
        CanBeReplaced = true;
      } else
        SizeInc++;
    } else
      // Currently, only load/store and addasl are handled.
      // Some other instructions to consider -
      // A2_add -> A2_addi
      // M4_mpyrr_addr -> M4_mpyrr_addi
      KeepTfr = true;

    InstrEvalResult[MI] = CanBeReplaced;
    HasRepInstr |= CanBeReplaced;
  }

  // Reduce total size by 2 if original tfr can be deleted.
  if (!KeepTfr)
    SizeInc -= 2;
  return HasRepInstr;
}
// Builds a distributed BFS context by partitioning a CSR graph
// (`vertices` = row offsets, `edges` = adjacency targets) across all ranks.
// Every rank runs the same partition computation, then the slices are
// distributed with scatter/scatterv rooted at rank 0.
ParallelBFS::ParallelBFS(const mpi::communicator &comm, const NodeList &vertices,
                         const NodeList &edges) : comm(comm) {
    // Even split of the vertex range; the first `left_vertices` ranks take
    // one extra vertex each.
    NodeId part = (NodeId)vertices.size() / comm.size(),
           left_vertices = (NodeId)vertices.size() % comm.size(),
           first_vertex = 0, first_edge = 0;
    NodeList part_vertices((size_t)comm.size());
    NodeList first_vertices((size_t)comm.size());
    NodeList part_edges((size_t)comm.size());
    NodeList first_edges((size_t)comm.size());
    // Four descriptor values per rank:
    // [total vertex count, first vertex, vertex count, edge count].
    NodeList all_description((size_t)(comm.size() << 2));
    for (int i = 0; i < comm.size(); ++i) {
        NodeId this_part = part + (i < left_vertices);
        // One-past-the-end edge offset of this slice; the final slice ends
        // at edges.size() instead of the next row offset.
        NodeId last_edge = first_vertex + this_part == vertices.size()
                ? (NodeId)edges.size() : vertices[first_vertex + this_part];
        all_description[(i<<2)] = (NodeId)vertices.size();
        all_description[(i<<2) + 1] = first_vertices[i] = first_vertex;
        all_description[(i<<2) + 2] = part_vertices[i] = this_part;
        all_description[(i<<2) + 3] = part_edges[i] = last_edge - first_edge;
        first_edges[i] = first_edge;
        first_edge = last_edge;
        first_vertex += this_part;
    }
    // Each rank receives its own 4-value descriptor, then its vertex- and
    // edge-array slices.
    NodeList description(4);
    mpi::scatter(comm, all_description.data(), description.data(), 4, 0);
    this->vertex_total_count = description[0];
    this->first_vertex = description[1];
    this->vertices.resize((size_t)description[2]);
    mpi::scatterv(comm, vertices, part_vertices, first_vertices, this->vertices, 0);
    this->edges.resize((size_t)description[3]);
    mpi::scatterv(comm, edges, part_edges, first_edges, this->edges, 0);
    prepare();
}
/** * Generates a buffer with the Nintendo tagged parameters of an 802.11 Beacon frame * for UDS communication. * @returns A buffer with the Nintendo tagged parameters of the beacon frame. */ std::vector<u8> GenerateNintendoTaggedParameters(const NetworkInfo& network_info, const NodeList& nodes) { ASSERT_MSG(network_info.max_nodes == nodes.size(), "Inconsistent network state."); std::vector<u8> buffer = GenerateNintendoDummyTag(); std::vector<u8> network_info_tag = GenerateNintendoNetworkInfoTag(network_info); std::vector<u8> first_data_tag = GenerateNintendoFirstEncryptedDataTag(network_info, nodes); std::vector<u8> second_data_tag = GenerateNintendoSecondEncryptedDataTag(network_info, nodes); buffer.insert(buffer.end(), network_info_tag.begin(), network_info_tag.end()); buffer.insert(buffer.end(), first_data_tag.begin(), first_data_tag.end()); buffer.insert(buffer.end(), second_data_tag.begin(), second_data_tag.end()); return buffer; }
// Loads every non-option entry of the command line as a node file.
// Returns NULL when nothing loaded, the single node when exactly one file
// loaded, otherwise a new Group containing all loaded nodes.
Node* osgDB::readNodeFiles(std::vector<std::string>& commandLine,const ReaderWriter::Options* options)
{
    typedef std::vector<osg::Node*> NodeList;
    NodeList loadedNodes;

    // note currently doesn't delete the loaded file entries from the command line yet...
    for(unsigned int pos = 0; pos < commandLine.size(); ++pos)
    {
        const std::string& entry = commandLine[pos];
        if (entry[0]=='-') continue; // option — skip; anything else is a filename.

        osg::Node* loaded = osgDB::readNodeFile( entry, options );
        if (!loaded) continue;

        // Give unnamed nodes the filename they were loaded from.
        if (loaded->getName().empty()) loaded->setName( entry );
        loadedNodes.push_back(loaded);
    }

    if (loadedNodes.empty()) return NULL;
    if (loadedNodes.size()==1) return loadedNodes.front();

    // More than one node: bundle them all under a single group.
    osg::Group* group = new osg::Group;
    for(NodeList::iterator itr=loadedNodes.begin(); itr!=loadedNodes.end(); ++itr)
    {
        group->addChild(*itr);
    }
    return group;
}
// Runs a level-synchronous distributed BFS from `root`, filling `distance`
// with hop counts for this rank's local vertex slice (infinity = not reached).
void ParallelBFS::calculate(NodeId root) {
    distance.assign((size_t) vertex_count, infinity);
    NodeId level = 1;
    NodeList frontier;
    frontier.reserve((size_t)vertex_count);
    // Only the rank owning the root seeds its frontier (local index space).
    if (comm.rank() == find_owner(root)) {
        frontier.push_back(root - first_vertex);
        distance[root - first_vertex] = 0;
    }
    std::vector<NodeList> send_buf((size_t)comm.size());
    NodeList new_frontier;
    NodeList sizes((size_t)comm.size()), displacements((size_t)comm.size());
    // Keep iterating while any rank still has frontier vertices.
    while (mpi::all_reduce(comm, (NodeId)frontier.size(), std::plus<NodeId>()) > 0) {
        // Expand the local frontier; bucket each neighbour by owning rank.
        for (NodeId u : frontier)
            for (int e = vertices[u]; e < vertices[u + 1]; ++e) {
                int v = edges[e];
                send_buf[find_owner(v)].push_back(v);
            }
        // Rank i gathers everything destined for it from all ranks; every
        // rank must participate in every iteration of this collective loop.
        for (int i = 0; i < comm.size(); ++i) {
            mpi::gather(comm, (NodeId)send_buf[i].size(), sizes.data(), i);
            if (i == comm.rank()) {
                // Prefix-sum the per-rank counts into gatherv displacements
                // (displacements[0] stays 0 from the initial construction).
                for (int j = 1; j < comm.size(); ++j)
                    displacements[j] = displacements[j - 1] + sizes[j - 1];
                new_frontier.resize( (size_t)(displacements[comm.size()-1] + sizes[comm.size() - 1]));
                mpi::gatherv(comm, send_buf[i], new_frontier, sizes, displacements, i);
            } else {
                mpi::gatherv(comm, send_buf[i], i);
            }
        }
        for (size_t i = 0; i < comm.size(); ++i)
            send_buf[i].clear();
        // Keep only previously unvisited vertices (translated to local ids).
        frontier.clear();
        for (int v : new_frontier) {
            v -= first_vertex;
            if (distance[v] == infinity) {
                distance[v] = level;
                frontier.push_back(v);
            }
        }
        ++level;
    }
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////// ////// Performs a breadth first search to find shortest available path (with non-saturated path capacities) ////// between a search node and a sink node. The function uses a search graph to perform the search and ////// store parent-child relationship of the nodes in the graph. ///////////////////////////////////////////////////////////////////////////////////////////////////////////// void PerformBFS(const Graph& graph, SearchGraph& sgraph, int source, int sink, Path* augpath) { NodeList nlist; nlist.push_back(source); int u, v; int i; SearchNode snode_u, snode_v; bool found = false; vector<int> nbr_nodes; while (nlist.size() > 0) { u = nlist[0]; GetNeighboringNodes(u, graph, &nbr_nodes); snode_u = sgraph[u]; for (i = 0; i < (int) nbr_nodes.size(); i++) { v = nbr_nodes[i]; snode_v = sgraph[v]; if (snode_v.color == -1) { snode_v.color = 0; snode_v.dist = snode_u.dist + 1; snode_v.parent = u; sgraph[v] = snode_v; nlist.push_back(v); } if (v == sink) { found = true; break; } } nlist.pop_front(); snode_u.color = 1; sgraph[u] = snode_u; if (found == true) break; } if (found == true) FindAugmentingPath(sgraph, source, sink, augpath); }
// Exhaustively searches subsets of candidate split nodes, trying larger and
// larger subset sizes and stopping once a size produces a worse score than
// the best found so far. Prints progress and the final score to stdout.
void SplitTree::exhaustiveSearch()
{
    // first label the outputs
    for( NodeList::iterator i = _outputList.begin(); i != _outputList.end(); ++i )
    {
        (*i)->_splitHere = true;
    }

    // now collect all the unlabeled, nontrivial nodes:
    NodeList nodesToConsider;
    for( NodeList::iterator j = _dagOrderNodeList.begin(); j != _dagOrderNodeList.end(); ++j )
    {
        if( (*j)->isMarkedAsSplit() ) continue;   // already a split point
        if( !(*j)->canBeSaved() ) continue;       // cannot be saved — skip
        if( *j == _pseudoRoot ) continue;         // never consider the pseudo-root

        nodesToConsider.push_back( *j );
    }

    size_t nodeCount = nodesToConsider.size();
    int bestScore = INT_MAX;   // INT_MAX doubles as "no valid score yet"

    for( size_t subsetSize = 0; subsetSize < nodeCount; subsetSize++ )
    {
        std::cout << "considering subsets of size " << subsetSize << " out of " << nodeCount << std::endl;
        int bestScoreForSubsetSize = INT_MAX;
        exhaustiveSubsetSearch( subsetSize, nodesToConsider, bestScoreForSubsetSize );
        std::cout << "best split has score: " << bestScoreForSubsetSize << std::endl;

        if( bestScoreForSubsetSize != INT_MAX )
        {
            if( (bestScore != INT_MAX) && (bestScoreForSubsetSize > bestScore) )
            {
                // there probably isn't a better partition, lets use this :)
                break;
            }
            if( bestScoreForSubsetSize < bestScore )
                bestScore = bestScoreForSubsetSize;
        }
    }
    std::cout << "best overall score found before giving up: " << bestScore << std::endl;
}
int main() { cout << "This is a demonstration of Lab 2's XML parser for the PaperSize, Area, and PatchCodeNotification elements.\n\n"; NodeList nlist; populateList(nlist); cout << "XML DATA EXTRACTED:\n\n"; for (int i = 0; i < nlist.size(); i++) cout << nlist[i]->getXMLTags() << endl; cout << "\n\nPARSED DATA:\n\n"; cout << *(dynamic_cast<PaperSize *>(nlist[0])) << endl; cout << *(dynamic_cast<Area *>(nlist[1])) << endl; cout << *(dynamic_cast<PatchCodeNotification *>(nlist[2])) << endl; return 0; }
// Deletes every tree hanging off the given root set, freeing all nodes.
// Uses an explicit worklist in post-order fashion: an interior node is
// re-queued behind its children so it is revisited (and freed) once both of
// its child pointers have been nulled by the children's own deletion.
void emptyTree(NodeSet roots)
{
    TreeNode *tempNode = 0, *parentNode = 0;
    NodeSetIter setIter;
    NodeList nodeList;
    NodeListIter listIter;

    for(setIter=roots.begin(); setIter!=roots.end(); ++setIter)
    {
        tempNode=0;
        parentNode=0;
        if(*setIter!=0)
        {
            nodeList.push_front(*setIter);
            while (nodeList.size()!=0)
            {
                listIter=nodeList.begin();
                tempNode=(*listIter);
                nodeList.pop_front();
                if (tempNode->right==0 && tempNode->left==0)
                {
                    // Leaf: unlink from the parent, then free it.
                    parentNode=tempNode->parent;
                    // FIX: the original dereferenced parentNode (null for a
                    // root that is a leaf) and parentNode->right (null when
                    // the node is an only left child) unconditionally.
                    if (parentNode!=0)
                    {
                        if (parentNode->right!=0 && parentNode->right->ID==tempNode->ID)
                            parentNode->right = 0;
                        else
                            parentNode->left = 0;
                    }
                    delete tempNode;
                    tempNode=0;
                }
                else
                {
                    // FIX: re-queue the interior node *behind* its children
                    // so it is deleted after they are; the original dropped
                    // it here, leaking every non-leaf node.
                    nodeList.push_front(tempNode);
                    if(tempNode->right!=0) nodeList.push_front(tempNode->right);
                    if(tempNode->left!=0) nodeList.push_front(tempNode->left);
                }
            }
        }
        nodeList.clear();
    }
}
/**
 * Generates a buffer with the Network Info Nintendo tag.
 * This tag contains the first portion of the encrypted payload in the 802.11 beacon frame.
 * The encrypted payload contains information about the nodes currently connected to the network.
 * @returns A buffer with the first Nintendo encrypted data parameters of the beacon frame.
 */
std::vector<u8> GenerateNintendoFirstEncryptedDataTag(const NetworkInfo& network_info, const NodeList& nodes) {
    // This first tag carries at most EncryptedDataSizeCutoff bytes of the
    // per-node payload; anything beyond that belongs to the second tag.
    const size_t payload_size =
        std::min<size_t>(EncryptedDataSizeCutoff, nodes.size() * sizeof(NodeInfo));

    EncryptedDataTag tag{};
    tag.header.tag_id = static_cast<u8>(TagId::VendorSpecific);
    // Tag length covers everything after the generic tag header.
    tag.header.length = static_cast<u8>(sizeof(tag) - sizeof(TagHeader) + payload_size);
    tag.oui_type = static_cast<u8>(NintendoTagId::EncryptedData0);
    tag.oui = NintendoOUI;

    std::vector<u8> buffer(sizeof(tag) + payload_size);
    std::memcpy(buffer.data(), &tag, sizeof(tag));

    std::vector<u8> encrypted_data = GeneratedEncryptedData(network_info, nodes);
    // NOTE(review): assumes GeneratedEncryptedData returns at least
    // payload_size bytes; a shorter buffer would make this memcpy overread —
    // confirm against GeneratedEncryptedData.
    std::memcpy(buffer.data() + sizeof(tag), encrypted_data.data(), payload_size);

    return buffer;
}
// Reads every node from the stream, skipping unrecognised fields/blocks.
// Returns an error result when nothing loaded, the single node when exactly
// one was read, otherwise a named import group containing them all.
virtual ReadResult readNode(std::istream& fin, const Options* options) const
{
    loadWrappers();

    fin.imbue(std::locale::classic());

    Input fr;
    fr.attach(&fin);
    fr.setOptions(options);

    typedef std::vector<osg::Node*> NodeList;
    NodeList loaded;

    // load all nodes in file, placing them in a group.
    while(!fr.eof())
    {
        Node* next = fr.readNode();
        if (!next)
        {
            // Unparseable content: step over it and keep going.
            fr.advanceOverCurrentFieldOrBlock();
            continue;
        }
        loaded.push_back(next);
    }

    if (loaded.empty()) return ReadResult("No data loaded");
    if (loaded.size()==1) return loaded.front();

    Group* group = new Group;
    group->setName("import group");
    for(NodeList::iterator itr=loaded.begin(); itr!=loaded.end(); ++itr)
    {
        group->addChild(*itr);
    }
    return group;
}
// Loads every entry of fileList as a node file. Returns NULL when nothing
// loaded, the single node when exactly one file loaded, otherwise a new
// Group containing all loaded nodes.
Node* osgDB::readNodeFiles(std::vector<std::string>& fileList,const Options* options)
{
    typedef std::vector<osg::Node*> NodeList;
    NodeList loaded;

    std::vector<std::string>::iterator itr = fileList.begin();
    while (itr != fileList.end())
    {
        osg::Node* node = osgDB::readNodeFile( *itr , options );
        if (node != 0)
        {
            // Give unnamed nodes the filename they were loaded from.
            if (node->getName().empty()) node->setName( *itr );
            loaded.push_back(node);
        }
        ++itr;
    }

    if (loaded.empty()) return NULL;
    if (loaded.size()==1) return loaded.front();

    // More than one node: bundle them all under a single group.
    osg::Group* group = new osg::Group;
    for(NodeList::size_type i=0; i<loaded.size(); ++i)
    {
        group->addChild(loaded[i]);
    }
    return group;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////// ////// Given a residual graph with no path from the source and sink node, the sunction outputs an assignment list ////// which assigns each node to either belonging to the source tree or the sink tree. It performs breadth first ////// search on the residual graph to find the children of the source tress. Any remaining nodes are aressigned ////// to the sink node. ///////////////////////////////////////////////////////////////////////////////////////////////////////////// void ComputeAssignments(const Graph& graph, int source, int sink, vector<int>* assignments) { SearchGraph sgraph; assignments->clear(); const int nnodes = graph.size(); InitSearchGraph(&sgraph, nnodes, source); assignments->resize(nnodes, -1); NodeList nlist; nlist.push_back(source); int u, v; int i; SearchNode snode_u, snode_v; bool found = false; vector<int> nbr_nodes; while (nlist.size() > 0) { u = nlist[0]; (*assignments)[u] = 1; GetNeighboringNodes(u, graph, &nbr_nodes); snode_u = sgraph[u]; for (i = 0; i < (int) nbr_nodes.size(); i++) { v = nbr_nodes[i]; snode_v = sgraph[v]; if (snode_v.color == -1) { snode_v.color = 0; snode_v.dist = snode_u.dist + 1; snode_v.parent = u; sgraph[v] = snode_v; nlist.push_back(v); } } nlist.pop_front(); snode_u.color = 1; sgraph[u] = snode_u; } }
// Tries to compile node n merged with its children; if that fails, retries
// with progressively smaller subsets of the unsaved children merged in,
// returning as soon as one subset size succeeds.
void SplitTree::rdsTryMerge( SplitNode* n, SplitShaderHeuristics& outHeuristics )
{
    assert( n );

//    dumpFile << "TRY MERGE " << (void*)n << std::endl;
//    n->dump( dumpFile );
//    dumpFile << std::endl;

    // first try to merge with all children
    if( rdsCompile( n, outHeuristics ) )
        return;

//    dumpFile << "whole thing didn't work, trying to split" << std::endl;

    // count the number of unsaved kids
    size_t childCount = n->getGraphChildCount();
    NodeList unsavedChildren;
    for( size_t i = 0; i < childCount; i++ )
    {
        SplitNode* child = n->getIndexedGraphChild(i);
        if( !child->isMarkedAsSplit() )
            unsavedChildren.push_back( child );
    }

    size_t unsavedChildCount = unsavedChildren.size();
    assert( unsavedChildCount > 0 );

    // Try subset sizes from unsavedChildCount-1 down to 0.
    size_t subsetSize = unsavedChildCount;
    while( subsetSize-- > 0 )
    {
        // try to do merges with the given subset size
//        dumpFile << "trying merges of " << subsetSize << " of the " << unsavedChildCount << " children" << std::endl;
        if( rdsMergeSome( n, unsavedChildren, subsetSize, outHeuristics ) )
            return;
    }

    // Reached only if every subset size (including 0) failed to compile.
    assert( false );
}
// Builds a node (or group of nodes) from everything loadable on the command
// line: --file-cache, --image, --movie and --dem options plus any remaining
// bare filename arguments. Returns NULL when nothing could be loaded.
Node* osgDB::readNodeFiles(osg::ArgumentParser& arguments,const Options* options)
{
    typedef std::vector< osg::ref_ptr<osg::Node> > NodeList;
    NodeList nodeList;

    std::string filename;

    while (arguments.read("--file-cache",filename))
    {
        osgDB::Registry::instance()->setFileCache(new osgDB::FileCache(filename));
    }

    while (arguments.read("--image",filename))
    {
        osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), options);
        if (image.valid())
        {
            osg::Geode* geode = osg::createGeodeForImage(image.get());

            // Translucent images need blending and transparent-bin sorting.
            if (image->isImageTranslucent())
            {
                OSG_INFO<<"Image "<<image->getFileName()<<" is translucent; setting up blending."<<std::endl;
                geode->getOrCreateStateSet()->setMode(GL_BLEND, osg::StateAttribute::ON);
                geode->getOrCreateStateSet()->setRenderingHint(osg::StateSet::TRANSPARENT_BIN);
            }

            nodeList.push_back(geode);
        }
    }

    while (arguments.read("--movie",filename))
    {
        osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), options);
        osg::ref_ptr<osg::ImageStream> imageStream = dynamic_cast<osg::ImageStream*>(image.get());
        if (imageStream.valid())
        {
            // Flip texture coordinates when the image origin is top-left.
            bool flip = image->getOrigin()==osg::Image::TOP_LEFT;

            // start the stream playing.
            imageStream->play();

            osg::ref_ptr<osg::Geometry> pictureQuad = 0;

            bool useTextureRectangle = true;
            if (useTextureRectangle)
            {
                // TextureRectangle uses unnormalised (pixel) texture coords.
                pictureQuad = osg::createTexturedQuadGeometry(osg::Vec3(0.0f,0.0f,0.0f),
                                                   osg::Vec3(image->s(),0.0f,0.0f),
                                                   osg::Vec3(0.0f,0.0f,image->t()),
                                                   0.0f, flip ? image->t() : 0.0, image->s(), flip ? 0.0 : image->t());

                pictureQuad->getOrCreateStateSet()->setTextureAttributeAndModes(0,
                            new osg::TextureRectangle(image.get()),
                            osg::StateAttribute::ON);
            }
            else
            {
                // Texture2D uses normalised [0,1] texture coords.
                pictureQuad = osg::createTexturedQuadGeometry(osg::Vec3(0.0f,0.0f,0.0f),
                                                   osg::Vec3(image->s(),0.0f,0.0f),
                                                   osg::Vec3(0.0f,0.0f,image->t()),
                                                   0.0f, flip ? 1.0f : 0.0f , 1.0f, flip ? 0.0f : 1.0f);

                pictureQuad->getOrCreateStateSet()->setTextureAttributeAndModes(0,
                            new osg::Texture2D(image.get()),
                            osg::StateAttribute::ON);
            }

            if (pictureQuad.valid())
            {
                osg::ref_ptr<osg::Geode> geode = new osg::Geode;
                geode->addDrawable(pictureQuad.get());
                nodeList.push_back(geode.get());
            }
        }
        else if (image.valid())
        {
            // Not a stream: fall back to a static image geode.
            nodeList.push_back(osg::createGeodeForImage(image.get()));
        }
    }

    while (arguments.read("--dem",filename))
    {
        osg::HeightField* hf = readHeightFieldFile(filename.c_str(), options);
        if (hf)
        {
            osg::Geode* geode = new osg::Geode;
            geode->addDrawable(new osg::ShapeDrawable(hf));
            nodeList.push_back(geode);
        }
    }

    // note currently doesn't delete the loaded file entries from the command line yet...
    for(int pos=1; pos<arguments.argc(); ++pos)
    {
        if (!arguments.isOption(pos))
        {
            // not an option so assume string is a filename.
            osg::Node *node = osgDB::readNodeFile( arguments[pos], options);
            if(node)
            {
                if (node->getName().empty()) node->setName( arguments[pos] );
                nodeList.push_back(node);
            }
        }
    }

    if (nodeList.empty())
    {
        return NULL;
    }

    if (nodeList.size()==1)
    {
        // Single node: release ownership from the ref_ptr to the caller.
        return nodeList.front().release();
    }
    else // size >1
    {
        osg::Group* group = new osg::Group;
        for(NodeList::iterator itr=nodeList.begin(); itr!=nodeList.end(); ++itr)
        {
            group->addChild((*itr).get());
        }

        return group;
    }
}
// Loads all show content referenced on the command line (--image, --movie,
// --dem options plus bare filename arguments) and returns it as one node.
// Multiple nodes are wrapped in an osg::Switch showing the first child only,
// with a ShowEventHandler attached for slide navigation.
osg::ref_ptr<osg::Node> p3d::readShowFiles(osg::ArgumentParser& arguments,const osgDB::ReaderWriter::Options* options)
{
    osg::ref_ptr<osgDB::Options> local_options = createOptions(options);
    local_options->setOptionString("main");

    typedef std::vector< osg::ref_ptr<osg::Node> > NodeList;
    NodeList nodeList;

    std::string filename;
    while (arguments.read("--image",filename))
    {
        osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), local_options.get());
        if (image.valid()) nodeList.push_back(osg::createGeodeForImage(image.get()));
    }

    while (arguments.read("--movie",filename))
    {
        osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), local_options.get());
        osg::ref_ptr<osg::ImageStream> imageStream = dynamic_cast<osg::ImageStream*>(image.get());
        // FIX: test the downcast result, not the image. The original checked
        // image.valid() and then called imageStream->play(), dereferencing a
        // null pointer whenever the file loaded as a plain (non-stream) image.
        if (imageStream.valid())
        {
            imageStream->play();
            nodeList.push_back(osg::createGeodeForImage(imageStream.get()));
        }
    }

    while (arguments.read("--dem",filename))
    {
        osg::HeightField* hf = readHeightFieldFile(filename.c_str(), local_options.get());
        if (hf)
        {
            osg::Geode* geode = new osg::Geode;
            geode->addDrawable(new osg::ShapeDrawable(hf));
            nodeList.push_back(geode);
        }
    }

    // note currently doesn't delete the loaded file entries from the command line yet...
    for(int pos=1;pos<arguments.argc();++pos)
    {
        if (!arguments.isOption(pos))
        {
            // not an option so assume string is a filename.
            osg::Node *node = osgDB::readNodeFile( arguments[pos], local_options.get());
            if(node)
            {
                if (node->getName().empty()) node->setName( arguments[pos] );
                nodeList.push_back(node);
            }
        }
    }

    if (nodeList.empty())
    {
        return NULL;
    }

    osg::ref_ptr<osg::Node> root;
    if (nodeList.size()==1)
    {
        root = nodeList.front().get();
    }
    else // size >1
    {
        // Wrap everything in a switch that shows one child (slide) at a time.
        osg::Switch* sw = new osg::Switch;
        for(NodeList::iterator itr=nodeList.begin(); itr!=nodeList.end(); ++itr)
        {
            sw->addChild((*itr).get());
        }
        sw->setSingleChildOn(0);
        sw->setEventCallback(new p3d::ShowEventHandler());
        root = sw;
    }

    if (root.valid())
    {
        osg::notify(osg::INFO)<<"Got node now adding callback"<<std::endl;
        AddVolumeEditingCallbackVisitor avecv;
        root->accept(avecv);
    }

    return root;
}
//-----------------------------
// Builds a COLLADAFW::SkinController binding the controller data
// (`controllerDataUniqueId`) and mesh source (`sourceUniqueId`) to the joint
// nodes named by `sidsOrIds`, resolving each name either as an id or as a
// sid relative to the instance controller's skeleton roots. A previously
// written identical skin controller is reused instead of written again.
// Returns false when the controller data id is invalid or when an unresolved
// joint reference is reported as a fatal error by handleFWLError.
bool DocumentProcessor::createAndWriteSkinController( const Loader::InstanceControllerData& instanceControllerData,
                                                      const COLLADAFW::UniqueId& controllerDataUniqueId,
                                                      const COLLADAFW::UniqueId& sourceUniqueId,
                                                      const StringList& sidsOrIds,
                                                      bool resolveIds)
{
    if ( !controllerDataUniqueId.isValid() )
        return false;

    const URIList& skeletonRoots = instanceControllerData.skeletonRoots;
    NodeList joints;

    // Resolve every sid/id into a joint node.
    for ( StringList::const_iterator it = sidsOrIds.begin(); it != sidsOrIds.end(); ++it)
    {
        const String sidOrId = *it;
        bool jointFound = false;

        if ( resolveIds )
        {
            const SidTreeNode* joint = resolveSid( sidOrId );
            if ( joint )
            {
                // the joint could be found
                if ( joint->getTargetType() == SidTreeNode::TARGETTYPECLASS_OBJECT )
                {
                    const COLLADAFW::Object* object = joint->getObjectTarget();
                    if ( object->getClassId() == COLLADAFW::Node::ID() )
                    {
                        joints.push_back( (COLLADAFW::Node*)object );
                        jointFound = true;
                        //search for the next joint
                    }
                    else
                    {
                        // we could resolve the sid, but is not a joint/node
                    }
                }
                else
                {
                    // we could resolve the sid, but is not a joint/node
                }
            }
        }
        else
        {
            // Try the sid relative to each skeleton root until one resolves.
            for ( URIList::const_iterator skeletonIt = skeletonRoots.begin(); skeletonIt != skeletonRoots.end(); ++skeletonIt)
            {
                const COLLADABU::URI& skeletonUri = *skeletonIt;
                SidAddress sidAddress( skeletonUri, sidOrId );
                const SidTreeNode* joint = resolveSid( sidAddress );
                if ( joint )
                {
                    // the joint could be found
                    if ( joint->getTargetType() != SidTreeNode::TARGETTYPECLASS_OBJECT )
                    {
                        // we could resolve the sid, but is not a joint/node
                        break;
                    }
                    const COLLADAFW::Object* object = joint->getObjectTarget();
                    if ( object->getClassId() != COLLADAFW::Node::ID() )
                    {
                        // we could resolve the sid, but is not a joint/node
                        break;
                    }
                    joints.push_back( (COLLADAFW::Node*)object );
                    jointFound = true;
                    //search for the next joint
                    break;
                }
            }
        }

        if ( !jointFound )
        {
            // Report the unresolved reference; abort only if the error
            // handler deems it fatal.
            std::stringstream msg;
            msg << "Could not resolve " << (resolveIds ? "id" : "sid") << " \"";
            msg << sidOrId << "\" referenced in skin controller.";
            if ( handleFWLError( SaxFWLError::ERROR_UNRESOLVED_REFERENCE, msg.str() ))
            {
                return false;
            }
        }
    }

    COLLADAFW::SkinController skinController( createUniqueId(COLLADAFW::SkinController::ID()));

    // Copy the resolved joints' unique ids into the controller.
    COLLADAFW::UniqueIdArray &jointsUniqueIds = skinController.getJoints();
    jointsUniqueIds.allocMemory( joints.size() );
    jointsUniqueIds.setCount(joints.size());

    size_t i = 0;
    NodeList::const_iterator it = joints.begin();
    for ( ; it != joints.end(); ++it, ++i )
    {
        const COLLADAFW::Node* node = *it;
        jointsUniqueIds[i] = node->getUniqueId();
    }

    skinController.setSkinControllerData(controllerDataUniqueId);
    skinController.setSource(sourceUniqueId);

    bool success = true;
    // Check if we have already wrote a skin controller that describes the same controller, i.e. has same
    // source, skin data and joints. If so, do not write it again and reference the previously used in the
    // scene graph
    const COLLADAFW::SkinController* skinControllerToWrite = 0;
    Loader::SkinControllerSet::const_iterator skinControllerIt = mSkinControllerSet.find( skinController );
    if ( skinControllerIt == mSkinControllerSet.end() )
    {
        skinControllerToWrite = &skinController;
        success = writer()->writeController(skinControllerToWrite);
        mSkinControllerSet.insert( skinController );
    }
    else
    {
        skinControllerToWrite = &(*skinControllerIt);
    }

    instanceControllerData.instanceController->setInstanciatedObjectId( skinControllerToWrite->getUniqueId() );

    return success;
}
// Replaces the graph's node set with the given nodes.
// NOTE: existing nodes are cleared first, so this is a reset, not an append.
void Graph::addNodes(const NodeList& nodes)
{
    mNodes.clear();
    const size_t count = nodes.size();
    for (size_t idx = 0; idx != count; ++idx)
        addNode(nodes[idx]);
}
int Transformator::findBlockSplitIndex(const NodeList& nodes) { enum NodeMode { kMode_begin, kMode_let, kMode_onExit, kMode_other }; NodeMode mode = kMode_begin; for (size_t i = 0; i < nodes.size(); i++) { auto letnd = dynamic_cast<LetNode*>(nodes[i].get()); if (letnd) { switch (mode) { case kMode_begin: mode = kMode_let; break; case kMode_let: mode = kMode_let; break; case kMode_other: return i; case kMode_onExit: return i; } } else { auto onnd = dynamic_cast<OnNode*>(nodes[i].get()); if (onnd) { switch (mode) { case kMode_begin: case kMode_let: if (onnd->key() == Names::kSignalKeyword) mode = kMode_let; else if (onnd->key() == Names::kExitKeyword) mode = kMode_onExit; else mode = kMode_other; break; case kMode_other: if (onnd->key() == Names::kSignalKeyword) return i; else if (onnd->key() == Names::kExitKeyword) return i; else mode = kMode_other; break; case kMode_onExit: return i; } } else { mode = kMode_other; } } } return -1; }
/**
 * Loads the complete simulation scene described by an XML task file and
 * configures the singleton Engine from it.
 *
 * Phases, in order:
 *  1. Parse the <task/> element (snapshot counts) and load requested plugins.
 *  2. System-wide defaults: contact/border/rheology calculators, failure
 *     model, contact threshold, collision detector, mesh movement, time step
 *     multiplier, plasticity type, failure mode, matrix decomposition.
 *  3. Load the material library and task materials.
 *  4. First pass over <body/> elements: preload meshes, apply geometric
 *     transforms to local AABBs, feed outlines to the dispatcher, grow the
 *     global scene AABB.
 *  5. Prepare the dispatcher for all workers and sync outlines.
 *  6. Second pass over bodies: load and attach meshes, transform them, and
 *     assign materials (optionally restricted to areas).
 *  7. Apply initial state (explicit values or a P-wave), then border and
 *     contact conditions over user-specified areas.
 *  8. Build one rheology matrix per material and register a lookup lambda.
 *  9. Run plugin-specific task parsing.
 *
 * @param fileName          task file name, resolved via FileFolderLookupService
 * @param initialStateGroup when non-empty, only <initialState group="..">
 *                          entries matching this group are applied
 *
 * Throws THROW_INVALID_INPUT / THROW_INVALID_ARG on malformed configs and
 * THROW_UNSUPPORTED for unsupported options.  Appends to the member
 * usedMaterialsIds as a side effect.
 */
void launcher::Launcher::loadSceneFromFile(string fileName, string initialStateGroup)
{
    Engine& engine = Engine::getInstance();
    // FIXME should we validate task file against xml schema?
    auto& ffls = FileFolderLookupService::getInstance();
    string fname = ffls.lookupFile(fileName);
    LOG_DEBUG("Loading scene from file " << fname);
    // parse file
    Doc doc = Doc::fromFile(fname);
    xml::Node rootNode = doc.getRootElement();
    // read task parameters
    NodeList taskNodes = rootNode.xpath("/task");
    if( taskNodes.size() != 1 )
        THROW_INVALID_INPUT("Config file should contain one <task/> element");
    for(auto& taskNode: taskNodes)
    {
        int numberOfSnaps = lexical_cast<int>(taskNode["numberOfSnaps"]);
        int stepsPerSnap = lexical_cast<int>(taskNode["stepsPerSnap"]);
        engine.setNumberOfSnaps(numberOfSnaps);
        engine.setStepsPerSnap(stepsPerSnap);
    }
    NodeList loadPluginsList = rootNode.xpath("/task/system/loadPlugin");
    for (auto& plugin: loadPluginsList){
        engine.loadPlugin(plugin["name"]);
    }
    // reading system properties
    NodeList defaultContactCalculatorList = rootNode.xpath("/task/system/defaultContactCalculator");
    if( defaultContactCalculatorList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <defaultContactCalculator/> element");
    if( defaultContactCalculatorList.size() == 1 )
    {
        xml::Node defaultContactCalculator = defaultContactCalculatorList.front();
        string type = defaultContactCalculator["type"];
        if( engine.getContactCalculator(type) == NULL )
        {
            THROW_INVALID_INPUT("Unknown contact calculator requested: " + type);
        }
        // -1/-1 pulse form means "always active"; NULL area means "everywhere"
        engine.replaceDefaultContactCondition(
                new ContactCondition(NULL, new StepPulseForm(-1, -1),
                        engine.getContactCalculator(type) ) );
        LOG_INFO("Default contact calculator set to: " + type);
        if (type == "AdhesionContactDestroyCalculator")
        {
            real adhesionThreshold = lexical_cast<real>(defaultContactCalculator["adhesionThreshold"]);
            engine.getContactCondition(0)->setConditionParam(adhesionThreshold);
        }
        if (type == "ClosedFractureContactCalculator")
        {
            NodeList areaNodes = defaultContactCalculator.getChildrenByName("area");
            if (areaNodes.size() != 1)
                THROW_INVALID_INPUT("Exactly one area element can be provided for ClosedFractureCalculator");
            Area* area = readArea(areaNodes[0]);
            (static_cast<gcm::ClosedFractureContactCalculator*>
                    (engine.getContactCalculator(type)))->setFracArea(area);
        }
        if (type == "OpenFractureContactCalculator")
        {
            NodeList areaNodes = defaultContactCalculator.getChildrenByName("area");
            // NOTE(review): message still says "ClosedFractureCalculator" in the
            // open-fracture branch — looks like a copy/paste; confirm and fix message.
            if (areaNodes.size() != 1)
                THROW_INVALID_INPUT("Exactly one area element can be provided for ClosedFractureCalculator");
            Area* area = readArea(areaNodes[0]);
            (static_cast<gcm::OpenFractureContactCalculator*>
                    (engine.getContactCalculator(type)))->setFracArea(area);
        }
    }
    NodeList defaultBorderCalculatorList = rootNode.xpath("/task/system/defaultBorderCalculator");
    if( defaultBorderCalculatorList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <defaultBorderCalculator/> element");
    if( defaultBorderCalculatorList.size() == 1 )
    {
        xml::Node defaultBorderCalculator = defaultBorderCalculatorList.front();
        string type = defaultBorderCalculator["type"];
        if( engine.getBorderCalculator(type) == NULL )
        {
            THROW_INVALID_INPUT("Unknown border calculator requested: " + type);
        }
        engine.replaceDefaultBorderCondition(
                new BorderCondition(NULL, new StepPulseForm(-1, -1),
                        engine.getBorderCalculator(type) ) );
        LOG_INFO("Default border calculator set to: " + type);
    }
    NodeList defaultRheoCalculatorList = rootNode.xpath("/task/system/defaultRheologyCalculator");
    if( defaultRheoCalculatorList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <defaultRheologyCalculator/> element");
    if( defaultRheoCalculatorList.size() == 1 )
    {
        xml::Node defaultRheoCalculator = defaultRheoCalculatorList.front();
        string type = defaultRheoCalculator["type"];
        engine.setDefaultRheologyCalculatorType(type);
        LOG_INFO("Default rheology calculator set to: " + type);
    }
    NodeList defaultFailureModelList = rootNode.xpath("/task/system/defaultFailureModel");
    if( defaultFailureModelList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <defaultFailureModelList/> element");
    if( defaultFailureModelList.size() == 1 )
    {
        xml::Node defaultFailureModel = defaultFailureModelList.front();
        string type = defaultFailureModel["type"];
        engine.setDefaultFailureModelType(type);
        LOG_INFO("Default failure model set to: " + type);
    }
    NodeList contactThresholdList = rootNode.xpath("/task/system/contactThreshold");
    if( contactThresholdList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <contactThreshold/> element");
    if( contactThresholdList.size() == 1 )
    {
        xml::Node contactThreshold = contactThresholdList.front();
        string measure = contactThreshold["measure"];
        real value = lexical_cast<real>(contactThreshold["value"]);
        if( measure == "avgH" )
        {
            engine.setContactThresholdType(CONTACT_THRESHOLD_BY_AVG_H);
            engine.setContactThresholdFactor(value);
        }
        else if( measure == "lambdaTau" )
        {
            engine.setContactThresholdType(CONTACT_THRESHOLD_BY_MAX_LT);
            engine.setContactThresholdFactor(value);
        }
        else if( measure == "abs" )
        {
            engine.setContactThresholdType(CONTACT_THRESHOLD_FIXED);
            engine.setContactThresholdFactor(value);
        }
        else
        {
            THROW_INVALID_INPUT("Unknown units of measure for <contactThreshold/>");
        }
    }
    NodeList collisionDetectorList = rootNode.xpath("/task/system/collisionDetector");
    if( collisionDetectorList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <collisionDetector/> element");
    if( collisionDetectorList.size() == 1 )
    {
        xml::Node collisionDetector = collisionDetectorList.front();
        string isStatic = collisionDetector["static"];
        // Any value other than "true"/"false" silently leaves the engine default.
        if( isStatic == "true" )
        {
            engine.setCollisionDetectorStatic(true);
        }
        else if( isStatic == "false" )
        {
            engine.setCollisionDetectorStatic(false);
        }
    }
    NodeList meshMovementList = rootNode.xpath("/task/system/meshMovement");
    if( meshMovementList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <meshMovement/> element");
    if( meshMovementList.size() == 1 )
    {
        xml::Node meshMovement = meshMovementList.front();
        string meshMovementType = meshMovement["type"];
        if( meshMovementType == "none" )
        {
            engine.setMeshesMovable(false);
        }
    }
    NodeList timeStepList = rootNode.xpath("/task/system/timeStep");
    if( timeStepList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <timeStepList/> element");
    if( timeStepList.size() == 1 )
    {
        xml::Node timeStep = timeStepList.front();
        real value = lexical_cast<real>(timeStep["multiplier"]);
        engine.setTimeStepMultiplier(value);
        LOG_INFO("Using time step multiplier: " << value);
    }
    NodeList plasticityTypeList = rootNode.xpath("/task/system/plasticity");
    if( plasticityTypeList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <plasticity/> element");
    string plasticityType = PLASTICITY_TYPE_NONE;
    if( plasticityTypeList.size() == 1 )
    {
        plasticityType = plasticityTypeList.front()["type"];
    }
    NodeList failureModeList = rootNode.xpath("/task/system/failure");
    if( failureModeList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <failure/> element");
    string failureMode = FAILURE_MODE_DISCRETE;
    if( failureModeList.size() == 1 )
    {
        failureMode = failureModeList.front()["mode"];
    }
    string matrixDecompositionImplementation = "numerical";
    NodeList matrixDecompositionList = rootNode.xpath("/task/system/matrixDecomposition");
    if( matrixDecompositionList.size() > 1 )
        THROW_INVALID_INPUT("Config file can contain only one <matrixDecomposition/> element");
    if( matrixDecompositionList.size() == 1 )
    {
        xml::Node matrixDecomposition = matrixDecompositionList.front();
        matrixDecompositionImplementation = matrixDecomposition["implementation"];
    }
    LOG_INFO("Using matrix decomposition: " << matrixDecompositionImplementation);

    loadMaterialLibrary("materials");
    // reading materials
    loadMaterialsFromXml(rootNode.xpath("/task/materials/material"));

    AABB globalScene;
    // search for bodies
    NodeList bodyNodes = rootNode.xpath("/task/bodies/body");
    // prepare basic bodies parameters
    for(auto& bodyNode: bodyNodes)
    {
        string id = bodyNode.getAttributes()["id"];
        LOG_DEBUG("Loading body '" << id << "'");
        // create body instance
        Body* body = new Body(id);
        body->setRheologyCalculatorType(engine.getDefaultRheologyCalculatorType());
        // set rheology
        NodeList rheologyNodes = bodyNode.getChildrenByName("rheology");
        if (rheologyNodes.size() > 1)
            THROW_INVALID_INPUT("Only one rheology element allowed for body declaration");
        if (rheologyNodes.size())
        {
            // We can do smth here when we have more than one rheology calculators
        }
        // preload meshes for dispatcher
        NodeList meshNodes = bodyNode.getChildrenByName("mesh");
        for(auto& meshNode: meshNodes)
        {
            string type = meshNode["type"];
            LOG_INFO("Preparing mesh for body '" << id << "'");
            AABB localScene;
            int slicingDirection;
            int numberOfNodes;
            // Dispatch on the mesh type; each loader fills localScene,
            // slicingDirection and numberOfNodes as out-parameters.
            if (type == Geo2MeshLoader::MESH_TYPE)
                Geo2MeshLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else if (type == Msh2MeshLoader::MESH_TYPE)
                Msh2MeshLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else if (type == Ani3D2MeshLoader::MESH_TYPE)
                Ani3D2MeshLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else if (type == Vtu2MeshLoader::MESH_TYPE)
                Vtu2MeshLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else if (type == Vtu2MeshZoneLoader::MESH_TYPE)
                Vtu2MeshZoneLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else if (type == BasicCubicMeshLoader::MESH_TYPE)
                BasicCubicMeshLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else if (type == RectangularCutCubicMeshLoader::MESH_TYPE)
                RectangularCutCubicMeshLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else if (type == MarkeredMeshGeoLoader::MESH_TYPE)
                MarkeredMeshGeoLoader::getInstance().preLoadMesh(meshNode, localScene, slicingDirection, numberOfNodes);
            else
                THROW_UNSUPPORTED("Specified mesh loader is not supported");

            // transform meshes
            NodeList transformNodes = bodyNode.getChildrenByName("transform");
            for(auto& transformNode: transformNodes)
            {
                string transformType = transformNode["type"];
                if ( transformType == "translate" )
                {
                    real x = lexical_cast<real>(transformNode["moveX"]);
                    real y = lexical_cast<real>(transformNode["moveY"]);
                    real z = lexical_cast<real>(transformNode["moveZ"]);
                    LOG_DEBUG("Moving body: [" << x << "; " << y << "; " << z << "]");
                    localScene.transfer(x, y, z);
                }
                if ( transformType == "scale" )
                {
                    real x0 = lexical_cast<real>(transformNode["x0"]);
                    real y0 = lexical_cast<real>(transformNode["y0"]);
                    real z0 = lexical_cast<real>(transformNode["z0"]);
                    real scaleX = lexical_cast<real>(transformNode["scaleX"]);
                    real scaleY = lexical_cast<real>(transformNode["scaleY"]);
                    real scaleZ = lexical_cast<real>(transformNode["scaleZ"]);
                    LOG_DEBUG("Scaling body: [" << x0 << "; " << scaleX << "; "
                            << y0 << "; " << scaleY << "; " << z0 << "; " << scaleZ << "]");
                    localScene.scale(x0, y0, z0, scaleX, scaleY, scaleZ);
                }
            }
            LOG_DEBUG("Mesh preloaded. Mesh size: " << localScene << " Number of nodes: " << numberOfNodes);

            engine.getDispatcher()->addBodyOutline(id, localScene);
            engine.getDispatcher()->addBodySlicingDirection(id, slicingDirection);
            engine.getDispatcher()->addBodyNodesNumber(id, numberOfNodes);

            // Grow the global AABB; an infinite maxX marks a still-empty scene.
            if( isinf(globalScene.maxX) )
            {
                globalScene = localScene;
            }
            else
            {
                for( int k = 0; k < 3; k++ )
                {
                    if( globalScene.min_coords[k] > localScene.min_coords[k] )
                        globalScene.min_coords[k] = localScene.min_coords[k];
                    if( globalScene.max_coords[k] < localScene.max_coords[k] )
                        globalScene.max_coords[k] = localScene.max_coords[k];
                }
            }
        }
        // add body to scene
        engine.addBody(body);
    }
    engine.setScene(globalScene);
    LOG_DEBUG("Total scene: " << engine.getScene());
    // run dispatcher
    engine.getDispatcher()->prepare(engine.getNumberOfWorkers(), &globalScene);
    engine.getDataBus()->syncOutlines();
    for( int i = 0; i < engine.getNumberOfWorkers(); i++)
    {
        LOG_DEBUG("Area scheduled for worker " << i << ": " << *(engine.getDispatcher()->getOutline(i)));
    }
    // read meshes for all bodies
    for(auto& bodyNode: bodyNodes)
    {
        string id = bodyNode.getAttributes()["id"];
        LOG_DEBUG("Loading meshes for body '" << id << "'");
        // get body instance
        Body* body = engine.getBodyById(id);
        // FIXME - WA - we need this to determine isMine() correctly for moved points
        real dX = 0;
        real dY = 0;
        real dZ = 0;
        NodeList tmpTransformNodes = bodyNode.getChildrenByName("transform");
        for(auto& transformNode: tmpTransformNodes)
        {
            string transformType = transformNode["type"];
            if ( transformType == "translate" )
            {
                dX += lexical_cast<real>(transformNode["moveX"]);
                dY += lexical_cast<real>(transformNode["moveY"]);
                dZ += lexical_cast<real>(transformNode["moveZ"]);
            }
            if ( transformType == "scale" )
            {
                //real x0 = lexical_cast<real>(transformNode["x0"]);
                //real y0 = lexical_cast<real>(transformNode["y0"]);
                //real z0 = lexical_cast<real>(transformNode["z0"]);
                //real scaleX = lexical_cast<real>(transformNode["scaleX"]);
                //real scaleY = lexical_cast<real>(transformNode["scaleY"]);
                //real scaleZ = lexical_cast<real>(transformNode["scaleZ"]);
            }
        }
        engine.getDispatcher()->setTransferVector(dX, dY, dZ, id);
        // load meshes
        NodeList meshNodes = bodyNode.getChildrenByName("mesh");
        for(auto& meshNode: meshNodes)
        {
            LOG_INFO("Loading mesh for body '" << id << "'");
            string type = meshNode["type"];
            Mesh* mesh = nullptr;
            // NOTE(review): unlike the preload pass there is no final "else
            // THROW_UNSUPPORTED" here, so an unknown type would leave mesh ==
            // nullptr and crash on attach below — confirm whether that is
            // guarded by the first pass having already thrown.
            if (type == Geo2MeshLoader::MESH_TYPE)
                mesh = Geo2MeshLoader::getInstance().load(meshNode, body);
            else if (type == Msh2MeshLoader::MESH_TYPE)
                mesh = Msh2MeshLoader::getInstance().load(meshNode, body);
            else if (type == Ani3D2MeshLoader::MESH_TYPE)
                mesh = Ani3D2MeshLoader::getInstance().load(meshNode, body);
            else if (type == Vtu2MeshLoader::MESH_TYPE)
                mesh = Vtu2MeshLoader::getInstance().load(meshNode, body);
            else if (type == Vtu2MeshZoneLoader::MESH_TYPE)
                mesh = Vtu2MeshZoneLoader::getInstance().load(meshNode, body);
            else if (type == BasicCubicMeshLoader::MESH_TYPE)
                mesh = BasicCubicMeshLoader::getInstance().load(meshNode, body);
            else if (type == RectangularCutCubicMeshLoader::MESH_TYPE)
                mesh = RectangularCutCubicMeshLoader::getInstance().load(meshNode, body);
            else if (type == MarkeredMeshGeoLoader::MESH_TYPE)
                mesh = MarkeredMeshGeoLoader::getInstance().load(meshNode, body);
            LOG_INFO("Loaded mesh for body '" << id << "', started attaching to body");
            // attach mesh to body
            body->attachMesh(mesh);
            mesh->setBodyNum( engine.getBodyNum(id) );
            LOG_INFO("Mesh '" << mesh->getId() << "' of type '" << type << "' created. "
                    << "Number of nodes: " << mesh->getNodesNumber() << ".");
        }
        // transform meshes
        NodeList transformNodes = bodyNode.getChildrenByName("transform");
        for(auto& transformNode: transformNodes)
        {
            string transformType = transformNode["type"];
            if( transformType == "translate" )
            {
                real x = lexical_cast<real>(transformNode["moveX"]);
                real y = lexical_cast<real>(transformNode["moveY"]);
                real z = lexical_cast<real>(transformNode["moveZ"]);
                LOG_DEBUG("Moving body: [" << x << "; " << y << "; " << z << "]");
                body->getMeshes()->transfer(x, y, z);
            }
            if ( transformType == "scale" )
            {
                real x0 = lexical_cast<real>(transformNode["x0"]);
                real y0 = lexical_cast<real>(transformNode["y0"]);
                real z0 = lexical_cast<real>(transformNode["z0"]);
                real scaleX = lexical_cast<real>(transformNode["scaleX"]);
                real scaleY = lexical_cast<real>(transformNode["scaleY"]);
                real scaleZ = lexical_cast<real>(transformNode["scaleZ"]);
                LOG_DEBUG("Scaling body: [" << x0 << "; " << scaleX << "; "
                        << y0 << "; " << scaleY << "; " << z0 << "; " << scaleZ << "]");
                body->getMeshes()->scale(x0, y0, z0, scaleX, scaleY, scaleZ);
            }
        }
        // FIXME - Part of the WA above
        if( engine.getNumberOfWorkers() != 1 )
            engine.getDispatcher()->setTransferVector(/*-dX, -dY, -dZ,*/0, 0, 0, id);
        // set material properties
        NodeList matNodes = bodyNode.getChildrenByName("material");
        if (matNodes.size() < 1)
            THROW_INVALID_INPUT("Material not set");
        for(auto& matNode: matNodes)
        {
            // NOTE(review): this "id" shadows the body id declared above —
            // rename to materialId for clarity when touching this code.
            string id = matNode["id"];
            // FIXME this code seems to be dead
            //Material* mat = engine.getMaterial(id);
            Mesh* mesh = body->getMeshes();
            NodeList areaNodes = matNode.getChildrenByName("area");
            int matId = engine.getMaterialIndex(id);
            usedMaterialsIds.push_back(matId);
            if (areaNodes.size() == 0)
            {
                mesh->setRheology( matId );
            }
            else if (areaNodes.size() == 1)
            {
                Area* matArea = readArea(areaNodes.front());
                if(matArea == NULL)
                    THROW_INVALID_INPUT("Can not read area");
                mesh->setRheology( matId, matArea );
            }
            else
            {
                THROW_INVALID_INPUT("Only one or zero area elements are allowed for material");
            }
        }
        LOG_DEBUG("Body '" << id << "' loaded");
    }
    // Initial state: select all <initialState/> elements, or only those whose
    // group attribute matches initialStateGroup when one was requested.
    NodeList initialStateNodes = rootNode.xpath("/task/initialState" + (initialStateGroup == "" ? "" : "[@group=\"" + initialStateGroup + "\"]"));
    if (initialStateGroup != "" && initialStateNodes.size() == 0)
        THROW_INVALID_ARG("Initial state group not found");
    for(auto& initialStateNode: initialStateNodes)
    {
        NodeList areaNodes = initialStateNode.getChildrenByName("area");
        NodeList valuesNodes = initialStateNode.getChildrenByName("values");
        NodeList pWaveNodes = initialStateNode.getChildrenByName("pWave");
        if (areaNodes.size() == 0)
            THROW_INVALID_INPUT("Area element should be provided for initial state");
        if (valuesNodes.size() > 1)
            THROW_INVALID_INPUT("Only one values element allowed for initial state");
        if (pWaveNodes.size() > 1)
            THROW_INVALID_INPUT("Only one pWave element allowed for initial state");
        // Exactly one of <values/> and <pWave/> must be present.
        if ((valuesNodes.size() == 1 && pWaveNodes.size() == 1) || (valuesNodes.size() == 0 && pWaveNodes.size() == 0))
            THROW_INVALID_INPUT("You have to provide initial state by using exactly one tag of allowed ones: values, pWave");;
        auto useValues = valuesNodes.size() == 1;
        real values[9];
        std::function<void(CalcNode&)> setter;
        if (useValues)
        {
            // Explicit velocity/stress components; missing attributes default to 0.
            xml::Node valuesNode = valuesNodes.front();
            vector<string> names = {"vx", "vy", "vz", "sxx", "sxy", "sxz", "syy", "syz", "szz"};
            int i = 0;
            for (auto value_name: names)
            {
                string v = valuesNode.getAttributes()[value_name];
                values[i++] = v.empty() ? 0.0 : lexical_cast<real>(v);
            }
            LOG_DEBUG("Initial state values: "
                    << values[0] << " " << values[1] << " " << values[2] << " "
                    << values[3] << " " << values[4] << " " << values[5] << " "
                    << values[6] << " " << values[7] << " " << values[8] );
        }
        else
        {
            // P-wave initial state: direction ("x;y;z"), amplitude scale and
            // compression/rarefaction type are all mandatory.
            xml::Node pWaveNode = pWaveNodes.front();
            auto attrs = pWaveNode.getAttributes();
            auto direction = attrs["direction"];
            if (direction.empty())
                THROW_INVALID_INPUT("P-wave direction is not specified");
            vector<string> _direction;
            split(_direction, direction, is_any_of(";"));
            if (_direction.size() != 3)
                THROW_INVALID_INPUT("Invalid P-wave direction specified");
            auto dx = lexical_cast<real>(_direction[0]);
            auto dy = lexical_cast<real>(_direction[1]);
            auto dz = lexical_cast<real>(_direction[2]);
            Vector3 dir({dx, dy, dz});
            if (dx == 0.0 && dy == 0.0 && dz == 0.0)
                THROW_INVALID_INPUT("Invalid P-wave direction specified");
            auto scale = attrs["amplitudeScale"];
            if (scale.empty())
                THROW_INVALID_INPUT("P-wave amplitude scale is not specified");
            auto amplitudeScale = lexical_cast<real>(scale);
            if (amplitudeScale <= 0.0)
                THROW_INVALID_INPUT("P-wave amplitude must be positive");
            auto type = attrs["type"];
            if (type.empty())
                THROW_INVALID_INPUT("P-wave type is not specified");
            if (type != "compression" && type != "rarefaction")
                THROW_INVALID_INPUT("Invalid P-wave type specified");
            auto compression = type == "compression";
            // Capture by value: the setter outlives this loop iteration.
            setter = [=](CalcNode& node)
            {
                setIsotropicElasticPWave(node, dir, amplitudeScale, compression);
            };
        }
        for(auto& areaNode: areaNodes)
        {
            Area* stateArea = readArea(areaNode);
            if(stateArea == NULL)
                THROW_INVALID_INPUT("Can not read area");
            // The state is applied to every body; the area restricts which nodes change.
            for( int i = 0; i < engine.getNumberOfBodies(); i++ )
            {
                if (useValues)
                    engine.getBody(i)->setInitialState(stateArea, values);
                else
                    engine.getBody(i)->setInitialState(stateArea, setter);
                engine.getBody(i)->getMeshes()->processStressState();
            }
        }
    }
    // Border conditions.
    NodeList borderConditionNodes = rootNode.xpath("/task/borderCondition");
    for(auto& borderConditionNode: borderConditionNodes)
    {
        string calculator = borderConditionNode["calculator"];
        if( engine.getBorderCalculator(calculator) == NULL ) {
            THROW_INVALID_INPUT("Unknown border calculator requested: " + calculator);
        }
        // FIXME_ASAP: calculators became statefull
        engine.getBorderCalculator(calculator)->setParameters( borderConditionNode );
        // NOTE(review): lexical_cast<real> result narrowed into float — confirm intended.
        float startTime = lexical_cast<real>(borderConditionNode.getAttributeByName("startTime", "-1"));
        float duration = lexical_cast<real>(borderConditionNode.getAttributeByName("duration", "-1"));
        unsigned int conditionId = engine.addBorderCondition(
                new BorderCondition(NULL, new StepPulseForm(startTime, duration), engine.getBorderCalculator(calculator) )
        );
        LOG_INFO("Border condition created with calculator: " + calculator);
        NodeList areaNodes = borderConditionNode.getChildrenByName("area");
        if (areaNodes.size() == 0)
            THROW_INVALID_INPUT("Area should be specified for border condition");
        for(auto& areaNode: areaNodes)
        {
            Area* conditionArea = readArea(areaNode);
            if(conditionArea == NULL)
                THROW_INVALID_INPUT("Can not read area");
            for( int i = 0; i < engine.getNumberOfBodies(); i++ )
            {
                engine.getBody(i)->setBorderCondition(conditionArea, conditionId);
            }
        }
    }
    // Contact conditions.
    NodeList contactConditionNodes = rootNode.xpath("/task/contactCondition");
    for(auto& contactConditionNode: contactConditionNodes)
    {
        string calculator = contactConditionNode["calculator"];
        // NOTE(review): message says "border calculator" but this is the
        // contact-calculator branch — likely copy/paste.
        if( engine.getContactCalculator(calculator) == NULL ) {
            THROW_INVALID_INPUT("Unknown border calculator requested: " + calculator);
        }
        float startTime = lexical_cast<real>(contactConditionNode.getAttributeByName("startTime", "-1"));
        float duration = lexical_cast<real>(contactConditionNode.getAttributeByName("duration", "-1"));
        unsigned int conditionId = engine.addContactCondition(
                new ContactCondition(NULL, new StepPulseForm(startTime, duration), engine.getContactCalculator(calculator) )
        );
        if (calculator == "AdhesionContactDestroyCalculator")
        {
            real adhesionThreshold = lexical_cast<real>(contactConditionNode["adhesionThreshold"]);
            engine.getContactCondition(conditionId)->setConditionParam(adhesionThreshold);
        }
        LOG_INFO("Contact condition created with calculator: " + calculator);
        NodeList areaNodes = contactConditionNode.getChildrenByName("area");
        if (areaNodes.size() == 0)
            THROW_INVALID_INPUT("Area should be specified for contact condition");
        for(auto& areaNode: areaNodes)
        {
            Area* conditionArea = readArea(areaNode);
            if(conditionArea == NULL)
                THROW_INVALID_INPUT("Can not read area");
            for( int i = 0; i < engine.getNumberOfBodies(); i++ )
            {
                engine.getBody(i)->setContactCondition(conditionArea, conditionId);
            }
        }
    }
    // create rheology matrixes
    vector<RheologyMatrixPtr> matrices;
    for (int i = 0; i < engine.getNumberOfMaterials(); i++)
    {
        MaterialPtr material = engine.getMaterial(i);
        bool materialUsedInTask = (std::find(usedMaterialsIds.begin(), usedMaterialsIds.end(), i) != usedMaterialsIds.end());
        auto props = material->getPlasticityProperties();
        bool plasticityPropsPresent = ( (props[plasticityType].size() != 0) || (plasticityType == PLASTICITY_TYPE_NONE) );
        SetterPtr setter;
        DecomposerPtr decomposer;
        CorrectorPtr corrector;
        RheologyMatrixPtr matrix;
        if (material->isIsotropic())
        {
            if(materialUsedInTask)
            {
                LOG_INFO("Using \"" << plasticityType << "\" plasticity model "
                        << "and \"" + failureMode + "\" failure mode "
                        << "for isotropic material \"" << material->getName() << "\".");
                if( !plasticityPropsPresent )
                    THROW_UNSUPPORTED("Required plasticity properties were not found.");
            }
            if (plasticityType == PLASTICITY_TYPE_NONE)
            {
                corrector = nullptr;
                setter = makeSetterPtr<IsotropicRheologyMatrixSetter>();
                decomposer = makeDecomposerPtr<IsotropicRheologyMatrixDecomposer>();
            }
            else if (plasticityType == PLASTICITY_TYPE_PRANDTL_RAUSS)
            {
                corrector = nullptr;
                setter = makeSetterPtr<PrandtlRaussPlasticityRheologyMatrixSetter>();
                if (matrixDecompositionImplementation == "numerical")
                    decomposer = makeDecomposerPtr<NumericalRheologyMatrixDecomposer>();
                else
                    decomposer = makeDecomposerPtr<AnalyticalRheologyMatrixDecomposer>();
            }
            else if (plasticityType == PLASTICITY_TYPE_PRANDTL_RAUSS_CORRECTOR)
            {
                corrector = makeCorrectorPtr<IdealPlasticFlowCorrector>();
                setter = makeSetterPtr<IsotropicRheologyMatrixSetter>();
                decomposer = makeDecomposerPtr<IsotropicRheologyMatrixDecomposer>();
            }
            else
            {
                THROW_UNSUPPORTED("Plasticity type\"" + plasticityType + "\" is not supported.");
            }
            // NOTE(review): this failure-mode switch overwrites the
            // setter/decomposer/corrector chosen by the plasticity switch just
            // above — confirm that plasticity is really meant to be discarded here.
            if (failureMode == FAILURE_MODE_DISCRETE)
            {
                corrector = nullptr;
                setter = makeSetterPtr<IsotropicRheologyMatrixSetter>();
                decomposer = makeDecomposerPtr<IsotropicRheologyMatrixDecomposer>();
            }
            else if (failureMode == FAILURE_MODE_CONTINUAL)
            {
                corrector = nullptr;
                setter = makeSetterPtr<IsotropicDamagedRheologyMatrixSetter>();
                decomposer = makeDecomposerPtr<IsotropicRheologyMatrixDecomposer>();
            }
            else
            {
                THROW_UNSUPPORTED("Failure mode \"" + failureMode + "\" is not supported.");
            }
        }
        else
        {
            if(materialUsedInTask)
            {
                LOG_INFO("Using \"" << plasticityType << "\" plasticity model for anisotropic material \"" << material->getName() << "\".");
                if (plasticityType != PLASTICITY_TYPE_NONE)
                    LOG_WARN("Plasticity is not supported for anisotropic materials, using elastic instead.");
            }
            if (failureMode == FAILURE_MODE_DISCRETE)
            {
                corrector = nullptr;
                setter = makeSetterPtr<AnisotropicRheologyMatrixSetter>();
            }
            else if (failureMode == FAILURE_MODE_CONTINUAL)
            {
                corrector = nullptr;
                setter = makeSetterPtr<AnisotropicDamagedRheologyMatrixSetter>();
            }
            else
            {
                THROW_UNSUPPORTED("Failure mode \"" + failureMode + "\" is not supported.");
            }
            if( matrixDecompositionImplementation == "numerical" )
                decomposer = makeDecomposerPtr<NumericalRheologyMatrixDecomposer>();
            else
                decomposer = makeDecomposerPtr<AnalyticalRheologyMatrixDecomposer>();
        }
        matrices.push_back(makeRheologyMatrixPtr(material, setter, decomposer, corrector));
    }
    // NOTE(review): the lambda captures the local `matrices` by reference;
    // it must be copied/kept alive inside setRheologyMatrices — confirm.
    engine.setRheologyMatrices([&matrices](const CalcNode& node) -> RheologyMatrixPtr
    {
        return matrices[node.getMaterialId()];
    } );
    LOG_DEBUG("Running plugin-specific initializations");
    for (auto plugin: engine.getPlugins())
        plugin ->parseTask(doc);
    LOG_DEBUG("Scene loaded");
}
// Assemble a MarkerArray visualising the graph: one dot-list marker holding
// every node position (red), followed by one arrow marker per edge (white
// when the edge originates at node 0, blue otherwise).  Works through the
// file-scope marker/message objects and returns the filled array.
visualization_msgs::MarkerArray getVisualMsg (NodeList& pG, EdgeList& edgeList)
{
    marker_array.markers.clear();
    msg_arrow.points.clear();
    msg_dots.points.clear();

    int markerId = 0;

    // Marker 0: all graph nodes as a single dot list.
    msg_dots.id = markerId;
    msg_dots.ns = "nodes";
    msg_dots.scale.x = 1;
    msg_dots.scale.y = 1;
    msg_dots.scale.z = 1;
    msg_dots.color = red;
    for (unsigned int n = 0; n < pG.size(); ++n) {
        msg_point.x = pG[n].pose.pose.position.x;
        msg_point.y = pG[n].pose.pose.position.y;
        msg_point.z = pG[n].pose.pose.position.z;
        msg_dots.points.push_back(msg_point);
    }
    marker_array.markers.push_back(msg_dots);
    ++markerId;

    // One arrow marker per edge, tail at 'from' and head at 'to'.
    for (unsigned int e = 0; e < edgeList.size(); ++e) {
        Edge edge = edgeList[e];

        msg_arrow.id = markerId;
        msg_arrow.points.clear();
        msg_arrow.ns = "edges";
        msg_arrow.scale.x = 0.5;
        msg_arrow.scale.y = 1;
        msg_arrow.scale.z = 1;
        msg_arrow.color = (edge.from == 0) ? white : blue;

        // Tail point: source node position.
        msg_point.x = pG[edge.from].pose.pose.position.x;
        msg_point.y = pG[edge.from].pose.pose.position.y;
        msg_point.z = pG[edge.from].pose.pose.position.z;
        msg_arrow.points.push_back(msg_point);

        // Head point: destination node position.
        msg_point.x = pG[edge.to].pose.pose.position.x;
        msg_point.y = pG[edge.to].pose.pose.position.y;
        msg_point.z = pG[edge.to].pose.pose.position.z;
        msg_arrow.points.push_back(msg_point);

        marker_array.markers.push_back(msg_arrow);
        ++markerId;
    }

    return marker_array;
}
// Walks every statement in the basic block looking for A2_tfrsi instructions
// that materialise a global address, and tries to fold that address directly
// into each reached use (addressing-mode transformation).  Returns true if
// any instruction was rewritten.  Transfers whose every use was rewritten are
// queued in Deleted for later removal.
bool HexagonOptAddrMode::processBlock(NodeAddr<BlockNode *> BA) {
  bool Changed = false;
  for (auto IA : BA.Addr->members(*DFG)) {
    // Only statement nodes carry machine instructions.
    if (!DFG->IsCode<NodeAttrs::Stmt>(IA))
      continue;
    NodeAddr<StmtNode *> SA = IA;
    MachineInstr *MI = SA.Addr->getCode();
    // Candidate pattern: register <- transfer-immediate of a global address.
    if (MI->getOpcode() != Hexagon::A2_tfrsi || !MI->getOperand(1).isGlobal())
      continue;
    DEBUG(dbgs() << "[Analyzing A2_tfrsi]: " << *MI << "\n");
    DEBUG(dbgs() << "\t[InstrNode]: " << Print<NodeAddr<InstrNode *>>(IA, *DFG) << "\n");
    // Collect all non-phi uses actually reached by this def.
    NodeList UNodeList;
    getAllRealUses(SA, UNodeList);
    if (!allValidCandidates(SA, UNodeList))
      continue;
    short SizeInc = 0;
    unsigned DefR = MI->getOperand(0).getReg();
    InstrEvalMap InstrEvalResult;
    // Analyze all uses and calculate increase in size. Perform the optimization
    // only if there is no increase in size.
    if (!analyzeUses(DefR, UNodeList, InstrEvalResult, SizeInc))
      continue;
    if (SizeInc > CodeGrowthLimit)
      continue;
    // Set when at least one use could not be rewritten, so the transfer
    // instruction must be kept.
    bool KeepTfr = false;
    DEBUG(dbgs() << "\t[Total reached uses] : " << UNodeList.size() << "\n");
    DEBUG(dbgs() << "\t[Processing Reached Uses] ===\n");
    for (auto I = UNodeList.rbegin(), E = UNodeList.rend(); I != E; ++I) {
      NodeAddr<UseNode *> UseN = *I;
      assert(!(UseN.Addr->getFlags() & NodeAttrs::PhiRef) &&
             "Found a PhiRef node as a real reached use!!");
      NodeAddr<StmtNode *> OwnerN = UseN.Addr->getOwner(*DFG);
      MachineInstr *UseMI = OwnerN.Addr->getCode();
      // BBNum is only consumed by the DEBUG line; the cast keeps release
      // builds free of an unused-variable warning.
      unsigned BBNum = UseMI->getParent()->getNumber();
      (void)BBNum;
      DEBUG(dbgs() << "\t\t[MI <BB#" << BBNum << ">]: " << *UseMI << "\n");
      // Locate the operand of the use instruction that reads DefR.
      // NOTE(review): the scan bound is NumOperands - 1, so the last operand
      // is never inspected — confirm this exclusion is intentional.
      int UseMOnum = -1;
      unsigned NumOperands = UseMI->getNumOperands();
      for (unsigned j = 0; j < NumOperands - 1; ++j) {
        const MachineOperand &op = UseMI->getOperand(j);
        if (op.isReg() && op.isUse() && DefR == op.getReg())
          UseMOnum = j;
      }
      assert(UseMOnum >= 0 && "Invalid reached use!");
      if (InstrEvalResult[UseMI])
        // Change UseMI if replacement is possible.
        Changed |= xformUseMI(MI, UseMI, UseN, UseMOnum);
      else
        KeepTfr = true;
    }
    // All uses rewritten: the address transfer is now dead.
    if (!KeepTfr)
      Deleted.insert(MI);
  }
  return Changed;
}
Edges *MaxAcyclicSubgraph::find_subgraph() { Edges *Ea = new Edges; stack< NodeList::iterator > toRemove; NodeList *copy = copy_graph(); Node *node = NULL; COLA_ASSERT(!copy->empty()); COLA_ASSERT(!edges->empty()); #ifdef COPY_ADJ_DEBUG cout << "COPY OF MATRIX: " << endl; printNodes(copy); #endif // while the graph is not empty while (!copy->empty()) { COLA_ASSERT(toRemove.empty()); // do we have any sinks for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { // is is a sink if there are no outgoing edges node = *ni; if (node->outgoing.empty()) { #ifdef RUN_DEBUG cout << "vertex(" << node->id << ") is a SINK" << endl; #endif // append it's incoming edges to Ea for (unsigned j = 0; j < node->incoming.size(); j++) { #ifdef RUN_DEBUG cout << "Appending to Ea: Edge(" << node->incoming[j].first << ", " << node->incoming[j].second << ")" << endl; #endif Ea->push_back(node->incoming[j]); // find the edge from a vertex where the edge is outgoing Node *out = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->incoming[j].first) { out = (*copy)[q]; } } COLA_ASSERT(out != NULL); #ifdef RUN_DEBUG cout << "Searching through OUTGOING list for vertex(" << out->id << ")" << endl; #endif Edges::iterator oi; for (oi = out->outgoing.begin(); oi != out->outgoing.end(); oi++) { cola::Edge e = *oi; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->incoming[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*oi).first << ", " << (*oi).second << ") from OUTGOING list of vertex(" << out->id << ")" << endl; #endif out->outgoing.erase(oi); } // say that we want to remove this vertex from the graph. 
toRemove.push(ni); } } // remove all necessary vertices while (!toRemove.empty()) { copy->erase(toRemove.top()); toRemove.pop(); } COLA_ASSERT(toRemove.empty()); #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after SINKS removed): " << endl; printNodes(copy); #endif // do we have any isolated vertices for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { // is is an isolated vertice if there are no incoming or outgoing edges node = *ni; if (node->incoming.empty() && node->outgoing.empty()) { #ifdef RUN_DEBUG cout << "vertex(" << node->id << ") is ISOLATED" << endl; #endif // say that we want to remove this vertex from the graph. toRemove.push(ni); } } // remove all necessary vertices while (!toRemove.empty()) { copy->erase(toRemove.top()); toRemove.pop(); } COLA_ASSERT(toRemove.empty()); #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after isolated vertices removed): " << endl; printNodes(copy); #endif // do we have any sources for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { // is is a sink if there are no incoming edges node = *ni; if (node->incoming.empty()) { #ifdef RUN_DEBUG cout << "vertex(" << node->id << ") is a SOURCE" << endl; #endif // append it's outgoing edges to Ea for (unsigned j = 0; j < node->outgoing.size(); j++) { #ifdef RUN_DEBUG cout << "Appending to Ea: Edge(" << node->outgoing[j].first << ", " << node->outgoing[j].second << ")" << endl; #endif Ea->push_back(node->outgoing[j]); // find the edge from a vertex where the edge is incoming Node *in = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->outgoing[j].second) { in = (*copy)[q]; } } COLA_ASSERT(in != NULL); 
#ifdef RUN_DEBUG cout << "Searching through INCOMING list for vertex(" << in->id << ")" << endl; #endif Edges::iterator ii; for (ii = in->incoming.begin(); ii != in->incoming.end(); ii++) { cola::Edge e = *ii; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->outgoing[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*ii).first << ", " << (*ii).second << ") from INCOMING list of vertex(" << in->id << ")" << endl; #endif in->incoming.erase(ii); } // say that we want to remove this vertex from the graph. toRemove.push(ni); } } // remove all necessary vertices while (!toRemove.empty()) { copy->erase(toRemove.top()); toRemove.pop(); } COLA_ASSERT(toRemove.empty()); #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after SOURCES removed): " << endl; printNodes(copy); #endif // if the graph is not empty if (!copy->empty()) { // find the vertex with the highest degree of "source" int degree = -1000; NodeList::iterator theNode; for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { node = *ni; int t = node->outgoing.size() - node->incoming.size(); if (t > degree) { #ifdef RUN_DEBUG cout << "Sourceiest node: " << node->id << "(d:" << degree << ", t:" << t << ")" << endl; #endif degree = t; theNode = ni; } } // add this node's outgoing edges to Ea node = *theNode; for (unsigned j = 0; j < node->outgoing.size(); j++) { #ifdef RUN_DEBUG cout << "Appending to Ea: Edge(" << node->outgoing[j].first << ", " << node->outgoing[j].second << ")" << endl; #endif Ea->push_back(node->outgoing[j]); // find the edge from a vertex where the edge is incoming Node *in = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->outgoing[j].second) { in = (*copy)[q]; } } COLA_ASSERT(in != NULL); #ifdef RUN_DEBUG cout << 
"Searching through INCOMING list for vertex(" << in->id << ")" << endl; #endif Edges::iterator ii; for (ii = in->incoming.begin(); ii != in->incoming.end(); ii++) { cola::Edge e = *ii; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->outgoing[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*ii).first << ", " << (*ii).second << ") from INCOMING list of vertex(" << in->id << ")" << endl; #endif in->incoming.erase(ii); } // for all of the incoming edges this node possesses, delete then from other node's outgoing edge list for (unsigned j = 0; j < node->incoming.size(); j++) { // find the edge from a vertex where the edge is outgoing Node *out = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->incoming[j].first) { out = (*copy)[q]; } } COLA_ASSERT(out != NULL); #ifdef RUN_DEBUG cout << "Searching through OUTGOING list for vertex(" << out->id << ")" << endl; #endif Edges::iterator oi; for (oi = out->outgoing.begin(); oi != out->outgoing.end(); oi++) { cola::Edge e = *oi; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->incoming[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*oi).first << ", " << (*oi).second << ") from OUTGOING list of vertex(" << out->id << ")" << endl; #endif out->outgoing.erase(oi); } // delete this vertex copy->erase(theNode); } #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after SOURCIEST node removed): " << endl; printNodes(copy); #endif } // delete the copy if (copy != NULL) { for (unsigned i = 0; i < copy->size(); i++) { if ((*copy)[i] != NULL) { delete (*copy)[i]; } } delete copy; } #ifdef EA_DEBUG cout << "Returning EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", 
" << (*Ea)[i].second << ") "; } cout << endl; #endif return Ea; }
NodeList* Pathfinder::PathBetweenPoints(int x1, int y1, int x2, int y2) { // Set up all the data structures we need, lots o' stuff NodeList Q; PreviousNodeMap prev; PopulateListWithNodes(Q); Node* source = m_nodeMap[x1][y1]; Node* dest = m_nodeMap[x2][y2]; // Make sure source and dest are in Q if(find(Q.begin(), Q.end(), source) == Q.end()) { Q.push_back(source); } if(find(Q.begin(), Q.end(), dest) == Q.end()) { Q.push_back(dest); } ResetNodes(Q, x2, y2); source->SetDistance(0); while(Q.size() > 0) { Q.sort(NodesByScore); Node* u = Q.front(); if(u == dest) { // found our node, break! break; } if(u->GetDistance() == NODE_INFINITY) { // In this case, no valid path from point 1 to point 2 return NULL; } // Remove it from the unvisited queue Q.remove(u); // Update its neighbors int x = u->GetX(); int y = u->GetY(); if(x - 1 >= 0 && m_nodeMap[x-1][y]) { Node* toUpdate = m_nodeMap[x-1][y]; if(u->GetDistance() + 1 < toUpdate->GetDistance()) { prev[toUpdate] = u; toUpdate->SetDistance(u->GetDistance() + 1); } } if(x + 1 < m_currentLevel->GetWidth() && m_nodeMap[x+1][y]) { Node* toUpdate = m_nodeMap[x+1][y]; if(u->GetDistance() + 1 < toUpdate->GetDistance()) { prev[toUpdate] = u; toUpdate->SetDistance(u->GetDistance() + 1); } } if(y - 1 >= 0 && m_nodeMap[x][y-1]) { Node* toUpdate = m_nodeMap[x][y-1]; if(u->GetDistance() + 1 < toUpdate->GetDistance()) { prev[toUpdate] = u; toUpdate->SetDistance(u->GetDistance() + 1); } } if(y + 1 < m_currentLevel->GetHeight() && m_nodeMap[x][y+1]) { Node* toUpdate = m_nodeMap[x][y+1]; if(u->GetDistance() + 1 < toUpdate->GetDistance()) { prev[toUpdate] = u; toUpdate->SetDistance(u->GetDistance() + 1); } } } // Prep the list of path nodes to send back NodeList* toReturn = new NodeList(); Node* next = prev[dest]; toReturn->push_back(next); while(prev.find(next) != prev.end() && prev[next] != source) { next = prev[next]; toReturn->push_back(next); } return toReturn; }