// Produces a valid execution order for jobs 1..n given their dependencies.
// Returns false if the dependency graph contains a cycle.
bool jobSchedule(const map<JobID, vector<JobID> > &deps, int n,
                 vector<JobID> &result)
{
    int index(0);
    for (JobID i = static_cast<JobID>(1); i <= n; ++i) {
        IndexMapIt it = indexMap.find(i);
        // Only start a depth-first visit from jobs not seen before.
        if (it == indexMap.end()) {
            if (!process(i, index, deps, result))
                return false;
        }
    }
    return true;
}
// Returns the submesh index of a vertex equivalent to ref. If a matching
// vertex was already stored for this source index it is reused; otherwise
// ref is appended and its new index is recorded in m_indexMap.
unsigned int getVertexIndex(gkSubMesh* sub, unsigned int index, const gkVertex& ref)
{
    UTsize i    = m_indexMap.find((int)index),
           fnd  = UT_NPOS,
           size = sub->m_verts.size();

    if (i != UT_NPOS)
    {
        // A vertex was already stored for this source index; reuse it only
        // if it is actually equal to ref.
        UTsize sp = m_indexMap.at(i);
        if (sp < size && vertEq(sub, sub->m_verts.at(sp), ref))
            fnd = sp;
    }

    if (fnd == UT_NPOS)
    {
        // New vertex: grow the bounds, append it and remember its index.
        sub->m_bounds.merge(ref.co);
        sub->m_verts.push_back(ref);
        m_indexMap.insert((int)index, size);
        return (unsigned int)size;
    }
    return (unsigned int)fnd;
}
SpeckleyNodes::SpeckleyNodes(SpeckleyNodes_ptr fullNodes, IntVec& requiredNodes,
                             const string& meshName) :
    name(meshName)
{
    numDims = fullNodes->numDims;
    nodeDist = fullNodes->nodeDist;
    globalNumNodes = fullNodes->globalNumNodes;

    // first: find the unique set of required nodes and their IDs while
    // updating the contents of requiredNodes at the same time
    // requiredNodes contains node indices (not IDs!)
    IntVec::iterator it;
    IndexMap indexMap; // maps old index to new index
    size_t newIndex = 0;

    for (it = requiredNodes.begin(); it != requiredNodes.end(); it++) {
        IndexMap::iterator res = indexMap.find(*it);
        if (res == indexMap.end()) {
            nodeID.push_back(fullNodes->nodeID[*it]);
            nodeTag.push_back(fullNodes->nodeTag[*it]);
            indexMap[*it] = newIndex;
            *it = newIndex++;
        } else {
            *it = res->second;
        }
    }

    // second: now that we know how many nodes we need use the map to fill
    // the coordinates
    numNodes = newIndex;
    for (int dim = 0; dim < numDims; dim++) {
        const float* origC = fullNodes->coords[dim];
        float* c = new float[numNodes];
        coords.push_back(c);
        IndexMap::const_iterator mIt;
        for (mIt = indexMap.begin(); mIt != indexMap.end(); mIt++) {
            c[mIt->second] = origC[mIt->first];
        }
    }
}
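// A small standalone sketch of the remapping idiom used in the constructor
// above: walk a list of (possibly repeated) old indices, give each distinct
// one the next compact index, and rewrite the list in place. This is not the
// SpeckleyNodes code itself; all names here are illustrative assumptions.
#include <map>
#include <vector>
#include <cstddef>
#include <iostream>

int main()
{
    std::vector<int> required;               // old indices, with repeats
    required.push_back(7);
    required.push_back(3);
    required.push_back(7);

    std::map<int, std::size_t> indexMap;     // old index -> new compact index
    std::size_t newIndex = 0;
    for (std::size_t i = 0; i < required.size(); ++i) {
        std::map<int, std::size_t>::iterator res = indexMap.find(required[i]);
        if (res == indexMap.end()) {
            indexMap[required[i]] = newIndex;
            required[i] = static_cast<int>(newIndex++);
        } else {
            required[i] = static_cast<int>(res->second);
        }
    }

    // required is now {0, 1, 0}; indexMap records where each old index went,
    // which is what the second pass of the constructor uses to copy coordinates.
    for (std::size_t i = 0; i < required.size(); ++i)
        std::cout << required[i] << ' ';
    std::cout << '\n';
    return 0;
}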
// Depth-first visit of a single job. Marks the job as visited in indexMap,
// keeps the current DFS path in st/st_set to detect cycles, and appends the
// job to result only after all of its dependencies have been emitted.
bool process(const JobID id, int& index,
             const map<JobID, vector<JobID> > &deps,
             vector<JobID> &result)
{
    JobIndex jobIndex(index, index);
    indexMap.insert(make_pair(id, jobIndex));
    ++index;
    st.push(id);
    st_set.insert(id);

    map<JobID, vector<JobID> >::const_iterator jobIt = deps.find(id);
    if (jobIt != deps.end()) {
        for (int i = 0, len = jobIt->second.size(); i < len; ++i) {
            JobID currId = (jobIt->second)[i];
            IndexMapIt mapIt = indexMap.find(currId);
            if (mapIt == indexMap.end()) {
                // Unvisited dependency: recurse into it first.
                if (!process(currId, index, deps, result))
                    return false;
            } else if (st_set.find(currId) != st_set.end()) {
                // Dependency is already on the current DFS path: cycle.
                return false;
            }
        }
    }

    result.push_back(id);
    st.pop();
    st_set.erase(id);
    return true;
}
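// The two routines above are member functions of an unshown scheduler class.
// Below is a minimal, self-contained sketch of the same technique (DFS-based
// topological sort with cycle detection), not the original class; function
// names and the main() driver are assumptions for illustration only.
#include <map>
#include <set>
#include <vector>
#include <cstddef>
#include <iostream>

typedef int JobID;

// Returns false if a cycle is reachable from id.
static bool visit(JobID id,
                  const std::map<JobID, std::vector<JobID> >& deps,
                  std::set<JobID>& done,
                  std::set<JobID>& onStack,
                  std::vector<JobID>& order)
{
    onStack.insert(id);
    std::map<JobID, std::vector<JobID> >::const_iterator it = deps.find(id);
    if (it != deps.end()) {
        for (std::size_t i = 0; i < it->second.size(); ++i) {
            JobID dep = it->second[i];
            if (onStack.count(dep))                  // back edge -> cycle
                return false;
            if (!done.count(dep) && !visit(dep, deps, done, onStack, order))
                return false;
        }
    }
    onStack.erase(id);
    done.insert(id);
    order.push_back(id);                             // dependencies precede id
    return true;
}

bool jobScheduleSketch(const std::map<JobID, std::vector<JobID> >& deps,
                       int n, std::vector<JobID>& order)
{
    std::set<JobID> done, onStack;
    for (JobID i = 1; i <= n; ++i)
        if (!done.count(i) && !visit(i, deps, done, onStack, order))
            return false;
    return true;
}

int main()
{
    std::map<JobID, std::vector<JobID> > deps;
    deps[3].push_back(1);                            // job 3 depends on jobs 1 and 2
    deps[3].push_back(2);
    std::vector<JobID> order;
    if (jobScheduleSketch(deps, 3, order))
        for (std::size_t i = 0; i < order.size(); ++i)
            std::cout << order[i] << ' ';            // prints a valid order, e.g. "1 2 3"
    std::cout << '\n';
    return 0;
}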
// Load the shape with vertices and faces from the OBJ file.
// Returns ST_ERROR on failure, ST_OK on success.
STStatus STShape::LoadOBJ(const std::string& filename)
{
    static const int kMaxLine = 256;

    // The subset of the OBJ format that we handle has
    // the following commands:
    //
    //   v  <x> <y> <z>          Define a vertex position.
    //   vn <x> <y> <z>          Define a vertex normal.
    //   vt <s> <t>              Define a vertex texture coordinate.
    //   f  <p0>/<t0>/<n0> ...   Define a face from previous data.
    //
    // Every face in an OBJ file refers to previously-defined
    // positions, normals and texture coordinates by index.
    // Vertices in an STShape must define all three of these,
    // so we must generate one STShape vertex for each combination
    // of indices we see in the OBJ file.
    //

    //
    // Open the file.
    //
    FILE* file = fopen(filename.c_str(), "r");
    if (!file) {
        fprintf(stderr, "STShape::LoadOBJ() - Could not open shape file '%s'.\n",
                filename.c_str());
        return ST_ERROR;
    }

    char lineBuffer[kMaxLine];
    int lineIndex = 0; // for printing error messages

    // Arrays to collect the positions, normals and texture
    // coordinates that we encounter.
    std::vector<STPoint3> positions;
    std::vector<STPoint2> texCoords;
    std::vector<STVector3> normals;

    // Map to point us to previously-created vertices. This maps
    // triples of indices (a position, texcoord and normal index)
    // to a single index in the new shape.
    typedef std::pair<int, int> IntPair;
    typedef std::pair<int, IntPair> IntTriple;
    typedef std::map<IntTriple, size_t> IndexMap;
    IndexMap indexMap;

    // Keep track of whether the file contained normals...
    bool needsNormals = false;

    //
    // Read the file line-by-line
    //
    while (fgets(lineBuffer, kMaxLine, file)) {
        ++lineIndex;
        char* str = strtok(lineBuffer, " \t\n\r");

        //
        // Skip empty or comment lines.
        //
        if (!str || str[0] == '\0' || str[0] == '#')
            continue;
        if (str[0] == 'g' || str[0] == 's' ||
            strcmp(str, "usemtl") == 0 || strcmp(str, "mtllib") == 0)
            continue;

        //
        // Process other lines based on their commands.
        //
        if (strcmp(str, "v") == 0) {
            // Vertex position line. Read the position data (x, y, z).
            str = strtok(NULL, "");
            STPoint3 position;
            sscanf(str, "%f %f %f\n", &position.x, &position.y, &position.z);
            positions.push_back(position);
        }
        else if (strcmp(str, "vt") == 0) {
            // Vertex texture coordinate line. Read the texture coord data.
            str = strtok(NULL, "");
            STPoint2 texCoord;
            sscanf(str, "%f %f\n", &texCoord.x, &texCoord.y);
            texCoords.push_back(texCoord);
        }
        else if (strcmp(str, "vn") == 0) {
            // Vertex normal line. Read the normal data.
            str = strtok(NULL, "");
            STVector3 normal;
            sscanf(str, "%f %f %f\n", &normal.x, &normal.y, &normal.z);
            normals.push_back(normal);
        }
        else if (strcmp(str, "f") == 0) {
            // Face command. Each vertex in the face will be defined by
            // the indices of its position, texture coordinate and
            // normal.
            std::vector<Index> faceIndices;

            // Read each vertex entry.
            int curIndex = 0;
            Index indices[3];
            enum FaceFormat {
                PosTexNorm,  // %d/%d/%d
                PosTex,      // %d/%d
                PosNorm,     // %d//%d
                Pos,         // %d
                LAST_FACE_FORMAT
            };
            const char* FaceFormatToString[LAST_FACE_FORMAT] = {
                "Position/Texture/Normal",
                "Position/Texture",
                "Position//Normal",
                "Position",
            };
            bool set_format = false;
            FaceFormat format;
            const int kNoTextureIndex = -1;
            const int kNoNormalIndex = -1;
            int positionIdx;
            int texCoordIdx;
            int normalIdx;

            while ((str = strtok(NULL, " \t\n\r")) != NULL) {
                if (sscanf(str, "%d/%d/%d", &positionIdx, &texCoordIdx, &normalIdx) == 3) {
                    if (!set_format) {
                        format = PosTexNorm;
                        set_format = true;
                    } else if (format != PosTexNorm) {
                        fprintf(stderr, "STShape::LoadOBJ() - "
                                "Line %d: Current face format is %s, but received another vertex in format %s\n",
                                lineIndex, FaceFormatToString[format],
                                FaceFormatToString[PosTexNorm]);
                    }
                }
                else if (sscanf(str, "%d/%d", &positionIdx, &texCoordIdx) == 2) {
                    if (!set_format) {
                        format = PosTex;
                        set_format = true;
                    } else if (format != PosTex) {
                        fprintf(stderr, "STShape::LoadOBJ() - "
                                "Line %d: Current face format is %s, but received another vertex in format %s\n",
                                lineIndex, FaceFormatToString[format],
                                FaceFormatToString[PosTex]);
                    }
                }
                else if (sscanf(str, "%d//%d", &positionIdx, &normalIdx) == 2) {
                    if (!set_format) {
                        format = PosNorm;
                        set_format = true;
                    } else if (format != PosNorm) {
                        fprintf(stderr, "STShape::LoadOBJ() - "
                                "Line %d: Current face format is %s, but received another vertex in format %s\n",
                                lineIndex, FaceFormatToString[format],
                                FaceFormatToString[PosNorm]);
                    }
                    // Pass
                }
                else if (sscanf(str, "%d", &positionIdx) == 1) {
                    if (!set_format) {
                        format = Pos;
                        set_format = true;
                    } else if (format != Pos) {
                        fprintf(stderr, "STShape::LoadOBJ() - "
                                "Line %d: Current face format is %s, but received another vertex in format %s\n",
                                lineIndex, FaceFormatToString[format],
                                FaceFormatToString[Pos]);
                    }
                }
                else {
                    // TODO(boulos): Print out line #?
                    fprintf(stderr, "STShape::LoadOBJ() - "
                            "Line %d: Bad face format given %s\n", lineIndex, str);
                    continue;
                }

                //
                // We look to see if we have already created a vertex
                // based on this position/texCoord/normal, and reuse it
                // if possible. Otherwise we add a new vertex.
                //
                positionIdx = OBJIndexing(positionIdx, positions.size());
                texCoordIdx = (format == PosTexNorm || format == PosTex)
                                  ? OBJIndexing(texCoordIdx, texCoords.size())
                                  : kNoTextureIndex;
                normalIdx   = (format == PosTexNorm || format == PosNorm)
                                  ? OBJIndexing(normalIdx, normals.size())
                                  : kNoNormalIndex;

                size_t newIndex;
                IntTriple key(positionIdx, IntPair(texCoordIdx, normalIdx));
                IndexMap::const_iterator ii = indexMap.find(key);
                if (ii != indexMap.end()) {
                    newIndex = ii->second;
                } else {
                    // Construct a new vertex from the indices given.
                    STPoint3 position = positions[positionIdx];
                    STVector3 normal = (normalIdx != kNoNormalIndex)
                                           ? normals[normalIdx] : STVector3::Zero;
                    STPoint2 texCoord = (texCoordIdx != kNoTextureIndex)
                                            ? texCoords[texCoordIdx] : STPoint2::Origin;

                    // If the vertex has no normal, then remember
                    // to create normals later...
                    if (normalIdx == kNoNormalIndex) {
                        needsNormals = true;
                    }

                    Vertex newVertex(position, normal, texCoord);
                    newIndex = AddVertex(newVertex);
                    indexMap[key] = newIndex;
                }

                indices[curIndex++] = newIndex;
                // Keep triangle fanning
                if (curIndex == 3) {
                    AddFace(Face(indices[0], indices[1], indices[2]));
                    indices[1] = indices[2];
                    curIndex = 2;
                }
            }
        }
        else {
            //
            // Unknown line - ignore and print a warning.
            //
            fprintf(stderr, "STShape::LoadOBJ() - "
                    "Unable to parse line %d: %s (continuing)\n", lineIndex, lineBuffer);
        }
    }
    fclose(file);

    //
    // If the file didn't already have normals, then generate them.
    //
    GenerateNormals();

    return ST_OK;
}
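// A minimal standalone sketch of the composite-key deduplication used by
// LoadOBJ above: each (position, texcoord, normal) index triple maps to one
// merged vertex index, so face corners that repeat a triple reuse the same
// vertex. This is not the loader itself; the driver and data are illustrative.
#include <map>
#include <utility>
#include <cstddef>
#include <iostream>

typedef std::pair<int, int> IntPair;
typedef std::pair<int, IntPair> IntTriple;        // (position, (texcoord, normal))

int main()
{
    std::map<IntTriple, std::size_t> indexMap;
    std::size_t nextVertex = 0;

    int corners[2][3] = { {0, 0, 0}, {0, 0, 0} };  // two corners with identical indices
    for (int f = 0; f < 2; ++f) {
        IntTriple key(corners[f][0], IntPair(corners[f][1], corners[f][2]));

        std::size_t vertex;
        std::map<IntTriple, std::size_t>::const_iterator it = indexMap.find(key);
        if (it != indexMap.end()) {
            vertex = it->second;                   // triple seen before: reuse the vertex
        } else {
            vertex = nextVertex++;                 // new combination: allocate a new vertex
            indexMap[key] = vertex;
        }
        std::cout << "corner " << f << " -> vertex " << vertex << '\n';  // both map to vertex 0
    }
    return 0;
}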
void SplitGeoRec::feedResults(OSG::UInt32 part, GeoReceiver *obj,
                              pntRec pntfunc, triRec trifunc)
{
    char name[_basename.size() + 20];
    sprintf(name, "%s_%d.ooc", _basename.c_str(), part);

    std::ifstream s(name, std::ios::in | std::ios::binary);

    OSG::UInt32 vcnt = 0, tcnt = 0;

#ifdef OSG_STL_HAS_HASH_MAP
    typedef OSG_STDEXTENSION_NAMESPACE::hash_map<OSG::UInt32, OSG::UInt32> IndexMap;
#else
    typedef std::map<OSG::UInt32, OSG::UInt32> IndexMap;
#endif

    IndexMap indexMap;

    while(s.good() && !s.eof())
    {
        OSG::UInt8 rec;
        s.read(reinterpret_cast<char*>(&rec), sizeof(rec));

        if(rec == PNT_RECORD)
        {
            OSG::Pnt3f pnt;
            OSG::UInt32 ind;

            s.read(reinterpret_cast<char*>(&ind), sizeof(ind));
            s.read(reinterpret_cast<char*>(&pnt), sizeof(Pnt3f));

            pntfunc(obj, pnt);

            FDEBUG(("SplitGeoRec::feedResults: PNT(%d): added as %d\n",
                    vcnt, ind));

            indexMap[ind] = vcnt++;
        }
        else if(rec == TRI_RECORD)
        {
            OSG::UInt32 i1, i2, i3, mat;

            s.read(reinterpret_cast<char*>(&i1), sizeof(i1));
            s.read(reinterpret_cast<char*>(&i2), sizeof(i2));
            s.read(reinterpret_cast<char*>(&i3), sizeof(i3));
            s.read(reinterpret_cast<char*>(&mat), sizeof(mat));

            FDEBUG(("SplitGeoRec::feedResults: TRI(%d): adding "
                    "%d(%d) %d(%d) %d(%d) (mat %d)\n", tcnt,
                    i1, (indexMap.find(i1) != indexMap.end()) ? indexMap[i1] : 888888888,
                    i2, (indexMap.find(i2) != indexMap.end()) ? indexMap[i2] : 888888888,
                    i3, (indexMap.find(i3) != indexMap.end()) ? indexMap[i3] : 888888888,
                    mat));

            if(indexMap.find(i1) == indexMap.end())
            {
                FWARNING(("SplitGeoRec::feedResults: TRI(%d): Couldn't "
                          "find %d in the indexMap!\n", tcnt, i1));
            }
            if(indexMap.find(i2) == indexMap.end())
            {
                FWARNING(("SplitGeoRec::feedResults: TRI(%d): Couldn't "
                          "find %d in the indexMap!\n", tcnt, i2));
            }
            if(indexMap.find(i3) == indexMap.end())
            {
                FWARNING(("SplitGeoRec::feedResults: TRI(%d): Couldn't "
                          "find %d in the indexMap!\n", tcnt, i3));
            }

            trifunc(obj, indexMap[i1], indexMap[i2], indexMap[i3], mat);
            tcnt++;
        }
        else
        {
            FWARNING(("SplitGeoRec::feedResult: unknown record type '%d'!\n",
                      rec));
        }
    }
}
QString QSqlResultPrivate::holderAt(int index) const
{
    return indexes.key(index);
}
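// holderAt() above maps a positional index back to the placeholder name via a
// reverse lookup. The sketch below illustrates that QMap::key() idiom only; it
// is not Qt's actual internal data model, and the map contents are assumptions.
#include <QMap>
#include <QString>
#include <QDebug>

int main()
{
    QMap<QString, int> indexes;            // placeholder name -> bound position
    indexes.insert(":name", 0);
    indexes.insert(":age", 1);

    qDebug() << indexes.key(1);            // ":age" (linear scan over the values)
    qDebug() << indexes.key(7, ":none");   // default key when no value matches
    return 0;
}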
nsresult
nsMorkReader::ParseTable(const nsCSubstring &aLine, const IndexMap &aColumnMap)
{
  nsCLineString line(aLine);
  const PRUint32 columnCount = mColumns.Length(); // total number of columns
  PRInt32 columnIndex = -1; // column index of the cell we're parsing

  // value array for the row we're parsing
  nsTArray<nsCString> *currentRow = nsnull;

  PRBool inMetaRow = PR_FALSE;

  do {
    PRUint32 idx = 0;
    PRUint32 len = line.Length();
    PRUint32 tokenStart, tokenEnd;

    while (idx < len) {
      switch (line[idx++]) {
      case '{':
        // This marks the beginning of a table section.  There's a lot of
        // junk before the first row that looks like cell values but isn't.
        // Skip to the first '['.
        while (idx < len && line[idx] != '[') {
          if (line[idx] == '{') {
            inMetaRow = PR_TRUE; // the meta row is enclosed in { }
          } else if (line[idx] == '}') {
            inMetaRow = PR_FALSE;
          }
          ++idx;
        }
        break;
      case '[':
        {
          // Start of a new row.  Consume the row id, up to the first '('.
          // Row edits also have a table namespace, separated from the row id
          // by a colon.  We don't make use of the namespace, but we need to
          // make sure not to consider it part of the row id.
          if (currentRow) {
            NS_WARNING("unterminated row?");
            currentRow = nsnull;
          }

          // Check for a '-' at the start of the id.  This signifies that
          // if the row already exists, we should delete all columns from it
          // before adding the new values.
          PRBool cutColumns;
          if (idx < len && line[idx] == '-') {
            cutColumns = PR_TRUE;
            ++idx;
          } else {
            cutColumns = PR_FALSE;
          }

          tokenStart = idx;
          while (idx < len &&
                 line[idx] != '(' && line[idx] != ']' && line[idx] != ':') {
            ++idx;
          }
          tokenEnd = idx;
          while (idx < len && line[idx] != '(' && line[idx] != ']') {
            ++idx;
          }

          if (inMetaRow) {
            mMetaRow = NewVoidStringArray(columnCount);
            NS_ENSURE_TRUE(mMetaRow, NS_ERROR_OUT_OF_MEMORY);
            currentRow = mMetaRow;
          } else {
            const nsCSubstring& row = Substring(line, tokenStart,
                                                tokenEnd - tokenStart);
            if (!mTable.Get(row, &currentRow)) {
              currentRow = NewVoidStringArray(columnCount);
              NS_ENSURE_TRUE(currentRow, NS_ERROR_OUT_OF_MEMORY);

              NS_ENSURE_TRUE(mTable.Put(row, currentRow),
                             NS_ERROR_OUT_OF_MEMORY);
            }
          }

          if (cutColumns) {
            // Set all of the columns to void
            // (this differentiates them from columns which are empty strings).
            for (PRUint32 i = 0; i < columnCount; ++i) {
              currentRow->ElementAt(i).SetIsVoid(PR_TRUE);
            }
          }
          break;
        }
      case ']':
        // We're done with the row
        currentRow = nsnull;
        inMetaRow = PR_FALSE;
        break;
      case '(':
        {
          if (!currentRow) {
            NS_WARNING("cell value outside of row");
            break;
          }

          NS_WARN_IF_FALSE(columnIndex == -1, "unterminated cell?");

          PRBool columnIsAtom;
          if (line[idx] == '^') {
            columnIsAtom = PR_TRUE;
            ++idx; // this is not part of the column id, advance past it
          } else {
            columnIsAtom = PR_FALSE;
          }
          tokenStart = idx;
          while (idx < len && line[idx] != '^' && line[idx] != '=') {
            if (line[idx] == '\\') {
              ++idx; // skip escaped characters
            }
            ++idx;
          }
          tokenEnd = PR_MIN(idx, len);

          nsCAutoString column;
          const nsCSubstring &colValue =
            Substring(line, tokenStart, tokenEnd - tokenStart);
          if (columnIsAtom) {
            column.Assign(colValue);
          } else {
            MorkUnescape(colValue, column);
          }

          if (!aColumnMap.Get(colValue, &columnIndex)) {
            NS_WARNING("Column not in column map, discarding it");
            columnIndex = -1;
          }
        }
        break;
      case '=':
      case '^':
        {
          if (columnIndex == -1) {
            NS_WARNING("stray ^ or = marker");
            break;
          }

          PRBool valueIsAtom = (line[idx - 1] == '^');
          tokenStart = idx - 1; // include the '=' or '^' marker in the value
          while (idx < len && line[idx] != ')') {
            if (line[idx] == '\\') {
              ++idx; // skip escaped characters
            }
            ++idx;
          }
          tokenEnd = PR_MIN(idx, len);
          ++idx;

          const nsCSubstring &value =
            Substring(line, tokenStart, tokenEnd - tokenStart);
          if (valueIsAtom) {
            (*currentRow)[columnIndex] = value;
          } else {
            nsCAutoString value2;
            MorkUnescape(value, value2);
            (*currentRow)[columnIndex] = value2;
          }
          columnIndex = -1;
        }
        break;
      }
    }
  } while (currentRow && NS_SUCCEEDED(ReadLine(line)));

  return NS_OK;
}
nsresult
nsMorkReader::Read(nsIFile *aFile)
{
  nsCOMPtr<nsIFileInputStream> stream =
    do_CreateInstance(NS_LOCALFILEINPUTSTREAM_CONTRACTID);
  NS_ENSURE_TRUE(stream, NS_ERROR_FAILURE);

  nsresult rv = stream->Init(aFile, PR_RDONLY, 0, 0);
  NS_ENSURE_SUCCESS(rv, rv);

  mStream = do_QueryInterface(stream);
  NS_ASSERTION(mStream, "file input stream must impl nsILineInputStream");

  nsCLineString line;
  rv = ReadLine(line);
  if (!line.EqualsLiteral("// <!-- <mdb:mork:z v=\"1.4\"/> -->")) {
    return NS_ERROR_FAILURE; // unexpected file format
  }

  IndexMap columnMap;
  NS_ENSURE_TRUE(columnMap.Init(), NS_ERROR_OUT_OF_MEMORY);

  while (NS_SUCCEEDED(ReadLine(line))) {
    // Trim off leading spaces
    PRUint32 idx = 0, len = line.Length();
    while (idx < len && line[idx] == ' ') {
      ++idx;
    }
    if (idx >= len) {
      continue;
    }

    const nsCSubstring &l = Substring(line, idx);

    // Look at the line to figure out what section type this is
    if (StringBeginsWith(l, NS_LITERAL_CSTRING("< <(a=c)>"))) {
      // Column map.  We begin by creating a hash of column id to column name.
      StringMap columnNameMap;
      NS_ENSURE_TRUE(columnNameMap.Init(), NS_ERROR_OUT_OF_MEMORY);

      rv = ParseMap(l, &columnNameMap);
      NS_ENSURE_SUCCESS(rv, rv);

      // Now that we have the list of columns, we put them into a flat array.
      // Rows will have value arrays of the same size, with indexes that
      // correspond to the columns array.  As we insert each column into the
      // array, we also make an entry in columnMap so that we can look up the
      // index given the column id.
      mColumns.SetCapacity(columnNameMap.Count());
      AddColumnClosure closure(&mColumns, &columnMap);
      columnNameMap.EnumerateRead(AddColumn, &closure);
      if (NS_FAILED(closure.result)) {
        return closure.result;
      }
    } else if (StringBeginsWith(l, NS_LITERAL_CSTRING("<("))) {
      // Value map
      rv = ParseMap(l, &mValueMap);
      NS_ENSURE_SUCCESS(rv, rv);
    } else if (l[0] == '{' || l[0] == '[') {
      // Table / table row
      rv = ParseTable(l, columnMap);
      NS_ENSURE_SUCCESS(rv, rv);
    } else {
      // Don't know, hopefully don't care
    }
  }

  return NS_OK;
}