// Reads a JSON string value from the stream into `result`.  Assumes the
// opening '"' has already been consumed; reads until the closing unescaped
// '"', translating backslash escapes and \uXXXX sequences along the way.
void Value::readString(std::istream &input, std::string &result) {
	bool noErrors = true, noUnicodeError = true;
	char currentCharacter, tmpCharacter;
	std::stringstream constructing;
	std::string tmpStr(4, ' ');
	std::stringstream tmpSs;
	int32_t tmpInt;
	String32 tmpStr32;
	unsigned int tmpCounter;

	// As long as there aren't any errors and that we haven't reached the
	// end of the input stream.
	while (noErrors && !input.eof()) {
		input.get(currentCharacter);

		if (input.good()) {
			if (currentCharacter & 0x80) { // 0x80 --> 10000000
				// The character is part of an utf8 character.
				constructing << currentCharacter;

			} else if (currentCharacter == Strings::Json::Escape::BEGIN_ESCAPE) {
				// Backslash: the next character selects the escape sequence.
				if (!input.eof()) {
					input.get(tmpCharacter);

					switch (tmpCharacter) {
					case Strings::Json::Escape::QUOTATION_MARK:
						constructing << Strings::Std::QUOTATION_MARK;
						break;
					case Strings::Json::Escape::REVERSE_SOLIDUS:
						constructing << Strings::Std::REVERSE_SOLIDUS;
						break;
					case Strings::Json::Escape::SOLIDUS:
						constructing << Strings::Std::SOLIDUS;
						break;
					case Strings::Json::Escape::BACKSPACE:
						constructing << Strings::Std::BACKSPACE;
						break;
					case Strings::Json::Escape::FORM_FEED:
						constructing << Strings::Std::FORM_FEED;
						break;
					case Strings::Json::Escape::LINE_FEED:
						constructing << Strings::Std::LINE_FEED;
						break;
					case Strings::Json::Escape::CARRIAGE_RETURN:
						constructing << Strings::Std::CARRIAGE_RETURN;
						break;
					case Strings::Json::Escape::TAB:
						constructing << Strings::Std::TAB;
						break;
					case Strings::Json::Escape::BEGIN_UNICODE:
						// TODO: Check for utf16 surrogate pairs.
						tmpCounter = 0;
						tmpStr.clear();
						// NOTE(review): this resets tmpStr to a SINGLE space,
						// but tmpStr[tmpCounter] below writes indices 0..3 --
						// out of range for a std::string of size 1.  It looks
						// like this should be four spaces (tmpStr.assign(4, ' ')).
						tmpStr = " ";
						noUnicodeError = true;

						// Collect the four hex digits of the \uXXXX escape.
						while (tmpCounter < 4 && !input.eof()) {
							input.get(tmpCharacter);

							if (isHexDigit(tmpCharacter)) {
								tmpStr[tmpCounter] = tmpCharacter;

							} else {
								// Invalid digit: mark the escape as bad but
								// still consume all four characters.
								noUnicodeError = false;
								std::cout << "Invalid \\u character, skipping it." << std::endl;
							}

							++tmpCounter;
						}

						if (noUnicodeError) {
							// Convert the four hex digits into a code point,
							// then re-encode that code point as UTF-8.
							tmpSs.clear();
							tmpSs.str("");
							tmpSs << std::hex << tmpStr;
							tmpSs >> tmpInt;
							tmpStr32.clear();
							tmpStr32.push_back(tmpInt);
							tmpStr = Convert::encodeToUTF8(tmpStr32);
							constructing << tmpStr;
						}

						break;
					default:
						// Unrecognized escape: silently dropped.
						break;
					}
				}

			} else if (currentCharacter == '"') {
				// Unescaped quote: the string value is complete.
				result = constructing.str();
				noErrors = false;

			} else {
				// Ordinary character: copy it through.
				constructing << currentCharacter;
			}
		}
// Reads nodal data from a UNV dataset 2414 ("Analysis Data") section.
void MeshData::read_unv_implementation (std::istream & in_file)
{
  /*
   * This is the actual implementation of
   * reading in UNV format.  This enables
   * to read either through the conventional
   * C++ stream, or through a stream that
   * allows to read .gz'ed files.
   */
  if ( !in_file.good() )
    libmesh_error_msg("ERROR: Input file not good.");

  // UNV dataset number for analysis/mesh data.
  const std::string _label_dataset_mesh_data = "2414";

  /*
   * locate the beginning of data set
   * and read it.
   */
  {
    std::string olds, news;

    while (true)
      {
        in_file >> olds >> news;

        /*
         * Yes, really dirty:
         *
         * When we found a dataset, and the user does
         * not want this dataset, we jump back here
         */
      go_and_find_the_next_dataset:

        /*
         * a "-1" followed by a number means the beginning of a dataset
         * stop combing at the end of the file
         */
        while( ((olds != "-1") || (news == "-1") ) && !in_file.eof() )
          {
            olds = news;
            in_file >> news;
          }

        if(in_file.eof())
          break;

        /*
         * if beginning of dataset
         */
        if (news == _label_dataset_mesh_data)
          {
            /*
             * Now read the data of interest.
             * Start with the header.  For
             * explanation of the variable
             * dataset_location, see below.
             */
            unsigned int dataset_location;

            /*
             * the type of data (complex, real,
             * float, double etc, see below)
             */
            unsigned int data_type;

            /*
             * the number of floating-point values per entity
             */
            unsigned int NVALDC;

            /*
             * If there is no MeshDataUnvHeader object
             * attached
             */
            if (_unv_header == libmesh_nullptr)
              {
                /*
                 * Ignore the first lines that stand for
                 * analysis dataset label and name.
                 */
                for(unsigned int i=0; i<3; i++)
                  in_file.ignore(256,'\n');

                /*
                 * Read the dataset location, where
                 * 1: Data at nodes
                 * 2: Data on elements
                 * other sets are currently not supported.
                 */
                in_file >> dataset_location;

                /*
                 * Ignore five ID lines.
                 */
                for(unsigned int i=0; i<6; i++)
                  in_file.ignore(256,'\n');

                /*
                 * These data are all of no interest to us...
                 */
                unsigned int model_type, analysis_type, data_characteristic, result_type;

                /*
                 * Read record 9.
                 */
                in_file >> model_type           // not used here
                        >> analysis_type        // not used here
                        >> data_characteristic  // not used here
                        >> result_type          // not used here
                        >> data_type
                        >> NVALDC;

                /*
                 * Ignore record 10 and 11
                 * (Integer analysis type specific data).
                 */
                for (unsigned int i=0; i<3; i++)
                  in_file.ignore(256,'\n');

                /*
                 * Ignore record 12 and record 13.  Since there
                 * exist UNV files with 'D' instead of 'e' as
                 * 10th-power char, it is safer to use a string
                 * to read the dummy reals.
                 */
                {
                  std::string dummy_Real;
                  for (unsigned int i=0; i<12; i++)
                    in_file >> dummy_Real;
                }
              }
            else
              {
                /*
                 * the read() method returns false when
                 * the user wanted a special header, and
                 * when the current header is _not_ the correct
                 * header
                 */
                if (_unv_header->read(in_file))
                  {
                    dataset_location = _unv_header->dataset_location;
                    NVALDC = _unv_header->nvaldc;
                    data_type = _unv_header->data_type;
                  }
                else
                  {
                    /*
                     * This is not the correct header.  Go
                     * and find the next.  For this to
                     * work correctly, shift to the
                     * next line, so that the "-1"
                     * disappears from olds
                     */
                    olds = news;
                    in_file >> news;

                    /*
                     * No good style, i know...
                     */
                    goto go_and_find_the_next_dataset;
                  }
              }

            /*
             * Check the location of the dataset.
             */
            if (dataset_location != 1)
              libmesh_error_msg("ERROR: Currently only Data at nodes is supported.");

            /*
             * Now get the foreign node id number and the respective nodal data.
             */
            int f_n_id;
            std::vector<Number> values;

            while(true)
              {
                in_file >> f_n_id;

                /*
                 * if node_nr = -1 then we have reached the end of the dataset.
                 */
                if (f_n_id==-1)
                  break;

                /*
                 * Resize the values vector (usually data in three
                 * principle directions, i.e. NVALDC = 3).
                 */
                values.resize(NVALDC);

                /*
                 * Read the meshdata for the respective node.
                 */
                for (unsigned int data_cnt=0; data_cnt<NVALDC; data_cnt++)
                  {
                    /*
                     * Check what data type we are reading.
                     * 2,4: Real
                     * 5,6: Complex
                     * other data types are not supported yet.
                     * As again, these floats may also be written
                     * using a 'D' instead of an 'e'.
                     */
                    if (data_type == 2 || data_type == 4)
                      {
                        std::string buf;
                        in_file >> buf;
                        // Normalize Fortran-style "D" exponents in place.
                        MeshDataUnvHeader::need_D_to_e(buf);
#ifdef LIBMESH_USE_COMPLEX_NUMBERS
                        values[data_cnt] = Complex(std::atof(buf.c_str()), 0.);
#else
                        values[data_cnt] = std::atof(buf.c_str());
#endif
                      }
                    else if(data_type == 5 || data_type == 6)
                      {
#ifdef LIBMESH_USE_COMPLEX_NUMBERS
                        Real re_val, im_val;
                        std::string buf;
                        in_file >> buf;

                        // If the real part needed D->e conversion, the
                        // imaginary part will too; otherwise read it directly.
                        if (MeshDataUnvHeader::need_D_to_e(buf))
                          {
                            re_val = std::atof(buf.c_str());
                            in_file >> buf;
                            MeshDataUnvHeader::need_D_to_e(buf);
                            im_val = std::atof(buf.c_str());
                          }
                        else
                          {
                            re_val = std::atof(buf.c_str());
                            in_file >> im_val;
                          }

                        values[data_cnt] = Complex(re_val,im_val);
#else
                        libmesh_error_msg("ERROR: Complex data only supported when libMesh is configured with --enable-complex!");
#endif
                      }
// virtual BOOL LLInventoryCategory::importLegacyStream(std::istream& input_stream) { // *NOTE: Changing the buffer size will require changing the scanf // calls below. char buffer[MAX_STRING]; /* Flawfinder: ignore */ char keyword[MAX_STRING]; /* Flawfinder: ignore */ char valuestr[MAX_STRING]; /* Flawfinder: ignore */ keyword[0] = '\0'; valuestr[0] = '\0'; while(input_stream.good()) { input_stream.getline(buffer, MAX_STRING); sscanf( /* Flawfinder: ignore */ buffer, " %254s %254s", keyword, valuestr); if(0 == strcmp("{",keyword)) { continue; } if(0 == strcmp("}", keyword)) { break; } else if(0 == strcmp("cat_id", keyword)) { mUUID.set(valuestr); } else if(0 == strcmp("parent_id", keyword)) { mParentUUID.set(valuestr); } else if(0 == strcmp("type", keyword)) { mType = LLAssetType::lookup(valuestr); } else if(0 == strcmp("pref_type", keyword)) { mPreferredType = LLFolderType::lookup(valuestr); } else if(0 == strcmp("name", keyword)) { //strcpy(valuestr, buffer + strlen(keyword) + 3); // *NOTE: Not ANSI C, but widely supported. sscanf( /* Flawfinder: ignore */ buffer, " %254s %254[^|]", keyword, valuestr); mName.assign(valuestr); LLStringUtil::replaceNonstandardASCII(mName, ' '); LLStringUtil::replaceChar(mName, '|', ' '); } else { llwarns << "unknown keyword '" << keyword << "' in inventory import category " << mUUID << llendl; } } return TRUE; }
// Feeds the stream into the expat parser one line at a time, stopping at
// end-of-stream, a parse error, or when the document completes
// (mGracefullStop).  On success, copies the parsed LLSD into `data` and
// returns the number of parsed elements; on error returns PARSE_FAILURE.
S32 LLSDXMLParser::Impl::parseLines(std::istream& input, LLSD& data)
{
	XML_Status status = XML_STATUS_OK;

	data = LLSD();

	static const int BUFFER_SIZE = 1024;

	//static char last_buffer[ BUFFER_SIZE ];
	//std::streamsize last_num_read;

	// Must get rid of any leading \n, otherwise the stream gets into an error/eof state
	clear_eol(input);

	while( !mGracefullStop
		&& input.good()
		&& !input.eof())
	{
		// Ask expat for an internal buffer to fill directly.
		void* buffer = XML_GetBuffer(mParser, BUFFER_SIZE);
		/*
		 * If we happened to end our last buffer right at the end of the llsd, but the
		 * stream is still going we will get a null buffer here.  Check for mGracefullStop.
		 * -- I don't think this is actually true - zero 2008-05-09
		 */
		if (!buffer)
		{
			break;
		}

		// Get one line
		input.getline((char*)buffer, BUFFER_SIZE);
		std::streamsize num_read = input.gcount();

		//memcpy( last_buffer, buffer, num_read );
		//last_num_read = num_read;

		if ( num_read > 0 )
		{
			if (!input.good() )
			{	// Clear state that's set when we run out of buffer
				// (failbit when a line longer than BUFFER_SIZE-1 only
				// partially fit), so the next iteration keeps reading.
				input.clear();
			}

			// Re-insert with the \n that was absorbed by getline()
			// (getline() stores a terminating NUL in the last cell;
			// expat needs the newline that was consumed instead).
			char * text = (char *) buffer;
			if ( text[num_read - 1] == 0)
			{
				text[num_read - 1] = '\n';
			}
		}

		// Hand the buffer to expat; `false` = more input may follow.
		status = XML_ParseBuffer(mParser, (int)num_read, false);
		if (status == XML_STATUS_ERROR)
		{
			break;
		}
	}

	if (status != XML_STATUS_ERROR
		&& !mGracefullStop)
	{	// Parse last bit
		status = XML_ParseBuffer(mParser, 0, true);
	}

	if (status == XML_STATUS_ERROR
		&& !mGracefullStop)
	{
		if (mEmitErrors)
		{
			LL_INFOS() << "LLSDXMLParser::Impl::parseLines: XML_STATUS_ERROR" << LL_ENDL;
		}
		return LLSDParser::PARSE_FAILURE;
	}

	// Drop the trailing newline so the stream is positioned for the caller.
	clear_eol(input);
	data = mResult;
	return mParseCount;
}
// virtual
// Parses one binary-LLSD value from the stream into `data`.  Returns the
// number of elements parsed (1 for scalars, 1 + children for containers),
// 0 if the stream was already exhausted, or PARSE_FAILURE on error.
S32 LLSDBinaryParser::doParse(std::istream& istr, LLSD& data) const
{
/**
 * Undefined: '!'<br>
 * Boolean: 't' for true 'f' for false<br>
 * Integer: 'i' + 4 bytes network byte order<br>
 * Real: 'r' + 8 bytes IEEE double<br>
 * UUID: 'u' + 16 byte unsigned integer<br>
 * String: 's' + 4 byte integer size + string<br>
 *  strings also secretly support the notation format
 * Date: 'd' + 8 byte IEEE double for seconds since epoch<br>
 * URI: 'l' + 4 byte integer size + string uri<br>
 * Binary: 'b' + 4 byte integer size + binary data<br>
 * Array: '[' + 4 byte integer size + all values + ']'<br>
 * Map: '{' + 4 byte integer size  every(key + value) + '}'<br>
 *  map keys are serialized as s + 4 byte integer size + string or in the
 *  notation format.
 */
	// Read the one-byte type tag and dispatch on it.
	char c;
	c = get(istr);
	if(!istr.good())
	{
		return 0;
	}
	S32 parse_count = 1;
	switch(c)
	{
	case '{':
	{
		S32 child_count = parseMap(istr, data);
		if((child_count == PARSE_FAILURE) || data.isUndefined())
		{
			parse_count = PARSE_FAILURE;
		}
		else
		{
			parse_count += child_count;
		}
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary map." << llendl;
			parse_count = PARSE_FAILURE;
		}
		break;
	}
	case '[':
	{
		S32 child_count = parseArray(istr, data);
		if((child_count == PARSE_FAILURE) || data.isUndefined())
		{
			parse_count = PARSE_FAILURE;
		}
		else
		{
			parse_count += child_count;
		}
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary array." << llendl;
			parse_count = PARSE_FAILURE;
		}
		break;
	}
	case '!':
		data.clear();
		break;
	// NOTE(review): the header comment above says booleans use 't'/'f',
	// but the code accepts '0'/'1' here — confirm which the wire format
	// actually emits.
	case '0':
		data = false;
		break;
	case '1':
		data = true;
		break;
	case 'i':
	{
		U32 value_nbo = 0;
		read(istr, (char*)&value_nbo, sizeof(U32));	 /*Flawfinder: ignore*/
		data = (S32)ntohl(value_nbo);
		// NOTE(review): unlike the other cases, a stream failure here is
		// only logged — parse_count is NOT set to PARSE_FAILURE.
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary integer." << llendl;
		}
		break;
	}
	case 'r':
	{
		F64 real_nbo = 0.0;
		read(istr, (char*)&real_nbo, sizeof(F64));	 /*Flawfinder: ignore*/
		data = ll_ntohd(real_nbo);
		// NOTE(review): same as 'i' — failure logged but not propagated.
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary real." << llendl;
		}
		break;
	}
	case 'u':
	{
		LLUUID id;
		read(istr, (char*)(&id.mData), UUID_BYTES);	 /*Flawfinder: ignore*/
		data = id;
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary uuid." << llendl;
		}
		break;
	}
	case '\'':
	case '"':
	{
		// Notation-style quoted string embedded in a binary stream.
		std::string value;
		int cnt = deserialize_string_delim(istr, value, c);
		if(PARSE_FAILURE == cnt)
		{
			parse_count = PARSE_FAILURE;
		}
		else
		{
			data = value;
			account(cnt);
		}
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary (notation-style) string." << llendl;
			parse_count = PARSE_FAILURE;
		}
		break;
	}
	case 's':
	{
		std::string value;
		if(parseString(istr, value))
		{
			data = value;
		}
		else
		{
			parse_count = PARSE_FAILURE;
		}
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary string." << llendl;
			parse_count = PARSE_FAILURE;
		}
		break;
	}
	case 'l':
	{
		std::string value;
		if(parseString(istr, value))
		{
			data = LLURI(value);
		}
		else
		{
			parse_count = PARSE_FAILURE;
		}
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary link." << llendl;
			parse_count = PARSE_FAILURE;
		}
		break;
	}
	case 'd':
	{
		F64 real = 0.0;
		read(istr, (char*)&real, sizeof(F64));	 /*Flawfinder: ignore*/
		data = LLDate(real);
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary date." << llendl;
			parse_count = PARSE_FAILURE;
		}
		break;
	}
	case 'b':
	{
		// We probably have a valid raw binary stream. determine
		// the size, and read it.
		U32 size_nbo = 0;
		read(istr, (char*)&size_nbo, sizeof(U32));	 /*Flawfinder: ignore*/
		S32 size = (S32)ntohl(size_nbo);
		if(mCheckLimits && (size > mMaxBytesLeft))
		{
			parse_count = PARSE_FAILURE;
		}
		else
		{
			std::vector<U8> value;
			if(size > 0)
			{
				value.resize(size);
				account(fullread(istr, (char*)&value[0], size));
			}
			data = value;
		}
		if(istr.fail())
		{
			llinfos << "STREAM FAILURE reading binary." << llendl;
			parse_count = PARSE_FAILURE;
		}
		break;
	}
	default:
		parse_count = PARSE_FAILURE;
		llinfos << "Unrecognized character while parsing: int(" << (int)c
			<< ")" << llendl;
		break;
	}
	// On any failure, leave `data` in a clean (undefined) state.
	if(PARSE_FAILURE == parse_count)
	{
		data.clear();
	}
	return parse_count;
}
// Consumes any run of leading '\n' / '\r' characters from the stream,
// leaving it positioned at the first other character (or EOF).
void FileMap::strip_rn( std::istream & file )
{
	for (;;)
	{
		if ( !file.good() )
			break;

		const int next = file.peek();
		if ( next != '\n' && next != '\r' )
			break;

		file.get();	// discard the newline / carriage return
	}
}
std::shared_ptr< Expression> parse(std::istream& input, int& line_no, bool topLevel) { std::string buffer(""); char c; while (input.good()) { input.get(c); if (whitespace(c)) { if (c == '\n') { line_no++; } if (buffer.empty()) { ;//nothing to do } else { if (digit(buffer[0])) { return std::make_shared<Float>(buffer); } else if (doubleQuote(buffer[0])) { buffer += c; } else if (colon(buffer[0])) { return std::make_shared<Atom>(buffer.substr(1)); } else if (alphanumeric(buffer[0])) { return std::make_shared<Identifier>(buffer); //} else { // return new Identifier(buffer); } } } else if (digit(c)) { buffer += c; } else if (dot(c)) { if (buffer.empty()) { buffer += c; } else { if (digit(buffer[0])) { if (!dotIn(buffer)) { buffer += c; } else { SYNTAX_ERROR(line_no, "Second dot found in Float literal " << buffer + c ); } } else { buffer += c; } } } else if (colon(c)) { if (buffer.empty()) { buffer += c; } else { SYNTAX_ERROR(line_no, "Colon found in the middle of token " << buffer + c ); } } else if (doubleQuote(c)) { if (buffer.empty()) { buffer += c; } else { if (doubleQuote(buffer[0])) { buffer += c; return std::make_shared<String>(buffer); } else { SYNTAX_ERROR(line_no, "Double-quote found in the middle of token " << buffer + c ); } } } else if (parenthenesis(c)) { //BEWARE - THE TRICKY PART //TODO refactor to smaller functions if (buffer.empty()) { if (openParenthenesis(c)) { std::vector< std::shared_ptr<Expression>> list; char c2; input.get(c2); while (!closeParenthenesis(c2)) { input.unget(); std::shared_ptr<Expression> tmpExp = parse(input, line_no, false); if (tmpExp) { list.push_back(tmpExp); } else { input.unget(); } input.get(c2); } if (!parenMatches(c, c2)) { SYNTAX_ERROR(line_no, "Parenthenesis " << c << " is closed by not matching paren : " << c2); } return std::make_shared<List>(list); } else {//closeParenthenesis(c) if (topLevel) { SYNTAX_ERROR(line_no, "Found unmatched close parenthenesis " << c); } else { return NULL;//oznacza, ze trzeba wyjsc } } } 
else { //end of some token //return c back to the ctream and return expression as a result //or possibly it was ending paren inside a string and we add it simply if (digit(buffer[0])) { input.unget(); return std::make_shared<Float>(buffer); } else if (doubleQuote(buffer[0])) { buffer += c; //no unget, cause it's inside a string literal } else if (colon(buffer[0])) { input.unget(); return std::make_shared<Atom>(buffer); } else if (alphanumeric(buffer[0])) { input.unget(); return std::make_shared<Identifier>(buffer); } else { SYNTAX_ERROR(line_no, "Situation impossible : buffer == " << buffer); } } } else if (alphanumeric(c)) { if (buffer.empty()) { buffer += c; } else { if (!digit(buffer[0])) { buffer += c; } else { SYNTAX_ERROR(line_no, "Non-digit found in the middle of Float literal " << buffer + c ); } } } else { SYNTAX_ERROR(line_no, "Unrecognised char " << c); } } }
void OFFIO::read_stream(std::istream& in) { // This is a serial-only process for now; // the Mesh should be read on processor 0 and // broadcast later libmesh_assert_equal_to (this->mesh().processor_id(), 0); // Get a reference to the mesh MeshBase& the_mesh = MeshInput<MeshBase>::mesh(); // Clear any existing mesh data the_mesh.clear(); // Check the input buffer libmesh_assert (in.good()); unsigned int nn, ne, nf; std::string label; // Read the first string. It should say "OFF" in >> label; libmesh_assert_equal_to (label, "OFF"); // read the number of nodes, faces, and edges in >> nn >> nf >> ne; Real x=0., y=0., z=0.; // Read the nodes for (unsigned int n=0; n<nn; n++) { libmesh_assert (in.good()); in >> x >> y >> z; the_mesh.add_point ( Point(x,y,z), n ); } unsigned int nv, nid; // Read the elements for (unsigned int e=0; e<nf; e++) { libmesh_assert (in.good()); // The number of vertices in the element in >> nv; libmesh_assert(nv == 2 || nv == 3); if (e == 0) { the_mesh.set_mesh_dimension(nv-1); if (nv == 3) { #if LIBMESH_DIM < 2 libmesh_error_msg("Cannot open dimension 2 mesh file when configured without 2D support."); #endif } } Elem* elem; switch (nv) { case 2: elem = new Edge2; break; case 3: elem = new Tri3; break; default: libmesh_error_msg("Unsupported nv = " << nv); } elem->set_id(e); the_mesh.add_elem (elem); for (unsigned int i=0; i<nv; i++) { in >> nid; elem->set_node(i) = the_mesh.node_ptr(nid); } } }
bool Settings::updateConfigObject(std::istream &is, std::ostream &os, const std::string &end, u32 tab_depth) { std::map<std::string, SettingsEntry>::const_iterator it; std::set<std::string> present_entries; std::string line, name, value; bool was_modified = false; bool end_found = false; // Add any settings that exist in the config file with the current value // in the object if existing while (is.good() && !end_found) { std::getline(is, line); SettingsParseEvent event = parseConfigObject(line, end, name, value); switch (event) { case SPE_END: os << line << (is.eof() ? "" : "\n"); end_found = true; break; case SPE_MULTILINE: value = getMultiline(is); /* FALLTHROUGH */ case SPE_KVPAIR: it = m_settings.find(name); if (it != m_settings.end() && (it->second.is_group || it->second.value != value)) { printEntry(os, name, it->second, tab_depth); was_modified = true; } else { os << line << "\n"; if (event == SPE_MULTILINE) os << value << "\n\"\"\"\n"; } present_entries.insert(name); break; case SPE_GROUP: it = m_settings.find(name); if (it != m_settings.end() && it->second.is_group) { os << line << "\n"; sanity_check(it->second.group != NULL); was_modified |= it->second.group->updateConfigObject(is, os, "}", tab_depth + 1); } else { printEntry(os, name, it->second, tab_depth); was_modified = true; } present_entries.insert(name); break; default: os << line << (is.eof() ? "" : "\n"); break; } } // Add any settings in the object that don't exist in the config file yet for (it = m_settings.begin(); it != m_settings.end(); ++it) { if (present_entries.find(it->first) != present_entries.end()) continue; printEntry(os, it->first, it->second, tab_depth); was_modified = true; } return was_modified; }
// Reads a UNV mesh in two passes: a first scan to count nodes/elements and
// record which dataset comes first, then a second pass (after seeking back
// to the start) to actually read them.
void UNVIO::read_implementation (std::istream& in_stream)
{
  // clear everything, so that
  // we can start from scratch
  this->clear ();

  // Keep track of what kinds of elements this file contains
  elems_of_dimension.clear();
  elems_of_dimension.resize(4, false);

  // Note that we read this file
  // @e twice.  First time to
  // detect the number of nodes
  // and elements (and possible
  // conversion tasks like D_to_e)
  // and the order of datasets
  // (nodes first, then elements,
  // or the other way around),
  // and second to do the actual
  // read.
  std::vector<std::string> order_of_datasets;
  order_of_datasets.reserve(2);

  {
    // the first time we read the file,
    // merely to obtain overall info
    if ( !in_stream.good() )
      {
        libMesh::err << "ERROR: Input file not good."
                     << std::endl;
        libmesh_error();
      }

    // Count nodes and elements, then let
    // other methods read the element and
    // node data.  Also remember which
    // dataset comes first: nodes or elements
    if (this->verbose())
      libMesh::out << " Counting nodes and elements" << std::endl;

    //    bool reached_eof = false;
    bool found_node = false;
    bool found_elem = false;

    std::string olds, news;

    while (in_stream.good())
      {
        in_stream >> olds >> news;

        // a "-1" followed by a number means the beginning of a dataset
        // stop combing at the end of the file
        while ( ((olds != "-1") || (news == "-1") ) && !in_stream.eof() )
          {
            olds = news;
            in_stream >> news;
          }

        //      if (in_stream.eof())
        //        {
        //          reached_eof = true;
        //          break;
        //        }

        // if beginning of dataset, buffer it in
        // temp_buffer, if desired
        if (news == _label_dataset_nodes)
          {
            found_node = true;
            order_of_datasets.push_back (_label_dataset_nodes);
            this->count_nodes (in_stream);

            // we can save some time scanning the file
            // when we know we already have everything
            // we want
            if (found_elem)
              break;
          }
        else if (news == _label_dataset_elements)
          {
            found_elem = true;
            order_of_datasets.push_back (_label_dataset_elements);
            this->count_elements (in_stream);

            // we can save some time scanning the file
            // when we know we already have everything
            // we want
            if (found_node)
              break;
          }
      }

    // Here we should better have found
    // the datasets for nodes and elements,
    // otherwise the unv files is bad!
    if (!found_elem)
      {
        libMesh::err << "ERROR: Could not find elements!" << std::endl;
        libmesh_error();
      }

    if (!found_node)
      {
        libMesh::err << "ERROR: Could not find nodes!" << std::endl;
        libmesh_error();
      }

    // Don't close, just seek to the beginning
    in_stream.seekg(0, std::ios::beg);

    if (!in_stream.good() )
      {
        libMesh::err << "ERROR: Cannot re-read input file."
                     << std::endl;
        libmesh_error();
      }
  }

  // We finished scanning the file,
  // and our member data
  // \p this->_n_nodes,
  // \p this->_n_elements,
  // \p this->_need_D_to_e
  // should be properly initialized.
  {
    // Read the datasets in the order that
    // we already know
    libmesh_assert_equal_to (order_of_datasets.size(), 2);

    for (unsigned int ds=0; ds < order_of_datasets.size(); ds++)
      {
        if (order_of_datasets[ds] == _label_dataset_nodes)
          this->node_in (in_stream);

        else if (order_of_datasets[ds] == _label_dataset_elements)
          this->element_in (in_stream);

        else
          libmesh_error();
      }

    // Set the mesh dimension to the largest encountered for an element
    for (unsigned int i=0; i!=4; ++i)
      if (elems_of_dimension[i])
        MeshInput<MeshBase>::mesh().set_mesh_dimension(i);

#if LIBMESH_DIM < 3
    if (MeshInput<MeshBase>::mesh().mesh_dimension() > LIBMESH_DIM)
      {
        libMesh::err << "Cannot open dimension "
                     << MeshInput<MeshBase>::mesh().mesh_dimension()
                     << " mesh file when configured without "
                     << MeshInput<MeshBase>::mesh().mesh_dimension()
                     << "D support."
                     << std::endl;
        libmesh_error();
      }
#endif

    // tell the MeshData object that we are finished
    // reading data
    this->_mesh_data.close_foreign_id_maps ();

    if (this->verbose())
      libMesh::out << " Finished." << std::endl << std::endl;
  }

  // save memory
  this->_assign_nodes.clear();
  this->_ds_position.clear();
}
void UNVIO::count_nodes (std::istream& in_file) { START_LOG("count_nodes()","UNVIO"); // if this->_n_nodes is not 0 the dataset // has already been scanned if (this->_n_nodes != 0) { libMesh::err << "Error: Trying to scan nodes twice!" << std::endl; libmesh_error(); } // Read from file, count nodes, // check if floats have to be converted std::string data; in_file >> data; // read the first node label if (data == "-1") { libMesh::err << "ERROR: Bad, already reached end of dataset before even starting to read nodes!" << std::endl; libmesh_error(); } // ignore the misc data for this node in_file.ignore(256,'\n'); // Now we are there to verify whether we need // to convert from D to e or not in_file >> data; // When this "data" contains a "D", then // we have to convert each and every float... // But also assume when _this_ specific // line does not contain a "D", then the // other lines won't, too. { // #ifdef __HP_aCC // // Use an "int" instead of unsigned int, // // otherwise HP aCC may crash! // const int position = data.find("D",6); // #else // const unsigned int position = data.find("D",6); // #endif std::string::size_type position = data.find("D",6); if (position!=std::string::npos) // npos means no position { this->_need_D_to_e = true; if (this->verbose()) libMesh::out << " Convert from \"D\" to \"e\"" << std::endl; } else this->_need_D_to_e = false; } // read the remaining two coordinates in_file >> data; in_file >> data; // this was our first node this->_n_nodes++; // proceed _counting_ the remaining // nodes. while (in_file.good()) { // read the node label in_file >> data; if (data == "-1") // end of dataset is reached break; // ignore the remaining data (coord_sys_label, color etc) in_file.ignore (256, '\n'); // ignore the coordinates in_file.ignore (256, '\n'); this->_n_nodes++; } if (in_file.eof()) { libMesh::err << "ERROR: File ended before end of node dataset!" 
<< std::endl; libmesh_error(); } if (this->verbose()) libMesh::out << " Nodes : " << this->_n_nodes << std::endl; STOP_LOG("count_nodes()","UNVIO"); }
// Loads a PCX image (header, palette, RLE-compressed pixel data) from `file`
// into m_Header / m_Palette / m_Bitmap.
// NOTE(review): the function is declared throw() (no-throw) yet throws
// exceptions::io / exceptions::load_resource below; a throw escaping a
// throw() specification calls std::unexpected()/terminate().  Confirm
// against the declaration in the class header before changing.
void cPCXFile::loadFrom(std::istream & file) throw()
{
	if (!file.good())
		throw exceptions::io();

	// Measure the remaining stream length from the current position.
	int start_pos = (int)file.tellg();
	file.seekg(0, std::ios::end);
	int length = (int)file.tellg() - start_pos;
	file.seekg(start_pos);

	if(length < (int)sizeof(m_Header))
		throw exceptions::load_resource();
	if (!file.good())
		throw exceptions::load_resource();

	file.read((char*)&m_Header, sizeof(m_Header));
	if (file.gcount()!=sizeof(m_Header))
		throw exceptions::load_resource();

	if(m_Header.m_Bpp == 4)
	{
		// 16-color image: the palette lives inside the header area.
		file.seekg(start_pos + (int)offsetof(SPCXHeader, m_Palette));
		m_Palette.loadFrom(file, 16);
	}
	else if(m_Header.m_Bpp == 8)
	{
		// 256-color image: a palette block (marker byte 0x0C followed
		// by 768 bytes) is appended at the end of the file.
		if((unsigned int)length > sizeof(m_Header) + 768)
		{
			file.seekg(-769, std::ios::end);
			if (file.get() != 0xC)
				throw exceptions::load_resource();
			m_Palette.loadFrom(file);
			file.seekg(start_pos + sizeof(m_Header));
		}
		else
			throw exceptions::load_resource();
	}
	else
		throw exceptions::load_resource();

	m_Bitmap.create(m_Header.m_EndX - m_Header.m_StartX + 1, m_Header.m_EndY - m_Header.m_StartY + 1);

	// Bytes per scanline across all color planes, and how many decoded
	// values per line exceed the visible width (PCX lines are padded).
	int line_lgt = m_Header.m_BPLine * m_Header.m_Planes;
	int line_padding = (line_lgt * 8 / m_Header.m_Bpp) - m_Bitmap.width();
	if (line_lgt <= 0 || line_padding < 0)
		throw exceptions::load_resource();

	// Decode the RLE stream one scanline at a time: a byte with the two
	// top bits set carries a run count in its low 6 bits for the byte
	// that follows; anything else is a single literal pixel value.
	for(int i = 0; i < m_Bitmap.height(); ++i)
	{
		char runcount = 0;
		char data = 0;
		char runvalue = 0;
		for(int j = 0; j < line_lgt; j+= runcount)
		{
			if (!file.good())
				throw exceptions::load_resource();
			file.get(data);
			if (!file.good())
				throw exceptions::load_resource();
			if((data & 0xC0) == 0xC0)
			{
				runcount = data & 0x3F;
				file.get(runvalue);
				if (!file.good())
					throw exceptions::load_resource();
			}
			else
			{
				runcount = 1;
				runvalue = data;
			}
			// Clip runs that spill past the visible width (line padding).
			for(int z = 0; z < runcount && j + z < m_Bitmap.width(); ++z)
				m_Bitmap.pixel(j + z, i) = runvalue;
		}
	}
}
bool LocalizedString::loadFromStream(std::istream& in_Stream, const uint32_t subHeader, const bool withHeader, uint32_t& bytesRead, const bool localized, const StringTable& table, char* buffer) { if (withHeader) { uint32_t subRecName = 0; //read header in_Stream.read((char*) &subRecName, 4); bytesRead += 4; if (subRecName!=subHeader) { UnexpectedRecord(subHeader, subRecName); return false; } }//if with header //subrecord's length uint16_t subLength = 0; in_Stream.read((char*) &subLength, 2); bytesRead += 2; if (localized) { if (subLength!=4) { std::cout << "Error: sub record " << IntTo4Char(subHeader) << " has invalid length (" << subLength << " bytes). Should be four bytes.\n"; return false; } //read value in_Stream.read((char*) &m_Index, 4); bytesRead += 4; if (!in_Stream.good()) { std::cout << "LocalizedString::loadFromStream: Error while reading subrecord " << IntTo4Char(subHeader)<<"!\n"; return false; } //treat index zero as empty string, some subrecords allow zero as index if (m_Index==0) { m_String.clear(); m_Type = lsIndex; return true; }//if zero if (!table.hasString(m_Index)) { std::cout << "LocalizedString::loadFromStream: table has no entry for index "<<m_Index<<"!\n"; return false; } m_String = table.getString(m_Index); m_Type = lsIndex; } else { //unlocalized (plain string) if (subLength>511) { std::cout <<"Error: subrecord "<<IntTo4Char(subHeader)<<" is longer than 511 characters!\n"; return false; } //read string memset(buffer, 0, 512); in_Stream.read(buffer, subLength); bytesRead += subLength; if (!in_Stream.good()) { std::cout << "Error while reading subrecord "<<IntTo4Char(subHeader)<<"!\n"; return false; } m_String = std::string(buffer); m_Type = lsString; } return true; }
// Tokenizes `code` into this token list, one character at a time.
// `file0` is the top-level file name. Preprocessor-inserted "#file"/"#endfile"
// markers switch the active file index and line counter (saved and restored
// on the two stacks below); runs of Preprocessor::macroChar flag the tokens
// that came from a macro expansion. Returns false (after deallocating all
// tokens) on an unbalanced "#endfile".
bool TokenList::createTokens(std::istream &code, const std::string& file0)
{
    _files.push_back(file0);

    // line number in parsed code
    unsigned int lineno = 1;

    // The current token being parsed
    std::string CurrentToken;

    // lineNumbers holds line numbers for files in fileIndexes
    // every time an include file is completely parsed, last item in the vector
    // is removed and lineno is set to point to that value.
    std::stack<unsigned int> lineNumbers;

    // fileIndexes holds index for _files vector about currently parsed files
    // every time an include file is completely parsed, last item in the vector
    // is removed and FileIndex is set to point to that value.
    std::stack<unsigned int> fileIndexes;

    // FileIndex. What file in the _files vector is read now?
    unsigned int FileIndex = 0;

    bool expandedMacro = false;

    // Read one byte at a time from code and create tokens
    for (char ch = (char)code.get(); code.good(); ch = (char)code.get()) {
        if (ch == Preprocessor::macroChar) {
            // Collapse a run of macro markers into a single space.
            while (code.peek() == Preprocessor::macroChar)
                code.get();
            ch = ' ';
            expandedMacro = true;
        } else if (ch == '\n') {
            expandedMacro = false;
        }

        // char/string..
        // multiline strings are not handled. The preprocessor should handle that for us.
        else if (ch == '\'' || ch == '\"') {
            std::string line;

            // read char
            bool special = false;
            char c = ch;
            do {
                // Append token..
                line += c;

                // Special sequence '\.'
                if (special)
                    special = false;
                else
                    special = (c == '\\');

                // Get next character
                c = (char)code.get();
            } while (code.good() && (special || c != ch));
            line += ch;

            // Handle #file "file.h"
            if (CurrentToken == "#file") {
                // Extract the filename
                line = line.substr(1, line.length() - 2);

                // Has this file been tokenized already?
                ++lineno;
                bool foundOurfile = false;
                fileIndexes.push(FileIndex);
                for (unsigned int i = 0; i < _files.size(); ++i) {
                    if (Path::sameFileName(_files[i], line)) {
                        // Use this index
                        foundOurfile = true;
                        FileIndex = i;
                    }
                }

                if (!foundOurfile) {
                    // The "_files" vector remembers what files have been tokenized..
                    _files.push_back(Path::simplifyPath(line.c_str()));
                    FileIndex = static_cast<unsigned int>(_files.size() - 1);
                }

                lineNumbers.push(lineno);
                lineno = 0;
            } else {
                // Add previous token
                addtoken(CurrentToken.c_str(), lineno, FileIndex);
                if (!CurrentToken.empty())
                    _back->setExpandedMacro(expandedMacro);

                // Add content of the string
                addtoken(line.c_str(), lineno, FileIndex);
                if (!line.empty())
                    _back->setExpandedMacro(expandedMacro);
            }

            CurrentToken.clear();

            continue;
        }

        if (ch == '.' && CurrentToken.length() > 0 && std::isdigit(CurrentToken[0])) {
            // Don't separate doubles "5.4"
        } else if (strchr("+-", ch) && CurrentToken.length() > 0 && std::isdigit(CurrentToken[0]) && (CurrentToken[CurrentToken.length()-1] == 'e' || CurrentToken[CurrentToken.length()-1] == 'E') && !MathLib::isHex(CurrentToken)) {
            // Don't separate doubles "4.2e+10"
        } else if (CurrentToken.empty() && ch == '.' && std::isdigit(code.peek())) {
            // tokenize .125 into 0.125
            CurrentToken = "0";
        } else if (strchr("+-*/%&|^?!=<>[](){};:,.~\n ", ch)) {
            if (CurrentToken == "#file") {
                // Handle this where strings are handled
                continue;
            } else if (CurrentToken == "#endfile") {
                if (lineNumbers.empty() || fileIndexes.empty()) { // error
                    deallocateTokens();
                    return false;
                }

                // Restore the line number / file index of the includer.
                lineno = lineNumbers.top();
                lineNumbers.pop();
                FileIndex = fileIndexes.top();
                fileIndexes.pop();
                CurrentToken.clear();
                continue;
            }

            addtoken(CurrentToken.c_str(), lineno, FileIndex, true);
            if (!CurrentToken.empty())
                _back->setExpandedMacro(expandedMacro);

            CurrentToken.clear();

            if (ch == '\n') {
                ++lineno;
                continue;
            } else if (ch == ' ') {
                continue;
            }

            CurrentToken += ch;
            // Add "++", "--", ">>" or ... token
            if (strchr("+-<>=:&|", ch) && (code.peek() == ch))
                CurrentToken += (char)code.get();
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            _back->setExpandedMacro(expandedMacro);
            CurrentToken.clear();
            continue;
        }

        CurrentToken += ch;
    }
    // Flush the trailing token, then fix up progress values and file paths.
    addtoken(CurrentToken.c_str(), lineno, FileIndex, true);
    if (!CurrentToken.empty())
        _back->setExpandedMacro(expandedMacro);
    _front->assignProgressValues();

    for (unsigned int i = 1; i < _files.size(); i++)
        _files[i] = Path::getRelativePath(_files[i], _settings->_basePaths);

    return true;
}
/** Parses one XML node (and, recursively, its subtree) from `in`.
 *
 *  A character-driven state machine recognises data nodes, tags, processing
 *  instructions (<? ?>), the xml declaration, directives (<!NAME ...>),
 *  comments (<!-- -->) and fake closing nodes (</name>), filling in m_name,
 *  m_data, m_attrib and the child list as it goes. `style` controls entity
 *  decoding (MXML_STYLE_NOESCAPE suppresses it); `l`/`pos` seed the position
 *  tracking used in error reports. Throws MalformedError on syntax errors.
 *
 *  Fixes versus the previous revision:
 *   - STATUS_END_COMMENT1 did `m_data += "-" + chr;`, which adds the char
 *     value to the *pointer* of the literal "-" (undefined behaviour) rather
 *     than appending '-' followed by the character.
 *   - STATUS_READ_ENTITY assigned `m_data = ...` when an entity terminated,
 *     discarding all data gathered before the entity; it now appends.
 */
Node::Node( std::istream &in, const int style, const int l, const int pos ) throw( MalformedError ):
   Element( l, pos )
{
   // variables to optimize data node promotion in tag nodes
   bool promote_data = true;
   Node *the_data_node = 0;

   char chr;
   std::string entity;
   int iStatus = STATUS_BEGIN;

   m_prev = m_next = m_parent = m_child = m_last_child = 0;
   // defaults to data type: parents will ignore/destroy empty data elements
   m_type = typeData;

   while ( iStatus >= 0 && in.good() )
   {
      in.get( chr );
      // resetting new node foundings
      nextChar();
      //std::cout << "CHR: " << chr << " - status: " << iStatus << std::endl;

      switch ( iStatus )
      {
         case STATUS_BEGIN:  // outside nodes
            switch ( chr )
            {
               case MXML_LINE_TERMINATOR: nextLine(); break;
               // We repeat line terminator here for portability
               case MXML_SOFT_LINE_TERMINATOR: break;
               case ' ': case '\t': break;
               case '<': iStatus = STATUS_FIRSTCHAR; break;
               default:  // it is a data node
                  m_type = typeData;
                  m_data = chr;
                  iStatus = STATUS_READ_DATA; // data
            }
         break;

         case STATUS_FIRSTCHAR:  //inside a node, first character
            if ( chr == '/' )
            {
               iStatus = STATUS_READ_TAG_NAME;
               m_type = typeFakeClosing;
            }
            else if ( chr == '!' )
            {
               iStatus = STATUS_MAYBE_COMMENT;
            }
            else if ( chr == '?' )
            {
               m_type = typePI;
               iStatus = STATUS_READ_TAG_NAME;  // PI - read node name
            }
            else if ( isalpha( chr ) )
            {
               m_type = typeTag;
               m_name = chr;
               iStatus = STATUS_READ_TAG_NAME2; // tag - read node name (2nd char)
            }
            else
            {
               throw MalformedError( Error::errInvalidNode, this );
            }
         break;

         case STATUS_MAYBE_COMMENT:  //inside a possible comment (<!-/<!?)
            if ( chr == '-')
            {
               iStatus = STATUS_MAYBE_COMMENT2;
            }
            else if ( isalpha( chr ) )
            {
               m_type = typeDirective;
               m_name = chr;
               iStatus = STATUS_READ_TAG_NAME;  // read directive
            }
            else
            {
               throw MalformedError( Error::errInvalidNode, this );
            }
         break;

         case STATUS_MAYBE_COMMENT2:
            if ( chr == '-')
            {
               m_type = typeComment;
               iStatus = STATUS_READ_COMMENT;  // read comment
            }
            else
            {
               throw MalformedError( Error::errInvalidNode, this );
            }
         break;

         case STATUS_READ_COMMENT:
            if ( chr == '-' )
            {
               iStatus = STATUS_END_COMMENT1;
            }
            else
            {
               if ( chr == MXML_LINE_TERMINATOR )
                  nextLine();
               m_data += chr;
            }
         break;

         case STATUS_END_COMMENT1:
            if( chr == '-' )
               iStatus = STATUS_END_COMMENT2;
            else
            {
               // Not "--": the dash was comment content after all.
               // (Fixed: was `m_data += "-" + chr;` -- pointer arithmetic
               // on a string literal, not concatenation.)
               iStatus = STATUS_READ_COMMENT;
               m_data += '-';
               m_data += chr;
            }
         break;

         case STATUS_END_COMMENT2:
            if ( chr == '>' )
            {
               // comment is done!
               iStatus = STATUS_DONE;
            }
            else // any sequence of -- followed by any character != '>' is illegal
               throw MalformedError( Error::errCommentInvalid, this );
         break;

         // data:
         case STATUS_READ_DATA:
            if ( chr == '?' && ( m_type == typePI || m_type == typeXMLDecl ))
               iStatus = STATUS_READ_TAG_NAME3;
            else if ( chr == '>' && m_type != typeData )
            {
               // done with this node (either PI or Directive)
               iStatus = STATUS_DONE;
            }
            else if ( chr == '<' && m_type == typeData )
            {
               // done with data elements
               in.unget();
               iStatus = STATUS_DONE;
            }
            else
            {
               if ( m_type == typeData && chr == '&' && ! ( style & MXML_STYLE_NOESCAPE) )
               {
                  iStatus = STATUS_READ_ENTITY;
                  entity = "";
               }
               else
               {
                  if ( chr == MXML_LINE_TERMINATOR )
                     nextLine();
                  m_data += chr;
               }
            }
         break;

         // data + escape
         case STATUS_READ_ENTITY:
            if ( chr == ';' )
            {
               // we see if we have a predef entity (also known as escape)
               // (Fixed: both branches assigned to m_data, wiping the data
               // accumulated before the entity; append instead.)
               if ( ( chr = parseEntity( entity ) ) != 0 )
                  m_data += chr;
               else
                  m_data += '&' + entity + ';';
               iStatus = STATUS_READ_DATA;
            }
            else if ( !isalnum( chr ) && chr != '_' && chr != '-' )
               //error - we have something like & &
               throw MalformedError( Error::errUnclosedEntity, this );
            else
               entity += chr;
         break;

         //Node name, first character
         case STATUS_READ_TAG_NAME:
            if ( isalpha( chr ) )
            {
               m_name += chr;
               iStatus = STATUS_READ_TAG_NAME2;  // second letter on
            }
            else
               throw MalformedError( Error::errInvalidNode, this );
         break;

         //Node name, from second character on
         case STATUS_READ_TAG_NAME2:
            if ( isalnum( chr ) || chr == '-' || chr == '_' || chr == ':')
               m_name += chr;
            else if ( chr == '/' && m_type != typeFakeClosing )
               iStatus = STATUS_READ_TAG_NAME3;  // waiting for '>' to close the tag
            else if ( chr == '?' && ( m_type == typePI || m_type == typeXMLDecl ))
               iStatus = STATUS_READ_TAG_NAME3;
            else if ( chr == '>')
            {
               if ( m_type == typeFakeClosing )
                  iStatus = STATUS_DONE;
               else
                  iStatus = STATUS_READ_SUBNODES;  // reading subnodes
            }
            else if ( chr == ' ' || chr == '\t' || chr == MXML_SOFT_LINE_TERMINATOR || chr == MXML_LINE_TERMINATOR )
            {
               if ( chr == MXML_LINE_TERMINATOR )
                  nextLine();
               // check for xml PI.
               if ( m_type == typePI && m_name == "xml" )
               {
                  m_type = typeXMLDecl;
               }
               if ( m_type == typeTag || m_type == typeXMLDecl )
                  iStatus = STATUS_READ_ATTRIB;  // read attributes
               else
                  iStatus = STATUS_READ_DATA;    // read data.
            }
            else
               throw MalformedError( Error::errInvalidNode, this );
         break;

         // node name; waiting for '>'
         case STATUS_READ_TAG_NAME3:
            if ( chr != '>' )
               throw MalformedError( Error::errInvalidNode, this );
            // if not, we are done with this node
            return;

         // reading attributes
         case STATUS_READ_ATTRIB:
            if ( chr == '/' || ( chr == '?' && ( m_type == typePI || m_type == typeXMLDecl )))
            {
               iStatus = STATUS_READ_TAG_NAME3;  // node name, waiting for '>'
            }
            else if ( chr == '>' )
               iStatus = STATUS_READ_SUBNODES;  // subnodes
            else if ( chr == MXML_LINE_TERMINATOR || chr == ' ' || chr == '\t' || chr == MXML_SOFT_LINE_TERMINATOR )
            {
               if ( chr == MXML_LINE_TERMINATOR )
                  nextLine();
            }
            else
            {
               in.unget();
               Attribute *attrib = new Attribute( in, style, line(), character() -1);
               m_attrib.push_back( attrib );
               setPosition( attrib->line(), attrib->character() );
            }
         break;

         case STATUS_READ_SUBNODES:
            in.unget();
            while ( in.good() )
            {
               //std::cout << "Reading subnode" << std::endl;
               Node *child = new Node( in, style, line(), character() -1);
               setPosition( child->line(), child->character() );

               if ( child->m_type == typeData )
               {
                  if ( child->m_data == "" )
                     // delete empty data nodes
                     delete child;
                  else
                  {
                     // set the-data-node for data promotion
                     if ( the_data_node == 0 )
                        the_data_node = child;
                     else
                        promote_data = false;
                     addBelow( child );
                  }
               }
               // have we found our closing node?
               else if ( child->m_type == typeFakeClosing )
               {
                  //is the name valid?
                  if ( m_name == child->m_name )
                  {
                     iStatus = STATUS_DONE;
                     delete child;
                     break;
                  }
                  else
                  {
                     // We are unclosed!
                     delete child;
                     throw MalformedError( Error::errUnclosed, this );
                  }
               }
               else // in all the other cases, add subnodes.
                  addBelow( child );
            }
         break;
      } // switch
   } // while

   // now we do a little cleanup:
   // if we are a data or a comment node, trim the data
   // if we are a tag and we have just one data node, let's move it to our
   // data member
   if ( m_type == typeData || m_type == typeComment )
   {
      int idx = m_data.find_first_not_of("\n\r \t");
      if( static_cast<unsigned int>(idx) != std::string::npos )
      {
         m_data = m_data.substr(idx);
         idx = m_data.find_last_not_of("\n\r \t");
         if( static_cast<unsigned int>(idx) != std::string::npos )
            m_data = m_data.substr( 0, idx+1 );
         else
            m_data = "";
      }
      else
         m_data = "";
   }

   if ( m_type == typeTag && promote_data && the_data_node != 0 )
   {
      m_data = the_data_node->m_data;
      // Data node have not children, and delete calls unlink()
      delete the_data_node;
   }
}
// Reads an OpenFlight texture palette record: a 200-byte file name followed
// by the pattern index and x/y pattern coordinates. Tries to load the named
// image as-is; on failure it retries with the path stripped to the text
// after the last '/'. On success a TextureObjChunk is created, and if a
// matching texture-attribute record is available its filter/wrap settings
// and a TextureEnvChunk are applied as well. Returns the stream state.
// NOTE(review): only '/' separators are considered when stripping the path;
// windows-style '\\' paths fall through to the warning branch.
/* virtual */ bool OFTexturePaletteRecord::read(std::istream &is)
{
    Inherited::readChar8(is, szFilename, 200);
    Inherited::readVal (is, iPatternIdx );
    Inherited::readVal (is, iPatternX );
    Inherited::readVal (is, iPatternY );

    OSG_OPENFLIGHT_LOG(("OFTexturePaletteRecord::read len " "[%u] file [%s] idx [%d]\n", _sLength, szFilename, iPatternIdx));

    ImageUnrecPtr pImage = ImageFileHandler::the()->read(szFilename);

    if(pImage != NULL)
    {
        pTexObj = TextureObjChunk::create();
        pTexObj->setImage(pImage);
    }
    else
    {
        // Full path failed: retry with just the basename.
        std::string szTmp = szFilename;
        std::string::size_type uiPos = szTmp.rfind('/');

        if(uiPos != std::string::npos)
        {
            pImage = ImageFileHandler::the()->read( &(szFilename[uiPos + 1]));

            if(pImage != NULL)
            {
                pTexObj = TextureObjChunk::create();
                pTexObj->setImage(pImage);
            }
            else
            {
                FWARNING(("OFTexturePaletteRecord::read: Could not read image " "[%s].\n", &(szFilename[uiPos + 1])));
            }
        }
        else
        {
            FWARNING(("OFTexturePaletteRecord::read: Could not read image " "[%s].\n", szFilename));
        }
    }

    if(pTexObj != NULL)
    {
        TexAttr attr;

        if(readTexAttr(attr) == true)
        {
            pTexObj->setMinFilter(attr.getMinFilter());
            pTexObj->setMagFilter(attr.getMagFilter());
            pTexObj->setWrapS (attr.getWrapU ());
            pTexObj->setWrapT (attr.getWrapV ());

            pTexEnv = TextureEnvChunk::create();
            pTexEnv->setEnvMode(attr.getEnvMode());
        }
    }

    return is.good();
}
// Static diff parsing function for unified diffs
// Walks the stream line by line and accumulates per-file statistics:
// "--- "/"+++ " headers open a new file entry, "@@" hunk headers set the
// expected '-'/'+' line counts for the current hunk, and body lines starting
// with '-'/'+' bump the deleted/added counters. A full '=' marker line resets
// the hunk counters; a leading EOF character terminates the stream (the
// git diff-tree pipe behaviour). Returns the accumulated Diffstat.
Diffstat DiffParser::parse(std::istream &in)
{
    static const char marker[] = "===================================================================";
    std::string str, file;
    Diffstat ds;
    Diffstat::Stat stat;
    int chunk[2] = {0, 0};
    while (in.good()) {
        std::getline(in, str);
        if (chunk[0] <= 0 && chunk[1] <= 0 && (!str.compare(0, 4, "--- ") || !str.compare(0, 4, "+++ "))) {
            // New file header: flush the stats of the previous file.
            if (!file.empty() && !stat.empty()) {
                ds.m_stats[file] = stat;
                file = std::string();
            }
            stat = Diffstat::Stat();
            std::vector<std::string> header = str::split(str.substr(4), "\t");
            if (header.empty()) {
                throw PEX(std::string("EMPTY HEADER: ")+str);
            }
            if (header[0] != "/dev/null") {
                file = header[0];
                // Strip surrounding quotes and the "a/"/"b/" prefixes.
                if (file[0] == '"' && file[file.length()-1] == '"') {
                    file = file.substr(1, file.length()-2);
                }
                if (!file.compare(0, 2, "a/") || !file.compare(0, 2, "b/")) {
                    file = file.substr(2);
                }
            }
        } else if (!str.compare(0, 2, "@@")) {
            // Hunk header "@@ -l,n +l,n @@": extract the two line counts.
            std::vector<std::string> header = str::split(str.substr(2), "@@", true);
            if (header.empty()) {
                throw PEX(std::string("EMPTY HEADER: ")+str);
            }
            std::vector<std::string> ranges = str::split(header[0], " ", true);
            if (ranges.size() < 2 || ranges[0].empty() || ranges[1].empty()) {
                throw PEX(std::string("EMPTY HEADER: ")+str);
            }
            size_t pos;
            if ((pos = ranges[0].find(',')) != std::string::npos) {
                str::str2int(ranges[0].substr(pos+1), &chunk[(ranges[0][0] == '-' ? 0 : 1)]);
            } else {
                chunk[(ranges[0][0] == '-' ? 0 : 1)] = 1;
            }
            if ((pos = ranges[1].find(',')) != std::string::npos) {
                str::str2int(ranges[1].substr(pos+1), &chunk[(ranges[1][0] == '-' ? 0 : 1)]);
            } else {
                chunk[(ranges[1][0] == '-' ? 0 : 1)] = 1;
            }
        } else if (!str.empty() && str[0] == '-') {
            // NOTE(review): character counts include the leading marker char.
            stat.cdel += str.length();
            ++stat.ldel;
            --chunk[0];
        } else if (!str.empty() && str[0] == '+') {
            stat.cadd += str.length();
            ++stat.ladd;
            --chunk[1];
        } else if (str == marker) {
            chunk[0] = chunk[1] = 0;
        } else if (!str.empty() && str[0] == (char)EOF) {
            // git diff-tree pipe prints EOF after diff data
            break;
        } else {
            // Context line: consume one expected line from each open counter.
            if (chunk[0] > 0) --chunk[0];
            if (chunk[1] > 0) --chunk[1];
        }
    }
    // Flush the final file's stats.
    if (!file.empty() && !stat.empty()) {
        ds.m_stats[file] = stat;
    }
    return ds;
}
// Reads the OpenFlight vertex palette. After the 4-byte total length, loops
// over vertex records until iFullLength - 8 bytes are consumed: every vertex
// has colour data and a position (scaled by the database unit scale); opcodes
// 69/70 add a normal, opcodes 70/71 add a texture coordinate. A VertexInfo
// entry per vertex records the byte offset and a component bitmask so later
// face records can reference vertices by offset. Returns the stream state.
bool OFVertexPaletteRecord::read(std::istream &is)
{
    OSG_OPENFLIGHT_LOG(("OFVertexPaletteRecord::read len [%u]\n", _sLength));

    static std::vector<char> tmpBuf;

    Int32 iFullLength;

    Inherited::readVal(is, iFullLength);

    Int32 iRead = 0;

    OFRecordHeader oRHeader;

    bool rc = true;

    Vec3d tmpPos;
    Vec3f tmpNorm;
    Vec2f tmpTexCoord;

    VertexInfo tmpInfo;

    while(iRead < iFullLength - 8 && is.good() == true)
    {
        rc = oRHeader.read(is);

        if(rc == false)
        {
            break;
        }

        // Every vertex carries position and colour; offset is relative to
        // the palette start (+8 for length field and this record's header).
        tmpInfo.uiType = HasPos | HasCol;
        tmpInfo.uiOffset = iRead + 8;

        tmpInfo.uiIdx[ColIdx ] = -1;
        tmpInfo.uiIdx[NormIdx ] = -1;
        tmpInfo.uiIdx[TexCoordIdx] = -1;

        Int32 uiSize = 0;

        uiSize += Inherited::readVal(is, tmpInfo.uiColNameIdx);
        uiSize += Inherited::readVal(is, tmpInfo.iFlags );

        uiSize += Inherited::readVal(is, tmpPos[0]);
        uiSize += Inherited::readVal(is, tmpPos[1]);
        uiSize += Inherited::readVal(is, tmpPos[2]);

        tmpPos *= _oDB.getUnitScale();

        tmpInfo.uiIdx[PosIdx] = UInt32(vPositions.size());
        vPositions.push_back(Pnt3f(tmpPos));

        // Opcodes 69 and 70 carry a normal.
        if(oRHeader.sOpCode == 69 || oRHeader.sOpCode == 70)
        {
            uiSize += Inherited::readVal(is, tmpNorm[0]);
            uiSize += Inherited::readVal(is, tmpNorm[1]);
            uiSize += Inherited::readVal(is, tmpNorm[2]);

            tmpInfo.uiIdx[NormIdx] = UInt32(vNormals.size());
            tmpInfo.uiType |= HasNorm;

            vNormals.push_back(tmpNorm);
        }

        // Opcodes 70 and 71 carry a texture coordinate.
        if(oRHeader.sOpCode == 70 || oRHeader.sOpCode == 71)
        {
            uiSize += Inherited::readVal(is, tmpTexCoord[0]);
            uiSize += Inherited::readVal(is, tmpTexCoord[1]);

            tmpInfo.uiIdx[TexCoordIdx] = UInt32(vTexCoords.size());
            tmpInfo.uiType |= HasTexCoord;

            vTexCoords.push_back(tmpTexCoord);
        }

        uiSize += Inherited::readVal(is, tmpInfo.iPackedCol);
        uiSize += Inherited::readVal(is, tmpInfo.iColIdx );

        // Optional padding word on the normal-carrying records.
        if(oRHeader.sOpCode == 69 || oRHeader.sOpCode == 70)
        {
            if(uiSize < oRHeader.sLength - 4)
            {
                uiSize += Inherited::readVal(is, tmpInfo.iPad1);
            }
        }

        vVertexInfo.push_back(tmpInfo);

        iRead += oRHeader.sLength;
    }

#if 0
    fprintf(stderr, "Got %d vertices\n", vVertexInfo.size());
#endif

    return is.good();
}
// Runs the Language Server Protocol read/dispatch loop on `In` until the
// stream ends or `IsDone` is set by a handler. Each iteration parses the
// header block (Content-Length is mandatory; '#'-prefixed lines are allowed
// as comments for tests), reads exactly ContentLength bytes of JSON (plus a
// trailing NUL for the YAML parser) and hands the message to `Dispatcher`.
// Reads interrupted by EINTR are retried after clearing the stream state.
void clangd::runLanguageServerLoop(std::istream &In, JSONOutput &Out, JSONRPCDispatcher &Dispatcher, bool &IsDone)
{
  while (In.good()) {
    // A Language Server Protocol message starts with a set of HTTP headers,
    // delimited by \r\n, and terminated by an empty line (\r\n).
    unsigned long long ContentLength = 0;
    while (In.good()) {
      std::string Line;
      std::getline(In, Line);
      if (!In.good() && errno == EINTR) {
        In.clear();
        continue;
      }
      llvm::StringRef LineRef(Line);

      // We allow YAML-style comments in headers. Technically this isn't part
      // of the LSP specification, but makes writing tests easier.
      if (LineRef.startswith("#"))
        continue;

      // Content-Type is a specified header, but does nothing.
      // Content-Length is a mandatory header. It specifies the length of the
      // following JSON.
      // It is unspecified what sequence headers must be supplied in, so we
      // allow any sequence.
      // The end of headers is signified by an empty line.
      if (LineRef.consume_front("Content-Length: ")) {
        if (ContentLength != 0) {
          Out.log("Warning: Duplicate Content-Length header received. "
                  "The previous value for this message (" + std::to_string(ContentLength) + ") was ignored.\n");
        }
        llvm::getAsUnsignedInteger(LineRef.trim(), 0, ContentLength);
        continue;
      } else if (!LineRef.trim().empty()) {
        // It's another header, ignore it.
        continue;
      } else {
        // An empty line indicates the end of headers.
        // Go ahead and read the JSON.
        break;
      }
    }
    if (ContentLength > 0) {
      // Now read the JSON. Insert a trailing null byte as required by the YAML
      // parser.
      std::vector<char> JSON(ContentLength + 1, '\0');
      In.read(JSON.data(), ContentLength);
      // If the stream is aborted before we read ContentLength bytes, In
      // will have eofbit and failbit set.
      if (!In) {
        Out.log("Input was aborted. Read only " + std::to_string(In.gcount()) + " bytes of expected " + std::to_string(ContentLength) + ".\n");
        break;
      }
      llvm::StringRef JSONRef(JSON.data(), ContentLength);
      // Log the message.
      Out.log("<-- " + JSONRef + "\n");

      // Finally, execute the action for this JSON message.
      if (!Dispatcher.call(JSONRef))
        Out.log("JSON dispatch failed!\n");

      // If we're done, exit the loop.
      if (IsDone)
        break;
    } else {
      Out.log( "Warning: Missing Content-Length header, or message has zero "
               "length.\n" );
    }
  }
}
/* virtual */ bool OFColorPaletteRecord::read(std::istream &is) { OSG_OPENFLIGHT_LOG(("OFColorPaletteRecord::read len [%u]\n", _sLength)); Char8 reserved1[128]; Inherited::readChar8(is, reserved1, 128); bool hasNames = _sLength > 4228; UInt32 numColors = 1024; if(hasNames == false) { // number of colors derived from record size UInt32 numColors2 = (_sLength - 132) / 4; numColors = osgMin(numColors, numColors2); } for(UInt32 i = 0; i < numColors; ++i) { UChar8 alpha; UChar8 blue; UChar8 green; UChar8 red; Inherited::readVal(is, alpha); Inherited::readVal(is, blue ); Inherited::readVal(is, green); Inherited::readVal(is, red ); colors.push_back(Color4f(red / 255.f, green / 255.f, blue / 255.f, alpha / 255.f )); } if(hasNames == true) { colorNames.resize(numColors); Int32 numNames; Inherited::readVal(is, numNames); for(Int32 i = 0; i < numNames; ++i) { UInt16 nameLen; Int16 reserved2; Int16 colorIdx; Int16 reserved3; Char8 name[80]; Inherited::readVal (is, nameLen); Inherited::readVal (is, reserved2); Inherited::readVal (is, colorIdx); Inherited::readVal (is, reserved3); Inherited::readChar8(is, name, 80); colorNames[colorIdx] = std::string(name); } } return is.good(); }
// Loads a dihedral-correction training set. The first section (lines
// starting with '+'/'-') declares dihedral labels and assigns genome columns
// to their cosine terms (n = 4..1), building `correctionMap`; a column may
// alias another dihedral's column. The second section reads one residue
// block per "label [<weight] dih..." line, followed by data rows (terminated
// by a '/' line) holding one angle per listed dihedral plus E and E0; each
// row becomes cos(n*dih) contributions plus a relative energy. `col` ends up
// in *genomeSize; conformation counts/breaks/weights are accumulated.
// NOTE(review): this excerpt ends at the close of the reading loop -- the
// code that copies `data`/`breaks`/`weights` into the output arrays
// (tset/tgts/wts/brks/...) and closes the function lies outside this view.
void load(std::istream & in, float **tset, float **tgts, float **wts, int *nConfs, int **brks, int *nBrks, int *genomeSize, std::map<std::string,DihCorrection> & correctionMap)
{
  int ch;
  int col=0;
  // Section 1: dihedral declarations ('+' = 35-char name, '-' = 11-char).
  while(in.peek()=='+'||in.peek()=='-'){
    std::string label;
    std::string dih;
    ch=in.get();
    int strLen=(ch=='-'?11:35);
    in >> label;
    in >> std::ws;
    for(int i=0;i<strLen;i++){
      dih.push_back(in.get());
    }
#if LOAD_DEBUG
    std::cout << "Dihedral[" << label << "]=" << dih << std::endl;
#endif
    DihCorrection dc(dih);
    while((ch=in.get())==' ');
    if(ch=='\n'){
      // Bare declaration: allocate four fresh columns (n = 4..1).
      for(int n=4;n>0;n--) {
#if LOAD_DEBUG
        std::cout << n << ": " << col << std::endl;
#endif
        dc.addCorr(n, col++);
      }
#if LOAD_DEBUG
      std::cout << dc.getCorrs().size() << std::endl;
#endif
    } else {
      // Explicit terms: digits allocate new columns, a label aliases the
      // corresponding column of an earlier dihedral.
      while(ch!='\n'){
        if(ch<'0'||ch>'9'){
          std::string otherLabel;
          do {
            otherLabel.push_back(ch);
            ch=in.get();
          } while(ch<'0'||ch>'9');
          dc.addCorr(ch-'0',correctionMap[otherLabel].getCorrs()[ch-'0']);
        } else {
          dc.addCorr(ch-'0', col++);
        }
        do ch=in.get();while(ch==' ');
      }
    }
    correctionMap[label]=dc;
    //std::map<std::string,DihCorrection>::iterator it=correctionMap.find(label);
    //if(it!=)
  }
  *genomeSize=col;
  // Section 2: residue blocks with conformation data rows.
  std::vector<std::vector<float> > data;
  std::vector<int> breaks;
  std::vector<float> weights;
  *nConfs=0;
  std::string line;
  double off;
  while(in.good()&&std::getline(in,line)){
    breaks.push_back(*nConfs);
    std::list<DihCorrection*> cols;
    std::string label;
    std::istringstream input(line);
    input >> label;
#if LOAD_DEBUG
    std::cout << "Residue=" << label << std::endl;
#endif
    weights.push_back(1.0f);
    if(input.good()){
      input >> label;
      // "<w" overrides the block weight; anything else is a dihedral label.
      if(label[0]=='<')
        weights.back()=atof(label.data()+1);
      else {
        cols.push_back(&correctionMap[label]);
      }
      while(input.good()){
        input >> label;
        cols.push_back(&correctionMap[label]);
      }
    }
    std::vector<float> dataRow;
    while(std::getline(in,line)&&line[0]!='/'){
      input.clear();
      input.str(line);
      dataRow.assign(1+*genomeSize, 0);
      double dih;
      for(std::list<DihCorrection*>::iterator it=cols.begin();it!=cols.end();++it){
        input >> dih;
#if LOAD_DEBUG
        std::cout << dih << ":";
#endif
        // degrees -> radians
        dih*=3.141592653589793238/180.;
#if LOAD_DEBUG
        std::cout << (*it)->getCorrs().size() << std::endl;
#endif
#if 0
        for(std::map<int,int>::iterator jt=(*it)->getCorrs().begin();jt!=(*it)->getCorrs().end();++jt){
          //for(const auto& jt:(*it)->getCorrs()){
#if LOAD_DEBUG
          std::cout << " " << jt->first << "[" << jt->second << "]+=" << cos(dih*(float)jt->first);
#endif
          dataRow[jt->second]+=cos(dih*(float)jt->first);
        }
#endif
        for(int n=4;n>0;--n){
#if LOAD_DEBUG
          std::cout << " " << n << "[" << (*it)->getCorrs()[n] << "]+=" << cos(dih*(float)n);
#endif
          dataRow[(*it)->getCorrs()[n]]+=cos(dih*(double)n);
        }
#if LOAD_DEBUG
        std::cout << ' ' << (*it)->getCorrs().size() << std::endl;
#endif
      }
      double E,E0;
      input >> E >> E0;
      // First row of a block anchors the energy offset; later rows are
      // stored relative to it.
      if(*nConfs==breaks.back()){
        off=E0-E;
        E=0;
      }else{
        E=E-E0+off;
      }
      dataRow[*genomeSize]=(float)E;
#if LOAD_DEBUG
      std::cout << " deltaE="<<dataRow[*genomeSize]<<std::endl;
#endif
      ++*nConfs;
      data.push_back(dataRow);
    }
    // Normalize the block weight by the number of conformation pairs.
    weights.back()/=(float)((*nConfs-breaks.back())*(*nConfs-breaks.back()-1)/2);
  }
// Reads a Gmsh .msh stream into the mesh (serial: processor 0 only).
// The visible portion validates the $MeshFormat header (versions 2.0/2.1/2.2,
// ASCII only) and parses the node block ($NOD/$NOE/$Nodes), recording the
// file-id -> mesh-id mapping in `nodetrans` since node ids may be
// non-consecutive.
// NOTE(review): this excerpt ends inside the main while(true) loop -- the
// element-block parsing and the end of the function lie outside this view.
void GmshIO::read_mesh(std::istream& in)
{
  // This is a serial-only process for now;
  // the Mesh should be read on processor 0 and
  // broadcast later
  libmesh_assert_equal_to (MeshOutput<MeshBase>::mesh().processor_id(), 0);

  libmesh_assert(in.good());

  // initialize the map with element types
  init_eletypes();

  // clear any data in the mesh
  MeshBase& mesh = MeshInput<MeshBase>::mesh();
  mesh.clear();

  // some variables
  int format=0, size=0;
  Real version = 1.0;

  // map to hold the node numbers for translation
  // note the the nodes can be non-consecutive
  std::map<unsigned int, unsigned int> nodetrans;

  // For reading the file line by line
  std::string s;

  while (true)
    {
      // Try to read something. This may set EOF!
      std::getline(in, s);

      if (in)
        {
          // Process s...
          if (s.find("$MeshFormat") == static_cast<std::string::size_type>(0))
            {
              in >> version >> format >> size;
              if ((version != 2.0) && (version != 2.1) && (version != 2.2))
                {
                  // Some notes on gmsh mesh versions:
                  //
                  // Mesh version 2.0 goes back as far as I know. It's not explicitly
                  // mentioned here: http://www.geuz.org/gmsh/doc/VERSIONS.txt
                  //
                  // As of gmsh-2.4.0:
                  // bumped mesh version format to 2.1 (small change in the $PhysicalNames
                  // section, where the group dimension is now required);
                  // [Since we don't even parse the PhysicalNames section at the time
                  // of this writing, I don't think this change affects us.]
                  //
                  // Mesh version 2.2 tested by Manav Bhatia; no other
                  // libMesh code changes were required for support
                  libmesh_error_msg("Error: Unknown msh file version " << version);
                }

              if (format)
                libmesh_error_msg("Error: Unknown data format for mesh in Gmsh reader.");
            }

          // read the node block
          else if (s.find("$NOD") == static_cast<std::string::size_type>(0) ||
                   s.find("$NOE") == static_cast<std::string::size_type>(0) ||
                   s.find("$Nodes") == static_cast<std::string::size_type>(0))
            {
              unsigned int num_nodes = 0;
              in >> num_nodes;
              mesh.reserve_nodes (num_nodes);

              // read in the nodal coordinates and form points.
              Real x, y, z;
              unsigned int id;

              // add the nodal coordinates to the mesh
              for (unsigned int i=0; i<num_nodes; ++i)
                {
                  in >> id >> x >> y >> z;
                  mesh.add_point (Point(x, y, z), i);
                  nodetrans[id] = i;
                }

              // read the $ENDNOD delimiter
              std::getline(in, s);
            }
// Parses one LLSD notation element from `istr` into `data`, dispatching on
// the first non-whitespace character (see the grammar table below). Returns
// the number of elements parsed (1 plus children for containers) or
// PARSE_FAILURE, in which case `data` is cleared. Stream failures after any
// sub-parse are logged and also yield PARSE_FAILURE.
// virtual
S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data) const
{
    // map: { string:object, string:object }
    // array: [ object, object, object ]
    // undef: !
    // boolean: true | false | 1 | 0 | T | F | t | f | TRUE | FALSE
    // integer: i####
    // real: r####
    // uuid: u####
    // string: "g'day" | 'have a "nice" day' | s(size)"raw data"
    // uri: l"escaped"
    // date: d"YYYY-MM-DDTHH:MM:SS.FFZ"
    // binary: b##"ff3120ab1" | b(size)"raw data"
    char c;
    c = istr.peek();
    while(isspace(c))
    {
        // pop the whitespace.
        c = get(istr);
        c = istr.peek();
        continue;
    }
    if(!istr.good())
    {
        return 0;
    }
    S32 parse_count = 1;
    switch(c)
    {
    case '{':
    {
        S32 child_count = parseMap(istr, data);
        if((child_count == PARSE_FAILURE) || data.isUndefined())
        {
            parse_count = PARSE_FAILURE;
        }
        else
        {
            parse_count += child_count;
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading map." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    }
    case '[':
    {
        S32 child_count = parseArray(istr, data);
        if((child_count == PARSE_FAILURE) || data.isUndefined())
        {
            parse_count = PARSE_FAILURE;
        }
        else
        {
            parse_count += child_count;
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading array." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    }
    case '!':
        c = get(istr);
        data.clear();
        break;
    case '0':
        c = get(istr);
        data = false;
        break;
    case 'F':
    case 'f':
        // A lone F/f is false; letters following mean "false"/"FALSE" etc.
        ignore(istr);
        c = istr.peek();
        if(isalpha(c))
        {
            int cnt = deserialize_boolean( istr, data, NOTATION_FALSE_SERIAL, false);
            if(PARSE_FAILURE == cnt) parse_count = cnt;
            else account(cnt);
        }
        else
        {
            data = false;
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading boolean." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    case '1':
        c = get(istr);
        data = true;
        break;
    case 'T':
    case 't':
        ignore(istr);
        c = istr.peek();
        if(isalpha(c))
        {
            int cnt = deserialize_boolean(istr,data,NOTATION_TRUE_SERIAL,true);
            if(PARSE_FAILURE == cnt) parse_count = cnt;
            else account(cnt);
        }
        else
        {
            data = true;
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading boolean." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    case 'i':
    {
        c = get(istr);
        S32 integer = 0;
        istr >> integer;
        data = integer;
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading integer." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    }
    case 'r':
    {
        c = get(istr);
        F64 real = 0.0;
        istr >> real;
        data = real;
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading real." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    }
    case 'u':
    {
        c = get(istr);
        LLUUID id;
        istr >> id;
        data = id;
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading uuid." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    }
    case '\"':
    case '\'':
    case 's':
        if(!parseString(istr, data))
        {
            parse_count = PARSE_FAILURE;
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading string." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    case 'l':
    {
        c = get(istr); // pop the 'l'
        c = get(istr); // pop the delimiter
        std::string str;
        int cnt = deserialize_string_delim(istr, str, c);
        if(PARSE_FAILURE == cnt)
        {
            parse_count = PARSE_FAILURE;
        }
        else
        {
            data = LLURI(str);
            account(cnt);
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading link." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    }
    case 'd':
    {
        c = get(istr); // pop the 'd'
        c = get(istr); // pop the delimiter
        std::string str;
        int cnt = deserialize_string_delim(istr, str, c);
        if(PARSE_FAILURE == cnt)
        {
            parse_count = PARSE_FAILURE;
        }
        else
        {
            data = LLDate(str);
            account(cnt);
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading date." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    }
    case 'b':
        if(!parseBinary(istr, data))
        {
            parse_count = PARSE_FAILURE;
        }
        if(istr.fail())
        {
            llinfos << "STREAM FAILURE reading data." << llendl;
            parse_count = PARSE_FAILURE;
        }
        break;
    default:
        parse_count = PARSE_FAILURE;
        llinfos << "Unrecognized character while parsing: int(" << (int)c << ")" << llendl;
        break;
    }
    if(PARSE_FAILURE == parse_count)
    {
        data.clear();
    }
    return parse_count;
}
// Extracts the next whitespace-delimited token from the stream into the
// wrapped value, but only when the stream is currently in a readable state;
// a bad/failed/EOF stream leaves the value untouched.
virtual void pop_from_stream(std::istream &_in) override
{
    if (!_in.good())
        return;
    _in >> this->get_value();
}
// Reads numTuples * numComp binary elements of type T from `in` into `data`,
// in chunks of at most DEFAULT_BLOCKSIZE bytes. Returns 1 when there is
// nothing to read, 0 on success, and a negative error code when the stream
// fails/ends before all bytes are consumed (diagnostics go to std::cout).
// T is the element type -- the template header precedes this excerpt.
// NOTE(review): the eofbit and badbit branches both return -12021; the
// badbit code was presumably meant to be distinct (-12022?) -- confirm
// against callers. `numRead` is assigned but never used.
int32_t vtkReadBinaryData(std::istream& in, T* data, int32_t numTuples, int32_t numComp)
{
  if (numTuples == 0 || numComp == 0)
  {
    // nothing to read here.
    return 1;
  }

  size_t numBytesToRead = static_cast<size_t>(numTuples) * static_cast<size_t>(numComp) * sizeof(T);
  size_t numRead = 0;
  // Cast our pointer to a pointer that std::istream will take
  char* chunkptr = reinterpret_cast<char*>(data);
  numRead = 0;
  // Now start reading the data in chunks if needed.
  size_t chunkSize = DEFAULT_BLOCKSIZE;
  // Sanity check the chunk size to make sure it is not any larger than the chunk of data we are about to read
  if (numBytesToRead < DEFAULT_BLOCKSIZE)
  {
    chunkSize = numBytesToRead;
  }
  size_t master_counter = 0;
  size_t bytes_read = 0;
  // Now chunk through the file reading up chunks of data that can actually be
  // read in a single read. DEFAULT_BLOCKSIZE will control this.
  while(1)
  {
    in.read(chunkptr, chunkSize);
    bytes_read = in.gcount();
    chunkptr = chunkptr + bytes_read;
    master_counter += bytes_read;
    // Shrink the final chunk to exactly the bytes that remain.
    if (numBytesToRead - master_counter < chunkSize)
    {
      chunkSize = numBytesToRead - master_counter;
    }
    if (master_counter >= numBytesToRead)
    {
      break;
    }
    if (in.good())
    {
      //std::cout << "all data read successfully." << in.gcount() << std::endl;
    }
    if ((in.rdstate() & std::ifstream::failbit) != 0)
    {
      std::cout << "FAIL " << in.gcount() << " could be read. Needed " << chunkSize << " total bytes read = " << master_counter << std::endl;
      return -12020;
    }
    if ((in.rdstate() & std::ifstream::eofbit) != 0)
    {
      std::cout << "EOF " << in.gcount() << " could be read. Needed " << chunkSize << " total bytes read = " << master_counter << std::endl;
      return -12021;
    }
    if ((in.rdstate() & std::ifstream::badbit) != 0)
    {
      std::cout << "BAD " << in.gcount() << " could be read. Needed " << chunkSize << " total bytes read = " << master_counter << std::endl;
      return -12021;
    }
  }
  return 0;
}
//Loads an MD5-style .md5anim stream: reads the header counts, then the
//"hierarchy", "bounds", "baseframe" and per-"frame" sections, and finally
//composes an absolute bone pose for every frame by walking each joint's
//parent chain. Returns the built animation.
//NOTE(review): inManager is not used anywhere in this function body.
AnimationPtr loadAnim(std::istream& inSource, Manager& inManager){
    if(!inSource.good()) fatalError("stream is in bad condition");
    //check header
    checkAndEatHeader(inSource);
    //get numFrames
    size_t numFrames = getNamedValue(inSource, "numFrames");
    //get numJoints
    size_t numJoints = getNamedValue(inSource, "numJoints");
    //get framerate
    size_t frameRate = getNamedValue(inSource, "frameRate");
    //get numAnimatedComponents
    size_t numAnimatedComponents = getNamedValue(inSource, "numAnimatedComponents");
    std::string token;
    //get hierarchy data
    findNextToken(inSource);
    std::getline(inSource, token, ' ');
    if(token != "hierarchy") fatalError("'hierarchy' wanted.");
    //eat {
    eatChar(inSource, '{');
    std::vector<Bone> joints;
    std::vector<int> jointFlags;        //per-joint animated-component bit flags
    std::vector<int> jointStartIndices; //per-joint start offset into a frame's float array
    joints.reserve(numJoints);
    jointFlags.reserve(numJoints);
    jointStartIndices.reserve(numJoints);
    for(size_t i = 0; i < numJoints; ++i){
        //read next joint
        // '"' must be here
        eatChar(inSource, '"');
        hydra::data::Bone nextBone;
        //read name (everything up to the closing quote)
        std::getline(inSource, nextBone.mName, '"');
        findNextToken(inSource);
        //read parent's id
        inSource >> nextBone.mParent;
        joints.push_back(nextBone);
        findNextToken(inSource);
        //read flags
        int flags = 0;
        inSource >> flags;
        jointFlags.push_back(flags);
        int startIndex = -1;
        inSource >> startIndex;
        jointStartIndices.push_back(startIndex);
    }
    //eat '}'
    eatChar(inSource, '}');
    //ignore bounds for now
    //TODO: save them somewhere (may be)
    //bounds data
    findNextToken(inSource);
    std::getline(inSource, token, ' ');
    if(token != "bounds") fatalError("'bounds' wanted.");
    eatChar(inSource, '{');
    //bounding boxes for each frame
    //for(size_t i = 0; i < numFrames; ++i){
    //
    //}
    //just eat till '}'
    //NOTE(review): assumes the bounds section is at most 1000 bytes per
    //frame; a larger section would desynchronize the parser — confirm.
    inSource.ignore(1000*numFrames, '}');
    //baseframe goes now
    findNextToken(inSource);
    std::getline(inSource, token, ' ');
    if(token != "baseframe") fatalError("'baseframe' wanted");
    eatChar(inSource, '{');
    for(size_t i = 0; i < numJoints; ++i){
        eatChar(inSource, '(');
        float x, y, z;
        //read position
        inSource >> x >> y >> z;
        joints[i].mPos = Vector3D(x, y, z);
        eatChar(inSource, ')');
        eatChar(inSource, '(');
        //read orientation (x, y, z only; buildUnitQuat derives the rest)
        inSource >> x >> y >> z;
        joints[i].mOrient = buildUnitQuat(x, y, z);
        eatChar(inSource, ')');
    }
    eatChar(inSource, '}');
    std::vector< std::vector<float> > frameData;
    frameData.resize(numFrames);
    //read frame data
    for(size_t i = 0; i < numFrames; ++i){
        findNextToken(inSource);
        std::getline(inSource, token, ' ');
        if(token != "frame") fatalError("'frame' wanted");
        int frameIndex = -1;
        findNextToken(inSource);
        //NOTE(review): frameIndex comes straight from the file and is used
        //below as an index into frameData with no bounds check — verify the
        //input is trusted.
        inSource >> frameIndex;
        eatChar(inSource, '{');
        frameData[frameIndex].resize(numAnimatedComponents);
        for(size_t j = 0; j < numAnimatedComponents; ++j){
            findNextToken(inSource);
            inSource >> frameData[frameIndex][j];
        }
        eatChar(inSource, '}');
    }
    //all the data read
    //now we should build frames
    AnimationPtr anim(new Animation());
    anim->mFrames.resize(numFrames);
    //for each frame
    for(size_t i = 0; i < numFrames; ++i){
        anim->mFrames[i].mBones.resize(numJoints);
        //for each joint
        for(size_t j = 0; j < numJoints; ++j){
            //copy the baseframe bone, then overwrite the animated components
            hydra::data::Bone currentBone = joints[j];
            int startIndex = jointStartIndices[j];
            //flag bits 1/2/4 mark animated position x/y/z; each consumed
            //component advances startIndex within this frame's float array
            if (jointFlags[j] & 1){
                currentBone.mPos.setX(frameData[i][startIndex]);
                ++startIndex;
            }
            if(jointFlags[j] & 2){
                currentBone.mPos.setY(frameData[i][startIndex]);
                ++startIndex;
            }
            if(jointFlags[j] & 4){
                currentBone.mPos.setZ(frameData[i][startIndex]);
                ++startIndex;
            }
            //flag bits 8/16/32 mark animated orientation x/y/z
            Vector3D newQuat;
            if(jointFlags[j] & 8){
                newQuat.setX(frameData[i][startIndex]);
                ++startIndex;
            }
            if(jointFlags[j] & 16){
                newQuat.setY(frameData[i][startIndex]);
                ++startIndex;
            }
            if(jointFlags[j] & 32){
                newQuat.setZ(frameData[i][startIndex]);
                ++startIndex;
            }
            currentBone.mOrient = buildUnitQuat(newQuat.x(), newQuat.y(), newQuat.z());
            //assuming the parent bone was already handled (i.e. parents
            //appear before children, parentIndex < j) — TODO confirm
            int parentIndex = currentBone.mParent;
            //if has parent
            if(parentIndex >= 0){
                const hydra::data::Bone& parentBone = anim->mFrames[i].mBones[parentIndex];
                //rotate position
                parentBone.mOrient.rotate(currentBone.mPos);
                currentBone.mPos += parentBone.mPos;
                currentBone.mOrient = parentBone.mOrient * currentBone.mOrient;
                currentBone.mOrient.normalize();
            }
            anim->mFrames[i].mBones[j] = currentBone;
        }
    }
    anim->mFramerate = static_cast<float>(frameRate);
    return anim;
}
// Reads a legacy permissions block from the stream: a brace-delimited list
// of "keyword value" lines. Permission masks are parsed as hex, agent/group
// ids as UUID strings; unknown keywords are logged and skipped.
// Always returns TRUE (the loop simply stops at "}" or end of stream).
BOOL LLPermissions::importLegacyStream(std::istream& input_stream)
{
	// Start from a clean slate before applying whatever the stream carries.
	init(LLUUID::null, LLUUID::null, LLUUID::null, LLUUID::null);
	const S32 BUFSIZE = 16384;

	// *NOTE: Changing the buffer size will require changing the scanf
	// calls below.
	char buffer[BUFSIZE];	/* Flawfinder: ignore */
	char keyword[256];	/* Flawfinder: ignore */
	char valuestr[256];	/* Flawfinder: ignore */
	char uuid_str[256];	/* Flawfinder: ignore */
	U32 mask;

	keyword[0] = '\0';
	valuestr[0] = '\0';
	while (input_stream.good())
	{
		input_stream.getline(buffer, BUFSIZE);
		// BUGFIX: if a line is blank or unparsable, sscanf assigns nothing
		// and keyword/valuestr keep the previous line's tokens, causing the
		// previous keyword's branch to run again. Skip such lines instead.
		if (sscanf(	/* Flawfinder: ignore */
				buffer, " %255s %255s", keyword, valuestr) < 1)
		{
			continue;
		}
		if (!strcmp("{", keyword))
		{
			continue;
		}
		if (!strcmp("}",keyword))
		{
			break;
		}
		else if (!strcmp("creator_mask", keyword))
		{
			// legacy support for "creator" masks
			sscanf(valuestr, "%x", &mask);
			mMaskBase = mask;
			fixFairUse();
		}
		else if (!strcmp("base_mask", keyword))
		{
			sscanf(valuestr, "%x", &mask);
			mMaskBase = mask;
			//fixFairUse();
		}
		else if (!strcmp("owner_mask", keyword))
		{
			sscanf(valuestr, "%x", &mask);
			mMaskOwner = mask;
		}
		else if (!strcmp("group_mask", keyword))
		{
			sscanf(valuestr, "%x", &mask);
			mMaskGroup = mask;
		}
		else if (!strcmp("everyone_mask", keyword))
		{
			sscanf(valuestr, "%x", &mask);
			mMaskEveryone = mask;
		}
		else if (!strcmp("next_owner_mask", keyword))
		{
			sscanf(valuestr, "%x", &mask);
			mMaskNextOwner = mask;
		}
		else if (!strcmp("creator_id", keyword))
		{
			sscanf(valuestr, "%255s", uuid_str);	/* Flawfinder: ignore */
			mCreator.set(uuid_str);
		}
		else if (!strcmp("owner_id", keyword))
		{
			sscanf(valuestr, "%255s", uuid_str);	/* Flawfinder: ignore */
			mOwner.set(uuid_str);
		}
		else if (!strcmp("last_owner_id", keyword))
		{
			sscanf(valuestr, "%255s", uuid_str);	/* Flawfinder: ignore */
			mLastOwner.set(uuid_str);
		}
		else if (!strcmp("group_id", keyword))
		{
			sscanf(valuestr, "%255s", uuid_str);	/* Flawfinder: ignore */
			mGroup.set(uuid_str);
		}
		else if (!strcmp("group_owned", keyword))
		{
			// BUGFIX: mask is a U32, so read it with %u rather than %d.
			sscanf(valuestr, "%u", &mask);
			mIsGroupOwned = (mask != 0);
		}
		else
		{
			llinfos << "unknown keyword " << keyword
					<< " in permissions import" << llendl;
		}
	}
	fix();
	return TRUE;
}
// Parses an Applanix exterior-orientation (EO) file from the stream:
// the header, then free-form parameter lines up to "RECORD FORMAT", then
// the comma-separated record-format field names, and finally one EO record
// per line. Lat/lon extents are tracked when the format has LAT/LONG fields.
// NOTE: this excerpt ends inside the function; its closing brace and final
// return are outside this chunk of the file.
bool ossimApplanixEOFile::parseStream(std::istream& in)
{
   theRecordIdMap.clear();
   ossimString line;
   int c = '\0';
   if(!parseHeader(in, theHeader))
   {
      return false;
   }
   // now parse parameters
   in>>applanix_skipws;
   line = "";
   // Scan parameter lines until the "RECORD FORMAT" section is reached.
   while(in.good()&& !line.contains("RECORD FORMAT"))
   {
      std::getline(in, line.string());
      line = line.upcase();
      line = line.substitute("\r","\n", true);
      if(line.contains("KAPPA CARDINAL"))
      {
         theKappaCardinal = line;
         theKappaCardinal = theKappaCardinal.substitute("KAPPA CARDINAL ROTATION","");
         theKappaCardinal = theKappaCardinal.substitute(":","");
         theKappaCardinal = theKappaCardinal.substitute("\n","");
      }
      else if(line.contains("LEVER ARM"))
      {
         // Re-parse the comma-separated values as a '='-delimited keyword list.
         ossimKeywordlist kwl('=');
         line = line.substitute("LEVER ARM VALUES:", "");
         line = line.substitute(",", "\n", true);
         // Local istringstream deliberately shadows the outer stream "in".
         std::istringstream in(line);
         kwl.parseStream(in);
         theLeverArmLx = kwl.find("LX");
         theLeverArmLy = kwl.find("LY");
         theLeverArmLz = kwl.find("LZ");
      }
      else if(line.contains("BORESIGHT VALUES"))
      {
         ossimKeywordlist kwl('=');
         line = line.substitute("BORESIGHT VALUES:", "");
         line = line.substitute(",", "\n", true);
         std::istringstream in(line);
         kwl.parseStream(in);
         theBoreSightTx = kwl.find("TX");
         theBoreSightTy = kwl.find("TY");
         theBoreSightTz = kwl.find("TZ");
      }
      else if(line.contains("SHIFT VALUES:"))
      {
         ossimKeywordlist kwl('=');
         line = line.substitute("SHIFT VALUES:","");
         line = line.substitute(",", "\n", true);
         std::istringstream in(line);
         kwl.parseStream(in);
         theShiftValuesX = kwl.find("X");
         theShiftValuesY = kwl.find("Y");
         theShiftValuesZ = kwl.find("Z");
      }
      else if(line.contains("GRID:"))
      {
         ossimKeywordlist kwl(':');
         line = line.substitute(";", "\n", true);
         std::istringstream in(line);
         kwl.parseStream(in);
         theUtmZone = kwl.find("ZONE");
         if(theUtmZone.contains("NORTH"))
         {
            theUtmHemisphere = "North";
         }
         else
         {
            theUtmHemisphere = "South";
         }
         // Strip everything but the zone number itself.
         theUtmZone = theUtmZone.replaceAllThatMatch("UTM|\\(.*\\)|NORTH|SOUTH","");
         theUtmZone = theUtmZone.trim();
      }
      else if(line.contains("FRAME DATUM"))
      {
         ossimKeywordlist kwl(':');
         line = line.substitute(";", "\n", true);
         std::istringstream in(line);
         kwl.parseStream(in);
         theMappingFrameDatum = kwl.find("MAPPING FRAME DATUM");
         theMappingFrameProjection = kwl.find("MAPPING FRAME PROJECTION");
         theMappingFrameDatum = theMappingFrameDatum.trim();
         theMappingFrameProjection = theMappingFrameProjection.trim();
      }
      else if(line.contains("POSPROC SBET"))
      {
         theSbetField = line.after(":");
         theSbetField = theSbetField.trim();
      }
      else if(line.contains("CENTRAL MERIDIAN"))
      {
         theCentralMeridian = line;
         theCentralMeridian = theCentralMeridian.substitute("CENTRAL MERIDIAN","");
         theCentralMeridian = theCentralMeridian.substitute("=","");
         theCentralMeridian = theCentralMeridian.substitute("DEG","");
         theCentralMeridian = theCentralMeridian.substitute(";","");
      }
      else if(line.contains("LATITUDE OF THE GRID ORIGIN"))
      {
         ossimKeywordlist kwl('=');
         line = line.substitute(";", "\n", true);
         std::istringstream in(line);
         kwl.parseStream(in);
         theOriginLatitude = kwl.find("LATITUDE OF THE GRID ORIGIN");
         theGridScaleFactor = kwl.find("GRID SCALE FACTOR");
      }
      else if(line.contains("FALSE EASTING"))
      {
         ossimKeywordlist kwl('=');
         line = line.substitute(";", "\n", true);
         std::istringstream in(line);
         kwl.parseStream(in);
         theFalseEasting = kwl.find("FALSE EASTING");
         theFalseNorthing = kwl.find("FALSE NORTHING");
      }
   }
   // Read the record-format line: comma-separated field names ended by CR/LF.
   in>>applanix_skipws;
   c = in.get();
   std::vector<ossimString> fieldArray;
   ossimString field;
   while(in.good()&& (c!='\n')&& (c!='\r'))
   {
      field = "";
      while((c != ',')&& (c != '\n')&& (c != '\r'))
      {
         field += (char)c;
         c = in.get();
      }
      // Consume the comma separator (but not the line terminator).
      if((c!='\n')&& (c!='\r'))
      {
         c = in.get();
      }
      field = field.trim();
      if(field != "")
      {
         theRecordFormat.push_back(field);
      }
   }
   in>>applanix_skipws;
   // Skip an optional parenthesized units line after the format line.
   if(in.peek() == '(')
   {
      std::getline(in, line.string());
   }
   in>>applanix_skipws;
   // Reusable record sized to the parsed format; copies are stored per line.
   ossimRefPtr<ossimApplanixEORecord> record = new ossimApplanixEORecord((ossim_uint32)theRecordFormat.size());
   ossim_int32 latIdx = getFieldIdx("LAT");
   ossim_int32 lonIdx = getFieldIdx("LONG");;
   bool hasLatLon = (latIdx >=0)&&(lonIdx >= 0);
   if(hasLatLon)
   {
      // Seed extents so the first record always updates them.
      theMinLat = 90.0;
      theMaxLat = -90.0;
      theMinLon = 180.0;
      theMaxLon = -180.0;
   }
   else
   {
      theMinLat = ossim::nan();
      theMaxLat = ossim::nan();
      theMinLon = ossim::nan();
      theMaxLon = ossim::nan();
   }
   // Read one EO record per non-empty line until the stream runs out.
   while(in.good()&&theRecordFormat.size())
   {
      std::getline(in, line.string());
      line = line.trim();
      if(line != "")
      {
         std::istringstream inStr(line);
         ossim_uint32 idx;
         ossimString value;
         for(idx = 0; idx < theRecordFormat.size(); ++idx)
         {
            inStr >> (*record)[idx];
         }
         if(hasLatLon)
         {
            double lat = (*record)[latIdx].toDouble();
            double lon = (*record)[lonIdx].toDouble();
            if(lat<theMinLat) theMinLat = lat;
            if(lat>theMaxLat) theMaxLat = lat;
            if(lon<theMinLon) theMinLon = lon;
            if(lon>theMaxLon) theMaxLon = lon;
         }
         theApplanixRecordList.push_back(new ossimApplanixEORecord(*record));
      }
   }
// virtual BOOL LLInventoryItem::importLegacyStream(std::istream& input_stream) { // *NOTE: Changing the buffer size will require changing the scanf // calls below. char buffer[MAX_STRING]; /* Flawfinder: ignore */ char keyword[MAX_STRING]; /* Flawfinder: ignore */ char valuestr[MAX_STRING]; /* Flawfinder: ignore */ char junk[MAX_STRING]; /* Flawfinder: ignore */ BOOL success = TRUE; keyword[0] = '\0'; valuestr[0] = '\0'; mInventoryType = LLInventoryType::IT_NONE; mAssetUUID.setNull(); while(success && input_stream.good()) { input_stream.getline(buffer, MAX_STRING); sscanf( /* Flawfinder: ignore */ buffer, " %254s %254s", keyword, valuestr); if(0 == strcmp("{",keyword)) { continue; } if(0 == strcmp("}", keyword)) { break; } else if(0 == strcmp("item_id", keyword)) { mUUID.set(valuestr); } else if(0 == strcmp("parent_id", keyword)) { mParentUUID.set(valuestr); } else if(0 == strcmp("permissions", keyword)) { success = mPermissions.importLegacyStream(input_stream); } else if(0 == strcmp("sale_info", keyword)) { // Sale info used to contain next owner perm. It is now in // the permissions. Thus, we read that out, and fix legacy // objects. It's possible this op would fail, but it // should pick up the vast majority of the tasks. BOOL has_perm_mask = FALSE; U32 perm_mask = 0; success = mSaleInfo.importLegacyStream(input_stream, has_perm_mask, perm_mask); if(has_perm_mask) { if(perm_mask == PERM_NONE) { perm_mask = mPermissions.getMaskOwner(); } // fair use fix. 
if(!(perm_mask & PERM_COPY)) { perm_mask |= PERM_TRANSFER; } mPermissions.setMaskNext(perm_mask); } } else if(0 == strcmp("shadow_id", keyword)) { mAssetUUID.set(valuestr); LLXORCipher cipher(MAGIC_ID.mData, UUID_BYTES); cipher.decrypt(mAssetUUID.mData, UUID_BYTES); } else if(0 == strcmp("asset_id", keyword)) { mAssetUUID.set(valuestr); } else if(0 == strcmp("type", keyword)) { mType = LLAssetType::lookup(valuestr); } else if(0 == strcmp("inv_type", keyword)) { mInventoryType = LLInventoryType::lookup(std::string(valuestr)); } else if(0 == strcmp("flags", keyword)) { sscanf(valuestr, "%x", &mFlags); } else if(0 == strcmp("name", keyword)) { //strcpy(valuestr, buffer + strlen(keyword) + 3); // *NOTE: Not ANSI C, but widely supported. sscanf( /* Flawfinder: ignore */ buffer, " %254s%254[\t]%254[^|]", keyword, junk, valuestr); // IW: sscanf chokes and puts | in valuestr if there's no name if (valuestr[0] == '|') { valuestr[0] = '\000'; } mName.assign(valuestr); LLStringUtil::replaceNonstandardASCII(mName, ' '); LLStringUtil::replaceChar(mName, '|', ' '); } else if(0 == strcmp("desc", keyword)) { //strcpy(valuestr, buffer + strlen(keyword) + 3); // *NOTE: Not ANSI C, but widely supported. sscanf( /* Flawfinder: ignore */ buffer, " %254s%254[\t]%254[^|]", keyword, junk, valuestr); if (valuestr[0] == '|') { valuestr[0] = '\000'; } mDescription.assign(valuestr); LLStringUtil::replaceNonstandardASCII(mDescription, ' '); /* TODO -- ask Ian about this code const char *donkey = mDescription.c_str(); if (donkey[0] == '|') { llerrs << "Donkey" << llendl; } */ } else if(0 == strcmp("creation_date", keyword)) { S32 date; sscanf(valuestr, "%d", &date); mCreationDate = date; } else { llwarns << "unknown keyword '" << keyword << "' in inventory import of item " << mUUID << llendl; } } // Need to convert 1.0 simstate files to a useful inventory type // and potentially deal with bad inventory tyes eg, a landmark // marked as a texture. 
if((LLInventoryType::IT_NONE == mInventoryType) || !inventory_and_asset_types_match(mInventoryType, mType)) { lldebugs << "Resetting inventory type for " << mUUID << llendl; mInventoryType = LLInventoryType::defaultForAssetType(mType); } mPermissions.initMasks(mInventoryType); return success; }
// Parses the next JSON value from the input stream into this Value:
// a string, object, array, number, or one of the literals null/true/false.
// Whitespace before the value is skipped; any other character is reported.
void Value::loadFromStream(std::istream &input) {
	char currentCharacter;
	// We check that the stream is in UTF-8.
	// NOTE(review): this only probes the first two bytes for NUL (e.g. to
	// reject UTF-16/32 BOMs); it is not real UTF-8 validation, and when the
	// stream holds fewer than two characters the probe bytes are read from
	// a failed get() — confirm intended behavior on very short input.
	char encoding[2];
	input.get(encoding[0]);
	input.get(encoding[1]);
	if (encoding[0] != '\0' && encoding[1] != '\0') {
		// We put the characters back.
		input.putback(encoding[1]);
		input.putback(encoding[0]);
		// Boolean value used to stop reading characters after the value
		// is done loading.
		bool noErrors = true;
		while (noErrors && input.good()) {
			input.get(currentCharacter);
			// cout<<"Veja Current Character "<<currentCharacter<<endl;
			if (input.good()) {
				if (currentCharacter == Structural::BEGIN_END_STRING) {
					// cout<<"Structural::BEGIN_END_STRING "<<Structural::BEGIN_END_STRING<<endl;
					// The value to be parsed is a string.
					setString("");
					readString(input, *data.stringValue);
					noErrors = false;
				} else if (currentCharacter == Structural::BEGIN_OBJECT) {
					// cout<<"Structural::BEGIN_OBJECT "<<Structural::BEGIN_OBJECT<<endl;
					// The value to be parsed is an object.
					setObject(Object());
					readObject(input, *data.objectValue);
					noErrors = false;
				} else if (currentCharacter == Structural::BEGIN_ARRAY) {
					// cout<<"Structural::BEGIN_ARRAY "<<Structural::BEGIN_ARRAY<<endl;
					// The value to be parsed is an array.
					setArray(Array());
					readArray(input, *data.arrayValue);
					noErrors = false;
				} else if (currentCharacter == Literals::NULL_STRING[0]) {
					// We try to read the literal 'null', one character at a
					// time; a mismatched character or a premature end of
					// input is reported on stdout.
					if (!input.eof()) {
						input.get(currentCharacter);
						if (currentCharacter == Literals::NULL_STRING[1]) {
							if (!input.eof()) {
								input.get(currentCharacter);
								if (currentCharacter == Literals::NULL_STRING[2]) {
									if (!input.eof()) {
										input.get(currentCharacter);
										if (currentCharacter == Literals::NULL_STRING[3]) {
											// All four characters matched.
											setNull();
											noErrors = false;
										} else {
											std::cout << "invalid characters found" << std::endl;
										}
									} else {
										std::cout << "json input ends incorrectly" << std::endl;
									}
								} else {
									std::cout << "invalid characters found" << std::endl;
								}
							} else {
								std::cout << "json input ends incorrectly" << std::endl;
							}
						} else {
							std::cout << "invalid characters found" << std::endl;
						}
					} else {
						std::cout << "json input ends incorrectly" << std::endl;
					}
				} else if (currentCharacter == Numbers::MINUS ||
						   (currentCharacter >= Numbers::DIGITS[0] && currentCharacter <= Numbers::DIGITS[9])) {
					// Numbers can't start with zeroes.
					// Push the first digit/sign back so readNumber sees it.
					input.putback(currentCharacter);
					readNumber(input, *this);
					noErrors = false;
				} else if (currentCharacter == Literals::TRUE_STRING[0]) {
					// We try to read the boolean literal 'true'.
					// NOTE(review): unlike the 'null' path above, mismatches
					// here are silently ignored (no diagnostic is printed).
					if (!input.eof()) {
						input.get(currentCharacter);
						if (currentCharacter == Literals::TRUE_STRING[1]) {
							if (!input.eof()) {
								input.get(currentCharacter);
								if (currentCharacter == Literals::TRUE_STRING[2]) {
									if (!input.eof()) {
										input.get(currentCharacter);
										if (currentCharacter == Literals::TRUE_STRING[3]) {
											setBoolean(true);
											noErrors = false;
										}
									}
								}
							}
						}
					}
				} else if (currentCharacter == Literals::FALSE_STRING[0]) {
					// We try to read the boolean literal 'false'
					// (five characters, matched one at a time).
					if (!input.eof()) {
						input.get(currentCharacter);
						if (currentCharacter == Literals::FALSE_STRING[1]) {
							if (!input.eof()) {
								input.get(currentCharacter);
								if (currentCharacter == Literals::FALSE_STRING[2]) {
									if (!input.eof()) {
										input.get(currentCharacter);
										if (currentCharacter == Literals::FALSE_STRING[3]) {
											if (!input.eof()) {
												input.get(currentCharacter);
												if (currentCharacter == Literals::FALSE_STRING[4]) {
													setBoolean(false);
													noErrors = false;
												}
											}
										}
									}
								}
							}
						}
					}
				} else if (!isWhiteSpace(currentCharacter)) {
					std::cout << "Invalid character found: '" << currentCharacter << "'" << std::endl;
				}
			}
		}
	} else {
		std::cout << "File is not in UTF-8, not parsing." << std::endl;
	}
}