// Consume mixed content (text, child elements, comments, CDATA sections,
// processing instructions, character and entity references), attaching the
// results to `root`. Loops until end of input (peek() == -1) or until an
// unhandled '<' token (e.g. a closing tag) is left for the caller.
void DocumentBuilder::consumeContent(Node *root){
    while(peek() != -1){
        // Plain character data first; the checks below handle markup.
        consumeText(root);
        if (isElement()){ consumeElement(root); continue; }
        if (isComment()){ consumeComment(root); continue; }
        if (isCDataSection()){ consumeCDataSection(root); continue; }
        if(isPI()){ consumePI(root); continue; }
        if (isCharRef()){
            // NOTE(review): sb is handed to appendToLastTextNode; ownership
            // transfer is assumed — confirm it is freed there, else this leaks.
            StringBuffer *sb = new StringBuffer(2);
            sb->append(consumeCharRef());
            appendToLastTextNode(root, sb);
            continue;
        }
        if (isEntityRef()){
            // `true` presumably requests entity expansion — TODO confirm.
            String *entext = consumeEntityRef(true);
            appendToLastTextNode(root, entext);
            continue;
        }
        // A '<' that matched none of the predicates above is not ours to
        // consume (likely an end tag): stop here.
        if (peek(0) == '<') break;
    };
}
// Remove the named attribute from the current node.
// Does nothing when there is no current node or the node is a comment.
void CXML::removeAttribute(const char * attributeName)
{
    if (!m_node || isComment())
        return;
    m_node->RemoveAttribute(attributeName);
}
// Read lines from `infile` into `headerLine`, discarding comment lines,
// until a non-comment line is obtained or the stream reaches EOF/error.
static void skipCommentsAndReadValue(char *headerLine, std::ifstream& infile)
{
    for (;;) {
        readLine(headerLine, infile);
        const bool streamOk = !infile.eof() && !infile.bad();
        if (!(streamOk && isComment(headerLine)))
            break;
    }
}
// Raise a descriptive parse error for the current token, including the
// source file, line and column. Comments are ignored, as is a CustomError
// that readContents already raised.
void TSReader::handleError()
{
    if (isComment())
        return;
    if (hasError() && error() == CustomError) // raised by readContents
        return;
    // Location string: "at <file>:<line>:<column>".
    const QString loc = QString::fromLatin1("at %3:%1:%2")
        .arg(lineNumber()).arg(columnNumber()).arg(m_cd.m_sourceFileName);
    switch (tokenType()) {
    case NoToken: // Cannot happen
    default: // likewise
    case Invalid:
        raiseError(QString::fromLatin1("Parse error %1: %2").arg(loc, errorString()));
        break;
    case StartElement:
        raiseError(QString::fromLatin1("Unexpected tag <%1> %2").arg(name().toString(), loc));
        break;
    case Characters:
        {
            // Truncate long character runs so the message stays readable.
            QString tok = text().toString();
            if (tok.length() > 30)
                tok = tok.left(30) + QLatin1String("[...]");
            raiseError(QString::fromLatin1("Unexpected characters '%1' %2").arg(tok, loc));
        }
        break;
    case EntityReference:
        raiseError(QString::fromLatin1("Unexpected entity '&%1;' %2").arg(name().toString(), loc));
        break;
    case ProcessingInstruction:
        raiseError(QString::fromLatin1("Unexpected processing instruction %1").arg(loc));
        break;
    }
}
bool Config::parseSection(const std::string& section, std::ifstream& file, int& lineNb) { std::string line; while(std::getline(file, line)) { lineNb++; if(isSection(line)) { lineNb--; putbackIntoStream(file, line); break; //end of section } //skip comments & empty lines else if(isComment(line) || isEmpty(line)) { continue; } //parse line else if(parseLine(section, line, file, lineNb)) { continue; } else { Log(LOG_TYPE::ERROR) << "Config: invalid line at '" << lineNb << "' : " << line; return false; } } return true; }
// Set (or overwrite) the named attribute on the current node.
// Does nothing when there is no current node or the node is a comment.
void CXML::setAttribute(const char * attributeName, const char * value)
{
    if (!m_node || isComment())
        return;
    m_node->SetAttribute(attributeName, value);
}
// True when this line carries actual data, i.e. it is not a comment,
// an interpretation, a barline, or an empty line.
bool HumdrumLine::isData(void) const {
    // Collapsed the original `if (...) return false; else return true;`
    // into a single negated expression — same truth table, clearer intent.
    return !(isComment() || isInterp() || isBarline() || isEmpty());
}
// Convert a BLF label file (`file_name`) to XML written to `result_name`.
// Either file failing to open produces a warning and an early return.
// NOTE(review): this definition appears truncated in this chunk — the
// function's closing brace is not visible past the label-reading loop.
void blf2xml(const string & file_name, const string & result_name, const string & ext)
{
    ifstream in(file_name.c_str());
    if(!in) {
        cerr << "Warning: Can't open file " << file_name << " ignoring." << endl;
        return;
    }
    ofstream out(result_name.c_str());
    if(!out) {
        cerr << "Warning: Can't open file " << result_name << " ignoring." << endl;
        return;
    }
    // Sentence entry keyed by the file's basename (extension stripped).
    c_SentenceEntry sentence(basename(file_name, ext), "#", "#.");
    string tmp;
    vector< c_LabelEntry > label;
    // Each non-comment line holds: label id, phoneme, prosody.
    while(getline(in, tmp)) {
        if(!isComment(tmp)) {
            c_LabelEntry tmplabel;
            stringstream ins(tmp.c_str());
            ins >> tmplabel.first >> tmplabel.phon >> tmplabel.pros;
            label.push_back(tmplabel);
        }
    }
// Read keyword/value pairs from `file` into a freshly allocated CMap.
// Blank lines (after whitespace stripping) and comment lines are skipped.
// Dies on an over-long line or on duplicate keyword/value pairs.
static CMap getCMap(FILE * file)
{
    CMapNode cnode;
    CMap cmap = mallocCMap();
    char * line = mallocZStr();
    while (fgets(line, 257, file)) {
        if (strlen(line) > 255) {
            // Line did not fit in the buffer: release everything and abort.
            freeCMap(cmap);
            free(line);
            line = NULL;
            die("Parameter line exceeds maximum line length", 1);
        }
        // NOTE(review): rmSpace's return value is reassigned to `line` and
        // later free()d — this assumes rmSpace returns the same (or an
        // equally free-able) buffer; confirm its contract.
        line = rmSpace(line);
        if ((!isComment(*line)) && (*line != '\0'))
            if ((cnode = nextCNode(line)) != NULL)
                pushCNode(cnode, cmap);
    }
    free(line);
    line = NULL;
    // More entries than ARYLEN implies a key occurred more than once.
    if (cmap->size > ARYLEN) {
        freeCMap(cmap);
        die("Duplicate keyword/value pairs illegal", 1);
    }
    return cmap;
}
// Split each input line on `sep`, handing tokens to tokenFunction until a
// comment token or end of line. A NEWLINE token is emitted for every line
// that contained at least one real token; ENDFILE terminates the stream.
void tokenise(Program *program, char *sep, void (*tokenFunction)(Program *program, char *token))
{
    char buffer[512];
    while (fgets(buffer, sizeof(buffer), program->input) != NULL) {
        char *tok = strtok(buffer, sep);
        bool blank = (tok == NULL);
        bool comment = !blank && isComment(tok);
        for (; tok != NULL && !isComment(tok); tok = strtok(NULL, sep))
            tokenFunction(program, tok);
        if (!blank && !comment)
            tokens_add(program->tokens, "nl", NEWLINE);
    }
    tokens_add(program->tokens, "end", ENDFILE);
}
bool Parser::hasMoreCommands() { if (f_.eof()) { // No more file to check. return false; } // We have to look ahead for commands, skipping comments and blank lines. std::istream::streampos parsed_pos = f_.tellg(); // Saving stream state. std::ios::iostate status = f_.rdstate(); f_.clear(); bool res = false; unsigned int lines_read = 0; // So that if there's an error it displays where. do { std::string s; std::getline(f_, s); lines_read++; trimLeft(s); trimRight(s); if (!s.empty() && !isComment(s)) { // Found a command line... if (!validCommand(s)) { // ...but it's an invalid line! std::cerr << "Error at line "; std::cerr << current_line_number_ + lines_read; std::cerr << ". '" << s << "': Invalid line" << std::endl; throw std::runtime_error("Invalid line"); } res = true; goto done; } } while (!f_.eof()); done: f_.seekg(parsed_pos); // Restoring stream to its original state. f_.setstate(status); return res; }
void Config::extractArray(const std::string& section, const std::string& line, std::ifstream& file, int& lineNb) { std::string array = line; std::string lineTmp; while(std::getline(file, lineTmp)) { lineNb++; if(isSection(lineTmp)) { lineNb--; putbackIntoStream(file, lineTmp); break; //end of section } //skip comments & empty lines else if(isComment(lineTmp) || isEmpty(lineTmp)) { continue; } //parse array else { array += ":" + lineTmp; } } std::string key = section.substr(1, section.length()-2) + "=" + array; extractLine(section, key, file, lineNb); }
void DataReader::assignHeader(std::ifstream & fin) { /*returns a commented string preceeding uncommented ones*/ std::string header, line; int pos; /*memorize initial position*/ pos = fin.tellg(); while(getline(fin,line)) { /*remove spaces from front and back of the string*/ boost::algorithm::trim(line); if (isComment(line)) { header = line.substr(1); /*memorize initial position*/ pos = fin.tellg(); } else { fin.seekg(pos); break; } } Tokenizer tok(header, *m_cs); //split header line into column names m_columnNames.assign(tok.begin(), tok.end()); }
bool Config::parse(const char* filePath) { std::ifstream file; file.open(filePath); if(!file) { Log(LOG_TYPE::ERROR) << "Can't open config file '" << filePath << "'"; return false; } std::string line; int lineNb = 0; while(std::getline(file, line)) { lineNb++; //find section if(isSection(line)) { if(!parseSection(line, file, lineNb)) { return false; } } //skip comments & empty lines else if(!isComment(line) && !isEmpty(line)) { //otherwise flag line Log(LOG_TYPE::ERROR) << "Config: invalid line at '" << lineNb << "' : " << line; return false; } } file.close(); _parsed = true; _filePath = filePath; return true; }
// Create the node object matching the markup fragment `temp`: an element,
// a comment, or an XML declaration. Throws Xml_exception for anything else
// (including fragments too short to be meaningful).
xml_node* xml_node::create( string const& temp )
{
    if( temp.size() < 2 ) // only <> in element
        throw Xml_exception( eEx::parse, msg_unknown_node + ": " + temp );

    xml_document* d = document();
    assert( d != nullptr );

    if( isElement( temp ) )      // is alpha or underscore
        return d->element_create();
    if( isComment( temp ) )      // <!-- und -->
        return d->comment_create();
    if( isDeclaration( temp ) )  // "<?xml und ?>"
        return d->declaration_create( tlfm_ );

    throw Xml_exception( eEx::parse, msg_unknown_node + ": " + temp );
}
// Read the header of the image to determine if it's a pgm // image type. Return false if != PGM void readHeader(FILE* imgFin) { int haveReadImgID = FALSE; int haveReadImgSize = FALSE; int haveReadMaxVal = FALSE; char line[BUFFER_SIZE]; while(!(haveReadImgID && haveReadImgSize && haveReadMaxVal)) { fgets(line, BUFFER_SIZE, imgFin); if((strlen(line) == 0) || (strlen(line) == 1)) continue; if(isComment(line)) continue; if(!(haveReadImgID)) { readImgID(line); haveReadImgID = TRUE; } else if(!(haveReadImgSize)) { readImgSize(line); haveReadImgSize = TRUE; } else if(!(haveReadMaxVal)) { readMaxVal(line); haveReadMaxVal = TRUE; } } }
// Text content of the current node, or NULL when there is no current node
// or the node is a comment.
const char * CXML::nodeContent()
{
    if (!m_node || isComment())
        return NULL;
    return m_node->GetText();
}
// Value of the named attribute on the current node, or NULL when there is
// no current node or the node is a comment.
const char * CXML::getAttribute(const char * attributeName)
{
    if (!m_node || isComment())
        return NULL;
    return m_node->Attribute(attributeName);
}
/* Load the database */ bool MAVLinkComponent::loadDatabase(const char * filename) { /* If we already have components, somethings gone wrong! */ if (numberOfComponents != 0) { fprintf(stderr,"MAVLinkComponent loadDatabase called multiple times!\n"); return false; } //open the file string line; ifstream f(filename); //if the file exists, start reading if (f.is_open()) { //until we reach the end while (f.good()) { //read a line getline(f,line); //check whether a line is blank or a comment if (isComment(line)) continue; //read the parameters int count = sscanf(line.c_str(),"%s %d %d %X-%X\n", identifiers[numberOfComponents], &systemIDs[numberOfComponents], &componentIDs[numberOfComponents], &physicalAddress[numberOfComponents][0], &physicalAddress[numberOfComponents][1]); //did we get enough parameters? //if yes, increment counter //if no, print an error (database is now screwed up) //TODO: keep stupid data out of database! if (count == 5) { numberOfComponents++; } else { fprintf(stderr, "MAVLinkComponent loadDatabase cannot parse '%s'\n",line.c_str()); } } //if we have no components, let the user know if (numberOfComponents == 0) { fprintf(stderr,"MAVLinkComponent found no components in %s!\n", filename); return false; } //if we have components, tell the user how many printf("%d MAVLink Components loaded\n", numberOfComponents); return true; } else { //if the file doesn't exist, tell somebody fprintf(stderr,"MAVLinkComponent could not load database!\n"); numberOfComponents = 0; } return false; }
void DataReader::readFile(std::string filename) { this->filename=filename; std::ifstream fin(filename.c_str()); if (!fin) throw Exception("File has not been opened:\t" + filename); std::string line; std::istringstream is; size_t ln; std::vector<std::string> values; double value; //here we read a header line getline(fin, line); readHeader(line); ln = 1; while (!fin.eof()) { getline(fin, line); ++ln; //skip the comment if (isComment(line)) continue; //split line into columns values = split(line, " \t\n \r"); //check if nb of columns corresponds to number of headers if(values.size() != columnNames.size()) { /*LOG(logWARNING) << "Line: " << ln << " of "<< filename << ":\n" << "\t nb columns expected:\t" << columnNames.size() << "\n" << "\t eb columns found:\t" << values.size();*/ continue; } for(size_t icol = 0; icol < columnNames.size(); icol++) { is.str(values[icol]); is >> value; is.clear(); data[columnNames[icol]].push_back(value); dataf[columnNames[icol]].push_back(value); } ++nbRows; } fin.close(); }
// Advance to the next command line, skipping comments and blank lines,
// and store it (trimmed) in current_line_.
void Parser::advance() {
    /* PRE: hasMoreCommands() == true =>
       next non-comment line contains a valid command. */
    std::string s;
    do {
        // Fix: guard the read. If the precondition is ever violated and we
        // hit EOF, the original looped forever (getline keeps failing and
        // leaves s empty, which re-enters the loop).
        if (!std::getline(f_, s))
            break;
        current_line_number_++;
        trimLeft(s);
    } while (isComment(s) || s.empty());
    trimRight(s);
    current_line_ = s;
}
// Build the bookkeeping needed to fill the power (P) vector: map each
// device subcomponent name to its index, count the power consumers, then
// open the power trace file and map trace-column indices to token names
// taken from the first non-comment trace line.
// NOTE(review): this definition appears truncated in this chunk — the
// function's closing brace is not visible past the BOOST_FOREACH loop.
void Model::preparePVector(){
    string temp;
    list<string> tokenList;
    vector<SubComponent *> subComponents = device->getSubComponents();
    SubComponent* sc;
    int i=0;
    pwr_consumers_cnt = 0;
    //Storing the device subcomponents order in a map, i.e., powerMappingDeviceOrder in order to access them while filling the P vector
    for(vector<SubComponent *>::iterator it1 = subComponents.begin(); it1 != subComponents.end(); it1++){
        sc=(*it1);
        //avoid using (i,j) concatenated with the names in the high-res components
        if (sc->isPrimary()){
            powerMappingDeviceOrder.insert(pair<string, int> (sc->getComponent()->getName(), i));
        }else{
            powerMappingDeviceOrder.insert(pair<string, int> (sc->getName(), i));
        }
        // Count components that actually draw power (have a floorplan or are primary).
        if (sc->getComponent()->isPowerGen() && (sc->getComponent()->hasFloorPlan() || sc->isPrimary())){
            pwr_consumers_cnt++;
        }
        i++;
    }
    //Opening the power trace file for reading
    string powerTraceFileAddr = device->getPowerTraceFile();
    powerTraceFile.open(powerTraceFileAddr.c_str(), ifstream::in);
    if (!powerTraceFile.is_open()){
        cerr<<"Could not open "<<powerTraceFileAddr<<" for parsing as the power trace."<<endl;
        exit(-1);
    }
    // Skip leading comment lines to reach the trace header.
    do{
        getline(powerTraceFile, temp);
        algorithm::trim(temp);
    }while (isComment(temp) && powerTraceFile.eof()==false);
    // NOTE(review): a valid header on the file's last line would still set
    // eofbit and trigger this error path — confirm that is intended.
    if (powerTraceFile.eof()){
        cerr<<"The file does not contain valid power trace."<<endl;
        exit (-1);
    }
    // Header tokens (tab/space separated) give the trace column order.
    split(tokenList, temp, is_any_of("\t "), token_compress_on);
    i=0;
    BOOST_FOREACH(string token, tokenList){
        powerMappingTraceOrder.insert(pair<int, string> (i, token));
        i++;
    }
void Parser::advance() { /* PRE: hasMoreCommands() == true => next non-comment line contains a valid command. */ std::string s; do { std::getline(f_, s); current_line_number_++; trimLeft(s); } while (isComment(s) || s.empty()); trimRight(s); // Saving the line in lower case: std::transform(s.begin(), s.end(), s.begin(), ::tolower); current_line_ = s; }
// Consume any run of comments and processing instructions (the XML "Misc"
// production), attaching them to root and skipping whitespace between them.
void DocumentBuilder::consumeMisc(Node *root)
{
    consumeSpaces();
    for (;;) {
        if (isComment())
            consumeComment(root);
        else if (isPI())
            consumePI(root);
        else
            break; // no more Misc tokens
        consumeSpaces();
    }
}
// A token terminates a semi-expression when it is a brace or semicolon, a
// newline on a line containing '#' (preprocessor directive), or — when
// commentIsSemiExp is enabled — a comment. Empty tokens never terminate.
bool SemiExp::isTerminator(std::string tok)
{
    if (tok.empty())
        return false;
    if (tok == "{" || tok == "}" || tok == ";")
        return true;
    if (tok == "\n") {
        int n = find("#");
        if (n < length())
            return true;
    }
    return commentIsSemiExp && isComment(tok);
}
inputType identifyType (char input[]) { if (isDirective(input)) { return DIRECTIVE; } else if (isLabel(input)) { return LABEL; } else if (isComment(input)) { return COMMENT; } else if(isInstruction(input)) { return INSTRUCTION; } return ERROR; }
/**
 * Lex the string into tokens, each of which has a given offset into the string.
 * Lexing is done by the following algorithm:
 * (1) If the current character is a space, and if it is then check the next:
 *     (a) If it is another space, then the token is a tab.
 *     (b) If it is some other character, the token is a space.
 * (2) If the current character is a character (either upper or lower case), or a digit,
 *     then continue until the first non-matching character and that is an ident.
 * (3) If the current character is a #, then ignore everything until the end of the line.
 * (4) If the current character is a newline, then the token is a newline.
 * (5) If the current character is a colon, then the token is just a colon.
 * (6) If the current character is a quote, then read until the endquote and
 *     declare the string as the contents of the string.
 */
Token* lex(char* input, int len) {
    Token* first = newToken(0, 0, 0); // dummy head; real list starts at first->next
    Token* last = first;
    int index = 0;
    while (index < len-1) {
        //printf("*");
        int start = index;
        char cur = input[index];
        if (isSpace(cur)) {
            if (isSpace(input[index+1])) {
                // Two consecutive spaces collapse into one TAB token.
                index++;
                addNewToken(last, TAB, start, index);
            } else {
                addNewToken(last, SPACE, index, index);
            }
            index++;
        } else if (isTab(cur)) {
            index++;
            addNewToken(last, TAB, start, index);
        } else if (isChar(cur)) {
            // Identifier: run of letter/digit characters.
            while (isChar(input[++index]));
            addNewToken(last, IDENT, start, index);
        } else if (isComment(cur)) {
            // NOTE(review): an unterminated comment at end of input makes
            // this scan run past `len` — confirm input is newline-terminated.
            while (!isNewLine(input[++index]));
        } else if (isNewLine(cur)) {
            index++;
            addNewToken(last, NEWLINE, index, index);
        } else if (isColon(cur)) {
            index++;
            addNewToken(last, COLON, index, index);
        } else if (isQuote(cur)) {
            // NOTE(review): same overrun concern for an unterminated quote.
            while (!isQuote(input[++index]));
            addNewToken(last, STRING, start+1, index); // contents exclude the quotes
            index++; /* Pass by the end quote. */
        }
        // addNewToken appends to last->next; follow it when one was added.
        if (last->next != NULL) last = last->next;
    }
    addNewToken(last, NEWLINE, index, index);
    return first->next; // skip the dummy head
}
bool TabReader::Impl::process(const char* filename) { bool success=false; if (filename == NULL) throw runtime_error("NULL pointer in filename"); if (th_ == NULL) { default_th_ = shared_ptr<TabHandler>(new DefaultTabHandler()); th_ = default_th_.get(); } ifstream in(filename); string line; if (in.is_open()) { th_->open(); getline(in, line); while(getline(in, line)) { if (isComment(line)) continue; else { th_->updateLine(line); vector<string> fields; getFields(line, fields); th_->updateRecord(fields); } } } in.close(); th_->close(); return success; }
void Parser::printFileInfo(std::ostream& os) { std::istream::streampos parsed_pos = f_.tellg(); // Saving stream position. std::ios::iostate status = f_.rdstate(); f_.clear(); f_.seekg(0, std::ios::beg); unsigned int lines = 0; unsigned int comments = 0; std::string s; while (getline(f_, s)) { trimLeft(s); if (isComment(s)) comments++; lines++; } os << "File has " << lines << " lines. "; os << lines-comments << " are commands. \n"; f_.seekg(parsed_pos); // Restoring stream to its original position. f_.setstate(status); }
/*
 * Textual contents setting.
 *
 * Replaces the textual contents of this text/PI/comment element with
 * `text` (`textSize` bytes). Short texts are stored inline in the element
 * segment; longer ones go into a freshly allocated segment. When the
 * element has a parent, the change is recorded in the document journal.
 */
__INLINE void ElementRef::setText (const char* text, DomTextSize textSize)
{
    AssertBug(isText() || isPI() || isComment(), "Not a text element ! id=%llx, path=%s\n",
            getElementId(), generateVersatileXPath().c_str());
    ElementSegment* me = getMe<Write>();
    AssertBug(me->flags & ElementFlag_HasTextualContents, "Invalid flags %x\n", me->flags);
    if (me->textualContents.size > me->textualContents.shortFormatSize) {
        // Previous contents lived in a separate segment: release it first.
        // We shall reuse a bit here... Nevermind...
        getDocumentAllocator().freeSegment(me->textualContents.contentsPtr, me->textualContents.size);
    }
    if (textSize <= me->textualContents.shortFormatSize) {
        // Short format: the text fits inline in the element segment.
        getDocumentAllocator().alter(me);
        me->textualContents.size = textSize;
        memcpy(me->textualContents.contents, text, textSize);
        getDocumentAllocator().protect(me);
    } else {
        // Long format: copy the text into a newly allocated segment, then
        // point the element at it.
        SegmentPtr contentsPtr = getDocumentAllocator().getFreeSegmentPtr(textSize, getAllocationProfile());
        char* textSegment = getDocumentAllocator().getSegment<char, Write>(contentsPtr, textSize);
        getDocumentAllocator().alter(textSegment, textSize);
        memcpy(textSegment, text, textSize);
        getDocumentAllocator().protect(textSegment, textSize);
        getDocumentAllocator().alter(me);
        me->textualContents.size = textSize;
        me->textualContents.contentsPtr = contentsPtr;
        getDocumentAllocator().protect(me);
    }
    if ( getFather() ) {
        // Journal the update so it can be replayed/synchronized.
        getDocument().appendJournal(*this, JournalOperation_UpdateTextNode, *this, 0);
    }
}