// Parses the remainder of the current line as the record's origin value.
// Refills the input window via Fill() as needed until a real '\n' is seen.
// Returns a newly g_malloc'd, right-chomped string (caller frees with
// g_free), or NULL on EOF/error. Consumes the terminating '\n'.
char *
MoonAppRecordIterator::ParseOrigin ()
{
	char *start, *str;
	GString *origin;
	
	if (!EatWhiteSpace (false))
		return NULL;
	
	// the rest of this line is our origin value...
	origin = g_string_new ("");
	
	do {
		// Plant a '\n' sentinel at the end of the window so the scan
		// below always terminates without a bounds check per character.
		// NOTE(review): assumes the buffer has one writable byte at
		// inend — confirm against the buffer allocation.
		*inend = '\n';
		
		start = inptr;
		while (*inptr != '\n')
			inptr++;
		
		// append whatever we scanned, sentinel-terminated or not
		g_string_append_len (origin, start, inptr - start);
		
		if (inptr < inend) {
			// stopped on a real '\n' (not the sentinel): line complete
			str = origin->str;
			// free the GString wrapper but keep the character data
			g_string_free (origin, false);
			
			// eat the '\n'
			inptr++;
			
			return g_strchomp (str);
		}
	} while (Fill ());
	
	// hit EOF before the end of the line: discard partial value
	g_string_free (origin, true);
	
	return NULL;
}
char * MoonAppRecordIterator::ParseUid () { char *start, *str; GString *uid; if (!EatWhiteSpace (false)) return NULL; uid = g_string_new (""); do { start = inptr; while (inptr < inend && *inptr != ' ' && *inptr != '\t' && *inptr != '=') inptr++; g_string_append_len (uid, start, inptr - start); if (inptr < inend) { str = uid->str; g_string_free (uid, false); return str; } } while (Fill ()); g_string_free (uid, true); return NULL; }
// Peeks at the next non-whitespace character without consuming it.
// Returns '\0' when the input is exhausted.
char
MoonAppRecordIterator::NextToken ()
{
	return EatWhiteSpace (false) ? *inptr : '\0';
}
// Reads a string wrapped in pConv's delimiter sequence (text mode only),
// decoding escaped characters through GetDelimitedCharInternal(). Falls back
// to plain GetString() for binary buffers or when no conversion is given.
// pString receives a NUL-terminated result; nMaxChars == 0 means unlimited.
// NOTE(review): if the opening delimiter is absent we return without writing
// to pString at all — callers presumably pre-initialize it; confirm.
void CUtlBufferEditor::GetDelimitedString( CUtlCharConversion *pConv, char *pString, int nMaxChars )
{
	if ( !IsText() || !pConv )
	{
		GetString( pString, nMaxChars );
		return;
	}

	if (!IsValid())
	{
		*pString = 0;
		return;
	}

	if ( nMaxChars == 0 )
	{
		nMaxChars = INT_MAX;
	}

	EatWhiteSpace();
	// must begin with the starting delimiter or we read nothing
	if ( !PeekStringMatch( 0, pConv->GetDelimiter(), pConv->GetDelimiterLength() ) )
		return;

	// Pull off the starting delimiter
	SeekGet( SEEK_CURRENT, pConv->GetDelimiterLength() );

	int nRead = 0;
	while ( IsValid() )
	{
		// closing delimiter ends the string; consume it and stop
		if ( PeekStringMatch( 0, pConv->GetDelimiter(), pConv->GetDelimiterLength() ) )
		{
			SeekGet( SEEK_CURRENT, pConv->GetDelimiterLength() );
			break;
		}

		char c = GetDelimitedCharInternal( pConv );

		// excess characters are consumed but silently dropped
		if ( nRead < nMaxChars )
		{
			pString[nRead] = c;
			++nRead;
		}
	}

	// nRead can reach nMaxChars exactly; clamp so the terminator fits
	if ( nRead >= nMaxChars )
	{
		nRead = nMaxChars - 1;
	}
	pString[nRead] = '\0';
}
//----------------------------------------------------------------------------- // Reads a null-terminated string //----------------------------------------------------------------------------- void CUtlBufferEditor::GetString( char* pString, int nMaxChars ) { if (!IsValid()) { *pString = 0; return; } if ( nMaxChars == 0 ) { nMaxChars = INT_MAX; } // Remember, this *includes* the null character // It will be 0, however, if the buffer is empty. int nLen = PeekStringLength(); if ( IsText() ) { EatWhiteSpace(); } if ( nLen == 0 ) { *pString = 0; m_Error |= GET_OVERFLOW; return; } // Strip off the terminating NULL if ( nLen <= nMaxChars ) { Get( pString, nLen - 1 ); pString[ nLen - 1 ] = 0; } else { Get( pString, nMaxChars - 1 ); pString[ nMaxChars - 1 ] = 0; SeekGet( SEEK_CURRENT, nLen - 1 - nMaxChars ); } // Read the terminating NULL in binary formats if ( !IsText() ) { VerifyEquals( GetChar(), 0 ); } }
bool CParser::ReadNextLine (std::ifstream& FileInput, int& nLineNum,
                            std::string& szInputString, const int MAXCHARS,
                            const std::string& szComment, bool bLowerCase)
// ---------------------------------------------------------------------------
// Function: reads the next line skipping over the comment lines
//           and converts all alphabets to lower case if requested.
//           Lines ending in a "&" token are treated as continued on the
//           next physical line and are concatenated into one logical line.
// Input:    file istream, line #, string to hold the input line,
//           max. # of characters expected in each input line,
//           comment character(s) at the beginning of a comment line,
//           lowercase conversion option
// Output:   updated values of line # and the string
//           return value is true if successful
//                           false if an error state is encountered
// Restriction: Cannot read a line over 256 characters
// ---------------------------------------------------------------------------
{
    int flag = 0;                 // set once a "&" continuation token is seen
    int flag1 =0;                 // first line contains a "&" token
    bool bWhSpc = false;          // line is pure whitespace
    int tokenfound = 1;           // continuation token present on current line
    const int MAXCH = 1000;
    char szInp[MAXCH];
    char szTemp [MAXCH];
    std::vector<std::string> tokens;

    // enough capacity to read and store?
    if (MAXCHARS > MAXCH)
        return false;

    // comment character(s)
    int nCLen = static_cast<int>(szComment.length());

    // read the line (skip over comment lines)
    for(;;)
    {
        ++nLineNum;
        FileInput.getline (szInp, MAXCHARS);

        // (EOF check deliberately disabled here: a final line without a
        // trailing newline would otherwise be dropped)
        // if (FileInput.eof())
        //     return false;

        // a too-long line sets failbit; clear it and keep what we got
        if (FileInput.fail())
            FileInput.clear (FileInput.rdstate() & ~std::ios::failbit);

        // unrecoverable error?
        if (FileInput.bad())
            return false;

        // successful read
        szInputString = szInp;
        GetTokens(szInputString, " ", tokens);
        bWhSpc = EatWhiteSpace(szInputString);
        // skip comment lines and blank lines; process everything else
        if ((szInputString.substr(0,nCLen) != szComment)&& (bWhSpc ==false)){
            szInputString = szInp;
            GetTokens(szInputString, " ", tokens);
            // does this line carry a "&" continuation marker?
            // NOTE(review): abs() on an unsigned size_t is a no-op at best
            // and an overload ambiguity at worst — presumably meant as a
            // signed cast; confirm this compiles on all toolchains.
            for(int i=0; i< abs(tokens.size()); i++){
                std::string temptoken = tokens[i];
                if (temptoken == "&")
                    flag1 = 1;
            }
            //Filter the comment tokens
            // FilterComment(szInputString, szComment);
            //if "&" is found continue to read the next line
            std::string szTempString = szInputString;
            // check if line is continued &
            while(flag1 ==1 && tokenfound == 1){
                GetTokens(szTempString, " ", tokens);
                // re-scan the most recently appended line for another "&"
                for(int i=1; i<=abs(tokens.size()); i++){
                    std::string temptoken = tokens[i-1];
                    if (temptoken == "&"){
                        tokenfound = 1;
                        flag = 1;
                    }
                    else{
                        if(flag==1)
                            flag = 1;//do nothing, token already found
                        else
                            tokenfound = 0;
                    }
                }
                if(tokenfound ==1){
                    ++nLineNum;
                    // drop the trailing "&" before appending the next line
                    RemoveToken(szInputString);
                    //- getting more tokens and add to the existing
                    FileInput.getline (szTemp, MAXCHARS);
                    // end-of-file?
                    if (FileInput.eof())
                        return false;
                    if (FileInput.fail())
                        FileInput.clear (FileInput.rdstate() & ~std::ios::failbit);
                    // unrecoverable error?
                    if (FileInput.bad())
                        return false;
                    // successful read: strip comments, then concatenate
                    szTempString = szTemp;
                    FilterComment(szTempString, szComment);
                    szInputString+=" ";
                    szInputString+=szTemp;
                }
                else{
                    break;//while loop ends
                }
                flag = 0;  // reset so the next appended line is re-checked
            } // while loop ends
            // convert to lower case?
            if (bLowerCase){
                for (int i=0; i < static_cast<int>(szInputString.length()); i++)
                    szInputString[i] = tolower(szInputString[i]);
            }
            break;
        }
    }
    return true;
}
// Scans the argument of a url(...) token, which is either a quoted string
// or a run of raw URL characters terminated by ')'. Produces eCSSToken_URL
// on success and eCSSToken_Bad_URL on malformed input. Returns PR_FALSE
// only when the input is already exhausted.
PRBool
nsCSSScanner::NextURL(nsCSSToken& aToken)
{
  EatWhiteSpace();

  PRInt32 ch = Read();
  if (ch < 0) {
    return PR_FALSE;
  }

  // STRING
  if ((ch == '"') || (ch == '\'')) {
#ifdef DEBUG
    PRBool ok =
#endif
      ParseString(ch, aToken);
    NS_ABORT_IF_FALSE(ok, "ParseString should never fail, "
                          "since there's always something read");

    NS_ABORT_IF_FALSE(aToken.mType == eCSSToken_String ||
                      aToken.mType == eCSSToken_Bad_String,
                      "unexpected token type");
    if (NS_LIKELY(aToken.mType == eCSSToken_String)) {
      // a quoted URL must be followed (after whitespace) by ')'
      EatWhiteSpace();
      if (LookAheadOrEOF(')')) {
        aToken.mType = eCSSToken_URL;
      } else {
        aToken.mType = eCSSToken_Bad_URL;
      }
    } else {
      aToken.mType = eCSSToken_Bad_URL;
    }
    return PR_TRUE;
  }

  // Process a url lexical token. A CSS1 url token can contain
  // characters beyond identifier characters (e.g. '/', ':', etc.)
  // Because of this the normal rules for tokenizing the input don't
  // apply very well. To simplify the parser and relax some of the
  // requirements on the scanner we parse url's here. If we find a
  // malformed URL then we emit a token of type "Bad_URL" so that
  // the CSS1 parser can ignore the invalid input.  The parser must
  // treat a Bad_URL token like a Function token, and process
  // tokens until a matching parenthesis.

  // assume failure until the scan below proves otherwise
  aToken.mType = eCSSToken_Bad_URL;
  aToken.mSymbol = PRUnichar(0);
  nsString& ident = aToken.mIdent;
  ident.SetLength(0);

  // re-scan ch as part of the unquoted URL body
  Pushback(ch);

  // start of a non-quoted url (which may be empty)
  PRBool ok = PR_TRUE;
  for (;;) {
    ch = Read();
    if (ch < 0) break;
    if (ch == CSS_ESCAPE) {
      ParseAndAppendEscape(ident);
    } else if (IsWhitespace(ch)) {
      // Whitespace is allowed at the end of the URL
      EatWhiteSpace();
      // Consume the close paren if we have it; if not we're an invalid URL.
      ok = LookAheadOrEOF(')');
      break;
    } else if (ch == '"' || ch == '\'' || ch == '(' || ch < PRUnichar(' ')) {
      // This is an invalid URL spec
      ok = PR_FALSE;
      Pushback(ch); // push it back so the parser can match tokens and
                    // then closing parenthesis
      break;
    } else if (ch == ')') {
      // All done
      break;
    } else {
      // A regular url character.
      ident.Append(PRUnichar(ch));
    }
  }

  // If the result of the above scanning is ok then change the token
  // type to a useful one.
  if (ok) {
    aToken.mType = eCSSToken_URL;
  }
  return PR_TRUE;
}
// Reads the next CSS token from the input stream into aToken.
// Returns PR_FALSE when the input is exhausted. Comments are skipped
// (restarting the scan), so the caller never sees a comment token.
PRBool
nsCSSScanner::Next(nsCSSToken& aToken)
{
  for (;;) { // Infinite loop so we can restart after comments.
    PRInt32 ch = Read();
    if (ch < 0) {
      return PR_FALSE;
    }

    // UNICODE-RANGE
    if ((ch == 'u' || ch == 'U') && Peek() == '+')
      return ParseURange(ch, aToken);

    // IDENT
    if (StartsIdent(ch, Peek()))
      return ParseIdent(ch, aToken);

    // AT_KEYWORD
    if (ch == '@') {
      PRInt32 nextChar = Read();
      // only push back a char we actually read (Read() is negative at EOF)
      if (nextChar >= 0) {
        PRInt32 followingChar = Peek();
        Pushback(nextChar);
        if (StartsIdent(nextChar, followingChar))
          return ParseAtKeyword(ch, aToken);
      }
    }

    // NUMBER or DIM
    if ((ch == '.') || (ch == '+') || (ch == '-')) {
      PRInt32 nextChar = Peek();
      if (IsDigit(nextChar)) {
        return ParseNumber(ch, aToken);
      }
      else if (('.' == nextChar) && ('.' != ch)) {
        // e.g. "+.5" — need two chars of lookahead to decide
        nextChar = Read();
        PRInt32 followingChar = Peek();
        Pushback(nextChar);
        if (IsDigit(followingChar))
          return ParseNumber(ch, aToken);
      }
    }
    if (IsDigit(ch)) {
      return ParseNumber(ch, aToken);
    }

    // ID
    if (ch == '#') {
      return ParseRef(ch, aToken);
    }

    // STRING
    if ((ch == '"') || (ch == '\'')) {
      return ParseString(ch, aToken);
    }

    // WS — collapse a whitespace run into a single WhiteSpace token
    if (IsWhitespace(ch)) {
      aToken.mType = eCSSToken_WhiteSpace;
      aToken.mIdent.Assign(PRUnichar(ch));
      EatWhiteSpace();
      return PR_TRUE;
    }
    if (ch == '/' && !IsSVGMode()) {
      PRInt32 nextChar = Peek();
      if (nextChar == '*') {
        (void) Read();
#if 0
        // If we change our storage data structures such that comments are
        // stored (for Editor), we should reenable this code, condition it
        // on being in editor mode, and apply glazou's patch from bug
        // 60290.
        aToken.mIdent.SetCapacity(2);
        aToken.mIdent.Assign(PRUnichar(ch));
        aToken.mIdent.Append(PRUnichar(nextChar));
        return ParseCComment(aToken);
#endif
        if (!SkipCComment()) {
          return PR_FALSE;
        }
        continue; // start again at the beginning
      }
    }
    if (ch == '<') {  // consume HTML comment tags
      if (LookAhead('!')) {
        if (LookAhead('-')) {
          if (LookAhead('-')) {
            aToken.mType = eCSSToken_HTMLComment;
            aToken.mIdent.AssignLiteral("<!--");
            return PR_TRUE;
          }
          // only a partial match: restore the consumed lookahead chars
          Pushback('-');
        }
        Pushback('!');
      }
    }
    if (ch == '-') {  // check for HTML comment end
      if (LookAhead('-')) {
        if (LookAhead('>')) {
          aToken.mType = eCSSToken_HTMLComment;
          aToken.mIdent.AssignLiteral("-->");
          return PR_TRUE;
        }
        Pushback('-');
      }
    }

    // INCLUDES ("~=") and DASHMATCH ("|=")
    if (( ch == '|' ) || ( ch == '~' ) || ( ch == '^' ) ||
        ( ch == '$' ) || ( ch == '*' )) {
      PRInt32 nextChar = Read();
      if ( nextChar == '=' ) {
        if (ch == '~') {
          aToken.mType = eCSSToken_Includes;
        }
        else if (ch == '|') {
          aToken.mType = eCSSToken_Dashmatch;
        }
        else if (ch == '^') {
          aToken.mType = eCSSToken_Beginsmatch;
        }
        else if (ch == '$') {
          aToken.mType = eCSSToken_Endsmatch;
        }
        else if (ch == '*') {
          aToken.mType = eCSSToken_Containsmatch;
        }
        return PR_TRUE;
      } else if (nextChar >= 0) {
        // not a match operator: un-read the extra char
        Pushback(nextChar);
      }
    }

    // fall-through: any other single character is a Symbol token
    aToken.mType = eCSSToken_Symbol;
    aToken.mSymbol = ch;
    return PR_TRUE;
  }
}
// scanf-style extraction from a text buffer. Supports %c %i %d %x %u %f %s;
// a literal ' ' in the format eats all whitespace, and any other literal
// character must match the buffer exactly or scanning stops.
// Returns the number of conversions completed.
int CUtlBufferEditor::VaScanf( const char* pFmt, va_list list )
{
	Assert( pFmt );
	if ( m_Error || !IsText() )
		return 0;
	
	int numScanned = 0;
	int nLength;
	char c;
	char* pEnd;
	// NOTE: assignment-in-condition is deliberate (iterate until NUL)
	while ( c = *pFmt++ )
	{
		// Stop if we hit the end of the buffer
		if ( m_Get >= TellMaxPut() )
		{
			m_Error |= GET_OVERFLOW;
			break;
		}

		switch (c)
		{
		case ' ':
			// eat all whitespace
			EatWhiteSpace();
			break;

		case '%':
			{
				// Conversion character... try to convert baby!
				char type = *pFmt++;
				if (type == 0)
					return numScanned;

				switch(type)
				{
				case 'c':
					{
						char* ch = va_arg( list, char * );
						if ( CheckPeekGet( 0, sizeof(char) ) )
						{
							*ch = *(const char*)PeekGet();
							++m_Get;
						}
						else
						{
							*ch = 0;
							return numScanned;
						}
					}
					break;

				case 'i':
				case 'd':
					{
						int* i = va_arg( list, int * );

						// NOTE: This is not bullet-proof; it assumes numbers are < 128 characters
						nLength = 128;
						if ( !CheckArbitraryPeekGet( 0, nLength ) )
						{
							*i = 0;
							return numScanned;
						}

						*i = strtol( (char*)PeekGet(), &pEnd, 10 );
						int nBytesRead = (int)( pEnd - (char*)PeekGet() );
						if ( nBytesRead == 0 )
							return numScanned;
						m_Get += nBytesRead;
					}
					break;
				
				case 'x':
					{
						int* i = va_arg( list, int * );

						// NOTE: This is not bullet-proof; it assumes numbers are < 128 characters
						nLength = 128;
						if ( !CheckArbitraryPeekGet( 0, nLength ) )
						{
							*i = 0;
							return numScanned;
						}

						*i = strtol( (char*)PeekGet(), &pEnd, 16 );
						int nBytesRead = (int)( pEnd - (char*)PeekGet() );
						if ( nBytesRead == 0 )
							return numScanned;
						m_Get += nBytesRead;
					}
					break;
					
				case 'u':
					{
						unsigned int* u = va_arg( list, unsigned int *);

						// NOTE: This is not bullet-proof; it assumes numbers are < 128 characters
						nLength = 128;
						if ( !CheckArbitraryPeekGet( 0, nLength ) )
						{
							*u = 0;
							return numScanned;
						}

						*u = strtoul( (char*)PeekGet(), &pEnd, 10 );
						int nBytesRead = (int)( pEnd - (char*)PeekGet() );
						if ( nBytesRead == 0 )
							return numScanned;
						m_Get += nBytesRead;
					}
					break;
					
				case 'f':
					{
						float* f = va_arg( list, float *);

						// NOTE: This is not bullet-proof; it assumes numbers are < 128 characters
						nLength = 128;
						if ( !CheckArbitraryPeekGet( 0, nLength ) )
						{
							*f = 0.0f;
							return numScanned;
						}

						*f = (float)strtod( (char*)PeekGet(), &pEnd );
						int nBytesRead = (int)( pEnd - (char*)PeekGet() );
						if ( nBytesRead == 0 )
							return numScanned;
						m_Get += nBytesRead;
					}
					break;
					
				case 's':
					{
						char* s = va_arg( list, char * );
						// NOTE(review): no size limit is passed here —
						// presumably GetString's nMaxChars defaults to 0
						// (unlimited); the destination must be big enough.
						GetString( s );
					}
					break;

				default:
					{
						// unimplemented scanf type
						Assert(0);
						return numScanned;
					}
					break;
				}

				++numScanned;
			}
			break;

		default:
			{
				// Here we have to match the format string character
				// against what's in the buffer or we're done.
				if ( !CheckPeekGet( 0, sizeof(char) ) )
					return numScanned;

				if ( c != *(const char*)PeekGet() )
					return numScanned;

				++m_Get;
			}
		}
	}
	return numScanned;
}
//----------------------------------------------------------------------------- // Parses the next token, given a set of character breaks to stop at //----------------------------------------------------------------------------- int CUtlBufferEditor::ParseToken( characterset_t *pBreaks, char *pTokenBuf, int nMaxLen, bool bParseComments ) { Assert( nMaxLen > 0 ); pTokenBuf[0] = 0; // skip whitespace + comments while ( true ) { if ( !IsValid() ) return -1; EatWhiteSpace(); if ( bParseComments ) { if ( !EatCPPComment() ) break; } else { break; } } char c = GetChar(); // End of buffer if ( c == 0 ) return -1; // handle quoted strings specially if ( c == '\"' ) { int nLen = 0; while( IsValid() ) { c = GetChar(); if ( c == '\"' || !c ) { pTokenBuf[nLen] = 0; return nLen; } pTokenBuf[nLen] = c; if ( ++nLen == nMaxLen ) { pTokenBuf[nLen-1] = 0; return nMaxLen; } } // In this case, we hit the end of the buffer before hitting the end qoute pTokenBuf[nLen] = 0; return nLen; } // parse single characters if ( IN_CHARACTERSET( *pBreaks, c ) ) { pTokenBuf[0] = c; pTokenBuf[1] = 0; return 1; } // parse a regular word int nLen = 0; while ( true ) { pTokenBuf[nLen] = c; if ( ++nLen == nMaxLen ) { pTokenBuf[nLen-1] = 0; return nMaxLen; } c = GetChar(); if ( !IsValid() ) break; if ( IN_CHARACTERSET( *pBreaks, c ) || c == '\"' || c <= ' ' ) { SeekGet( SEEK_CURRENT, -1 ); break; } } pTokenBuf[nLen] = 0; return nLen; }
//-----------------------------------------------------------------------------
// (For text buffers only)
// Parse a token from the buffer:
// Grab all text that lies between a starting delimiter + ending delimiter
// (skipping whitespace that leads + trails both delimiters).
// Note the delimiter checks are case-insensitive.
// If successful, the get index is advanced and the function returns true,
// otherwise the index is not advanced and the function returns false.
//-----------------------------------------------------------------------------
bool CUtlBufferEditor::ParseToken( const char *pStartingDelim, const char *pEndingDelim, char* pString, int nMaxLen )
{
	// All locals declared up front because the parseFailed label below
	// must not jump over any initialization.
	int nCharsToCopy = 0;
	int nCurrentGet = 0;

	size_t nEndingDelimLen;

	// Starting delimiter is optional
	char emptyBuf = '\0';
	if ( !pStartingDelim )
	{
		pStartingDelim = &emptyBuf;
	}

	// Ending delimiter is not
	Assert( pEndingDelim && pEndingDelim[0] );
	nEndingDelimLen = Q_strlen( pEndingDelim );

	// remember where we started so we can rewind on failure
	int nStartGet = TellGet();
	char nCurrChar;
	int nTokenStart = -1;
	EatWhiteSpace( );
	// match the starting delimiter char-by-char, case-insensitively;
	// whitespace inside the delimiter matches any run of whitespace
	while ( *pStartingDelim )
	{
		nCurrChar = *pStartingDelim++;
		if ( !isspace((unsigned char)nCurrChar) )
		{
			if ( tolower( GetChar() ) != tolower( nCurrChar ) )
				goto parseFailed;
		}
		else
		{
			EatWhiteSpace();
		}
	}

	EatWhiteSpace();
	nTokenStart = TellGet();
	// advance past the ending delimiter (GetToken consumes it)
	if ( !GetToken( pEndingDelim ) )
		goto parseFailed;

	nCurrentGet = TellGet();
	// token text is everything between nTokenStart and the delimiter
	// NOTE(review): signed/unsigned mix here — nEndingDelimLen is size_t;
	// verify the subtraction cannot wrap for adjacent delimiters.
	nCharsToCopy = (nCurrentGet - nEndingDelimLen) - nTokenStart;
	if ( nCharsToCopy >= nMaxLen )
	{
		nCharsToCopy = nMaxLen - 1;
	}

	if ( nCharsToCopy > 0 )
	{
		// rewind to the token start and copy it out
		SeekGet( CUtlBufferEditor::SEEK_HEAD, nTokenStart );
		Get( pString, nCharsToCopy );
		if ( !IsValid() )
			goto parseFailed;

		// Eat trailing whitespace
		for ( ; nCharsToCopy > 0; --nCharsToCopy )
		{
			if ( !isspace( (unsigned char)pString[ nCharsToCopy-1 ] ) )
				break;
		}
	}

	pString[ nCharsToCopy ] = '\0';

	// Advance the Get index
	SeekGet( CUtlBufferEditor::SEEK_HEAD, nCurrentGet );
	return true;

parseFailed:
	// Revert the get index
	SeekGet( SEEK_HEAD, nStartGet );
	pString[0] = '\0';
	return false;
}
// Older url(...) argument scanner (aErrorCode-threading API). Handles a
// quoted string, whitespace, comments, an empty spec, or a raw URL body.
// Emits eCSSToken_URL on success and eCSSToken_InvalidURL on malformed
// input; returns PR_FALSE only when input is exhausted.
PRBool nsCSSScanner::NextURL(nsresult& aErrorCode, nsCSSToken& aToken)
{
  PRInt32 ch = Read(aErrorCode);
  if (ch < 0) {
    return PR_FALSE;
  }
  if (ch < 256) {
    PRUint8* lexTable = gLexTable;

    // STRING
    if ((ch == '"') || (ch == '\'')) {
      return ParseString(aErrorCode, ch, aToken);
    }

    // WS — collapse whitespace into one token
    if ((lexTable[ch] & IS_WHITESPACE) != 0) {
      aToken.mType = eCSSToken_WhiteSpace;
      aToken.mIdent.Assign(PRUnichar(ch));
      (void) EatWhiteSpace(aErrorCode);
      return PR_TRUE;
    }
    if (ch == '/') {
      PRInt32 nextChar = Peek(aErrorCode);
      if (nextChar == '*') {
        (void) Read(aErrorCode);
#if 0
        // If we change our storage data structures such that comments are
        // stored (for Editor), we should reenable this code, condition it
        // on being in editor mode, and apply glazou's patch from bug
        // 60290.
        aToken.mIdent.SetCapacity(2);
        aToken.mIdent.Assign(PRUnichar(ch));
        aToken.mIdent.Append(PRUnichar(nextChar));
        return ParseCComment(aErrorCode, aToken);
#endif
        // skip the comment and scan an ordinary token after it
        return SkipCComment(aErrorCode) && Next(aErrorCode, aToken);
      }
    }

    // Process a url lexical token. A CSS1 url token can contain
    // characters beyond identifier characters (e.g. '/', ':', etc.)
    // Because of this the normal rules for tokenizing the input don't
    // apply very well. To simplify the parser and relax some of the
    // requirements on the scanner we parse url's here. If we find a
    // malformed URL then we emit a token of type "InvalidURL" so that
    // the CSS1 parser can ignore the invalid input. We attempt to eat
    // the right amount of input data when an invalid URL is presented.

    // assume failure until the scan below proves otherwise
    aToken.mType = eCSSToken_InvalidURL;
    nsString& ident = aToken.mIdent;
    ident.SetLength(0);

    if (ch == ')') {
      Pushback(ch);
      // empty url spec; just get out of here
      aToken.mType = eCSSToken_URL;
    } else {
      // start of a non-quoted url
      Pushback(ch);
      PRBool ok = PR_TRUE;
      for (;;) {
        ch = Read(aErrorCode);
        if (ch < 0) break;
        if (ch == CSS_ESCAPE) {
          ParseAndAppendEscape(aErrorCode, ident);
        } else if ((ch == '"') || (ch == '\'') || (ch == '(')) {
          // This is an invalid URL spec
          ok = PR_FALSE;
        } else if ((256 > ch) &&
                   ((gLexTable[ch] & IS_WHITESPACE) != 0)) {
          // Whitespace is allowed at the end of the URL
          (void) EatWhiteSpace(aErrorCode);
          if (LookAhead(aErrorCode, ')')) {
            Pushback(')');  // leave the closing symbol
            // done!
            break;
          }
          // Whitespace is followed by something other than a
          // ")". This is an invalid url spec.
          ok = PR_FALSE;
        } else if (ch == ')') {
          // leave the ')' in the stream for the caller to consume
          Unread();
          // All done
          break;
        } else {
          // A regular url character.
          ident.Append(PRUnichar(ch));
        }
      }

      // If the result of the above scanning is ok then change the token
      // type to a useful one.
      if (ok) {
        aToken.mType = eCSSToken_URL;
      }
    }
  }
  return PR_TRUE;
}
// Reads the next CSS token from the input into aToken (aErrorCode-threading
// API). Returns PR_FALSE when the input is exhausted, PR_TRUE otherwise.
//
// FIX: previously this function called Pushback(nextChar) even when Read()
// had returned a negative EOF value, in both the AT_KEYWORD ('@') path and
// the INCLUDES/DASHMATCH path. Pushing back a non-character corrupts the
// pushback buffer. Both sites now guard with (nextChar >= 0), matching the
// newer Next() overload in this file.
PRBool nsCSSScanner::Next(nsresult& aErrorCode, nsCSSToken& aToken)
{
  PRInt32 ch = Read(aErrorCode);
  if (ch < 0) {
    return PR_FALSE;
  }
  PRUint8* lexTable = gLexTable;

  // IDENT
  if (StartsIdent(ch, Peek(aErrorCode), lexTable))
    return ParseIdent(aErrorCode, ch, aToken);

  // From this point on, 0 <= ch < 256.

  // AT_KEYWORD
  if (ch == '@') {
    PRInt32 nextChar = Read(aErrorCode);
    if (nextChar >= 0) {  // don't Pushback an EOF sentinel
      PRInt32 followingChar = Peek(aErrorCode);
      Pushback(nextChar);
      if (StartsIdent(nextChar, followingChar, lexTable))
        return ParseAtKeyword(aErrorCode, ch, aToken);
    }
  }

  // NUMBER or DIM
  if ((ch == '.') || (ch == '+') || (ch == '-')) {
    PRInt32 nextChar = Peek(aErrorCode);
    if (CheckLexTable(nextChar, IS_DIGIT, lexTable)) {
      return ParseNumber(aErrorCode, ch, aToken);
    }
    else if (('.' == nextChar) && ('.' != ch)) {
      // e.g. "+.5" — need a second char of lookahead to decide
      nextChar = Read(aErrorCode);
      PRInt32 followingChar = Peek(aErrorCode);
      Pushback(nextChar);
      if (CheckLexTable(followingChar, IS_DIGIT, lexTable))
        return ParseNumber(aErrorCode, ch, aToken);
    }
  }
  if ((lexTable[ch] & IS_DIGIT) != 0) {
    return ParseNumber(aErrorCode, ch, aToken);
  }

  // ID
  if (ch == '#') {
    return ParseRef(aErrorCode, ch, aToken);
  }

  // STRING
  if ((ch == '"') || (ch == '\'')) {
    return ParseString(aErrorCode, ch, aToken);
  }

  // WS — collapse a whitespace run into a single WhiteSpace token
  if ((lexTable[ch] & IS_WHITESPACE) != 0) {
    aToken.mType = eCSSToken_WhiteSpace;
    aToken.mIdent.Assign(PRUnichar(ch));
    (void) EatWhiteSpace(aErrorCode);
    return PR_TRUE;
  }
  if (ch == '/') {
    PRInt32 nextChar = Peek(aErrorCode);
    if (nextChar == '*') {
      (void) Read(aErrorCode);
#if 0
      // If we change our storage data structures such that comments are
      // stored (for Editor), we should reenable this code, condition it
      // on being in editor mode, and apply glazou's patch from bug
      // 60290.
      aToken.mIdent.SetCapacity(2);
      aToken.mIdent.Assign(PRUnichar(ch));
      aToken.mIdent.Append(PRUnichar(nextChar));
      return ParseCComment(aErrorCode, aToken);
#endif
      return SkipCComment(aErrorCode) && Next(aErrorCode, aToken);
    }
  }
  if (ch == '<') {  // consume HTML comment tags
    if (LookAhead(aErrorCode, '!')) {
      if (LookAhead(aErrorCode, '-')) {
        if (LookAhead(aErrorCode, '-')) {
          aToken.mType = eCSSToken_HTMLComment;
          aToken.mIdent.AssignLiteral("<!--");
          return PR_TRUE;
        }
        // partial match: restore the consumed lookahead chars
        Pushback('-');
      }
      Pushback('!');
    }
  }
  if (ch == '-') {  // check for HTML comment end
    if (LookAhead(aErrorCode, '-')) {
      if (LookAhead(aErrorCode, '>')) {
        aToken.mType = eCSSToken_HTMLComment;
        aToken.mIdent.AssignLiteral("-->");
        return PR_TRUE;
      }
      Pushback('-');
    }
  }

  // INCLUDES ("~=") and DASHMATCH ("|=")
  if (( ch == '|' ) || ( ch == '~' ) || ( ch == '^' ) ||
      ( ch == '$' ) || ( ch == '*' )) {
    PRInt32 nextChar = Read(aErrorCode);
    if ( nextChar == '=' ) {
      if (ch == '~') {
        aToken.mType = eCSSToken_Includes;
      }
      else if (ch == '|') {
        aToken.mType = eCSSToken_Dashmatch;
      }
      else if (ch == '^') {
        aToken.mType = eCSSToken_Beginsmatch;
      }
      else if (ch == '$') {
        aToken.mType = eCSSToken_Endsmatch;
      }
      else if (ch == '*') {
        aToken.mType = eCSSToken_Containsmatch;
      }
      return PR_TRUE;
    } else if (nextChar >= 0) {  // don't Pushback an EOF sentinel
      Pushback(nextChar);
    }
  }

  // fall-through: any other single character is a Symbol token
  aToken.mType = eCSSToken_Symbol;
  aToken.mSymbol = ch;
  return PR_TRUE;
}
// Parses the next application record of the form:
//   <uid> = {
//   origin=<value>
//   mtime=<value>
//   }
// followed by a blank line. Returns a newly allocated MoonAppRecord (caller
// owns), or NULL on EOF or any parse error (partial allocations are freed).
MoonAppRecord *
MoonAppRecordIterator::Next ()
{
	char *uid, *origin = NULL;
	MoonAppRecord *app;
	time_t mtime = 0;
	char c;
	
	// nothing buffered and nothing left to read?
	if (inptr == inend && !Fill ())
		return NULL;
	
	if (!EatWhiteSpace (true))
		return NULL;
	
	// first, try decoding the uid
	if (!(uid = ParseUid ()))
		return NULL;
	
	// make sure the next token is an '='
	if (NextToken () != '=')
		goto error;
	inptr++;
	
	// make sure the next token is '{'
	if (NextToken () != '{')
		goto error;
	inptr++;
	
	// make sure the next token is '\n'
	if (NextToken () != '\n')
		goto error;
	inptr++;
	
	// now we parse the properties (like origin and mtime), 1 property per line
	while ((c = NextToken ()) != '}') {
		switch (c) {
		case 'o': /* origin? */
			if (origin) {
				// already have origin, error
				goto error;
			}
			
			// make sure we have enough data for strncmp
			while ((inend - inptr) < 6) {
				if (!Fill ())
					goto error;
			}
			
			if (strncmp (inptr, "origin", 6) != 0)
				goto error;
			inptr += 6;
			
			// make sure next token is '='
			if (NextToken () != '=')
				goto error;
			inptr++;
			
			// parse the origin
			if (!(origin = ParseOrigin ()))
				goto error;
			
			// Note: ParseOrigin() gobbles the trailing '\n' for us
			break;
		case 'm': /* mtime? */
			if (mtime != 0) {
				// already have mtime, error
				goto error;
			}
			
			// make sure we have enough data for strncmp
			while ((inend - inptr) < 5) {
				if (!Fill ())
					goto error;
			}
			
			if (strncmp (inptr, "mtime", 5) != 0)
				goto error;
			inptr += 5;
			
			// make sure next token is '='
			if (NextToken () != '=')
				goto error;
			inptr++;
			
			// parse the mtime
			// NOTE(review): a legitimate mtime of 0 is indistinguishable
			// from a parse failure here — confirm ParseMTime()'s contract
			if (!(mtime = ParseMTime ()))
				goto error;
			
			// make sure the next token is '\n'
			if (NextToken () != '\n')
				goto error;
			inptr++;
			break;
		default: /* error */
			goto error;
		}
	}
	
	// consume the closing '}'
	inptr++;
	
	// records are terminated by a blank line
	if (NextToken () != '\n')
		goto error;
	inptr++;
	
	// hand ownership of uid/origin over to the record
	app = new MoonAppRecord ();
	app->origin = origin;
	app->mtime = mtime;
	app->uid = uid;
	
	return app;
	
error:
	g_free (origin);
	g_free (uid);
	
	return NULL;
}