Beispiel #1
0
// Inserts newToken at position loc.  Null tokens are rejected with -1;
// otherwise the work is delegated to AddToken, whose result is returned.
int TokensTree::insert(int loc, Token* newToken)
{
    if (newToken == nullptr)
        return -1;

    return AddToken(newToken, loc);
}
/*
 * Append a literal-text token of nChars characters starting at zLit to the
 * parse state.  A leading "\n" or "\n\r" sequence is split off and emitted
 * as a separate Newline token; the remainder becomes a Literal token.
 * The token header and its NUL-terminated text share one malloc'd block
 * (the text lives immediately after the TemplateToken struct).
 */
static void AddLiteral(const char* zLit, int nChars, struct Parse *pParse)
{
  TemplateToken *pToken;
  if (!nChars) return;
  pToken = (TemplateToken*)malloc(sizeof(TemplateToken) + (nChars+1)*sizeof(char));
  assert(pToken);
  memset(pToken, 0, sizeof(TemplateToken));
  /* BUG FIX: the original tested "*zLit=='\n' && *zLit=='\r'" — the same
   * character compared against two different values, which can never be
   * true.  The second character must be read through zLit[1]. */
  if ( (nChars==1 && *zLit=='\n') || (nChars==2 && zLit[0]=='\n' && zLit[1]=='\r'))
    pToken->nType = Newline;
  else
  {
    if (nChars>2 && zLit[0]=='\n' && zLit[1]=='\r')
    {
      AddLiteral(zLit, 2, pParse); /* emit the newline pair as its own token */
      zLit+=2;
      nChars-=2;
    } else if (nChars>1 && *zLit=='\n') {
      AddLiteral(zLit, 1, pParse); /* emit the lone newline as its own token */
      zLit++;
      nChars--;
    }
    pToken->nType = Literal;
  }
  pToken->nChars = nChars;
  AddToken(pToken, pParse);
  /* Copy the text just past the header and terminate it. */
  pToken++;
  strncpy((char*)pToken, zLit, nChars);
  ((char*)pToken)[nChars]='\0';
}
/*
 * Append a command token parsed from the NUL-terminated string zCmd to the
 * parse state.  "other" and "common" map to their dedicated token types;
 * any other command is a Variable, upgraded to InternalVariable when it
 * matches an entry of the builtin VariableStr table.  The token header and
 * a NUL-terminated copy of zCmd share a single allocation.
 */
static void AddCommand(const char *zCmd, struct Parse *pParse)
{
  TemplateToken *pToken;
  char **pzVar;
  int nChars;

  nChars = strlen(zCmd);
  if (nChars == 0) return;

  pToken = (TemplateToken*)malloc(sizeof(TemplateToken)+(nChars+1)*sizeof(char));
  assert(pToken);
  memset(pToken, 0, sizeof(TemplateToken));

  if (strcmp(zCmd, "other") == 0) {
    pToken->nType = Alternative;
  } else if (strcmp(zCmd, "common") == 0) {
    pToken->nType = Common;
  } else {
    pToken->nType = Variable;
    pToken->nChars = nChars;
    /* Scan the builtin table; a match promotes the token.  (The scan runs
     * to the end of the table, matching the original behavior.) */
    for (pzVar = VariableStr; *pzVar; pzVar++) {
      if (strcmp(*pzVar, zCmd) == 0) {
        pToken->nType = InternalVariable;
        pToken->nVariable = pzVar - VariableStr;
      }
    }
  }

  AddToken(pToken, pParse);
  /* The command text is stored immediately after the header. */
  pToken++;
  strncpy((char*)pToken, zCmd, nChars);
  ((char*)pToken)[nChars] = '\0';
}
/**
 * This method is called just after a known text char has
 * been consumed and we should read a text run. Note: we actually ignore the
 * first character of the text run so that we can consume invalid markup 
 * as text.
 *  
 * @param aToken The OUT parameter that holds our resulting token.
 * @param aScanner Our source of data
 * @return Error result.
 */ 
nsresult
nsHTMLTokenizer::ConsumeText(CToken*& aToken, nsScanner& aScanner)
{
  nsresult result = NS_OK;
  nsTokenAllocator* theAllocator = this->GetTokenAllocator();
  CTextToken* theToken =
    (CTextToken*)theAllocator->CreateTokenOfType(eToken_text, eHTMLTag_text);
  if (theToken) {
    PRUnichar ch = '\0';
    result = theToken->Consume(ch, aScanner, mFlags);
    if (NS_FAILED(result)) {
      if (0 == theToken->GetTextLength()) {
        // No text was consumed at all.
        // NOTE(review): this frees aToken (the in/out parameter), not
        // theToken, yet aToken is unconditionally overwritten with
        // theToken just below — confirm the intended target of IF_FREE.
        IF_FREE(aToken, mTokenAllocator);
        aToken = nullptr;
      } else {
        // Partial text was consumed; keep it and mask the failure.
        result = NS_OK;
      }
    }

    aToken = theToken;
    // AddToken only queues the token when result indicates success
    // (see the note in ConsumeEndTag); on failure it releases it.
    AddToken(aToken, result, &mTokenDeque, theAllocator);
  }

  return result;
}
Beispiel #5
0
// Splits `range` around any spans the keyword manager already recognizes:
// each recognized span becomes an identifier token, and the unknown gaps
// between them are tokenized by delimiters.  `enclosed` propagates whether
// the range lies inside brackets.
void Tokenizer::TokenizeByPreidentified(bool enclosed, const TokenRange& range) {
  std::vector<TokenRange> preidentified_tokens;
  // Ask the keyword manager for the sub-ranges it can identify up front.
  keyword_manager.Peek(filename_, range, elements_, preidentified_tokens);

  size_t offset = range.offset;
  // `subrange` accumulates the unknown text between identified spans.
  TokenRange subrange(range.offset, 0);

  while (offset < range.offset + range.size) {
    for (const auto& preidentified_token : preidentified_tokens) {
      if (offset == preidentified_token.offset) {
        // Flush whatever unknown text preceded this identified span.
        if (subrange.size > 0)
          TokenizeByDelimiters(enclosed, subrange);
        AddToken(kIdentifier, enclosed, preidentified_token);
        // Restart accumulation right after the identified span.
        subrange.offset = preidentified_token.offset + preidentified_token.size;
        offset = subrange.offset - 1;  // It's going to be incremented below
        break;
      }
    }
    subrange.size = ++offset - subrange.offset;
  }

  // Either there was no preidentified token range, or we're now about to
  // process the tail of our current range.
  if (subrange.size > 0)
    TokenizeByDelimiters(enclosed, subrange);
}
// Dispatch on the parse outcome: a correct parse returns to the initial
// state; a wrong one switches to line-skipping mode and then flushes the
// pending token.
void CParser::checkFinished()
{
    if( IsCorrect() ) {
        state = &CParser::parsingInitial;
        return;
    }
    if( IsWrong() ) {
        state = &CParser::parsingIgnoreLine;
        AddToken();
    }
}
Beispiel #7
0
void Driver2Part1Test2()
{
  std::vector<Token> tokens;
  AddToken(tokens, "class", TokenType::Class);
  AddToken(tokens, "Enemy", TokenType::Identifier);
  AddToken(tokens, "{", TokenType::OpenCurley);
  AddToken(tokens, "var", TokenType::Var);
  AddToken(tokens, "Lives", TokenType::Identifier);
  AddToken(tokens, ":", TokenType::Colon);
  AddToken(tokens, "Integer", TokenType::Identifier);
  AddToken(tokens, ";", TokenType::Semicolon);
  AddToken(tokens, "}", TokenType::CloseCurley);

  RunTest(1, 2, tokens);
}
Beispiel #8
0
// Hashes an arbitrary C string and registers the resulting token in the
// token hash table.
static inline
void AddCustomToken(GLToken* TokenHash, const char* Value)
{
  GLString Name;
  Name.Chars = (char*)Value;
  Name.Length = (unsigned int)strlen(Value);

  GLToken Entry;
  Entry.Hash = GetStringHash(Name);
  AddToken(TokenHash, Entry);
}
Beispiel #9
0
void CParser::BoolExpr()
{
	if (mLookahead == '!')
	{
		Match('!');
		BoolExpr();
		AddToken(opNOT);
	}
	else
	{
		Expr();
		if (mLookahead == RELOP)
		{
			int savedRelop = mRelop;
			Match(RELOP);
			Expr();
			AddToken(savedRelop);
		}
	}
} // CParser::BoolExpr
Beispiel #10
0
	void AddTokenCommand(const CString& sLine) {
		CString sToken = sLine.Token(1).Left(DEFAULT_TOKEN_ID_LEN);

		if (sToken.length() != 12) {
			PutModule("Invalid token ID");
			return;
		}

		AddToken(sToken);
		PutModule(sToken + " added");
	}
Beispiel #11
0
// Splits filename_ into bracketed and non-bracketed regions.  Every
// bracket character becomes its own token, and each region in between is
// handed to TokenizeByPreidentified() with a flag saying whether it lies
// inside brackets.
void Tokenizer::TokenizeByBrackets() {
  // Supported (opening, closing) bracket pairs.
  static const std::vector<std::pair<char_t, char_t>> brackets{
      {L'(', L')'},  // U+0028-U+0029 Parenthesis
      {L'[', L']'},  // U+005B-U+005D Square bracket
      {L'{', L'}'},  // U+007B-U+007D Curly bracket
      {L'\u300C', L'\u300D'},  // Corner bracket
      {L'\u300E', L'\u300F'},  // White corner bracket
      {L'\u3010', L'\u3011'},  // Black lenticular bracket
      {L'\uFF08', L'\uFF09'},  // Fullwidth parenthesis
  };

  bool is_bracket_open = false;
  // The closer that matches the most recently opened bracket.
  char_t matching_bracket = L'\0';

  auto char_begin = filename_.begin();
  const auto char_end = filename_.end();

  // This is basically std::find_first_of() customized to our needs:
  // it also records which closer matches the opener it found.
  auto find_first_bracket = [&]() -> string_t::const_iterator {
    for (auto it = char_begin; it != char_end; ++it) {
      for (const auto& bracket_pair : brackets) {
        if (*it == bracket_pair.first) {
          matching_bracket = bracket_pair.second;
          return it;
        }
      }
    }
    return char_end;
  };

  auto current_char = char_begin;

  while (current_char != char_end && char_begin != char_end) {
    if (!is_bracket_open) {
      current_char = find_first_bracket();
    } else {
      // Looking for the matching bracket allows us to better handle some rare
      // cases with nested brackets.
      current_char = std::find(char_begin, char_end, matching_bracket);
    }

    // The span between the previous bracket (or string start) and the
    // bracket just found, expressed as (offset, size) into filename_.
    const TokenRange range(std::distance(filename_.begin(), char_begin),
                           std::distance(char_begin, current_char));

    if (range.size > 0)  // Found unknown token
      TokenizeByPreidentified(is_bracket_open, range);

    if (current_char != char_end) {  // Found bracket
      AddToken(kBracket, true, TokenRange(range.offset + range.size, 1));
      is_bracket_open = !is_bracket_open;
      char_begin = ++current_char;
    }
  }
}
/**
 * This method consumes an end tag and any "attributes" that may come after it.
 *
 * @param aChar The last character read from the scanner.
 * @param aToken The OUT parameter that holds our resulting token.
 * @param aScanner Our source of data
 * @return Error result
 */
nsresult
nsHTMLTokenizer::ConsumeEndTag(PRUnichar aChar,
                               CToken*& aToken,
                               nsScanner& aScanner)
{
  // Get the "/" (we've already seen it with a Peek)
  aScanner.GetChar(aChar);

  nsTokenAllocator* theAllocator = this->GetTokenAllocator();
  aToken = theAllocator->CreateTokenOfType(eToken_end, eHTMLTag_unknown);
  NS_ENSURE_TRUE(aToken, NS_ERROR_OUT_OF_MEMORY);

  // Remember this for later in case you have to unwind...
  int32_t theDequeSize = mTokenDeque.GetSize();
  nsresult result = NS_OK;

  // Tell the new token to finish consuming text...
  result = aToken->Consume(aChar, aScanner, mFlags);
  AddToken(aToken, result, &mTokenDeque, theAllocator);
  if (NS_FAILED(result)) {
    // Note that this early-return here is safe because we have not yet
    // added any of our tokens to the queue (AddToken only adds the token if
    // result is a success), so we don't need to fall through.
    return result;
  }

  result = aScanner.Peek(aChar);
  if (NS_FAILED(result)) {
    aToken->SetInError(true);

    // Note: We know here that the scanner is not incremental since if
    // this peek fails, then we've already masked over a kEOF coming from
    // the Consume() call above.
    return NS_OK;
  }

  // Anything other than ">" after the tag name means the end tag carries
  // attributes (invalid, but consumed anyway); otherwise eat the ">".
  if (kGreaterThan != aChar) {
    result = ConsumeAttributes(aChar, aToken, aScanner);
  } else {
    aScanner.GetChar(aChar);
  }

  // Do the same thing as we do in ConsumeStartTag. Basically, if we've run
  // out of room in this *section* of the document, pop all of the tokens
  // we've consumed this round and wait for more data.
  if (NS_FAILED(result)) {
    while (mTokenDeque.GetSize() > theDequeSize) {
      CToken* theToken = (CToken*)mTokenDeque.Pop();
      IF_FREE(theToken, mTokenAllocator);
    }
  }

  return result;
}
Beispiel #13
0
// Scans the file named Filename for tokens that look like GL function
// names ("glXxx...") or GL defines ("GL_..."), adding previously unseen,
// known-or-ignored tokens to the respective hash tables and bumping the
// matching counters.  Returns 1 on success, 0 if the file can't be read.
static inline
int ParseFile(char* Filename, GLArbToken* ArbHash,
               GLToken* FunctionsHash, unsigned int* FunctionCount,
               GLToken* DefinesHash, unsigned int* DefinesCount,
               GLSettings* Settings)
{
  char* Data = ReadEntireFile(Filename);
  if (!Data)
  {
    fprintf(stderr, "Couldn't open file %s", Filename);
    return 0;
  }

  GLTokenizer Tokenizer;
  Tokenizer.At = Data;
  while (*Tokenizer.At)
  {
    GLToken Token = ParseToken(&Tokenizer);

    // "gl" + uppercase third letter marks a GL entry point.
    if (StartsWith(Token.Value, "gl") && IsUpperCase(Token.Value.Chars[2]) &&
        !Contains(FunctionsHash, Token) &&
        IsKnownOrIgnoredToken(ArbHash, &Token, Settings))
    {
      AddToken(FunctionsHash, Token);
      *FunctionCount += 1;
    }

    // "GL_" prefix marks a define/enum value.
    if (StartsWith(Token.Value, "GL_") &&
        !Contains(DefinesHash, Token) &&
        IsKnownOrIgnoredToken(ArbHash, &Token, Settings))
    {
      AddToken(DefinesHash, Token);
      *DefinesCount += 1;
    }
  }

  free(Data);
  return 1;
}
 // Registers `token` under `cmd` and emits the TUI script describing a
 // checkbox widget titled `title`, initialized to `state`; toggling the
 // checkbox invokes `cmd` with the new value.
 void createCheckbox(const char *title,NxI32 token,const char *cmd,bool state)
 {
   AddToken(cmd,token);
   CPARSER.Parse("TuiCheckboxBegin %s", cmd);
   CPARSER.Parse("TuiName \"%s\"",title);
   // Initial value, spelled as a textual TUI literal.
   CPARSER.Parse(state ? "TuiArg true" : "TuiArg false");
   CPARSER.Parse("TuiScript %s %%1",cmd);
   CPARSER.Parse("TuiCheckboxEnd");
 }
// Records `token` in the accumulator, then rescales the encoding vector so
// its largest component is 1.  When every component is still zero the
// vector is left untouched.
// TODO: what do we want here? A unit vector? Max of 1? (atm it doesn't decay)
void AccumulationEncoder::DoEncoding(int token) {
    AddToken(token);

    // Find the current peak across all dimensions.
    double peak = 0.0;
    for (int d = 0; d < dimensions; ++d)
        peak = std::max(peak, myEncoder[d]);

    // Normalize only when there is something to scale by.
    if (peak > 0) {
        for (int d = 0; d < dimensions; ++d)
            myEncoder[d] /= peak;
    }
}
// State handler: a word followed by blank(s) was read earlier (saved in
// savedToken1) and a new token has now arrived.  Decides between a named
// qualifier declaration and the start of a function/rule.
void CParser::parsingWordBlankS()
{
    if( token.type == TT_Blank ) {
        // Swap the saved word into `token` so StartNamedQualifier sees it,
        // then restore the current token afterwards.
        token.Swap( savedToken1 );
        CQualifierParser::StartNamedQualifier();
        state = IsWrong() ?
                &CParser::parsingIgnoreLine : &CParser::parsingQualifier;
        token.Swap( savedToken1 );
    } else {
        // Otherwise the saved word begins a function definition.
        token.Swap( savedToken1 );
        if( !CRuleParser::BeginFunction() ) { // action
            state = &CParser::parsingIgnoreLine;
            return;
        }
        token.Swap( savedToken1 );
        state = &CParser::parsingRule;
        // Emit the pending qualifier tag first, then the current token,
        // using savedToken2 as scratch so both reach AddToken() in order.
        savedToken2.Swap( token );
        AddToken(); // QualifierTag
        savedToken2.Move( token );
        AddToken(); // current token
    }
}
 // Registers `token` under `cmd` and emits the TUI script describing a
 // slider widget: title, value range [smin, smax], default value, and an
 // optional integer-only flag.  Moving the slider invokes `cmd` with the
 // current value.
 void createSlider(const char *title,NxI32 token,const char *cmd,NxF32 smin,NxF32 smax,NxF32 sdefault,bool isint)
 {
   AddToken(cmd,token);
   CPARSER.Parse("TuiSliderBegin %s", cmd);
   CPARSER.Parse("TuiName \"%s\"",title);
   CPARSER.Parse("TuiSliderRange %f %f",smin,smax);
   CPARSER.Parse("TuiArg %f",sdefault);
   // Integer sliders get an extra marker command.
   if ( isint )
     CPARSER.Parse("TuiInt true");
   CPARSER.Parse("TuiScript %s %%1",cmd);
   CPARSER.Parse("TuiSliderEnd");
 }
  // Registers `token` under the first word of `cmd` and emits the TUI
  // script describing a button titled `title` that runs `cmd` on press.
  // NOTE(review): assumes strlen(cmd) < 512 — confirm at the call sites.
  void createButton(const char *title,NxI32 token,const char *cmd)
  {
    // The token is keyed on the command name alone, so strip arguments.
    char scratch[512];
    strcpy(scratch,cmd);
    char *space = strchr(scratch,' ');
    if ( space ) *space = 0;

    AddToken(scratch,token);
    CPARSER.Parse("TuiButtonBegin %s", scratch);
    CPARSER.Parse("TuiName \"%s\"",title);
    CPARSER.Parse("TuiScript %s",cmd);
    CPARSER.Parse("TuiExecuteOk false");
    // BUG FIX: previously closed with "TuiCheckboxEnd" (copy/paste from
    // createCheckbox); every sibling widget closes with its own End tag
    // (TuiCheckboxEnd, TuiSliderEnd), so the button block must too.
    CPARSER.Parse("TuiButtonEnd");
  }
// State handler for a line that started with blanks: a word may open a
// directive, a line feed resets to the initial state, and anything else
// starts a new rule (feeding it the current token on success).
void CParser::parsingBlank()
{
    if( token.type == TT_Word && CDirectiveParser::StartParseIfDirective() ) {
        state = &CParser::parsingDirective;
        return;
    }
    if( token.type == TT_LineFeed ) {
        state = &CParser::parsingInitial;
        return;
    }
    if( !CRuleParser::BeginRule() ) {
        state = &CParser::parsingIgnoreLine;
        return;
    }
    state = &CParser::parsingRule;
    AddToken();
}
/**
 * This method is called just after a "<!" has been consumed.
 * NOTE: Here we might consume DOCTYPE and "special" markups. 
 * 
 * @param aChar The last character read from the scanner.
 * @param aToken The OUT parameter that holds our resulting token.
 * @param aScanner Our source of data
 * @return Error result.
 */
nsresult
nsHTMLTokenizer::ConsumeSpecialMarkup(PRUnichar aChar,
                                      CToken*& aToken,
                                      nsScanner& aScanner)
{
  // Get the "!"
  aScanner.GetChar(aChar);

  nsresult result = NS_OK;
  nsAutoString theBufCopy;
  // Peek up to 20 chars and uppercase the copy so the markup keyword can
  // be matched case-insensitively without consuming anything.
  aScanner.Peek(theBufCopy, 20);
  ToUpperCase(theBufCopy);
  int32_t theIndex = theBufCopy.Find("DOCTYPE", false, 0, 0);
  nsTokenAllocator* theAllocator = this->GetTokenAllocator();

  if (theIndex == kNotFound) {
    // Not a doctype: decide between CDATA section, markup declaration,
    // and plain comment based on what follows the "<!".
    if ('[' == theBufCopy.CharAt(0)) {
      aToken = theAllocator->CreateTokenOfType(eToken_cdatasection,
                                               eHTMLTag_comment);
    } else if (StringBeginsWith(theBufCopy, NS_LITERAL_STRING("ELEMENT")) ||
               StringBeginsWith(theBufCopy, NS_LITERAL_STRING("ATTLIST")) ||
               StringBeginsWith(theBufCopy, NS_LITERAL_STRING("ENTITY"))  ||
               StringBeginsWith(theBufCopy, NS_LITERAL_STRING("NOTATION"))) {
      aToken = theAllocator->CreateTokenOfType(eToken_markupDecl,
                                               eHTMLTag_markupDecl);
    } else {
      aToken = theAllocator->CreateTokenOfType(eToken_comment,
                                               eHTMLTag_comment);
    }
  } else {
    aToken = theAllocator->CreateTokenOfType(eToken_doctypeDecl,
                                             eHTMLTag_doctypeDecl);
  }

  if (aToken) {
    result = aToken->Consume(aChar, aScanner, mFlags);
    AddToken(aToken, result, &mTokenDeque, theAllocator);
  }

  // The markup turned out not to be a comment after all: reparse as text.
  if (result == kNotAComment) {
    result = ConsumeText(aToken, aScanner);
  }

  return result;
}
/**
 * Consumes a newline, wrapping it in a newline token.
 * Called just after a newline character has been detected.
 *
 * @param aChar The last character read from the scanner.
 * @param aToken The OUT parameter that holds our resulting token.
 * @param aScanner Our source of data
 * @return Error result.
 */
nsresult
nsHTMLTokenizer::ConsumeNewline(PRUnichar aChar,
                                CToken*& aToken,
                                nsScanner& aScanner)
{
  // Eat the newline character itself.
  aScanner.GetChar(aChar);

  nsresult result = NS_OK;
  nsTokenAllocator* theAllocator = this->GetTokenAllocator();
  aToken = theAllocator->CreateTokenOfType(eToken_newline, eHTMLTag_newline);
  if (!aToken) {
    return result;
  }

  // Let the token consume any remaining newline sequence, then queue it.
  result = aToken->Consume(aChar, aScanner, mFlags);
  AddToken(aToken, result, &mTokenDeque, theAllocator);
  return result;
}
/**
 * Consumes a processing instruction.  Called just after a "<?" has been
 * detected (the "<" was consumed by our caller).
 *
 * @param aChar The last character read from the scanner.
 * @param aToken The OUT parameter that holds our resulting token.
 * @param aScanner Our source of data
 * @return Error result.
 */
nsresult
nsHTMLTokenizer::ConsumeProcessingInstruction(PRUnichar aChar,
                                              CToken*& aToken,
                                              nsScanner& aScanner)
{
  // Eat the "?".
  aScanner.GetChar(aChar);

  nsresult result = NS_OK;
  nsTokenAllocator* theAllocator = this->GetTokenAllocator();
  aToken = theAllocator->CreateTokenOfType(eToken_instruction,
                                           eHTMLTag_unknown);
  if (!aToken) {
    return result;
  }

  // Let the token consume the instruction body, then queue it.
  result = aToken->Consume(aChar, aScanner, mFlags);
  AddToken(aToken, result, &mTokenDeque, theAllocator);
  return result;
}
// State handler: the previous token was a word (held in savedToken1).
// A line feed completes it via BeginFunction; a blank defers the decision
// to parsingWordBlank; anything else starts the function and re-emits the
// current token into the rule being parsed.
void CParser::parsingWord()
{
    if( token.type == TT_LineFeed ) {
        // Swap the saved word back into `token` so BeginFunction sees it.
        token.Swap( savedToken1 );
        CRuleParser::BeginFunction(); // action
        state = &CParser::parsingInitial;
    } else if( token.type == TT_Blank ) {
        state = &CParser::parsingWordBlank;
    } else {
        token.Swap( savedToken1 );
        if( !CRuleParser::BeginFunction() ) { // action
            state = &CParser::parsingIgnoreLine;
            return;
        }
        // Restore the current token and feed it to the rule parser.
        token.Swap( savedToken1 );
        state = &CParser::parsingRule;
        AddToken();
    }
}
Beispiel #24
0
void Tokenizer::TokenizeByBrackets() {
  static const string_t kOpeningBrackets = L"[({";
  static const string_t kClosingBrackets = L"])}";

  bool bracket_open = false;
  size_t last_bracket_index = 0;

  TokenRange range;

  for (size_t offset = 0; offset < filename_.size(); offset++) {
    const auto& brackets = bracket_open ? kClosingBrackets : kOpeningBrackets;
    const size_t index = brackets.find(filename_.at(offset));

    // Character is a bracket
    if (index != string_t::npos) {
      // Check if it matches last open bracket
      if (bracket_open) {
        if (index != last_bracket_index)
          continue;
      } else {
        last_bracket_index = index;
      }

      // Add unknown token
      if (range.offset < offset) {
        range.size = offset - range.offset;
        TokenizeByDelimiter(bracket_open, range);
      }
      // Add bracket
      AddToken(kBracket, true, TokenRange(offset, 1));
      bracket_open = !bracket_open;
      range.offset = offset + 1;

    // Character is not a bracket, and the loop reached the end
    } else if (offset == filename_.size() - 1) {
      // Add last unknown token
      range.size = offset - range.offset + 1;
      TokenizeByDelimiter(false, range);
    }
  }
}
Beispiel #25
0
// Parses a parenthesized argument list, counting the arguments into
// mArgCnt.  An argument position holding only a separator is treated as
// an omitted argument and emits valNil.  Marks the input as a formula.
void CParser::ParamList()
{
	int argCount = 0;

	mIsFormula = true;

	while (mLookahead != ')')
	{
		// A separator right away means the argument was omitted.
		if (mLookahead == LIST)
			AddToken(valNil);
		else
			RelExpr();

		// Consume the separator unless the list is about to close.
		if (mLookahead != ')')
			Match(LIST);

		++argCount;
	}

	mArgCnt = argCount;
} // CParser::ParamList
Beispiel #26
0
// Tokenizes the expression in inString (belonging to cell inLocation).
// A leading '=' relational operator marks the input as a formula.
// Returns true on success; a parse error makes it return false unless the
// input is a formula, in which case the exception is rethrown to the
// caller.
bool CParser::Parse(const char *inString, cell inLocation)
{

	//	fprintf( stderr, "CParser::Parse( \"%s\", [ %hd , %hd ] )\n",
	//		inString , inLocation.h , inLocation.v ) ;
	
	mLoc = inLocation;
	mExpr = mExprStart = inString;
	mOffset = 0;
	
	bool result = true;

	try
	{
		mTokenStart = mExpr;
		mLookahead = GetNextToken(true);
		
		// A leading "=" (equality relop) introduces a formula.
		if (mLookahead == RELOP && mRelop == opEQ)
		{
			Match(RELOP);
			mIsFormula = true;
		}
		else
			mIsFormula = false;
	
		RelExpr();
		Match(END);
	
		// Terminate the token stream.
		AddToken(opEnd);
	}
	catch (CErr& e)
	{
		// Formula errors propagate to the caller; plain input just fails.
		if (mIsFormula)
			throw;
		
		result = false;
	}
	
	return result;
} // CParser::Parse
/**
 *  This method is called just after a "&" has been consumed 
 *  and we know we're at the start of an entity.  
 *  
 * @param aChar The last character read from the scanner.
 * @param aToken The OUT parameter that holds our resulting token.
 * @param aScanner Our source of data
 * @return Error result. 
 */
nsresult
nsHTMLTokenizer::ConsumeEntity(PRUnichar aChar,
                               CToken*& aToken,
                               nsScanner& aScanner)
{
  PRUnichar  theChar;
  // Look at the character after the "&" without consuming it.
  nsresult result = aScanner.Peek(theChar, 1);

  nsTokenAllocator* theAllocator = this->GetTokenAllocator();
  if (NS_SUCCEEDED(result)) {
    // An entity must start with a letter ("&amp;") or "#" ("&#160;").
    if (nsCRT::IsAsciiAlpha(theChar) || theChar == kHashsign) {
      aToken = theAllocator->CreateTokenOfType(eToken_entity, eHTMLTag_entity);
      NS_ENSURE_TRUE(aToken, NS_ERROR_OUT_OF_MEMORY);
      result = aToken->Consume(theChar, aScanner, mFlags);

      if (result == NS_HTMLTOKENS_NOT_AN_ENTITY) {
        IF_FREE(aToken, mTokenAllocator);
      } else {
        if (result == kEOF && !aScanner.IsIncremental()) {
          result = NS_OK; // Use as much of the entity as you can get.
        }

        AddToken(aToken, result, &mTokenDeque, theAllocator);
        return result;
      }
    }

    // Oops, we're actually looking at plain text...
    result = ConsumeText(aToken, aScanner);
  } else if (result == kEOF && !aScanner.IsIncremental()) {
    // If the last character in the file is an &, consume it as text.
    result = ConsumeText(aToken, aScanner);
    if (aToken) {
      aToken->SetInError(true);
    }
  }

  return result;
}
/**
 * Consumes a comment.  Called just after a "<!" has been consumed and we
 * know we're at the start of a comment.
 *
 * @param aChar The last character read from the scanner.
 * @param aToken The OUT parameter that holds our resulting token.
 * @param aScanner Our source of data
 * @return Error result.
 */
nsresult
nsHTMLTokenizer::ConsumeComment(PRUnichar aChar,
                                CToken*& aToken,
                                nsScanner& aScanner)
{
  // Eat the "!".
  aScanner.GetChar(aChar);

  nsresult result = NS_OK;
  nsTokenAllocator* theAllocator = this->GetTokenAllocator();
  aToken = theAllocator->CreateTokenOfType(eToken_comment, eHTMLTag_comment);
  if (aToken) {
    result = aToken->Consume(aChar, aScanner, mFlags);
    AddToken(aToken, result, &mTokenDeque, theAllocator);
  }

  if (result == kNotAComment) {
    // AddToken has IF_FREE()'d our token, so reparse the input as text.
    result = ConsumeText(aToken, aScanner);
  }

  return result;
}
Beispiel #29
0
	// Syntax colorizer for C++ source: registers one regex/color pair per
	// token class, in priority order, then finalizes with Setup().
	// NOTE(review): these patterns use '/' where regexes normally use '\'
	// — presumably the host regex engine treats '/' as its escape
	// character; confirm before editing any of them.
	CppColorizer()
	{
		text::ColorEntry entry=win7::Win7GetTextBoxTextColor();
		SetDefaultColor(entry);

		// Numeric literals (integer/float with optional exponent): purple.
		entry.normal.text=Color(128, 0, 255);
		AddToken(L"/d+(./d*)?([eE][+/-]?/d+)?", entry);

		// String literals: dark red.
		entry.normal.text=Color(163, 21, 21);
		AddToken(L"\"([^\\\\\"]|\\\\/.)*\"", entry);

		// Line and block comments: green.
		entry.normal.text=Color(0, 128, 0);
		AddToken(L"////[^\r\n]*", entry);
		AddToken(L"///*(//|[*]*[^*//])*/*+//", entry);
		// debug this: L"//[*]([^*]|[*]+[^//])*[*]+//"

		// Preprocessor directives and keywords: blue.
		entry.normal.text=Color(0, 0, 255);
		AddToken(L"#[a-zA-Z0-9_]*", entry);
		AddToken(CppKeywords, entry);
		
		// Any remaining identifier keeps the default text color.
		AddToken(L"[a-zA-Z0-9_]+", GetDefaultColor());

		Setup();
	}
Beispiel #30
0
// Records the sequence of boolean decisions that encodes one block's
// coefficients (of type 'coeff_type', starting at index 'first', with
// last non-zero index 'last') into the token buffer.  Returns 0 only if
// AddToken fails while emitting the very first end-of-block decision;
// otherwise returns 1.
int VP8RecordCoeffTokens(int ctx, int coeff_type, int first, int last,
                         const int16_t* const coeffs,
                         VP8TBuffer* const tokens) {
  int n = first;
  uint32_t base_id = TOKEN_ID(coeff_type, n, ctx, 0);
  // base_id + 0: "there are (more) non-zero coefficients" decision.
  if (!AddToken(tokens, last >= 0, base_id + 0)) {
    return 0;
  }

  while (n < 16) {
    const int c = coeffs[n++];
    const int sign = c < 0;
    int v = sign ? -c : c;
    // base_id + 1: is this coefficient non-zero?  Zero resets ctx.
    if (!AddToken(tokens, v != 0, base_id + 1)) {
      ctx = 0;
      base_id = TOKEN_ID(coeff_type, VP8EncBands[n], ctx, 0);
      continue;
    }
    // base_id + 2: is |v| greater than one?
    if (!AddToken(tokens, v > 1, base_id + 2)) {
      ctx = 1;
    } else {
      if (!AddToken(tokens, v > 4, base_id + 3)) {
        // 2 <= |v| <= 4: two more decisions pin the exact value.
        if (AddToken(tokens, v != 2, base_id + 4))
          AddToken(tokens, v == 4, base_id + 5);
      } else if (!AddToken(tokens, v > 10, base_id + 6)) {
        // 5 <= |v| <= 10: decisions plus fixed-probability bits.
        if (!AddToken(tokens, v > 6, base_id + 7)) {
          AddConstantToken(tokens, v == 6, 159);
        } else {
          AddConstantToken(tokens, v >= 9, 165);
          AddConstantToken(tokens, !(v & 1), 145);
        }
      } else {
        // |v| > 10: select a VP8Cat3..VP8Cat6 range, then emit the
        // residual (v minus the range base) bit by bit under 'mask'
        // using the per-bit probabilities in 'tab'.
        int mask;
        const uint8_t* tab;
        if (v < 3 + (8 << 1)) {          // VP8Cat3  (3b)
          AddToken(tokens, 0, base_id + 8);
          AddToken(tokens, 0, base_id + 9);
          v -= 3 + (8 << 0);
          mask = 1 << 2;
          tab = VP8Cat3;
        } else if (v < 3 + (8 << 2)) {   // VP8Cat4  (4b)
          AddToken(tokens, 0, base_id + 8);
          AddToken(tokens, 1, base_id + 9);
          v -= 3 + (8 << 1);
          mask = 1 << 3;
          tab = VP8Cat4;
        } else if (v < 3 + (8 << 3)) {   // VP8Cat5  (5b)
          AddToken(tokens, 1, base_id + 8);
          AddToken(tokens, 0, base_id + 10);
          v -= 3 + (8 << 2);
          mask = 1 << 4;
          tab = VP8Cat5;
        } else {                         // VP8Cat6 (11b)
          AddToken(tokens, 1, base_id + 8);
          AddToken(tokens, 1, base_id + 10);
          v -= 3 + (8 << 3);
          mask = 1 << 10;
          tab = VP8Cat6;
        }
        while (mask) {
          AddConstantToken(tokens, !!(v & mask), *tab++);
          mask >>= 1;
        }
      }
      ctx = 2;
    }
    // Sign bit is coded with a fixed probability of 128.
    AddConstantToken(tokens, sign, 128);
    base_id = TOKEN_ID(coeff_type, VP8EncBands[n], ctx, 0);
    // End-of-block check for the next position.
    if (n == 16 || !AddToken(tokens, n <= last, base_id + 0)) {
      return 1;   // EOB
    }
  }
  return 1;
}