// Maps a keyword string to its EToken enum value.
// The input is first normalized through Lower() (presumably lowercasing, so
// the lookup is case-insensitive — confirm against Lower()'s definition).
// The keyword table is built once, on first call, into a function-local
// static map via the ADD_TOKEN macro.
// NOTE(review): the ADD_TOKEN macro used here is not visible in this chunk;
// presumably it inserts the (stringized, widened) first argument as the key
// mapped to the second argument — verify against its definition.
// NOTE(review): lazy initialization of a function-local static is not
// thread-safe before C++11 — confirm single-threaded use.
EToken ToToken( const wstring& rs )
{
    wstring s = Lower( rs );

    // Keyword -> token table, populated lazily on the first call.
    static map<wstring,EToken> list;
    if( list.empty() )
    {
        ADD_TOKEN( alter , kTokenAlter );
        ADD_TOKEN( analyze , kTokenAnalyze );
        ADD_TOKEN( attach , kTokenAttach );
        ADD_TOKEN( begin , kTokenBegin );
        ADD_TOKEN( case , kTokenCase );
        ADD_TOKEN( close , kTokenClose );
        ADD_TOKEN( commit , kTokenCommit );
        ADD_TOKEN( create , kTokenCreate );
        ADD_TOKEN( cursor , kTokenCursor );
        ADD_TOKEN( dbms_output , kTokenOutput );
        ADD_TOKEN( declare , kTokenDeclare );
        ADD_TOKEN( default , kTokenDefault );
        ADD_TOKEN( delete , kTokenDelete );
        ADD_TOKEN( detach , kTokenDetach );
        ADD_TOKEN( drop , kTokenDrop );
        ADD_TOKEN( else , kTokenElse );
        ADD_TOKEN( elsif , kTokenElsIf );
        ADD_TOKEN( end , kTokenEnd );
        ADD_TOKEN( exception , kTokenException );
        ADD_TOKEN( exec , kTokenExec );
        ADD_TOKEN( execute , kTokenExecute );
        ADD_TOKEN( exit , kTokenExit );
        ADD_TOKEN( explain , kTokenExplain );
        ADD_TOKEN( fetch , kTokenFetch );
        ADD_TOKEN( for , kTokenFor );
        ADD_TOKEN( goto , kTokenGoto );
        ADD_TOKEN( if , kTokenIf );
        ADD_TOKEN( insert , kTokenInsert );
        ADD_TOKEN( into , kTokenInto );
        ADD_TOKEN( loop , kTokenLoop );
        ADD_TOKEN( not , kTokenNot );
        ADD_TOKEN( null , kTokenNull );
        ADD_TOKEN( off , kTokenOff );
        ADD_TOKEN( on , kTokenOn );
        ADD_TOKEN( open , kTokenOpen );
        ADD_TOKEN( pragma , kTokenPragma );
        ADD_TOKEN( print , kTokenPrint );
        ADD_TOKEN( reindex , kTokenReindex );
        ADD_TOKEN( release , kTokenRelease );
        ADD_TOKEN( replace , kTokenReplace );
        ADD_TOKEN( return , kTokenReturn );
        ADD_TOKEN( reverse , kTokenReverse );
        ADD_TOKEN( rollback , kTokenRollback );
        ADD_TOKEN( rowtype , kTokenRowType );
        ADD_TOKEN( savepoint , kTokenSavePoint );
        ADD_TOKEN( select , kTokenSelect );
        ADD_TOKEN( serveroutput , kTokenServerOutput );
        ADD_TOKEN( set , kTokenSet );
        ADD_TOKEN( subtype , kTokenSubType );
        ADD_TOKEN( then , kTokenThen );
        ADD_TOKEN( type , kTokenType );
        ADD_TOKEN( update , kTokenUpdate );
        ADD_TOKEN( vacuum , kTokenVacuum );
        ADD_TOKEN( when , kTokenWhen );
        ADD_TOKEN( while , kTokenWhile );
        // Single-character punctuation tokens.
        ADD_TOKEN( < , kTokenLess );
        ADD_TOKEN( ; , kTokenSemicolon );
        // NOTE(review): this chunk is truncated here — the map lookup, the
        // return statement, and the function's closing braces are not
        // visible in this view.
/**** Global functions definitions. ****/

/* Parses a file-path pattern string (e.g. "C:\a\*AZD?\[S]\data.txt") into
 * the token list held by 'fileIndexPriv', via addToken()/flushFixPart().
 * See the worked example in the comment block further down.
 * Returns TA_SUCCESS on success, or TA_INVALID_PATH / TA_INVALID_FIELD on a
 * malformed pattern.
 */
TA_RetCode TA_FileIndexParsePath( TA_FileIndexPriv *fileIndexPriv, TA_String *path )
{
   /* States of the character-by-character scan below. */
   typedef enum
   {
      INIT_PROCESSING,
      FIX_PROCESSING,
      FIELD_PROCESSING,
      WILD_PROCESSING,
      SEP_PROCESSING
   } State;

   TA_PROLOG

   State currentState;
   const char *currentTokenStart;  /* Start of the token being accumulated. */
   unsigned int length;
   char *str;                      /* Private, writable copy of the pattern. */
   char *pos;                      /* Current scan position within 'str'. */
   char sepTmp[2];                 /* One-character string for the separator token. */
   TA_RetCode retCode;
   unsigned int tokenSize;
   TA_TokenId tokenId;
   const char *sourcePattern;

   TA_TRACE_BEGIN( TA_FileIndexParsePath );

   TA_ASSERT( path != NULL );

   sepTmp[1] = '\0';

   sourcePattern = TA_StringToChar( path );

   /* The following macro should help for the readability of the parsing logic.
    * These macro are used only inside this function.
    */
   #define RETURN(y) {TA_Free(str); TA_TRACE_RETURN( y );}
   #define REJECT_STATE(x,y) { if(currentState==x)RETURN(y); }
   #define CHANGE_STATE(x) {currentState=x; currentTokenStart=pos+1;}

   #define ADD_TOKEN(id,value) \
   { \
      retCode = addToken(fileIndexPriv,id,value); \
      if( retCode != TA_SUCCESS) RETURN(retCode); \
   }

   #define FLUSH_FIX() \
   { \
      retCode = flushFixPart(fileIndexPriv,currentTokenStart,pos); \
      if( retCode != TA_SUCCESS ) RETURN(retCode); \
   }

   /* This function build a list representing the tokens
    * of the sourcePattern.
    *
    * Example: "C:\a\*AZD?\[S]\data.txt" becomes
    *
    *    TokenId           Value
    *    TA_TOK_FIX        "C:"
    *    TA_TOK_SEP        "\"
    *    TA_TOK_FIX        "a"
    *    TA_TOK_SEP        "\"
    *    TA_TOK_WILD       "*"
    *    TA_TOK_FIX        "AZD"
    *    TA_TOK_WILD_CHAR  "?"
    *    TA_TOK_SEP        "\"
    *    TA_TOK_S          "?*"
    *    TA_TOK_SEP        "\"
    *    TA_TOK_FIX        "data.txt"
    *    TA_TOK_END        (null)
    *
    * In the values, the '?' and '*' character represent MS-DOS kind
    * of wildcards:
    *    '?' is any character (but only one).
    *    '*' zero or more of any character
    */
   if( sourcePattern == NULL )
      return TA_INVALID_PATH;

   length = strlen( sourcePattern ) + 1;

   /* Reject an empty pattern and cap the total size (incl. NUL) at 2048. */
   if( (length <= 1) || (length > 2048) )
      return TA_INVALID_PATH;

   /* Work on a private, mutable copy of the pattern. */
   str = (char *)TA_Malloc( length );
   strcpy( str, sourcePattern );

   pos = str;
   currentState = INIT_PROCESSING;
   currentTokenStart = pos;

   while( *pos != '\0' )
   {
      if( (*pos == '\\') || (*pos == '/') )
      {
         /* Handle directories separator character. */
         REJECT_STATE( FIELD_PROCESSING, TA_INVALID_FIELD );
         REJECT_STATE( SEP_PROCESSING, TA_INVALID_PATH );
         FLUSH_FIX();

         #if 0
         !!! Needed?
         /* Check that the string prior to the separator
          * does not terminate with a dot '.'
          */
         if( currentState != INIT_PROCESSING )
         {
            if( *(pos-1) == '.' )
               RETURN( TA_INVALID_PATH );
         }
         #endif

         /* Transform into the directory delimiter
          * used on the host file system.
          */
         sepTmp[0] = (char)TA_SeparatorASCII();
         ADD_TOKEN( TA_TOK_SEP, sepTmp );
         CHANGE_STATE( SEP_PROCESSING );
      }
      else switch( *pos )
      /* NOTE(review): this chunk is truncated here — the switch body and the
       * remainder of the scan loop (where tokenSize/tokenId are presumably
       * used) are not visible in this view.
       */
//////////////////////////////////////////////////////////////////////////
//
//
//
//////////////////////////////////////////////////////////////////////////
// Expands one identifier token (pcText) into pcDest:
//  - If the identifier names a #define: for a function-like define, capture
//    the parenthesised argument list, then expand the define's replacement
//    tokens into a holder appended to pcDest, and finally release the
//    captured arguments.  Returns FALSE if the argument list is missing or
//    has the wrong arity.
//  - Else, if bAllowDefined and the identifier is "defined": evaluate
//    defined(NAME) and append a number token "1"/"0" to pcDest.
//  - Otherwise: duplicate the token into pcDest unchanged.
// iDepth tracks expansion recursion depth (passed on as iDepth+1).
BOOL CPreprocessor::ProcessIdentifier(CPPTokenHolder* pcDest, CPPText* pcText, CPreprocessorTokenParser* pcParser, BOOL bAllowDefined, int iDepth)
{
    CPPToken*           pcToken;
    CDefine*            pcDefine;
    BOOL                bResult;
    CPPAbstractHolder*  pcHolder;
    CPPText*            pcDecorator;
    char*               pcValue;
    int                 i;
    CPPTokenHolder*     pcTokenHolder;
    SDefineArgument*    psArguments;
    int                 iArgIndex;

    pcDefine = mcDefines.GetDefine(&pcText->mcText);
    if (pcDefine)
    {
        iArgIndex = -1;
        if (pcDefine->IsBacketed())
        {
            // Function-like define: consume the name, then capture the
            // parenthesised argument list into a fresh arguments record.
            pcParser->NextToken();
            psArguments = mcArguments.Add(pcDefine->miIndex);
            iArgIndex = mcArguments.mcDefineToArguments.GetIndex(psArguments);
            bResult = FindArguments(pcParser, &psArguments->mcArguments);
            if ((!bResult) || (psArguments->mcArguments.NumElements() != pcDefine->mcArguments.NumElements()))
            {
                //Expected arguments but there weren't any.
                // Release the partially-captured argument holders before
                // failing, so nothing leaks in mcArguments.
                for (i = 0; i < psArguments->mcArguments.NumElements(); i++)
                {
                    pcTokenHolder = psArguments->mcArguments.Get(i);
                    pcTokenHolder->Kill();
                }
                mcArguments.Remove(pcDefine->miIndex);
                return FALSE;
            }
        }
        else
        {
            // Object-like define: just consume the identifier token.
            pcParser->NextToken();
        }

        if (pcDefine->mcReplacement.mcTokens.mcArray.NumElements() > 0)
        {
            // Non-empty replacement list: expand it into a new holder
            // appended to the destination.
            pcHolder = ADD_TOKEN(CPPHolder, &pcDest->mcArray, mpcStack->Add(sizeof(CPPHolder)));
            pcHolder->Init(4, -1, -1);
            ExpandDefined(pcHolder, pcDefine, bAllowDefined, iDepth+1);
        }

        //I'm not sure if it's safe to do this anymore... another define might refer to it.
        if (iArgIndex != -1)
        {
            // Release the captured arguments now that expansion is done.
            psArguments = mcArguments.Get(pcDefine->miIndex);
            for (i = 0; i < psArguments->mcArguments.NumElements(); i++)
            {
                pcTokenHolder = psArguments->mcArguments.Get(i);
                pcTokenHolder->Kill();
            }
            mcArguments.Remove(pcDefine->miIndex);
        }
        return TRUE;
    }
    else if (bAllowDefined)
    {
        // Handle the 'defined(NAME)' operator (used in #if expressions).
        bResult = pcParser->GetExactIdentifier("defined", TRUE, TRUE);
        if (bResult)
        {
            pcParser->GetExactDecorator('(');
            pcParser->SkipWhiteSpace();
            pcToken = pcParser->GetToken();

            // Build a number token holding "0"; flipped to "1" below if the
            // name turns out to be defined.
            pcDecorator = CPPText::Construct(mpcStack->Add(sizeof(CPPText)));
            pcValue = (char*)gcTokenStrings.Add(1);
            *pcValue = '0';
            pcDecorator->Init(PPT_Number, -1, -1, pcValue, pcValue+1);
            if (pcToken)
            {
                if (pcToken->IsText())
                {
                    pcText = (CPPText*)pcToken;
                    if (pcText->meType == PPT_Identifier)
                    {
                        pcDefine = mcDefines.GetDefine(&pcText->mcText);
                        pcParser->NextToken();
                        if (pcDefine)
                        {
                            *pcValue = '1';
                        }
                    }
                }
                pcDest->Add((CPPToken**)&pcDecorator);
                pcParser->SkipWhiteSpace();
                pcParser->GetExactDecorator(')');
            }
            // NOTE(review): when GetToken() returns NULL the "0" token is
            // constructed but never added to pcDest, and the ')' is not
            // consumed — confirm this is the intended behaviour for a bare
            // "defined(" at end of input.
            return TRUE;
        }
    }

    // Plain identifier (no define, not 'defined'): copy it through unchanged.
    pcToken = DuplicatePPToken(pcText, mpcStack);
    pcDest->Add(&pcToken);
    pcParser->NextToken();
    return TRUE;
}