// Parses `sequenceString` into an ordered list of child Commands.
// Tokenizes the string and constructs one Command per recognized token type,
// appending each to `sequence`. Unrecognized token types are skipped.
// l: script line number forwarded to the Command base and children.
// state: interpreter state forwarded to children.
lemonscript::SequentialCommand::SequentialCommand(int l, const LemonScriptState &state, const std::string &sequenceString) : Command(l, state) {
    LemonScriptTokenizer tokenizer(sequenceString);
    std::string token;
    TokenType type;
    int lineNum;
    while(true) {
        std::tie(token, type, lineNum) = tokenizer.nextToken();
        if(type == NOT_A_TOKEN) {
            break;  // tokenizer exhausted
        }
        printTok(token, type, lineNum);
        // BUG FIX: `command` was previously uninitialized; a token type not
        // matching any branch below would push an indeterminate pointer (UB).
        Command *command = nullptr;
        if(type == CppToken) {
            command = new CppCommand(l, state, token);
        } else if(type == WhileAlsoToken) {
            command = new WhileAlsoCommand(l, state, token);
        } else if(type == CompleteAnyToken) {
            command = new CompleteAnyCommand(l, state, token);
        } else if(type == CompleteAllToken) {
            command = new CompleteAllCommand(l, state, token);
        }
        if(command == nullptr) {
            continue;  // unknown token type — nothing to append
        }
        sequence.push_back(command);
    }
}
/* Lexer entry point for HTML-like labels, driven by Expat.
 * Returns the next token id stored into state.tok by the Expat callbacks,
 * or EOF when the input is exhausted (or when Expat support is compiled out).
 * State machine on state.mode:
 *   0 = not started: feed the synthetic "<HTML>" wrapper first;
 *   1 = feeding the caller-supplied text at state.ptr, chunk by chunk;
 *   2 = input consumed: feed the closing "</HTML>" once, then EOF forever.
 */
int htmllex() {
#ifdef HAVE_EXPAT
    static char *begin_html = "<HTML>";
    static char *end_html = "</HTML>";

    char *s;
    char *endp = 0;
    int len, llen;
    int rv;

    state.tok = 0;
    /* Keep feeding chunks to Expat until some callback sets state.tok. */
    do {
        if (state.mode == 2)
            return EOF;
        if (state.mode == 0) {
            /* First call: emit the synthetic opening wrapper tag. */
            state.mode = 1;
            s = begin_html;
            len = strlen(s);
            endp = 0;
        }
        else {
            s = state.ptr;
            if (*s == '\0') {
                /* Real input exhausted: emit the closing wrapper, then EOF next call. */
                state.mode = 2;
                s = end_html;
                len = strlen(s);
            }
            else {
                /* Advance to the next chunk boundary; findNext may also
                 * stage rewritten text in the agxbuf state.lb. */
                endp = findNext(s,&state.lb);
                len = endp - s;
            }
        }

        /* Remember the previous chunk for error-context reporting. */
        state.prevtok = state.currtok;
        state.prevtoklen = state.currtoklen;
        state.currtok = s;
        state.currtoklen = len;

        /* If findNext buffered substitute text, parse that instead of the raw
         * chunk; otherwise parse the chunk directly. isFinal is 1 only when
         * there are zero bytes left to feed. */
        if ((llen = agxblen(&state.lb)))
            rv = XML_Parse(state.parser, agxbuse(&state.lb),llen, 0);
        else
            rv = XML_Parse(state.parser, s, len, (len ? 0 : 1));
        if (rv == XML_STATUS_ERROR) {
            /* Report only the first error; latch state.error so repeated
             * calls don't spam diagnostics. */
            if (!state.error) {
                agerr(AGERR, "%s in line %d \n",
                    XML_ErrorString(XML_GetErrorCode(state.parser)), htmllineno());
                error_context();
                state.error = 1;
                state.tok = T_error;
            }
        }
        if (endp)
            state.ptr = endp;
    } while (state.tok == 0);
#if DEBUG
    printTok (state.tok);
#endif
    return state.tok;
#else
    return EOF;
#endif
}
//create a token of the current word void Scanner::getToken(void) { assert(inputStream.good()); string word = nextWord(); Token tmp; tmp.lexeme = word; //don't lower case for strings if(word[0] != 0x22) tmp.toLowerCase(); //take advantage of the toLowerCase here categorizeToken(tmp.lexeme); //if it was whitespace, grab the next token ( we aren't keeping it anyway ) if(readTokType().compare("whitespace") == 0) { getToken(); //unless it is a string, we want to lowercase everything } else if (readTokType().compare("string") != 0) { token.toLowerCase(); //double check that it is lower case printTok(); } }//getToken