/**
 * Dispatches on the current token kind to the matching statement parser.
 * Unknown tokens are reported once per line, then skipped up to the next ';'.
 * Returns the parsed statement node, NULL for skippable input, or &errorNode.
 */
static PSmmAstNode parseStatement(PSmmParser parser) {
	switch (parser->curToken->kind) {
	case tkSmmReturn:
		return parseReturnStmt(parser);
	case '{':
		return (PSmmAstNode)parseBlock(parser, parser->curScope->returnType, false);
	case tkSmmIdent: case '(': case '-': case '+': case tkSmmNot:
	case tkSmmInt: case tkSmmFloat: case tkSmmBool:
		// Anything that can start an expression.
		return parseExpressionStmt(parser);
	case tkSmmIf: case tkSmmWhile:
		return parseIfWhileStmt(parser);
	case tkSmmErr:
		// Lexer already reported this token; resync on ';'.
		if (findToken(parser, ';')) getNextToken(parser);
		return NULL;
	case ';':
		return NULL; // Just skip empty statements
	default:
		// Report at most one "unexpected token" per source line.
		if (parser->lastErrorLine != parser->curToken->filePos.lineNumber) {
			char kindBuf[4];
			const char* tokenStr = smmTokenToString(parser->curToken, kindBuf);
			smmPostMessage(parser->msgs, errSmmGotUnexpectedToken, parser->curToken->filePos, "valid statement", tokenStr);
		}
		getNextToken(parser); // Skip the bad character
		if (findToken(parser, ';')) getNextToken(parser);
		return &errorNode;
	}
}
/* Parse the body of a PL/SQL-style block, tagging what it declares.
 * `depth` tracks nesting; the block ends when the matching "end" is seen.
 * When `local` is true identifiers are tagged as local variables,
 * otherwise as (global) variables.
 */
static void parseBlock (tokenInfo *const token, const boolean local)
{
	int depth = 1;
	while (depth > 0)
	{
		readToken (token);
		switch (token->keyword)
		{
			default:
				/* Plain identifier at declaration level => a variable. */
				if (isType (token, TOKEN_IDENTIFIER))
				{
					if (local)
						makeSqlTag (token, SQLTAG_LOCAL_VARIABLE);
					else
						makeSqlTag (token, SQLTAG_VARIABLE);
				}
				break;
			case KEYWORD_cursor:    parseSimple (token, SQLTAG_CURSOR);  break;
			case KEYWORD_function:  parseSubProgram (token);             break;
			case KEYWORD_procedure: parseSubProgram (token);             break;
			case KEYWORD_subtype:   parseSimple (token, SQLTAG_SUBTYPE); break;
			case KEYWORD_trigger:   parseSimple (token, SQLTAG_TRIGGER); break;
			case KEYWORD_type:      parseType (token);                   break;
			case KEYWORD_end:
				--depth;
				break;
			case KEYWORD_begin:
			{
				/* Executable section: consume it, balancing nested
				 * if/loop constructs against their "end"s. */
				while (depth > 0)
				{
					switch (token->keyword)
					{
						case KEYWORD_if:
						case KEYWORD_loop:
							++depth;
							readToken (token);
							break;
						case KEYWORD_end:
							--depth;
							/* each "end" is followed by ';' */
							findToken (token, TOKEN_SEMICOLON);
							break;
						default:
							readToken (token);
							break;
					}
				}
				break;
			}
		}
		/* Skip to the end of the current declaration/statement. */
		findToken (token, TOKEN_SEMICOLON);
	}
}
static int16_t compressLine(uint8_t *s, int16_t length, int16_t *pGroupTop) { int16_t start, limit, token, groupTop=*pGroupTop; start=0; do { /* write any "noise" characters */ limit=skipNoise((char *)s, start, length); while(start<limit) { groupStore[groupTop++]=s[start++]; } if(start==length) { break; } /* write a word, as token or directly */ limit=getWord((char *)s, start, length); if(limit-start==1) { groupStore[groupTop++]=s[start++]; } else { token=findToken(s+start, (int16_t)(limit-start)); if(token!=-1) { if(token>0xff) { groupStore[groupTop++]=(uint8_t)(token>>8); } groupStore[groupTop++]=(uint8_t)token; start=limit; } else { while(start<limit) { groupStore[groupTop++]=s[start++]; } }
/* If positioned on '(', consume the whole argument list (through the
 * matching close paren) and read the token that follows it.  A no-op
 * when the current token is anything else. */
static void skipArgumentList (tokenInfo *const token)
{
	if (! isType (token, TOKEN_OPEN_PAREN))	/* arguments? */
		return;
	findToken (token, TOKEN_CLOSE_PAREN);
	readToken (token);
}
/**
 * This is called after we already parsed parameters so we expect optional
 * arrow and type and then also optional function body. Func node should
 * have kind, token and params set.
 *
 * Returns the func node on success, or &errorNode when the declaration
 * could not be recovered into a parsable shape.
 */
static PSmmAstNode parseFunction(PSmmParser parser, PSmmAstFuncDefNode func) {
	assert(func->kind == nkSmmFunc && func->token);
	bool ignoreMissingSemicolon = false;
	int curKind = parser->curToken->kind;
	if (curKind != tkSmmRArrow && curKind != '{' && curKind != ';') {
		// Unexpected token right after the parameter list; report unless the
		// lexer already produced an error token for it.
		if (curKind != tkSmmErr) {
			char gotBuf[4];
			const char* got = smmTokenToString(parser->curToken, gotBuf);
			smmPostMessage(parser->msgs, errSmmGotUnexpectedToken, parser->curToken->filePos, "one of '->', '{' or ';'", got);
		}
		// Same line => try to resync on '->'.
		if (!parser->curToken->isFirstOnLine) {
			findToken(parser, tkSmmRArrow);
		}
		// Otherwise assume ';' was forgotten
		ignoreMissingSemicolon = true;
	}
	// Return type defaults to void unless '-> type' follows.
	PSmmTypeInfo typeInfo = &builtInTypes[tiSmmVoid];
	if (parser->curToken->kind == tkSmmRArrow) {
		ignoreMissingSemicolon = false;
		getNextToken(parser);
		typeInfo = parseType(parser);
	}
	func->returnType = typeInfo;
	if (parser->curToken->kind == '{') {
		// Definition with a body; body is parsed in a fresh scope that
		// returns this function's type.
		func->body = parseBlock(parser, typeInfo, true);
	} else if (parser->curToken->kind != ';') {
		if (!ignoreMissingSemicolon && parser->curToken->kind != tkSmmErr) {
			char gotBuf[4];
			const char* got = smmTokenToString(parser->curToken, gotBuf);
			smmPostMessage(parser->msgs, errSmmGotUnexpectedToken, parser->curToken->filePos, "{ or ;", got);
		}
		if (!parser->curToken->isFirstOnLine) {
			// if illegal token is in the same line then we will skip all until terminating token
			findToken(parser, ';');
		}
		// Otherwise we assume ';' is forgotten so we don't do findToken here hoping normal stmt starts next
		return &errorNode;
	}
	// Parameters were pushed into the identifier dictionary while parsing
	// the parameter list; pop them now that the function is complete.
	PSmmAstParamNode param = func->params;
	while (param) {
		ibsDictPop(parser->idents, param->token->repr);
		param = param->next;
	}
	return (PSmmAstNode)func;
}
// Ensure the current SELECT carries a LIMIT clause.
// - No LIMIT present: append "LIMIT <default>" and patch the packet header.
// - "LIMIT n" already last: leave the query untouched.
// - "LIMIT o, n" / "LIMIT n OFFSET o" at the end: rewrite as "LIMIT o+n".
// Returns false when the LIMIT is not the final clause or is malformed.
bool SqlParser::setDefaultLimit() {
	// Lazily cached configured default, falling back to "500".
	static const char *defaultLimit = NULL;
	if (defaultLimit == NULL) {
		defaultLimit = get_config_string("DEFAULT_SELECT_LIMIT");
		if (defaultLimit == NULL)
			defaultLimit = "500";
	}
	int limitPos;
	if (!findToken(1, getTokensLen(), TK_SQL_LIMIT, &limitPos)) {
		// No LIMIT token anywhere: append one and fix up the header length.
		g_string_append(inputSql, " LIMIT ");
		g_string_append(inputSql, defaultLimit);
		network_mysqld_proto_set_header_len((unsigned char *) (inputSql->str), inputSql->len - NET_HEADER_SIZE);
		return true;
	}
	if (limitPos + 3 > getTokensLen() - 1) // no offset
		return true;
	// Anything after "LIMIT a ? b" means LIMIT is not the last clause.
	if (limitPos + 3 < getTokensLen() - 1) {
		printTokens("only queries with LIMIT at the last field is supported now!");
		return false;
	}
	int offset, rowCount;
	if (getTokenId(limitPos + 2) == TK_COMMA) {
		// LIMIT <offset>, <rowCount>
		offset = atoi(getTokenStr(limitPos + 1).c_str());
		rowCount = atoi(getTokenStr(limitPos + 3).c_str());
	} else if (getTokenId(limitPos + 2) == TK_SQL_OFFSET) {
		// LIMIT <rowCount> OFFSET <offset>
		offset = atoi(getTokenStr(limitPos + 3).c_str());
		rowCount = atoi(getTokenStr(limitPos + 1).c_str());
	} else {
		printTokens("Unrecognized LIMIT OFFSET format:");
		return false;
	}
	// TODO: the tokenizer needs to have the field offset info
	// for now, we search LIMIT from the end
	// NOTE(review): assumes an 'L'/'l' byte exists in the buffer (it does,
	// since we found TK_SQL_LIMIT) — otherwise p would walk off the front.
	char *p = inputSql->str + inputSql->len - 1;
	while (toupper(*p) != 'L')
		p--;
	// remove the old LIMIT OFFSET info
	// g_string_truncate(inputSql, p - inputSql->str);
	// NOTE(review): the truncate above is commented out, so the rewritten
	// clause is appended AFTER the original LIMIT text and `p` ends up
	// unused. Looks unintentional — confirm against the commit history.
	// add new LIMIT OFFSET info
	char buff[128];
	snprintf(buff, sizeof (buff), "LIMIT %d", offset + rowCount);
	g_string_append(inputSql, buff);
	network_mysqld_proto_set_header_len((unsigned char *) (inputSql->str), inputSql->len - NET_HEADER_SIZE);
	return true;
}
bool SqlParser::getSqlWhere(int begin, int *start, int *end) const { if (!findToken(begin, tokens->len, TK_SQL_WHERE, start)) return false; (*start)++; int ids[] = {TK_SQL_GROUP, TK_SQL_HAVING, TK_SQL_ORDER, TK_SQL_LIMIT, TK_SQL_PROCEDURE, TK_SQL_INTO, TK_SQL_FOR, TK_SQL_LOCK}; findTokens(*start, tokens->len, ids, sizeof (ids) / sizeof (int), end); return true; }
/* Driver for phase 1: opens the input/output files and runs the
 * load -> clean -> tokenize -> print pipeline over the globals.
 * Fix: the fopen() results were never checked, so a missing input.txt
 * (or unwritable output) crashed later stages; fail fast instead. */
void project1() {
	codeFile = fopen("input.txt", "r");
	if (codeFile == NULL) {
		perror("project1: cannot open input.txt");
		return;
	}
	output = fopen("cleaninput.txt", "w");
	if (output == NULL) {
		perror("project1: cannot open cleaninput.txt");
		fclose(codeFile);
		return;
	}
	initArrays();
	load1();
	printCleanInput();
	cleanArrayList();
	findToken();
	//errorCheck();
	printLexemeTable();
	printTest();
}
/* Log a line produced by the embedded Python side.
 * Classifies the message (alert if it comes from the reactor/traceback
 * modules or contains an alert token; warning on a warning token), writes
 * it to the log, and optionally echoes it to stderr with ANSI colors.
 * Fix: "%s%s"ANSI_RESET (no space) is parsed as a user-defined literal
 * suffix since C++11 and fails to compile; a space restores plain string
 * literal concatenation with identical output. */
void ePythonOutput(const char *file, int line, const char *function, const char *string)
{
#ifdef DEBUG
	char timebuffer[32];
	char header[256];
	char buf[1024];
	char ncbuf[1024];
	bool is_alert = false;
	bool is_warning = false;
	// Python tracebacks / reactor errors are always treated as alerts.
	if(strstr(file, "e2reactor.py") || strstr(file, "traceback.py"))
		is_alert = true;
	printtime(timebuffer, sizeof(timebuffer));
	snprintf(header, sizeof(header), "%s %s:%d %s ", timebuffer, file, line, function);
	snprintf(buf, sizeof(buf), "%s", string);
	removeAnsiEsc(buf, ncbuf);  // classify on the escape-free text
	is_alert |= findToken(ncbuf, alertToken);
	if(!is_alert)
		is_warning = findToken(ncbuf, warningToken);
	singleLock s(DebugLock);
	logOutput(lvlWarning, std::string(header) + std::string(ncbuf));
	if (logOutputConsole)
	{
		if (!logOutputColors)
			fprintf(stderr, "%s%s", header, ncbuf);
		else
		{
			snprintf(header, sizeof(header), \
				"%s%s " /*color of timestamp*/\
				ANSI_CYAN "%s:%d " /*color of filename and linenumber*/\
				ANSI_BCYAN "%s " /*color of functionname*/\
				ANSI_BWHITE /*color of debugmessage*/\
				, is_alert?ANSI_BRED:is_warning?ANSI_BYELLOW:ANSI_WHITE, timebuffer, file, line, function);
			fprintf(stderr, "%s%s" ANSI_RESET, header, buf);
		}
	}
#endif
}
void _eDebug(const char *file, int line, const char *function, const char* fmt, ...) { char timebuffer[32]; char header[256]; char buf[1024]; char ncbuf[1024]; bool is_alert = false; bool is_warning = false; printtime(timebuffer, sizeof(timebuffer)); snprintf(header, sizeof(header), "%s %s:%d %s ", timebuffer, file, line, function); va_list ap; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); removeAnsiEsc(buf, ncbuf); is_alert = findToken(ncbuf, alertToken); if(!is_alert) is_warning = findToken(ncbuf, warningToken); singleLock s(DebugLock); logOutput(lvlDebug, std::string(header) + std::string(ncbuf) + "\n"); if (logOutputConsole) { if (!logOutputColors) fprintf(stderr, "%s%s\n", header , ncbuf); else { snprintf(header, sizeof(header), \ "%s%s " /*color of timestamp*/\ ANSI_GREEN "%s:%d " /*color of filename and linenumber*/\ ANSI_BGREEN "%s " /*color of functionname*/\ ANSI_BWHITE /*color of debugmessage*/\ , is_alert?ANSI_BRED:is_warning?ANSI_BYELLOW:ANSI_WHITE, timebuffer, file, line, function); fprintf(stderr, "%s%s\n"ANSI_RESET, header, buf); } } }
/* Build a rewrite token from the two-character directive at *str.
 * On success advances *str past the directive and returns the new token;
 * returns NULL (leaving *str untouched) when the directive is unknown. */
static rewritetoken *
newRedirectToken(const char **str, int urlEncode)
{
	rewritetoken *result;
	const tokendesc *desc = findToken(*str);
	debug(85, 5) ("newRedirectToken(%s)\n", *str);
	if (desc == NULL) {
		debug(85, 3) ("newRedirectToken: %s => NULL\n", *str);
		return NULL;
	}
	debug(85, 5) ("newRedirectToken: %s => %s\n", *str, tokenNames[desc->type]);
	result = newRedirectTokenStr(desc->type, NULL, 0, urlEncode);
	*str += 2;	/* consume the two-character directive */
	return result;
}
/* Parse a statement that begins with an expression: a plain expression,
 * a declaration ("ident : ...") or an assignment ("lval = ...").
 * Emits a no-effect warning for statements whose value is discarded and
 * resynchronizes on ';' after errors.  Returns the statement node, NULL
 * for statements folded away, or &errorNode. */
static PSmmAstNode parseExpressionStmt(PSmmParser parser) {
	PSmmAstNode lval;
	struct SmmFilePos fpos = parser->curToken->filePos;
	// The leading identifier may be a brand new symbol (declaration).
	parser->curToken->canBeNewSymbol = true;
	lval = parseExpression(parser);
	if (lval == &errorNode) {
		if (findToken(parser, ';')) getNextToken(parser);
		return &errorNode;
	}
	// ':' or '=' after something that isn't an identifier => bad lvalue.
	if (!lval->isIdent && (parser->curToken->kind == ':' || parser->curToken->kind == '=')) {
		smmPostMessage(parser->msgs, errSmmOperandMustBeLVal, fpos);
		if (findToken(parser, ';')) getNextToken(parser);
		return &errorNode;
	}
	if (parser->curToken->kind == ':') {
		lval = parseDeclaration(parser, lval);
	} else if (parser->curToken->kind == '=') {
		lval = parseAssignment(parser, lval);
	}
	if (lval) {
		// A lone identifier, or a non-logical binary op used as a statement,
		// computes a value nobody consumes — warn about it.
		bool isJustIdent = lval->isIdent && (lval->kind != nkSmmCall) && (lval->kind != nkSmmError);
		bool isAnyBinOpExceptLogical = lval->isBinOp && lval->kind != nkSmmAndOp && lval->kind != nkSmmOrOp;
		if (isJustIdent || isAnyBinOpExceptLogical) {
			smmPostMessage(parser->msgs, wrnSmmNoEffectStmt, lval->token->filePos);
			if (isJustIdent) lval = NULL;
		}
	}
	// Require the terminating ';' unless the statement ended with a block.
	if (parser->prevToken->kind != '}' && (lval != &errorNode || parser->curToken->kind == ';')) {
		expect(parser, ';');
	}
	return lval;
}
/* Handle a CREATE TABLE statement: tag the table name when it is followed
 * by a column list, parse that list as a record, then skip to the ';'. */
static void parseTable (tokenInfo *const token)
{
	tokenInfo *const tableName = newToken ();
	readToken (tableName);
	readToken (token);
	if (isType (token, TOKEN_OPEN_PAREN) &&
	    isType (tableName, TOKEN_IDENTIFIER))
	{
		makeSqlTag (tableName, SQLTAG_TABLE);
		parseRecord (token);
	}
	findToken (token, TOKEN_SEMICOLON);
	deleteToken (tableName);
}
/* Handle a PACKAGE [BODY] declaration: tag the package name when an IS
 * clause follows, parse its declaration block, then skip to the ';'. */
static void parsePackage (tokenInfo *const token)
{
	tokenInfo *const pkgName = newToken ();
	readToken (pkgName);
	if (isKeyword (pkgName, KEYWORD_body))
		readToken (pkgName);	/* skip BODY; the real name follows */
	readToken (token);
	if (isKeyword (token, KEYWORD_is))
	{
		if (isType (pkgName, TOKEN_IDENTIFIER))
			makeSqlTag (pkgName, SQLTAG_PACKAGE);
		parseBlock (token, FALSE);	/* package-level declarations are global */
	}
	findToken (token, TOKEN_SEMICOLON);
	deleteToken (pkgName);
}
/*
** tokenize the current line in up to 3 tokens and store those values
** into p->azToken[0], p->azToken[1], and p->azToken[2]. Record the
** current line in p->startLine.
*/
static void tokenizeLine(Script *p){
  int i, j, k;
  int len, n;
  /* Clear any tokens left over from the previous line. */
  for(i=0; i<count(p->azToken); i++) p->azToken[i][0] = 0;
  p->startLine = p->nLine;
  for(i=j=0; j<p->len && i<count(p->azToken); i++){
    /* findToken presumably reports the offset (k) and length (len) of the
    ** next token at/after zLine[j] — TODO confirm against its definition. */
    findToken(&p->zLine[j], &k, &len);
    j += k;                 /* advance to the token start */
    n = len;
    /* Clamp to the fixed-size token buffer, leaving room for the NUL. */
    if( n>=sizeof(p->azToken[0]) ){ n = sizeof(p->azToken[0])-1; }
    memcpy(p->azToken[i], &p->zLine[j], n);
    p->azToken[i][n] = 0;
    j += n+1;               /* skip the token plus the delimiter after it */
  }
}
/* Parse the right-hand side of "lval = expr" (current token is '=').
 * Resyncs on ';' when the expression is bad; passes parameter-definition
 * nodes straight through.  Returns the assignment node typed after lval. */
static PSmmAstNode parseAssignment(PSmmParser parser, PSmmAstNode lval) {
	PSmmToken opToken = parser->curToken;
	getNextToken(parser);
	PSmmAstNode rhs = parseExpression(parser);
	if (rhs == &errorNode) {
		findToken(parser, ';');
		return &errorNode;
	}
	if (rhs->kind == nkSmmParamDefinition)
		return rhs;
	PSmmAstNode stmt = smmNewAstNode(nkSmmAssignment, parser->a);
	stmt->token = opToken;
	stmt->left = lval;
	stmt->right = rhs;
	stmt->type = lval->type;
	return stmt;
}
/* Consume a parenthesized argument list.  Two builds exist:
 * - normal tagging build: skip straight to the ')' and read past it;
 * - TYPE_REFERENCE_TOOL build: walk the list token by token so entity
 *   types after each ':' can be recorded. */
static void parseArguments (tokenInfo *const token)
{
#ifndef TYPE_REFERENCE_TOOL
	/* Fast path: nothing inside the parens matters for tagging. */
	findToken (token, TOKEN_CLOSE_PAREN);
	readToken (token);
#else
	Assert (isType (token, TOKEN_OPEN_PAREN));
	readToken (token);
	do
	{
		if (isType (token, TOKEN_COLON))
			parseEntityType (token);	/* ": type" of an argument */
		else
			readToken (token);
	} while (! isType (token, TOKEN_CLOSE_PAREN));
	readToken (token);
#endif
}
// Accumulate characters into commandLine until a token is recognized or
// the input is exhausted.  '%' starts a comment that is skipped; spaces
// are preserved in the accumulated command line.  Returns the token that
// ended the scan (NOTHING at end of input).
script::token script::getCommandLine()
{
    script::token result = NOTHING;
    commandLine.clear();
    bool done = false;
    while (!done && read.pos < read.text.size()) {
        if (read.text.at(read.pos).toLatin1() == ' ')
            commandLine.append(' ');
        done = findToken(result);
        if (done)
            continue;   // token found: loop condition ends the scan
        if (read.text.at(read.pos).toLatin1() == '%')
            read.runOverComment();
        else
            commandLine.append(read.text.at(read.pos));
        read.pos++;
    }
    return result;
}
/* Parse "return [expr] ;".  The returned node carries the enclosing
 * scope's return type; a bad expression resyncs on ';' and yields
 * &errorNode. */
static PSmmAstNode parseReturnStmt(PSmmParser parser) {
	assert(parser->curToken->kind == tkSmmReturn);
	PSmmToken keyword = parser->curToken;
	getNextToken(parser);
	PSmmAstNode value = NULL;
	if (parser->curToken->kind != ';') {
		value = parseExpression(parser);
		if (value == &errorNode) {
			if (findToken(parser, ';')) getNextToken(parser);
			return &errorNode;
		}
	}
	PSmmAstNode stmt = smmNewAstNode(nkSmmReturn, parser->a);
	stmt->token = keyword;
	stmt->left = value;
	stmt->type = parser->curScope->returnType;
	expect(parser, ';');
	return stmt;
}
/* Hand-written DFA lexer: reads one character at a time from fp and
 * returns the next token.  currState encodes the DFA state (1 = start);
 * characters are accumulated into tk.lexeme at index lexCount.  When a
 * lookahead character is not part of the token, it is "ungotten" by
 * rewinding the file-level bufferIndex so it is re-read next call.
 * NOTE(review): relies on file-level globals (currLine, bufferIndex) and
 * helpers (getCharacter, findToken) whose exact semantics are assumed.
 * NOTE(review): the >30 length checks print the "...length of 20..."
 * message — looks like a copy/paste slip in the message text only. */
tokenInfo getNextToken(FILE *fp)
{
	//char c;
	long long int lexCount=0;	/* next free slot in tk.lexeme */
	int currState=1;		/* DFA start state */
	tokenInfo tk;
	memset(tk.lexeme,'\0', ( sizeof(tk.lexeme)/ sizeof(char) ) );
	//tk = (tokenInfo*) malloc(sizeof(tokenInfo));
	while(1)
	{
		char c = getCharacter(fp);
		if(c==EOF) { c = 127; }	/* map EOF to a sentinel byte */
		tk.lexeme[lexCount] = c ;
		lexCount++;
		switch(currState)
		{
		/* State 1: start — single-char tokens, whitespace, and dispatch
		 * into the multi-character token states. */
		case 1:
			if(c==',') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_COMMA; return tk; }
			else if(c=='[') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_SQL; return tk; }
			else if(c==']') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_SQR; return tk; }
			else if(c==':') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_COLON; return tk; }
			else if(c==';') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_SEM; return tk; }
			else if(c=='.') { tk.lineNum = currLine; tk.lexeme[lexCount]='\0'; tk.tokenType = TK_DOT; return tk; }
			else if(c=='(') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_OP; return tk; }
			else if(c==')') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_CL; return tk; }
			else if(c=='+') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_PLUS; return tk; }
			else if(c=='-') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_MINUS; return tk; }
			else if(c=='*') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_MUL; return tk; }
			else if(c=='/') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_DIV; return tk; }
			else if(c=='~') { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_NOT; return tk; }
			else if(c==127) { tk.lexeme[lexCount]='\0'; tk.lineNum = currLine; tk.tokenType = TK_EOF; return tk; }
			else if(c==' '||c=='\t'||c=='\v'||c=='\r') { lexCount=0; }	/* drop whitespace */
			else if(c=='\n') { currLine++; lexCount=0; }
			else if(c=='@') { currState=11; }	/* "@@@" -> TK_OR */
			else if(c=='&') { currState=21; }	/* "&&&" -> TK_AND */
			else if(c=='=') { currState = 31; }	/* "==" / lone "=" */
			else if(c=='!') { currState=41; }	/* "!=" */
			else if(c=='<') { currState=51; }	/* "<", "<=", "<---" */
			else if(c=='>') { currState=61; }	/* ">", ">=" */
			else if(c=='%') { currState=71; }	/* comment to end of line */
			else if(c=='b' || c=='c' || c=='d') { currState = 81; }	/* maybe [b-d][2-7]+ id */
			else if(c>='a' && c<='z') { currState = 91; }	/* keyword or field id */
			else if(c>='0' && c<='9') { currState = 101; }	/* number */
			else if(c=='_') { currState = 111; }	/* function id */
			else if(c=='#') { currState = 121; }	/* record id */
			else
			{
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				printf("ERROR_2: Unknown Symbol %c at line %d\n",c,currLine);
				sprintf(tk.lexeme,"ERROR_2");
				//sprintf(tk.lexeme,"ERROR_2: Unknown Symbol %c at line %d",c,currLine);
				return tk;
			}
			break;
		/* States 11/12: after '@' / '@@' — only the full "@@@" is valid. */
		case 11:
			if(c=='@') { currState=12; }
			else
			{
				bufferIndex--;	/* unget the offending character */
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");
				//sprintf(tk.lexeme, "ERROR_3:Unknown pattern %s at line %d", temp,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				if(c == '\n')
					currLine++;
				return tk;
			}
			break;
		case 12:
			if(c=='@') { tk.lexeme[lexCount]='\0'; tk.tokenType = TK_OR; tk.lineNum = currLine; return tk; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				if(c == '\n')
					currLine++;
				return tk;
			}
			break;
		/* States 21/22: after '&' / '&&' — only the full "&&&" is valid. */
		case 21:
			if(c=='&') { currState=22; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				//if(c == '\n')
				//	currLine++;
				return tk;
			}
			break;
		case 22:
			if(c=='&') { tk.tokenType = TK_AND; tk.lineNum = currLine; tk.lexeme[lexCount]='\0'; return tk; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				//if(c == '\n')
				//	currLine++;
				return tk;
			}
			break;
		/* State 31: after '=' — "==" is TK_EQ, '=' + whitespace is TK_ASSIGNOP.
		 * NOTE(review): a consumed '\n' here does not bump currLine — confirm. */
		case 31:
			if(c=='=') { tk.tokenType = TK_EQ; tk.lineNum = currLine; tk.lexeme[lexCount]='\0'; return tk; }
			else if(c==' '||c=='\n'||c=='\t'||c=='\r'||c=='\v')
			{ tk.tokenType = TK_ASSIGNOP; tk.lexeme[lexCount]='\0'; tk.lineNum=currLine; return tk; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				//if(c == '\n')
				//	currLine++;
				return tk;
			}
			break;
		/* State 41: after '!' — only "!=" is valid. */
		case 41:
			if(c=='=') { tk.tokenType = TK_NE; tk.lineNum = currLine; tk.lexeme[lexCount]='\0'; return tk; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				//if(c == '\n')
				//	currLine++;
				return tk;
			}
			break;
		/* States 51-53: after '<' — "<=", plain '<', or the "<---" assignment. */
		case 51:
			if(c=='=') { tk.tokenType = TK_LE; tk.lineNum = currLine ; tk.lexeme[lexCount]='\0'; return tk; }
			else if(c=='-') { currState=52; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount-1]='\0';	/* drop the ungotten char from the lexeme */
				tk.tokenType = TK_LT;
				tk.lineNum = currLine;
				return tk;
			}
			break;
		case 52:
			if(c=='-') { currState=53; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				//if(c == '\n')
				//	currLine++;
				return tk;
			}
			break;
		case 53:
			if(c=='-') { tk.tokenType = TK_ASSIGNOP; tk.lineNum = currLine; tk.lexeme[lexCount]='\0'; return tk; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				//if(c == '\n')
				//	currLine++;
				return tk;
			}
			break;
		/* State 61: after '>' — ">=" or plain '>'. */
		case 61:
			if(c=='=') { tk.tokenType = TK_GE; tk.lineNum = currLine ; tk.lexeme[lexCount]='\0'; return tk; }
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount-1]='\0';	// buffer-position handling: drop the ungotten char
				tk.tokenType = TK_GT;
				tk.lineNum = currLine;
				return tk;
			}
			break;
		/* State 71: inside a '%' comment — swallow until newline or EOF. */
		case 71:
			if(c=='\n')
			{
				currLine++;
				currState=1;
				lexCount=0;
				//tk.tokenType= TK_COMMENT;
				//tk.lineNum = currLine ;
				//tk.lexeme[lexCount]='\0';
				//return tk;
			}
			else if(c==127) { tk.tokenType = TK_EOF; tk.lexeme[0]='\0'; return tk ; }
			break;
		/* States 81-83: identifiers starting with b/c/d, which may instead be
		 * the special [b-d][2-7]+ TK_ID form. */
		case 81:
			if(c>='a' && c<='z')
				currState = 91;
			else if(c>='2' && c<='7')
				currState = 82;
			else
			{
				bufferIndex--;
				tk.tokenType = TK_ERROR ;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				//tk.lexeme[lexCount-1]= '\0';
				tk.lineNum = currLine ;
				return tk;
			}
			break;
		case 82:
			if(c>='2' && c<='7') { currState = 83; }
			else if(!(c>='b' && c<='d') )
			{
				if(lexCount>20)
				{
					printf("ERROR_1 : Identifier at line %d is longer than the prescribed length of 20 characters\n",currLine);
					sprintf(tk.lexeme,"ERROR_1");// : Identifier at line %d is longer than the prescribed length of 20 characters",currLine);
					bufferIndex--;
					tk.tokenType = TK_ERROR;
					tk.lineNum = currLine;
					return tk;
				}
				bufferIndex--;
				tk.tokenType = TK_ID;
				tk.lineNum = currLine;
				tk.lexeme[lexCount-1]='\0';
				return tk;
			}
			break;
		case 83:
			if(!(c>='2' && c<='7') )
			{
				if(lexCount>20)
				{
					printf("ERROR_1 : Identifier at line %d is longer than the prescribed length of 20 characters\n",currLine);
					sprintf(tk.lexeme,"ERROR_1");// : Identifier at line %d is longer than the prescribed length of 20 characters",currLine);
					bufferIndex--;
					tk.tokenType = TK_ERROR;
					tk.lineNum = currLine;
					return tk;
				}
				bufferIndex--;
				tk.tokenType = TK_ID;
				tk.lineNum = currLine ;
				tk.lexeme[lexCount-1]='\0';
				return tk;
			}
			break;
		/* State 91: lowercase word — look it up as a keyword (findToken). */
		case 91:
			if ( !(c>='a' && c<='z') )
			{
				if(lexCount>20)
				{
					printf("ERROR_1 : Identifier at line %d is longer than the prescribed length of 20 characters\n",currLine);
					sprintf(tk.lexeme,"ERROR_1");// : Identifier at line %d is longer than the prescribed length of 20 characters",currLine);
					bufferIndex--;
					tk.tokenType = TK_ERROR;
					tk.lineNum = currLine;
					return tk;
				}
				//buffer thing
				bufferIndex--;
				tk.lexeme[lexCount-1] = '\0';
				tk.tokenType = findToken(tk.lexeme);	/* keyword table lookup */
				tk.lineNum = currLine ;
				return tk;
			}
			break;
		/* States 101-103: numbers — integer (TK_NUM) or real (TK_RNUM). */
		case 101:
			if(c>='0' && c<='9')
			{
				if(lexCount>20)
				{
					tk.tokenType = TK_ERROR ;
					tk.lineNum = currLine;
					printf("ERROR_1 : Identifier at line %d is longer than the prescribed length of 20 characters\n",currLine);
					sprintf(tk.lexeme,"ERROR_1");// : Identifier at line %d is longer than the prescribed length of 20 characters",currLine);
					return tk;
				}
			}
			else if(c=='.') { currState = 102; }
			else
			{
				// buffer position handling
				// finish the integer lexeme
				bufferIndex--;
				tk.lexeme[lexCount-1] = '\0';
				tk.tokenType = TK_NUM;
				tk.lineNum = currLine ;
				return tk ;
			}
			break;
		case 102:
			if(c>='0' && c<='9') { currState = 103; }
			else
			{
				bufferIndex--;
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				return tk;
			}
			break;
		case 103:
			if(c>='0' && c<='9')
			{
				tk.tokenType = TK_RNUM;
				tk.lineNum = currLine ;
				tk.lexeme[lexCount] = '\0';
				return tk;
			}
			else
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				tk.tokenType = TK_ERROR ;
				tk.lineNum = currLine;
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				return tk;
			}
			break;
		/* States 111-113: '_'-prefixed function identifiers ("_main" special). */
		case 111:
			if( (c>='a' && c<='z') || (c>='A' && c<='Z') ) { currState = 112; }
			else
			{
				// lone '_' is not a valid token
				bufferIndex--;
				tk.tokenType = TK_ERROR;
				tk.lineNum = currLine;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				return tk;
			}
			break;
		case 112:
			if(c>='0' && c<='9') { currState = 113 ; }
			else if(c==' '|| c=='\t' || c=='\r' || c=='\v' || c=='\n')
			{
				if(lexCount>30)
				{
					printf("ERROR_1 : Identifier at line %d is longer than the prescribed length of 20 characters\n",currLine);
					sprintf(tk.lexeme,"ERROR_1");// : Identifier at line %d is longer than the prescribed length of 20 characters",currLine);
					//bufferIndex--;
					tk.tokenType = TK_ERROR;
					tk.lineNum = currLine;
					return tk;
				}
				bufferIndex--;
				tk.lexeme[lexCount-1] = '\0';
				tk.lineNum = currLine;
				if(strcmp(tk.lexeme,"_main")==0)
					tk.tokenType = TK_MAIN;
				else
					tk.tokenType = TK_FUNID;
				return tk;
			}
			else if( !( (c>='a' && c<='z')|| (c>='A' && c<='Z') || (c>='0' && c<='9') ) )
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR ;
				tk.lineNum = currLine;
				return tk;
			}
			break;
		case 113:
			if(c==' '|| c=='\t' || c=='\r' || c=='\v' || c=='\n')
			{
				if(lexCount>30)
				{
					printf("ERROR_1 : Identifier at line %d is longer than the prescribed length of 20 characters\n",currLine);
					sprintf(tk.lexeme,"ERROR_1");// : Identifier at line %d is longer than the prescribed length of 20 characters",currLine);
					//bufferIndex--;
					tk.tokenType = TK_ERROR;
					tk.lineNum = currLine;
					return tk;
				}
				bufferIndex--;
				tk.lexeme[lexCount-1] = '\0';
				tk.lineNum = currLine;
				if(strcmp(tk.lexeme,"_main")==0)
					tk.tokenType = TK_MAIN;
				else
					tk.tokenType = TK_FUNID;
				return tk;
			}
			else if( !(c>='0' && c<='9') )
			{
				bufferIndex--;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				tk.tokenType = TK_ERROR ;
				tk.lineNum = currLine;
				return tk;
			}
			break;
		/* States 121/122: '#'-prefixed record identifiers. */
		case 121:
			if(c>='a' && c<='z') { currState =122; }
			else
			{
				//buffer stuff
				bufferIndex--;
				tk.tokenType = TK_ERROR ;
				tk.lineNum = currLine ;
				tk.lexeme[lexCount]='\0';
				char temp[lexCount+1];
				strncpy(temp,tk.lexeme,lexCount+1);
				printf("ERROR_3:Unknown pattern %s at line %d\n", temp,currLine);
				sprintf(tk.lexeme, "ERROR_3");//:Unknown pattern %s at line %d", temp,currLine);
				//sprintf(tk.lexeme,"ERROR_3:Unknown pattern %s at line %d", tk.lexeme,currLine);
				if(c=='\n') { currLine++; }
				return tk;
			}
			break;
		case 122:
			if(! ( (c>='a') && (c<='z') ) )
			{
				// end of the record id: unget and terminate the lexeme
				if(lexCount>20)
				{
					printf("ERROR_1 : Identifier at line %d is longer than the prescribed length of 20 characters\n",currLine);
					sprintf(tk.lexeme,"ERROR_1");// : Identifier at line %d is longer than the prescribed length of 20 characters",currLine);
					bufferIndex--;
					tk.tokenType = TK_ERROR;
					tk.lineNum = currLine;
					return tk;
				}
				bufferIndex--;
				tk.lexeme[lexCount-1]='\0';
				tk.tokenType = TK_RECORDID;
				tk.lineNum = currLine ;
				return tk;
			}
			break;
		default:
			break; //this should never be executed
		}
	}
}
// Read paragraph-level settings ("\\noindent", "\\align", ...) from the
// lexer until a token that does not belong to us appears; that token is
// pushed back for the caller.  When `merge` is false the parameters are
// reset to defaults first.
void ParagraphParameters::read(Lexer & lex, bool merge)
{
	if (!merge)
		clear();
	while (lex.isOK()) {
		lex.nextToken();
		string const token = lex.getString();
		if (token.empty())
			continue;
		// Not a backslash tag: it belongs to the enclosing parser.
		if (token[0] != '\\') {
			lex.pushToken(token);
			break;
		}
		if (token == "\\noindent") {
			noindent(true);
		} else if (token == "\\indent") {
			//not found in LyX files but can be used with lfuns
			noindent(false);
		} else if (token == "\\indent-toggle") {
			//not found in LyX files but can be used with lfuns
			noindent(!noindent());
		} else if (token == "\\leftindent") {
			lex.next();
			Length value(lex.getString());
			leftIndent(value);
		} else if (token == "\\start_of_appendix") {
			startOfAppendix(true);
		} else if (token == "\\paragraph_spacing") {
			lex.next();
			string const tmp = rtrim(lex.getString());
			if (tmp == "default") {
				//not found in LyX files but can be used with lfuns
				spacing(Spacing(Spacing::Default));
			} else if (tmp == "single") {
				spacing(Spacing(Spacing::Single));
			} else if (tmp == "onehalf") {
				spacing(Spacing(Spacing::Onehalf));
			} else if (tmp == "double") {
				spacing(Spacing(Spacing::Double));
			} else if (tmp == "other") {
				lex.next();
				spacing(Spacing(Spacing::Other, lex.getString()));
			} else {
				lex.printError("Unknown spacing token: '$$Token'");
			}
		} else if (token == "\\align") {
			lex.next();
			// Map the alignment name to its index; an unknown name (-1)
			// is bumped to 0 so 1<<tmpret is still a valid alignment bit.
			int tmpret = findToken(string_align, lex.getString());
			if (tmpret == -1)
				++tmpret;
			align(LyXAlignment(1 << tmpret));
		} else if (token == "\\labelwidthstring") {
			lex.eatLine();
			labelWidthString(lex.getDocString());
		} else {
			// Unrecognized tag: hand it back and stop.
			lex.pushToken(token);
			break;
		}
	}
}
void ePythonOutput(const char *file, int line, const char *function, const char *string) { #ifdef DEBUG char flagstring[10]; char timebuffer[32]; char header[256]; char buf[2*1024]; char ncbuf[2*1024]; bool is_alert = false; bool is_warning = false; printtime(timebuffer, sizeof(timebuffer)); if(strstr(file, "e2reactor.py") || strstr(file, "traceback.py")) is_alert = true; snprintf(buf, sizeof(buf), "%s", string); removeAnsiEsc(buf, ncbuf); is_alert |= findToken(ncbuf, alertToken); if(!is_alert) is_warning = findToken(ncbuf, warningToken); if(is_alert) snprintf(flagstring, sizeof(flagstring), "%s", "{ E }"); else if(is_warning) snprintf(flagstring, sizeof(flagstring), "%s", "{ W }"); else snprintf(flagstring, sizeof(flagstring), "%s", "{ }"); if(line) snprintf(header, sizeof(header), "%s %s %s:%d %s ", timebuffer, flagstring, file, line, function); else { snprintf(flagstring, sizeof(flagstring), "%s", "{ D }"); snprintf(header, sizeof(header), "%s %s ", timebuffer, flagstring); } singleLock s(DebugLock); logOutput(lvlWarning, std::string(header) + std::string(ncbuf)); if (logOutputConsole) { if (!logOutputColors) { if(m_erroroutput && m_erroroutput->eErrorOutput::pipe_fd[1] && m_erroroutput->eErrorOutput::threadrunning) { int n; char obuf[1024]; snprintf(obuf, sizeof(obuf), "%s%s", header, ncbuf); n=write(m_erroroutput->eErrorOutput::pipe_fd[1], obuf, strlen(obuf)); if(n<0) fprintf(stderr, "[eerror] row %d error: %s\n", __LINE__,strerror(errno)); } else fprintf(stderr, "%s%s", header, ncbuf); } else { if(line) { snprintf(header, sizeof(header), \ "%s" /*newline*/ "%s%s " /*color of timestamp*/\ ANSI_CYAN "%s:%d " /*color of filename and linenumber*/\ ANSI_BCYAN "%s " /*color of functionname*/\ ANSI_BWHITE /*color of debugmessage*/\ , inNoNewLine?"\n":"", is_alert?ANSI_BRED:is_warning?ANSI_BYELLOW:ANSI_WHITE, timebuffer, file, line, function); } else { snprintf(header, sizeof(header), \ "%s" /*newline*/ "%s%s " /*color of timestamp*/\ ANSI_BWHITE /*color of 
debugmessage*/\ , inNoNewLine?"\n":"", ANSI_MAGENTA, timebuffer); } if(m_erroroutput && m_erroroutput->eErrorOutput::pipe_fd[1] && m_erroroutput->eErrorOutput::threadrunning) { int n; char obuf[1024]; snprintf(obuf, sizeof(obuf), "%s%s"ANSI_RESET, header, buf); n=write(m_erroroutput->eErrorOutput::pipe_fd[1], obuf, strlen(obuf)); if(n<0) fprintf(stderr, "[eerror] row %d error: %s\n", __LINE__,strerror(errno)); } else fprintf(stderr, "%s%s"ANSI_RESET, header, buf); } } #endif inNoNewLine = false; }
// Convenience overload: search for pCmp starting at the beginning of the
// token stream.  Delegates to findToken(start, pCmp) with start == 0.
bool Tokenizer::findToken(const char* pCmp)
{
	return findToken(0, pCmp);
}
static int processLine(char *line, int lineLength) { int tokenCount; char *cursor; int i; char *tokens[10]; char buffer[80]; struct timeval done_time; struct timeval cur_time; tokenCount = 0; for (cursor = line, i = 0; i < 10; i++) { if (*cursor == '\0') { tokens[i] = NULL; } else { findToken(&cursor, &(tokens[i])); tokenCount++; } } if (tokenCount == 0) { return 0; } /* Skip over any trailing whitespace. */ while (isspace((int) *cursor)) { cursor++; } /* Make sure we've parsed everything. */ if (*cursor != '\0') { printText("Too many tokens."); return 0; } /* Have parsed the command. Now execute it. */ switch (*(tokens[0])) /* Command code. */ { case 0: /* Empty line. */ case '#': /* Comment. */ return 0; case '?': case 'h': printUsage(); return 0; case 'v': isprintf(buffer, sizeof buffer, "%s", IONVERSIONNUMBER); printText(buffer); return 0; case '1': initializeDtpc(tokenCount, tokens); return 0; case 's': if (attachToDtpc() == 0) { if (tokenCount > 1) { printText("Too many tokens for start \ command."); } else { if (dtpcStart() < 0) { putErrmsg("Can't start DTPC.", NULL); return 0; } } /* Wait for dtpc to start up. */ getCurrentTime(&done_time); done_time.tv_sec += STARTUP_TIMEOUT; while (dtpc_entity_is_started() == 0) { snooze(1); getCurrentTime(&cur_time); if (cur_time.tv_sec >= done_time.tv_sec && cur_time.tv_usec >= done_time.tv_usec) { printText("[?] DTPC start hung \ up, abandoned."); break; } }
/**
 * Start a debug line that will NOT be newline-terminated (subsequent
 * no-newline calls append to it; inNoNewLine is set so the next full line
 * knows to emit a leading newline first).
 *
 * Formats the printf-style message, strips ANSI escapes, scans the result
 * for alert/warning tokens to pick the "< E >" / "< W >" flag, writes the
 * flagged line to the debug log, and optionally echoes it (colored or
 * plain) to the console / error-output pipe.
 *
 * @param file     originating source file name
 * @param line     originating line number
 * @param function originating function name
 * @param fmt, ... printf-style message
 */
void _eDebugNoNewLineStart(const char *file, int line, const char *function, const char* fmt, ...)
{
	char flagstring[10];
	char timebuffer[32];
	char header[256];
	char buf[1024];
	char ncbuf[1024];
	bool is_alert = false;
	bool is_warning = false;
	printtime(timebuffer, sizeof(timebuffer));
	va_list ap;
	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	// classify severity on the escape-free copy of the message
	removeAnsiEsc(buf, ncbuf);
	is_alert = findToken(ncbuf, alertToken);
	if(!is_alert)
		is_warning = findToken(ncbuf, warningToken);
	if(is_alert)
		snprintf(flagstring, sizeof(flagstring), "%s", "< E >");
	else if(is_warning)
		snprintf(flagstring, sizeof(flagstring), "%s", "< W >");
	else
		snprintf(flagstring, sizeof(flagstring), "%s", "< >");
	snprintf(header, sizeof(header), "%s %s %s:%d %s ", timebuffer, flagstring, file, line, function);
	singleLock s(DebugLock);
	logOutput(lvlDebug, std::string(header) + std::string(ncbuf));
	if (logOutputConsole)
	{
		if (!logOutputColors)
		{
			// NOTE(review): obuf is smaller than header+ncbuf combined, so
			// very long messages are truncated here -- confirm intended
			if(m_erroroutput && m_erroroutput->eErrorOutput::pipe_fd[1] && m_erroroutput->eErrorOutput::threadrunning)
			{
				int n;
				char obuf[1024];
				snprintf(obuf, sizeof(obuf), "%s%s", header, ncbuf);
				n=write(m_erroroutput->eErrorOutput::pipe_fd[1], obuf, strlen(obuf));
				if(n<0) fprintf(stderr, "[eerror] row %d error: %s\n", __LINE__,strerror(errno));
			}
			else
				fprintf(stderr, "%s%s", header, ncbuf);
		}
		else
		{
			// rebuild the header with ANSI color codes; the raw (colored)
			// message buffer buf is printed, not the stripped ncbuf
			snprintf(header, sizeof(header),
				ANSI_WHITE "%s%s " /*color of timestamp*/
				ANSI_GREEN "%s:%d " /*color of filename and linenumber*/
				ANSI_BGREEN "%s " /*color of functionname*/
				ANSI_BWHITE /*color of debugmessage*/
				, is_alert?ANSI_BRED:is_warning?ANSI_BYELLOW:ANSI_WHITE, timebuffer, file, line, function);
			if(m_erroroutput && m_erroroutput->eErrorOutput::pipe_fd[1] && m_erroroutput->eErrorOutput::threadrunning)
			{
				int n;
				char obuf[1024];
				snprintf(obuf, sizeof(obuf), "%s%s", header, buf);
				n=write(m_erroroutput->eErrorOutput::pipe_fd[1], obuf, strlen(obuf));
				if(n<0) fprintf(stderr, "[eerror] row %d error: %s\n", __LINE__,strerror(errno));
			}
			else
				fprintf(stderr, "%s%s", header, buf);
		}
	}
	// remember that the current console line is still open
	inNoNewLine = true;
}
int readIonParms(char *configFileName, IonParms *parms) { char ownHostName[MAXHOSTNAMELEN + 1]; char *endOfHostName; char configFileNameBuffer[PATHLENMAX + 1 + 9 + 1]; int configFile; char buffer[512]; int lineNbr; char line[256]; int lineLength; int result; char *cursor; int i; char *tokens[2]; int tokenCount; /* Set defaults. */ CHKERR(parms); memset((char *) parms, 0, sizeof(IonParms)); parms->wmSize = 5000000; parms->wmAddress = 0; /* Dyamically allocated. */ parms->configFlags = SDR_IN_DRAM; parms->heapWords = 250000; parms->heapKey = SM_NO_KEY; istrcpy(parms->pathName, "/usr/ion", sizeof parms->pathName); /* Determine name of config file. */ if (configFileName == NULL) { #ifdef ION_NO_DNS ownHostName[0] = '\0'; #else if (getNameOfHost(ownHostName, MAXHOSTNAMELEN) < 0) { writeMemo("[?] Can't get name of local host."); return -1; } #endif /* Find end of high-order part of host name. */ if ((endOfHostName = strchr(ownHostName, '.')) != NULL) { *endOfHostName = 0; } isprintf(configFileNameBuffer, sizeof configFileNameBuffer, "%.256s.ionconfig", ownHostName); configFileName = configFileNameBuffer; } /* Get overrides from config file. */ configFile = open(configFileName, O_RDONLY, 0777); if (configFile < 0) { if (errno == ENOENT) /* No overrides apply. */ { writeMemo("[i] admin pgm using default SDR parms."); printIonParms(parms); return 0; } isprintf(buffer, sizeof buffer, "[?] admin pgm can't open SDR \ config file '%.255s': %.64s", configFileName, system_error_msg()); writeMemo(buffer); return -1; } isprintf(buffer, sizeof buffer, "[i] admin pgm using SDR parm \ overrides from %.255s.", configFileName); writeMemo(buffer); lineNbr = 0; while (1) { if (igets(configFile, line, sizeof line, &lineLength) == NULL) { if (lineLength == 0) { result = 0; printIonParms(parms); } else { result = -1; writeErrMemo("admin pgm SDR config file igets \ failed"); } break; /* Done. */ } lineNbr++; if (lineLength < 1) { continue; /* Empty line. 
*/ } if (line[0] == '#') /* Comment only. */ { continue; } tokenCount = 0; for (cursor = line, i = 0; i < 2; i++) { if (*cursor == '\0') { tokens[i] = NULL; } else { findToken((char **) &cursor, &(tokens[i])); tokenCount++; } } if (tokenCount != 2) { isprintf(buffer, sizeof buffer, "[?] incomplete SDR \ configuration file line (%d).", lineNbr); writeMemo(buffer); result = -1; break; } if (strcmp(tokens[0], "wmKey") == 0) { parms->wmKey = atoi(tokens[1]); continue; } if (strcmp(tokens[0], "wmSize") == 0) { parms->wmSize = atoi(tokens[1]); continue; } if (strcmp(tokens[0], "wmAddress") == 0) { parms->wmAddress = (char *) atol(tokens[1]); continue; } if (strcmp(tokens[0], "sdrName") == 0) { istrcpy(parms->sdrName, tokens[1], sizeof(parms->sdrName)); continue; } if (strcmp(tokens[0], "configFlags") == 0) { parms->configFlags = atoi(tokens[1]); continue; } if (strcmp(tokens[0], "heapWords") == 0) { parms->heapWords = atoi(tokens[1]); continue; } if (strcmp(tokens[0], "heapKey") == 0) { parms->heapKey = atoi(tokens[1]); continue; } if (strcmp(tokens[0], "pathName") == 0) { istrcpy(parms->pathName, tokens[1], sizeof(parms->pathName)); continue; } isprintf(buffer, sizeof buffer, "[?] unknown SDR config \ keyword '%.32s' at line %d.", tokens[0], lineNbr); writeMemo(buffer); result = -1; break; }
static int processLine(char *line, int lineLength) { int tokenCount; char *cursor; int i; char *tokens[9]; tokenCount = 0; for (cursor = line, i = 0; i < 9; i++) { if (*cursor == '\0') { tokens[i] = NULL; } else { findToken(&cursor, &(tokens[i])); tokenCount++; } } if (tokenCount == 0) { return 0; } /* Skip over any trailing whitespace. */ while (isspace((int) *cursor)) { cursor++; } /* Make sure we've parsed everything. */ if (*cursor != '\0') { printText("Too many tokens."); return 0; } /* Have parsed the command. Now execute it. */ switch (*(tokens[0])) /* Command code. */ { case 0: /* Empty line. */ case '#': /* Comment. */ return 0; case '?': case 'h': printUsage(); return 0; case '1': initializeCfdp(tokenCount, tokens); return 0; case 's': if (attachToCfdp() == 0) { if (tokenCount < 2) { printText("Can't start CFDP: no UTA \ command."); } else { if (cfdpStart(tokens[1]) < 0) { putErrmsg("Can't start CFDP.", NULL); } } }
/**
 * Fetch the next lexical token from the input stream.
 *
 * Advances over whitespace, comment lines, and line boundaries, then
 * classifies the text at the cursor as one of the DSN_* token kinds:
 * parenthesis, dash, number, quoted string (specctra or non-specctra
 * rules), a keyword from the token table, or DSN_SYMBOL as fallback.
 * Updates curTok, curText and curOffset; returns DSN_EOF at end of input.
 *
 * @return the token kind just read (also stored in curTok)
 * @throw IO_ERROR on a malformed quote definition or un-terminated string
 */
int DSNLEXER::NextTok() throw( IO_ERROR )
{
    const char* cur = next;
    const char* head = cur;

    prevTok = curTok;

    if( curTok != DSN_EOF )
    {
        if( cur >= limit )
        {
L_read:
            // blank lines are returned as "\n" and will have a len of 1.
            // EOF will have a len of 0 and so is detectable.
            int len = readLine();
            if( len == 0 )
            {
                cur = start;        // after readLine(), since start can change, set cur offset to start
                curTok = DSN_EOF;
                goto exit;
            }

            cur = start;    // after readLine() since start can change.

            // skip leading whitespace
            while( cur<limit && isSpace(*cur) )
                ++cur;

            // If the first non-blank character is #, this line is a comment.
            // Comments cannot follow any other token on the same line.
            if( cur<limit && *cur=='#' )
            {
                if( commentsAreTokens )
                {
                    // save the entire line, including new line as the current token.
                    // the '#' character may not be at offset zero.
                    curText = start;    // entire line is the token
                    cur = start;        // ensure a good curOffset below
                    curTok = DSN_COMMENT;
                    head = limit;       // do a readLine() on next call in here.
                    goto exit;
                }
                else
                    goto L_read;
            }
        }
        else
        {
            // skip leading whitespace
            while( cur<limit && isSpace(*cur) )
                ++cur;
        }

        if( cur >= limit )
            goto L_read;

        // switching the string_quote character
        if( prevTok == DSN_STRING_QUOTE )
        {
            static const wxString errtxt( _("String delimiter must be a single character of ', \", or $"));

            char cc = *cur;
            switch( cc )
            {
            case '\'':
            case '$':
            case '"':
                break;
            default:
                THROW_PARSE_ERROR( errtxt, CurSource(), CurLine(), CurLineNumber(), CurOffset() );
            }

            curText = cc;

            head = cur+1;

            // the new quote character must stand alone: only (, ) or
            // whitespace may follow it
            if( head<limit && *head!=')' && *head!='(' && !isSpace(*head) )
            {
                THROW_PARSE_ERROR( errtxt, CurSource(), CurLine(), CurLineNumber(), CurOffset() );
            }

            curTok = DSN_QUOTE_DEF;
            goto exit;
        }

        if( *cur == '(' )
        {
            curText = *cur;
            curTok = DSN_LEFT;
            head = cur+1;
            goto exit;
        }

        if( *cur == ')' )
        {
            curText = *cur;
            curTok = DSN_RIGHT;
            head = cur+1;
            goto exit;
        }

        /* get the dash out of a <pin_reference> which is embedded for example
           like: U2-14 or "U2"-"14"
           This is detectable by a non-space immediately preceeding the dash.
        */
        if( *cur == '-' && cur>start && !isSpace( cur[-1] ) )
        {
            curText = '-';
            curTok = DSN_DASH;
            head = cur+1;
            goto exit;
        }

        // handle DSN_NUMBER
        if( strchr( "+-.0123456789", *cur ) )
        {
            head = cur+1;
            while( head<limit && strchr( ".0123456789", *head ) )
                ++head;

            // only a number if terminated by whitespace, (, ) or end of line
            if( (head<limit && isSpace(*head)) || *head==')' || *head=='(' || head==limit )
            {
                curText.clear();
                curText.append( cur, head );
                curTok = DSN_NUMBER;
                goto exit;
            }

            // else it was something like +5V, fall through below
        }

        // a quoted string, will return DSN_STRING
        if( *cur == stringDelimiter )
        {
            // Non-specctraMode, understands and deciphers escaped \, \r, \n, and \".
            // Strips off leading and trailing double quotes
            if( !specctraMode )
            {
                // copy the token, character by character so we can remove doubled up quotes.
                curText.clear();

                ++cur;  // skip over the leading delimiter, which is always " in non-specctraMode

                head = cur;

                while( head<limit )
                {
                    // ESCAPE SEQUENCES:
                    if( *head =='\\' )
                    {
                        char tbuf[8];
                        char c;
                        int  i;

                        if( ++head >= limit )
                            break;  // throw exception at L_unterminated

                        switch( *head++ )
                        {
                        case '"':
                        case '\\':  c = head[-1];   break;
                        case 'a':   c = '\x07';     break;
                        case 'b':   c = '\x08';     break;
                        case 'f':   c = '\x0c';     break;
                        case 'n':   c = '\n';       break;
                        case 'r':   c = '\r';       break;
                        case 't':   c = '\x09';     break;
                        case 'v':   c = '\x0b';     break;

                        case 'x':   // 1 or 2 byte hex escape sequence
                            for( i=0; i<2; ++i )
                            {
                                if( !isxdigit( head[i] ) )
                                    break;
                                tbuf[i] = head[i];
                            }
                            tbuf[i] = '\0';
                            if( i > 0 )
                                c = (char) strtoul( tbuf, NULL, 16 );
                            else
                                c = 'x';    // a goofed hex escape sequence, interpret as 'x'
                            head += i;
                            break;

                        default:    // 1-3 byte octal escape sequence
                            --head;
                            for( i=0; i<3; ++i )
                            {
                                if( head[i] < '0' || head[i] > '7' )
                                    break;
                                tbuf[i] = head[i];
                            }
                            tbuf[i] = '\0';
                            if( i > 0 )
                                c = (char) strtoul( tbuf, NULL, 8 );
                            else
                                c = '\\';   // a goofed octal escape sequence, interpret as '\'
                            head += i;
                            break;
                        }

                        curText += c;
                    }
                    else if( *head == '"' )     // end of the non-specctraMode DSN_STRING
                    {
                        curTok = DSN_STRING;
                        ++head;                 // omit this trailing double quote
                        goto exit;
                    }
                    else
                        curText += *head++;
                }   // while

                // L_unterminated: reached end of input without a closing quote
                wxString errtxt(_("Un-terminated delimited string") );
                THROW_PARSE_ERROR( errtxt, CurSource(), CurLine(), CurLineNumber(), CurOffset() );
            }
            else    // specctraMode DSN_STRING
            {
                ++cur;  // skip over the leading delimiter: ",', or $

                head = cur;

                while( head<limit && !isStringTerminator( *head ) )
                    ++head;

                if( head >= limit )
                {
                    wxString errtxt(_("Un-terminated delimited string") );
                    THROW_PARSE_ERROR( errtxt, CurSource(), CurLine(), CurLineNumber(), CurOffset() );
                }

                curText.clear();
                curText.append( cur, head );

                ++head;     // skip over the trailing delimiter

                curTok = DSN_STRING;
                goto exit;
            }
        }

        // Maybe it is a token we will find in the token table.
        // If not, then call it a DSN_SYMBOL.
        {
            head = cur+1;
            while( head<limit && !isSpace( *head ) && *head!=')' && *head!='(' )
                ++head;

            curText.clear();
            curText.append( cur, head );

            int found = findToken( curText );

            if( found != -1 )
                curTok = found;

            else if( 0 == curText.compare( "string_quote" ) )
                curTok = DSN_STRING_QUOTE;

            else                    // unrecogized token, call it a symbol
                curTok = DSN_SYMBOL;
        }
    }

exit:   // single point of exit, no returns elsewhere please.

    curOffset = cur - start;

    next = head;

    // printf("tok:\"%s\"\n", curText.c_str() );
    return curTok;
}
/* Skip over an argument list: scan forward to the closing parenthesis
 * and, when it was found, read the token that follows it. */
static void parseArguments (tokenInfo *const token)
{
	const boolean foundCloseParen = findToken (token, TOKEN_CLOSE_PAREN);

	if (foundCloseParen)
		readToken (token);
}
/**
 * Decide which database partition(s) a tokenized SQL statement must be
 * routed to, dispatching on the statement's leading keyword token.
 *
 * @param dbs         out: set of database names to route to (cleared first;
 *                    may remain empty for return codes that do not name dbs)
 * @param txLevel     in/out: transaction nesting level; incremented on
 *                    BEGIN/START and "SET autocommit ... 0", decremented on
 *                    COMMIT/ROLLBACK
 * @param parseMaster when true, SELECTs are mapped against master databases
 * @return routing verdict: default db / any or all partitions / lookup
 *         success (dbs filled) / unparsable or lookup error
 */
db_lookup_retval_t SqlParser::parseSql(std::set<std::string> &dbs, int *txLevel, bool parseMaster)
{
	dbs.clear();
	StringVector tables, aliases;
	if (getTokensLen() <= 0) {
		log_warning("empty sql for dababase lookup!\n");
		return RET_ERROR_UNPARSABLE;
	}
	switch (getTokenId(0)) {
	case TK_SQL_SELECT:
	{
		int usemaster = 0;
		if (parseMaster)
			usemaster = 1;
		// special handling for our get unique id function call.
		if (getTokensLen() > 1 && getTokenStr(1) == "get_next_id")
			return RET_USE_DEFAULT_DATABASE;
		int fromStart, fromEnd;
		if (!getSqlFrom(0, &fromStart, &fromEnd)) {
			if ((getTokensLen() > 3 && getTokenId(1) == TK_LITERAL && getTokenId(2) == TK_OBRACE) || (getTokensLen() == 2 && getTokenId(1) == TK_LITERAL)) {
				// for special stored procedures
				return RET_USE_ALL_PARTITIONS;
			}
			printTokens("no FROM found, using default db: ");
			return RET_USE_DEFAULT_DATABASE;
		}
		if (!parseTableNameAndAlias(fromStart, fromEnd, tables, aliases)) {
			printTokens("could not parse table alias, using default db: ");
			return RET_USE_DEFAULT_DATABASE;
		}
		// for non-partitioned tables, we can use any db
		// since each db should have a view of it
		bool partitioned = false;
		for (size_t i = 0; i < tables.size(); i++) {
			if (dbPart->isPartitionedTable(tables[i])) {
				partitioned = true;
				break;
			}
		}
		if (!setDefaultLimit()) {
			printTokens("error in modifying LIMIT: ");
			return RET_ERROR_UNPARSABLE;
		}
		/* if (!partitioned) return ((*txLevel) > 0 ?
		   RET_USE_DEFAULT_DATABASE : RET_USE_ANY_PARTITION); */
		int whereStart, whereEnd;
		if (!getSqlWhere(fromEnd, &whereStart, &whereEnd)) {
			// no WHERE clause: fan the SELECT out to every partition
			// add LIMIT, change the offset to 0 if needed
			uint64_t aa = 0;
			dbPart->getPartitionNum(tables[0], &aa);
			for (size_t i = 0; i < aa; i++) {
				std::string db;
				getDbMapping(tables[0], "", i, db, usemaster, 0);
				if (!db.empty())
					dbs.insert(db);
			}
			return RET_USE_ALL_PARTITIONS;
		}
		for (size_t i = 0; i < tables.size(); i++) {
			std::string partitionKey;
			getPartitionKey(tables[i], partitionKey);
			if (partitionKey.empty()) {
				// non-partitioned table: partition 0 mapping only
				std::string db;
				getDbMapping(tables[i], "", 0, db, usemaster, 0);
				if (!db.empty())
					dbs.insert(db);
				continue;
			}
			std::vector<uint64_t> keyValues;
			if (!findPartitionKeyValue(whereStart, whereEnd, tables[i], aliases[i], partitionKey, keyValues)) {
				printTokens("unrecognized key ranges: ");
				return RET_ERROR_UNPARSABLE;
			}
			if (keyValues.size() == 0) {
				// WHERE clause does not constrain the key: all partitions.
				// NOTE(review): inner loop shadows outer i and maps
				// tables[0], not tables[i] -- confirm intended.
				uint64_t aa = 0;
				dbPart->getPartitionNum(tables[0], &aa);
				for (size_t i = 0; i < aa; i++) {
					std::string db;
					getDbMapping(tables[0], "", i, db, usemaster, 0);
					if (!db.empty())
						dbs.insert(db);
				}
				return RET_USE_ALL_PARTITIONS;
			}
			// find the db partition for all the IDs
			for (size_t k = 0; k < keyValues.size(); k++) {
				std::string db;
				getDbMapping(tables[i], partitionKey, keyValues[k], db, usemaster, 0);
				if (!db.empty())
					dbs.insert(db);
			}
		}
		if (dbs.empty())
			return RET_USE_ALL_PARTITIONS;
		return RET_DB_LOOKUP_SUCCESS;
	}
	case TK_SQL_UPDATE:
	{
		int setPos;
		if (!findToken(0, getTokensLen(), TK_SQL_SET, &setPos)) {
			printTokens("could not find SET in UPDATE: ");
			return RET_ERROR_UNPARSABLE;
		};
		// table name is expected immediately before SET
		if (getTokenId(setPos - 1) != TK_LITERAL) {
			printTokens("expecting table name before SET: ");
			return RET_ERROR_UNPARSABLE;
		}
		std::string table = getTokenStr(setPos - 1);
		// for nonpartitioned tables, update the default master db
		if (!(dbPart->isPartitionedTable(table))) {
			std::string db;
			getDbMapping(table, "", 0, db, 1, 0);
			if (!db.empty())
				dbs.insert(db);
			return RET_USE_ALL_PARTITIONS;
		}
		int whereStart, whereEnd;
		if (!getSqlWhere(setPos + 1, &whereStart, &whereEnd)) {
			printTokens("no WHERE found: ");
			return RET_ERROR_UNPARSABLE;
		}
		std::string partitionKey;
		getPartitionKey(table, partitionKey);
		g_assert(!partitionKey.empty());
		std::vector<uint64_t> keyValues;
		if (!findPartitionKeyValue(whereStart, whereEnd, table, "", partitionKey, keyValues)) {
			printTokens("unrecognized ranges: ");
			return RET_ERROR_UNPARSABLE;
		}
		// find the db partition for all the IDs
		for (size_t k = 0; k < keyValues.size(); k++) {
			std::string db;
			getDbMapping(table, partitionKey, keyValues[k], db, 1, 0);
			if (!db.empty())
				dbs.insert(db);
		}
		if (dbs.empty())
			return RET_USE_ALL_PARTITIONS;
		return RET_DB_LOOKUP_SUCCESS;
	}
	case TK_SQL_INSERT:
	{
		// support format: INSERT ... <table> (...) VALUES (....)
		int pos;
		uint64_t insertid = 0;
		if (!findToken(1, getTokensLen(), TK_LITERAL, &pos)) {
			printTokens("could not find table name: ");
			return RET_ERROR_UNPARSABLE;
		}
		std::string table = getTokenStr(pos);
		std::string partitionKey;
		getPartitionKey(table, partitionKey);
		if (getTokenId(++pos) != TK_OBRACE) {
			printTokens("unrecognized INSERT: ");
			return RET_ERROR_UNPARSABLE;
		}
		pos++;
		std::string autoIncrementColumn;
		dbPart->getAutoIncrementColumn(table, autoIncrementColumn);
		// scan the column list for the partition key and the
		// auto-increment column; positions are relative to 'pos'
		int keyPos = -1;
		int autoColPos = -1;
		for (int i = pos; i < getTokensLen(); i++) {
			if ((getTokenId(i) == TK_CBRACE) || (autoColPos >= 0 && keyPos >= 0))
				break;
			if (getTokenId(i) == TK_LITERAL && tokComp(i, partitionKey) == 0) {
				keyPos = i - pos;
				continue;
			}
			if (getTokenId(i) == TK_LITERAL && tokComp(i, autoIncrementColumn) == 0) {
				autoColPos = i - pos;
			}
		}
		if ((!partitionKey.empty()) && keyPos == -1 && partitionKey != autoIncrementColumn) {
			log_warning("could not find the partition key %s:", partitionKey.c_str());
			printTokens();
			return RET_ERROR_UNPARSABLE;
		}
		if ((!partitionKey.empty()) && keyPos == -1) {
			// special handling for the case in which partition key type is auto increment.
			// need to get the id first and then modify the INSERT
			uint64_t id;
			if (!dbPart->getNextUniqueId(table, &id)) {
				log_warning("could not get next unique id for %s", partitionKey.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
			insertid = id;
			std::string db;
			getDbMapping(table, partitionKey, id, db, 1, id);
			if (!db.empty())
				dbs.insert(db);
			else {
				printTokens("could not find db for id %d: ");
				return RET_DB_LOOKUP_ERROR;
			}
			if (modifySqlForInsert(partitionKey, id)) {
				if (partitionKey == autoIncrementColumn || autoIncrementColumn.empty())
					return RET_DB_LOOKUP_SUCCESS;
			} else {
				log_warning("could not insert id for %s ", partitionKey.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
		}
		if (!autoIncrementColumn.empty() && autoColPos < 0 && (partitionKey != autoIncrementColumn)) {
			// need to get unique ids for auto increment columns
			uint64_t id;
			if (!dbPart->getNextUniqueId(table, &id)) {
				log_warning("could not get next unique id for %s", autoIncrementColumn.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
			insertid = id;
			if (modifySqlForInsert(autoIncrementColumn, id)) {
				// for nonparitioned table INSERT, use the default master db
				if (partitionKey.empty())
					return RET_USE_DEFAULT_DATABASE;
				if (keyPos == -1)
					return RET_DB_LOOKUP_SUCCESS;
			} else {
				log_warning("could not insert id for %s ", autoIncrementColumn.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
		}
		// for nonparitioned table INSERT, use the default master db
		if (partitionKey.empty()) {
			std::string db;
			getDbMapping(table, "", 0, db, 1, insertid);
			if (!db.empty())
				dbs.insert(db);
			return RET_USE_ALL_PARTITIONS;
		}
		// locate the partition key's value inside the VALUES (...) list,
		// at the same relative offset as in the column list
		pos += keyPos;
		int valPos;
		if (!findToken(pos, getTokensLen(), TK_SQL_VALUES, &valPos)) {
			printTokens("VALUES is not found: ");
			return RET_ERROR_UNPARSABLE;
		}
		if (getTokenId(valPos + 1) != TK_OBRACE) {
			printTokens("expecting '(' after VALUES: ");
			return RET_ERROR_UNPARSABLE;
		}
		pos = valPos + 2 + keyPos;
		if (pos < getTokensLen()) {//dqm
		//if (pos < getTokensLen() && getTokenId(pos) == TK_INTEGER) {
			uint64_t id = tokenToUint64(pos);
			std::string db;
			getDbMapping(table, partitionKey, id, db, 1, insertid);
			if (!db.empty())
				dbs.insert(db);
			if (dbs.empty()) {
				printTokens("could not find db mapping: ");
				return RET_ERROR_UNPARSABLE;
			}
			return RET_DB_LOOKUP_SUCCESS;
		} else {
			log_warning("could not recognize value for %s:", partitionKey.c_str());
			printTokens();
			return RET_ERROR_UNPARSABLE;
		}
		break;
	}
	case TK_SQL_REPLACE:
	{
		// support format: replace ... <table> (...) VALUES (....)
		// NOTE(review): this case duplicates TK_SQL_INSERT verbatim
		// (including the "unrecognized INSERT: " message) -- candidate
		// for extraction into a shared helper.
		int pos;
		uint64_t insertid = 0;
		if (!findToken(1, getTokensLen(), TK_LITERAL, &pos)) {
			printTokens("could not find table name: ");
			return RET_ERROR_UNPARSABLE;
		}
		std::string table = getTokenStr(pos);
		std::string partitionKey;
		getPartitionKey(table, partitionKey);
		if (getTokenId(++pos) != TK_OBRACE) {
			printTokens("unrecognized INSERT: ");
			return RET_ERROR_UNPARSABLE;
		}
		pos++;
		std::string autoIncrementColumn;
		dbPart->getAutoIncrementColumn(table, autoIncrementColumn);
		int keyPos = -1;
		int autoColPos = -1;
		for (int i = pos; i < getTokensLen(); i++) {
			if ((getTokenId(i) == TK_CBRACE) || (autoColPos >= 0 && keyPos >= 0))
				break;
			if (getTokenId(i) == TK_LITERAL && tokComp(i, partitionKey) == 0) {
				keyPos = i - pos;
				continue;
			}
			if (getTokenId(i) == TK_LITERAL && tokComp(i, autoIncrementColumn) == 0) {
				autoColPos = i - pos;
			}
		}
		if ((!partitionKey.empty()) && keyPos == -1 && partitionKey != autoIncrementColumn) {
			log_warning("could not find the partition key %s:", partitionKey.c_str());
			printTokens();
			return RET_ERROR_UNPARSABLE;
		}
		if ((!partitionKey.empty()) && keyPos == -1) {
			// special handling for the case in which partition key type is auto increment.
			// need to get the id first and then modify the INSERT
			uint64_t id;
			if (!dbPart->getNextUniqueId(table, &id)) {
				log_warning("could not get next unique id for %s", partitionKey.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
			insertid = id;
			std::string db;
			getDbMapping(table, partitionKey, id, db, 1, id);
			if (!db.empty())
				dbs.insert(db);
			else {
				printTokens("could not find db for id %d: ");
				return RET_DB_LOOKUP_ERROR;
			}
			if (modifySqlForInsert(partitionKey, id)) {
				if (partitionKey == autoIncrementColumn || autoIncrementColumn.empty())
					return RET_DB_LOOKUP_SUCCESS;
			} else {
				log_warning("could not insert id for %s ", partitionKey.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
		}
		if (!autoIncrementColumn.empty() && autoColPos < 0 && (partitionKey != autoIncrementColumn)) {
			// need to get unique ids for auto increment columns
			uint64_t id;
			if (!dbPart->getNextUniqueId(table, &id)) {
				log_warning("could not get next unique id for %s", autoIncrementColumn.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
			insertid = id;
			if (modifySqlForInsert(autoIncrementColumn, id)) {
				// for nonparitioned table INSERT, use the default master db
				if (partitionKey.empty())
					return RET_USE_DEFAULT_DATABASE;
				if (keyPos == -1)
					return RET_DB_LOOKUP_SUCCESS;
			} else {
				log_warning("could not insert id for %s ", autoIncrementColumn.c_str());
				printTokens();
				return RET_DB_LOOKUP_ERROR;
			}
		}
		// for nonparitioned table INSERT, use the default master db
		if (partitionKey.empty()) {
			std::string db;
			getDbMapping(table, "", 0, db, 1, insertid);
			if (!db.empty())
				dbs.insert(db);
			return RET_USE_ALL_PARTITIONS;
		}
		pos += keyPos;
		int valPos;
		if (!findToken(pos, getTokensLen(), TK_SQL_VALUES, &valPos)) {
			printTokens("VALUES is not found: ");
			return RET_ERROR_UNPARSABLE;
		}
		if (getTokenId(valPos + 1) != TK_OBRACE) {
			printTokens("expecting '(' after VALUES: ");
			return RET_ERROR_UNPARSABLE;
		}
		pos = valPos + 2 + keyPos;
		if (pos < getTokensLen()) {//dqm
		//if (pos < getTokensLen() && getTokenId(pos) == TK_INTEGER) {
			uint64_t id = tokenToUint64(pos);
			std::string db;
			getDbMapping(table, partitionKey, id, db, 1, insertid);
			if (!db.empty())
				dbs.insert(db);
			if (dbs.empty()) {
				printTokens("could not find db mapping: ");
				return RET_ERROR_UNPARSABLE;
			}
			return RET_DB_LOOKUP_SUCCESS;
		} else {
			log_warning("could not recognize value for %s:", partitionKey.c_str());
			printTokens();
			return RET_ERROR_UNPARSABLE;
		}
		break;
	}
	case TK_SQL_ALTER:
	{
		// ALTER [IGNORE] TABLE <name> ...
		std::string tableName;
		if (getTokensLen() >= 3 && getTokenId(1) == TK_SQL_TABLE) {
			tableName = getTokenStr(2);
		} else if (getTokensLen() >= 4 && getTokenId(1) == TK_SQL_IGNORE && getTokenId(2) == TK_SQL_TABLE) {
			tableName = getTokenStr(3);
		} else
			break;
		if (dbPart->isPartitionedTable(tableName))
			return RET_USE_ALL_PARTITIONS;
		else
			return RET_USE_DEFAULT_DATABASE;
	}
	case TK_SQL_CALL:
	{
		return RET_USE_ALL_PARTITIONS;
	}
	case TK_SQL_SHOW:
	{
		// SHOW FIELDS FROM <table>
		if (getTokensLen() == 4 && getTokenId(2) == TK_SQL_FROM && strcasecmp(getTokenStr(1).c_str(), "fields") == 0) {
			if (dbPart->isPartitionedTable(getTokenStr(3))) {
				return RET_USE_ANY_PARTITION;
			}
			return RET_USE_DEFAULT_DATABASE;
		}
		if (getTokensLen() == 2 && strcasecmp(getTokenStr(1).c_str(), "tables") == 0) {
			//special handling for show tables;
			// rewrite the query in place to read table names from
			// the kind_setting table instead
			std::string sql = "select table_name ";
			sql.append(" from kind_setting order by table_name");
			g_string_truncate(inputSql, NET_HEADER_SIZE + 1);
			g_string_append_len(inputSql, sql.data(), sql.size());
			network_mysqld_proto_set_header_len((unsigned char *) (inputSql->str), inputSql->len - NET_HEADER_SIZE);
			return RET_USE_DEFAULT_DATABASE;
		} else
			return RET_USE_DEFAULT_DATABASE;
		break;
	}
	case TK_SQL_DELETE:
	{
		int fromPos;
		if (!findToken(1, getTokensLen(), TK_SQL_FROM, &fromPos)) {
			printTokens("could not find FROM in DELETE: ");
			return RET_ERROR_UNPARSABLE;
		};
		if (fromPos >= getTokensLen() - 1) {
			printTokens("could not find table name in DELETE: ");
			return RET_ERROR_UNPARSABLE;
		}
		std::string table = getTokenStr(fromPos + 1);
		// for nonpartitioned tables, update the default master db
		if (!(dbPart->isPartitionedTable(table))) {
			std::string db;
			getDbMapping(table, "", 0, db, 1, 0);
			if (!db.empty())
				dbs.insert(db);
			return RET_USE_ALL_PARTITIONS;
		}
		int whereStart, whereEnd;
		if (!getSqlWhere(fromPos + 1, &whereStart, &whereEnd)) {
			printTokens("no WHERE found: ");
			return RET_ERROR_UNPARSABLE;
		}
		std::string partitionKey;
		getPartitionKey(table, partitionKey);
		g_assert(!partitionKey.empty());
		std::vector<uint64_t> keyValues;
		if (!findPartitionKeyValue(whereStart, whereEnd, table, "", partitionKey, keyValues)) {
			printTokens("unrecognized ranges: ");
			return RET_ERROR_UNPARSABLE;
		}
		// find the db partition for all the IDs
		for (size_t k = 0; k < keyValues.size(); k++) {
			std::string db;
			getDbMapping(table, partitionKey, keyValues[k], db, 1, 0);
			if (!db.empty())
				dbs.insert(db);
		}
		if (dbs.empty())
			return RET_USE_ALL_PARTITIONS;
		return RET_DB_LOOKUP_SUCCESS;
	}
	case TK_SQL_DESC:
	case TK_SQL_DESCRIBE:
	{
		if (getTokensLen() >= 2) {
			std::string tableName = getTokenStr(1);
			if (dbPart->isPartitionedTable(tableName))
				return RET_USE_ANY_PARTITION;
			else
				return RET_USE_DEFAULT_DATABASE;
		}
		return RET_ERROR_UNPARSABLE;
	}
	case TK_SQL_SET:
	{
		// "SET autocommit = 0" opens an implicit transaction
		if ((getTokensLen() >= 4) && (getTokenId(1) == TK_SQL_AUTOCOMMIT) && (getTokenStr(3).compare("0") == 0)) {
			(*txLevel)++;
		}
		return RET_USE_ALL_DATABASES;
	}
	case TK_SQL_START:
	case TK_SQL_BEGIN:
	{
		(*txLevel)++;
		return RET_USE_ALL_DATABASES;
	}
	case TK_SQL_COMMIT:
	case TK_SQL_ROLLBACK:
	{
		(*txLevel)--;
		return RET_USE_ALL_DATABASES;
	}
	default:
	{
		break;
	}
	}
	printTokens("unrecognized query, using default master db: ");
	return RET_USE_DEFAULT_DATABASE;
}