// Print a NUL-terminated string to the console in the given colours,
// scrolling the history buffer when the cursor runs past the end and
// re-anchoring the visible page on a line boundary.
VOID ConsolePrintStr(PUCHAR str, ULONG ForeColor, ULONG BackColor)
{
    CHAR c;
    ULONG OverflowBytes;

    c = *str;
    while (c) {
        if (c == '\n') {
            // Pad with blank cells until the cursor lands on a line start.
            while (!IsNewLine(ConsoleCurrentPtr)) {
                ConsolePrintChar(0, 0, 0);
            }
        } else {
            ConsolePrintChar(c, ForeColor, BackColor);
        }
        c = *(++str);
    }

    if (ConsoleCurrentPtr >= ConsoleEndPtr) {
        // Scroll up by whole rows; one row is ConsoleWidth*2 bytes (char+attr).
        // FIX: the divisor/multiplier must be parenthesised — the original
        // "/ ConsoleWidth*2 ... * ConsoleWidth*2" parsed as
        // "(x / ConsoleWidth) * 2 * ConsoleWidth * 2" and scrolled 4x too far.
        OverflowBytes = ((ConsoleCurrentPtr - ConsoleEndPtr + ConsoleWidth*2) / (ConsoleWidth*2)) * (ConsoleWidth*2);
        ConsoleCurrentPtr -= OverflowBytes;
        // FIX: source and destination overlap, so memmove (not memcpy) is
        // required, and the copy must stop OverflowBytes short of the end of
        // the history buffer to avoid reading past it.
        memmove(ConsoleBuffer, ConsoleBuffer + OverflowBytes,
                ConsoleWidth*2*(ConsoleHistoryHeight) - OverflowBytes);
        memset(ConsoleCurrentPtr, 0, ConsoleEndPtr - ConsoleCurrentPtr);
    }

    // Position the visible page so it ends at the cursor, aligned to a line start.
    if ((ConsoleCurrentPtr - ConsoleWidth*ConsoleHeight*2) <= ConsoleBuffer) {
        ConsoleCurrentPagePtr = ConsoleBuffer;
    } else {
        ConsoleCurrentPagePtr = ConsoleCurrentPtr - ConsoleWidth*ConsoleHeight*2;
        while (!IsNewLine(ConsoleCurrentPagePtr))
            ConsoleCurrentPagePtr += 2;
    }

    if (bScreenBackuped)
        ConsolePrintCurrentPage();
}
// Walk the input character stream, tracking the visual column (tab stop = 4)
// and the text of the current line; reports each completed line via Newline().
// Returns 0 on success, 1 when the reader cannot be opened.
int tool::source::Column::Check()
{
    if (!m_read.Open()) {
        return 1;
    }

    std::size_t column = 1;
    std::string line_text = "";
    while (!m_read.IsEOF()) {
        int ch = m_read.GetCharacter();
        if (IsNewLine(ch)) {
            // Line complete: report it, then reset the trackers.
            Newline(column, line_text);
            column = 1;
            line_text.clear();
        } else if (IsSkip(ch)) {
            continue;
        } else if (std::isspace(ch)) {
            if (ch == ' ') {
                ++column;
                line_text.push_back(ch);
            } else if (ch == '\t') {
                // Advance to the next tab stop of width 4.
                column += (4 - ((column - 1) % 4));
                line_text.push_back(ch);
            }
        } else {
            ++column;
            // Keep at most the buffer limit; mark truncation with an ellipsis once.
            if (line_text.size() < MAXIMUM_STRING_COLUMN_BUFFER_SIZE + 1) {
                line_text.push_back(ch);
            } else if (line_text.size() == MAXIMUM_STRING_COLUMN_BUFFER_SIZE + 1) {
                line_text += "...";
            }
        }
    }
    // Flush a final line that was not terminated by a newline.
    if (!line_text.empty()) {
        Newline(column - 1, line_text);
    }
    return 0;
}
// Consume a run of consecutive new-line codepoints from the reader,
// leaving it positioned at the first non-newline codepoint (or EOF).
static void SkipNewLine(IStringReader * reader)
{
    codepoint_t cp;
    while (reader->TryPeek(&cp) && IsNewLine(cp)) {
        reader->Skip();
    }
}
// Parse an object group header of the form "[NAME]" and make it the current
// override target, creating a new ObjectOverride entry when none exists.
void LanguagePack::ParseGroupObject(IStringReader * reader)
{
    auto sb = StringBuilder();
    codepoint_t cp;

    // Should have already deduced that the next codepoint is a [
    reader->Skip();

    // Collect everything up to the closing ] — a line break aborts the group.
    bool terminated = false;
    while (reader->TryPeek(&cp)) {
        if (IsNewLine(cp)) {
            break;
        }
        reader->Skip();
        if (cp == ']') {
            terminated = true;
            break;
        }
        sb.Append(cp);
    }

    if (!terminated) {
        return;
    }

    SafeFree(_currentGroup);

    // Object identifiers are exactly 8 characters; pad shorter names with
    // spaces. Longer names fail the following length check and are ignored.
    while (sb.GetLength() < 8) {
        sb.Append(' ');
    }
    if (sb.GetLength() == 8) {
        _currentGroup = sb.GetString();
        _currentObjectOverride = GetObjectOverride(_currentGroup);
        _currentScenarioOverride = nullptr;
        if (_currentObjectOverride == nullptr) {
            // First time we see this object: create a zeroed override entry.
            _objectOverrides.push_back(ObjectOverride());
            _currentObjectOverride = &_objectOverrides[_objectOverrides.size() - 1];
            memset(_currentObjectOverride, 0, sizeof(ObjectOverride));
            memcpy(_currentObjectOverride->name, _currentGroup, 8);
        }
    }
}
// Advance the cursor past any run of space and new-line characters,
// stopping at the first character that is neither.
void JsonParser::SkipSpaces()
{
    for (;;) {
        const auto c = *cur_;
        if (IsSpace(c)) {
            Advance();
        } else if (IsNewLine(c)) {
            // New lines need their own handler (line counting etc.).
            AdvanceNewLine();
        } else {
            return;
        }
    }
}
int CubeLexer::SortText(char *SourceText) { unsigned long Offset=0; if (m_Sources) { free(m_Sources); m_SourceOffset=0; m_SortStatus=SORT_STATUS_NORMAL; } if ((m_Sources=(char *)malloc(strlen(SourceText)+1))==NULL) { return FALSE; } m_SortStatus=SORT_STATUS_NEWLINE; while(*SourceText) { if (NextChar(*SourceText)) { if(IsNewLine(*SourceText)&&IsSpacer(m_Sources[Offset-1])) Offset--; if(IsCommentStart(*SourceText)&&IsSpacer(m_Sources[Offset-1])) Offset--; if(IsDelimiter(*SourceText)&&IsSpacer(m_Sources[Offset-1])) Offset--; if(IsSpacer(*SourceText)&&IsDelimiter(m_Sources[Offset-1])) { SourceText++; continue; } m_Sources[Offset++]=*(SourceText); } SourceText++; } m_Sources[Offset]='\0'; return TRUE; }
// Parse a scenario group header of the form "<filename>" and make it the
// current override target, creating a new ScenarioOverride when none exists.
void LanguagePack::ParseGroupScenario(IStringReader * reader)
{
    auto sb = StringBuilder();
    codepoint_t cp;

    // Should have already deduced that the next codepoint is a <
    reader->Skip();

    // Collect everything up to the closing > — a line break aborts the group.
    bool terminated = false;
    while (reader->TryPeek(&cp)) {
        if (IsNewLine(cp)) {
            break;
        }
        reader->Skip();
        if (cp == '>') {
            terminated = true;
            break;
        }
        sb.Append(cp);
    }

    if (!terminated) {
        return;
    }

    SafeFree(_currentGroup);
    _currentGroup = sb.GetString();
    _currentObjectOverride = nullptr;
    _currentScenarioOverride = GetScenarioOverride(_currentGroup);
    if (_currentScenarioOverride == nullptr) {
        // First time we see this scenario: create a zeroed override entry.
        _scenarioOverrides.push_back(ScenarioOverride());
        _currentScenarioOverride = &_scenarioOverrides[_scenarioOverrides.size() - 1];
        Memory::Set(_currentScenarioOverride, 0, sizeof(ScenarioOverride));
        _currentScenarioOverride->filename = sb.GetString();
    }
}
// Parse a "{NAME}" format token (the opening brace has been peeked by the
// caller). On success *token receives the format code — or, for purely
// numeric token names, the clamped byte value with *isByte set.
// Returns false on a line break or whitespace inside the braces.
bool LanguagePack::ParseToken(IStringReader * reader, uint32 * token, bool * isByte)
{
    auto sb = StringBuilder();
    codepoint_t cp;

    // Skip open brace
    reader->Skip();

    // Accumulate the token name up to the closing brace.
    while (reader->TryPeek(&cp)) {
        if (IsNewLine(cp) || IsWhitespace(cp)) {
            return false;
        }
        reader->Skip();
        if (cp == '}') {
            break;
        }
        sb.Append(cp);
    }

    const utf8 * tokenName = sb.GetBuffer();
    *token = format_get_code(tokenName);
    *isByte = false;

    // Handle explicit byte values: unknown names that parse as a decimal
    // number become a raw byte, clamped to 0..255.
    if (*token == 0) {
        int number;
        if (sscanf(tokenName, "%d", &number) == 1) {
            *token = Math::Clamp(0, number, 255);
            *isByte = true;
        }
    }
    return true;
}
/*
 * Locate a [section] / key pair inside an INI-format text buffer.
 *
 * On success, returns 0 and fills the out-parameters with character offsets
 * into v_pszBuf for the section name, the key and the value. Offsets that
 * were not located keep the UNKNOW_POS sentinel. Returns -INI_ERR_PARSE_FILE
 * when the section/key pair is not found.
 */
static int ParseFile(
    const char *v_pszBuf,
    const char *v_pszSection,
    const char *v_pszKey,
    int *v_piSecStart,
    int *v_piSecEnd,
    int *v_piKeyStart,
    int *v_piKeyEnd,
    int *v_piValueStart,
    int *v_piValueEnd)
{
    const char *p = v_pszBuf;
    int i = 0;
    int iSecLen;
    int iKeyLen;

    iSecLen = strlen(v_pszSection);
    iKeyLen = strlen(v_pszKey);

    assert(NULL != v_pszBuf);
    assert((NULL != v_pszSection) && (0 != iSecLen));
    assert((NULL != v_pszKey) && (0 != iKeyLen));

    /* All offsets default to "not found". */
    *v_piSecStart = UNKNOW_POS;
    *v_piSecEnd = UNKNOW_POS;
    *v_piKeyStart = UNKNOW_POS;
    *v_piKeyEnd = UNKNOW_POS;
    *v_piValueStart = UNKNOW_POS;
    *v_piValueEnd = UNKNOW_POS;

    while (!IsStrEnd(p[i])) {
        /* A '[' at the start of the buffer or of a line opens a section header. */
        if (((0 == i) || IsNewLine(p[i-1])) && IsLeftBrace(p[i])) {
            /* found '[' */
            int iSecStart = i + 1;
            /* search for the matching ']' */
            do {
                i++;
            } while (!IsRightBrace(p[i]) && !IsStrEnd(p[i]));
            /* Compare both content and length so a prefix does not match. */
            if ((0 == strncmp(p + iSecStart, v_pszSection, i - iSecStart)) &&
                (iSecLen == i - iSecStart)) {
                int iNewLineStart = 0;
                i++;
                /* skip whitespace after ']' */
                while (isspace(p[i])) {
                    i++;
                }
                /* the requested section was found */
                *v_piSecStart = iSecStart;
                *v_piSecEnd = i;
                /* Scan line by line until the next section header or end of buffer. */
                while (!(IsNewLine(p[i-1]) && IsLeftBrace(p[i])) && !IsStrEnd(p[i])) {
                    int j = 0;
                    /* reached a new line */
                    iNewLineStart = i;
                    while (!IsNewLine(p[i]) && !IsStrEnd(p[i])) {
                        i++;
                    }
                    /* now i is equal to end of the line */
                    j = iNewLineStart;
                    if (';' != p[j]) /* skip over comment */
                    {
                        /* Look for '=' and compare the text before it to the key. */
                        while ((j < i) && ('=' != p[j])) {
                            j++;
                            if ('=' == p[j]) {
                                if ((strncmp(v_pszKey, p+iNewLineStart, j-iNewLineStart) == 0) &&
                                    (iKeyLen == j-iNewLineStart)) {
                                    /*find the key ok */
                                    *v_piKeyStart = iNewLineStart;
                                    *v_piKeyEnd = j-1;
                                    *v_piValueStart = j+1;
                                    *v_piValueEnd = i;
                                    return 0;
                                }
                            }
                        }
                    }
                    i++;
                }
            }
        } else {
            i++;
        }
    }
    return -INI_ERR_PARSE_FILE;
}
// Extract the next lexeme from m_Sources starting at m_SourceOffset.
// The lexeme text is copied into m_CurLexeme (NUL-terminated) and the return
// value classifies it: end / spacer / newline / container / delimiter / token.
// NOTE(review): m_CurLexeme is written without any bounds check — a lexeme
// longer than that buffer overruns it; confirm the buffer size upstream.
unsigned int CubeLexer::GetNextLexeme()
{
    int iLem=0;
    char chBegin;

    // End of source: report a sentinel symbol and an empty lexeme.
    if (m_Sources[m_SourceOffset]=='\0') {
        //printf("<End>\n");
        m_Symbol='0';
        m_CurLexeme[0]='\0';
        return CUBE_LEXER_LEXEME_TYPE_END;
    }
    // Single spacer character.
    if (IsSpacer(m_Sources[m_SourceOffset])) {
        m_Symbol=m_Sources[m_SourceOffset];
        m_CurLexeme[0]=m_Sources[m_SourceOffset];
        m_CurLexeme[1]='\0';
        m_SourceOffset++;
        //printf("<Spacer>\n");
        return CUBE_LEXER_LEXEME_TYPE_SPACER;
    }
    // Single newline character.
    if (IsNewLine(m_Sources[m_SourceOffset])) {
        m_Symbol=m_Sources[m_SourceOffset];
        m_CurLexeme[0]=m_Sources[m_SourceOffset];
        m_CurLexeme[1]='\0';
        m_SourceOffset++;
        //printf("<New line>\n");
        return CUBE_LEXER_LEXEME_TYPE_NEWLINE;
    }
    // Container lexeme: copy everything from the opening character up to and
    // including the matching closing character.
    if (IsContainerStart((chBegin=m_Sources[m_SourceOffset]))) {
        iLem=0;
        m_CurLexeme[iLem++]=m_Sources[m_SourceOffset];
        m_SourceOffset++;
        while (!IsSourcsEnd()&&!IsContainerEnd(chBegin,m_Sources[m_SourceOffset])) {
            m_CurLexeme[iLem++]=m_Sources[m_SourceOffset++];
        }
        // NOTE(review): if the source ends before the container closes, this
        // still copies the terminator position and advances past it — the next
        // call may read beyond the NUL. TODO confirm intended behaviour.
        m_CurLexeme[iLem++]=m_Sources[m_SourceOffset];
        m_CurLexeme[iLem]='\0';
        m_SourceOffset++;
        //printf("<Container> %s\n",m_CurLexeme);
        return CUBE_LEXER_LEXEME_TYPE_CONATINER;
    }
    // Single delimiter character.
    if (IsDelimiter(m_Sources[m_SourceOffset])) {
        m_Symbol=m_Sources[m_SourceOffset];
        m_CurLexeme[0]=m_Sources[m_SourceOffset];
        m_CurLexeme[1]='\0';
        //printf("<Delimiter> %c\n",m_Sources[m_SourceOffset]);
        m_SourceOffset++;
        return CUBE_LEXER_LEXEME_TYPE_DELIMITER;
    }
    // Plain token: run of characters up to the next delimiter, spacer,
    // newline, container start or end of source.
    iLem=0;
    m_Symbol='\0';
    while (!IsSourcsEnd()&&!IsDelimiter(m_Sources[m_SourceOffset])&&!IsSpacer(m_Sources[m_SourceOffset])&&!IsNewLine(m_Sources[m_SourceOffset])&&!IsContainerStart(m_Sources[m_SourceOffset])) {
        m_CurLexeme[iLem++]=m_Sources[m_SourceOffset++];
    }
    m_CurLexeme[iLem]='\0';
    //printf("<Token> %s\n",m_CurLexeme);
    return CUBE_LEXER_LEXEME_TYPE_TOKEN;
}
int CubeLexer::NextChar(char ch) { switch(m_SortStatus) { case SORT_STATUS_NORMAL: if (IsSpacer(ch)) { m_SortStatus=SORT_STATUS_SPACER; return TRUE; } if (IsCommentStart(ch)) { m_SortStatus=SORT_STATUS_COMMENT; m_SortComment=ch; return FALSE; } if (IsNewLine(ch)) { m_SortStatus=SORT_STATUS_NEWLINE; return TRUE; } return TRUE; break; case SORT_STATUS_SPACER: if (IsSpacer(ch)) { return FALSE; } if (IsCommentStart(ch)) { m_SortStatus=SORT_STATUS_COMMENT; m_SortComment=ch; return FALSE; } if (IsNewLine(ch)) { m_SortStatus=SORT_STATUS_NEWLINE; return TRUE; } m_SortStatus=SORT_STATUS_NORMAL; return TRUE; break; case SORT_STATUS_COMMENT: if (IsCommentEnd(m_SortComment,ch)) { if(IsNewLine(ch)&&m_SortStatus!=SORT_STATUS_NEWLINE) { m_SortStatus=SORT_STATUS_NEWLINE; return TRUE; } m_SortStatus=SORT_STATUS_NORMAL; } return FALSE; break; case SORT_STATUS_NEWLINE: if (IsSpacer(ch)||IsNewLine(ch)) { return FALSE; } if (IsCommentStart(ch)) { m_SortStatus=SORT_STATUS_COMMENT; m_SortComment=ch; return FALSE; } m_SortStatus=SORT_STATUS_NORMAL; return TRUE; } return TRUE; }
// A two-character sequence forms a valid escape when it begins with a
// backslash that is not immediately followed by a new-line character.
static inline bool IsValidEscape(char16_t aFirst, char16_t aSecond)
{
    if (aFirst != '\\') {
        return false;
    }
    return !IsNewLine(aSecond);
}
// Parse one "STR_XXXX :value" line from the language file: expand {TOKEN}
// sequences, append the resulting text to _stringDataSB, and record the
// string's relative offset under the current group (or the global table).
// Any syntax error causes the whole line to be ignored silently.
void LanguagePack::ParseString(IStringReader *reader)
{
    auto sb = StringBuilder();
    codepoint_t codepoint;

    // Parse string identifier
    while (reader->TryPeek(&codepoint)) {
        if (IsNewLine(codepoint)) {
            // Unexpected new line, ignore line entirely
            return;
        } else if (!IsWhitespace(codepoint) && codepoint != ':') {
            reader->Skip();
            sb.Append(codepoint);
        } else {
            break;
        }
    }
    SkipWhitespace(reader);

    // Parse a colon
    if (!reader->TryPeek(&codepoint) || codepoint != ':') {
        // Expected a colon, ignore line entirely
        return;
    }
    reader->Skip();

    // Validate identifier
    const utf8 *identifier = sb.GetBuffer();
    int stringId;
    if (_currentGroup == nullptr) {
        // Outside any group the identifier must be a numeric STR_#### id.
        if (sscanf(identifier, "STR_%4d", &stringId) != 1) {
            // Ignore line entirely
            return;
        }
    } else {
        // Inside a group only fixed field names are accepted; object fields
        // (NAME/DESC/CPTY) and scenario fields (SCNR/PARK/DTLS) share
        // slots 0..2 of the override's strings array.
        if (strcmp(identifier, "STR_NAME") == 0) {
            stringId = 0;
        } else if (strcmp(identifier, "STR_DESC") == 0) {
            stringId = 1;
        } else if (strcmp(identifier, "STR_CPTY") == 0) {
            stringId = 2;
        } else if (strcmp(identifier, "STR_SCNR") == 0) {
            stringId = 0;
        } else if (strcmp(identifier, "STR_PARK") == 0) {
            stringId = 1;
        } else if (strcmp(identifier, "STR_DTLS") == 0) {
            stringId = 2;
        } else {
            // Ignore line entirely
            return;
        }
    }

    // Rest of the line is the actual string
    sb.Clear();
    while (reader->TryPeek(&codepoint) && !IsNewLine(codepoint)) {
        if (codepoint == '{') {
            // {TOKEN} sequences become format codes, or a single raw byte
            // when the token name is a plain decimal number.
            uint32 token;
            bool isByte;
            if (ParseToken(reader, &token, &isByte)) {
                if (isByte) {
                    sb.Append((const utf8*)&token, 1);
                } else {
                    sb.Append((int)token);
                }
            } else {
                // Syntax error or unknown token, ignore line entirely
                return;
            }
        } else {
            reader->Skip();
            sb.Append(codepoint);
        }
    }

    // Append a null terminator for the benefit of the last string
    _stringDataSB.Append('\0');

    // Get the relative offset to the string (add the base offset when we extract the string properly)
    utf8 * relativeOffset = (utf8*)_stringDataSB.GetLength();

    if (_currentGroup == nullptr) {
        // Make sure the list is big enough to contain this string id
        while (_strings.size() <= (size_t)stringId) {
            _strings.push_back(nullptr);
        }
        _strings[stringId] = relativeOffset;
    } else {
        if (_currentObjectOverride != nullptr) {
            _currentObjectOverride->strings[stringId] = relativeOffset;
        } else {
            // NOTE(review): assumes a scenario group is active whenever no
            // object override is — _currentScenarioOverride must be non-null
            // here; TODO confirm against the group-parsing code paths.
            _currentScenarioOverride->strings[stringId] = relativeOffset;
        }
    }

    _stringDataSB.Append(&sb);
}