// Classifies the text at szText and reports its type and consumed length.
// When terminal emulation is enabled (emulate or skip mode), single control
// characters and escape sequences are recognized; otherwise the text is
// treated as plain tokens via FindNextToken.
//
// szText    - input text to classify (may be re-pointed at m_szEscCode below)
// nTextLen  - number of bytes available at szText
// eTextType - out: classification of the leading token
// nLineLen  - out: number of bytes the token spans
void CScrollBufL1::ParseText(char *szText, int nTextLen, ETextType *eTextType, int *nLineLen)
{
    if ((m_nEmulation == SCROLLBUFF_CFG_EMM_EMMULATE) || (m_nEmulation == SCROLLBUFF_CFG_EMM_SKIP)) {
        if (m_nBrokenEscLen > 0) {
            // A previous chunk ended mid-escape-sequence: append the new text
            // to the saved prefix (bounded so m_szEscCode cannot overflow) and
            // reparse from the reassembled buffer.
            // NOTE(review): strncat's limit is remaining capacity minus the
            // terminator; assumes m_nBrokenEscLen == strlen(m_szEscCode) — confirm.
            strncat(m_szEscCode, szText, sizeof(m_szEscCode) - 1 - m_nBrokenEscLen);
            szText = m_szEscCode;
        }
        switch (*szText) {
            // Single-byte control characters map directly onto ETextType values.
            case LINEBUF_CARRIAGE_RETURN:
            case LINEBUF_BEEP:
            case LINEBUF_TAB:
            case LINEBUF_BACKSPACE:
            case LINEBUF_NEWLINE:
                (*eTextType) = (ETextType)(*szText);
                (*nLineLen) = 1;
                break;
            case LINEBUF_ESCAPE_BRACKET:
                // ESC sequence: delegate to the escape-code parser.
                ParseEscapeBracket(szText, nTextLen, eTextType, nLineLen);
                break;
            default:
                // Ordinary printable text.
                FindNextToken(szText, nTextLen, eTextType, nLineLen);
                break;
        }
    } else {
        // Emulation disabled: everything is plain text.
        FindNextToken(szText, nTextLen, eTextType, nLineLen);
    }
}
bool Parser::CheckAnimeSeasonKeyword(const token_iterator_t token) { auto set_anime_season = [&](token_iterator_t first, token_iterator_t second, const string_t& content) { elements_.insert(kElementAnimeSeason, content); first->category = kIdentifier; second->category = kIdentifier; }; auto previous_token = FindPreviousToken(tokens_, token, kFlagNotDelimiter); if (previous_token != tokens_.end()) { auto number = GetNumberFromOrdinal(previous_token->content); if (!number.empty()) { set_anime_season(previous_token, token, number); return true; } } auto next_token = FindNextToken(tokens_, token, kFlagNotDelimiter); if (next_token != tokens_.end() && IsNumericString(next_token->content)) { set_anime_season(token, next_token, next_token->content); return true; } return false; }
// ------------------------------------------------------------------------------------------------ Lexer::Lexer(const LexicalEntry* _rules, const char* _stringToLex, StateObject* _state) : mStringToLex(_stringToLex) , mSrcPos(mStringToLex.cbegin()) , mState(_state) { ConstructRules(_rules); mLookaheadToken = FindNextToken(); }
bool Parser::IsTokenIsolated(const token_iterator_t token) const { auto previous_token = FindPreviousToken(tokens_, token, kFlagNotDelimiter); if (previous_token == tokens_.end() || previous_token->category != kBracket) return false; auto next_token = FindNextToken(tokens_, token, kFlagNotDelimiter); if (next_token == tokens_.end() || next_token->category != kBracket) return false; return true; }
bool Parser::NumberComesBeforeTotalNumber(const token_iterator_t token) { auto next_token = FindNextToken(tokens_, token, kFlagNotDelimiter); if (next_token != tokens_.end()) { if (IsStringEqualTo(next_token->content, L"of")) { auto other_token = FindNextToken(tokens_, next_token, kFlagNotDelimiter); if (other_token != tokens_.end()) { if (IsNumericString(other_token->content)) { SetEpisodeNumber(token->content, *token, false); next_token->category = kIdentifier; other_token->category = kIdentifier; return true; } } } } return false; }
// Extracts the next info token from the raw stream.
//
// pcs           - out: points at the raw token start (past an opening quote, if any)
// iLen          - out: length of the parsed token
// pbMoreFollows - optional out: cleared when no further token exists
//
// NOTE(review): pbPos and sz are members not visible here — presumably pbPos
// is the current read cursor and ParseToken fills the member buffer sz; iLen
// reflects sz's length, not the raw span at pcs. Confirm against the class.
void RawParser::ParseInfoToken(const char *&pcs, int &iLen, bool *pbMoreFollows)
{
    if (!FindNextToken()) {
        // Stream exhausted: signal the caller (pcs/iLen left untouched).
        if (pbMoreFollows)
            *pbMoreFollows = false;
        return;
    }
    pcs = (const char *)pbPos;
    if (*pcs == '"')
        pcs++; // skip an opening quote so pcs points at the content
    ParseToken(sz, pbMoreFollows);
    iLen = int(strlen(sz));
}
// Handles releases that carry two equivalent episode numbers, e.g.
// "01 (13)" where one is the season-relative and one the absolute number.
// `tokens` holds indices (into tokens_) of candidate number tokens.
// Returns true as soon as one equivalent pair has been resolved.
bool Parser::SearchForEquivalentNumbers(std::vector<size_t>& tokens) {
  for (auto token_index = tokens.begin(); token_index != tokens.end(); ++token_index) {
    auto token = tokens_.begin() + *token_index;
    // An isolated candidate (alone inside brackets) cannot be the *first*
    // number of a pair, so skip it here.
    if (IsTokenIsolated(token))
      continue;
    // Find the first enclosed, non-delimiter token
    auto next_token = FindNextToken(tokens_, token, kFlagNotDelimiter);
    if (next_token != tokens_.end() && next_token->category == kBracket) {
      next_token = FindNextToken(tokens_, next_token, kFlagEnclosed | kFlagNotDelimiter);
    } else {
      continue; // no opening bracket right after the candidate
    }
    // See if it's an isolated number
    if (next_token != tokens_.end() && next_token->category == kUnknown &&
        IsTokenIsolated(next_token) && IsNumericString(next_token->content)) {
      if (IsValidEpisodeNumber(token->content) && IsValidEpisodeNumber(next_token->content)) {
        // The smaller of the two is taken as the episode number; the episode
        // element is attributed to the original (first) token.
        auto lower_token = StringToInt(token->content) < StringToInt(next_token->content)
            ? token : next_token;
        SetEpisodeNumber(lower_token->content, *token, false);
        next_token->category = kIdentifier;
        return true;
      }
    }
  }
  return false;
}
// Handles an episode keyword (e.g. "Episode", "Ep.") followed by the number
// itself. The token after the keyword must be unidentified and must begin
// with a digit; it is then parsed as an episode number and the keyword is
// consumed as an identifier.
bool Parser::CheckEpisodeKeyword(const token_iterator_t token) {
  const auto candidate = FindNextToken(tokens_, token, kFlagNotDelimiter);
  if (candidate == tokens_.end() || candidate->category != kUnknown)
    return false;

  // The value must start with a digit (position 0 in the string).
  if (FindNumberInString(candidate->content) != 0)
    return false;

  // Prefer the richer episode patterns; fall back to a plain number.
  if (!MatchEpisodePatterns(candidate->content, *candidate))
    SetEpisodeNumber(candidate->content, *candidate, false);
  token->category = kIdentifier;
  return true;
}
/*
 * ASE -> DH4 converter entry point.
 *
 * Prompts for an .ase file, extracts every *GEOMOBJECT section (name,
 * vertices, faces, normals, optional texture verts/faces) into temp[],
 * then rewrites the filename extension to "dh4" and emits the binary
 * model via WriteDH4.
 *
 * Returns 0 on success, 1 on input/open failure.
 *
 * Fixes vs. previous revision:
 *  - `void main` -> standard `int main`.
 *  - scanf("%s") could overflow filename[64]; bounded with %63s and checked.
 *  - tfaces was malloc'd but never freed (it is read via GetTFaces but not
 *    stored in temp[], so it leaked every mesh); now NULL-initialized and
 *    freed each iteration.
 */
int main(void)
{
    char filename[64];
    FILE *f;
    int meshes = 0;
    long start = 0, end = 0, last = 0;
    mdh_tempholder temp[MAX_MESHES];

    printf("WARNING TEXTURE NAMES LACK EXTENSTION!\nASE -> DH Convert v%d\n\n", VERSION);
    printf("Enter File name : ");
    if (scanf("%63s", filename) != 1)   /* bounded read; filename[64] cannot overflow */
        return 1;

    if ((f = fopen(filename, "r")) != NULL) {
        /* One pass over the file: each *GEOMOBJECT section becomes one mesh. */
        while (FindNextToken(f, "*GEOMOBJECT", last, &start, &end)) {
            char str[64], meshname[64];
            char texture[64];
            int *faces, *tfaces = NULL;     /* tfaces stays NULL unless allocated below */
            float *verts, *vnorms, *tverts = NULL;
            int numverts = 0, numfaces = 0, numtverts = 0, numtfaces = 0;
            int mesh_no, s, e;

            /* maybe get texture */
            mesh_no = meshes++;
            GetName(f, str, start, end);
            str[strlen(str) - 1] = 0;          /* strip trailing quote */
            sscanf(str, "\"%s", meshname);     /* strip leading quote */
            /* TODO : if tag remove! and add tag vectors at end of file */
            /* sprintf(texture, "%s.nfo", meshname); */
            strcpy(texture, meshname);
            printf("Reading node %s.\n", meshname);

            /* Element counts drive the allocations below. */
            GetInfo(f, start, end, &numverts, &numfaces, &numtverts, &numtfaces);

            /* Allocate */
            faces  = (int *)malloc(numfaces * 3 * sizeof(int));
            verts  = (float *)malloc(numverts * 3 * sizeof(float));
            vnorms = (float *)malloc(numverts * 3 * sizeof(float));
            if (numtverts)
                tverts = (float *)malloc(numtverts * 3 * sizeof(float));
            if (numtfaces)
                tfaces = (int *)malloc(numtfaces * 3 * sizeof(int));

            /* get the data */
            printf("MESH_VERTEX_LIST ");
            FindNextToken(f, "*MESH_VERTEX_LIST", start, &s, &e);
            GetVerts(f, verts, s, e);
            printf("MESH_FACE_LIST ");
            FindNextToken(f, "*MESH_FACE_LIST", start, &s, &e);
            GetFaces(f, faces, s, e);
            printf("MESH_NORMALS ");
            FindNextToken(f, "*MESH_NORMALS", start, &s, &e);
            GetVNorms(f, vnorms, s, e);
            /* Only these are optional */
            if (numtverts) {
                printf("MESH_TVERTLIST ");
                FindNextToken(f, "*MESH_TVERTLIST", start, &s, &e);
                GetTVerts(f, tverts, s, e);
            }
            if (numtfaces) {
                printf("MESH_TFACELIST ");
                FindNextToken(f, "*MESH_TFACELIST", start, &s, &e);
                GetTFaces(f, tfaces, s, e);
            }
            printf("\n");
            rewind(f);
            last = end;

            /* store them for a mo */
            strcpy(temp[mesh_no].name, meshname);
            strcpy(temp[mesh_no].texture, texture);
            temp[mesh_no].num_faces = numfaces;
            temp[mesh_no].num_verts = numverts;
            temp[mesh_no].faces = faces;
            temp[mesh_no].verts = verts;
            temp[mesh_no].norms = vnorms;
            if (numtverts)
                temp[mesh_no].tverts = tverts;
            else
                temp[mesh_no].tverts = NULL; /* Dunno if i need t verts */

            /* tfaces is never stored in temp[] (mdh_tempholder has no slot
             * for it), so release it here to stop the per-mesh leak.
             * TODO(review): if DH4 should carry texture faces, temp needs a
             * tfaces field and WriteDH4 must consume it. */
            free(tfaces);
        }
        fclose(f);

        /* Swap the 3-char extension for "dh4" (assumes input ended ".ase"). */
        filename[strlen(filename) - 3] = 0;
        strcat(filename, "dh4");
        if ((f = fopen(filename, "wb")) != NULL) {
            int i;
            WriteDH4(f, meshes, temp);
            fclose(f);
            for (i = 0; i < meshes; i++) {
                free(temp[i].faces);
                free(temp[i].verts);
                free(temp[i].norms);
                if (temp[i].tverts)
                    free(temp[i].tverts);
            }
        }
    }
    return 0;
}
// Re-flows `text` into the text box's line items, word-wrapping each line to
// m_width via the font's GetLineInfo. Passing NULL re-calculates from the
// stored m_text (all existing items are removed first). The item ring buffer
// holds at most k_AUI_TEXTBOX_MAXITEMS lines; older lines are recycled.
//
// BUG FIX: the line copy was terminated with the character '0' instead of
// the NUL terminator '\0', leaving every item string unterminated.
AUI_ERRCODE aui_TextBox::CalculateItems(MBCHAR const * text)
{
    // Preserve scroll position; if we were pinned to the bottom, stay there.
    sint32 rangerValue = m_verticalRanger->GetValueY();
    BOOL moveToEnd = rangerValue == m_verticalRanger->GetMaximumY();

    if ( !text )
    {
        // Full recalculation from the stored buffer: drop all current items.
        text = m_text;
        aui_Static **itemPtr = m_items;
        for ( sint32 i = m_numItems; i; i-- )
            RemoveItem( (*itemPtr++)->Id() );
        m_numItems = 0;
        m_curItem = 0;
    }

    MBCHAR const * cur = text;
    MBCHAR const * stop = text + strlen( text );
    if ( cur == stop ) return AUI_ERRCODE_OK; // nothing to lay out

    uint32 length = 0;
    aui_Static **itemPtr = m_items + m_curItem;
    {
        do
        {
            // Ask the font how much of [cur, stop) fits in one wrapped line.
            const MBCHAR *start = cur;
            RECT wrap = { 0, 0, m_width, 0 };
            POINT penPos = { 0, 0 };
            m_textfont->GetLineInfo( &wrap, &penPos, NULL, NULL, &start, stop, TRUE );
            length = start - cur;
            if ( !length )
            {
                // Font couldn't break the line: fall back to the next
                // whitespace token (or the rest of the buffer).
                MBCHAR const * token = FindNextToken(cur, " \t\n", 1);
                if ( token )
                    length = token - cur + 1;
                else
                    length = stop - cur;
            }
            Assert( cur + length <= stop );
            if ( cur + length > stop ) length = stop - cur;

            MBCHAR * tempCopy = new MBCHAR[length + 1];
            std::copy(cur, cur + length, tempCopy);
            tempCopy[length] = '\0'; // was '0': string was never NUL-terminated

            if ( ++m_numItems > k_AUI_TEXTBOX_MAXITEMS )
            {
                // Ring buffer full: recycle the oldest item and shift the
                // backing text buffer to drop the oldest line's characters.
                m_numItems = k_AUI_TEXTBOX_MAXITEMS;
                RemoveItem( (*itemPtr)->Id() );
                memmove( m_text, m_text + m_maxLength - length, length );
                memset( m_text + m_maxLength - length, '\0', length + 1 );
            }

            (*itemPtr)->SetText(tempCopy);
            // NOTE(review): tempCopy is never deleted here — presumably
            // SetText copies (or takes ownership of) the string; confirm,
            // otherwise this leaks one buffer per line.

            // Inherit color from the previous item unless one is forced.
            COLORREF color = m_curColor;
            if ( color == k_AUI_UI_NOCOLOR )
            {
                if ( !m_curItem )
                    color = m_items[ k_AUI_TEXTBOX_MAXITEMS - 1 ]->GetTextColor();
                else
                    color = (*(itemPtr - 1))->GetTextColor();
            }
            (*itemPtr)->SetTextColor( color );
            (*itemPtr)->SetTextBold( m_curBold );
            (*itemPtr)->SetTextItalic( m_curItalic );

            AddItem( (aui_Item *)*itemPtr++ );
            if ( ++m_curItem == k_AUI_TEXTBOX_MAXITEMS )
            {
                // Wrap the ring cursor.
                m_curItem = 0;
                itemPtr = m_items;
            }
        } while ( (cur += length) < stop );
    }

    if ( moveToEnd )
        rangerValue = m_verticalRanger->GetMaximumY();
    m_verticalRanger->SetValue( 0, rangerValue );

    return AUI_ERRCODE_OK;
}
// ------------------------------------------------------------------------------------------------ Token Lexer::Pop() { Token retToken = mLookaheadToken; mLookaheadToken = FindNextToken(); return retToken; }
/*****************************************************************************
* CTTSEngObj::AddNextSentItem *
*-----------------------------*
* Locates the next sentence item in the stream and adds it to the list.
* Leading punctuation ( " { ' [ and trailing punctuation , " ; : ) } ' ]
* . ! ? are split off into their own items around the main token.
* Apostrophes inside the token are rewritten to underscores in place.
* Returns true if the last item added is the end of the sentence.
*
* FIX: `((WCHAR)Item.pItem[i]) = L'_';` cast the *character* to an rvalue
* and assigned to it (a no-op / ill-formed construct); the intent is to
* write through the pointer, so the cast now applies to the pointer.
****************************************************************************/
BOOL CTTSEngObj::AddNextSentItem( CItemList& ItemList )
{
    //--- Get the token
    ULONG ulIndex;
    CSentItem Item;
    Item.pItem = FindNextToken( m_pNextChar, m_pEndChar, m_pNextChar );

    //--- This case can occur when we hit the end of a text fragment.
    //    Returning at this point will cause advancement to the next fragment.
    if( Item.pItem == NULL )
    {
        return false;
    }

    const WCHAR* pTrailChar = m_pNextChar-1;
    ULONG TokenLen = m_pNextChar - Item.pItem;

    //--- Split off leading punction if any
    static const WCHAR LeadItems[] = { L'(', L'\"', L'{', L'\'', L'[' };
    while( TokenLen > 1 )
    {
        if( SearchSet( Item.pItem[0], LeadItems, sp_countof(LeadItems), &ulIndex ) )
        {
            //--- Each leading punctuation mark becomes a one-char item.
            CSentItem LItem;
            LItem.pItem           = Item.pItem;
            LItem.ulItemLen       = 1;
            LItem.pXmlState       = &m_pCurrFrag->State;
            LItem.ulItemSrcLen    = LItem.ulItemLen;
            LItem.ulItemSrcOffset = m_pCurrFrag->ulTextSrcOffset +
                                    ( LItem.pItem - m_pCurrFrag->pTextStart );
            ItemList.AddTail( LItem );
            ++Item.pItem;
            --TokenLen;
        }
        else
        {
            break;
        }
    }

    //--- Get primary item insert position
    SPLISTPOS ItemPos = ItemList.AddTail( Item );

    //--- Split off trailing punction if any.
    static const WCHAR EOSItems[]   = { L'.', L'!', L'?' };
    static const WCHAR TrailItems[] = { L',', L'\"', L';', L':', L')', L'}', L'\'', L']' };
    SPLISTPOS NextPos = NULL;
    BOOL fIsEOS = false;
    while( TokenLen > 1 )
    {
        BOOL fAddTrailItem = false;
        if( SearchSet( *pTrailChar, EOSItems, sp_countof(EOSItems), &ulIndex ) )
        {
            fIsEOS = true;
            fAddTrailItem = true;
        }
        else if( SearchSet( *pTrailChar, TrailItems, sp_countof(TrailItems), &ulIndex ) )
        {
            fAddTrailItem = true;
        }

        if( fAddTrailItem )
        {
            //--- Trailing marks are inserted right after the primary item, so
            //    they end up in source order (we walk the token backwards).
            CSentItem TItem;
            TItem.pItem           = pTrailChar;
            TItem.ulItemLen       = 1;
            TItem.pXmlState       = &m_pCurrFrag->State;
            TItem.ulItemSrcLen    = TItem.ulItemLen;
            TItem.ulItemSrcOffset = m_pCurrFrag->ulTextSrcOffset +
                                    ( TItem.pItem - m_pCurrFrag->pTextStart );
            NextPos = ItemList.InsertAfter( ItemPos, TItem );
            --TokenLen;
            --pTrailChar;
        }
        else
        {
            break;
        }
    }

    //--- Abreviation or sentence end?
    //    If we are at the end of the buffer then EOS is implied.
    if( *m_pNextChar == L'\0' )   // was `== NULL`; comparing a WCHAR, not a pointer
    {
        fIsEOS = true;
        if( !SearchSet( *(m_pNextChar-1), EOSItems, sp_countof(EOSItems), &ulIndex ) )
        {
            //--- Terminate with a period if we are at the end of a buffer
            //    and no end of sentence punction has been added.
            static const WCHAR* pPeriod = L".";
            CSentItem EOSItem;
            EOSItem.pItem           = pPeriod;
            EOSItem.ulItemLen       = 1;
            EOSItem.pXmlState       = &m_pCurrFrag->State;
            EOSItem.ulItemSrcLen    = EOSItem.ulItemLen;
            EOSItem.ulItemSrcOffset = m_pCurrFrag->ulTextSrcOffset +
                                      ( (m_pNextChar-1) - m_pCurrFrag->pTextStart );
            ItemList.AddTail( EOSItem );
        }
    }
    else if( pTrailChar[1] == L'.' )
    {
        //--- Here is where you would try to prove that it's not EOS
        //    It might be an abreviation. That's a hard problem that
        //    we are not going to attempt here.
    }

    //--- Substitute underscore for apostrophe
    for( ULONG i = 0; i < TokenLen; ++i )
    {
        if( Item.pItem[i] == L'\'' )
        {
            //--- FIX: cast the pointer, not the character, so the write
            //    actually lands in the (caller-owned) text buffer.
            ((WCHAR*)Item.pItem)[i] = L'_';
        }
    }

    //--- Add the main item
    if( TokenLen > 0 )
    {
        Item.ulItemLen       = TokenLen;
        Item.pXmlState       = &m_pCurrFrag->State;
        Item.ulItemSrcLen    = Item.ulItemLen;
        Item.ulItemSrcOffset = m_pCurrFrag->ulTextSrcOffset +
                               ( Item.pItem - m_pCurrFrag->pTextStart );
        ItemList.SetAt( ItemPos, Item );
    }

    return fIsEOS;
} /* CTTSEngObj::AddNextSentItem */
void ase_model::Load(char *file_name) { long start = 0, end = 0, last = 0; FILE *f; if (f = fopen(file_name, "r")) { int mesh_no = 0; no_meshes = 0; while (FindNextToken(f, "*GEOMOBJECT", last, &start, &end)) { no_meshes++; last = end; } rewind(f); mesh = new ase_mesh_t [no_meshes]; start = 0; end = 0; last = 0; while (FindNextToken(f, "*GEOMOBJECT", last, &start, &end)) { char str[64]; long s, e; // maybe get texture GetName(f, str, start, end); str[strlen(str)-1] = 0; // sscanf(str, "\"%s", mesh->name); strcpy(mesh[mesh_no].name, str+1); //printf("Reading node %s.\n", meshname); GetInfo(f, start, end, &mesh[mesh_no].numverts, &mesh[mesh_no].numfaces, &mesh[mesh_no].numtverts, &mesh[mesh_no].numtfaces); // Allocate mesh[mesh_no].faces = new int[mesh[mesh_no].numfaces * 3]; mesh[mesh_no].verts = new float[mesh[mesh_no].numverts * 3]; mesh[mesh_no].vnorms = new float[mesh[mesh_no].numverts * 3]; if (mesh[mesh_no].numtverts) mesh[mesh_no].tverts = new float[mesh[mesh_no].numtverts * 3]; if (mesh[mesh_no].numtfaces) mesh[mesh_no].tfaces = new int[mesh[mesh_no].numtfaces * 3]; // get the data //printf("MESH_VERTEX_LIST "); FindNextToken(f, "*MESH_VERTEX_LIST", start, &s, &e); GetVerts(f, mesh[mesh_no].verts, s, e); //printf("MESH_FACE_LIST "); FindNextToken(f, "*MESH_FACE_LIST", start, &s, &e); GetFaces(f, mesh[mesh_no].faces, s, e); //printf("MESH_NORMALS "); FindNextToken(f, "*MESH_NORMALS", start, &s, &e); GetVNorms(f, mesh[mesh_no].vnorms, s, e); // Only these are optional if (mesh[mesh_no].numtverts) { // printf("MESH_TVERTLIST "); FindNextToken(f, "*MESH_TVERTLIST", start, &s, &e); GetTVerts(f, mesh[mesh_no].tverts, s, e); } if (mesh[mesh_no].numtfaces) { // printf("MESH_TFACELIST "); FindNextToken(f, "*MESH_TFACELIST", start, &s, &e); GetTFaces(f, mesh[mesh_no].tfaces, s, e); } //printf("\n"); rewind(f); last = end; mesh_no++; } fclose(f); } }
// Post-processes the token list, merging delimiter tokens back into their
// neighbors where the delimiter was clearly not a word separator (e.g. the
// dots in "5.1", hyphens around single characters), then erases the tokens
// invalidated by the merges. Merges mark tokens kInvalid rather than erasing
// in place so the iterators stay valid during the scan.
void Tokenizer::ValidateDelimiterTokens() {
  auto is_delimiter_token = [&](token_iterator_t it) {
    return it != tokens_.end() && it->category == kDelimiter;
  };
  auto is_unknown_token = [&](token_iterator_t it) {
    return it != tokens_.end() && it->category == kUnknown;
  };
  // Single-character unknown token, except "-" (which is a legitimate word).
  auto is_single_character_token = [&](token_iterator_t it) {
    return is_unknown_token(it) && it->content.size() == 1 && it->content.front() != L'-';
  };
  // Merge `token`'s text into `append_to` and mark `token` for removal.
  auto append_token_to = [](token_iterator_t token, token_iterator_t append_to) {
    append_to->content.append(token->content);
    token->category = kInvalid;
  };
  for (auto token = tokens_.begin(); token != tokens_.end(); ++token) {
    if (token->category != kDelimiter)
      continue;
    auto delimiter = token->content.front();
    auto prev_token = FindPreviousToken(tokens_, token, kFlagValid);
    auto next_token = FindNextToken(tokens_, token, kFlagValid);
    // Check for single-character tokens to prevent splitting group names,
    // keywords, episode number, etc.
    if (delimiter != L' ' && delimiter != L'_') {
      if (is_single_character_token(prev_token)) {
        append_token_to(token, prev_token);
        // Keep folding the following run of unknown tokens (and repeated
        // occurrences of the same delimiter) into prev_token.
        while (is_unknown_token(next_token)) {
          append_token_to(next_token, prev_token);
          next_token = FindNextToken(tokens_, next_token, kFlagValid);
          if (is_delimiter_token(next_token) && next_token->content.front() == delimiter) {
            append_token_to(next_token, prev_token);
            next_token = FindNextToken(tokens_, next_token, kFlagValid);
          }
        }
        continue;
      }
      if (is_single_character_token(next_token)) {
        // NOTE(review): prev_token is not re-checked against tokens_.end()
        // here — presumably a delimiter is never the first valid token; confirm.
        append_token_to(token, prev_token);
        append_token_to(next_token, prev_token);
        continue;
      }
    }
    // Check for adjacent delimiters
    if (is_unknown_token(prev_token) && is_delimiter_token(next_token)) {
      auto next_delimiter = next_token->content.front();
      if (delimiter != next_delimiter && delimiter != ',') {
        if (next_delimiter == ' ' || next_delimiter == '_') {
          append_token_to(token, prev_token);
        }
      }
    }
  }
  // Erase-remove the tokens invalidated above.
  auto remove_if_invalid = std::remove_if(tokens_.begin(), tokens_.end(),
      [](const Token& token) -> bool { return token.category == kInvalid; });
  tokens_.erase(remove_if_invalid, tokens_.end());
}