Example No. 1
void XapianIndex::removeFirstPostingsFromDocument(Tokenizer &tokens, Xapian::Document &doc,
	const string &prefix, const string &language, StemmingMode mode) const
{
	Xapian::TermIterator termListIter = doc.termlist_begin();
	Xapian::Stem *pStemmer = NULL;
	string upperCasePrefix("R");
	string term;

	// Do we know what language to use for stemming ?
	if (language.empty() == false)
	{
		pStemmer = new Xapian::Stem(StringManip::toLowerCase(language));
	}

	// Terms starting with a capital letter are R-prefixed, unless a prefix is already defined
	if (prefix.empty() == false)
	{
		upperCasePrefix = prefix;
	}

	// Get the terms and remove the first posting for each
	while (tokens.nextToken(term) == true)
	{
		if (term.empty() == true)
		{
			continue;
		}
		// Does it start with a capital letter ?
		if (isupper((unsigned char)term[0]) != 0)
		{
			removeFirstPosting(doc, termListIter, upperCasePrefix + term);
		}
		// Lower case the term
		term = StringManip::toLowerCase(term);

		// Stem the term ?
		if ((mode == STORE_UNSTEM) ||
			(pStemmer == NULL))
		{
			removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(term));
		}
		else if (mode == STORE_STEM)
		{
#if XAPIAN_MAJOR_VERSION==0
			string stemmedTerm(pStemmer->stem_word(term));
#else
			string stemmedTerm((*pStemmer)(term));
#endif

			removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(stemmedTerm));
		}
		else if (mode == STORE_BOTH)
		{
#if XAPIAN_MAJOR_VERSION==0
			string stemmedTerm(pStemmer->stem_word(term));
#else
			string stemmedTerm((*pStemmer)(term));
#endif

			removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(term));
			if (stemmedTerm != term)
			{
				removeFirstPosting(doc, termListIter, prefix + XapianDatabase::limitTermLength(stemmedTerm));
			}
		}
	}

	if (pStemmer != NULL)
	{
		delete pStemmer;
	}
}
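
For reference, a tiny standalone sketch of the stemmer call used in the modern-API branch above ((*pStemmer)(term)): Xapian::Stem is a functor that maps a word to its stem. The language name and test word below are illustrative only.

// Minimal sketch, assuming a Xapian installation; not code from this project.
#include <xapian.h>
#include <iostream>

int main()
{
	Xapian::Stem stemmer("english");
	std::cout << stemmer("running") << std::endl;	// prints "run"
	return 0;
}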
Example No. 2
	void RPedigree::inputPed(string fileString){
		// Authors: Rohan L. Fernando
		// (2005) 
		// Contributors:
		const char* fname = fileString.c_str();
		cout << "reading pedigree file \n";
		if (colName.size() < 3) {
			cerr << "RPedigree::input(): colName.size() < 3 \n";
			cerr << "Did you forget the putColNames method? \n";
			throw exception("Error in RPedigree::input()");
		}
		int indIndex = colName.getIndex("individual");
		if (indIndex == -1){
			cerr << "RPedigree::input(): individual column is missing in colName \n";
			throw exception("Error in RPedigree::input()");
		}
		int sireIndex = colName.getIndex("sire");
		if (sireIndex == -1){
			cerr << "RPedigree::input(): sire column is missing in colName \n";
			throw exception("Error in RPedigree::input()");
		}
		int damIndex = colName.getIndex("dam");
		if (damIndex == -1){
			cerr << "RPedigree::input(): dam column is missing in colName \n";
			throw exception("Error in RPedigree::input()");			
		}
		unsigned numCol = colName.size();			
		double rec = 0, rec1 = 0;
		string indstr, sirestr, damstr;
		ifstream datafile(fname);
		if(!datafile){
			cout<< "Cannot open pedigree file: " << fname << endl;
			exit(1);
		}
		datafile.setf(ios::skipws);
		PNode *ptr;
		std::string sep(" \t,\n\r");
		std::string inputStr;
		Tokenizer colData;
		unsigned COUNT = 0;
		while ( getline(datafile,inputStr) ){
			colData.getTokens(inputStr,sep);
			indstr  = colData[indIndex];
			sirestr = colData[sireIndex];
			damstr  = colData[damIndex];
			rec++;
			if(rec==1000){
				cout<<rec+rec1<<"\r";
				cout.flush();
				rec1 += rec;
				rec = 0;
			}
			if (colData.size() != numCol){
				cerr << " Record " << rec1 + rec << " has " << colData.size() << " columns \n";
				cerr << " Expected " << numCol << endl;
				throw exception("Error in RPedigree::input()"); 
			}
			ptr = new PNode(indstr, sirestr, damstr);
			if (orderedPed) ptr->ind = ++COUNT;
			(*this)[indstr] = ptr;
		}
		datafile.close();
		if(orderedPed){
			seqnPed();
		}
		else {
			generateEntriesforParents();
			codePed();
		}	
		makePedVector();
		calc_inbreeding();
		fillCoder(); 
	}
Example No. 3
void ArchiveOperations::removeUnusedFlats(Archive* archive)
{
	// Check archive was given
	if (!archive)
		return;

	// --- Build list of used flats ---
	TexUsedMap used_textures;
	int        total_maps = 0;

	// Get all SECTORS entries
	Archive::SearchOptions opt;
	opt.match_type = EntryType::fromId("map_sectors");
	auto sectors   = archive->findAll(opt);
	total_maps += sectors.size();

	// Go through and add used flats to list
	DoomMapFormat::Sector sec;
	wxString              tex_floor, tex_ceil;
	for (auto& sector : sectors)
	{
		int nsec = sector->size() / 26;
		sector->seek(0, SEEK_SET);
		for (int s = 0; s < nsec; s++)
		{
			// Read sector data
			sector->read(&sec, 26);

			// Get textures
			tex_floor = wxString::FromAscii(sec.f_tex, 8);
			tex_ceil  = wxString::FromAscii(sec.c_tex, 8);

			// Add to used textures list
			used_textures[tex_floor].used = true;
			used_textures[tex_ceil].used  = true;
		}
	}

	// Get all TEXTMAP entries
	opt.match_name = "TEXTMAP";
	opt.match_type = EntryType::fromId("udmf_textmap");
	auto udmfmaps  = archive->findAll(opt);
	total_maps += udmfmaps.size();

	// Go through and add used flats to list
	Tokenizer tz;
	tz.setSpecialCharacters("{};=");
	for (auto& udmfmap : udmfmaps)
	{
		// Open in tokenizer
		tz.openMem(udmfmap->data(), "UDMF TEXTMAP");

		// Go through text tokens
		wxString token = tz.getToken();
		while (!token.IsEmpty())
		{
			// Check for sector definition
			if (token == "sector")
			{
				tz.getToken(); // Skip {

				token = tz.getToken();
				while (token != "}")
				{
					// Check for texture property
					if (token == "texturefloor" || token == "textureceiling")
					{
						tz.getToken(); // Skip =
						used_textures[tz.getToken()].used = true;
					}

					token = tz.getToken();
				}
			}

			// Next token
			token = tz.getToken();
		}
	}

	// Check if any maps were found
	if (total_maps == 0)
		return;

	// Find all flats
	opt.match_name      = "";
	opt.match_namespace = "flats";
	opt.match_type      = nullptr;
	auto flats          = archive->findAll(opt);

	// Create list of all unused flats
	wxArrayString unused_tex;
	bool          anim = false;
	for (auto& flat : flats)
	{
		// Skip markers
		if (flat->size() == 0)
			continue;

		// Check for animation start
		std::string flatname{ flat->nameNoExt() };
		for (int b = 0; b < n_flat_anim; b++)
		{
			if (flatname == flat_anim_start[b])
			{
				anim = true;
				Log::info(wxString::Format("%s anim start", flatname));
				break;
			}
		}

		// Check for animation end
		bool thisend = false;
		for (int b = 0; b < n_flat_anim; b++)
		{
			if (flatname == flat_anim_end[b])
			{
				anim    = false;
				thisend = true;
				Log::info(wxString::Format("%s anim end", flatname));
				break;
			}
		}

		// Add if not animated
		if (!used_textures[flatname].used && !anim && !thisend)
			unused_tex.Add(flatname);
	}

	// Pop up a dialog with a checkbox list of unused textures
	wxMultiChoiceDialog dialog(
		theMainWindow,
		"The following textures are not used in any map,\nselect which textures to delete",
		"Delete Unused Textures",
		unused_tex);

	// Select all flats initially
	wxArrayInt selection;
	for (unsigned a = 0; a < unused_tex.size(); a++)
		selection.push_back(a);
	dialog.SetSelections(selection);

	int n_removed = 0;
	if (dialog.ShowModal() == wxID_OK)
	{
		// Go through selected flats
		selection           = dialog.GetSelections();
		opt.match_namespace = "flats";
		for (int i : selection)
		{
			opt.match_name      = unused_tex[i];
			ArchiveEntry* entry = archive->findFirst(opt);
			archive->removeEntry(entry);
			n_removed++;
		}
	}

	wxMessageBox(wxString::Format("Removed %d unused flats", n_removed));
}
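
For reference, the sector->read(&sec, 26) loop above assumes the classic 26-byte Doom SECTORS record; f_tex and c_tex are the 8-character flat names that end up in used_textures. Below is a sketch of that layout (an illustration of the standard format, not the project's DoomMapFormat::Sector definition):

#include <cstdint>

#pragma pack(push, 1)
struct SectorRecord
{
	int16_t f_height;	// floor height
	int16_t c_height;	// ceiling height
	char    f_tex[8];	// floor flat name (space-padded, not NUL-terminated)
	char    c_tex[8];	// ceiling flat name
	int16_t light;		// light level
	int16_t special;	// sector special
	int16_t tag;		// sector tag
};
#pragma pack(pop)

static_assert(sizeof(SectorRecord) == 26, "classic Doom sector records are 26 bytes");

int main() { return 0; }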
Example No. 4
//
// reads the next timestep from the opened file,
// using the current quantization settings in the global
// state object
//
bool read_pair_timestep(TIMESTEP &ts)   {
    static Tokenizer tok;
    static char buf[G_MAX_INPUT_LINE] = "";
    if(!state.fpData)
        return false;
    ts.edges.clear();
    ts.vertices.clear();

    // do we have a left-over line from the last timestep, or do we need
    // to read another line from the file?
    if(!buf[0])
        if(!fgets(buf, G_MAX_INPUT_LINE, state.fpData)) {
            // no lines left in file -- we're done reading the whole file
            buf[0] = 0;
            return false;
        }

    // read a new timestep
    ts.epoch = -1;
    do {
        tok.tokenize(buf);
        if(tok.num_tokens() >= 1)   {   // skip blank lines
            // get the timestep label
            char *endptr;
            errno = 0;
            int tsnum = strtod(tok.token(0), &endptr);
            if(errno || endptr == tok.token(0))   {
                // encountered a non-numeric timestamp
                fprintf(stderr, "Error: encountered a non-numeric timestep label '%s' in input file.\n", tok.token(0));
                exit(-5);
            }
            int quant = quantize(tsnum);
            if(ts.epoch == -1)
                ts.epoch = quant;
            if(ts.epoch != quant)   // new timestep?
                break;

            // add this observation to the timestep
            if(tok.num_tokens() >= 2)       {
                // have at least one vertex
                int vid1 = map_vertex(tok.token(1));
                ts.vertices.insert(vid1);
                if(tok.num_tokens() >= 3)   {
                    // have an edge
                    int vid2 = map_vertex(tok.token(2));

                    // skip self-edges, i.e., (v, v)
                    if(vid1 != vid2)    {
                        ts.vertices.insert(vid2);

                        // make sure vid1 < vid2, so if the graph is undirected then
                        // we never insert a duplicate edge with the endpoints flipped
                        if(!state.directed && vid2 < vid1)  {
                            int t = vid1;
                            vid1 = vid2;
                            vid2 = t;
                        }
                        ts.edges.insert(make_pair(vid1, vid2));
                    }
                }
            }
        }

        // read the next line
        if(!fgets(buf, G_MAX_INPUT_LINE, state.fpData)) {
            buf[0] = 0;
            break;
        }
    } while(1);

    // update state
    state.ne+= ts.edges.size();
    state.nt++;

    // sanity check
    assert(ts.edges.size() <= ts.vertices.size()*(ts.vertices.size()-1)/(state.directed?1:2) );

    return true;
}
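
The endpoint swap in the loop above (ensure vid1 < vid2 for undirected graphs) is what lets the edge set collapse (a,b) and (b,a) into a single entry. A self-contained sketch of just that idea, with made-up vertex ids:

#include <cstdio>
#include <set>
#include <utility>

int main()
{
    std::set<std::pair<int,int>> edges;
    int input[][2] = { {3, 1}, {1, 3}, {2, 2}, {1, 2} };

    for (auto &p : input) {
        int vid1 = p[0], vid2 = p[1];
        if (vid1 == vid2)               // skip self-edges, i.e. (v, v)
            continue;
        if (vid2 < vid1) {              // order endpoints so (a,b) and (b,a) coincide
            int t = vid1;
            vid1 = vid2;
            vid2 = t;
        }
        edges.insert(std::make_pair(vid1, vid2));
    }
    std::printf("%zu unique edges\n", edges.size());    // prints "2 unique edges"
    return 0;
}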
Example No. 5
void QSGShaderSourceBuilder::addDefinition(const QByteArray &definition)
{
    if (definition.isEmpty())
        return;

    Tokenizer tok;
    const char *input = m_source.constData();
    tok.initialize(input);

    // First find #version, #extension's and "void main() { ... "
    const char *versionPos = 0;
    const char *extensionPos = 0;
    bool inSingleLineComment = false;
    bool inMultiLineComment = false;
    bool foundVersionStart = false;
    bool foundExtensionStart = false;

    Tokenizer::Token lt = Tokenizer::Token_Unspecified;
    Tokenizer::Token t = tok.next();
    while (t != Tokenizer::Token_EOF) {
        // Handle comment blocks
        if (t == Tokenizer::Token_MultiLineCommentStart )
            inMultiLineComment = true;
        if (t == Tokenizer::Token_MultiLineCommentEnd)
            inMultiLineComment = false;
        if (t == Tokenizer::Token_SingleLineComment)
            inSingleLineComment = true;
        if (t == Tokenizer::Token_NewLine && inSingleLineComment && !inMultiLineComment)
            inSingleLineComment = false;

        // Have we found #version, #extension or void main()?
        if (t == Tokenizer::Token_Version && !inSingleLineComment && !inMultiLineComment)
            foundVersionStart = true;

        if (t == Tokenizer::Token_Extension && !inSingleLineComment && !inMultiLineComment)
            foundExtensionStart = true;

        if (foundVersionStart && t == Tokenizer::Token_NewLine) {
            versionPos = tok.pos;
            foundVersionStart = false;
        } else if (foundExtensionStart && t == Tokenizer::Token_NewLine) {
            extensionPos = tok.pos;
            foundExtensionStart = false;
        } else if (lt == Tokenizer::Token_Void && t == Tokenizer::Token_Identifier) {
            if (qstrncmp("main", tok.identifier, 4) == 0)
                break;
        }

        // Scan to next token
        lt = t;
        t = tok.next();
    }

    // Determine where to insert the definition.
    // If we found #extension directives, insert after last one,
    // else, if we found #version insert after #version
    // otherwise, insert at beginning.
    const char *insertionPos = extensionPos ? extensionPos : (versionPos ? versionPos : input);

    // Construct a new shader string, inserting the definition
    QByteArray newSource = QByteArray::fromRawData(input, insertionPos - input)
            + "#define " + definition + '\n'
            + QByteArray::fromRawData(insertionPos, m_source.size() - (insertionPos - input));
    m_source = std::move(newSource);
}
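
The comment above summarizes the insertion rule: after the last #extension directive, else after #version, else at the very beginning. The sketch below illustrates that rule on a plain std::string; unlike the real method it ignores comments and is not Qt code, just an illustration (the macro name is made up):

#include <cstdio>
#include <string>

// Insert "#define <definition>" after the last #extension, else after #version,
// else at the start of the source (comments are ignored in this sketch).
static std::string addDefinition(const std::string &source, const std::string &definition)
{
    std::string::size_type dir = source.rfind("#extension");
    if (dir == std::string::npos)
        dir = source.find("#version");

    std::string::size_type insertPos = 0;
    if (dir != std::string::npos) {
        std::string::size_type eol = source.find('\n', dir);
        insertPos = (eol == std::string::npos) ? source.size() : eol + 1;
    }

    std::string result = source;
    result.insert(insertPos, "#define " + definition + "\n");
    return result;
}

int main()
{
    // Prints the shader with "#define MY_DEFINE 1" inserted after "#version 150".
    std::printf("%s", addDefinition("#version 150\nvoid main() { }\n", "MY_DEFINE 1").c_str());
    return 0;
}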
Example No. 6
/* CTexture::parse
 * Parses a TEXTURES format texture definition
 *******************************************************************/
bool CTexture::parse(Tokenizer& tz, string type)
{
	// Check if optional
	if (S_CMPNOCASE(tz.peekToken(), "optional"))
	{
		tz.getToken();	// Skip it
		optional = true;
	}

	// Read basic info
	this->type = type;
	this->extended = true;
	this->defined = false;
	name = tz.getToken().Upper();
	tz.getToken();	// Skip ,
	width = tz.getInteger();
	tz.getToken();	// Skip ,
	height = tz.getInteger();

	// Check for extended info
	if (tz.peekToken() == "{")
	{
		tz.getToken();	// Skip {

		// Read properties
		string property = tz.getToken();
		while (property != "}")
		{
			// Check if end of text is reached (error)
			if (property.IsEmpty())
			{
				wxLogMessage("Error parsing texture %s: End of text found, missing } perhaps?", name);
				return false;
			}

			// XScale
			if (S_CMPNOCASE(property, "XScale"))
				scale_x = tz.getFloat();

			// YScale
			if (S_CMPNOCASE(property, "YScale"))
				scale_y = tz.getFloat();

			// Offset
			if (S_CMPNOCASE(property, "Offset"))
			{
				offset_x = tz.getInteger();
				tz.getToken();	// Skip ,
				offset_y = tz.getInteger();
			}

			// WorldPanning
			if (S_CMPNOCASE(property, "WorldPanning"))
				world_panning = true;

			// NoDecals
			if (S_CMPNOCASE(property, "NoDecals"))
				no_decals = true;

			// NullTexture
			if (S_CMPNOCASE(property, "NullTexture"))
				null_texture = true;

			// Patch
			if (S_CMPNOCASE(property, "Patch"))
			{
				CTPatchEx* patch = new CTPatchEx();
				patch->parse(tz);
				patches.push_back(patch);
			}

			// Graphic
			if (S_CMPNOCASE(property, "Graphic"))
			{
				CTPatchEx* patch = new CTPatchEx();
				patch->parse(tz, PTYPE_GRAPHIC);
				patches.push_back(patch);
			}

			// Read next property
			property = tz.getToken();
		}
	}

	return true;
}
Example No. 7
// -----------------------------------------------------------------------------
// Reads and parses the SLADE configuration file
// -----------------------------------------------------------------------------
void readConfigFile()
{
	// Open SLADE.cfg
	Tokenizer tz;
	if (!tz.openFile(App::path("slade3.cfg", App::Dir::User)))
		return;

	// Go through the file with the tokenizer
	while (!tz.atEnd())
	{
		// If we come across a 'cvars' token, read in the cvars section
		if (tz.advIf("cvars", 2))
		{
			// Keep reading name/value pairs until we hit the ending '}'
			while (!tz.checkOrEnd("}"))
			{
				CVar::set(tz.current().text, tz.peek().text);
				tz.adv(2);
			}

			tz.adv(); // Skip ending }
		}

		// Read base resource archive paths
		if (tz.advIf("base_resource_paths", 2))
		{
			while (!tz.checkOrEnd("}"))
			{
				archive_manager.addBaseResourcePath(tz.current().text);
				tz.adv();
			}

			tz.adv(); // Skip ending }
		}

		// Read recent files list
		if (tz.advIf("recent_files", 2))
		{
			while (!tz.checkOrEnd("}"))
			{
				archive_manager.addRecentFile(tz.current().text);
				tz.adv();
			}

			tz.adv(); // Skip ending }
		}

		// Read keybinds
		if (tz.advIf("keys", 2))
			KeyBind::readBinds(tz);

		// Read nodebuilder paths
		if (tz.advIf("nodebuilder_paths", 2))
		{
			while (!tz.checkOrEnd("}"))
			{
				NodeBuilders::addBuilderPath(tz.current().text, tz.peek().text);
				tz.adv(2);
			}

			tz.adv(); // Skip ending }
		}

		// Read game exe paths
		if (tz.advIf("executable_paths", 2))
		{
			while (!tz.checkOrEnd("}"))
			{
				Executables::setGameExePath(tz.current().text, tz.peek().text);
				tz.adv(2);
			}

			tz.adv(); // Skip ending }
		}

		// Read window size/position info
		if (tz.advIf("window_info", 2))
			Misc::readWindowInfo(tz);

		// Next token
		tz.adv();
	}
}
Example No. 8
void Program::parseProgram() {
    t.setIndex(-1);
    t.skipToken();
    curToken=t.getToken();
    //----------------------------------------------------------------//
    if(curToken == 1) {
      t.skipToken();
      curToken=t.getToken();
      while(curToken != 2) {
        ds.parseDS(); 
        t.skipToken();
        curToken=t.getToken();
      }
    }
    else {
      cout<<"error 1"<<endl;
      exit(1);
    }
    cout<<endl;
    //----------------------------------------------------------------
    if(curToken == 2) {
      t.skipToken();
      curToken=t.getToken();
      while(curToken != 3) {
        ss.parseSS(); 
        t.skipToken();
        curToken=t.getToken();
      }
    }
    else {
      cout<<"error 2"<<endl;
      exit(1);
    }
    cout<<endl;
    //----------------------------------------------------------------//
    if(curToken == 3) {
      t.skipToken();
      curToken=t.getToken();
      cout<<"End of Parsing. No errors found"<<endl;
    }
    else {
      cout<<"error 3"<<endl;
      exit(1);
    }
}
Example No. 9
File: Main.cpp Project: lcs2/carpg
//=================================================================================================
bool RunInstallScripts()
{
	Info("Reading install scripts.");
	WIN32_FIND_DATA data;
	HANDLE find = FindFirstFile(Format("%s/install/*.txt", g_system_dir.c_str()), &data);
	if(find == INVALID_HANDLE_VALUE)
		return true;

	vector<InstallScript> scripts;

	Tokenizer t;
	t.AddKeyword("install", 0);
	t.AddKeyword("version", 1);
	t.AddKeyword("remove", 2);

	do
	{
		int major, minor, patch;

		// read file to find version info
		try
		{
			if(t.FromFile(Format("%s/install/%s", g_system_dir.c_str(), data.cFileName)))
			{
				t.Next();
				if(t.MustGetKeywordId() == 2)
				{
					// old install script
					if(sscanf_s(data.cFileName, "%d.%d.%d.txt", &major, &minor, &patch) != 3)
					{
						if(sscanf_s(data.cFileName, "%d.%d.txt", &major, &minor) == 2)
							patch = 0;
						else
						{
							// unknown version
							major = 0;
							minor = 0;
							patch = 0;
						}
					}
				}
				else
				{
					t.AssertKeyword(0);
					t.Next();
					if(t.MustGetInt() != 1)
						t.Throw(Format("Unknown install script version '%d'.", t.MustGetInt()));
					t.Next();
					t.AssertKeyword(1);
					t.Next();
					major = t.MustGetInt();
					t.Next();
					minor = t.MustGetInt();
					t.Next();
					patch = t.MustGetInt();
				}

				InstallScript& s = Add1(scripts);
				s.filename = data.cFileName;
				s.version = (((major & 0xFF) << 16) | ((minor & 0xFF) << 8) | (patch & 0xFF));
			}
		}
		catch(const Tokenizer::Exception& e)
		{
			Warn("Unknown install script '%s': %s", data.cFileName, e.ToString());
		}
	} while(FindNextFile(find, &data));

	FindClose(find);

	if(scripts.empty())
		return true;

	std::sort(scripts.begin(), scripts.end());

	GetModuleFileName(nullptr, BUF, 256);
	char buf[512], buf2[512];
	char* filename;
	GetFullPathName(BUF, 512, buf, &filename);
	*filename = 0;
	DWORD len = strlen(buf);

	LocalString s, s2;

	for(vector<InstallScript>::iterator it = scripts.begin(), end = scripts.end(); it != end; ++it)
	{
		cstring path = Format("%s/install/%s", g_system_dir.c_str(), it->filename.c_str());

		try
		{
			if(!t.FromFile(path))
			{
				Error("Failed to load install script '%s'.", it->filename.c_str());
				continue;
			}
			Info("Using install script %s.", it->filename.c_str());

			t.Next();
			t.AssertKeyword();
			if(t.MustGetKeywordId() == 0)
			{
				// skip install 1, version X Y Z W
				t.Next();
				t.AssertInt();
				t.Next();
				t.AssertKeyword(1);
				t.Next();
				t.AssertInt();
				t.Next();
				t.AssertInt();
				t.Next();
				t.AssertInt();
				t.Next();
				t.AssertInt();
				t.Next();
			}

			while(true)
			{
				if(t.IsEof())
					break;
				t.AssertKeyword(2);

				t.Next();
				s2 = t.MustGetString();

				if(GetFullPathName(s2->c_str(), 512, buf2, nullptr) == 0 || strncmp(buf, buf2, len) != 0)
				{
					Error("Invalid file path '%s'.", s2->c_str());
					return false;
				}

				DeleteFile(buf2);
				t.Next();
			}

			DeleteFile(path);
		}
		catch(cstring err)
		{
			Error("Failed to parse install script '%s': %s", path, err);
		}
	}

	return true;
}
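
A quick check of the version packing used above: each of major, minor and patch occupies one byte, so version 1.2.3 packs to 0x010203. The values below are arbitrary:

#include <cstdio>

int main()
{
	int major = 1, minor = 2, patch = 3;
	unsigned version = ((major & 0xFF) << 16) | ((minor & 0xFF) << 8) | (patch & 0xFF);
	std::printf("0x%06X\n", version);	// prints 0x010203
	return 0;
}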
Example No. 10
void CheckUnusedFunctions::parseTokens(const Tokenizer &tokenizer)
{
    // Function declarations..
    for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next()) {
        if (tok->fileIndex() != 0)
            continue;

        // token contains a ':' => skip to next ; or {
        if (tok->str().find(":") != std::string::npos) {
            while (tok && tok->str().find_first_of(";{"))
                tok = tok->next();
            if (tok)
                continue;
            break;
        }

        // If this is a template function, skip it
        if (tok->previous() && tok->previous()->str() == ">")
            continue;

        const Token *funcname = 0;

        if (Token::Match(tok, "%type% %var% ("))
            funcname = tok->tokAt(1);
        else if (Token::Match(tok, "%type% * %var% ("))
            funcname = tok->tokAt(2);
        else if (Token::Match(tok, "%type% :: %var% (") && !Token::Match(tok, tok->strAt(2).c_str()))
            funcname = tok->tokAt(2);

        // Don't assume throw as a function name: void foo() throw () {}
        if (Token::Match(tok->previous(), ")|const"))
            funcname = 0;

        // Check that ") {" is found..
        for (const Token *tok2 = funcname; tok2; tok2 = tok2->next()) {
            if (tok2->str() == ")") {
                if (! Token::simpleMatch(tok2, ") {") &&
                    ! Token::simpleMatch(tok2, ") const {") &&
                    ! Token::simpleMatch(tok2, ") const throw ( ) {") &&
                    ! Token::simpleMatch(tok2, ") throw ( ) {"))
                    funcname = 0;
                break;
            }
        }

        if (funcname) {
            FunctionUsage &func = _functions[ funcname->str()];

            if (!func.lineNumber)
                func.lineNumber = funcname->linenr();

            // No filename set yet..
            if (func.filename.empty()) {
                func.filename = tokenizer.getFiles()->at(0);
            }
            // Multiple files => filename = "+"
            else if (func.filename != tokenizer.getFiles()->at(0)) {
                //func.filename = "+";
                func.usedOtherFile |= func.usedSameFile;
            }
        }
    }

    // Function usage..
    for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next()) {
        const Token *funcname = 0;

        if (Token::Match(tok->next(), "%var% (")) {
            funcname = tok->next();
        }

        else if (Token::Match(tok, "[;{}.,()[=+-/&|!?:] %var% [(),;:}]"))
            funcname = tok->next();

        else if (Token::Match(tok, "[=(,] & %var% :: %var% [,);]"))
            funcname = tok->tokAt(4);

        else
            continue;

        // funcname ( => Assert that the end parenthesis isn't followed by {
        if (Token::Match(funcname, "%var% (")) {
            int parlevel = 0;
            for (const Token *tok2 = funcname; tok2; tok2 = tok2->next()) {
                if (tok2->str() == "(")
                    ++parlevel;

                else if (tok2->str() == ")") {
                    --parlevel;
                    if (parlevel == 0 && (Token::Match(tok2, ") const|{")))
                        funcname = NULL;
                    if (parlevel <= 0)
                        break;
                }
            }
        }

        if (funcname) {
            FunctionUsage &func = _functions[ funcname->str()];

            if (func.filename.empty() || func.filename == "+")
                func.usedOtherFile = true;
            else
                func.usedSameFile = true;
        }
    }
}
Example No. 11
int main()
{
    Tokenizer t;

    //adding the tokens for the tokenizer
    t.add("\\+|-",PLUSMINUS);
    t.add("\\*|/",MULTDIV);
    t.add("\\^",RAISED);
    t.add("!",FACULTY);
    //regexp matching is greedy; try sinh/cosh/tanh first
    t.add("sinh|cosh|tanh",FUNCTION);
    t.add("asin|acos|atan",FUNCTION);
    t.add("sin|cos|tan|sqrt",FUNCTION);
    t.add("log\\[[[[:digit:]]+(\\.)?[[:digit:]]*\\]|log",FUNCTION);
    t.add("\\(",OPEN_BRACKET);
    t.add("\\)",CLOSE_BRACKET);
    t.add("[[:digit:]]+(\\.)?[[:digit:]]*",NUMBER);

    string str;

    while(1) {
        cout << "=> ";

        if(!getline(cin,str)) {
          cout << endl;
          return 1;
        }

        str.erase(remove_if(str.begin(),str.end(), ::isspace),str.end());

        if(str.empty())
          continue;

        if(str == "quit")
            break;
        try{
            vector<token> a = t.tokenize(str);
            Parser p(a);
            Expression* exp = p.parse();
            cout << "\t\t== " << exp->evaluate() << endl;;
        } catch (runtime_error& e) {
            cout << "\t\tSyntax error: " << e.what() << endl;;
        }
    }

    return 0;
}
Example No. 12
void ppPragma::HandleError(Tokenizer &tk)
{
    Errors::Error(tk.GetString());
}
Example No. 13
void CppCheck::executeRules(const std::string &tokenlist, const Tokenizer &tokenizer)
{
    (void)tokenlist;
    (void)tokenizer;

#ifdef HAVE_RULES
    // Are there rules to execute?
    bool isrule = false;
    for (std::list<Settings::Rule>::const_iterator it = mSettings.rules.begin(); it != mSettings.rules.end(); ++it) {
        if (it->tokenlist == tokenlist)
            isrule = true;
    }

    // There is no rule to execute
    if (isrule == false)
        return;

    // Write all tokens in a string that can be parsed by pcre
    std::ostringstream ostr;
    for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next())
        ostr << " " << tok->str();
    const std::string str(ostr.str());

    for (std::list<Settings::Rule>::const_iterator it = mSettings.rules.begin(); it != mSettings.rules.end(); ++it) {
        const Settings::Rule &rule = *it;
        if (rule.pattern.empty() || rule.id.empty() || rule.severity == Severity::none || rule.tokenlist != tokenlist)
            continue;

        const char *pcreCompileErrorStr = nullptr;
        int erroffset = 0;
        pcre * const re = pcre_compile(rule.pattern.c_str(),0,&pcreCompileErrorStr,&erroffset,nullptr);
        if (!re) {
            if (pcreCompileErrorStr) {
                const std::string msg = "pcre_compile failed: " + std::string(pcreCompileErrorStr);
                const ErrorLogger::ErrorMessage errmsg(std::list<ErrorLogger::ErrorMessage::FileLocation>(),
                                                       emptyString,
                                                       Severity::error,
                                                       msg,
                                                       "pcre_compile",
                                                       false);

                reportErr(errmsg);
            }
            continue;
        }

        // Optimize the regex, but only if PCRE_CONFIG_JIT is available
#ifdef PCRE_CONFIG_JIT
        const char *pcreStudyErrorStr = nullptr;
        pcre_extra * const pcreExtra = pcre_study(re, PCRE_STUDY_JIT_COMPILE, &pcreStudyErrorStr);
        // pcre_study() returns NULL for both errors and when it can not optimize the regex.
        // The last argument is how one checks for errors.
        // It is NULL if everything works, and points to an error string otherwise.
        if (pcreStudyErrorStr) {
            const std::string msg = "pcre_study failed: " + std::string(pcreStudyErrorStr);
            const ErrorLogger::ErrorMessage errmsg(std::list<ErrorLogger::ErrorMessage::FileLocation>(),
                                                   emptyString,
                                                   Severity::error,
                                                   msg,
                                                   "pcre_study",
                                                   false);

            reportErr(errmsg);
            // pcre_compile() worked, but pcre_study() returned an error. Free the resources allocated by pcre_compile().
            pcre_free(re);
            continue;
        }
#else
        const pcre_extra * const pcreExtra = nullptr;
#endif

        int pos = 0;
        int ovector[30]= {0};
        while (pos < (int)str.size()) {
            const int pcreExecRet = pcre_exec(re, pcreExtra, str.c_str(), (int)str.size(), pos, 0, ovector, 30);
            if (pcreExecRet < 0) {
                const std::string errorMessage = pcreErrorCodeToString(pcreExecRet);
                if (!errorMessage.empty()) {
                    const ErrorLogger::ErrorMessage errmsg(std::list<ErrorLogger::ErrorMessage::FileLocation>(),
                                                           emptyString,
                                                           Severity::error,
                                                           std::string("pcre_exec failed: ") + errorMessage,
                                                           "pcre_exec",
                                                           false);

                    reportErr(errmsg);
                }
                break;
            }
            const unsigned int pos1 = (unsigned int)ovector[0];
            const unsigned int pos2 = (unsigned int)ovector[1];

            // jump to the end of the match for the next pcre_exec
            pos = (int)pos2;

            // determine location..
            ErrorLogger::ErrorMessage::FileLocation loc;
            loc.setfile(tokenizer.list.getSourceFilePath());
            loc.line = 0;

            std::size_t len = 0;
            for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next()) {
                len = len + 1U + tok->str().size();
                if (len > pos1) {
                    loc.setfile(tokenizer.list.getFiles().at(tok->fileIndex()));
                    loc.line = tok->linenr();
                    break;
                }
            }

            const std::list<ErrorLogger::ErrorMessage::FileLocation> callStack(1, loc);

            // Create error message
            std::string summary;
            if (rule.summary.empty())
                summary = "found '" + str.substr(pos1, pos2 - pos1) + "'";
            else
                summary = rule.summary;
            const ErrorLogger::ErrorMessage errmsg(callStack, tokenizer.list.getSourceFilePath(), rule.severity, summary, rule.id, false);

            // Report error
            reportErr(errmsg);
        }

        pcre_free(re);
#ifdef PCRE_CONFIG_JIT
        // Free up the EXTRA PCRE value (may be NULL at this point)
        if (pcreExtra) {
            pcre_free_study(pcreExtra);
        }
#endif
    }
#endif
}
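
Stripped of the rule bookkeeping and error reporting, the matching loop above amounts to calling pcre_exec repeatedly and resuming at the end of each match (ovector[1]). A minimal standalone sketch, with an arbitrary pattern and input (link with -lpcre):

#include <pcre.h>
#include <cstdio>
#include <string>

int main()
{
    const char *compileError = nullptr;
    int erroffset = 0;
    pcre * const re = pcre_compile(" if \\( [a-z]+ \\)", 0, &compileError, &erroffset, nullptr);
    if (!re) {
        std::printf("pcre_compile failed: %s\n", compileError);
        return 1;
    }

    const std::string str(" void f ( ) { if ( x ) { } if ( y ) { } }");
    int pos = 0;
    int ovector[30] = {0};
    while (pos < (int)str.size()
           && pcre_exec(re, nullptr, str.c_str(), (int)str.size(), pos, 0, ovector, 30) >= 0) {
        std::printf("match at [%d, %d): '%s'\n", ovector[0], ovector[1],
                    str.substr(ovector[0], ovector[1] - ovector[0]).c_str());
        pos = ovector[1];   // jump to the end of the match for the next pcre_exec
    }

    pcre_free(re);
    return 0;
}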
Example No. 14
void DocumentWriter::addData(const char* str, int len, bool flush)
{
    if (len == 0 && !flush)
        return;

    if (len == -1)
        len = strlen(str);

    Tokenizer* tokenizer = m_frame->document()->tokenizer();
    if (tokenizer && tokenizer->wantsRawData()) {
        if (len > 0)
            tokenizer->writeRawData(str, len);
        return;
    }
    
    if (!m_decoder) {
        if (Settings* settings = m_frame->settings()) {
            m_decoder = TextResourceDecoder::create(m_mimeType,
                settings->defaultTextEncodingName(),
                settings->usesEncodingDetector());
            Frame* parentFrame = m_frame->tree()->parent();
            // Set the hint encoding to the parent frame encoding only if
            // the parent and the current frames share the security origin.
            // We impose this condition because somebody can make a child frame 
            // containing a carefully crafted html/javascript in one encoding
            // that can be mistaken for hintEncoding (or related encoding) by
            // an auto detector. When interpreted in the latter, it could be
            // an attack vector.
            // FIXME: This might be too cautious for non-7bit-encodings and
            // we may consider relaxing this later after testing.
            if (canReferToParentFrameEncoding(m_frame, parentFrame))
                m_decoder->setHintEncoding(parentFrame->document()->decoder());
        } else
            m_decoder = TextResourceDecoder::create(m_mimeType, String());
        Frame* parentFrame = m_frame->tree()->parent();
        if (m_encoding.isEmpty()) {
            if (canReferToParentFrameEncoding(m_frame, parentFrame))
                m_decoder->setEncoding(parentFrame->document()->inputEncoding(), TextResourceDecoder::EncodingFromParentFrame);
        } else {
            m_decoder->setEncoding(m_encoding,
                m_encodingWasChosenByUser ? TextResourceDecoder::UserChosenEncoding : TextResourceDecoder::EncodingFromHTTPHeader);
        }
        m_frame->document()->setDecoder(m_decoder.get());
    }

    String decoded = m_decoder->decode(str, len);
    if (flush)
        decoded += m_decoder->flush();
    if (decoded.isEmpty())
        return;

    if (!m_receivedData) {
        m_receivedData = true;
        if (m_decoder->encoding().usesVisualOrdering())
            m_frame->document()->setVisuallyOrdered();
        m_frame->document()->recalcStyle(Node::Force);
    }

    if (tokenizer) {
        ASSERT(!tokenizer->wantsRawData());
        tokenizer->write(decoded, true);
    }
}
Example No. 15
/* Console::execute
 * Attempts to execute the command line given
 *******************************************************************/
void Console::execute(string command)
{
	wxLogMessage("> %s", command);

	// Don't bother doing anything else with an empty command
	if (command.size() == 0)
		return;

	// Add the command to the log
	cmd_log.insert(cmd_log.begin(), command);

	// Announce that a command has been executed
	MemChunk mc;
	announce("console_execute", mc);

	// Tokenize the command string
	Tokenizer tz;
	tz.openString(command);

	// Get the command name
	string cmd_name = tz.getToken();

	// Get all args
	string arg = tz.getToken();
	vector<string> args;
	while (arg != "")
	{
		args.push_back(arg);
		arg = tz.getToken();
	}

	// Check that it is a valid command
	for (size_t a = 0; a < commands.size(); a++)
	{
		// Found it, execute and return
		if (commands[a].getName() == cmd_name)
		{
			commands[a].execute(args);
			return;
		}
	}

	// Check if it is a cvar
	CVar* cvar = get_cvar(cmd_name);
	if (cvar)
	{
		// Arg(s) given, set cvar value
		if (args.size() > 0)
		{
			if (cvar->type == CVAR_BOOLEAN)
			{
				if (args[0] == "0" || args[0] == "false")
					*((CBoolCVar*)cvar) = false;
				else
					*((CBoolCVar*)cvar) = true;
			}
			else if (cvar->type == CVAR_INTEGER)
				*((CIntCVar*)cvar) = atoi(CHR(args[0]));
			else if (cvar->type == CVAR_FLOAT)
				*((CFloatCVar*)cvar) = (float)atof(CHR(args[0]));
			else if (cvar->type == CVAR_STRING)
				*((CStringCVar*)cvar) = args[0];
		}

		// Print cvar value
		string value = "";
		if (cvar->type == CVAR_BOOLEAN)
		{
			if (cvar->GetValue().Bool)
				value = "true";
			else
				value = "false";
		}
		else if (cvar->type == CVAR_INTEGER)
			value = S_FMT("%d", cvar->GetValue().Int);
		else if (cvar->type == CVAR_FLOAT)
			value = S_FMT("%1.4f", cvar->GetValue().Float);
		else
			value = ((CStringCVar*)cvar)->value;

		logMessage(S_FMT("\"%s\" = \"%s\"", cmd_name, value));

		if (cmd_name == "log_verbosity")
			Global::log_verbosity = cvar->GetValue().Int;

		return;
	}

	// Toggle global debug mode
	if (cmd_name == "debug")
	{
		Global::debug = !Global::debug;
		if (Global::debug)
			logMessage("Debugging stuff enabled");
		else
			logMessage("Debugging stuff disabled");

		return;
	}

	// Command not found
	logMessage(S_FMT("Unknown command: \"%s\"", cmd_name));
	return;
}
Example No. 16
void Line::process_function(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
  //create a function call object
  string temp = tokens.token().value();
  FunctionCall* funcCall = Registrar::generateCall(temp);
  CHECKERR((funcCall == NULL), func_err(temp))

  tokens.nextToken(); //move past funcname
  tokens.nextToken(); //move past open paren

  //put tokens into the argument lists.. We loop until we have seen the paren
  //that terminates this function call or until we have run out of tokens on
  //this line
  list< vector<Token> > args;
  vector<Token> currArg;
  int depth = 0;
  while(!((tokens.token().value() == ")" && depth == 0) || tokens.eol())) {

    //if we see a comma at paren depth zero, we have just reached the end of an
    //argument
    if (tokens.token().type() == COMMA && depth == 0) {
      assert(!currArg.empty());
      args.push_back(currArg);
      currArg.clear();
    }
    else {
      currArg.push_back(tokens.token());

      if (tokens.token() == Token(OPERATOR, "(", 0))
        ++depth;
      if (tokens.token() == Token(OPERATOR, ")", 0))
        --depth;
    }
    tokens.nextToken();
  }
  if (!currArg.empty())
    args.push_back(currArg);

  CHECKERR(
      tokens.eol() || tokens.token().value() != ")",
      arg_err(temp)
      )

  if (funcCall->hasVariableArgs()) {
    CHECKERR (
        args.size() < funcCall->getNumArgs(),
        arg_err(temp)
        )
  } else {
    CHECKERR(
        args.size() != funcCall->getNumArgs(),
        arg_err(temp)
        )
  }

  //Construct a Line for each argument
  list< vector<Token> >::iterator arg_itr = args.begin();
  for ( ; arg_itr != args.end(); ++arg_itr) {
    CHECKERR (((*arg_itr).size() == 0), arg_err(temp))
    Tokenizer tempToken(*arg_itr);
    Line* arg = new Line(tempToken, _parent, errs, true);
    funcCall->fillArg(arg);
  }

  addNewObject(funcCall);
}
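
The argument-gathering loop above treats a comma as an argument separator only when the parenthesis depth is zero, so nested calls stay inside a single argument. A self-contained sketch of that idea on a plain string:

#include <cstdio>
#include <string>
#include <vector>

// Split argument text on commas at parenthesis depth zero only,
// so nested calls such as g(b, c) remain one argument.
static std::vector<std::string> splitArgs(const std::string &argText)
{
    std::vector<std::string> args;
    std::string current;
    int depth = 0;

    for (char c : argText) {
        if (c == ',' && depth == 0) {
            args.push_back(current);
            current.clear();
            continue;
        }
        if (c == '(') ++depth;
        if (c == ')') --depth;
        current += c;
    }
    if (!current.empty())
        args.push_back(current);
    return args;
}

int main()
{
    for (const std::string &arg : splitArgs("a, g(b, c), d"))
        std::printf("arg: '%s'\n", arg.c_str());    // prints 'a', ' g(b, c)', ' d'
    return 0;
}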
Example No. 17
/* CTPatchEx::parse
 * Parses a ZDoom TEXTURES format patch definition
 *******************************************************************/
bool CTPatchEx::parse(Tokenizer& tz, uint8_t type)
{
	// Read basic info
	this->type = type;
	name = tz.getToken().Upper();
	tz.getToken();	// Skip ,
	offset_x = tz.getInteger();
	tz.getToken();	// Skip ,
	offset_y = tz.getInteger();

	// Check if there is any extended info
	if (tz.peekToken() == "{")
	{
		// Skip {
		tz.getToken();

		// Parse extended info
		string property = tz.getToken();
		while (property != "}")
		{
			// FlipX
			if (S_CMPNOCASE(property, "FlipX"))
				flip_x = true;

			// FlipY
			if (S_CMPNOCASE(property, "FlipY"))
				flip_y = true;

			// UseOffsets
			if (S_CMPNOCASE(property, "UseOffsets"))
				use_offsets = true;

			// Rotate
			if (S_CMPNOCASE(property, "Rotate"))
				rotation = tz.getInteger();

			// Translation
			if (S_CMPNOCASE(property, "Translation"))
			{
				// Add first translation string
				translation.parse(tz.getToken());

				// Add any subsequent translations (separated by commas)
				while (tz.peekToken() == ",")
				{
					tz.getToken();	// Skip ,
					translation.parse(tz.getToken());
				}

				blendtype = 1;
			}

			// Blend
			if (S_CMPNOCASE(property, "Blend"))
			{
				double val;
				wxColour col;
				blendtype = 2;

				// Read first value
				string first = tz.getToken();

				// If no second value, it's just a colour string
				if (tz.peekToken() != ",")
				{
					col.Set(first);
					colour.set(col.Red(), col.Green(), col.Blue());
				}
				else
				{
					// Second value could be alpha or green
					tz.getToken();	// Skip ,
					double second = tz.getDouble();

					// If no third value, it's an alpha value
					if (tz.peekToken() != ",")
					{
						col.Set(first);
						colour.set(col.Red(), col.Green(), col.Blue(), second*255);
						blendtype = 3;
					}
					else
					{
						// Third value exists, must be R,G,B,A format
						tz.getToken();	// Skip ,
						first.ToDouble(&val);
						colour.r = val*255;
						colour.g = second*255;
						colour.b = tz.getDouble()*255;
						if (tz.peekToken() != ",")
						{
							wxLogMessage("Invalid TEXTURES definition, expected ',', got '%s'", tz.getToken());
							return false;
						}
						tz.getToken();	// Skip ,
						colour.a = tz.getDouble()*255;
						blendtype = 3;
					}
				}
			}

			// Alpha
			if (S_CMPNOCASE(property, "Alpha"))
				alpha = tz.getFloat();

			// Style
			if (S_CMPNOCASE(property, "Style"))
				style = tz.getToken();

			// Read next property name
			property = tz.getToken();
		}
	}

	return true;
}
Example No. 18
void Line::process_newvar(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
  string type = tokens.token().value();
  tokens.nextToken();

  CHECKERR(tokens.eol() || tokens.token().type() != VAR, var_err())

  string name = tokens.token().value();
  tokens.nextToken();

  bool isArray = false;
  Line* sizePtr = NULL;

  //if the following token is an open index, we know that our new variable
  //is an array
  if (!tokens.eol() && tokens.token().type() == OPENINDEX) {
    vector<Token> size_expr;
    tokens.nextToken(); //move past openindex

    //get all the tokens that are part of the array's size expression
    while (!tokens.eol() && tokens.token().type() != CLOSEINDEX) {
      size_expr.push_back(tokens.token());
      tokens.nextToken();
    }
    CHECKERR ((size_expr.size() == 0), ara_err(name))
    CHECKERR ((tokens.token().type() != CLOSEINDEX), ara_err(name))

    isArray = true;
    Tokenizer tempToken(size_expr);
    sizePtr = new Line(tempToken, _parent, errs, true);
  }
  else
    tokens.previousToken();

  if (_parent->getVar(name) == NULL)
    add_newvar(type, name, sizePtr, isArray);
  else
    { CHECKERR(true, dec_err(name)) }
}
Example No. 19
int main() {
	ifstream sourceFile;
	TokenList tokens;
	Tokenizer tokenizer;

	//Read in a file line-by-line and tokenize each line
	sourceFile.open("test.vhd");
	if (!sourceFile.is_open())
	{
		cout << "Failed to open file" << endl;
		return 1;
	}

	while(!sourceFile.eof())
	{
		string lineA, lineB;

		getline(sourceFile, lineA);

		//while the current line ends with a line-continuation backslash,
		//append the next line to the current line
		while(lineA.length() > 0 && lineA[lineA.length()-1] == '\\')
		{
			lineA.erase(lineA.length()-1, 1);
			getline(sourceFile, lineB);
			lineA += lineB;
		}

		tokenizer.setString(&lineA);

		while(!tokenizer.isComplete())
		{
			tokens.append(tokenizer.getNextToken());
		}
		//Re-insert newline that was removed by the getline function
		tokens.append("\n");
	}



   Token *b = tokens.getFirst();
	while(b)
	{
		cout << b->getStringRep() << " ";//  << endl; // remove endl for submission
		b = b->getNext();
	}



	cout << endl << "----------------------------------------------"<< endl<< endl;



	removeComments(tokens);




	/*Test your tokenization of the file by traversing the tokens list and printing out the tokens*/
	Token *t = tokens.getFirst();

	while(t)
	{
		cout << t->getStringRep() << " ";//  << endl; // remove endl for submission
		t = t->getNext();
	}

	return 0;
}
Example No. 20
void Line::process_existing_var(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
  string temp = tokens.token().value();
  Variable* v = _parent->getVar(temp);
  CHECKERR ((v == NULL), und_err(temp))

  //Note: we must allow for arrays to be passed to RTBoundFuncs without
  //having to use braces [].
  if (tokens.isArg()) {
    addNewObject(v);
    return;
  }

  //When we see an array variable, it must be followed by an index
  if (v->getObjectType() == ArrayVarOT) {
    tokens.nextToken();
    CHECKERR((tokens.eol() || tokens.token().type()!=OPENINDEX),ara_err(temp))
    tokens.nextToken(); //move past OPENINDEX
    vector<Token> index_list;

    //get all the tokens that are part of the array's index expression
    while (!tokens.eol() && tokens.token().type() != CLOSEINDEX) {
      index_list.push_back(tokens.token());
      tokens.nextToken();
    }
    CHECKERR ((index_list.size() == 0), ara_err(temp))
    CHECKERR ((tokens.eol()||tokens.token().type()!=CLOSEINDEX), ara_err(temp))

    Tokenizer tempToken(index_list);
    Line* indexPtr = new Line(tempToken, _parent, errs, true);
    ArrayIndex* ai = new ArrayIndex(v, indexPtr);
    addNewObject(ai);
  }
  else {
    addNewObject(v);
  }
}
Example No. 21
void CLint::executeRules(const std::string &tokenlist, const Tokenizer &tokenizer)
{
    (void)tokenlist;
    (void)tokenizer;

#ifdef HAVE_RULES
    // Are there rules to execute?
    bool isrule = false;
    for (std::list<Settings::Rule>::const_iterator it = _settings.rules.begin(); it != _settings.rules.end(); ++it) {
        if (it->tokenlist == tokenlist)
            isrule = true;
    }

    // There is no rule to execute
    if (isrule == false)
        return;

    // Write all tokens in a string that can be parsed by pcre
    std::ostringstream ostr;
    for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next())
        ostr << " " << tok->str();
    const std::string str(ostr.str());

    for (std::list<Settings::Rule>::const_iterator it = _settings.rules.begin(); it != _settings.rules.end(); ++it) {
        const Settings::Rule &rule = *it;
        if (rule.pattern.empty() || rule.id.empty() || rule.severity.empty() || rule.tokenlist != tokenlist)
            continue;

        const char *error = nullptr;
        int erroffset = 0;
        pcre *re = pcre_compile(rule.pattern.c_str(),0,&error,&erroffset,nullptr);
        if (!re) {
            if (error) {
                ErrorLogger::ErrorMessage errmsg(std::list<ErrorLogger::ErrorMessage::FileLocation>(),
                                                 Severity::error,
                                                 error,
                                                 "pcre_compile",
                                                 false);

                reportErr(errmsg);
            }
            continue;
        }

        int pos = 0;
        int ovector[30];
        while (pos < (int)str.size() && 0 <= pcre_exec(re, nullptr, str.c_str(), (int)str.size(), pos, 0, ovector, 30)) {
            unsigned int pos1 = (unsigned int)ovector[0];
            unsigned int pos2 = (unsigned int)ovector[1];

            // jump to the end of the match for the next pcre_exec
            pos = (int)pos2;

            // determine location..
            ErrorLogger::ErrorMessage::FileLocation loc;
            loc.setfile(tokenizer.list.getSourceFilePath());
            loc.line = 0;

            std::size_t len = 0;
            for (const Token *tok = tokenizer.tokens(); tok; tok = tok->next()) {
                len = len + 1U + tok->str().size();
                if (len > pos1) {
                    loc.setfile(tokenizer.list.getFiles().at(tok->fileIndex()));
                    loc.line = tok->linenr();
                    break;
                }
            }

            const std::list<ErrorLogger::ErrorMessage::FileLocation> callStack(1, loc);

            // Create error message
            std::string summary;
            if (rule.summary.empty())
                summary = "found '" + str.substr(pos1, pos2 - pos1) + "'";
            else
                summary = rule.summary;
            const ErrorLogger::ErrorMessage errmsg(callStack, Severity::fromString(rule.severity), summary, rule.id, false);

            // Report error
            reportErr(errmsg);
        }

        pcre_free(re);
    }
#endif
}
Example No. 22
void Line::infToPost(Tokenizer& tokens, string& errs)
/*****************************************************************************/
{
  stack<Operator*> ops;

  //loop through all tokens and handle them
  for ( ; !tokens.eol(); tokens.nextToken()) {
    if (tokens.token().type() == FUNC) {
      process_function(tokens, errs);
      ++_tempSize; //for the return value
    }
    else if (tokens.token().type() == DECL) {
      process_newvar(tokens, errs);
    }
    else if (tokens.token().type() == CONSTANT) {
      process_number(tokens);
    }
    else if (tokens.token().type() == VAR) {
      process_existing_var(tokens, errs);
    }
    else if (tokens.token().type() == OPERATOR) {
      process_operator(tokens, ops);
    }
    else if (tokens.token().type() == SEMICOLON ||
	     tokens.token().type() == OPENBRACE) {
      tokens.nextToken();
      assert(tokens.eol());
      break;
    }
    else {
      CHECKERR(true, syntax_err(tokens.token().value()))
    }
    if (errs != "") return;
  }

  //put remaining opps at end of postfixLine
  while (!ops.empty()) {
    addNewObject(ops.top());
    ops.pop();
  }

  compile(errs, tokens);
  performNumericOps();
}
Example No. 23
/**
*  @brief
*    Parses a pl_module_plugin..pl_module_end block
*/
void PLPluginInfo::ParsePluginModuleBlock(const String &sPluginModuleBlock)
{
	// Setup regular expressions
	RegEx cRegExModulePlugin("^\\s*pl_module_plugin\\(\\s*\\\"(?<name>\\w*)\\\"\\s*\\)\\s*$");
	RegEx cRegExVendor("^\\s*pl_module_vendor\\s*\\(\\s*\\\"(?<text>.*)\\\"\\s*\\)\\s*$", RegEx::MatchCaseSensitive);
	RegEx cRegExLicense("^\\s*pl_module_license\\s*\\(\\s*\\\"(?<text>.*)\\\"\\s*\\)\\s*$", RegEx::MatchCaseSensitive);
	RegEx cRegExDescription("^\\s*pl_module_description\\s*\\(\\s*\\\"(?<text>.*)\\\"\\s*\\)\\s*$", RegEx::MatchCaseSensitive);
	RegEx cRegExVersion("^\\s*pl_module_version\\s*\\(\\s*\\\"(?<text>.*)\\\"\\s*\\)\\s*$", RegEx::MatchCaseSensitive);
	RegEx cRegExActive("^\\s*pl_module_active\\s*\\((?<num>\\d)\\)\\s*$");
	RegEx cRegExDelayed("^\\s*pl_module_delayed\\s*\\((?<num>\\d)\\)\\s*$");

	// Setup tokenizer
	Tokenizer cTokenizer;
	cTokenizer.SetDelimiters("\r\n");
	cTokenizer.SetSingleChars("");
	cTokenizer.SetQuotes("");
	cTokenizer.SetCommentStartTag("");
	cTokenizer.SetCommentEndTag("");
	cTokenizer.SetSingleLineComment("");
	cTokenizer.Start(sPluginModuleBlock);

	// Iterate through all lines
	String sLine = cTokenizer.GetNextToken();
	while (sLine.GetLength()) {
		// Check for pl_module_plugin
		if (cRegExModulePlugin.Match(sLine)) {
			m_sPluginName = cRegExModulePlugin.GetNameResult("name");

		// Check for pl_module_vendor
		} else if (cRegExVendor.Match(sLine)) {
			m_sVendor = cRegExVendor.GetNameResult("text");

		// Check for pl_module_license
		} else if (cRegExLicense.Match(sLine)) {
			m_sLicense = cRegExLicense.GetNameResult("text");

		// Check for pl_module_description
		} else if (cRegExDescription.Match(sLine)) {
			m_sDescription = cRegExDescription.GetNameResult("text");

		// Check for pl_module_version
		} else if (cRegExVersion.Match(sLine)) {
			m_sModuleVersion = cRegExVersion.GetNameResult("text");

		// Check for pl_module_active
		} else if (cRegExActive.Match(sLine)) {
			m_bActive = cRegExActive.GetNameResult("num").GetBool();

		// Check for pl_module_delayed
		} else if (cRegExDelayed.Match(sLine)) {
			m_bDelayed = cRegExDelayed.GetNameResult("num").GetBool();

		// Let PLPluginPlatformInfo parse the line
		} else {
			m_cPluginPlatformInfo.ParseLine(sLine);
		}

		// Next, please
		sLine = cTokenizer.GetNextToken();
	}
}
Example No. 24
int main() {
	ifstream sourceFile;
	TokenList tokens;

  //Lists for types of tokens
  TokenList operatorTokens;
  TokenList identifierTokens;
  TokenList literalTokens;
  TokenList commentBodyTokens;
  TokenList otherTokens;

	Tokenizer tokenizer;


	bool verboseModeFlag = false;
	string userInput;
	char parsedUserInput;

	vector<string> errorLines;
	string errorMissingEndIf = "Missing \"End If\" here: ";
	string errorMissingThen = "Missing \"Then\" here: ";
	string errorMissingIf = "Extra \"End If\" Here: ";
	string errorExtraThen = "Extra \"Then\" here: ";
	string errorTypeMismatch = "Type Mismatch of types : ";
	string errorWidthMismatch = "Width Mismatch: ";
	int numberOfTokens =0;
	int numberOfCondExp =0;
	int numberOfMissingEndIfs = 0;
	int numberOfMissingIfs =0;
	int numberofMissingThens =0;
	int numberofMissingProcess =0;
	int numberofMissingEndProcess=0;
	int numberofMissingOpenBracket =0;
	int numberofMissingCloseBracket =0;
	int ifEndifBalance =0; //if there is a positive number there are too many ifs, negative number too many endifs.
	int ifthenBalance =0; //Like above except with Then.
    int processBalance =0; // like above except with process - end process
    int BracketBalance =0; // check the missing bracket

	//Read in a file line-by-line and tokenize each line

	cout << "Enter the name of the file to open: ";
	cin >> userInput;

	sourceFile.open(userInput);
	if (!sourceFile.is_open()) {
		cout << "Failed to open file" << endl;
		return 1;
	}

	while(!sourceFile.eof()) {
		string line;
		getline(sourceFile, line);

		tokenizer.setString(&line);
		while(!tokenizer.isComplete()) {
			tokens.append(tokenizer.getNextToken());
		}
	}
    ///removeComments(tokens); ///test remove comment
    Token *t = tokens.getFirst();
    while(t)
	{
	    tokens.findAndSetTokenDetails(t); ///test findAndSetTokenDetails
        /*if(t->getTokenType() == 1 || t->getTokenType() == 2)
        {
           detailtoken = *(t->getTokenDetails());
           //cout << t->getStringRep() << " Token Type: " << t->getTokenType() <<" Token Detail: " << detailtoken.type << " Token Width: " << detailtoken.width <<endl;
            t = t->getNext();
        }
        else
        {
            //cout << t->getStringRep() << " Token Type: " << t->getTokenType() <<endl;

		t = t->getNext();
        }
        */
        t = t->getNext();

	}

	cout << "Enter Verbose Mode? Type \"1\" for Yes, Other inputs will be a No. : ";
	cin >> userInput;

    parsedUserInput = userInput[0];

	if (parsedUserInput == '1')
    {
        verboseModeFlag = true;
    }

    //This part counts the number of tokens.
    t = tokens.getFirst();
	while(t)
	{
	    numberOfTokens++;
		t = t->getNext();
	}

	//This part counts the number of conditional expressions.
	TokenList *conditionalTokenList = findAllConditionalExpressions(tokens);
	Token *condtokens = conditionalTokenList->getFirst();
    while(condtokens)
	{
	    if(condtokens->getStringRep() == "\\n")
        {
            numberOfCondExp++;
        }
		condtokens = condtokens->getNext();
	}

	//This part counts the number of ifs, end ifs.
	t = tokens.getFirst();
	Token *tError = tokens.getFirst();
	string errorCodeLine[7] = {" ", " ", " ", " ", " ", " ", " "};
	while(t)
	{
	    if(t->getStringRep() == "if")
        {
            if(t->getPrev()->getStringRep() != "end")
            {
                ifEndifBalance++;
                if(ifEndifBalance>0)
                {
                    { // this part gets the tokens around the error.
                    errorCodeLine[3] = t->getStringRep();
                    if(t->getPrev()!=nullptr)
                    {
                        tError = t->getPrev();
                        errorCodeLine[2] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[1] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[0] = tError->getStringRep();
                    }
                    if(t->getNext()!=nullptr)
                    {
                        tError = t->getNext();
                        errorCodeLine[4] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[5] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[6] = tError->getStringRep();
                    }
                    }
                    errorLines.push_back(errorMissingEndIf+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                    for(int ii=0; ii <7;ii++)
                    {
                        errorCodeLine[ii] = " ";
                    }
                }
                else if(ifEndifBalance<=0)
                {
                    errorLines.pop_back();
                }
            }
        }
        else if (t->getStringRep() == "end")
        {
            if(t->getNext()!= nullptr)
            {
                tError = t->getNext();
                if(tError ->getStringRep() == "if")
                {
                    ifEndifBalance--;
                    if(ifEndifBalance<0)
                    {
                        { // This part gets the tokens around the error
                        errorCodeLine[3] = t->getStringRep();
                        if(t->getPrev()!=nullptr)
                        {
                            tError = t->getPrev();
                            errorCodeLine[2] = tError->getStringRep();
                        }
                        if(tError->getPrev()!=nullptr)
                        {
                            tError = tError->getPrev();
                            errorCodeLine[1] = tError->getStringRep();
                        }
                        if(tError->getPrev()!=nullptr)
                        {
                            tError = tError->getPrev();
                            errorCodeLine[0] = tError->getStringRep();
                        }
                        if(t->getNext()!=nullptr)
                        {
                            tError = t->getNext();
                            errorCodeLine[4] = tError->getStringRep();
                        }
                        if(tError->getNext()!=nullptr)
                        {
                            tError = tError->getNext();
                            errorCodeLine[5] = tError->getStringRep();
                        }
                        if(tError->getNext()!=nullptr)
                        {
                            tError = tError->getNext();
                            errorCodeLine[6] = tError->getStringRep();
                        }
                        }
                        errorLines.push_back(errorMissingIf+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                        for(int ii=0; ii <7;ii++)
                        {
                            errorCodeLine[ii] = " ";
                        }
                    }
                    else if (ifEndifBalance>=0)
                    {
                        errorLines.pop_back();
                    }
                }
            }
        }
		t = t->getNext();
	}

	//this part finds the missing thens.
    t = tokens.getFirst();
    tError = tokens.getFirst();
	while(t)
	{
	    if(t->getStringRep() == "if" || t->getStringRep() == "elsif" )
        {
            if(t->getPrev() == nullptr || t->getPrev()->getStringRep() != "end")
            {
                ifthenBalance++;
                if(ifthenBalance>0)
                {
                    { // this part gets the tokens around the error.
                    errorCodeLine[3] = t->getStringRep();
                    if(t->getPrev()!=nullptr)
                    {
                        tError = t->getPrev();
                        errorCodeLine[2] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[1] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[0] = tError->getStringRep();
                    }
                    if(t->getNext()!=nullptr)
                    {
                        tError = t->getNext();
                        errorCodeLine[4] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[5] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[6] = tError->getStringRep();
                    }
                    }
                    errorLines.push_back(errorMissingThen+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                    for(int ii=0; ii <7;ii++)
                    {
                        errorCodeLine[ii] = " ";
                    }
                }
                else if(ifthenBalance<=0)
                {
                    errorLines.pop_back();
                }
            }

        }
        else if (t->getStringRep() == "then")
        {
            ifthenBalance--;
            if(ifthenBalance<0)
            {
                { // This part gets the tokens around the error
                errorCodeLine[3] = t->getStringRep();
                if(t->getPrev()!=nullptr)
                {
                    tError = t->getPrev();
                    errorCodeLine[2] = tError->getStringRep();
                }
                if(tError->getPrev()!=nullptr)
                {
                    tError = tError->getPrev();
                    errorCodeLine[1] = tError->getStringRep();
                }
                if(tError->getPrev()!=nullptr)
                {
                    tError = tError->getPrev();
                    errorCodeLine[0] = tError->getStringRep();
                }
                if(t->getNext()!=nullptr)
                {
                    tError = t->getNext();
                    errorCodeLine[4] = tError->getStringRep();
                }
                if(tError->getNext()!=nullptr)
                {
                    tError = tError->getNext();
                    errorCodeLine[5] = tError->getStringRep();
                }
                if(tError->getNext()!=nullptr)
                {
                    tError = tError->getNext();
                    errorCodeLine[6] = tError->getStringRep();
                }
                }
                errorLines.push_back(errorExtraThen+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                for(int ii=0; ii <7;ii++)
                {
                    errorCodeLine[ii] = " ";
                }
            }
            else if (ifthenBalance>=0)
            {
                errorLines.pop_back();
            }
        }
        t = t->getNext();
	}

/// ///////////////////////////////////////This part counts the number of Process and End Process////////////////////////////////////////
	t = tokens.getFirst();
	tError = tokens.getFirst();

	while(t)
	{
	    if(t->getStringRep() == "process")
        {
            if(t->getPrev() == nullptr || t->getPrev()->getStringRep() != "end")
            {
                processBalance++;
                if(processBalance>0)
                {
                    { // this part gets the tokens around the error.
                    errorCodeLine[3] = t->getStringRep();
                    if(t->getPrev()!=nullptr)
                    {
                        tError = t->getPrev();
                        errorCodeLine[2] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[1] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[0] = tError->getStringRep();
                    }
                    if(t->getNext()!=nullptr)
                    {
                        tError = t->getNext();
                        errorCodeLine[4] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[5] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[6] = tError->getStringRep();
                    }
                    }
                    errorLines.push_back("Missing \"End Process\" here: "+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                    for(int ii=0; ii <7;ii++)
                    {
                        errorCodeLine[ii] = " ";
                    }
                }
                else if(processBalance<=0)
                {
                    errorLines.pop_back();
                }
            }
        }
        else if (t->getStringRep() == "end")
        {
            if(t->getNext()!= nullptr)
            {
                tError = t->getNext();
                if(tError ->getStringRep() == "process")
                {
                    processBalance--;
                    if(processBalance<0)
                    {
                        { // This part gets the tokens around the error
                        errorCodeLine[3] = t->getStringRep();
                        if(t->getPrev()!=nullptr)
                        {
                            tError = t->getPrev();
                            errorCodeLine[2] = tError->getStringRep();
                        }
                        if(tError->getPrev()!=nullptr)
                        {
                            tError = tError->getPrev();
                            errorCodeLine[1] = tError->getStringRep();
                        }
                        if(tError->getPrev()!=nullptr)
                        {
                            tError = tError->getPrev();
                            errorCodeLine[0] = tError->getStringRep();
                        }
                        if(t->getNext()!=nullptr)
                        {
                            tError = t->getNext();
                            errorCodeLine[4] = tError->getStringRep();
                        }
                        if(tError->getNext()!=nullptr)
                        {
                            tError = tError->getNext();
                            errorCodeLine[5] = tError->getStringRep();
                        }
                        if(tError->getNext()!=nullptr)
                        {
                            tError = tError->getNext();
                            errorCodeLine[6] = tError->getStringRep();
                        }
                        }
                        errorLines.push_back("Missing \"Process\" here: "+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                        for(int ii=0; ii <7;ii++)
                        {
                            errorCodeLine[ii] = " ";
                        }
                    }
                    else if (processBalance>=0)
                    {
                        errorLines.pop_back();
                    }
                }
            }
        }
		t = t->getNext();
	}
	/// ///////////////////////////////////////This part counts the number of missing bracket////////////////////////////////////////
	t = tokens.getFirst();
	tError = tokens.getFirst();

	while(t)
	{
	    if(t->getStringRep() == "(")
        {
                BracketBalance++;
                if(BracketBalance>0)
                {
                    { // this part gets the tokens around the error.
                    errorCodeLine[3] = t->getStringRep();
                    if(t->getPrev()!=nullptr)
                    {
                        tError = t->getPrev();
                        errorCodeLine[2] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[1] = tError->getStringRep();
                    }
                    if(tError->getPrev()!=nullptr)
                    {
                        tError = tError->getPrev();
                        errorCodeLine[0] = tError->getStringRep();
                    }
                    if(t->getNext()!=nullptr)
                    {
                        tError = t->getNext();
                        errorCodeLine[4] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[5] = tError->getStringRep();
                    }
                    if(tError->getNext()!=nullptr)
                    {
                        tError = tError->getNext();
                        errorCodeLine[6] = tError->getStringRep();
                    }
                    }
                    errorLines.push_back("Missing \"Close Bracket  \")\"  \" here: "+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                    for(int ii=0; ii <7;ii++)
                    {
                        errorCodeLine[ii] = " ";
                    }
                }
                else if(BracketBalance<=0)
                {
                    errorLines.pop_back();
                }
        }
        else if (t->getStringRep() == ")")
        {
                    BracketBalance--;
                    if(BracketBalance<0)
                    {
                        { // This part gets the tokens around the error
                        errorCodeLine[3] = t->getStringRep();
                        if(t->getPrev()!=nullptr)
                        {
                            tError = t->getPrev();
                            errorCodeLine[2] = tError->getStringRep();
                        }
                        if(tError->getPrev()!=nullptr)
                        {
                            tError = tError->getPrev();
                            errorCodeLine[1] = tError->getStringRep();
                        }
                        if(tError->getPrev()!=nullptr)
                        {
                            tError = tError->getPrev();
                            errorCodeLine[0] = tError->getStringRep();
                        }
                        if(t->getNext()!=nullptr)
                        {
                            tError = t->getNext();
                            errorCodeLine[4] = tError->getStringRep();
                        }
                        if(tError->getNext()!=nullptr)
                        {
                            tError = tError->getNext();
                            errorCodeLine[5] = tError->getStringRep();
                        }
                        if(tError->getNext()!=nullptr)
                        {
                            tError = tError->getNext();
                            errorCodeLine[6] = tError->getStringRep();
                        }
                        }
                        errorLines.push_back("Missing \"Open Bracket \"(\"  \" here: "+errorCodeLine[0]+" "+errorCodeLine[1]+" "+errorCodeLine[2]+" "+errorCodeLine[3]+" "+errorCodeLine[4]+" "+errorCodeLine[5]+" "+errorCodeLine[6]);
                        for(int ii=0; ii <7;ii++)
                        {
                            errorCodeLine[ii] = " ";
                        }
                    }
                    else if (BracketBalance>=0)
                    {
                        errorLines.pop_back();
                    }
        }
		t = t->getNext();
	}
    ////////////////////////////////////set the balance ////////////////////////////////
	if (ifEndifBalance == 0)
    {
        numberOfMissingEndIfs =0;
        numberOfMissingIfs = 0;
    }
    else if (ifEndifBalance > 0)
    {
        numberOfMissingEndIfs = abs(ifEndifBalance);
        numberOfMissingIfs = 0;
    }
    else if (ifEndifBalance <0)
    {
        numberOfMissingEndIfs =0;
        numberOfMissingIfs = abs(ifEndifBalance);
    }

    if (ifthenBalance == 0)
    {
        numberofMissingThens =0;
    }
    else if (ifthenBalance > 0)
    {
        numberofMissingThens = abs(ifthenBalance);
    }
    else if (ifthenBalance <0)
    {
        numberOfMissingIfs = numberOfMissingIfs + abs(ifthenBalance);
        numberofMissingThens =0;
    }

    if (processBalance == 0)
    {
        numberofMissingProcess=0;
        numberofMissingEndProcess=0;
    }
    else if (processBalance > 0)
    {
       numberofMissingEndProcess = abs(processBalance);
       numberofMissingProcess =0;

    }
    else if (processBalance <0)
    {
        numberofMissingProcess = numberofMissingProcess + abs(processBalance);
        numberofMissingEndProcess =0;
    }


    if (BracketBalance == 0)
    {
        numberofMissingOpenBracket=0;
        numberofMissingCloseBracket=0;
    }
    else if (BracketBalance > 0)
    {
       numberofMissingCloseBracket = abs(BracketBalance);
       numberofMissingOpenBracket =0;

    }
    else if (BracketBalance < 0)
    {
        numberofMissingOpenBracket = numberofMissingOpenBracket + abs(BracketBalance);
        numberofMissingCloseBracket=0;
    }

    t = tokens.getFirst();
    Token *t_before = tokens.getFirst();
    Token *t_after = tokens.getFirst();
    tokenDetails *t_before_details = nullptr;
    tokenDetails *t_after_details = nullptr;

    while(t)
    {
        if(t->isOperator())
        {
            if(t->getPrev() != nullptr && t->getNext() != nullptr)
            {
                t_before = t->getPrev();
                t_after  = t->getNext();

                t_before_details = t_before->getTokenDetails();
                t_after_details  = t_after->getTokenDetails();

                // Only compare operands that actually carry type information.
                if(t_before_details != nullptr && t_after_details != nullptr)
                {
                    if(t_before_details->type != t_after_details->type)
                    {
                        errorLines.push_back(errorTypeMismatch+ t_before->getStringRep()+" of type "+t_before_details->type+" with operator \"" +t->getStringRep() +"\" and " + t_after->getStringRep() +" of type " + t_after_details->type);
                    }
                    else if (t_before_details->width != t_after_details->width)
                    {
                        errorLines.push_back(errorWidthMismatch+ t_before->getStringRep()+" of width "+to_string(t_before_details->width)+" with operator \"" +t->getStringRep()+"\" and " + t_after->getStringRep() +" of width " + to_string(t_after_details->width));
                    }
                }
            }
            else if(t->getPrev() == nullptr && t->getNext() == nullptr)
            {
                errorLines.push_back("Error: Cannot find Objects for \""+t->getStringRep()+"\" Operator");
            }
            else if (t->getPrev() != nullptr && t->getNext() == nullptr)
            {
                t_before = t->getPrev();
                errorLines.push_back("Error: Cannot find next object for \""+t_before->getStringRep()+ " " +t->getStringRep()+"\" Operator");
            }
            else
            {
                t_after = t->getNext();
                errorLines.push_back("Error: Cannot find previous object for \""+t->getStringRep()+ " " + t_after->getStringRep()+ "\" Operator");
            }
        }
        t = t->getNext();
    }


	cout << "Number of Tokens                               : " << numberOfTokens <<endl;
	cout << "Number of Conditional Expressions              : " << numberOfCondExp <<endl;
	cout << "Number of Missing \"end if\"s                    : " << numberOfMissingEndIfs << endl;
	cout << "Number of Missing \"if\"s                        : " << numberOfMissingIfs << endl;
	cout << "Number of Missing \"Then\"s                      : " << numberofMissingThens << endl;
	cout << "Number of Missing \"Process\"                    : " << numberofMissingProcess << endl;
	cout << "Number of Missing \"End Process\"                : " << numberofMissingEndProcess << endl;
	cout << "Number of Missing \"Open Bracket \"(\"  \"         : " << numberofMissingOpenBracket << endl;
	cout << "Number of Missing \"Close Bracket \")\" \"         : " << numberofMissingCloseBracket << endl;


    if(verboseModeFlag)
    {
        cout << "Error Messages: " << endl;
        for ( vector<string>::iterator it = errorLines.begin() ; it != errorLines.end(); ++it)
        {
            cout << *it << endl;
        }
    }

  /* For your testing purposes only */
    ///test remove token type
    ///removeTokensOfType(tokens, T_Operator);
  /* Ensure that tokens have all type information set*/

  /* Create operator,identifier,literal, etc. tokenLists from the master list of tokens */


	return 0;
}
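
The six balance checks above capture the same seven-token window around each error by hand. A minimal sketch of how that capture could be factored into a helper, assuming only the Token methods already used in main() (getPrev, getNext, getStringRep returning string); the helper name is hypothetical and not part of the original program:

// Hypothetical helper (not in the original program): collect up to three
// tokens on each side of 't' into one context string for an error message.
static string contextAround(Token *t)
{
    string before, after;
    Token *p = t->getPrev();
    for (int i = 0; i < 3 && p != nullptr; ++i, p = p->getPrev())
    {
        before = p->getStringRep() + " " + before;
    }
    Token *n = t->getNext();
    for (int i = 0; i < 3 && n != nullptr; ++i, n = n->getNext())
    {
        after += " " + n->getStringRep();
    }
    return before + t->getStringRep() + after;
}

// Usage sketch: errorLines.push_back(errorMissingEndIf + contextAround(t));
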
Esempio n. 25
0
// -----------------------------------------------------------------------------
// Opens a map from a mapdesc_t
// -----------------------------------------------------------------------------
bool MapPreviewCanvas::openMap(Archive::MapDesc map)
{
	// All errors = invalid map
	Global::error = "Invalid map";

	// Check if this map is a pk3 map
	bool map_archive = false;
	if (map.archive)
	{
		map_archive = true;

		// Attempt to open entry as wad archive
		temp_archive_ = new WadArchive();
		if (!temp_archive_->open(map.head))
		{
			delete temp_archive_;
			return false;
		}

		// Detect maps
		auto maps = temp_archive_->detectMaps();

		// Set map if there are any in the archive
		if (!maps.empty())
			map = maps[0];
		else
			return false;
	}

	// Parse UDMF map
	if (map.format == MapFormat::UDMF)
	{
		ArchiveEntry* udmfdata = nullptr;
		for (auto mapentry = map.head; mapentry != map.end; mapentry = mapentry->nextEntry())
		{
			// Check entry type
			if (mapentry->type() == EntryType::fromId("udmf_textmap"))
			{
				udmfdata = mapentry;
				break;
			}
		}
		if (udmfdata == nullptr)
			return false;

		// Start parsing
		Tokenizer tz;
		tz.openMem(udmfdata->data(), map.head->name());

		// Get first token
		wxString token       = tz.getToken();
		size_t   vertcounter = 0, linecounter = 0, thingcounter = 0;
		while (!token.IsEmpty())
		{
			if (!token.CmpNoCase("namespace"))
			{
				//  skip till we reach the ';'
				do
				{
					token = tz.getToken();
				} while (token.Cmp(";"));
			}
			else if (!token.CmpNoCase("vertex"))
			{
				// Get X and Y properties
				bool   gotx = false;
				bool   goty = false;
				double x    = 0.;
				double y    = 0.;
				do
				{
					token = tz.getToken();
					if (!token.CmpNoCase("x") || !token.CmpNoCase("y"))
					{
						bool isx = !token.CmpNoCase("x");
						token    = tz.getToken();
						if (token.Cmp("="))
						{
							Log::error(wxString::Format("Bad syntax for vertex %i in UDMF map data", vertcounter));
							return false;
						}
						if (isx)
							x = tz.getDouble(), gotx = true;
						else
							y = tz.getDouble(), goty = true;
						// skip to end of declaration after each key
						do
						{
							token = tz.getToken();
						} while (token.Cmp(";"));
					}
				} while (token.Cmp("}"));
				if (gotx && goty)
					addVertex(x, y);
				else
				{
					Log::error(wxString::Format("Wrong vertex %i in UDMF map data", vertcounter));
					return false;
				}
				vertcounter++;
			}
			else if (!token.CmpNoCase("linedef"))
			{
				bool   special  = false;
				bool   twosided = false;
				bool   gotv1 = false, gotv2 = false;
				size_t v1 = 0, v2 = 0;
				do
				{
					token = tz.getToken();
					if (!token.CmpNoCase("v1") || !token.CmpNoCase("v2"))
					{
						bool isv1 = !token.CmpNoCase("v1");
						token     = tz.getToken();
						if (token.Cmp("="))
						{
							Log::error(wxString::Format("Bad syntax for linedef %i in UDMF map data", linecounter));
							return false;
						}
						if (isv1)
							v1 = tz.getInteger(), gotv1 = true;
						else
							v2 = tz.getInteger(), gotv2 = true;
						// skip to end of declaration after each key
						do
						{
							token = tz.getToken();
						} while (token.Cmp(";"));
					}
					else if (!token.CmpNoCase("special"))
					{
						special = true;
						// skip to end of declaration after each key
						do
						{
							token = tz.getToken();
						} while (token.Cmp(";"));
					}
					else if (!token.CmpNoCase("sideback"))
					{
						twosided = true;
						// skip to end of declaration after each key
						do
						{
							token = tz.getToken();
						} while (token.Cmp(";"));
					}
				} while (token.Cmp("}"));
				if (gotv1 && gotv2)
					addLine(v1, v2, twosided, special);
				else
				{
					Log::error(wxString::Format("Wrong line %i in UDMF map data", linecounter));
					return false;
				}
				linecounter++;
			}
			else if (S_CMPNOCASE(token, "thing"))
			{
				// Get X and Y properties
				bool   gotx = false;
				bool   goty = false;
				double x    = 0.;
				double y    = 0.;
				do
				{
					token = tz.getToken();
					if (!token.CmpNoCase("x") || !token.CmpNoCase("y"))
					{
						bool isx = !token.CmpNoCase("x");
						token    = tz.getToken();
						if (token.Cmp("="))
						{
							Log::error(wxString::Format("Bad syntax for thing %i in UDMF map data", vertcounter));
							return false;
						}
						if (isx)
							x = tz.getDouble(), gotx = true;
						else
							y = tz.getDouble(), goty = true;
						// skip to end of declaration after each key
						do
						{
							token = tz.getToken();
						} while (token.Cmp(";"));
					}
				} while (token.Cmp("}"));
				if (gotx && goty)
					addThing(x, y);
				else
				{
					Log::error(wxString::Format("Wrong thing %i in UDMF map data", vertcounter));
					return false;
				}
				vertcounter++;
			}
			else
			{
				// Check for side or sector definition (increase counts)
				if (S_CMPNOCASE(token, "sidedef"))
					n_sides_++;
				else if (S_CMPNOCASE(token, "sector"))
					n_sectors_++;

				// map preview ignores sidedefs, sectors, comments,
				// unknown fields, etc. so skip to end of block
				do
				{
					token = tz.getToken();
				} while (token.Cmp("}") && !token.empty());
			}
			// Iterate to next token
			token = tz.getToken();
		}
	}

	// Non-UDMF map
	if (map.format != MapFormat::UDMF)
	{
		// Read vertices (required)
		if (!readVertices(map.head, map.end, map.format))
			return false;

		// Read linedefs (required)
		if (!readLines(map.head, map.end, map.format))
			return false;

		// Read things
		if (map.format != MapFormat::UDMF)
			readThings(map.head, map.end, map.format);

		// Read sides & sectors (count only)
		ArchiveEntry* sidedefs = nullptr;
		ArchiveEntry* sectors  = nullptr;
		while (map.head)
		{
			// Check entry type
			if (map.head->type() == EntryType::fromId("map_sidedefs"))
				sidedefs = map.head;
			if (map.head->type() == EntryType::fromId("map_sectors"))
				sectors = map.head;

			// Exit loop if we've reached the end of the map entries
			if (map.head == map.end)
				break;
			else
				map.head = map.head->nextEntry();
		}
		if (sidedefs && sectors)
		{
			// Doom/Hexen map
			if (map.format != MapFormat::Doom64)
			{
				n_sides_   = sidedefs->size() / 30;
				n_sectors_ = sectors->size() / 26;
			}

			// Doom64 map
			else
			{
				n_sides_   = sidedefs->size() / 12;
				n_sectors_ = sectors->size() / 16;
			}
		}
	}

	// Clean up
	if (map_archive)
	{
		temp_archive_->close();
		delete temp_archive_;
		temp_archive_ = nullptr;
	}

	// Refresh map
	Refresh();

	return true;
}
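
The UDMF branch of openMap() skips to the next ';' or '}' with the same do/while loop in several places. A minimal sketch of that skip as a helper, assuming the Tokenizer::getToken and wxString calls already used above; the helper itself is not part of SLADE:

// Hypothetical helper (not part of SLADE): consume tokens until 'delimiter'
// or the end of the input is reached.
static void skipUntil(Tokenizer& tz, const wxString& delimiter)
{
	wxString token;
	do
	{
		token = tz.getToken();
	} while (token.Cmp(delimiter) && !token.IsEmpty());
}

// Usage sketch inside the key/value loops above: skipUntil(tz, ";");
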
Esempio n. 26
0
int main(int argc, const char * argv[]) {

	auto timeStart = std::chrono::high_resolution_clock::now();

	if (argc != 2) {
		std::cout << "Error: need to pass in one argument" << std::endl;
		return 1;
	}

	std::vector<std::string> modifiedFiles = findModifiedFiles(argv[1]);

	std::cout << "modified files: " << modifiedFiles.size() << std::endl;

	// TODO: we'll use this list of modified files somehow...
	// TODO: after a successful compilation, modify 'last_hashes.txt' with the new hashes

	std::string fileLine;
	std::string contents = "";
	std::string pathToFile = argv[1];


	auto start_open_file = std::chrono::high_resolution_clock::now();
	std::ifstream myfile(pathToFile);
	if (myfile.is_open()) {
		while(getline(myfile, fileLine)) {
			contents += fileLine;
			contents += "\n";
		}
		myfile.close();
	}
	else {
		std::cout << "Error: could not open file" << std::endl;
		return 1;
	}
	auto end_open_file = std::chrono::high_resolution_clock::now();



	auto start_tokenize = std::chrono::high_resolution_clock::now();
	Tokenizer tokenizer;
	std::list<Token> tokenizedList = tokenizer.process(contents);
	auto end_tokenize = std::chrono::high_resolution_clock::now();

	// auto timeFindingClasses = std::chrono::high_resolution_clock::now();
	// syntatic sugar to switch a.b(c) to b(a,c)
	// if (!addFunctionSugar(tokenizedList)) {
	// 	return 0;
	// }
	// auto timeAsteriskPtr = std::chrono::high_resolution_clock::now();

	/*
	auto start_vectorization = std::chrono::high_resolution_clock::now();
	// http://stackoverflow.com/questions/5218713/one-liner-to-convert-from-listt-to-vectort
	std::vector<Token> tokenizedList{ std::begin(list), std::end(list) };
	auto end_vectorization = std::chrono::high_resolution_clock::now();
	*/

	// print out tokens
	// for (std::list<Token>::iterator it = tokenizedList.begin(); it != tokenizedList.end(); ++it)
	// 	std::cout << *it << "\n";

	auto start_parse_rules = std::chrono::high_resolution_clock::now();
	std::vector<bool> leftToRight;
	std::vector<ParseRule> listOfRules;
	addParseRules(leftToRight, listOfRules);
	Parser foo(leftToRight, listOfRules);
	auto end_parse_rules = std::chrono::high_resolution_clock::now();

	auto start_parse = std::chrono::high_resolution_clock::now();
	ParseNode* parse_tree = foo.getParseTree(&tokenizedList);
	auto end_parse = std::chrono::high_resolution_clock::now();

	std::cout << "\n\n\n";
	parse_tree->print();
	std::cout << "\n\nhash: " << parse_tree->to_hash() << "\n\n";

	auto start_verification = std::chrono::high_resolution_clock::now();
	ParserVerifier pv(&foo);
	pv.verify(parse_tree);
	auto end_verification = std::chrono::high_resolution_clock::now();

	Compiler comp;
	comp.compile(parse_tree);

	std::cout << "Read Files : " << std::chrono::duration_cast<std::chrono::nanoseconds>(end_open_file - start_open_file).count() / 1000 << " µs (" << std::chrono::duration_cast<std::chrono::nanoseconds>(end_open_file - start_open_file).count() / tokenizedList.size() << " ns per token)\n";
	std::cout << "Tokenizing : " << std::chrono::duration_cast<std::chrono::nanoseconds>(end_tokenize - start_tokenize).count() / 1000 << " µs (" << std::chrono::duration_cast<std::chrono::nanoseconds>(end_tokenize - start_tokenize).count() / tokenizedList.size() << " ns per token)\n";
	// std::cout << "List->Vect : " << std::chrono::duration_cast<std::chrono::nanoseconds>(end_vectorization - start_vectorization).count() / 1000 << " µs (" << std::chrono::duration_cast<std::chrono::nanoseconds>(end_vectorization - start_vectorization).count() / tokenizedList.size() << " ns per token)\n";
	std::cout << "Parse Rules: " << std::chrono::duration_cast<std::chrono::nanoseconds>(end_parse_rules - start_parse_rules).count() / 1000 << " µs (" << std::chrono::duration_cast<std::chrono::nanoseconds>(end_parse_rules - start_parse_rules).count() / tokenizedList.size() << " ns per token)\n";
	std::cout << "Parser     : " << std::chrono::duration_cast<std::chrono::nanoseconds>(end_parse - start_parse).count() / 1000 << " µs (" << std::chrono::duration_cast<std::chrono::nanoseconds>(end_parse - start_parse).count() / tokenizedList.size() << " ns per token)\n";
	std::cout << "Parse Ver. : " << std::chrono::duration_cast<std::chrono::nanoseconds>(end_verification - start_verification).count() / 1000 << " µs (" << std::chrono::duration_cast<std::chrono::nanoseconds>(end_verification - start_verification).count() / tokenizedList.size() << " ns per token)\n";
	std::cout << "Total Time : " << std::chrono::duration_cast<std::chrono::nanoseconds>(end_verification - start_open_file).count() / 1000 << " µs (" << std::chrono::duration_cast<std::chrono::nanoseconds>(end_verification - start_open_file).count() / tokenizedList.size() << " ns per token)\n";
	return 0;
}
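
Each phase of the driver above is timed with the same start/end/duration_cast pattern. A small RAII timer is one way to wrap that pattern; the sketch below is an illustration only (not part of the original code) and uses nothing beyond <chrono>, <iostream> and <string>:

#include <chrono>
#include <iostream>
#include <string>

// Hypothetical RAII timer: prints the time elapsed between construction and
// destruction, in microseconds.
struct ScopedTimer
{
	std::string label;
	std::chrono::high_resolution_clock::time_point start;

	explicit ScopedTimer(std::string l)
		: label(std::move(l)), start(std::chrono::high_resolution_clock::now()) {}

	~ScopedTimer()
	{
		auto end = std::chrono::high_resolution_clock::now();
		std::cout << label << ": "
		          << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()
		          << " µs\n";
	}
};

// Usage sketch: { ScopedTimer timer("Tokenizing"); tokenizer.process(contents); }
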
Esempio n. 27
0
void ArchiveOperations::removeUnusedTextures(Archive* archive)
{
	// Check archive was given
	if (!archive)
		return;

	// --- Build list of used textures ---
	TexUsedMap used_textures;
	int        total_maps = 0;

	// Get all SIDEDEFS entries
	Archive::SearchOptions opt;
	opt.match_type = EntryType::fromId("map_sidedefs");
	auto sidedefs  = archive->findAll(opt);
	total_maps += sidedefs.size();

	// Go through and add used textures to list
	DoomMapFormat::SideDef sdef;
	wxString               tex_lower, tex_middle, tex_upper;
	for (auto& sidedef : sidedefs)
	{
		int nsides = sidedef->size() / 30;
		sidedef->seek(0, SEEK_SET);
		for (int s = 0; s < nsides; s++)
		{
			// Read side data
			sidedef->read(&sdef, 30);

			// Get textures
			tex_lower  = wxString::FromAscii(sdef.tex_lower, 8);
			tex_middle = wxString::FromAscii(sdef.tex_middle, 8);
			tex_upper  = wxString::FromAscii(sdef.tex_upper, 8);

			// Add to used textures list
			used_textures[tex_lower].used  = true;
			used_textures[tex_middle].used = true;
			used_textures[tex_upper].used  = true;
		}
	}

	// Get all TEXTMAP entries
	opt.match_name = "TEXTMAP";
	opt.match_type = EntryType::fromId("udmf_textmap");
	auto udmfmaps  = archive->findAll(opt);
	total_maps += udmfmaps.size();

	// Go through and add used textures to list
	Tokenizer tz;
	tz.setSpecialCharacters("{};=");
	for (auto& udmfmap : udmfmaps)
	{
		// Open in tokenizer
		tz.openMem(udmfmap->data(), "UDMF TEXTMAP");

		// Go through text tokens
		wxString token = tz.getToken();
		while (!token.IsEmpty())
		{
			// Check for sidedef definition
			if (token == "sidedef")
			{
				tz.getToken(); // Skip {

				token = tz.getToken();
				while (token != "}")
				{
					// Check for texture property
					if (token == "texturetop" || token == "texturemiddle" || token == "texturebottom")
					{
						tz.getToken(); // Skip =
						used_textures[tz.getToken()].used = true;
					}

					token = tz.getToken();
				}
			}

			// Next token
			token = tz.getToken();
		}
	}

	// Check if any maps were found
	if (total_maps == 0)
		return;

	// Find all TEXTUREx entries
	opt.match_name  = "";
	opt.match_type  = EntryType::fromId("texturex");
	auto tx_entries = archive->findAll(opt);

	// Go through texture lists
	PatchTable    ptable; // Dummy patch table, patch info not needed here
	wxArrayString unused_tex;
	for (auto& tx_entrie : tx_entries)
	{
		TextureXList txlist;
		txlist.readTEXTUREXData(tx_entrie, ptable);

		// Go through textures
		bool anim = false;
		for (unsigned t = 1; t < txlist.size(); t++)
		{
			wxString texname = txlist.texture(t)->name();

			// Check for animation start
			for (int b = 0; b < n_tex_anim; b++)
			{
				if (texname == tex_anim_start[b])
				{
					anim = true;
					break;
				}
			}

			// Check for animation end
			bool thisend = false;
			for (int b = 0; b < n_tex_anim; b++)
			{
				if (texname == tex_anim_end[b])
				{
					anim    = false;
					thisend = true;
					break;
				}
			}

			// Mark if unused and not part of an animation
			if (!used_textures[texname].used && !anim && !thisend)
				unused_tex.Add(txlist.texture(t)->name());
		}
	}

	// Pop up a dialog with a checkbox list of unused textures
	wxMultiChoiceDialog dialog(
		theMainWindow,
		"The following textures are not used in any map,\nselect which textures to delete",
		"Delete Unused Textures",
		unused_tex);

	// Get base resource textures (if any)
	auto                  base_resource = App::archiveManager().baseResourceArchive();
	vector<ArchiveEntry*> base_tx_entries;
	if (base_resource)
		base_tx_entries = base_resource->findAll(opt);
	PatchTable   pt_temp;
	TextureXList tx;
	for (auto& texturex : base_tx_entries)
		tx.readTEXTUREXData(texturex, pt_temp, true);
	vector<wxString> base_resource_textures;
	for (unsigned a = 0; a < tx.size(); a++)
		base_resource_textures.push_back(tx.texture(a)->name());

	// Determine which textures to check initially
	wxArrayInt selection;
	for (unsigned a = 0; a < unused_tex.size(); a++)
	{
		bool swtex = false;

		// Check for switch texture
		if (unused_tex[a].StartsWith("SW1"))
		{
			// Get counterpart switch name
			wxString swname = unused_tex[a];
			swname.Replace("SW1", "SW2", false);

			// Check if its counterpart is used
			if (used_textures[swname].used)
				swtex = true;
		}
		else if (unused_tex[a].StartsWith("SW2"))
		{
			// Get counterpart switch name
			wxString swname = unused_tex[a];
			swname.Replace("SW2", "SW1", false);

			// Check if its counterpart is used
			if (used_textures[swname].used)
				swtex = true;
		}

		// Check for base resource texture
		bool br_tex = false;
		for (auto& texture : base_resource_textures)
		{
			if (texture.CmpNoCase(unused_tex[a]) == 0)
			{
				Log::info(3, "Texture " + texture + " is in base resource");
				br_tex = true;
				break;
			}
		}

		if (!swtex && !br_tex)
			selection.Add(a);
	}
	dialog.SetSelections(selection);

	int n_removed = 0;
	if (dialog.ShowModal() == wxID_OK)
	{
		// Get selected textures
		selection = dialog.GetSelections();

		// Go through texture lists
		for (auto& entry : tx_entries)
		{
			TextureXList txlist;
			txlist.readTEXTUREXData(entry, ptable);

			// Go through selected textures to delete
			for (int i : selection)
			{
				// Get texture index
				int index = txlist.textureIndex(WxUtils::strToView(unused_tex[i]));

				// Delete it from the list (if found)
				if (index >= 0)
				{
					txlist.removeTexture(index);
					n_removed++;
				}
			}

			// Write texture list data back to entry
			txlist.writeTEXTUREXData(entry, ptable);
		}
	}

	wxMessageBox(wxString::Format("Removed %d unused textures", n_removed));
}
int main() {
  UnqliteWrapper<string> * table = new UnqliteWrapper<string>();
  char * result;
  int result_size;
  char tok[SAMPLE_SIZE];
  Tokenizer * tokenizer = new Tokenizer();
  DoubleList<
    DoubleNode<string>,
    string
  > * list = new DoubleList<
    DoubleNode<string>,
    string
  >();
  CstringWrapper * wrapper = new CstringWrapper();
  TokenizerList<
    string,
    CstringWrapper,
    DoubleList<
      DoubleNode<string>,
      string
    >,
    Tokenizer
  > * tokenizer_list = new TokenizerList<
    string,
    CstringWrapper,
    DoubleList<
      DoubleNode<string>,
      string
    >,
    Tokenizer
  >(wrapper, tokenizer);
  string key("a");
  string anotherkey("b");
  table->insert(key, "a");
  table->insert(key, "b");
  table->insert(key, "c");
  table->insert(key, "dd");
  table->insert(key, "ee");

  result_size = table->value_size(key);
  
  result = new char[result_size];
  table->find(key, result);
  cout<<"result size: "<<result_size<<endl;
  cout<<key<<" "<<result<<endl;

  tokenizer->set_buffer(result, result_size);
  while(tokenizer->token(tok)) {
    cout<<"token "<<tok<<endl;
    memset(tok, '\0', SAMPLE_SIZE);
  }
  cout<<"token "<<tok<<endl;
  memset(tok, '\0', SAMPLE_SIZE);

  tokenizer->rewind();
  tokenizer_list->tokens(key, list);
  while(list->get_head()) {
    cout<<"from list "<<list->get_head()->value<<endl;
    list->pop_left();
  }

  cout<<"exists "<<key<<" "<<table->exists(key)<<endl;
  cout<<"exists "<<anotherkey<<" "<<table->exists(anotherkey)<<endl;

  cout<<"insert unique "<<anotherkey<<" "<<table->insert_unique(anotherkey, "a")<<endl;
  cout<<"insert unique "<<anotherkey<<" "<<table->insert_unique(anotherkey, "b")<<endl;

  table->remove(anotherkey);
  cout<<"exists "<<anotherkey<<" "<<table->exists(anotherkey)<<endl;
  delete table;
  delete[] result;
  delete tokenizer;
  delete list;
  delete wrapper;
  delete tokenizer_list;
}
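
The value buffer above is allocated with new char[] and released by hand, which is easy to get wrong. A minimal sketch of the same lookup with an automatically managed buffer, assuming the UnqliteWrapper::value_size and find signatures used in the example; the print_value helper is hypothetical:

#include <vector>

// Sketch only: a std::vector<char> owns the value buffer, so no manual
// delete[] is needed. The extra '\0' byte is a defensive assumption so the
// buffer prints safely as a C string.
void print_value(UnqliteWrapper<string> *table, const string &key) {
  std::vector<char> buffer(table->value_size(key) + 1, '\0');
  table->find(key, buffer.data());
  cout << key << " " << buffer.data() << endl;
}
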
Esempio n. 29
0
/* TextureXList::readTEXTURESData
 * Reads in a ZDoom-format TEXTURES entry. Returns true on success,
 * false otherwise
 *******************************************************************/
bool TextureXList::readTEXTURESData(ArchiveEntry* entry)
{
	// Check for empty entry
	if (!entry)
	{
		Global::error = "Attempt to read texture data from NULL entry";
		return false;
	}
	if (entry->getSize() == 0)
	{
		txformat = TXF_TEXTURES;
		return true;
	}

	// Get text to parse
	Tokenizer tz;
	tz.openMem(&(entry->getMCData()), entry->getName());

	// Parsing gogo
	string token = tz.getToken();
	while (!token.IsEmpty())
	{
		// Texture definition
		if (S_CMPNOCASE(token, "Texture"))
		{
			CTexture* tex = new CTexture();
			if (tex->parse(tz, "Texture"))
				addTexture(tex);
		}

		// Sprite definition
		if (S_CMPNOCASE(token, "Sprite"))
		{
			CTexture* tex = new CTexture();
			if (tex->parse(tz, "Sprite"))
				addTexture(tex);
		}

		// Graphic definition
		if (S_CMPNOCASE(token, "Graphic"))
		{
			CTexture* tex = new CTexture();
			if (tex->parse(tz, "Graphic"))
				addTexture(tex);
		}

		// WallTexture definition
		if (S_CMPNOCASE(token, "WallTexture"))
		{
			CTexture* tex = new CTexture();
			if (tex->parse(tz, "WallTexture"))
				addTexture(tex);
		}

		// Flat definition
		if (S_CMPNOCASE(token, "Flat"))
		{
			CTexture* tex = new CTexture();
			if (tex->parse(tz, "Flat"))
				addTexture(tex);
		}

		token = tz.getToken();
	}

	txformat = TXF_TEXTURES;

	return true;
}
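
readTEXTURESData() repeats the same new/parse/addTexture block for every definition keyword, and a CTexture whose parse() fails is never freed. A sketch of the same dispatch driven by a small table, assuming the CTexture::parse and addTexture signatures shown above; this is an illustration, not the actual SLADE implementation:

// Sketch only (requires <memory>): the five definition keywords handled by one
// loop; unique_ptr releases a CTexture whose parse() fails instead of leaking it.
static const char* texture_keywords[] = { "Texture", "Sprite", "Graphic", "WallTexture", "Flat" };
for (const char* keyword : texture_keywords)
{
	if (S_CMPNOCASE(token, keyword))
	{
		auto tex = std::make_unique<CTexture>();
		if (tex->parse(tz, keyword))
			addTexture(tex.release());
		break;
	}
}
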
Esempio n. 30
0
void XapianIndex::addPostingsToDocument(Tokenizer &tokens, Xapian::Document &doc,
	const string &prefix, Xapian::termcount &termPos, StemmingMode mode) const
{
	Xapian::Stem *pStemmer = NULL;
	string upperCasePrefix("R");
	string term;

	// Do we know what language to use for stemming ?
	if (m_stemLanguage.empty() == false)
	{
		pStemmer = new Xapian::Stem(StringManip::toLowerCase(m_stemLanguage));
	}

	// Terms starting with a capital letter are R-prefixed, unless a prefix is already defined
	if (prefix.empty() == false)
	{
		upperCasePrefix = prefix;
	}

	// Get the terms
	while (tokens.nextToken(term) == true)
	{
		if (term.empty() == true)
		{
			continue;
		}
		// Does it start with a capital letter ?
		if (isupper((int)term[0]) != 0)
		{
			doc.add_posting(upperCasePrefix + XapianDatabase::limitTermLength(term), termPos);
		}
		// Lower case the term
		term = StringManip::toLowerCase(term);

		// Stem the term ?
		if ((mode == STORE_UNSTEM) ||
			(pStemmer == NULL))
		{
			doc.add_posting(prefix + XapianDatabase::limitTermLength(term), termPos);
		}
		else if (mode == STORE_STEM)
		{
#if XAPIAN_MAJOR_VERSION==0
			string stemmedTerm(pStemmer->stem_word(term));
#else
			string stemmedTerm((*pStemmer)(term));
#endif

			doc.add_posting(prefix + XapianDatabase::limitTermLength(stemmedTerm), termPos);
		}
		else if (mode == STORE_BOTH)
		{
#if XAPIAN_MAJOR_VERSION==0
			string stemmedTerm(pStemmer->stem_word(term));
#else
			string stemmedTerm((*pStemmer)(term));
#endif

			// Add both at the same position
			doc.add_posting(prefix + XapianDatabase::limitTermLength(term), termPos);
			if (stemmedTerm != term)
			{
				// No point adding the same term twice
				doc.add_posting(prefix + XapianDatabase::limitTermLength(stemmedTerm), termPos);
			}
		}

		++termPos;
	}
#ifdef DEBUG
	cout << "XapianIndex::addPostingsToDocument: added " << termPos << " terms" << endl;
#endif

	if (pStemmer != NULL)
	{
		delete pStemmer;
	}
}
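
addPostingsToDocument() manages the stemmer through a raw pointer so that "no stemmer" can be distinguished from "stemmer present". On Xapian 1.x a default-constructed Xapian::Stem leaves terms unchanged, so the same logic can be sketched without new/delete; this is only an assumption about the build environment and drops the 0.x stem_word() path guarded by the #if above:

// Sketch only, assuming Xapian 1.x: a default-constructed Xapian::Stem is a
// no-op, so no pointer or NULL checks are needed.
Xapian::Stem stemmer;
if (m_stemLanguage.empty() == false)
{
	stemmer = Xapian::Stem(StringManip::toLowerCase(m_stemLanguage));
}

// ... later, where a term is stemmed:
// string stemmedTerm(stemmer(term));
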