/** @brief Processes a single line from a LOF text file, doing whatever is
 * indicated on the line ("window ..." or "file ..." directives).
 *
 * This function should just return for lines it cannot deal with, and the
 * caller will continue to the next line of the input file */
void LOFImportFileHandle::lofOpenFiles(wxString* ln)
{
   // Three tokenizers over the same line: tok splits on spaces (main scan),
   // temptok1 splits on double quotes (extracts a quoted, possibly
   // space-containing filename), temptok2 is reused below to count how many
   // space-separated tokens the filename itself occupies.
   wxStringTokenizer tok(*ln, wxT(" "));
   wxStringTokenizer temptok1(*ln, wxT("\""));
   wxStringTokenizer temptok2(*ln, wxT(" "));
   int tokenplace = 0;

   wxString targetfile;
   wxString tokenholder = tok.GetNextToken();

   if (tokenholder.IsSameAs(wxT("window"), false))
   {
      // set any duration/offset factors for last window, as all files were called
      doDuration();
      doScrollOffset();

      // Every "window" line after the first opens a fresh project.
      if (windowCalledOnce)
      {
         mProject = CreateNewAudacityProject();
      }

      windowCalledOnce = true;

      while (tok.HasMoreTokens())
      {
         tokenholder = tok.GetNextToken();

         if (tokenholder.IsSameAs(wxT("offset"), false))
         {
            if (tok.HasMoreTokens())
               tokenholder = tok.GetNextToken();

            // scrollOffset / durationFactor are members; they are applied
            // later by doScrollOffset()/doDuration() when the flags are set.
            if (Internat::CompatibleToDouble(tokenholder, &scrollOffset))
            {
               callScrollOffset = true;
            }
            else
            {
               /* i18n-hint: You do not need to translate "LOF" */
               wxMessageBox(_("Invalid window offset in LOF file."),
                            /* i18n-hint: You do not need to translate "LOF" */
                            _("LOF Error"), wxOK | wxCENTRE);
            }

            if (tok.HasMoreTokens())
               tokenholder = tok.GetNextToken();
         }

         if (tokenholder.IsSameAs(wxT("duration"), false))
         {
            if (tok.HasMoreTokens())
               tokenholder = tok.GetNextToken();

            if (Internat::CompatibleToDouble(tokenholder, &durationFactor))
            {
               callDurationFactor = true;
            }
            else
            {
               /* i18n-hint: You do not need to translate "LOF" */
               wxMessageBox(_("Invalid duration in LOF file."),
                            /* i18n-hint: You do not need to translate "LOF" */
                            _("LOF Error"), wxOK | wxCENTRE);
            }
         } // End if statement

         if (tokenholder.IsSameAs(wxT("#")))
         {
            // # indicates comments; ignore rest of line by replacing the
            // tokenizer with an empty one.
            tok = wxStringTokenizer(wxT(""), wxT(" "));
         }
      } // End while loop
   } // End if statement handling "window" lines

   else if (tokenholder.IsSameAs(wxT("file"), false))
   {
      // To identify filename and open it: the filename is the first
      // quote-delimited field on the line.
      tokenholder = temptok1.GetNextToken();
      targetfile = temptok1.GetNextToken();

      // If path is relative, make absolute path from LOF path
      if(!wxIsAbsolutePath(targetfile))
      {
         wxFileName fName(targetfile);
         fName.Normalize(wxPATH_NORM_ALL, mLOFFileName->GetPath(wxPATH_GET_VOLUME | wxPATH_GET_SEPARATOR));
         if(fName.FileExists())
         {
            targetfile = fName.GetFullPath();
         }
      }

#ifdef USE_MIDI
      // If file is a midi
      if (targetfile.AfterLast(wxT('.')).IsSameAs(wxT("mid"), false) ||
          targetfile.AfterLast(wxT('.')).IsSameAs(wxT("midi"), false))
      {
         NoteTrack *nTrack = new NoteTrack(mProject->GetDirManager());
         if (::ImportMIDI(targetfile, nTrack))
            mProject->GetTracks()->Add(nTrack);
         else
            delete nTrack;
      }
      // If not a midi, open audio file
      else
      {
#else // !USE_MIDI
      /* if we don't have midi support, go straight on to opening as an
       * audio file. TODO: Some sort of message here? */
      {
#endif // USE_MIDI
         mProject->OpenFile(targetfile);
      }

      // Set tok to right after filename: skip as many space tokens in `tok`
      // as the filename itself contains.
      temptok2.SetString(targetfile);
      tokenplace = temptok2.CountTokens();

      for (int i = 0; i < tokenplace; i++)
         tokenholder = tok.GetNextToken();

      if (tok.HasMoreTokens())
      {
         tokenholder = tok.GetNextToken();

         if (tokenholder.IsSameAs(wxT("#")))
         {
            // # indicates comments; ignore line
            tok = wxStringTokenizer(wxT(""), wxT(" "));
         }

         if (tokenholder.IsSameAs(wxT("offset"), false))
         {
            if (tok.HasMoreTokens())
               tokenholder = tok.GetNextToken();
            double offset;

            // handle an "offset" specifier
            if (Internat::CompatibleToDouble(tokenholder, &offset))
            {
               Track *t;
               TrackListIterator iter(mProject->GetTracks());
               t = iter.First();
               for (int i = 1; i < CountNumTracks(mProject) - 1; i++)
                  t = iter.Next();

               // t is now the last track in the project, unless the import of
               // all tracks failed, in which case it will be null. In that
               // case we return because we cannot offset a non-existent track.
               if (t == NULL) return;
#ifdef USE_MIDI
               if (targetfile.AfterLast(wxT('.')).IsSameAs(wxT("mid"), false) ||
                   targetfile.AfterLast(wxT('.')).IsSameAs(wxT("midi"), false))
               {
                  wxMessageBox(_("MIDI tracks cannot be offset individually, only audio files can be."),
                               _("LOF Error"), wxOK | wxCENTRE);
               }
               else
#endif
               {
                  // Stereo (linked) pairs: offset both channels.
                  if (CountNumTracks(mProject) == 1)
                     t->SetOffset(offset);
                  else
                  {
                     if (t->GetLinked())
                        t->SetOffset(offset);
                     t = iter.Next();
                     t->SetOffset(offset);
                  }
               }
            } // end of converting "offset" argument
            else
            {
               /* i18n-hint: You do not need to translate "LOF" */
               wxMessageBox(_("Invalid track offset in LOF file."),
                            _("LOF Error"), wxOK | wxCENTRE);
            }
         } // End if statement for "offset" parameters
      } // End if statement (more tokens after file name)
   } // End if statement "file" lines

   else if (tokenholder.IsSameAs(wxT("#")))
  //! Parses a delimited duration string (e.g. "-12:21:30.087") into a time_duration.
  /*! Fields are hours, minutes, seconds and optional fractional seconds,
   *  separated by any of '-' ':' ',' '.'.  A leading '-' negates the whole
   *  duration.  Extra fields beyond the fourth are silently ignored.
   */
  inline time_duration str_from_delimited_time_duration(const std::basic_string<char_type>& s)
  {
    unsigned short min=0, sec =0;
    int hour =0;
    // '-' is also a separator below, so the sign must be captured before
    // tokenizing; s.at(0) throws on an empty string.
    bool is_neg = (s.at(0) == '-');
    lslboost::int64_t fs=0;
    int pos = 0;  // index of the current field: 0=hour 1=min 2=sec 3=frac

    typedef typename std::basic_string<char_type>::traits_type traits_type;
    typedef lslboost::char_separator<char_type, traits_type> char_separator_type;
    typedef lslboost::tokenizer<char_separator_type,
                                typename std::basic_string<char_type>::const_iterator,
                                std::basic_string<char_type> > tokenizer;
    typedef typename lslboost::tokenizer<char_separator_type,
                                typename std::basic_string<char_type>::const_iterator,
                                typename std::basic_string<char_type> >::iterator tokenizer_iterator;

    // Four separator characters plus the implicit terminating '\0'.
    char_type sep_chars[5] = {'-',':',',','.'};
    char_separator_type sep(sep_chars);
    tokenizer tok(s,sep);
    for(tokenizer_iterator beg=tok.begin(); beg!=tok.end(); ++beg) {
      switch(pos) {
      case 0: {
        hour = lslboost::lexical_cast<int>(*beg);
        break;
      }
      case 1: {
        min = lslboost::lexical_cast<unsigned short>(*beg);
        break;
      }
      case 2: {
        sec = lslboost::lexical_cast<unsigned short>(*beg);
        break;
      };
      case 3: {
        int digits = static_cast<int>(beg->length());
        //Works around a bug in MSVC 6 library that does not support
        //operator>> thus meaning lexical_cast will fail to compile.
#if (defined(BOOST_MSVC) && (_MSC_VER < 1300))
        // msvc wouldn't compile 'time_duration::num_fractional_digits()'
        // (required template argument list) as a workaround a temp
        // time_duration object was used
        time_duration td(hour,min,sec,fs);
        int precision = td.num_fractional_digits();
        // _atoi64 is an MS specific function
        if(digits >= precision) {
          // drop excess digits
          fs = _atoi64(beg->substr(0, precision).c_str());
        }
        else {
          fs = _atoi64(beg->c_str());
        }
#else
        int precision = time_duration::num_fractional_digits();
        if(digits >= precision) {
          // drop excess digits
          fs = lslboost::lexical_cast<lslboost::int64_t>(beg->substr(0, precision));
        }
        else {
          fs = lslboost::lexical_cast<lslboost::int64_t>(*beg);
        }
#endif
        if(digits < precision){
          // trailing zeros get dropped from the string,
          // "1:01:01.1" would yield .000001 instead of .100000
          // the power() compensates for the missing decimal places
          fs *= power(10, precision - digits);
        }
        break;
      }
      default: break;  // ignore any fields past the fourth
      }//switch
      pos++;
    }
    if(is_neg) {
      return -time_duration(hour, min, sec, fs);
    }
    else {
      return time_duration(hour, min, sec, fs);
    }
  }
/** Reads the next significant token into *token and returns its type.
 *
 * Blanks, skip-characters, newlines, comments, command lines and words in
 * the skip table are consumed transparently (via the `nexttok` loop) so the
 * caller only ever sees meaningful tokens.  Tokens found in the user symbol
 * table are retagged TT_USERTOK with the symbol's code; otherwise code is 0.
 */
int POL::getpol_tok (struct token_st *token)
{
  KeywordCodeEntry* sym;

  token->ready = false;
nexttok:
  gettok (token);

  if (token->type == TT_BLANK)
    goto nexttok;

  if (token->type == TT_SPECLCHAR) {
    // Characters the caller asked to ignore entirely.
    if (strchr(m_szSkipChars, token->tokstr[0]) != NULL)
      goto nexttok;
    if (token->tokstr[0] == NEWLINE)
      goto nexttok;
    if (token->tokstr[0] == meta.cmd) {
      // Meta command introducer: process the command, then keep scanning.
      getcmd();
      goto nexttok;
    }
    if (token->tokstr[0] == meta.com) {	/* skip comment */
      eatline ();
      goto nexttok;
    }
    if (token->tokstr[0] == meta.out) {
      // Echo an escaped string to stderr, then keep scanning.
      getescape(token->tokstr, meta.out, MAXTOK);
      fputs (token->tokstr, stderr);
      goto nexttok;
    }
    if (token->tokstr[0] == meta.con) {	/* continuation across NEWLINE */
      while (lookchar() == BLANK || lookchar() == TAB)
        inchar();
      if (lookchar() == NEWLINE)
        inchar();
    }
    if (token->tokstr[0] == meta.ter) {	/* get input from terminal */
      usefile (P_USE_FILE, "");
      tok (token);
      closefile();
      return (token->type);
    }
  }

  /* look for filler words */
  if (skiptable.lookup (token->tokstr) != NULL)	/* ignore words in skip table */
    goto nexttok;

  /* look for user defined symbols */
  if ((sym = usertable.lookup (token->tokstr)) != NULL) {
    token->type = TT_USERTOK;
    token->code = sym->getCode();
  } else
    token->code = 0;

  if (m_bTrace)
    sys_error (ERR_TRACE, "POL read token '%s', type = %d\n", token->tokstr, token->type);

  return (token->type);
}
/** Splits (line) into whitespace-separated arguments and feeds each one to
 *  ParseArgAux().  Quoted regions keep their spaces (temporarily encoded as
 *  GUNK_CHAR so the tokenizer will not split them), a backslash escapes the
 *  following quote, and an unquoted '#' starts a comment that runs to the end
 *  of the line.  "x=5", "x =5", "x= 5" and "x = 5" all re-join into one
 *  "x=5"-style argument before being parsed.
 *  Returns B_NO_ERROR on success, B_ERROR on allocation or parse failure.
 */
static status_t ParseArgsAux(const String & line, Message * optAddToMsg, Queue<String> * optAddToQueue, bool cs)
{
   TCHECKPOINT;

   const String trimmed = line.Trim();
   const uint32 len = trimmed.Length();

   // First, we'll pre-process the string into a StringTokenizer-friendly
   // form, by replacing all quoted spaces with gunk and removing the quotes
   String tokenizeThis;
   if (tokenizeThis.Prealloc(len) != B_NO_ERROR) return B_ERROR;

   const char GUNK_CHAR = (char) 0x01;  // placeholder for a space inside quotes
   bool lastCharWasBackslash = false;
   bool inQuotes = false;
   for (uint32 i=0; i<len; i++)
   {
      char c = trimmed[i];
      if ((lastCharWasBackslash == false)&&(c == '\"')) inQuotes = !inQuotes;
      else
      {
         if ((inQuotes == false)&&(c == '#')) break;  // comment to EOL
         tokenizeThis += ((inQuotes)&&(c == ' ')) ? GUNK_CHAR : c;
      }
      lastCharWasBackslash = (c == '\\');
   }

   StringTokenizer tok(tokenizeThis()," \t\r\n");
   const char * t = tok();
   while(t)
   {
      String n(t);
      n.Replace(GUNK_CHAR, ' ');  // restore the protected spaces

      // Check to see if the next token is the equals sign...
      const char * next = tok();
      if ((next)&&(next[0] == '='))
      {
         if (next[1] != '\0')
         {
            // It's the "x =5" case (2 tokens)
            String n2(next); n2.Replace(GUNK_CHAR, ' ');
            if (ParseArgAux(n+n2, optAddToMsg, optAddToQueue, cs) != B_NO_ERROR) return B_ERROR;
            t = tok();
         }
         else
         {
            // It's the "x = 5" case (3 tokens)
            next = tok();  // find out what's after the equal sign
            if (next)
            {
               String n3(next); n3.Replace(GUNK_CHAR, ' ');
               if (ParseArgAux(n+"="+n3, optAddToMsg, optAddToQueue, cs) != B_NO_ERROR) return B_ERROR;
               t = tok();
            }
            else
            {
               // for the "x =" case, just parse x and ignore the equals
               if (ParseArgAux(n, optAddToMsg, optAddToQueue, cs) != B_NO_ERROR) return B_ERROR;
               t = NULL;
            }
         }
      }
      else if (n.EndsWith('='))
      {
         // Try to attach the next keyword ("x= 5" case).
         // NOTE(review): `next` can be NULL here (a trailing "x=" token);
         // presumably String(NULL) yields an empty string — verify against
         // the String class before relying on it.
         String n4(next); n4.Replace(GUNK_CHAR, ' ');
         if (ParseArgAux(n+n4, optAddToMsg, optAddToQueue, cs) != B_NO_ERROR) return B_ERROR;
         t = tok();
      }
      else
      {
         // Nope, it's just the normal case
         if (ParseArgAux(n, optAddToMsg, optAddToQueue, cs) != B_NO_ERROR) return B_ERROR;
         t = next;
      }
   }
   return B_NO_ERROR;
}
/** Finds the extent of the token at cursor_pos within buff, plus the extent
 *  of the previous string token.
 *
 *  The search is confined to the command substitution surrounding the cursor.
 *  Any of the four output pointers may be NULL if the caller does not need
 *  that value.  If the cursor sits between tokens, the "current token" is
 *  reported as a zero-length range at the cursor position.
 */
void parse_util_token_extent(const wchar_t *buff, size_t cursor_pos, const wchar_t **tok_begin, const wchar_t **tok_end, const wchar_t **prev_begin, const wchar_t **prev_end)
{
    const wchar_t *a = NULL, *b = NULL, *pa = NULL, *pb = NULL;

    CHECK(buff,);

    // cursor_pos is unsigned, so this is vacuously true; kept as documentation.
    assert(cursor_pos >= 0);

    const wchar_t *cmdsubst_begin, *cmdsubst_end;
    parse_util_cmdsubst_extent(buff, cursor_pos, &cmdsubst_begin, &cmdsubst_end);

    if (!cmdsubst_end || !cmdsubst_begin)
    {
        return;
    }

    /* pos is equivalent to cursor_pos within the range of the command substitution {begin, end} */
    long offset_within_cmdsubst = cursor_pos - (cmdsubst_begin - buff);

    // Defaults: a zero-length range at the cursor for both current and
    // previous token, refined by the loop below.
    a = cmdsubst_begin + offset_within_cmdsubst;
    b = a;
    pa = cmdsubst_begin + offset_within_cmdsubst;
    pb = pa;

    assert(cmdsubst_begin >= buff);
    assert(cmdsubst_begin <= (buff+wcslen(buff)));
    assert(cmdsubst_end >= cmdsubst_begin);
    assert(cmdsubst_end <= (buff+wcslen(buff)));

    const wcstring buffcpy = wcstring(cmdsubst_begin, cmdsubst_end-cmdsubst_begin);

    tokenizer_t tok(buffcpy.c_str(), TOK_ACCEPT_UNFINISHED | TOK_SQUASH_ERRORS);
    for (; tok_has_next(&tok); tok_next(&tok))
    {
        // NOTE: these locals deliberately shadow the tok_begin/tok_end
        // output parameters inside the loop body.
        size_t tok_begin = tok_get_pos(&tok);
        size_t tok_end = tok_begin;

        /* Calculate end of token */
        if (tok_last_type(&tok) == TOK_STRING)
        {
            tok_end += wcslen(tok_last(&tok));
        }

        /* Cursor was before beginning of this token, means that the cursor is
           between two tokens, so we set it to a zero element string and break */
        if (tok_begin > offset_within_cmdsubst)
        {
            a = b = cmdsubst_begin + offset_within_cmdsubst;
            break;
        }

        /* If cursor is inside the token, this is the token we are looking for.
           If so, set a and b and break */
        if ((tok_last_type(&tok) == TOK_STRING) && (tok_end >= offset_within_cmdsubst))
        {
            a = cmdsubst_begin + tok_get_pos(&tok);
            b = a + wcslen(tok_last(&tok));
            break;
        }

        /* Remember previous string token */
        if (tok_last_type(&tok) == TOK_STRING)
        {
            pa = cmdsubst_begin + tok_get_pos(&tok);
            pb = pa + wcslen(tok_last(&tok));
        }
    }

    // Here tok_begin/tok_end refer to the output parameters again (the
    // shadowing locals above are out of scope).
    if (tok_begin)
    {
        *tok_begin = a;
    }

    if (tok_end)
    {
        *tok_end = b;
    }

    if (prev_begin)
    {
        *prev_begin = pa;
    }

    if (prev_end)
    {
        *prev_end = pb;
    }

    assert(pa >= buff);
    assert(pa <= (buff+wcslen(buff)));
    assert(pb >= pa);
    assert(pb <= (buff+wcslen(buff)));
}
// GNGGA void Ublox::read_gga() { int counter = 0; char token[20]; Tokeniser tok(buf, ','); while(tok.next(token, 20)) { switch(counter) { case 1: //time { float time = atof(token); int hms = int(time); datetime.millis = time - hms; datetime.seconds = fmod(hms, 100); hms /= 100; datetime.minutes = fmod(hms, 100); hms /= 100; datetime.hours = hms; time_age = millis(); } break; case 2: //latitude { float llat = atof(token); int ilat = llat/100; double mins = fmod(llat, 100); latitude = ilat + (mins/60); } break; case 3: //north/south { if(token[0] == 'S') latitude = -latitude; } break; case 4: //longitude { float llong = atof(token); int ilat = llong/100; double mins = fmod(llong, 100); longitude = ilat + (mins/60); } break; case 5: //east/west { if(token[0] == 'W') longitude = -longitude; latlng_age = millis(); } break; case 6: { fixtype = _fixtype(atoi(token)); } break; case 7: { sats_in_use = atoi(token); } break; case 8: { hdop = atoi(token); } break; case 9: { float new_alt = atof(token); vert_speed = (new_alt - altitude)/((millis()-alt_age)/1000.0); altitude = atof(token); alt_age = millis(); } break; } counter++; } }
int main(int argc , char ** argv ) { std::vector<std::string> ordhdr; if ( argc < 2 ) { std::cerr << "gcol: no columns specified\n"; exit(1); } for ( int i = 1 ; i < argc ; i++) ordhdr.push_back( argv[i] ); std::vector<int> cols; std::vector<std::string> data; int col = 0; // # cols in file int prtcol = 0; // # of cols to print while ( ! std::cin.eof() ) { // parse a line? if ( col ) { char line[MAXBUF]; std::cin.getline( line , MAXBUF , '\n' ); int n; char_tok tok( line , &n , '\t' ); if ( n != col ) { if ( n > 1 ) std::cerr << "*** skipping bad line (" << n << " fields, not " << col << "): " << line << "\n"; continue; } for (int i=0; i<prtcol; i++ ) { if ( i ) std::cout << "\t"; if ( cols[i] == -1 ) std::cout << ordhdr[i]; else std::cout << tok( cols[i] ) ; } std::cout << "\n"; } else // this is first row -- read as headers { std::string line; std::getline( std::cin , line ); char_tok headers( line , &col , '\t' ); data.resize( col ); bool first = true; std::map<std::string,int> hset; for (int i=0; i<headers.size(); i++) hset.insert( std::make_pair( headers[i] , i )) ; for (int i=0; i<ordhdr.size(); i++) { if ( hset.find( ordhdr[i] ) != hset.end() ) { if ( ! first ) std::cout << "\t"; first = false; std::cout << ordhdr[i] ; cols.push_back( hset[ ordhdr[i] ] ); //std::cout << "adding col " << ordhdr[i] << "\n"; } else // insert string literal { if ( ! first ) std::cout << "\t"; first = false; std::cout << ordhdr[i] ; cols.push_back( -1 ); //std::cout << "added str lit " << ordhdr[i] << "\n"; } } if ( cols.size() == 0 ) { std::cerr << "gcol: no matching columns\n"; exit(1); } prtcol = cols.size(); std::cout << "\n"; } } exit(0); }
// Loads a level description file: a header line (width height tileSize
// sizeX sizeY tilePath), a blank line, the tile map, a blank line, the
// collision map, a blank line, then the initial entity list.
//
// NOTE(review): this constructor throws *pointers* to std::runtime_error
// ("throw new ..."); existing catch sites presumably catch
// std::runtime_error* and delete it, so the throw style is preserved.
//
// Fix: the entity-loading loop previously passed &(std::string(*it)) to
// AddEntity — the address of a temporary, which is ill-formed standard C++
// (it only compiles as an MSVC extension).  The temporary is now bound to a
// named lvalue first; AddEntity's signature is unchanged.
World::World(std::string *level, TextureLibrary *lib, int playerX, int playerY, int playerRot)
{
	std::string line;
	std::ifstream entityFile(*level);
	boost::char_separator<char> sep(" ");
	int width, height, tileSize;
	std::string tilePath;

	// Gets level parameters
	if (std::getline(entityFile, line))
	{
		boost::tokenizer<boost::char_separator<char>> tok(line, sep);
		TokenIterator it = tok.begin();
		width = std::stoi(*it);
		++it;
		height = std::stoi(*it);
		++it;
		tileSize = std::stoi(*it);
		++it;
		_size.x = std::stoi(*it);
		++it;
		_size.y = std::stoi(*it);
		++it;
		tilePath = *it;
	}
	else
	{
		throw new std::runtime_error("Parsing error on header: unexpected end of line");
	}
	// The header must be followed by exactly one blank separator line.
	if ((!std::getline(entityFile, line)) || (line != ""))
	{
		throw new std::runtime_error("Parsing error on header: unexpected end of line");
	}

	// Get map tiles
	sf::Texture* tex;
	for (int i = 0; i < height; i++)
	{
		if (std::getline(entityFile, line))
		{
			boost::tokenizer<boost::char_separator<char>> tok(line, sep);
			TokenIterator it = tok.begin();
			for (int j = 0; j < width; j++)
			{
				if (it == tok.end())
				{
					throw new std::runtime_error("Parsing error on tilemap: unexpected end of line");
				}
				// Builds the texture
				tex = lib->Load(tilePath, tileSize, std::stoi(*it));
				// Builds the sprite
				sf::Sprite sprite;
				sprite.setTexture(*tex);
				sprite.setPosition(j * tileSize, i * tileSize);
				_mapTiles.push_back(sprite);
				++it;
			}
		}
		else
		{
			throw new std::runtime_error("Parsing error on tilemap: unexpected EOF");
		}
	}
	if ((!std::getline(entityFile, line)) || (line != ""))
	{
		throw new std::runtime_error("Parsing error on tilemap: unexpected end of line");
	}

	// Builds the world collision map
	for (int i = 0; i < height; i++)
	{
		_mapBarriers.push_back(std::vector<TileType>());
		if (std::getline(entityFile, line))
		{
			boost::tokenizer<boost::char_separator<char>> tok(line, sep);
			TokenIterator it = tok.begin();
			for (int j = 0; j < width; j++)
			{
				if (it == tok.end())
				{
					throw new std::runtime_error("Parsing error on wallmap: unexpected end of line");
				}
				// Determines if the tile is solid
				_mapBarriers[i].push_back((TileType)(std::stoi(*it)));
				++it;
			}
		}
		else
		{
			throw new std::runtime_error("Parsing error on wallmap: unexpected EOF");
		}
	}
	if ((!std::getline(entityFile, line)) || (line != ""))
	{
		throw new std::runtime_error("Parsing error on tilemap: unexpected end of line");
	}

	// Loading entities required at initialization of the level
	while (std::getline(entityFile, line) && line != "")
	{
		boost::tokenizer<boost::char_separator<char>> tok(line, sep);
		TokenIterator it = tok.begin();
		if (it != tok.end())
		{
			if (*it == "player")
			{
				++it;
				// Bind to a named lvalue before taking its address (see note above).
				std::string entityName(*it);
				AddEntity(playerX, playerY, playerRot, &entityName);
			}
			else
			{
				int x, y, rot;
				x = std::stoi(*it);
				++it;
				y = std::stoi(*it);
				++it;
				rot = std::stoi(*it);
				++it;
				std::string entityName(*it);
				AddEntity(x, y, rot, &entityName);
			}
		}
		else
		{
			throw new std::runtime_error("Parsing error on entities: unexpected end of line");
		}
	}
}
std::vector<transform_ptr> transform::factory(const std::string& s) { std::vector<transform_ptr> results; enum { STATE_TYPE, STATE_NUMBER, } state = STATE_TYPE; std::vector<double> parameters; TransformType type = TransformType::ERROR; boost::char_separator<char> seperators(" \n\t\r,", "()"); boost::tokenizer<boost::char_separator<char>> tok(s, seperators); for(auto it = tok.begin(); it != tok.end(); ++it) { if(state == STATE_TYPE) { if(*it == "matrix") { type = TransformType::MATRIX; } else if(*it == "translate") { type = TransformType::TRANSLATE; } else if(*it == "scale") { type = TransformType::SCALE; } else if(*it == "rotate") { type = TransformType::ROTATE; } else if(*it == "skewX") { type = TransformType::SKEW_X; } else if(*it == "skewY") { type = TransformType::SKEW_Y; } else if(*it == "(") { parameters.clear(); state = STATE_NUMBER; } else { ASSERT_LOG(false, "Unexpected token while looking for a type: " << *it << " : " << s); } } else if(state == STATE_NUMBER) { if(*it == ")") { ASSERT_LOG(type != TransformType::ERROR, "svg transform type was not initialized"); switch(type) { case TransformType::MATRIX: { matrix_transform* mtrf = new matrix_transform(parameters); results.emplace_back(mtrf); break; } case TransformType::TRANSLATE: { ASSERT_LOG(parameters.size() == 1 || parameters.size() == 2, "Parsing transform:translate found " << parameters.size() << " parameter(s), expected 1 or 2"); double tx = parameters[0]; double ty = parameters.size() == 2 ? parameters[1] : 0.0f; translate_transform * ttrf = new translate_transform(tx, ty); results.emplace_back(ttrf); break; } case TransformType::SCALE: { ASSERT_LOG(parameters.size() == 1 || parameters.size() == 2, "Parsing transform:scale found " << parameters.size() << " parameter(s), expected 1 or 2"); double sx = parameters[0]; double sy = parameters.size() == 2 ? 
parameters[1] : sx; scale_transform * strf = new scale_transform(sx, sy); results.emplace_back(strf); break; } case TransformType::ROTATE: { ASSERT_LOG(parameters.size() == 1 || parameters.size() == 3, "Parsing transform:rotate found " << parameters.size() << " parameter(s), expected 1 or 3"); double angle = parameters[0] / 180.0 * M_PI; double cx = parameters.size() == 3 ? parameters[1] : 0; double cy = parameters.size() == 3 ? parameters[2] : 0; rotation_transform* rtrf = new rotation_transform(angle, cx, cy); results.emplace_back(rtrf); break; } case TransformType::SKEW_X: { ASSERT_LOG(parameters.size() == 1, "Parsing transform:skewX found " << parameters.size() << " parameter(s), expected 1"); double sa = tan(parameters[0]); skew_x_transform* sxtrf = new skew_x_transform(sa); results.emplace_back(sxtrf); break; } case TransformType::SKEW_Y: { ASSERT_LOG(parameters.size() == 1, "Parsing transform:skewY found " << parameters.size() << " parameter(s), expected 1"); double sa = tan(parameters[0]); skew_y_transform* sxtrf = new skew_y_transform(sa); results.emplace_back(sxtrf); break; } case TransformType::ERROR: assert(false); break; } state = STATE_TYPE; } else { char* end = nullptr; double value = strtod(it->c_str(), &end); if(value == 0 && it->c_str() == end) { ASSERT_LOG(false, "Invalid number value: " << *it); } ASSERT_LOG(errno != ERANGE, "parsed numeric value out-of-range: " << *it); parameters.push_back(value); } } } return results; }
//FIXME: should accept a stream as well
/** Converts an Eagle netlist script into a netlist description, written via
 *  out().  Recognizes ADD statements (component instantiation, dispatched on
 *  the first letter of the component name) and SIGNAL statements (net
 *  connections); empty statements are skipped, and any other token aborts
 *  the conversion with an "Unexpected" message.
 */
void nl_convert_eagle_t::convert(const pstring &contents)
{
	plib::pistringstream istrm(contents);
	eagle_tokenizer tok(*this, istrm);

	out("NETLIST_START(dummy)\n");
	// Power rails are always present.
	add_term("GND", "GND");
	add_term("VCC", "VCC");
	eagle_tokenizer::token_t token = tok.get_token();
	while (true)
	{
		if (token.is_type(eagle_tokenizer::ENDOFFILE))
		{
			dump_nl();
			// FIXME: Parameter
			out("NETLIST_END()\n");
			return;
		}
		else if (token.is(tok.m_tok_SEMICOLON))
		{
			/* ignore empty statements */
			token = tok.get_token();
		}
		else if (token.is(tok.m_tok_ADD))
		{
			pstring name = tok.get_string();
			/* skip to semicolon */
			do
			{
				token = tok.get_token();
			} while (!token.is(tok.m_tok_SEMICOLON));
			token = tok.get_token();
			// Optional VALUE clause following the ADD statement.
			pstring sval = "";
			if (token.is(tok.m_tok_VALUE))
			{
				pstring vname = tok.get_string();
				sval = tok.get_string();
				tok.require_token(tok.m_tok_SEMICOLON);
				token = tok.get_token();
			}
			// Component kind is encoded in the first letter of its name.
			switch (name.code_at(0))
			{
				case 'Q':
				{
					add_device("QBJT", name, sval);
				}
				break;
				case 'R':
				{
					double val = get_sp_val(sval);
					add_device("RES", name, val);
				}
				break;
				case 'C':
				{
					double val = get_sp_val(sval);
					add_device("CAP", name, val);
				}
				break;
				case 'P':
					// Input pins: HIGH/LOW become TTL inputs, anything
					// else is treated as an analog level.
					if (sval.ucase() == "HIGH")
						add_device("TTL_INPUT", name, 1);
					else if (sval.ucase() == "LOW")
						add_device("TTL_INPUT", name, 0);
					else
						add_device("ANALOG_INPUT", name, sval.as_double());
					add_pin_alias(name, "1", "Q");
					break;
				case 'D':
					/* Pin 1 = Anode, Pin 2 = Cathode */
					add_device("DIODE", name, sval);
					add_pin_alias(name, "1", "A");
					add_pin_alias(name, "2", "K");
					break;
				case 'U':
				case 'X':
				{
					// ICs: the value is the TTL family/type name.
					pstring tname = "TTL_" + sval + "_DIP";
					add_device(tname, name);
					break;
				}
				default:
					tok.error("// IGNORED " + name);
			}
		}
		else if (token.is(tok.m_tok_SIGNAL))
		{
			pstring netname = tok.get_string();
			token = tok.get_token();
			// Each pair of tokens up to the semicolon is a device/pin
			// connection on this net.
			while (!token.is(tok.m_tok_SEMICOLON))
			{
				/* fixme: should check for string */
				pstring devname = token.str();
				pstring pin = tok.get_string();
				add_term(netname, devname + "." + pin);
				token = tok.get_token();
			}
		}
		else
		{
			out("Unexpected {}\n", token.str().cstr());
			return;
		}
	}
}
/** Expands a %metadata% template string against the frame's current puzzle.
 *
 *  Tokens between % pairs are replaced by GetMeta() lookups; "%%" produces a
 *  literal '%'.  When XWORD_USE_LUA is enabled and useLua is true, metadata
 *  values are emitted as quoted Lua string literals (or nil when empty) and
 *  the resulting text is compiled and executed as a Lua expression taking
 *  the puzzle as its argument; the string it returns becomes the label.
 *  Returns wxEmptyString if there is no valid puzzle or the Lua result is
 *  not a string.
 */
wxString MetadataCtrl::FormatLabel(const wxString & format, MyFrame * frame, bool useLua)
{
    if (! frame || ! frame->GetPuzzle().IsOk())
        return wxEmptyString;
    wxString result;
    // NOTE(review): `puz` is not referenced again in this function;
    // presumably kept for side effects of GetPuzzle() or historical reasons.
    const puz::Puzzle & puz = frame->GetPuzzle();
    wxStringTokenizer tok(format, _T("%"), wxTOKEN_RET_EMPTY_ALL);
    bool ismeta = false; // Is the current token a metadata value?
    while (tok.HasMoreTokens())
    {
        if (! ismeta)
        {
            result << tok.GetNextToken();
            ismeta = true; // The next token is a metadata value
        }
        else // We're in the middle of a percent-delimited string
        {
            wxString str = tok.GetNextToken();
            if (str.empty()) // This is a literal %
            {
                result << _T("%");
            }
            else // This is a metadata value
            {
                str = GetMeta(str, frame);
#if XWORD_USE_LUA
                if (useLua)
                {
                    // Emit a Lua literal: nil for missing values, otherwise
                    // a single-quoted string with quotes escaped.
                    if (str.empty())
                    {
                        result << _T("nil");
                    }
                    else
                    {
                        str.Replace("'", "\\'");
                        result << _T("'") << str << _T("'");
                    }
                }
                else // plain text
                    result << str;
#else // ! XWORD_USE_LUA
                result << str;
#endif // XWORD_USE_LUA
            }
            ismeta = false; // The next token is plain text
        }
    }
#if XWORD_USE_LUA
    if (! useLua)
        return result;
    // Process this with lua
    wxLuaState & luastate = wxGetApp().GetwxLuaState();
    lua_State * L = luastate.GetLuaState();
    // Compile a lua function taking a puzzle argument
    if (result.Find(_T("return")) == -1) // Make sure we're returning something
        result = _T("return ") + result;
    wxLuaCharBuffer code(wxString::Format(_T("return function (puzzle) %s end"), (const wxChar *)result.c_str()));
    // Compile and run the code
    if (luaL_loadbuffer(L, code.GetData(), code.Length(), "") == 0)
    {
        // First pcall evaluates the chunk, which yields the inner function.
        if (lua_pcall(L, 0, 1, 0) == 0)
        {
            if (lua_isfunction(L, -1)) // Make sure we got a function
            {
                // Call this function
                luapuz_pushPuzzle(L, &frame->GetPuzzle());
                if (lua_pcall(L, 1, 1, 0) == 0)
                {
                    // Check the result
                    if (luastate.lua_IsString(-1))
                    {
                        result = luastate.lua_TowxString(-1);
                        lua_pop(L, 1);
                        return result;
                    }
                    // Clean up the stack
                    lua_pop(L, 1);
                }
            }
            else // Clean up the stack
            {
                lua_pop(L, 1);
            }
        }
    }
    else
    {
        // Return the error
        result = luastate.lua_TowxString(-1);
        lua_pop(L, 1);
        return result;
    }
    return wxEmptyString;
#else // ! XWORD_USE_LUA
    return result;
#endif // XWORD_USE_LUA
}
// Reads the next token from *in and hands it back wrapped in a Reference.
static Reference create(SourceStream *in)
{
   Reference result(new SourceTokenC);
   read_token(in, result);
   return result;
}
/** Scans the platform-specific LADSPA plugin directories (plus any paths in
 *  the LADSPA_PATH environment variable) and returns the shared libraries
 *  found.  When built with liblrdf category support, it first loads .rdf/.rdfs
 *  metadata files and registers plugin categories and their parent/child
 *  relationships with the EffectManager.
 */
wxArrayString LadspaEffectsModule::FindPlugins(PluginManagerInterface & pm)
{
#if defined(USE_LIBLRDF) && defined(EFFECT_CATEGORIES)
   EffectManager& em = EffectManager::Get();

   wxArrayString rdfPathList;
   wxString rdfPathVar;
   wxArrayString rdfFiles;

   InitCategoryMap();
   lrdf_init();

   rdfPathVar = wxGetenv(wxT("LADSPA_RDF_PATH"));
   if (rdfPathVar != wxT(""))
      wxGetApp().AddMultiPathsToPathList(rdfPathVar, rdfPathList);

#ifdef __WXGTK__
   wxGetApp().AddUniquePathToPathList(wxT("/usr/share/ladspa/rdf"), rdfPathList);
   wxGetApp().AddUniquePathToPathList(wxT("/usr/local/share/ladspa/rdf"), rdfPathList);
#endif

#ifdef __WXMAC__
   wxGetApp().AddUniquePathToPathList(wxT("/usr/share/ladspa/rdf"), rdfPathList);
   // XXX Maybe other Mac paths here?
#endif

#ifdef __WXMSW__
   //wxGetApp().AddUniquePathToPathList(wxT("WINDOWS LRDF PATH"),
   //                                   rdfPathList);
   // XXX Other Windows paths here.
#endif

   // Add the Audacity paths so we get ladspa.rdfs if we are using a local
   // liblrdf
   // NOTE(review): `i` and `audacityPathList` are not declared in this
   // function — this loop only compiles if they exist at an enclosing scope;
   // this #if branch is presumably rarely built.  Verify before enabling
   // USE_LIBLRDF && EFFECT_CATEGORIES.
   for(i=0; i<audacityPathList.GetCount(); i++) {
      wxString prefix = audacityPathList[i] + wxFILE_SEP_PATH;
      wxGetApp().AddUniquePathToPathList(prefix + wxT("rdf"), rdfPathList);
   }

   wxGetApp().FindFilesInPathList(wxT("*.rdf"), rdfPathList, rdfFiles);
   wxGetApp().FindFilesInPathList(wxT("*.rdfs"), rdfPathList, rdfFiles);
   for(size_t i = 0; i < rdfFiles.GetCount(); ++i) {
      wxString fileUri(wxT("file://"));
      fileUri += rdfFiles[i];
      lrdf_read_file(fileUri.mb_str(wxConvUTF8));
   }

   // Add all plugin categories found by LRDF
   lrdf_uris* cats = lrdf_get_all_subclasses("http://ladspa.org/ontology#Plugin");
   if (cats) {
      // Add the categories and find the plugins belonging to them
      for (size_t i = 0; i < cats->count; ++i) {
         char* label = lrdf_get_label(cats->items[i]);
         if (!label)
            continue;
         wxString uri = MapCategoryUri(wxString::FromAscii(cats->items[i]));
         em.AddCategory(uri, wxString::FromUTF8(label));
         std::free(label);

         lrdf_uris* plugs = lrdf_get_instances(cats->items[i]);
         if (plugs) {
            for (size_t j = 0; j < plugs->count; ++j) {
               unsigned long uid = lrdf_get_uid(plugs->items[j]);
               gPluginCategories.insert(std::make_pair(uid, uri));
            }
            lrdf_free_uris(plugs);
         }
      }

      // And their relationships
      for (size_t i = 0; i < cats->count; ++i) {
         EffectCategory* p = em.LookupCategory(MapCategoryUri(wxString::FromAscii(cats-> items[i])));
         if (!p)
            continue;
         lrdf_uris* subs = lrdf_get_subclasses(cats->items[i]);
         if (subs) {
            for (size_t j = 0; j < subs->count; ++j) {
               EffectCategory* c = em.LookupCategory(MapCategoryUri(wxString::FromAscii(subs->items[j])));
               if (c)
                  em.AddCategoryParent(c, p);
            }
            lrdf_free_uris(subs);
         }
      }

      lrdf_free_uris(cats);
   }
#endif

   wxArrayString pathList;
   wxArrayString files;
   wxString pathVar;

   // Check for the LADSPA_PATH environment variable
   pathVar = wxString::FromUTF8(getenv("LADSPA_PATH"));
   if (!pathVar.empty())
   {
      wxStringTokenizer tok(pathVar);
      while (tok.HasMoreTokens())
      {
         pathList.Add(tok.GetNextToken());
      }
   }

#if defined(__WXMAC__)
#define LADSPAPATH wxT("/Library/Audio/Plug-Ins/LADSPA")

   // Look in ~/Library/Audio/Plug-Ins/LADSPA and /Library/Audio/Plug-Ins/LADSPA
   pathList.Add(wxGetHomeDir() + wxFILE_SEP_PATH + LADSPAPATH);
   pathList.Add(LADSPAPATH);

   // Recursively scan for all shared objects
   pm.FindFilesInPathList(wxT("*.so"), pathList, files, true);
#elif defined(__WXMSW__)
   // Recursively scan for all DLLs
   pm.FindFilesInPathList(wxT("*.dll"), pathList, files, true);
#else
   pathList.Add(wxGetHomeDir() + wxFILE_SEP_PATH + wxT(".ladspa"));
   pathList.Add(wxT("/usr/local/lib/ladspa"));
   pathList.Add(wxT("/usr/lib/ladspa"));
   pathList.Add(wxT(LIBDIR) wxT("/ladspa"));

   // Recursively scan for all shared objects
   pm.FindFilesInPathList(wxT("*.so"), pathList, files, true);
#endif

   return files;
}
bool decode_url(wxString surl, wxString& prefix, wxString &window, PeriodType& period, time_t& time, int& selected_draw, bool xml) { wxString url; if(xml) url = decode_string_xml(surl); else url = decode_string(surl); selected_draw = -1; url.Trim(true); url.Trim(false); /* draw://<prefix>/<window>/<period>/time */ wxStringTokenizer tok(url, _T("/")); if (tok.CountTokens() != 6 && tok.CountTokens() != 7) return false; tok.GetNextToken(); tok.GetNextToken(); prefix = tok.GetNextToken(); window = tok.GetNextToken(); wxString pstr = tok.GetNextToken(); if (pstr == _T("E")) period = PERIOD_T_DECADE; else if (pstr == _T("Y")) period = PERIOD_T_YEAR; else if (pstr == _T("M")) period = PERIOD_T_MONTH; else if (pstr == _T("W")) period = PERIOD_T_WEEK; else if (pstr == _T("S")) period = PERIOD_T_SEASON; else if (pstr == _T("D")) period = PERIOD_T_DAY; else if (pstr == _T("10M")) period = PERIOD_T_30MINUTE; else return false; long rtime; if (tok.GetNextToken().ToLong(&rtime) == false) return false; wxDateTime dt(rtime); if (rtime <= 0 || #ifdef __WXMSW__ rtime >= 2147483647 #else rtime == std::numeric_limits<time_t>::max() #endif || dt.GetYear() < 1980 || dt.GetYear() > 2037) time = wxDateTime::Now().GetTicks(); else time = rtime; if (tok.HasMoreTokens()) { long tmpval; if (tok.GetNextToken().ToLong(&tmpval) == false) return false; selected_draw = tmpval; } return true; }
/** Build the plaintext inverted index (keyword -> set of document numbers)
 *  for every regular file under directoryPath.
 *
 *  Each file is stored/encrypted through FileStore (storefile()) and its
 *  contents tokenized into lowercase keywords; timing statistics and a
 *  size estimate of the resulting map are printed at the end.
 *
 *  @param directoryPath  root directory to scan recursively
 */
void SSE::genPlainIndex(string directoryPath) {
    cout << "[PATH] " << directoryPath << endl;

    /* OPTIONAL: Print progress */
    // First pass: count regular files so the directory branch below can
    // report a rough completion percentage.
    int fileSum = 0, fileCount = -1;
    for(boost::filesystem::recursive_directory_iterator end, dir(directoryPath); dir != end; ++dir) {
        if(boost::filesystem::is_regular(dir->status())) {
            fileSum++;
        }
    }

    double SKETime = 0;
    double filestoreTime = 0;
    double plainIndexTime = 0;
    clock_t startTime = clock();

    for(boost::filesystem::recursive_directory_iterator end, dir(directoryPath); dir != end; ++dir) {
        // string fileName = boost::filesystem::canonical(dir->path()).string();
        string fileName = dir->path().string();
        if((dir->path().filename()).compare(".DS_Store") == 0){
            // Skip macOS Finder metadata files.
            continue;
        }
        if(boost::filesystem::is_regular(dir->status())) {
            fileCount++;   // starts at -1, so the first file gets number 0

            uint64_t docID = getDocNameHash(boost::lexical_cast<string>(fileCount));
            CLEAR_BIT(docID, 0);

            /* Put file contents, FileStore is responsible for enryption and decryption of data files*/
            // Inner startTime deliberately shadows the outer one: it times
            // just this storefile() call.
            clock_t startTime = clock();
            double cryptoduration = 0;
            storefile(fileName, docID, cryptoduration);
            SKETime += cryptoduration;
            filestoreTime += (double)(clock()-startTime)/(double)CLOCKS_PER_SEC;

            ifstream input(fileName.c_str());
            boost::char_separator<char> sep(" ~!#$%^&*()+=[]{};:'\"\\|?/<>,");
            string getcontent;
            // BUG FIX: the tokenizer used to be constructed as tok(getcontent)
            // WITHOUT 'sep', so the custom separator set above was never used
            // (tokenizer::assign() keeps the construction-time TokenizerFunc).
            // Pass 'sep' so punctuation is actually treated as delimiters.
            boost::tokenizer<boost::char_separator<char> > tok(getcontent, sep);
            while(getline(input, getcontent)) {
                tok.assign(getcontent.begin(), getcontent.end());
                for(boost::tokenizer<boost::char_separator<char> >::iterator beg = tok.begin(); beg != tok.end(); ++beg) {
                    string keyword(*beg);
                    // OPTIONAL: convert keywords to lower case
                    std::transform(keyword.begin(), keyword.end(), keyword.begin(), ::tolower);
                    // Possible optimization: memorize docID of fileName
                    // add keyword --> document number to the map
                    map[keyword].insert(fileCount);
                }
            }
            input.close();
        } else {
            // it's a directory
            cout << "[DIR] [" << (int)((double)fileCount/(double)fileSum*100) << "%] " << fileName << endl;
        }
    }
    cout << "[DONE] Number of keywords: " << map.size() << std::endl;
    plainIndexTime = (double)(clock()-startTime)/(double)CLOCKS_PER_SEC - filestoreTime;
    cout << "Plain index gen took " << plainIndexTime << " seconds." << endl;
    cout << "SKE took " << SKETime << " seconds." << endl;

    // OPTIONAL: Estimate map size (4 bytes per keyword/document pair).
    uint64_t mapSize = 0;
    uint64_t fileKeywordPairs = 0;
    for(unordered_map<string, unordered_set<docid_t> , stringhash>::iterator itmap = map.begin(); itmap != map.end(); ++itmap) {
        unordered_set<docid_t> & set = itmap->second;
        for(unordered_set<docid_t>::iterator itset = set.begin(); itset != set.end(); ++itset) {
            mapSize += 4;
            fileKeywordPairs++;
        }
    }
    cout << "[DONE] Estimated map size: " << mapSize << endl;
    cout << "File keyword pairs are: " << fileKeywordPairs << endl;

    // TODO: copy all documents to DocumentStore, rename each file to its docId
}
/* Handle an editor/model console command.
 *
 * q is the command name (may be NULL) and args its raw argument string.
 * Returns 0 when the command was recognized and handled, 1 otherwise so
 * the caller can try other handlers.
 */
int mod_command(char *q, char *args)
{
    if( q==NULL ) {
        ; /* no command: fall through and report unhandled */
    } else if( strcmp(q, "edit")==0 ) {
        /* toggle edit mode; vertical centering tracks the opposite state */
        editmode = editmode ? 0 : 1;
        v_center = editmode ? 0 : 1;
        return 0;
    } else if( strcmp(q, "model")==0 ) {
        setmodel = safe_atoi(tok(args, " ")); // FIXME: lame hack
        return 0;
    } else if( strcmp(q, "bounds")==0 || strcmp(q, "blocksize")==0 ) {
        size_t n = 0;
        int x = safe_atoi(tok(args, " "));
        int y = safe_atoi(tok(args, " "));
        int z = safe_atoi(tok(args, " "));
        /* packet tag byte: 'b' for bounds, 'z' for blocksize */
        char chr = strcmp(q, "bounds")==0 ? 'b' : 'z';
        if( !x || !y || !z ) {
            /* missing (or zero) arguments: just report the current values */
            context *co = fr[hotfr%maxframes].objs[mycontext].data; // FIXME is mycontext always set here?
            if( chr == 'b' )
                echo("The current bounds are (X,Y,Z): %d %d %d", co->x, co->y, co->z);
            else
                echo("The current blocksize is (X,Y,Z): %d %d %d", co->bsx, co->bsy, co->bsz);
            return 0;
        }
        /* serialize 1-byte tag + three 4-byte ints and queue as a console command */
        memset(&magic_c, 0, sizeof magic_c);
        packbytes(magic_c.data, chr, &n, 1);
        packbytes(magic_c.data, x, &n, 4);
        packbytes(magic_c.data, y, &n, 4);
        packbytes(magic_c.data, z, &n, 4);
        magic_c.datasz = n;
        magic_c.flags |= CMDF_DATA;
        magic_c.cmd = CMDT_0CON; // console command
        putcmd(-1, -1, -1);
        return 0;
    } else if( strcmp(q, "tilespacing")==0 ) {
        size_t n = 0;
        int tileuw = safe_atoi(tok(args, " "));
        int tileuh = safe_atoi(tok(args, " "));
        if( !tileuw || !tileuh ) {
            /* missing arguments: report current spacing */
            context *co = fr[hotfr%maxframes].objs[mycontext].data;
            echo("The current tilespacing is (W,H): %d %d", co->tileuw, co->tileuh);
            return 0;
        }
        /* serialize 't' tag + two 4-byte ints and queue as a console command */
        memset(&magic_c, 0, sizeof magic_c);
        packbytes(magic_c.data, 't', &n, 1);
        packbytes(magic_c.data, tileuw, &n, 4);
        packbytes(magic_c.data, tileuh, &n, 4);
        magic_c.datasz = n;
        magic_c.flags |= CMDF_DATA;
        magic_c.cmd = CMDT_0CON;
        putcmd(-1, -1, -1);
        return 0;
    } else if( strcmp(q, "orthographic")==0 || strcmp(q, "dimetric")==0 ) {
        /* projection switch: only the first letter ('o'/'d') is transmitted */
        size_t n = 0;
        memset(&magic_c, 0, sizeof magic_c);
        packbytes(magic_c.data, q[0], &n, 1);
        magic_c.datasz = n;
        magic_c.flags |= CMDF_DATA;
        magic_c.cmd = CMDT_0CON;
        putcmd(-1, -1, -1);
        return 0;
    } else if( strcmp(q, "resprite")==0 ) {
        /* reload sprite assets, report the count change, drop the old set */
        reload_sprites();
        renumber_sprites();
        echo("Was %d sprites, now %d sprites.", old_spr_count, spr_count);
        unload_sprites(old_sprites, old_spr_count);
        return 0;
    }
    return 1; /* unrecognized command */
}
/** Parse a CorrelatedGaussianParameters block from the configuration file.
 *
 * Reads 'size' ModelParameter definition lines from ifile; when more than
 * one was read, also reads a size x size correlation matrix, diagonalizes
 * it and appends the resulting uncorrelated parameters to ModPars.
 *
 * @param ModPars destination vector for the (possibly diagonalized) parameters
 * @param ifile   open config stream, positioned just after the CGP header line
 * @param beg     tokenizer iterator into the header line; on entry it points
 *                at the CGP name. NOTE: it is reassigned to tokenizers of the
 *                lines read here, so the caller must not keep using it for
 *                the header line afterwards.
 * @param rank    MPI rank; diagnostics are printed/thrown on rank 0 only
 */
void CorrelatedGaussianParameters::ParseCGP(std::vector<ModelParameter>& ModPars, std::ifstream& ifile, boost::tokenizer<boost::char_separator<char> >::iterator & beg, int rank)
{
    name = *beg;
    ++beg;
    int size = atoi((*beg).c_str());
    int nlines = 0;
    std::string line;
    bool IsEOF;  // NOTE(review): assigned below but never checked; EOF mid-block is silently ignored
    boost::char_separator<char> sep(" \t");
    // One "ModelParameter ..." definition line per correlated parameter.
    for (int i = 0; i < size; i++) {
        IsEOF = getline(ifile, line).eof();
        if (line.empty() || line.at(0) == '#') {
            if (rank == 0) std::cout << "ERROR: no comments or empty lines in CorrelatedGaussianParameters please!" << std::endl;
            exit(EXIT_FAILURE);
        }
        lineNo++;
        boost::tokenizer<boost::char_separator<char> > tok(line, sep);
        beg = tok.begin();
        std::string type = *beg;
        ++beg;
        if (type.compare("ModelParameter") != 0)
            if (rank == 0) throw std::runtime_error("ERROR: in line no." + boost::lexical_cast<std::string>(lineNo) + " of file " + filename + ", expecting a ModelParameter type here...\n");
        ModelParameter tmpMP;
        beg = tmpMP.ParseModelParameter(beg);
        if (beg != tok.end())
            if (rank == 0) std::cout << "WARNING: unread information in parameter " << tmpMP.getname() << std::endl;
        tmpMP.setCgp_name(name);
        AddPar(tmpMP);
        nlines++;
    }
    if (nlines > 1) {
        // Read the size x size correlation matrix; each entry must start
        // with '0', '1' or '-' (valid correlation coefficients).
        gslpp::matrix<double> myCorr(gslpp::matrix<double>::Id(nlines));
        int ni = 0;
        for (int i = 0; i < size; i++) {
            IsEOF = getline(ifile, line).eof();
            if (line.empty() || line.at(0) == '#') {
                if (rank == 0) std::cout << "ERROR: no comments or empty lines in CorrelatedGaussianParameters please!" << std::endl;
                exit(EXIT_FAILURE);
            }
            lineNo++;
            boost::tokenizer<boost::char_separator<char> > mytok(line, sep);
            beg = mytok.begin();
            int nj = 0;
            for (int j = 0; j < size; j++) {
                if ((*beg).compare(0, 1, "0") == 0 || (*beg).compare(0, 1, "1") == 0 || (*beg).compare(0, 1, "-") == 0) {
                    if (std::distance(mytok.begin(), mytok.end()) < size && rank == 0)
                        throw std::runtime_error(("ERROR: Correlation matrix is of wrong size in Correlated Gaussian Parameters: " + name).c_str());
                    myCorr(ni, nj) = atof((*beg).c_str());
                    nj++;
                    beg++;
                } else {
                    if (rank == 0) std::cout << "ERROR: invalid correlation matrix for " << name << ". Check element (" << ni + 1 << "," << nj + 1 << ") in line number " + boost::lexical_cast<std::string>(lineNo) << std::endl;
                    exit(EXIT_FAILURE);
                }
            }
            ni++;
        }
        // Rotate to an uncorrelated basis and export those parameters.
        DiagonalizePars(myCorr);
        ModPars.insert(ModPars.end(), getDiagPars().begin(), getDiagPars().end());
    } else {
        // Degenerate set (< 2 parameters): treat as ordinary parameters.
        if (rank == 0) std::cout << "\nWARNING: Correlated Gaussian Parameters " << name.c_str() << " defined with less than two correlated parameters. The set is being marked as normal Parameters." << std::endl;
        if (getPars().size() == 1) ModPars.push_back(ModelParameter(getPar(0)));
        // Consume (and discard) the now-meaningless correlation matrix lines.
        for (int i = 0; i < size; i++) {
            IsEOF = getline(ifile, line).eof();
            lineNo++;
        }
    }
}
/** Parse the contents of a simple list (:sl.) element.
 *
 * Consumes tokens until end-of-input or the closing :euserdoc. tag,
 * dispatching on the current tag to build child elements. List-type
 * children nest one level deeper with indent bumped by 4 (lists at
 * indent 1 restart at 4). Returns the first token not consumed.
 */
Lexer::Token Sl::parse( Lexer* lexer )
{
    Lexer::Token tok( parseAttributes( lexer ) );
    unsigned int itemCount( 0 );    // ordinal passed to each SlLi item
    while( tok != Lexer::END && !( tok == Lexer::TAG && lexer->tagId() == Lexer::EUSERDOC ) ) {
        if( parseInline( lexer, tok ) ) {
            switch( lexer->tagId() ) {
            case Lexer::DL:     // nested definition list
                {
                    Element* elt( new Dl( document, this, document->dataName(), document->dataLine(),
                        document->dataCol(), nestLevel + 1, indent == 1 ? 4 : indent + 4 ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                }
                break;
            case Lexer::OL:     // nested ordered list
                {
                    Element* elt( new Ol( document, this, document->dataName(), document->dataLine(),
                        document->dataCol(), nestLevel + 1, indent == 1 ? 4 : indent + 4 ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                }
                break;
            case Lexer::LI:     // a list item of this list
                {
                    Element* elt( new SlLi( document, this, document->dataName(), document->dataLine(),
                        document->dataCol(), itemCount++, nestLevel, indent, compact ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                }
                break;
            case Lexer::LP:     // list paragraph
                {
                    Element* elt( new Lp( document, this, document->dataName(), document->dataLine(),
                        document->dataCol(), indent ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                }
                break;
            case Lexer::PARML:  // nested parameter list
                {
                    Element* elt( new Parml( document, this, document->dataName(), document->dataLine(),
                        document->dataCol(), nestLevel + 1, indent == 1 ? 4 : indent + 4 ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                }
                break;
            case Lexer::SL:     // nested simple list
                {
                    Element* elt( new Sl( document, this, document->dataName(), document->dataLine(),
                        document->dataCol(), nestLevel + 1, indent == 1 ? 4 : indent + 4 ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                }
                break;
            case Lexer::ESL:    // end of this list: consume and return
                {
                    Element* elt( new ESl( document, this, document->dataName(), document->dataLine(),
                        document->dataCol() ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                    // top-level lists get a trailing line break
                    if( !nestLevel )
                        appendChild( new BrCmd( document, this, document->dataName(),
                            document->dataLine(), document->dataCol() ) );
                    return tok;
                }
            case Lexer::UL:     // nested unordered list
                {
                    Element* elt( new Ul( document, this, document->dataName(), document->dataLine(),
                        document->dataCol(), nestLevel + 1, indent == 1 ? 4 : indent + 4 ) );
                    appendChild( elt );
                    tok = elt->parse( lexer );
                }
                break;
            default:
                // unexpected tag: the list was never closed
                document->printError( ERR1_NOENDLIST );
                return tok;
            }
        }
    }
    return tok;
}
/** Parse the NMEA RMC sentence currently held in buf and update the
 *  time/date/validity/course state.
 *
 * Comma-separated field indices (0 is the "$..RMC" header):
 *  1 = UTC time hhmmss.sss, 2 = status (A=valid, V=void),
 *  8 = course over ground, 9 = date ddmmyy.
 * The latitude/longitude fields (3-6) are kept commented out below.
 */
void Ublox::read_rmc()
{
    int counter = 0;
    char token[20];
    Tokeniser tok(buf, ',');

    while(tok.next(token, 20))
    {
        switch(counter)
        {
        case 1: //time
        {
            float time = atof(token);
            int hms = int(time);

            datetime.millis = time - hms;       // fractional seconds
            datetime.seconds = fmod(hms, 100);  // hhmmss -> ss
            hms /= 100;
            datetime.minutes = fmod(hms, 100);  // -> mm
            hms /= 100;
            datetime.hours = hms;               // -> hh

            time_age = millis();
        }
        break;
        case 2:
        {
            // A = data valid, V = receiver warning
            if(token[0] == 'A')
                datetime.valid = true;
            if(token[0] == 'V')
                datetime.valid = false;
        }
        break;
        /*
        case 3:
        {
            float llat = atof(token);
            int ilat = llat/100;
            double latmins = fmod(llat, 100);
            latitude = ilat + (latmins/60);
        }
        break;
        case 4:
        {
            if(token[0] == 'S')
                latitude = -latitude;
        }
        break;
        case 5:
        {
            float llong = atof(token);
            float ilat = llong/100;
            double lonmins = fmod(llong, 100);
            longitude = ilat + (lonmins/60);
        }
        break;
        case 6:
        {
            if(token[0] == 'W')
                longitude = -longitude;
            latlng_age = millis();
        }
        break;
        */
        case 8:
        {
            course = atof(token);
            course_age = millis();
        }
        break;
        case 9:
        {
            uint32_t date = atoi(token);
            datetime.year = fmod(date, 100);    // ddmmyy -> yy
            date /= 100;
            datetime.month = fmod(date, 100);   // -> mm
            datetime.day = date / 100;          // -> dd
            date_age = millis();
        }
        break;
        }
        counter++;
    }
}
/** multiCompare() must match "||" against the %oror% pattern alternative. */
void multiCompare5() const {
    // Consistency fix: sibling tests construct Token with nullptr, not 0.
    Token tok(nullptr);
    tok.str("||");
    ASSERT_EQUALS(true, Token::multiCompare(&tok, "+|%or%|%oror%", 0) >= 0);
}
/** Add every entry from a comma-separated list to the context menu. */
void PhysicsPaper3ContextMenu::addEntries(const std::string &commaSepEntryList){
  // Split on commas, then register each resulting entry individually.
  const std::vector<std::string> entryList = tok(commaSepEntryList,",");
  for(std::vector<std::string>::const_iterator it = entryList.begin(); it != entryList.end(); ++it){
    addEntry(*it);
  }
}
void isNameGuarantees3() const { Token tok(nullptr); tok.str("_123"); ASSERT_EQUALS(true, tok.isName()); }
/** Get the beginning and end of the job or process definition under the cursor.
 *
 * @param buff        the string to search
 * @param cursor_pos  cursor offset within buff
 * @param a           out (may be NULL): receives the start of the range
 * @param b           out (may be NULL): receives the end of the range
 * @param process     if non-zero, pipes also delimit the range, i.e. return
 *                    the current process rather than the whole job
 */
static void job_or_process_extent(const wchar_t *buff, size_t cursor_pos, const wchar_t **a, const wchar_t **b, int process)
{
    const wchar_t *begin, *end;
    long pos;
    wchar_t *buffcpy;
    int finished=0;

    CHECK(buff,);

    if (a)
    {
        *a=0;
    }

    if (b)
    {
        *b = 0;
    }

    // Restrict the search to the innermost command substitution at the cursor.
    parse_util_cmdsubst_extent(buff, cursor_pos, &begin, &end);
    if (!end || !begin)
    {
        return;
    }

    pos = cursor_pos - (begin - buff);  // cursor offset within the substitution

    // Default to the whole substitution; the loop below narrows the bounds.
    if (a)
    {
        *a = begin;
    }

    if (b)
    {
        *b = end;
    }

    buffcpy = wcsndup(begin, end-begin);

    if (!buffcpy)
    {
        DIE_MEM();
    }

    tokenizer_t tok(buffcpy, TOK_ACCEPT_UNFINISHED);
    for (; tok_has_next(&tok) && !finished; tok_next(&tok))
    {
        int tok_begin = tok_get_pos(&tok);

        switch (tok_last_type(&tok))
        {
            case TOK_PIPE:
            {
                // A pipe only delimits the range in process mode;
                // otherwise ignore it.
                if (!process)
                {
                    break;
                }
            }
            // deliberate fallthrough: in process mode a pipe acts as a separator

            case TOK_END:
            case TOK_BACKGROUND:
            {
                if (tok_begin >= pos)
                {
                    // separator at/after the cursor ends the range
                    finished=1;
                    if (b)
                    {
                        *b = (wchar_t *)buff + tok_begin;
                    }
                }
                else
                {
                    // separator before the cursor: range starts just after it
                    if (a)
                    {
                        *a = (wchar_t *)buff + tok_begin+1;
                    }
                }

                break;
            }

            default:
            {
                break;
            }
        }
    }

    free(buffcpy);
}
void isNameGuarantees5() const { Token tok(nullptr); tok.str("a123456"); ASSERT_EQUALS(true, tok.isName()); ASSERT_EQUALS(false, tok.isNumber()); }
/** Collect quoting/offset/type information about the token containing
 *  position pos in cmd (used when inserting completions).
 *
 * @param cmd    the command line to inspect
 * @param pos    cursor position within cmd
 * @param quote  out (may be NULL): open quote character at pos, or '\0'
 * @param offset out (may be NULL): start offset of the current token
 * @param type   out (may be NULL): type of the last token before pos
 */
void parse_util_get_parameter_info(const wcstring &cmd, const size_t pos, wchar_t *quote, size_t *offset, int *type)
{
    size_t prev_pos=0;
    wchar_t last_quote = '\0';
    int unfinished;

    // Walk tokens up to the cursor, remembering the last token start and
    // any quote state inside a string token.
    tokenizer_t tok(cmd.c_str(), TOK_ACCEPT_UNFINISHED | TOK_SQUASH_ERRORS);
    for (; tok_has_next(&tok); tok_next(&tok))
    {
        if (tok_get_pos(&tok) > pos)
            break;

        if (tok_last_type(&tok) == TOK_STRING)
            last_quote = get_quote(tok_last(&tok), pos - tok_get_pos(&tok));

        if (type != NULL)
            *type = tok_last_type(&tok);

        prev_pos = tok_get_pos(&tok);
    }

    // Work on a copy truncated at the cursor.
    wchar_t *cmd_tmp = wcsdup(cmd.c_str());
    cmd_tmp[pos]=0;
    size_t cmdlen = wcslen(cmd_tmp);

    // Decide whether the token at the cursor is still "unfinished":
    // empty input, an open quote, or unescaped trailing whitespace.
    unfinished = (cmdlen==0);
    if (!unfinished)
    {
        // NOTE(review): this tests the 'quote' out-POINTER rather than
        // last_quote; looks suspicious — confirm against upstream intent
        // before changing.
        unfinished = (quote != 0);

        if (!unfinished)
        {
            if (wcschr(L" \t\n\r", cmd_tmp[cmdlen-1]) != 0)
            {
                if ((cmdlen == 1) || (cmd_tmp[cmdlen-2] != L'\\'))
                {
                    unfinished=1;
                }
            }
        }
    }

    if (quote)
        *quote = last_quote;

    if (offset != 0)
    {
        if (!unfinished)
        {
            // Skip statement separators to find the real token start.
            while ((cmd_tmp[prev_pos] != 0) && (wcschr(L";|",cmd_tmp[prev_pos])!= 0))
                prev_pos++;

            *offset = prev_pos;
        }
        else
        {
            *offset = pos;
        }
    }
    free(cmd_tmp);
}
inline std::list<std::string> parse_command(const std::string &cmd_line) { std::list<std::string> args; boost::tokenizer<boost::escaped_list_separator<char>, std::string::const_iterator, std::string > tok(cmd_line, boost::escaped_list_separator<char>('\\', ' ', '\"')); BOOST_FOREACH(std::string s, tok) { if (!s.empty()) args.push_back(s); } return args; }
void loadAdditionalDocStrings(void) { // see if we can open the specified file DVariant fn = "docstrings.txt"; // default // override, if present mConfigSvc->getParam("docstrings", fn); std::fstream s; s.open(fn.toString().c_str(), ios::in); if (s.fail()) return; string target = ""; DocumentStringType doctype = DT_PROP; string text = ""; // process line by line while (!s.eof()) { string line; getline(s, line, '\n'); // process // strip comments size_t cpos = line.find_first_of("#"); if (cpos != line.npos) line = line.substr(0, --cpos); // we now have the comment stripped. See if we have a target specifier size_t cl = line.find_first_of("]"); if (line[0] == '[' && cl != line.npos) { // yup. see the type string cont = line.substr(1, cl - 1); WhitespaceStringTokenizer tok(cont); string type = tok.next(); string address = tok.next(); DocumentStringType ntype = DT_PROP; if (type == "prop") { ntype = DT_PROP; } else if (type == "link") { ntype = DT_LINK; } else if (type == "enum") { ntype = DT_ENUM; } else if (type == "page") { ntype = DT_SPECIAL; } else { OPDE_EXCEPT("Unknown section type :" + type, "DocGenerator::loadAdditionalDocStrings"); } // dispatch the old text dispatchAdditionalDocString(doctype, target, text); doctype = ntype; text = ""; target = address; } else { text += line; text += '\n'; // we want to preserve formatting } } // the cycle // last dispatch dispatchAdditionalDocString(doctype, target, text); s.close(); }
/** Build the generic print-setup dialog UI and populate the printer list.
 *
 * @param data  source print settings; may be NULL, in which case the
 *              current defaults of m_printData apply. A copy is kept in
 *              m_printData and the pointer in m_targetData for write-back.
 *
 * The printer list is filled by parsing "lpstat -v" output (one
 * "device for <name>: <uri>" line per printer) and each printer's
 * status line from "lpstat -p <name>".
 */
void wxGenericPrintSetupDialog::Init(wxPrintData* data)
{
    if ( data )
        m_printData = *data;
    m_targetData = data;

    wxBoxSizer *main_sizer = new wxBoxSizer( wxVERTICAL );

    // printer selection
    wxStaticBoxSizer *printer_sizer = new wxStaticBoxSizer(
        new wxStaticBox( this, wxID_ANY, _("Printer") ), wxVERTICAL );
    main_sizer->Add( printer_sizer, 0, wxALL|wxGROW, 10 );

    m_printerListCtrl = new wxListCtrl( this, wxPRINTID_PRINTER,
        wxDefaultPosition, wxSize(wxDefaultCoord,100), wxLC_REPORT|wxLC_SINGLE_SEL|wxSUNKEN_BORDER );
    // column 0 shows a check-mark image next to the selected printer
    wxImageList *image_list = new wxImageList;
    image_list->Add( wxBitmap(check_xpm) );
    m_printerListCtrl->AssignImageList( image_list, wxIMAGE_LIST_SMALL );

    m_printerListCtrl->InsertColumn( 0, wxT(" "), wxLIST_FORMAT_LEFT, 20 );
    m_printerListCtrl->InsertColumn( 1, wxT("Printer"), wxLIST_FORMAT_LEFT, 150 );
    m_printerListCtrl->InsertColumn( 2, wxT("Device"), wxLIST_FORMAT_LEFT, 150 );
    m_printerListCtrl->InsertColumn( 3, wxT("Status"), wxLIST_FORMAT_LEFT, 80 );

    wxListItem item;
    item.SetMask( wxLIST_MASK_TEXT );
    item.SetColumn( 1 );
    item.SetText( _("Default printer") );
    item.SetId( m_printerListCtrl->InsertItem( item ) );

    // BUG FIX: 'data' was dereferenced here without a NULL check even though
    // it is guarded above; use the already-copied m_printData instead
    // (identical behavior when data is non-NULL).
    if (m_printData.GetPrinterName().empty())
    {
        wxListItem item2;
        item2.SetId( item.GetId() );
        item2.SetMask( wxLIST_MASK_IMAGE );
        item2.SetImage( 0 );
        m_printerListCtrl->SetItem( item2 );
        // also select item
        m_printerListCtrl->SetItemState( item.GetId(), wxLIST_STATE_SELECTED, wxLIST_STATE_SELECTED );
    }

    item.SetId( 1+ item.GetId() );

    wxArrayString errors;
    wxArrayString output;
    long res = wxExecute( wxT("lpstat -v"), output, errors, wxEXEC_NODISABLE );
    if (res >= 0 && errors.GetCount() == 0)
    {
        size_t i;
        for (i = 0; i < output.GetCount(); i++)
        {
            wxStringTokenizer tok( output[i], wxT(" ") );
            wxString tmp = tok.GetNextToken(); // "device"
            if (tmp != wxT("device"))
                break;  // the lpstat syntax must have changed.
            tmp = tok.GetNextToken();          // "for"
            if (tmp != wxT("for"))
                break;  // the lpstat syntax must have changed.
            tmp = tok.GetNextToken();          // "hp_deskjet930c:"
            if (tmp[tmp.length()-1] == wxT(':'))
                tmp.Remove(tmp.length()-1,1);  // strip trailing colon
            wxString name = tmp;
            item.SetText( name );
            item.SetId( m_printerListCtrl->InsertItem( item ) );
            tmp = tok.GetNextToken();          // "parallel:/dev/lp0"
            item.SetColumn( 2 );
            item.SetText( tmp );
            m_printerListCtrl->SetItem( item );

            // BUG FIX: same NULL-safety issue as above; compare with the copy.
            if (m_printData.GetPrinterName() == name)
            {
                wxListItem item2;
                item2.SetId( item.GetId() );
                item2.SetMask( wxLIST_MASK_IMAGE );
                item2.SetImage( 0 );
                m_printerListCtrl->SetItem( item2 );
                // also select item
                m_printerListCtrl->SetItemState( item.GetId(), wxLIST_STATE_SELECTED, wxLIST_STATE_SELECTED );
            }

            // query this printer's status line
            wxString command = wxT("lpstat -p ");
            command += name;
            wxArrayString errors2;
            wxArrayString output2;
            res = wxExecute( command, output2, errors2, wxEXEC_NODISABLE );
            if (res >= 0 && errors2.GetCount() == 0 && output2.GetCount() > 0)
            {
                tmp = output2[0]; // "printer hp_deskjet930c is idle. enable since ..."
                int pos = tmp.Find( wxT('.') );
                if (pos != wxNOT_FOUND)
                    tmp.Remove( (size_t)pos, tmp.length()-(size_t)pos );
                wxStringTokenizer tok2( tmp, wxT(" ") );
                tmp = tok2.GetNextToken();  // "printer"
                tmp = tok2.GetNextToken();  // "hp_deskjet930c"
                tmp = wxEmptyString;
                // remainder ("is idle" etc.) becomes the status column
                while (tok2.HasMoreTokens())
                {
                    tmp += tok2.GetNextToken();
                    tmp += wxT(" ");
                }
                item.SetColumn( 3 );
                item.SetText( tmp );
                m_printerListCtrl->SetItem( item );
            }

            item.SetColumn( 1 );
            item.SetId( 1+ item.GetId() );
        }
    }
    printer_sizer->Add( m_printerListCtrl, 0, wxALL|wxGROW, 5 );

    wxBoxSizer *item1 = new wxBoxSizer( wxHORIZONTAL );
    main_sizer->Add( item1, 0, wxALL, 5 );

    // printer options (on the left)
    wxBoxSizer *item2 = new wxBoxSizer( wxVERTICAL );
    wxStaticBox *item4 = new wxStaticBox( this, wxPRINTID_STATIC, _("Paper size") );
    wxStaticBoxSizer *item3 = new wxStaticBoxSizer( item4, wxVERTICAL );
    m_paperTypeChoice = CreatePaperTypeChoice();
    item3->Add( m_paperTypeChoice, 0, wxALIGN_CENTER|wxALL, 5 );
    item2->Add( item3, 0, wxALIGN_CENTER|wxALL, 5 );

    wxString strs6[] =
    {
        _("Portrait"),
        _("Landscape")
    };
    m_orientationRadioBox= new wxRadioBox( this, wxPRINTID_ORIENTATION, _("Orientation"),
        wxDefaultPosition, wxDefaultSize, 2, strs6, 1, wxRA_SPECIFY_ROWS );
    item2->Add( m_orientationRadioBox, 0, wxGROW|wxALIGN_CENTER_VERTICAL|wxALL, 5 );

    wxStaticBox *item8 = new wxStaticBox( this, wxID_ANY, _("Options") );
    wxStaticBoxSizer *item7 = new wxStaticBoxSizer( item8, wxHORIZONTAL );
    m_colourCheckBox = new wxCheckBox( this, wxPRINTID_PRINTCOLOUR, _("Print in colour") );
    item7->Add( m_colourCheckBox, 0, wxALIGN_CENTER|wxALL, 5 );
    item2->Add( item7, 0, wxGROW|wxALIGN_CENTER_VERTICAL|wxALL, 5 );

    item1->Add( item2, 0, wxALIGN_CENTER_HORIZONTAL, 5 );

    // spooling options (on the right)
    wxStaticBox *item11 = new wxStaticBox( this, wxID_ANY, _("Print spooling") );
    wxStaticBoxSizer *item10 = new wxStaticBoxSizer( item11, wxVERTICAL );

    wxStaticText *item12 = new wxStaticText( this, wxID_ANY, _("Printer command:") );
    item10->Add( item12, 0, wxALIGN_CENTER_VERTICAL|wxALL, 5 );

    wxBoxSizer *item13 = new wxBoxSizer( wxHORIZONTAL );
    item13->Add( 20, 20, 0, wxALIGN_CENTER|wxALL, 5 );
    m_printerCommandText = new wxTextCtrl( this, wxPRINTID_COMMAND, wxEmptyString,
        wxDefaultPosition, wxSize(160,wxDefaultCoord) );
    item13->Add( m_printerCommandText, 0, wxALIGN_CENTER|wxALL, 5 );
    item10->Add( item13, 0, wxALIGN_CENTER|wxALL, 0 );

    wxStaticText *item15 = new wxStaticText( this, wxID_ANY, _("Printer options:") );
    item10->Add( item15, 0, wxALIGN_CENTER_VERTICAL|wxALL, 5 );

    wxBoxSizer *item16 = new wxBoxSizer( wxHORIZONTAL );
    item16->Add( 20, 20, 0, wxALIGN_CENTER|wxALL, 5 );
    m_printerOptionsText = new wxTextCtrl( this, wxPRINTID_OPTIONS, wxEmptyString,
        wxDefaultPosition, wxSize(160,wxDefaultCoord) );
    item16->Add( m_printerOptionsText, 0, wxALIGN_CENTER|wxALL, 5 );
    item10->Add( item16, 0, wxALIGN_CENTER|wxALL, 0 );

    item1->Add( item10, 0, wxALIGN_CENTER_HORIZONTAL|wxALL, 5 );

#if wxUSE_STATLINE
    // static line
    main_sizer->Add( new wxStaticLine( this, wxID_ANY ), 0, wxEXPAND | wxLEFT|wxRIGHT|wxTOP, 10 );
#endif

    // buttons
    main_sizer->Add( CreateButtonSizer( wxOK|wxCANCEL), 0, wxEXPAND|wxALL, 10 );

    SetAutoLayout( true );
    SetSizer( main_sizer );

    main_sizer->Fit( this );
    Centre(wxBOTH);

    Fit();
    Centre(wxBOTH);

    InitDialog();
}
/** Parse a time_duration from an undelimited string such as "hhmmss" or
 *  "-hhmmss.fff". A leading '-' negates the result; the fractional part
 *  (after the decimal point) is optional and padded/truncated to the
 *  resolution of time_duration.
 */
inline time_duration parse_undelimited_time_duration(const std::string& s)
{
    int precision = 0;
    {
        // msvc wouldn't compile 'time_duration::num_fractional_digits()'
        // (required template argument list) as a workaround, a temp
        // time_duration object was used
        time_duration tmp(0,0,0,1);
        precision = tmp.num_fractional_digits();
    }
    // 'precision+1' is so we grab all digits, plus the decimal
    int offsets[] = {2,2,2, precision+1};
    int pos = 0, sign = 0;
    int hours = 0;
    short min=0, sec=0;
    boost::int64_t fs=0;
    // increment one position if the string was "signed"
    if(s.at(sign) == '-')
    {
        ++sign;
    }
    // stlport choked when passing s.substr() to tokenizer
    // using a new string fixed the error
    std::string remain = s.substr(sign);
    /* We do not want the offset_separator to wrap the offsets, we
     * will never want to process more than:
     * 2 char, 2 char, 2 char, frac_sec length.
     * We *do* want the offset_separator to give us a partial for the
     * last characters if there were not enough provided in the input string. */
    bool wrap_off = false;
    bool ret_part = true;
    boost::offset_separator osf(offsets, offsets+4, wrap_off, ret_part);
    typedef boost::tokenizer<boost::offset_separator,
                             std::basic_string<char>::const_iterator,
                             std::basic_string<char> > tokenizer;
    typedef boost::tokenizer<boost::offset_separator,
                             std::basic_string<char>::const_iterator,
                             std::basic_string<char> >::iterator tokenizer_iterator;
    tokenizer tok(remain, osf);
    // fields arrive in fixed order: hours, minutes, seconds, fractional part
    for(tokenizer_iterator ti=tok.begin(); ti!=tok.end();++ti){
        switch(pos) {
        case 0:
        {
            hours = boost::lexical_cast<int>(*ti);
            break;
        }
        case 1:
        {
            min = boost::lexical_cast<short>(*ti);
            break;
        }
        case 2:
        {
            sec = boost::lexical_cast<short>(*ti);
            break;
        }
        case 3:
        {
            // substr(1) drops the leading decimal point
            std::string char_digits(ti->substr(1)); // digits w/no decimal
            int digits = static_cast<int>(char_digits.length());
            //Works around a bug in MSVC 6 library that does not support
            //operator>> thus meaning lexical_cast will fail to compile.
#if (defined(BOOST_MSVC) && (_MSC_VER <= 1200))  // 1200 == VC++ 6.0
            // _atoi64 is an MS specific function
            if(digits >= precision) {
                // drop excess digits
                fs = _atoi64(char_digits.substr(0, precision).c_str());
            }
            else if(digits == 0) {
                fs = 0; // just in case _atoi64 doesn't like an empty string
            }
            else {
                fs = _atoi64(char_digits.c_str());
            }
#else
            if(digits >= precision) {
                // drop excess digits
                fs = boost::lexical_cast<boost::int64_t>(char_digits.substr(0, precision));
            }
            else if(digits == 0) {
                fs = 0; // lexical_cast doesn't like empty strings
            }
            else {
                fs = boost::lexical_cast<boost::int64_t>(char_digits);
            }
#endif
            if(digits < precision){
                // trailing zeros get dropped from the string,
                // "1:01:01.1" would yield .000001 instead of .100000
                // the power() compensates for the missing decimal places
                fs *= power(10, precision - digits);
            }
            break;
        }
        };
        pos++;
    }
    if(sign) {
        return -time_duration(hours, min, sec, fs);
    }
    else {
        return time_duration(hours, min, sec, fs);
    }
}
void LLPluginProcessParent::idle(void) { bool idle_again; do { // process queued messages mIncomingQueueMutex.lock(); while(!mIncomingQueue.empty()) { LLPluginMessage message = mIncomingQueue.front(); mIncomingQueue.pop(); mIncomingQueueMutex.unlock(); receiveMessage(message); mIncomingQueueMutex.lock(); } mIncomingQueueMutex.unlock(); // Give time to network processing if(mMessagePipe) { // Drain any queued outgoing messages mMessagePipe->pumpOutput(); // Only do input processing here if this instance isn't in a pollset. if(!mPolledInput) { mMessagePipe->pumpInput(); } } if(mState <= STATE_RUNNING) { if(APR_STATUS_IS_EOF(mSocketError)) { // Plugin socket was closed. This covers both normal plugin termination and plugin crashes. errorState(); } else if(mSocketError != APR_SUCCESS) { // The socket is in an error state -- the plugin is gone. LL_WARNS("Plugin") << "Socket hit an error state (" << mSocketError << ")" << LL_ENDL; errorState(); } } // If a state needs to go directly to another state (as a performance enhancement), it can set idle_again to true after calling setState(). // USE THIS CAREFULLY, since it can starve other code. Specifically make sure there's no way to get into a closed cycle and never return. // When in doubt, don't do it. idle_again = false; switch(mState) { case STATE_UNINITIALIZED: break; case STATE_INITIALIZED: { apr_status_t status = APR_SUCCESS; apr_sockaddr_t* addr = NULL; mListenSocket = LLSocket::create(LLSocket::STREAM_TCP); mBoundPort = 0; // This code is based on parts of LLSocket::create() in lliosocket.cpp. status = apr_sockaddr_info_get( &addr, "127.0.0.1", APR_INET, 0, // port 0 = ephemeral ("find me a port") 0, LLAPRRootPool::get()()); if(ll_apr_warn_status(status)) { killSockets(); errorState(); break; } // This allows us to reuse the address on quick down/up. This is unlikely to create problems. 
ll_apr_warn_status(apr_socket_opt_set(mListenSocket->getSocket(), APR_SO_REUSEADDR, 1)); status = apr_socket_bind(mListenSocket->getSocket(), addr); if(ll_apr_warn_status(status)) { killSockets(); errorState(); break; } // Get the actual port the socket was bound to { apr_sockaddr_t* bound_addr = NULL; if(ll_apr_warn_status(apr_socket_addr_get(&bound_addr, APR_LOCAL, mListenSocket->getSocket()))) { killSockets(); errorState(); break; } mBoundPort = bound_addr->port; if(mBoundPort == 0) { LL_WARNS("Plugin") << "Bound port number unknown, bailing out." << LL_ENDL; killSockets(); errorState(); break; } } LL_DEBUGS("Plugin") << "Bound tcp socket to port: " << addr->port << LL_ENDL; // Make the listen socket non-blocking status = apr_socket_opt_set(mListenSocket->getSocket(), APR_SO_NONBLOCK, 1); if(ll_apr_warn_status(status)) { killSockets(); errorState(); break; } apr_socket_timeout_set(mListenSocket->getSocket(), 0); if(ll_apr_warn_status(status)) { killSockets(); errorState(); break; } // If it's a stream based socket, we need to tell the OS // to keep a queue of incoming connections for ACCEPT. status = apr_socket_listen( mListenSocket->getSocket(), 10); // FIXME: Magic number for queue size if(ll_apr_warn_status(status)) { killSockets(); errorState(); break; } // If we got here, we're listening. setState(STATE_LISTENING); } break; case STATE_LISTENING: { // Launch the plugin process. // Only argument to the launcher is the port number we're listening on std::stringstream stream; stream << mBoundPort; mProcess.addArgument(stream.str()); if(mProcess.launch() != 0) { errorState(); } else { if(mDebug) { // If we're set to debug, start up a gdb instance in a new terminal window and have it attach to the plugin process and continue. 
std::stringstream cmd; #if LL_DARWIN // The command we're constructing would look like this on the command line: // osascript -e 'tell application "Terminal"' -e 'set win to do script "gdb -pid 12345"' -e 'do script "continue" in win' -e 'end tell' mDebugger.setExecutable("/usr/bin/osascript"); mDebugger.addArgument("-e"); mDebugger.addArgument("tell application \"Terminal\""); mDebugger.addArgument("-e"); cmd << "set win to do script \"gdb -pid " << mProcess.getProcessID() << "\""; mDebugger.addArgument(cmd.str()); mDebugger.addArgument("-e"); mDebugger.addArgument("do script \"continue\" in win"); mDebugger.addArgument("-e"); mDebugger.addArgument("end tell"); mDebugger.launch(); #elif LL_LINUX // The command we're constructing would look like this on the command line: // /usr/bin/xterm -geometry 160x24-0+0 -e '/usr/bin/gdb -n /proc/12345/exe 12345' // This can be changed by setting the following environment variables, for example: // export LL_DEBUG_TERMINAL_COMMAND="/usr/bin/gnome-terminal --geometry=165x24-0+0 -e %s" // export LL_DEBUG_GDB_PATH=/usr/bin/gdb char const* env; std::string const terminal_command = (env = getenv("LL_DEBUG_TERMINAL_COMMAND")) ? env : "/usr/bin/xterm -geometry 160x24+0+0 -e %s"; char const* const gdb_path = (env = getenv("LL_DEBUG_GDB_PATH")) ? 
env : "/usr/bin/gdb"; cmd << gdb_path << " -n /proc/" << mProcess.getProcessID() << "/exe " << mProcess.getProcessID(); typedef boost::tokenizer< boost::escaped_list_separator< char>, std::basic_string< char>::const_iterator, std::basic_string<char> > tokenizerT; tokenizerT tok(terminal_command.begin(), terminal_command.end(), boost::escaped_list_separator< char >("\\", " ", "'\"")); std::vector< std::basic_string<char> > tokens; for (tokenizerT::iterator cur_token(tok.begin()), end_token(tok.end()); cur_token != end_token; ++cur_token) { if (!cur_token->empty()) tokens.push_back(*cur_token); } std::vector<std::string>::iterator token = tokens.begin(); mDebugger.setExecutable(*token); while (++token != tokens.end()) { if (*token == "%s") { mDebugger.addArgument(cmd.str()); } else { mDebugger.addArgument(*token); } } mDebugger.launch(); #endif } // This will allow us to time out if the process never starts. mHeartbeat.start(); mHeartbeat.setTimerExpirySec(mPluginLaunchTimeout); setState(STATE_LAUNCHED); } } break; case STATE_LAUNCHED: // waiting for the plugin to connect if(pluginLockedUpOrQuit()) { errorState(); } else { // Check for the incoming connection. 
if(accept()) { // Stop listening on the server port mListenSocket.reset(); setState(STATE_CONNECTED); } } break; case STATE_CONNECTED: // waiting for hello message from the plugin if(pluginLockedUpOrQuit()) { errorState(); } break; case STATE_HELLO: LL_DEBUGS("Plugin") << "received hello message" << LL_ENDL; // Send the message to load the plugin { LLPluginMessage message(LLPLUGIN_MESSAGE_CLASS_INTERNAL, "load_plugin"); message.setValue("file", mPluginFile); sendMessage(message); } setState(STATE_LOADING); break; case STATE_LOADING: // The load_plugin_response message will kick us from here into STATE_RUNNING if(pluginLockedUpOrQuit()) { errorState(); } break; case STATE_RUNNING: if(pluginLockedUpOrQuit()) { errorState(); } break; case STATE_EXITING: if(!mProcess.isRunning()) { setState(STATE_CLEANUP); } else if(pluginLockedUp()) { LL_WARNS("Plugin") << "timeout in exiting state, bailing out" << LL_ENDL; errorState(); } break; case STATE_LAUNCH_FAILURE: if(mOwner != NULL) { mOwner->pluginLaunchFailed(); } setState(STATE_CLEANUP); break; case STATE_ERROR: if(mOwner != NULL) { mOwner->pluginDied(); } setState(STATE_CLEANUP); break; case STATE_CLEANUP: mProcess.kill(); killSockets(); setState(STATE_DONE); break; case STATE_DONE: // just sit here. break; } } while (idle_again); }