void CAnalysisHowPage::FillMode(short t)
{
    WBSF::StringVector MODE_NAME = Tokenize(GetString(IDS_CMN_TIME_FORMAT_DATAHEAD1), ";|");

    int curSel = m_TTModeCtrl.GetCurItemData();
    m_TTModeCtrl.ResetContent();

    for (int m = 0; m < CTM::NB_MODE; m++)
    {
        if (m_sourceTM.IsModeAvailable(CTM(t, m)))
        {
            int pos = m_TTModeCtrl.AddString(MODE_NAME[m].c_str());
            m_TTModeCtrl.SetItemData(pos, m);
        }
    }

    m_TTModeCtrl.SelectFromItemData(curSel);
}
bool promote_dependencies(const std::string& snapshot, std::vector<std::string>& dependencies)
{
    zfs_elevate();

    std::cout << "Searching for origin " << snapshot << std::endl;

    std::string snap_data;
    int rc = exec_wait(find_zfs_cmd(), snap_data, "list", "-H", "-o", "name", NULL);
    if (rc != 0)
        return false;

    std::vector<std::string> snaps;
    Tokenize(snap_data, snaps, "\n");

    std::string snap_folder = ExtractFilePath(snapshot);

    for (size_t i = 0; i < snaps.size(); ++i)
    {
        // Only consider datasets below the snapshot's parent folder.
        if (!next(trim(snaps[i]), 0, snap_folder)
                || trim(snaps[i]).size() <= snap_folder.size())
            continue;

        std::string origin_out;
        std::string subvolume_folder = snaps[i];
        rc = exec_wait(find_zfs_cmd(), origin_out, "get", "-H", "-o", "value",
                       "origin", subvolume_folder.c_str(), NULL);
        if (rc == 0)
        {
            origin_out = trim(origin_out);
            if (origin_out == snapshot)
            {
                std::cout << "Origin is " << subvolume_folder << std::endl;
                if (exec_wait(find_zfs_cmd(), true, "promote", subvolume_folder.c_str(), NULL) != 0)
                {
                    return false;
                }
                dependencies.push_back(subvolume_folder);
            }
        }
    }
    return true;
}
/** check whether a file exists */
bool Parameter::FilesExist(const string &paramName, int fieldNo,
                           std::vector<std::string> const& extensions)
{
    typedef std::vector<std::string> StringVec;
    StringVec::const_iterator iter;

    PARAM_MAP::const_iterator iterParam = m_setting.find(paramName);
    if (iterParam == m_setting.end()) {
        // no param. therefore nothing to check
        return true;
    }
    const StringVec &pathVec = (*iterParam).second;
    for (iter = pathVec.begin(); iter != pathVec.end(); ++iter) {
        StringVec vec = Tokenize(*iter);

        size_t tokenizeIndex;
        if (fieldNo == -1)
            tokenizeIndex = vec.size() - 1;
        else
            tokenizeIndex = static_cast<size_t>(fieldNo);

        if (tokenizeIndex >= vec.size()) {
            stringstream errorMsg("");
            errorMsg << "Expected at least " << (tokenizeIndex + 1)
                     << " tokens per entry in '" << paramName
                     << "', but only found " << vec.size();
            UserMessage::Add(errorMsg.str());
            return false;
        }
        const string &pathStr = vec[tokenizeIndex];
        bool fileFound = false;
        for (size_t i = 0; i < extensions.size() && !fileFound; ++i) {
            fileFound |= FileExists(pathStr + extensions[i]);
        }
        if (!fileFound) {
            stringstream errorMsg("");
            errorMsg << "File " << pathStr << " does not exist";
            UserMessage::Add(errorMsg.str());
            return false;
        }
    }
    return true;
}
// Parses a string of the form "source_planet destination_planet num_ships"
// and calls state.ExecuteOrder. If that fails, the player is dropped.
bool Game::ExecuteOrder(int playerID, const std::string& order)
{
    std::vector<std::string> tokens = Tokenize(order, " ");
    if (tokens.size() != 3)
        return false;
    int sourcePlanet = atoi(tokens[0].c_str());
    int destinationPlanet = atoi(tokens[1].c_str());
    int numShips = atoi(tokens[2].c_str());
    if (!state.ExecuteOrder(desc, playerID, sourcePlanet, destinationPlanet, numShips)) {
        WriteLogMessage("Dropping player " + to_string(playerID)
                        + ". source.Owner() = " + to_string(state.planets[sourcePlanet].owner)
                        + ", playerID = " + to_string(playerID)
                        + ", numShips = " + to_string(numShips)
                        + ", source.NumShips() = " + to_string(state.planets[sourcePlanet].numShips));
        std::cerr << "Dropping player " << playerID
                  << " because of invalid order: " << order << std::endl;
        state.DropPlayer(playerID);
        return false;
    }
    return true;
}
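// The snippets in this collection call several codebase-specific Tokenize
// helpers. Below is a minimal, hypothetical sketch of the return-by-value
// variant used above (split on any delimiter character, skip empty tokens);
// each real project ships its own implementation with a differing signature.
#include <string>
#include <vector>

static std::vector<std::string> Tokenize(const std::string &str,
                                         const std::string &delims = " \t\n")
{
    std::vector<std::string> tokens;
    std::string::size_type start = str.find_first_not_of(delims);
    while (start != std::string::npos) {
        // end is npos on the last token; substr then copies to end of string.
        std::string::size_type end = str.find_first_of(delims, start);
        tokens.push_back(str.substr(start, end - start));
        start = str.find_first_not_of(delims, end);
    }
    return tokens;
}
// e.g. Tokenize("3 5 7", " ") -> {"3", "5", "7"}.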
void sdpPlayerParseStateTableData::GetBoundary(double TimeStampForSearch,
                                               double &TopBoundary, double &LowBoundary)
{
    // Load Relative Field
    std::vector<double> TimeIndex;
    unsigned int i = 0;
    std::string ExtractLine;
    TopBoundary = LowBoundary = 0.0;

    // where is TimeStampForSearch position in index file?
    for (i = 0; i < Index.size(); i++) {
        std::vector<std::string> Token;
        Tokenize(Index.at(i).LineAtIndex, Token, Header.Delimiter); // get rid of first string
        TopBoundary = strtod(Token.at(IndexOfTimeField).c_str(), NULL);
        if (TimeStampForSearch <= TopBoundary)
            break;
        LowBoundary = TopBoundary;
        Token.clear();
    }
    return;
}
static KMETHOD Statement_namespace(KonohaContext *kctx, KonohaStack *sfp)
{
    VAR_TypeCheck(stmt, ns, reqc);
    kstatus_t result = K_CONTINUE;
    kToken *tk = SUGAR kNode_GetToken(kctx, stmt, KSymbol_BlockPattern, NULL);
    if (tk != NULL && tk->resolvedSyntaxInfo->keyword == TokenType_LazyBlock) {
        INIT_GCSTACK();
        kNameSpace *ns = new_(NameSpace, kNode_ns(stmt), _GcStack);
        KTokenSeq range = {ns, KGetParserContext(kctx)->preparedTokenList};
        KTokenSeq_Push(kctx, range);
        SUGAR Tokenize(kctx, ns, kString_text(tk->text), tk->uline, tk->indent, range.tokenList);
        KTokenSeq_End(kctx, range);
        result = SUGAR EvalTokenList(kctx, &range, NULL/*trace*/);
        KTokenSeq_Pop(kctx, range);
        RESET_GCSTACK();
        kNode_Type(kctx, stmt, KNode_Done, KType_void);
    }
    KReturnUnboxValue(result == K_CONTINUE);
}
void FeatureFunction::SetTuneableComponents(const std::string& value)
{
    std::vector<std::string> toks = Tokenize(value, ",");
    UTIL_THROW_IF2(toks.empty(),
                   GetScoreProducerDescription() << ": Empty tuneable-components");
    UTIL_THROW_IF2(toks.size() != m_numScoreComponents,
                   GetScoreProducerDescription()
                   << ": tuneable-components value has to be a comma-separated list of "
                   << m_numScoreComponents << " boolean values");

    m_tuneableComponents.resize(m_numScoreComponents);
    m_numTuneableComponents = m_numScoreComponents;

    for (size_t i = 0; i < toks.size(); ++i) {
        m_tuneableComponents[i] = Scan<bool>(toks[i]);
        if (!m_tuneableComponents[i]) {
            --m_numTuneableComponents;
        }
    }
}
void ExtractLex::Process(vector<string> &toksTarget, vector<string> &toksSource,
                         vector<string> &toksAlign, size_t lineCount)
{
    std::vector<bool> m_sourceAligned(toksSource.size(), false),
                      m_targetAligned(toksTarget.size(), false);

    vector<string>::const_iterator iterAlign;
    for (iterAlign = toksAlign.begin(); iterAlign != toksAlign.end(); ++iterAlign) {
        const string &alignTok = *iterAlign;

        vector<size_t> alignPos;
        Tokenize(alignPos, alignTok, "-");
        assert(alignPos.size() == 2);

        if (alignPos[0] >= toksSource.size()) {
            cerr << "ERROR: alignment over source length. Alignment "
                 << alignPos[0] << " at line " << lineCount << endl;
            continue;
        }
        if (alignPos[1] >= toksTarget.size()) {
            cerr << "ERROR: alignment over target length. Alignment "
                 << alignPos[1] << " at line " << lineCount << endl;
            continue;
        }

        assert(alignPos[0] < toksSource.size());
        assert(alignPos[1] < toksTarget.size());

        m_sourceAligned[alignPos[0]] = true;
        m_targetAligned[alignPos[1]] = true;

        const string &tmpSource = toksSource[alignPos[0]];
        const string &tmpTarget = toksTarget[alignPos[1]];

        const string *source = m_vocab.GetOrAdd(tmpSource);
        const string *target = m_vocab.GetOrAdd(tmpTarget);

        Process(target, source);
    }

    ProcessUnaligned(toksTarget, toksSource, m_sourceAligned, m_targetAligned);
}
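// ExtractLex::Process above fills a vector<size_t> through an
// output-parameter Tokenize overload, splitting "sourcePos-targetPos"
// alignment tokens. A hypothetical sketch of such an overload, which
// converts each token to a number as it splits:
#include <cstdlib>
#include <string>
#include <vector>

static void Tokenize(std::vector<size_t> &out, const std::string &str,
                     const std::string &delims)
{
    std::string::size_type start = str.find_first_not_of(delims);
    while (start != std::string::npos) {
        std::string::size_type end = str.find_first_of(delims, start);
        std::string tok = str.substr(start, end - start);
        out.push_back(std::strtoul(tok.c_str(), NULL, 10));
        start = str.find_first_not_of(delims, end);
    }
}
// e.g. Tokenize(alignPos, "3-17", "-") leaves alignPos == {3, 17}.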
Token *CharactersToTokens(Character text[], size_t nchars, size_t *ntokens_return)
{
    assert(text != NULL);
    puts("CharactersToTokens...");

    // nchars is an upper bound on the number of tokens.
    Token *res = GC_MALLOC(sizeof(Token) * nchars);
    size_t ntokens = 0;
    Character *p = text;

    while (p < text + nchars) {
        p = Tokenize(p, &res[ntokens++]);
    }
    puts("Done");

    // Shrink the allocation to the number of tokens actually produced.
    res = GC_REALLOC(res, sizeof(Token) * ntokens);
    *ntokens_return = ntokens;
    return res;
}
// The GlobusResourceDownEvent is now deprecated and should be removed at
// some point in the future (6.9?).
bool
WriteGlobusResourceDownEventToUserLog( ClassAd *job_ad )
{
    int cluster, proc;
    std::string contact;

    WriteUserLog *ulog = InitializeUserLog( job_ad );
    if ( ulog == NULL ) {
        // User doesn't want a log
        return true;
    }

    job_ad->LookupInteger( ATTR_CLUSTER_ID, cluster );
    job_ad->LookupInteger( ATTR_PROC_ID, proc );

    dprintf( D_FULLDEBUG,
             "(%d.%d) Writing globus down record to user logfile\n",
             cluster, proc );

    GlobusResourceDownEvent event;

    job_ad->LookupString( ATTR_GRID_RESOURCE, contact );
    if ( contact.empty() ) {
        // Not a Globus job, don't log the event
        delete ulog;
        return true;
    }

    // Skip the first token of the grid resource string and log the
    // second one as the resource manager contact.
    Tokenize( contact );
    GetNextToken( " ", false );
    event.rmContact = strnewp( GetNextToken( " ", false ) );

    int rc = ulog->writeEvent( &event, job_ad );
    delete ulog;

    if ( !rc ) {
        dprintf( D_ALWAYS,
                 "(%d.%d) Unable to log ULOG_GLOBUS_RESOURCE_DOWN event\n",
                 cluster, proc );
        return false;
    }
    return true;
}
bool cChannel::Parse(const std::string& data)
{
    std::vector<std::string> fields;

    Tokenize(data, fields, "|");

    if (fields.size() >= 4) {
        // Expected format:
        // ListTVChannels, ListRadioChannels
        // 0 = channel uid
        // 1 = channel external id/number
        // 2 = channel name
        // 3 = isencrypted ("0"/"1")
        // ListRadioChannels only: (TVServerXBMC >= v1.1.0.100)
        // 4 = iswebstream
        // 5 = webstream url
        // 6 = visibleinguide (TVServerXBMC >= v1.2.3.120)
        uid = atoi(fields[0].c_str());
        external_id = atoi(fields[1].c_str());
        name = fields[2];
        encrypted = (strncmp(fields[3].c_str(), "1", 1) == 0);

        if (fields.size() >= 6) {
            iswebstream = (strncmp(fields[4].c_str(), "1", 1) == 0);
            url = fields[5].c_str();

            if (fields.size() >= 7) {
                visibleinguide = (strncmp(fields[6].c_str(), "1", 1) == 0);
            }
        }
        return true;
    } else {
        return false;
    }
}
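// A minimal usage sketch under the field layout documented above; the
// sample line is hypothetical, not taken from a real TVServerXBMC response:
//
//   cChannel channel;
//   bool ok = channel.Parse("42|1|BBC One|0|1|http://example.invalid/stream|1");
//   // ok == true; uid == 42, external_id == 1, name == "BBC One",
//   // encrypted == false, iswebstream == true, visibleinguide == true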
static ret_code GetNumber( char *string, int *pi, struct asm_tok tokenarray[] )
/*****************************************************************************/
{
    struct expr opndx;
    int i;
    int last;

    last = Tokenize( string, Token_Count+1, tokenarray, TOK_RESCAN );
    i = Token_Count+1;
    if( EvalOperand( &i, tokenarray, last, &opndx, EXPF_NOUNDEF ) == ERROR ) {
        return( ERROR );
    }
    /* v2.11: string constants are accepted ( although hardly useful ) */
    //if( opndx.kind != EXPR_CONST || opndx.quoted_string != NULL || tokenarray[i].token != T_FINAL ) {
    if( opndx.kind != EXPR_CONST || tokenarray[i].token != T_FINAL ) {
        return( EmitErr( SYNTAX_ERROR_EX, string ) );
    }
    *pi = opndx.value;
    return( NOT_ERROR );
}
//bool stringCompare( const string &left, const string &right )
//{
//    for( string::const_iterator lit = left.begin(), rit = right.begin(); lit != left.end() && rit != right.end(); ++lit, ++rit )
//        if( tolower( *lit ) < tolower( *rit ) )
//            return true;
//        else if( tolower( *lit ) > tolower( *rit ) )
//            return false;
//    if( left.size() < right.size() )
//        return true;
//    return false;
//}

StringVector CDirectoryManagerBase::GetDirectoriesFromString(const std::string& directoryListString)
{
    StringVector tmp1 = Tokenize(directoryListString, "|");
    StringVector tmp2;

    for (StringVector::iterator it = tmp1.begin(); it != tmp1.end(); ++it) {
        Trim(*it);
        if (!it->empty()) {
            if (!IsPathEndOk(*it))
                *it += '\\';

            *it = SimplifyFilePath(*it);
            if (tmp2.Find(*it, false) == UNKNOWN_POS)
                tmp2.push_back(*it);
        }
    }

    return tmp2;
}
nsresult
nsHtml5StringParser::ParseDocument(const nsAString& aSourceBuffer,
                                   nsIDocument* aTargetDoc,
                                   bool aScriptingEnabledForNoscriptParsing)
{
  MOZ_ASSERT(!aTargetDoc->GetFirstChild());

  NS_ENSURE_TRUE(aSourceBuffer.Length() <= INT32_MAX, NS_ERROR_OUT_OF_MEMORY);

  mTreeBuilder->setFragmentContext(nullptr, kNameSpaceID_None, nullptr, false);
  mTreeBuilder->SetPreventScriptExecution(true);

  Tokenize(aSourceBuffer, aTargetDoc, aScriptingEnabledForNoscriptParsing);
  return NS_OK;
}
/*
** Append text to the tokenizer engine.
**
** This routine (actually the Tokenize() subroutine that is called
** by this routine) may invoke a callback procedure which could delete
** the HTML widget.
*/
void HtmlTokenizerAppend(HtmlWidget *htmlPtr, const char *zText){
  int len = strlen(zText);
  if( htmlPtr->nText==0 ){
    htmlPtr->nAlloc = len + 100;
    htmlPtr->zText = HtmlAlloc( htmlPtr->nAlloc );
    TestPoint(0);
  }else if( htmlPtr->nText + len >= htmlPtr->nAlloc ){
    htmlPtr->nAlloc += len + 100;
    htmlPtr->zText = HtmlRealloc( htmlPtr->zText, htmlPtr->nAlloc );
    TestPoint(0);
  }
  if( htmlPtr->zText==0 ){
    htmlPtr->nText = 0;
    UNTESTED;
    return;
  }
  strcpy(&htmlPtr->zText[htmlPtr->nText], zText);
  htmlPtr->nText += len;
  htmlPtr->nComplete = Tokenize(htmlPtr);
}
int Encode (CFileReader &p_coFileReader)
{
  int iRetVal = 0;
  XML_Parser psoParser;

  psoParser = XML_ParserCreate ("UTF-8");
  if (NULL == psoParser) {
    iRetVal = ENOMEM;
    return iRetVal;
  }

  SDoc soDoc;

  /* register the data handlers */
  XML_SetElementHandler (psoParser, StartElementHandler, EndElementHandler);
  XML_SetXmlDeclHandler (psoParser, XmlDeclHandler);
  XML_SetDoctypeDeclHandler (psoParser, StartDoctypeDeclHandler, EndDoctypeDeclHandler);
  XML_SetUserData (psoParser, &soDoc);

  /* parse the data */
  char mcBuf[256];
  int iDataLen;
  int iIsFinal = 0;
  do {
    iDataLen = sizeof (mcBuf);
    if (p_coFileReader.ReadData ((unsigned char*)mcBuf, iDataLen))
      iIsFinal = 1;
    XML_Parse (psoParser, mcBuf, iDataLen, iIsFinal);
    if (iIsFinal)
      break;
  } while (1);

  Tokenize (soDoc);

  if (psoParser) {
    XML_ParserFree (psoParser);
    psoParser = NULL;
  }

  return iRetVal;
}
SStatus Connector::getStatus(void)
{
    std::string d = getResponse("STATUS", "");
    std::vector<std::string> toks;
    Tokenize(d, toks, "#");

    SStatus ret;
    ret.pause = false;
    ret.capa = 0;

    if (toks.size() > 0)
        ret.lastbackupdate = wxString::FromUTF8(toks[0].c_str());
    if (toks.size() > 1)
        ret.status = wxString::FromUTF8(toks[1].c_str());
    if (toks.size() > 2)
        ret.pcdone = wxString::FromUTF8(toks[2].c_str());
    if (toks.size() > 3)
    {
        if (toks[3] == "P")
            ret.pause = true;
        else if (toks[3] == "NP")
            ret.pause = false;
    }
    if (toks.size() > 4)
    {
        std::map<std::wstring, std::wstring> params;
        ParseParamStr(toks[4], &params);

        std::map<std::wstring, std::wstring>::iterator it_capa = params.find(L"capa");
        if (it_capa != params.end())
        {
            ret.capa = watoi(it_capa->second);
        }
        std::map<std::wstring, std::wstring>::iterator it_new_server = params.find(L"new_ident");
        if (it_new_server != params.end())
        {
            ret.new_server = wnarrow(it_new_server->second);
        }
    }
    return ret;
}
int CShdrFileEchoView::GetNextTokens()
{
    bool bSetLastTime = false;

    tokens.clear();
    getline(in, _buffer);
    if (in.eof())
    {
        in.clear();                  // forget we hit the end of file
        in.seekg(0, std::ios::beg);  // move to the start of the file
        if (!_bRepeat)
            return 0;

        getline(in, _buffer);
        bSetLastTime = true;
    }

    tokens = Tokenize(_buffer, "|");
    if (bSetLastTime && tokens.size() > 0)
        lasttime = GetDateTime(tokens[0]);

    for (size_t i = 0; i < tokens.size(); i++)
        Trim(tokens[i]);

    return tokens.size();
}
void FeatureFunction::ParseLine(const std::string &line)
{
    vector<string> toks = Tokenize(line);
    UTIL_THROW_IF2(toks.empty(), "Empty line");

    string nameStub = toks[0];

    set<string> keys;

    for (size_t i = 1; i < toks.size(); ++i) {
        vector<string> args = TokenizeFirstOnly(toks[i], "=");
        UTIL_THROW_IF2(args.size() != 2,
                       "Incorrect format for feature function arg: " << toks[i]);

        pair<set<string>::iterator, bool> ret = keys.insert(args[0]);
        UTIL_THROW_IF2(!ret.second, "Duplicate key in line " << line);

        if (args[0] == "num-features") {
            m_numScoreComponents = Scan<size_t>(args[1]);
            m_numTuneableComponents = m_numScoreComponents;
        } else if (args[0] == "name") {
            m_description = args[1];
        } else {
            m_args.push_back(args);
        }
    }

    // name
    if (m_description == "") {
        size_t index = description_counts.count(nameStub);

        ostringstream dstream;
        dstream << nameStub;
        dstream << index;

        description_counts.insert(nameStub);
        m_description = dstream.str();
    }
}
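// ParseLine above relies on TokenizeFirstOnly so that option values may
// themselves contain '='. A hypothetical sketch of that helper: split on
// the first occurrence of the delimiter only.
#include <string>
#include <vector>

static std::vector<std::string> TokenizeFirstOnly(const std::string &str,
                                                  const std::string &delim)
{
    std::vector<std::string> tokens;
    std::string::size_type pos = str.find(delim);
    if (pos == std::string::npos) {
        tokens.push_back(str);
    } else {
        tokens.push_back(str.substr(0, pos));
        tokens.push_back(str.substr(pos + delim.size()));
    }
    return tokens;
}
// e.g. TokenizeFirstOnly("path=/a=b", "=") -> {"path", "/a=b"}.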
void NonTermContextProperty::ProcessValue(const std::string &value)
{
    vector<string> toks;
    Tokenize(toks, value);

    FactorCollection &fc = FactorCollection::Instance();

    size_t numNT = Scan<size_t>(toks[0]);
    m_probStores.resize(numNT);

    size_t ind = 1;
    while (ind < toks.size()) {
        vector<const Factor *> factors;

        for (size_t nt = 0; nt < numNT; ++nt) {
            size_t ntInd = Scan<size_t>(toks[ind]);
            assert(nt == ntInd);
            ++ind;

            for (size_t contextInd = 0; contextInd < 4; ++contextInd) {
                //cerr << "toks[" << ind << "]=" << toks[ind] << endl;
                const Factor *factor = fc.AddFactor(toks[ind], false);
                factors.push_back(factor);
                ++ind;
            }
        }

        // done with the context. Just get the count and put it all into data structures
        // cerr << "count=" << toks[ind] << endl;
        float count = Scan<float>(toks[ind]);
        ++ind;

        for (size_t i = 0; i < factors.size(); ++i) {
            size_t ntInd = i / 4;
            size_t contextInd = i % 4;
            const Factor *factor = factors[i];
            AddToMap(ntInd, contextInd, factor, count);
        }
    }
}
hsa_status_t Isa::Initialize(const char *in_isa_name) {
  assert(in_isa_name);

  std::list<std::string> isa_name_tokens;
  if (!Tokenize(in_isa_name, ':', isa_name_tokens)) {
    return HSA_STATUS_ERROR_INVALID_ISA_NAME;
  }
  if (ISA_NAME_AMD_TOKEN_COUNT != isa_name_tokens.size()) {
    return HSA_STATUS_ERROR_INVALID_ISA_NAME;
  }

  full_name_ = in_isa_name;
  vendor_ = isa_name_tokens.front();
  isa_name_tokens.pop_front();
  device_ = isa_name_tokens.front();
  isa_name_tokens.pop_front();

  uint32_t version_major = std::stoi(isa_name_tokens.front());
  isa_name_tokens.pop_front();
  uint32_t version_minor = std::stoi(isa_name_tokens.front());
  isa_name_tokens.pop_front();
  uint32_t version_stepping = std::stoi(isa_name_tokens.front());
  isa_name_tokens.pop_front();
  assert(0 == isa_name_tokens.size());

  compute_capability_.Initialize(version_major, version_minor, version_stepping);

  if (!IsValid()) {
    return HSA_STATUS_ERROR_INVALID_ISA_NAME;
  }
  return HSA_STATUS_SUCCESS;
} // Isa::Initialize
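// Isa::Initialize above consumes a colon-separated name of the form
// "vendor:device:major:minor:stepping" and assumes a Tokenize variant that
// splits into a std::list and reports success. A minimal sketch of that
// variant (hypothetical helper; the example name below is illustrative,
// not an exhaustive list of valid ISA names):
#include <list>
#include <sstream>
#include <string>

static bool Tokenize(const std::string &str, char delim,
                     std::list<std::string> &out)
{
    std::istringstream iss(str);
    std::string tok;
    while (std::getline(iss, tok, delim))
        out.push_back(tok);
    return !out.empty();
}
// e.g. Tokenize("AMD:AMDGPU:8:0:3", ':', tokens) yields five tokens.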
_SEN * SSentence::CreateSentence(CPool &rPool, vector<wstring> lines, bool updateMap)
{
    _SEN *pSen = (_SEN *)rPool.Allocate(sizeof(_SEN));
    pSen->m_pNodes = (SSenNode **)rPool.Allocate(sizeof(SSenNode*) * lines.size());
    pSen->m_len = lines.size();

    const int BUF_LEN = 65535;
    wchar_t buf[BUF_LEN];
    vector<wchar_t *> itemVec;
    CStrIDMap &wordIDMap  = CGlobalMap::s_WIDMap;
    CStrIDMap &posIDMap   = CGlobalMap::s_POSIDMap;
    CStrIDMap &gFormIDMap = CGlobalMap::s_GIDMap;

    for (size_t i = 0; i < lines.size(); ++i) {
        pSen->m_pNodes[i] = SSenNode::MakeNode(rPool);
        wcscpy(buf, lines[i].c_str());
        if (Tokenize(buf, itemVec) == false) {
            fprintf(stderr, "Invalid format %s\n", wstr2utf8(lines[i]).c_str());
            exit(0);
        }

        if (updateMap == true) {
            wordIDMap.Add(itemVec[_GWORD]);
            gFormIDMap.Add(itemVec[_GGenFORM]);
            posIDMap.Add(itemVec[_GTAG]);
        }

        pSen->m_pNodes[i]->m_pwzWord  = copyWcs(itemVec[_GWORD], &rPool);
        pSen->m_pNodes[i]->m_pwzGForm = copyWcs(itemVec[_GGenFORM], &rPool);
        pSen->SetWID(i, wordIDMap.ID(itemVec[_GWORD]));
        pSen->SetGID(i, gFormIDMap.ID(itemVec[_GGenFORM]));
        pSen->SetTag(i, posIDMap.ID(itemVec[_GTAG]));
    }
    return pSen;
}
static kNode *ParseSource(KonohaContext *kctx, kNameSpace *ns, const char *script, size_t len)
{
    KBuffer wb;
    KLIB KBuffer_Init(&(kctx->stack->cwb), &wb);
    KLIB KBuffer_Write(kctx, &wb, "(", 1);
    KLIB KBuffer_Write(kctx, &wb, script, len);
    KLIB KBuffer_Write(kctx, &wb, ")", 1);

    KTokenSeq tokens = {ns, KGetParserContext(kctx)->preparedTokenList};
    KTokenSeq_Push(kctx, tokens);
    const char *buf = KLIB KBuffer_text(kctx, &wb, EnsureZero);
    SUGAR Tokenize(kctx, ns, buf, 0, 0, tokens.tokenList);
    KTokenSeq_End(kctx, tokens);

    KTokenSeq step2 = {ns, tokens.tokenList, kArray_size(tokens.tokenList)};
    SUGAR Preprocess(kctx, ns, RangeTokenSeq(tokens), NULL, step2.tokenList);
    KTokenSeq_End(kctx, step2);

    kNode *newexpr = SUGAR ParseNewNode(kctx, ns, step2.tokenList,
                                        &step2.beginIdx, step2.endIdx, 0, NULL);
    KTokenSeq_Pop(kctx, tokens);
    KLIB KBuffer_Free(&wb);
    return newexpr;
}
void Model1LexicalTable::Load(const std::string &fileName,
                              const Model1Vocabulary& vcbS,
                              const Model1Vocabulary& vcbT)
{
    InputFileStream inFile(fileName);

    std::string line;
    unsigned i = 0;
    while (getline(inFile, line)) {
        ++i;
        std::vector<std::string> tokens = Tokenize(line);
        UTIL_THROW_IF2(tokens.size() != 3,
                       "Line " << i << " in " << fileName << " has wrong number of tokens.");
        unsigned idS = atoll(tokens[0].c_str());
        unsigned idT = atoll(tokens[1].c_str());
        const Factor* wordS = vcbS.GetWord(idS);
        const Factor* wordT = vcbT.GetWord(idT);
        float prob = std::atof(tokens[2].c_str());
        if ((wordS != NULL) && (wordT != NULL)) {
            m_ltable[wordS][wordT] = prob;
        }
        UTIL_THROW_IF2((wordS == NULL) || (wordT == NULL),
                       "Line " << i << " in " << fileName << " has unknown vocabulary.");
        // TODO: can we assume that the vocabulary is known and filter the model
        // on loading? Then remove this line.
    }
    inFile.Close();
}
TargetPhraseImpl *TargetPhraseImpl::CreateFromString(MemPool &pool,
                                                     const PhraseTable &pt,
                                                     const System &system,
                                                     const std::string &str)
{
    //cerr << "str=" << str << endl;
    FactorCollection &vocab = system.GetVocab();

    vector<string> toks = Tokenize(str);
    size_t size = toks.size() - 1;
    TargetPhraseImpl *ret =
        new (pool.Allocate<TargetPhraseImpl>()) TargetPhraseImpl(pool, pt, system, size);

    for (size_t i = 0; i < size; ++i) {
        SCFG::Word &word = (*ret)[i];
        word.CreateFromString(vocab, system, toks[i]);
    }

    // lhs
    ret->lhs.CreateFromString(vocab, system, toks.back());
    //cerr << "ret=" << *ret << endl;

    return ret;
}
void Instance_COM_Initialize(int argc, char ** argv, char ** parsedCommand,
                             int * argcPtr, char *** argvPtr)
{
#if !defined(__WIN32__)
   // Disable stdout buffering on Unix
   setvbuf(stdout, null, _IONBF, 0);
#endif

#if defined(__WIN32__)
   *parsedCommand = UTF16toUTF8(GetCommandLineW());
   *argvPtr = eSystem_New0(sizeof(char *) * 512);
   *argcPtr = Tokenize(*parsedCommand, 512, (void *)(char **)(*argvPtr), false);
#else
   *argcPtr = argc;
   *argvPtr = argv;
#endif

#if defined(__unix__)
   if(!__thisModule && argv)
   {
      getcwd(exeLocation, MAX_LOCATION);
      PathCat(exeLocation, argv[0]);
   }
#endif
}
NCluster *MakeNClusterFromFimi(string &inputFile, int sz)
{
    ifstream myfile(inputFile.c_str());
    if (myfile.is_open()) {
        vector<IOSet *> sets;
        int cnt = 0;
        for (string line; getline(myfile, line);) {
            if (line == "###")
                break;
            // if(cnt >= sz) break;
            vector<string> entries;
            Tokenize(line, entries, " ");
            IOSet *t = new IOSet;
            for (size_t i = 0; i < entries.size(); i++)
                t->Add(atoi(entries[i].c_str()));
            t->SetId(cnt);
            sets.push_back(t);
            cnt++;
        }
        return new NCluster(sets.size(), sets);
    } else {
        string errMsg = "Could not open the FIMI file: " + inputFile;
        Error(errMsg);
        return NULL; // unreachable if Error() aborts; keeps all paths returning
    }
}
void GetOptions (void)
{
    char *endch;
    int opt;    /* int, not char: the -1 sentinel must compare correctly */

    // StrLCpy (kargstr, (char*)mbi->cmdline, 512);

    endch = Tokenize (kargstr);
    kargc = InitKargvArray (kargstr, endch, kargv);

    while ((opt = GetOpt(kargv, kargc, "D")) != -1)
    {
        switch (opt)
        {
        case 'D':
            /* -D : Enable Debugging Output */
            __debug_enabled = TRUE;
            break;
        default:
            break;
        }
    }
}
///
/// string:
/// comma separated list of channels or channel ranges
/// e.g. "0,3,6-9,12"
///
const channels_t VKeithley2700::ParseChannelString( const std::string& channelString ) const
{
  channels_t channels;
  channels.resize( 0 );

  if( 0 == channelString.size() ) {
    std::cerr << " [VKeithley2700::ParseChannelString] ** DEBUG: "
              << "Received empty string." << std::endl;
    return channels;
  }

  // tokenize comma-separated channels/channel-ranges
  std::vector<std::string> tokens;
  Tokenize( channelString, tokens, "," );

  for( std::vector<std::string>::const_iterator it = tokens.begin();
       it < tokens.end(); ++it ) {

    // this token gives a single channel (no "-")
    if( std::string::npos == it->find( "-" ) ) {
      unsigned int aChannel = EvaluateChannelToken( *it );
      channels.push_back( aChannel );
    }

    // this is supposed to be a range
    else {
      range_t range = EvaluateRangeToken( *it );
      for( unsigned int aChannel = range.first;
           aChannel <= range.second; ++aChannel ) {
        channels.push_back( aChannel );
      }
    }
  }

  // sort & remove duplicates
  std::sort( channels.begin(), channels.end() );
  channels.erase( std::unique( channels.begin(), channels.end() ), channels.end() );

  return channels;
}
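// ParseChannelString above delegates "6-9"-style tokens to
// EvaluateRangeToken. A hypothetical sketch of how such a token can be
// reduced to a (first, last) pair; the real method presumably also
// validates the bounds against the instrument's channel count:
#include <algorithm>
#include <cstdlib>
#include <string>
#include <utility>

typedef std::pair<unsigned int, unsigned int> range_t;

static range_t EvaluateRangeTokenSketch( const std::string& token )
{
  std::string::size_type dash = token.find( '-' );
  unsigned int first = std::strtoul( token.substr( 0, dash ).c_str(), NULL, 10 );
  unsigned int last  = std::strtoul( token.substr( dash + 1 ).c_str(), NULL, 10 );
  if( last < first )
    std::swap( first, last );
  return range_t( first, last );
}
// e.g. EvaluateRangeTokenSketch("6-9") -> (6, 9).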
Patient PatientDatabase::getPatient(CprNumber cprNumber)
{
    ifstream infile("patients.txt");
    string line;

    while (getline(infile, line)) {
        // The CPR number is the first 11 characters of each record.
        string matchString = line.substr(0, 11);
        if (cprNumber.getCprNumber() != matchString)
            continue;

        vector<string> raw;
        Tokenize(line, raw, ";");

        CprNumber cpr(raw[0]);
        string name = raw[1];
        string address = raw[2];
        return Patient(cpr, name, address);
    }
    return Patient(CprNumber("000000-0000"), "Not Found", "Not in database");
}