// Returns the tensor index registered for the given path expression.
// Precondition: pexpr is defined and was added via addTensorIndex.
const TensorIndex& Environment::getTensorIndex(const pe::PathExpression& pexpr) const {
  iassert(pexpr.defined())
      << "Tensors in the environment have defined path expressions";
  iassert(util::contains(content->locationOfTensorIndex, pexpr))
      << "Could not find " << pexpr << " in environment";
  const size_t loc = content->locationOfTensorIndex.at(pexpr);
  return content->tensorIndices[loc];
}
// Builds an index expression for the element-wise binary operation `l op r`.
// If one operand is a scalar (order 0) it is broadcast over the other
// operand's index variables; otherwise both operands must have the same type.
Expr IRBuilder::binaryElwiseExpr(Expr l, BinaryOperator op, Expr r) {
  const TensorType *ltype = l.type().toTensor();
  const TensorType *rtype = r.type().toTensor();
  // The non-scalar operand (r if both are scalars) determines the result shape.
  Expr tensor = (ltype->order() > 0) ? l : r;
  std::vector<IndexVar> indexVars;
  const TensorType *tensorType = tensor.type().toTensor();
  vector<IndexDomain> dimensions = tensorType->getDimensions();
  // One free index variable per dimension of the shape-determining operand.
  for (unsigned int i=0; i < tensorType->order(); ++i) {
    IndexDomain domain = dimensions[i];
    indexVars.push_back(factory.createIndexVar(domain));
  }
  Expr a, b;
  if (ltype->order() == 0 || rtype->order() == 0) {
    // Scalar broadcast: the scalar operand gets an empty index-variable list,
    // the tensor operand gets the full list.
    std::vector<IndexVar> scalarIndexVars;
    std::vector<IndexVar> *lIndexVars;
    std::vector<IndexVar> *rIndexVars;
    if (ltype->order() == 0) {
      lIndexVars = &scalarIndexVars;
      rIndexVars = &indexVars;
    }
    else {
      lIndexVars = &indexVars;
      rIndexVars = &scalarIndexVars;
    }
    a = IndexedTensor::make(l, *lIndexVars);
    b = IndexedTensor::make(r, *rIndexVars);
  }
  else {
    // Both operands are non-scalar: types must agree and both are indexed by
    // the same free variables.
    iassert(l.type() == r.type());
    a = IndexedTensor::make(l, indexVars);
    b = IndexedTensor::make(r, indexVars);
  }
  iassert(a.defined() && b.defined());
  Expr val;
  switch (op) {
    case Add: val = Add::make(a, b); break;
    case Sub: val = Sub::make(a, b); break;
    case Mul: val = Mul::make(a, b); break;
    case Div: val = Div::make(a, b); break;
  }
  iassert(val.defined());
  // Preserve the column-vector flag of the shape-determining operand.
  const bool isColumnVector = tensor.type().toTensor()->isColumnVector;
  return IndexExpr::make(indexVars, val, isColumnVector);
}
void mysplit(const string &s,string &s1,string &s2) { unsigned int i=0; for(;i<s.length();i++)if( s[i]==' ' || s[i]=='\t' || s[i]==' ')break; s1=s.substr(0,i); for(;i<s.length();i++)if( !(s[i]==' ' || s[i]=='\t' || s[i]==' ') )break; s2=s.substr(i,s.length()-i); iassert(s1.size()); iassert(s2.size()); }
// Compiles an LLVM module to PTX assembly for the given compute capability.
// Returns the PTX text; asserts on target-lookup or pass-setup failure.
std::string generatePtx(llvm::Module *module, int devMajor, int devMinor) {
  // Pick the NVPTX target CPU from the device's compute capability.
  std::string mcpu;
  if ((devMajor == 3 && devMinor >= 5) || devMajor > 3) {
    mcpu = "sm_35";
  }
  // NOTE(review): `devMinor >= 0` is always true here and `devMajor >= 3`
  // can only match devMajor == 3 (larger majors were caught above) — the
  // condition reduces to `devMajor == 3`; confirm that was the intent.
  else if (devMajor >= 3 && devMinor >= 0) {
    mcpu = "sm_30";
  }
  else {
    mcpu = "sm_20";
  }
  // Select target given the module's triple
  llvm::Triple triple(module->getTargetTriple());
  std::string errStr;
  const llvm::Target* target = nullptr;
  target = llvm::TargetRegistry::lookupTarget(triple.str(), errStr);
  iassert(target) << errStr;
  llvm::TargetOptions targetOptions;
  // Request PTX ISA 4.0 from the NVPTX backend.
  std::string features = "+ptx40";
  std::unique_ptr<llvm::TargetMachine> targetMachine(
      target->createTargetMachine(triple.str(), mcpu, features, targetOptions,
                                  // llvm::Reloc::PIC_,
                                  llvm::Reloc::Default,
                                  llvm::CodeModel::Default,
                                  llvm::CodeGenOpt::Default));
  // Make a passmanager and add emission to string
  llvm::legacy::PassManager pm;
  pm.add(new llvm::TargetLibraryInfoWrapperPass(triple));
  // Set up constant NVVM reflect mapping
  llvm::StringMap<int> reflectMapping;
  reflectMapping["__CUDA_FTZ"] = 1; // Flush denormals to zero
  pm.add(llvm::createNVVMReflectPass(reflectMapping));
  // Inline aggressively so device code has no residual calls.
  pm.add(llvm::createAlwaysInlinerPass());
  targetMachine->Options.MCOptions.AsmVerbose = true;
  // Emit the PTX into an in-memory small-string buffer.
  llvm::SmallString<8> ptxStr;
  llvm::raw_svector_ostream outStream(ptxStr);
  outStream.SetUnbuffered();
  bool failed = targetMachine->addPassesToEmitFile(
      pm, outStream, targetMachine->CGFT_AssemblyFile, false);
  iassert(!failed);
  pm.run(*module);
  outStream.flush();
  return ptxStr.str();
}
// Returns the tensor index registered for the given stencil layout.
// Precondition: the stencil was added via addTensorIndex(stencil, var).
const TensorIndex& Environment::getTensorIndex(
    const StencilLayout& stencil) const {
  iassert(util::contains(content->locationOfTensorIndexStencil, stencil))
      << "Could not find " << stencil << " in environment";
  const size_t loc = content->locationOfTensorIndexStencil.at(stencil);
  return content->tensorIndices[loc];
}
// Builds a boolean expression asserting that all given expressions are
// pairwise equal: (e0 == e1) && (e1 == e2) && ... A single expression is
// returned unchanged.
Expr compare(const vector<Expr> &expressions) {
  iassert(expressions.size() > 0);
  Expr allEqual;
  if (expressions.size() == 1) {
    allEqual = expressions[0];
  }
  else {
    // Chain adjacent equality tests together with conjunctions.
    allEqual = Eq::make(expressions[0], expressions[1]);
    for (size_t idx = 2; idx < expressions.size(); ++idx) {
      Expr adjacent = Eq::make(expressions[idx-1], expressions[idx]);
      allEqual = And::make(allEqual, adjacent);
    }
  }
  iassert(allEqual.defined());
  return allEqual;
}
// Emits statements that reduce `exprs` into `result` using the binary
// predicate `compare` (e.g. a min/max selection), wrapped in a Comment node
// of the form "result = name(e0, e1, ...)".
Stmt find(const Var &result, const std::vector<Expr> &exprs, string name,
          function<Expr(Expr,Expr)> compare) {
  iassert(exprs.size() > 0);
  Stmt body;
  if (exprs.size() == 1) {
    // Single candidate: plain assignment.
    body = AssignStmt::make(result, exprs[0]);
  }
  else if (exprs.size() == 2) {
    // Two candidates: a single conditional picks the winner.
    body = IfThenElse::make(compare(exprs[0], exprs[1]),
                            AssignStmt::make(result, exprs[0]),
                            AssignStmt::make(result, exprs[1]));
  }
  else {
    // Seed with the first candidate, then conditionally replace it with each
    // of the remaining candidates in order.
    body = AssignStmt::make(result, exprs[0]);
    vector<Stmt> updates;
    for (size_t idx = 1; idx < exprs.size(); ++idx) {
      updates.push_back(IfThenElse::make(compare(exprs[idx], result),
                                         AssignStmt::make(result, exprs[idx])));
    }
    body = Block::make(body, Block::make(updates));
  }
  string commentString = result.getName() + " = " + name
                       + "(" + util::join(exprs) + ")";
  return Comment::make(commentString, body);
}
// class IndexVar std::ostream &operator<<(std::ostream &os, const IndexVar &var) { iassert(var.defined()) << "Undefined IndexVar"; if (var.isReductionVar()) { os << var.getOperator(); } return os << var.getName(); }
// Builds a generalized vector-matrix product: result[i] = sum_j l[j] * r[j,i].
// `l` must be a vector whose dimension matches the matrix's first dimension.
Expr IRBuilder::gevm(Expr l, Expr r) {
  const TensorType *vecType = l.type().toTensor();
  const TensorType *matType = r.type().toTensor();
  vector<IndexDomain> vecDims = vecType->getDimensions();
  vector<IndexDomain> matDims = matType->getDimensions();
  iassert(vecType->order() == 1 && matType->order() == 2);
  iassert(vecDims[0] == matDims[0]);
  // Free variable over matrix columns, summed variable over rows.
  auto col = factory.createIndexVar(matDims[1]);
  auto row = factory.createIndexVar(matDims[0], ReductionOperator::Sum);
  Expr vecComponent = IndexedTensor::make(l, {row});
  Expr matComponent = IndexedTensor::make(r, {row, col});
  return IndexExpr::make({col}, Mul::make(vecComponent, matComponent));
}
// struct SetType Type SetType::make(Type elementType, const std::vector<Expr>& endpointSets) { iassert(elementType.isElement()); SetType *type = new SetType; type->elementType = elementType; for (auto& eps : endpointSets) { type->endpointSets.push_back(new Expr(eps)); } return type; }
// Returns the type of this tensor's blocks (one nesting level down).
// A scalar or singly-nested tensor yields a scalar block type; otherwise the
// outermost index set of each (sufficiently nested) dimension is stripped.
Type TensorType::getBlockType() const {
  vector<IndexDomain> dimensions = getDimensions();
  // TODO (grab blocktype computation in ir.h/ir.cpp)
  // Scalars have no blocks: the block type is the component type itself.
  if (dimensions.size() == 0) {
    return TensorType::make(componentType);
  }
  std::vector<IndexDomain> blockDimensions;
  size_t numNests = dimensions[0].getIndexSets().size();
  iassert(numNests > 0);
  Type blockType;
  if (numNests == 1) {
    // No inner nesting: blocks are scalars.
    blockType = TensorType::make(componentType);
  }
  else {
    // Dimensions may be nested to different depths; find the deepest.
    unsigned maxNesting = 0;
    for (auto& dim : dimensions) {
      if (dim.getIndexSets().size() > maxNesting) {
        maxNesting = dim.getIndexSets().size();
      }
    }
    for (auto& dim : dimensions) {
      if (dim.getIndexSets().size() < maxNesting) {
        // Shallower dimension: kept whole in the block.
        // NOTE(review): this branch copies all nests rather than stripping
        // one — presumably shallower dims are not blocked at this level;
        // confirm against the blocking scheme.
        const std::vector<IndexSet>& nests = dim.getIndexSets();
        std::vector<IndexSet> blockNests(nests.begin(), nests.end());
        blockDimensions.push_back(IndexDomain(blockNests));
      }
      else {
        // Deepest dimension: drop the outermost index set.
        const std::vector<IndexSet>& nests = dim.getIndexSets();
        std::vector<IndexSet> blockNests(nests.begin()+1, nests.end());
        blockDimensions.push_back(IndexDomain(blockNests));
      }
    }
    // Blocks inherit the component type and column-vector flag.
    blockType = TensorType::make(componentType, blockDimensions,
                                 isColumnVector);
  }
  iassert(blockType.defined());
  return blockType;
}
// Associates `var` with the tensor index for `pexpr`, lazily creating a
// shared index (named after the first variable) on first use.
void Environment::addTensorIndex(const pe::PathExpression& pexpr,
                                 const Var& var) {
  iassert(pexpr.defined())
      << "Attempting to add tensor " << util::quote(var)
      << " index with an undefined path expression";
  iassert(var.defined())
      << "attempting to add a tensor index to an undefined var";
  // The first variable with this path expression creates the shared index.
  // TODO: Maybe rename indices as they get used by multiple tensors
  if (!hasTensorIndex(pexpr)) {
    TensorIndex index(var.getName() + "_index", pexpr);
    content->tensorIndices.push_back(index);
    content->locationOfTensorIndex.insert(
        {pexpr, content->tensorIndices.size() - 1});
  }
  content->tensorIndexOfVar.insert({var, getTensorIndex(pexpr)});
}
void Environment::addExtern(const Var& var) { iassert(!hasExtern(var.getName())) << var << " already in environment"; content->externs.push_back(var); size_t loc = content->externs.size()-1; content->externLocationByName.insert({var.getName(), loc}); // TODO: Change so that variables are not mapped to themselves. This means // lowering must map (sparse/dense) tensor value storage to arrays. addExternMapping(var, var); }
// Builds a vector inner product: result = sum_i l[i] * r[i].
// Both operands must have identical (vector) types; the result is a scalar.
Expr IRBuilder::innerProduct(Expr l, Expr r) {
  iassert(l.type() == r.type());
  vector<IndexDomain> dims = l.type().toTensor()->getDimensions();
  auto sumVar = factory.createIndexVar(dims[0], ReductionOperator::Sum);
  Expr lhs = IndexedTensor::make(l, {sumVar});
  Expr rhs = IndexedTensor::make(r, {sumVar});
  // No free index variables: the index expression produces a scalar.
  std::vector<IndexVar> freeVars;
  return IndexExpr::make(freeVars, Mul::make(lhs, rhs));
}
// Builds a vector outer product: result[i,j] = l[i] * r[j].
// Both operands must have identical (vector) types.
Expr IRBuilder::outerProduct(Expr l, Expr r) {
  iassert(l.type() == r.type());
  vector<IndexDomain> dims = l.type().toTensor()->getDimensions();
  auto rowVar = factory.createIndexVar(dims[0]);
  auto colVar = factory.createIndexVar(dims[0]);
  Expr lhs = IndexedTensor::make(l, {rowVar});
  Expr rhs = IndexedTensor::make(r, {colVar});
  return IndexExpr::make({rowVar, colVar}, Mul::make(lhs, rhs));
}
// Builds the transpose of a matrix: result[i,j] = mat[j,i].
Expr IRBuilder::transposedMatrix(Expr mat) {
  const TensorType *matType = mat.type().toTensor();
  iassert(matType->order() == 2);
  const std::vector<IndexDomain> &dims = matType->getDimensions();
  // Result dimensions are the operand's dimensions swapped.
  IndexVar resultRow = factory.createIndexVar(dims[1]);
  IndexVar resultCol = factory.createIndexVar(dims[0]);
  // Read the operand with the index order reversed.
  Expr val = IndexedTensor::make(mat, {resultCol, resultRow});
  return IndexExpr::make({resultRow, resultCol}, val);
}
void Environment::addTensorIndex(const StencilLayout& stencil, const Var& var) { iassert(var.defined()) << "attempting to add a tensor index to an undefined var"; string name = var.getName(); // Lazily create a new index if no index with the given pexpr exist. // TODO: Maybe rename indices as they get used by multiple tensors if (!hasTensorIndex(stencil)) { TensorIndex ti(name+"_index", stencil); content->tensorIndices.push_back(ti); size_t loc = content->tensorIndices.size() - 1; content->locationOfTensorIndexStencil.insert({stencil, loc}); } content->tensorIndexOfVar.insert({var, getTensorIndex(stencil)}); }
// Returns the storage variables of all externs, in first-seen order and
// without duplicates. An extern with no mappings contributes itself;
// otherwise its mapped variables are contributed.
std::vector<Var> Environment::getExternVars() const {
  vector<Var> externVars;
  set<Var> seen;
  for (const VarMapping& externMapping : getExterns()) {
    if (externMapping.getMappings().size() == 0) {
      // Unmapped extern: the variable itself is the storage.
      const Var& ext = externMapping.getVar();
      iassert(!util::contains(seen, ext));
      externVars.push_back(ext);
      seen.insert(ext);
    }
    else {
      // Mapped extern: collect each mapped variable once.
      for (const Var& ext : externMapping.getMappings()) {
        if (util::contains(seen, ext)) continue;
        externVars.push_back(ext);
        seen.insert(ext);
      }
    }
  }
  return externVars;
}
// Free operator functions bool operator==(const Type& l, const Type& r) { iassert(l.defined() && r.defined()); if (l.kind() != r.kind()) { return false; } switch (l.kind()) { case Type::Tensor: return *l.toTensor() == *r.toTensor(); case Type::Element: return *l.toElement() == *r.toElement(); case Type::Set: return *l.toSet() == *r.toSet(); case Type::Tuple: return *l.toTuple() == *r.toTuple(); case Type::Array: return *l.toArray() == *r.toArray(); } unreachable; return false; }
// class IRBuilder Expr IRBuilder::unaryElwiseExpr(UnaryOperator op, Expr e) { vector<IndexVar> indexVars; const TensorType *tensorType = e.type().toTensor(); vector<IndexDomain> dimensions = tensorType->getDimensions(); for (unsigned int i=0; i < tensorType->order(); ++i) { IndexDomain domain = dimensions[i]; indexVars.push_back(factory.createIndexVar(domain)); } Expr a = IndexedTensor::make(e, indexVars); Expr val; switch (op) { case None: val = a; break; case Neg: val = Neg::make(a); break; } iassert(val.defined()); return IndexExpr::make(indexVars, val, e.type().toTensor()->isColumnVector); }
// Returns the extern mapping registered under `name`.
// Precondition: hasExtern(name).
const VarMapping& Environment::getExtern(const std::string& name) const {
  iassert(hasExtern(name));
  const size_t loc = content->externLocationByName.at(name);
  return content->externs[loc];
}
// mkcls entry point: parses command-line options (criterion, corpus,
// initialization, optimization method and parameters), loads the corpus, and
// runs the selected clustering optimization.
int main(int argc,char **argv) {
  double startTime=clockSec();
  zufallSeed();
  // Consume leading -X options; each option's value is glued on (argv[1]+2).
  while( argc>1 && argv[1][0]=='-' ) {
    switch(argv[1][1]) {
      // -v<n>: verbosity level.
      case 'v':
        sscanf(argv[1]+2,"%d",&verboseMode);
        iassert(verboseMode>=0);
        break;
      // NOTE(review): the source text here appears corrupted/redacted — the
      // "******" run is not valid C++ and apparently swallowed the end of the
      // -O handler plus the `case 'n':` label for the runs option that reads
      // nLaeufe. Recover from version control before building.
      case 'O':
        sscanf(argv[1]+2,"%d",&OneWithHapas);
        cout << "OneWithHapas: "******"%d",&nLaeufe);
        nLaeufeReduce=nLaeufe;
        iassert( nLaeufe>=1 );
        break;
      // -l[rho]: use criterion LO, optionally setting rho.
      case 'l':
        Criterion=1;
        if( argv[1][2] ) {
          sscanf(argv[1]+2,"%lf",&rhoLo);
          if( verboseMode )
            cout << "Parameter rho (for LO) set to" << rhoLo << ".\n";
          iassert(0<=rhoLo && rhoLo<=1);
        }
        if( verboseMode ) cout << "Criterion LO used.\n";
        break;
      // -y[sigma]: use the special criterion with optional distortion sigma.
      case 'y':
        Criterion=2;
        if( argv[1][2] ) {
          sscanf(argv[1]+2,"%lf",&SigmaVerfaelschung);
          if( verboseMode )
            cout << "Parameter rho (for LO) set to" << SigmaVerfaelschung << ".\n";
          iassert(0<SigmaVerfaelschung);
        }
        if( verboseMode ) cout << "My special criterion used.\n";
        break;
      // -p<file>: text corpus file name.
      // NOTE(review): `&&` binds tighter than `||`, so when argv[2] is NULL
      // the right-hand `argv[2][0]!='i'` still dereferences it — likely the
      // intended condition was argv[2] && (argv[2][0]!='-' || ...). Same
      // issue in case 'P' below.
      case 'p':
        setKorpusName(argv[1]+2);
        assert(argv[2]&&argv[2][0]!='-' || argv[2][0]!='i');
        break;
      // -P<file>: binary (non-text) corpus file name.
      case 'P':
        setKorpusName(argv[1]+2);
        korpusIsText=0;
        assert(argv[2]&&argv[2][0]!='-' || argv[2][0]!='i');
        break;
      // -i<method>: initialization method; INIT_OTHER consumes an extra arg.
      case 'i':
        setInitValue(argv[1]+2,argv[2]);
        if( InitValue==INIT_OTHER )
          argv++,argc--;
        break;
      // -h<file>: hapax initialization file.
      case 'h': setHapaxInitName(argv[1]+2); break;
      // -k / -w: category- and word-selection strategies.
      case 'k': setKwahl(argv[1]+2); break;
      case 'w': setWwahl(argv[1]+2); break;
      // -c<n>: number of categories (at least 2).
      case 'c':
        sscanf(argv[1]+2,"%d",&NumberCategories);
        iassert(NumberCategories>=2);
        break;
      // -m<n>: minimum word frequency.
      case 'm': sscanf(argv[1]+2,"%d",&MinWordFrequency); break;
      // -e<k> <v>: set optimization parameter k to v (consumes an extra arg).
      case 'e':
        setParameter(argv[1]+2,argv[2]);
        argv++,argc--;
        break;
      // -a<name>: optimization algorithm.
      case 'a': setVerfahren(argv[1]+2); break;
      // -r<seed>: seed the RNG.
      case 'r': {
        int s;
        sscanf(argv[1]+2,"%d",&s);
        zufallSeed(s);
      } break;
      // -V[file]: write best result to <file> and classes to <file>.cats;
      // without a file name, output goes to cout.
      case 'V':
        if(argv[1][2]) {
          char str[1024];
          strcpy(str,argv[1]+2);
          PrintBestTo=new ofstream(str);
          strcat(str,".cats");
          PrintBestTo2=new ofstream(str);
        }
        else
          cout << "AUSGABE auf cout\n"; // German: "output to cout"
        break;
      // -M / -s / -N: iteration, time, and parameter-count limits.
      case 'M': sscanf(argv[1]+2,"%d",&MaxIterOptSteps); break;
      case 's': sscanf(argv[1]+2,"%d",&MaxSecs); break;
      case 'N': sscanf(argv[1]+2,"%d",&optimizeParameterAnzahl); break;
      // -o<file>: graph output file.
      case 'o':
        GraphOutput = new ofstream(argv[1]+2);
        if( GraphOutput==0 )
          cerr << "Warning: Open failed for file '" << argv[1]+2 << "'.\n";
        break;
      default:
        cerr << "Fehlerhafte Option: " << argv[1] << endl; // "invalid option"
        printUsage(1);
    }
    argv++;
    argc--;
  }
  setKorpus();
  // Optional initialization from category files.
  if( FileForOther ) {
    fromCatFile(p,FileForOther);
    p->initialisierung=InitValue;
    p->_initialize(InitValue);
  }
  if( hapaxInitName ) {
    fromCatFile(p,hapaxInitName,0);
    p->fixInitLike();
  }
  double start2Time=clockSec();
  // Remaining positional argument selects the optimization driver;
  // plain iterative optimization is the default.
  if(argc>=2 && strcasecmp(argv[1],"opt")==0 ) makeIterOpt();
  else if(argc>=2 && strcasecmp(argv[1],"meta-opt")==0) makeMetaOpt(argc,argv);
  else if(argc>=2 && strcasecmp(argv[1],"izr-opt")==0) makeIzrOpt();
  else makeIterOpt();
  if( verboseMode ) {
    cout << "    full-time: " << clockSec()-startTime << endl;
    cout << "optimize-time: " << clockSec()-start2Time << endl;
  }
  return 0;
}
// Returns the tensor index previously associated with `var`.
// Precondition: hasTensorIndex(var).
const TensorIndex& Environment::getTensorIndex(const Var& var) const {
  iassert(hasTensorIndex(var))
      << var << " has no tensor index in environment";
  const TensorIndex& index = content->tensorIndexOfVar.at(var);
  return index;
}
// Builds a KategProblem (clustering problem instance) from a bigram frequency
// table `cTbl` and a vocabulary set. Fills per-word successor/predecessor
// counts and bigram frequencies, skipping the "$"/"$" sentinel bigram.
KategProblem *makeKategProblem(const leda_h_array<PSS,FreqType>&cTbl,const leda_set<string>&setVokabular, int maxClass,int initialisierung, int auswertung,int nachbarschaft,int minWordFrequency) {
  int nwrd=0;
  // Vocabulary array is heap-allocated and handed to the problem (k->words).
  leda_array<string>&sVok = *new leda_array<string>(setVokabular.size());
  string s;
  unsigned int ctr=0;
  // Copy the set into the array; set iteration order fills it.
  forall_set(leda_set<string>,s,setVokabular) {
    if( verboseMode>2 )
      cout << "mkcls:Wort " << ctr << " " << s << endl;
    sVok[ctr++]=s;
  }
  // NOTE(review): if the vocabulary is empty, ctr==0 and the unsigned
  // expression ctr-1 wraps around, making this loop run (and index out of
  // bounds). Also, this sortedness check runs BEFORE sort() — presumably it
  // relies on set iteration already being ordered; confirm.
  for(unsigned int z=0;z<ctr-1;z++)
    iassert( sVok[z]<sVok[z+1] );
  sVok.sort();
  if( verboseMode>2 ) cout << "*****Vocabulary: " << sVok;
  unsigned int vokSize=sVok.size();
  massert(vokSize==ctr); massert(vokSize==setVokabular.size());
  if(verboseMode) {cout << "Size of vocabulary: " << vokSize << "\n";cout.flush();}
  KategProblem *k = new KategProblem(vokSize,maxClass,initialisierung, auswertung,nachbarschaft,minWordFrequency);
  KategProblemWBC &w=k->wordFreq;
  k->words=&sVok;
  // Pass 1: count, for each word, how many distinct bigrams it starts (after)
  // and ends (before), so the frequency store can be sized.
  Array<int> after(vokSize,0);
  Array<int> before(vokSize,0);
  nwrd=0;
  {
    PSS s;
    forall_defined_h2(PSS,FreqType,s,cTbl) {
      const string&ss1=s.first;
      const string&ss2=s.second;
      // Skip the "$"/"$" boundary bigram; require a non-empty second word.
      if( ss2.length()&&(ss1!="$" || ss2!="$") ) {
        int i1=sVok.binary_search(ss1);
        int i2=sVok.binary_search(ss2);
        iassert( sVok[i1] == ss1 );iassert( sVok[i2] == ss2 );
        after[i1]++;
        before[i2]++;
      }
      // Progress indicator every 10000 bigrams.
      if( verboseMode&&((nwrd++)%10000==0) )
        {cout<<"Statistiken-1 " << nwrd<< ". \r";cout.flush();}
    }
  }
  for(unsigned int i=0;i<vokSize;i++) {
    w.setAfterWords(i,after[i]);
    w.setBeforeWords(i,before[i]);
  }
  // Pass 2: record the actual bigram frequencies.
  {
    nwrd=0;
    PSS s;
    forall_defined_h2(PSS,FreqType,s,cTbl) {
      const string&ss1=s.first;
      const string&ss2=s.second;
      FreqType p=cTbl[s];
      if( ss2.length()&&(ss1!="$" || ss2!="$") ) {
        int i1=sVok.binary_search(ss1);
        int i2=sVok.binary_search(ss2);
        iassert( sVok[i1] == ss1 );iassert( sVok[i2] == ss2 );
        w.setFreq(i1,i2,p);
        if( verboseMode>2 )
          cout << "BIGRAMM-HAEUF: " << ss1 << ":" << i1 << " "
               << ss2 << ":" << i2 << " " << p << endl;
      }
      if( verboseMode&&((nwrd++)%10000==0) )
        {cout<<"Statistiken-2 " <<nwrd<< ". \r";cout.flush();}
    }
  }
  // Verify internal consistency of the frequency store.
  w.testFull();
  if(verboseMode){cout << "Datenintegritaet getestet.\n";cout.flush();}
  return k;
}
// class Func Func::Func(const std::string& name, const std::vector<Var>& arguments, const std::vector<Var>& results, Kind kind) : Func(name, arguments, results, Stmt(), kind) { iassert(kind != Internal); }
void Environment::addExternMapping(const Var& var, const Var& mapping) { iassert(hasExtern(var.getName())); size_t loc = content->externLocationByName.at(var.getName()); content->externs.at(loc).addMapping(mapping); }
// Compiles (and caches) the CUDA support libraries to PTX: returns
// {libdevice PTX, intrinsics PTX} for the given compute capability.
std::vector<std::string> generateLibraryPtx(int devMajor, int devMinor) {
  // Both libraries are device-independent once compiled; reuse if cached.
  if (libdevicePtxCache.size() > 0 && intrinsicsPtxCache.size() > 0) {
    return {libdevicePtxCache, intrinsicsPtxCache};
  }
  // Build libdevice (math libraries, etc.) module
  //
  // Reference:
  // http://docs.nvidia.com/cuda/libdevice-users-guide/basic-usage.html
  //
  // The device to libdevice version mapping is weird (note 3.1-3.4=compute_20)
  // 2.0 ≤ Arch < 3.0   libdevice.compute_20.XX.bc
  // Arch = 3.0         libdevice.compute_30.XX.bc
  // 3.1 ≤ Arch < 3.5   libdevice.compute_20.XX.bc
  // Arch = 3.5         libdevice.compute_35.XX.bc
  // NOTE(review): the code below maps ALL of 3.0-3.4 to compute_30, which
  // disagrees with the table above (3.1-3.4 -> compute_20) — confirm which
  // is intended.
  // Identify device by Compute API level
  const char *libdevice;
  int libdevice_length;
  if ((devMajor == 3 && devMinor >= 5) || devMajor > 3) {
    libdevice = reinterpret_cast<const char*>(simit_gpu_libdevice_compute_35);
    libdevice_length = simit_gpu_libdevice_compute_35_length;
  }
  else if (devMajor == 3 && devMinor >= 0) {
    libdevice = reinterpret_cast<const char*>(simit_gpu_libdevice_compute_30);
    libdevice_length = simit_gpu_libdevice_compute_30_length;
  }
  else {
    libdevice = reinterpret_cast<const char*>(simit_gpu_libdevice_compute_20);
    libdevice_length = simit_gpu_libdevice_compute_20_length;
  }
  llvm::SMDiagnostic errReport;
  // Bitcode reader requires aligned buffer lengths.
  libdevice_length = alignBitreaderLength(libdevice_length);
  llvm::MemoryBufferRef libdeviceBuf(
      llvm::StringRef(libdevice, libdevice_length), "libdevice");
  std::unique_ptr<llvm::Module> libdeviceModule =
      llvm::parseIR(libdeviceBuf, errReport, LLVM_CTX);
  iassert((bool)libdeviceModule)
      << "Failed to load libdevice: " << printToString(errReport);
  setNVVMModuleProps(libdeviceModule.get());
  logModule(libdeviceModule.get(), "simit-libdevice.ll");
  std::string libdevicePtx = generatePtx(libdeviceModule.get(),
                                         devMajor, devMinor);
  // Build intrinsics module
  const char *intrinsics = reinterpret_cast<const char*>(simit_gpu_intrinsics);
  int intrinsics_length = alignBitreaderLength(simit_gpu_intrinsics_length);
  llvm::MemoryBufferRef intrinsicsBuf(
      llvm::StringRef(intrinsics, intrinsics_length), "intrinsics");
  std::unique_ptr<llvm::Module> intrinsicsModule =
      llvm::parseIR(intrinsicsBuf, errReport, LLVM_CTX);
  iassert((bool)intrinsicsModule)
      << "Failed to load intrinsics: " << printToString(errReport);
  setNVVMModuleProps(intrinsicsModule.get());
  logModule(intrinsicsModule.get(), "simit-intrinsics.ll");
  std::string intrinsicsPtx = generatePtx(intrinsicsModule.get(),
                                          devMajor, devMinor);
  // Cache the compiled libraries
  libdevicePtxCache = libdevicePtx;
  intrinsicsPtxCache = intrinsicsPtx;
  return {libdevicePtx, intrinsicsPtx};
}
// Registers `var` as a temporary. Temporaries are tracked both in insertion
// order (vector) and for membership tests (set).
void Environment::addTemporary(const Var& var) {
  iassert(!hasExtern(var.getName())) << var << " already in environment";
  content->temporarySet.insert(var);
  content->temporaries.push_back(var);
}
// Constructs an nX x nY x nZ box lattice over the given element references,
// with an optional coordinate-to-edge mapping.
Box::Box(unsigned nX, unsigned nY, unsigned nZ, std::vector<ElementRef> refs, std::map<Box::Coord, ElementRef> coords2edges) : nX(nX), nY(nY), nZ(nZ), refs(refs), coords2edges(coords2edges) {
  // Exactly one element reference per lattice point.
  iassert(refs.size() == nX*nY*nZ);
}
// Sets one optimization-algorithm parameter: `nr1` is the parameter number
// (1-8), `nr2` its value. Validates ranges with iassert and logs when
// verbose. Unknown numbers print usage and exit.
// NOTE(review): the sscanf return values are unchecked, so malformed input
// leaves n1/n2 uninitialized — confirm callers guarantee numeric strings.
void setParameter(const char *nr1,const char *nr2) {
  int n1;
  float n2;
  sscanf(nr1,"%d",&n1);
  sscanf(nr2,"%f",&n2);
  IterOptSet=1;  // remember that an optimization parameter was set explicitly
  switch(n1) {
    // 1: simulated-annealing start annealing rate gamma_0, in [0,1].
    case 1:
      SAOptimization::defaultAnfAnnRate=n2;
      if(verboseMode)cout << "Parameter gamma_0 (SA) set to " << SAOptimization::defaultAnfAnnRate << endl;
      iassert(0<=SAOptimization::defaultAnfAnnRate&& SAOptimization::defaultAnfAnnRate<=1);
      break;
    // 2: simulated-annealing end annealing rate gamma_e, in [0,1].
    case 2:
      SAOptimization::defaultEndAnnRate=n2;
      if(verboseMode)cout << "Parameter gamma_e (SA) set to " << SAOptimization::defaultEndAnnRate << endl;
      iassert(0<=SAOptimization::defaultEndAnnRate &&SAOptimization::defaultEndAnnRate<=1);
      break;
    // 3: simulated-annealing multiplier nu_e, positive.
    case 3:
      SAOptimization::defaultMultiple=n2;
      if(verboseMode)cout << "Parameter nu_e (SA) set to " << SAOptimization::defaultMultiple << endl;
      iassert( SAOptimization::defaultMultiple>0 );
      break;
    // 4: threshold-accepting annealing rate, in [0,1].
    case 4:
      TAOptimization::defaultAnnRate=n2;
      if(verboseMode)cout << "Parameter gamma_{TA} set to " << TAOptimization::defaultAnnRate << endl;
      iassert(0<=TAOptimization::defaultAnnRate &&TAOptimization::defaultAnnRate<=1);
      break;
    // 5: threshold-accepting multiplier, positive.
    case 5:
      TAOptimization::defaultMultiple=n2;
      if(verboseMode)cout << "Parameter nu_{TA} set to " << TAOptimization::defaultMultiple << endl;
      iassert( TAOptimization::defaultMultiple>0 );
      break;
    // 6: record-to-record-travel annealing rate, in [0,1].
    case 6:
      RRTOptimization::defaultAnnRate=n2;
      if(verboseMode)cout << "Parameter gamma_{RRT} set to " << RRTOptimization::defaultAnnRate << endl;
      iassert(0<=RRTOptimization::defaultAnnRate && RRTOptimization::defaultAnnRate<=1);
      break;
    // 7: record-to-record-travel multiplier, positive.
    case 7:
      RRTOptimization::defaultMultiple=n2;
      if(verboseMode)cout << "Parameter nu_{RRT} set to " << RRTOptimization::defaultMultiple << endl;
      iassert( RRTOptimization::defaultMultiple>0 );
      break;
    // 8: great-deluge alpha, in [0,1).
    // NOTE(review): unlike cases 1/2/4/6 this bound is strict (<1, not <=1)
    // — presumably intentional for GDA, but confirm.
    case 8:
      GDAOptimization::defaultAlpha=n2;
      if(verboseMode)cout << "Parameter alpha set to " << GDAOptimization::defaultAlpha << endl;
      iassert(0<=GDAOptimization::defaultAlpha && GDAOptimization::defaultAlpha<1 );
      break;
    default:
      cerr << "Error: Wrong parameter number " << nr1 << " " << n1 << endl;
      printUsage(1);
  }
}