/// Utility function to adjust option strings, used by ToNormalised()/ToCustomised(). bool AdjustOptions(void * parm, unsigned * parmLen, bool (PluginCodec_MediaFormat:: * adjuster)(OptionMap & original, OptionMap & changed)) { if (parmLen == NULL || parm == NULL || *parmLen != sizeof(char ***)) { PTRACE(1, "Plugin", "Invalid parameters to AdjustOptions."); return false; } OptionMap originalOptions; for (const char * const * option = *(const char * const * *)parm; *option != NULL; option += 2) originalOptions[option[0]] = option[1]; OptionMap changedOptions; if (!(this->*adjuster)(originalOptions, changedOptions)) { PTRACE(1, "Plugin", "Could not normalise/customise options."); return false; } char ** options = (char **)calloc(changedOptions.size()*2+1, sizeof(char *)); *(char ***)parm = options; if (options == NULL) { PTRACE(1, "Plugin", "Could not allocate new option lists."); return false; } for (OptionMap::iterator i = changedOptions.begin(); i != changedOptions.end(); ++i) { *options++ = strdup(i->first.c_str()); *options++ = strdup(i->second.c_str()); } return true; }
/// Collects every option of av_class that matches req_flags into a flat array.
/// Delegates to the map-based overload and copies the mapped values over.
void loadOptions( OptionArray& outOptions, void* av_class, int req_flags )
{
	OptionMap collectedOptions;
	loadOptions( collectedOptions, av_class, req_flags );

	for( OptionMap::iterator option = collectedOptions.begin(); option != collectedOptions.end(); ++option )
		outOptions.push_back( option->second );
}
/// Removes the constructor registered under name from the base factory and
/// drops its associated option parser.
///
/// @return false only when the base factory removed the constructor but no
///         option parser was registered under name.
///         NOTE(review): returns true even when the base class had nothing
///         registered under name (the option-parser map is then left
///         untouched) — confirm this asymmetry is intended.
bool unregisterConstructor(const String& name)
{
  if (FactoryImpl<T,Ftype>::unregisterConstructor(name)) {
    typename OptionMap::iterator where=mOptionParsers.find(name);
    if (where==mOptionParsers.end())
      return false;
    mOptionParsers.erase(where);
  }
  return true;
}
static void Change(const char * value, OptionMap & original, OptionMap & changed, const char * option) { OptionMap::iterator it = original.find(option); if (it != original.end() && it->second != value) changed[option] = value; }
//_________________________________________________________ void ShadowConfiguration::initialize( const OptionMap& options ) { #if OXYGEN_DEBUG std::cerr << "Oxygen::ShadowConfiguration::initialize - " << (_colorGroup == Palette::Active ? "Active": "Inactive" ) << std::endl; #endif if( _colorGroup == Palette::Active) { _innerColor = ColorUtils::Rgba::fromKdeOption( options.getValue( "[ActiveShadow]", "InnerColor", "112,241,255" ) ); _outerColor = ColorUtils::Rgba::fromKdeOption( options.getValue( "[ActiveShadow]", "OuterColor", "84,167,240" ) ); _shadowSize = options.getOption( "[ActiveShadow]","Size" ).toVariant<double>(40); _verticalOffset = options.getOption( "[ActiveShadow]","VerticalOffset" ).toVariant<double>(0.1); _useOuterColor = options.getOption( "[ActiveShadow]","UseOuterColor" ).toVariant<std::string>("true") == "true"; } else { _innerColor = ColorUtils::Rgba::fromKdeOption( options.getValue( "[InactiveShadow]", "InnerColor", "0,0,0" ) ); _outerColor = ColorUtils::Rgba::fromKdeOption( options.getValue( "[InactiveShadow]", "OuterColor", "0,0,0" ) ); _shadowSize = options.getOption( "[InactiveShadow]","Size" ).toVariant<double>(40); _verticalOffset = options.getOption( "[InactiveShadow]","VerticalOffset" ).toVariant<double>(0.2); _useOuterColor = options.getOption( "[InactiveShadow]", "UseOuterColor" ).toVariant<std::string>("false") == "true"; } if(!_useOuterColor) _outerColor=_innerColor; }
void QSparqlConnectionOptionsPrivate::setOption(const QString& name, const QVariant& value) { if (value.isValid()) { QVariant convertedValue; if (validateOptionValue(name, value, convertedValue)) map.insert(name, convertedValue); } else { map.remove(name); } }
// Returns the explicitly set value for the option, falling back to the
// registry default, or an invalid QVariant when the option is unknown.
QVariant QSparqlConnectionOptionsPrivate::optionOrDefaultValue(const QString& name) const
{
    const OptionMap::const_iterator found = map.constFind(name);
    if (found != map.constEnd())
        return *found;

    if (const OptionInfo* optionInfo = connectionOptionRegistry()->lookup(name))
        return optionInfo->defaultValue;
    return QVariant();
}
/// Looks up the option parser registered under name. An empty name maps to
/// the default option parser when a default is configured; any other miss
/// yields the no-op parser.
const Otype &getOptionParser(const String&name)const
{
  typename OptionMap::const_iterator found = mOptionParsers.find(name);
  if (found != mOptionParsers.end())
    return found->second;

  if (name.length()==0 && getDefault().length())
    return getDefaultOptionParser();
  return mNop;
}
// Exercises the EnergyMethod module base type: registers a fake
// harmonic-oscillator module, then checks the analytic energy (deriv order 0)
// and the finite-difference gradient (order 1) and Hessian (order 2) for a
// two-hydrogen system. Returns the number of failed checks.
size_t TestEnergyMethod::run_test_()
{
    CppTester tester("Testing EnergyMethod module base type");
    ModuleManager& mm=module_manager();

    // Options consumed by EnergyMethod's finite-difference machinery.
    OptionMap om;
    const auto& none=static_cast<pybind11::object>(pybind11::none());
    om.add_option("MAX_DERIV",OptionType::Int,false,none,"",pybind11::cast(0));
    om.add_option("FDIFF_DISPLACEMENT",OptionType::Float,false,none,"",pybind11::cast(0.005));
    om.add_option("FDIFF_STENCIL_SIZE",OptionType::Int,false,none,"",pybind11::cast(3));
    ModuleInfo minfo={"FakeEnergyMethod","c_module","EnergyMethod",
                      "./TestEnergyMethod.so","1.0","A 3*Natoms H.O.",{""},{""}, om };
    mm.load_module_from_minfo(minfo,"Test the H.O.");
    auto egy_mod=create_child<EnergyMethod>("Test the H.O.");

    // Two hydrogen atoms, the second displaced 0.89 along z.
    Wavefunction wfn;
    Atom H=create_atom({0.0,0.0,0.0},1),H1=create_atom({0.0,0.0,0.89},1);
    AtomSetUniverse MyU;
    MyU.insert(H);
    MyU.insert(H1);
    wfn.system=std::make_shared<const System>(System(MyU,true));

    //The right answers
    const std::vector<double> egy({0.39605});
    std::vector<double> grad(6,0.0);grad[5]=0.89;
    std::vector<double> hess(36,0.0);
    hess[0]=hess[7]=hess[14]=hess[21]=hess[28]=hess[35]=1.0;

    // Order 0: analytic energy.
    auto deriv=egy_mod->deriv(0,wfn);
    tester.test_equal("Energy works",egy,deriv.second);

    // Order 1: finite-difference gradient, component by component.
    deriv=egy_mod->deriv(1,wfn);
    tester.test_equal("Grad has right dimensions",grad.size(),deriv.second.size());
    for(size_t i=0;i<grad.size();++i)
        tester.test_equal("FDiff grad comp "+to_string(i),grad[i],deriv.second[i]);

    // Order 2: finite-difference Hessian, component by component.
    deriv=egy_mod->deriv(2,wfn);
    tester.test_equal("Hessian has right dimensions",hess.size(),deriv.second.size());
    for(size_t i=0;i<hess.size();++i)
        tester.test_equal("FDiff Hessian comp "+to_string(i),hess[i],deriv.second[i]);

    tester.print_results();
    return tester.nfailed();
}
// Emits, for each warning Option record, a `diag::kind` array listing the
// diagnostics it controls, followed by the WarningOption lookup table that
// maps option names to those arrays.
void ClangOptionsEmitter::run(std::ostream &OS)
{
  // Build up a map from options to controlled diagnostics.
  OptionMap OM;

  const RecordVector &Opts = Records.getAllDerivedDefinitions("Option");
  for (RecordVector::const_iterator I=Opts.begin(), E=Opts.end(); I!=E; ++I)
    if (const RecordVal* V = findRecordVal(**I, "Members"))
      if (const ListInit* LV = dynamic_cast<const ListInit*>(V->getValue())) {
        VisitedLists Visited;
        BuildGroup(OM[*I], Visited, LV);
      }

  // Iterate through the OptionMap and emit the declarations.
  for (OptionMap::iterator I = OM.begin(), E = OM.end(); I!=E; ++I) {
    // Output the option.
    OS << "static const diag::kind " << I->first->getName() << "[] = { ";

    // Comma-separated list of the diagnostics in this option's group.
    DiagnosticSet &DS = I->second;
    bool first = true;
    for (DiagnosticSet::iterator I2 = DS.begin(), E2 = DS.end(); I2!=E2; ++I2) {
      if (first)
        first = false;
      else
        OS << ", ";
      OS << "diag::" << (*I2)->getName();
    }
    OS << " };\n";
  }

  // Now emit the OptionTable table.
  OS << "\nstatic const WarningOption OptionTable[] = {";
  bool first = true;
  for (OptionMap::iterator I = OM.begin(), E = OM.end(); I!=E; ++I) {
    if (first)
      first = false;
    else
      OS << ',';
    OS << "\n {\"" << getOptName(I->first)
       << "\", DIAGS(" << I->first->getName() << ")}";
  }
  OS << "\n};\n";
}
// Fits the two-column data file (x, y) with a polynomial of degree np via
// linear inversion; writes the model response to resp.out and the coefficient
// vector to out.vec, and prints the fitted polynomial.
int main( int argc, char *argv [] ){

    // NOTE(review): the sizeof printout below looks like leftover debug output.
    std::cout << "size_t" << sizeof(size_t) << std::endl;
    std::cout << "ssize_t" << sizeof(ssize_t) << std::endl;
    std::cout << "Index" << sizeof(GIMLI::Index) << std::endl;
    std::cout << "Sindex" << sizeof(GIMLI::SIndex) << std::endl;
    std::cout << "8" << sizeof(GIMLI::int8) << std::endl;
    std::cout << "16" << sizeof(GIMLI::int16) << std::endl;
    std::cout << "32" << sizeof(GIMLI::int32) << std::endl;
    std::cout << "64" << sizeof(GIMLI::int64) << std::endl;
    std::cout << "8" << sizeof(GIMLI::uint8) << std::endl;
    std::cout << "16" << sizeof(GIMLI::uint16) << std::endl;
    std::cout << "32" << sizeof(GIMLI::uint32) << std::endl;
    std::cout << "64" << sizeof(GIMLI::uint64) << std::endl;

    /*! parse command line: data file and number of coefficents */
    std::string datafile;
    int np = 1;
    double lambda = 0.001;
    bool verbose = true;

    /*! Parse option map using longoption map */
    OptionMap oMap;
    oMap.setDescription("Fits data in datafile with polynominals.");
    oMap.addLastArg( datafile, "Datafile" );
    oMap.add( np , "n:", "np", "Number of polynomials" );
    oMap.add( lambda, "l:", "lambda", "Regularization" );
    oMap.parse( argc, argv );

    /*! load two-column matrix from file (input argument) holding x and y */
    RMatrix xy;
    loadMatrixCol( xy, datafile );

    /*! initialize modelling operator */
    PolynominalModelling f( np + 1, xy[ 0 ] ); //! np + 1 coefficients and x-vector (first data column)

    /*! initialize inversion with data and forward operator and set options */
    RInversion inv( xy[ 1 ], f, verbose, false );

    /*! maximum iterations to 1 due to linear problem (not necessary) */
    //inv.setMaxIter( 1 );

    /*! constant absolute error of 0.4 (not necessary, only for chi^2) */
    inv.setAbsoluteError( 0.4 );

    /*! the problem is well-posed and does not need regularization */
    inv.setLambda( lambda );

    /*! actual inversion run yielding coefficient model */
    RVector coeff( inv.run() );

    /*! get actual response and write to file */
    save( inv.response(), "resp.out" );

    /*! print result to screen and save coefficient vector to file */
    std::cout << "y = " << coeff[ 0 ];
    for( int i = 1 ; i <= np ; i++ ) std::cout << " + " << coeff[ i ] << " x^" << i;
    std::cout << std::endl;
    save( coeff, "out.vec" );

    /*! exit programm legally */
    return EXIT_SUCCESS;
}
// TTInvOffset - travel time inversion with shot offsets: inverts slowness on a
// parameter mesh plus one static offset per shot, then writes velocity.vec and
// offsets.vec.
int main( int argc, char *argv [] )
{
    bool verbose = true, createGradientModel = false;
    double errTime = 0.001;
    std::string dataFileName( NOT_DEFINED ), paraMeshFilename( "mesh.bms" );

    OptionMap oMap;
    oMap.setDescription("Description. TTInvOffset - travel time inversion with shot offsets\n");
    oMap.addLastArg( dataFileName, "Data file" );
    oMap.add( verbose, "v" , "verbose" , "Verbose output." );
    oMap.add( paraMeshFilename, "p:", "paraMeshFile" , "Parameter mesh file." );
    oMap.add( createGradientModel,"G" , "gradientModel", "Gradient starting model." );
    oMap.parse( argc, argv );
    // NOTE(review): createGradientModel is parsed but never used below.

    //!** load data file
    DataContainer dataIn( dataFileName );
    if ( verbose ) dataIn.showInfos();

    //!** load mesh for slownesses
    Mesh paraMesh;
    paraMesh.load( paraMeshFilename );

    /*! Error model combined of rhoa error and phi error */
    TTOffsetModelling f( paraMesh, dataIn, false );
    RVector appSlowness ( f.getApparentSlowness() );

    // main region: smoothness-constrained slowness with log transform,
    // started at the median apparent slowness
    RTransLog transSlo;
    Region * mainRegion = f.regionManager().regions()->begin()->second;
    mainRegion->setConstraintType( 1 ); //! smoothness
    mainRegion->setStartValue( median( appSlowness ) );
    mainRegion->setTransModel( transSlo );
    // shot-offset region: started at zero, no smoothing
    f.region( NEWREGION )->setConstraintType( 0 ); //! minimum length (no smoothing)
    f.region( NEWREGION )->setStartValue( 0.0 );
    RVector model;

    /*! Set up inversion with full matrix, data and forward operator */
    RInversion inv( dataIn.get( "t" ), f, verbose );
    inv.setAbsoluteError( errTime );

    /*! actual computation: run the inversion */
    model = inv.run();

    // split the model vector into cell slownesses and per-shot offsets
    RVector slowness( model, 0, model.size() - f.nShots() );
    RVector velocity( 1 / slowness );
    save( velocity, "velocity.vec" );
    RVector offsets( model, model.size() - f.nShots(), model.size() );
    save( offsets, "offsets.vec" );

    vcout << "min/max( velocity ) = " << 1 / max( slowness ) << "/" << 1 / min( slowness ) << std::endl;
    vcout << "offsets: " << offsets << std::endl;

    return EXIT_SUCCESS;
}
void loadOptions( OptionMap& outOptions, void* av_class, int req_flags ) { if( ! av_class ) return; std::multimap<std::string, std::string> optionUnitToParentName; std::vector<Option> childOptions; const AVOption* avOption = NULL; // iterate on options #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 51, 12, 0 ) while( ( avOption = av_next_option( av_class, avOption ) ) ) #else while( ( avOption = av_opt_next( av_class, avOption ) ) ) #endif { if( ! avOption || ! avOption->name || ( avOption->flags & req_flags ) != req_flags ) { continue; } Option option( *const_cast<AVOption*>( avOption ), av_class ); if( option.getType() == eOptionBaseTypeChild ) { childOptions.push_back( option ); } else { outOptions.insert( std::make_pair( option.getName(), option ) ); optionUnitToParentName.insert( std::make_pair( option.getUnit(), option.getName() ) ); } } // iterate on child options for( std::vector<Option>::iterator itOption = childOptions.begin(); itOption != childOptions.end(); ++itOption ) { bool parentFound = false; for( std::multimap<std::string, std::string>::iterator itUnit = optionUnitToParentName.begin(); itUnit != optionUnitToParentName.end(); ++itUnit ) { if( itUnit->first == itOption->getUnit() ) { std::string nameParentOption = itUnit->second; Option& parentOption = outOptions.at( nameParentOption ); parentOption.appendChild( *itOption ); // child of a Choice if( parentOption.getType() == eOptionBaseTypeChoice ) { if( itOption->getDefaultInt() == parentOption.getDefaultInt() ) parentOption.setDefaultChildIndex( parentOption.getChilds().size() - 1 ); } parentFound = true; break; } } if( ! parentFound ) { LOG_WARN( "Can't find a choice option for " << itOption->getName() ) } } }
// DC1dsmooth - smooth (Occam-type) 1D DC resistivity inversion on a fixed
// layer discretization; if the data file has a 4th column, an IP inversion is
// run re-using the DC jacobian. Writes resistivity.vec, thickness.vec,
// response.vec and (for IP) phase.vec/aphase.vec.
int main( int argc, char *argv [] )
{
    std::string dataFile( NOT_DEFINED );
    double errPerc = 3.0, lambda = 20.0, lambdaIP = 1.0, maxDepth = 0.0;
    int nlay = 30, maxIter = 20;
    bool verbose = false, lambdaOpt = false, isBlocky = false, isRobust = false, optimizeChi1 = false;

    OptionMap oMap;
    oMap.setDescription("DC1dsmooth - smooth 1d dc resistivity inversion");
    oMap.add( verbose, "v" , "verbose" , "Verbose output." );
    oMap.add( lambdaOpt, "O" , "OptimizeLambda", "Optimize model smoothness using L-curve." );
    oMap.add( isRobust, "R" , "RobustData" , "Robust (L1) data weighting." );
    oMap.add( isBlocky, "B" , "BlockyModel" , "Blocky (L1) model constraints." );
    oMap.add( optimizeChi1, "C" , "OptimizeChi1" , "Optimize lambda subject to chi^2=1." );
    oMap.add( maxIter, "i:", "maxIter" , "Maximum iteration number." );
    oMap.add( lambda, "l:", "lambda" , "Regularization strength lambda." );
    oMap.add( lambdaIP, "r:", "lambdaIP" , "Regularization strength lambda for IP." );
    oMap.add( errPerc, "e:", "error" , "Error percentage" );
    oMap.add( nlay, "n:", "nLay" , "Number of layers" );
    oMap.add( maxDepth, "d:", "maxDepth" , "Maximum depth" );
    oMap.addLastArg( dataFile, "Datafile" );
    oMap.parse( argc, argv );

    /*! Data: read data file from column file */
    RMatrix abmnr;
    loadMatrixCol( abmnr, dataFile ); //! read data
    RVector ab2( abmnr[ 0 ] ); //! first column
    RVector mn2( abmnr[ 1 ] ); //! second column
    RVector rhoa( abmnr[ 2 ] ); //! third column

    /*! Define discretization according to AB/2 */
    if ( maxDepth == 0.0 ) {
        maxDepth = max( ab2 ) / 2; //! rule of thumb
        std::cout << "Maximum depth estimated to " << maxDepth << std::endl;
    }
    // equal layer thicknesses summing to maxDepth; start model = median rhoa
    RVector thk( nlay - 1, maxDepth / ( nlay - 1 ) );
    thk *= ( maxDepth / sum( thk ) );
    RVector model( nlay, median( rhoa ) );

    /*! Transformations: log for app. resisivity and thickness, logLU for resistivity */
    // TransLogLU< RVector > transRho( lbound, ubound );
    TransLog< RVector > transRho;
    TransLog< RVector > transRhoa;

    /*! Forward operator, transformations and constraints */
    DC1dRhoModelling f( thk, ab2, mn2, false );
    f.region( 0 )->setTransModel( transRho );

    /*! Error estimation: relative voltage error plus percentage error */
    double current = 0.1, errVolt = 0;//1e-4;
    RVector voltage( rhoa * f( RVector( 3, 1.0 ) ) * current );
    RVector error = errVolt / voltage + errPerc / 100.0;
    vcout << "error min/max = " << min( error ) << "/" << max( error ) << std::endl;

    /*! Set up inversion with full matrix, data and forward operator */
    RInversion inv( rhoa, f, verbose );
    inv.setTransData( transRhoa );
    inv.setRelativeError( error );
    inv.setLambda( lambda );
    inv.setModel( model );
    inv.setBlockyModel( isBlocky );
    inv.setRobustData( isRobust );
    inv.setOptimizeLambda( lambdaOpt );
    inv.setMaxIter( maxIter );
    inv.setDeltaPhiAbortPercent( 0.5 );
    inv.stopAtChi1( false );

    /*! run the inversion (optionally optimizing lambda for chi^2 = 1) */
    if ( optimizeChi1 ) {
        model = inv.runChi1( 0.1 );
    } else {
        model = inv.run();
    }
    if ( verbose ) std::cout << "model = " << model << std::endl;
    save( model, "resistivity.vec" );
    save( thk, "thickness.vec" );
    save( inv.response(), "response.vec" );

    if ( abmnr.cols() > 3 ) {
        if ( verbose ) std::cout << "Found ip values, doing ip inversion" << std::endl;
        //! imaginary apparent resistivity
        RVector rhoai( rhoa * sin( abmnr[ 3 ] / 1000 ) );
        //! linear modelling operator using the amplitude jacobian
        Mesh mesh( createMesh1D( thk.size() ) );
        LinearModelling fIP( mesh, f.jacobian(), verbose );
        fIP.region( 0 )->setTransModel( transRho );
        //! IP (imaginary resistivity) inversion using fIP
        RInversion invIP( rhoai, fIP, verbose );
        invIP.setAbsoluteError( rhoai / abmnr[ 3 ] * 1.0 );
        invIP.setBlockyModel( isBlocky );
        invIP.setRobustData( isRobust );
        invIP.setLambda( lambdaIP );
        invIP.setRecalcJacobian( false );
        invIP.stopAtChi1( false );
        RVector ipModel( nlay, median( rhoai ) );
        invIP.setModel( ipModel );
        ipModel = invIP.run();
        // convert inverted imaginary resistivity to phase (mrad)
        RVector phase = atan( ipModel / model ) * 1000.;
        save( phase, "phase.vec" );
        RVector aphase = atan( invIP.response() / rhoa ) * 1000.;
        save( aphase, "aphase.vec" );
    }
    return EXIT_SUCCESS;
}
// InvLinearMat - linear inversion of A*x = b for a given matrix A and data
// vector b, with optional apparent-parameter transformations and resolution
// analysis. Writes x.vec and, with -D, resMDiag.vec/resolution.matrix.
int main( int argc, char *argv [] ){
    bool lambdaOpt = false, isRobust = false, isBlocky = false, doResolution = false;
    bool useAppPar = false, useTan = false, useWater = false, optimizeChi1 = false;
    double lambda = 1, lbound = 0, ubound = 0, errbabs = 20;
    int maxIter = 10, verboseCount = 0, ctype = 0;
    std::string matFile( "A.mat" ), bFile( "b.vec" );

    OptionMap oMap;
    oMap.setDescription("Description. InvLinearMat - Linear Inversion with given matrix and vector\n");
    oMap.addLastArg( bFile, "Data file" );
    oMap.add( verboseCount, "v" , "verbose", "Verbose/debug/dosave mode (multiple use)." );
    oMap.add( lambdaOpt, "O" , "OptimizeLambda", "Optimize model smoothness using L-curve." );
    oMap.add( optimizeChi1, "C" , "OptimizeChi1", "Optimize lambda subject to chi^2=1." );
    oMap.add( doResolution, "D" , "doResolution", "Do resolution analysis." );
    oMap.add( isRobust, "R" , "RobustData", "Robust (L1) data weighting." );
    oMap.add( isBlocky, "B" , "BlockyModel", "Blocky (L1) model constraints." );
    oMap.add( useTan, "T" , "useTan", "Use (co-)Tan instead of Log for LU." );
    oMap.add( useAppPar, "A" , "useAppPar", "Use apparent parameter transformation." );
    oMap.add( useWater, "W" , "useWater", "Use water content transformation." );
    oMap.add( matFile, "m:", "matFile", "Matrix file [A.mat]" );
    oMap.add( lambda, "l:", "lambda", "Regularization strength lambda[100]." );
    oMap.add( lbound, "b:", "lbound", "Lower parameter bound[0]" );
    oMap.add( ubound, "u:", "ubound", "Upper parameter bound[0-inactive]" );
    oMap.add( errbabs, "e:", "error", "Absolute error level" );
    oMap.add( maxIter, "i:", "maxIter", "Maximum Iteration number" );
    oMap.parse( argc, argv );
    bool verbose = ( verboseCount > 0 ), debug = ( verboseCount > 1 ), dosave = ( verboseCount > 2 );
    // NOTE(review): useWater and debug are parsed/derived but never used below.

    RMatrix A;
    if ( ! loadMatrixSingleBin( A, matFile ) ) {
        std::cerr << "Did not find A.mat!" << std::endl;
        return EXIT_OPEN_FILE;
    }
    RVector b;
    load( b, bFile );
    size_t nModel( A.cols() );
    dcout << "size(A) = " << A.rows() << "x" << nModel << "size(b) = " << b.size() << std::endl;

    // apparent model: data divided by the row sums of A
    RVector Asum( A * RVector( nModel, 1.0 ) );
    RVector xapp( b / Asum );
    DEBUG save( xapp, "xapp.vec" );
    dcout << "apparent x: min/max = " << min( xapp ) << "/" << max( xapp ) << std::endl;

    // dummy 1D mesh: one cell per model parameter
    Mesh mesh( createMesh1D( nModel ) );
    DEBUG mesh.save( "mesh1d.bms" );
    mesh.showInfos();
    for ( size_t i = 0; i < mesh.cellCount(); i ++ ) mesh.cell( i ).setAttribute( 2.0 + i );
    mesh.createNeighbourInfos();
    LinearModelling f( mesh, A, verbose );
    f.regionManager().region( 0 )->setConstraintType( ctype );

    // choose data/model transformations
    // NOTE(review): the nested `*new Trans...` objects below are never freed;
    // only the outer transData/transModel are deleted at the end.
    Trans < RVector > * transData;
    Trans < RVector > * transModel;
    if ( useAppPar ) {
        if ( useTan ) {
            if ( ubound <= lbound ) ubound = lbound + 1.0;
            transData = new TransNest< RVector >( *new TransCotLU< RVector >( lbound, ubound ),
                                                  *new TransLinear< RVector >( RVector( xapp / b ) ) );
            transModel = new TransCotLU< RVector >( lbound, ubound );
        } else {
            transData = new TransNest< RVector >( *new TransLogLU< RVector >( lbound, ubound ),
                                                  *new TransLinear< RVector >( RVector( xapp / b ) ) );
            transModel = new TransLogLU< RVector >( lbound, ubound );
        }
    } else {
        transData = new Trans< RVector >( );
        transModel = new Trans< RVector >( );
    }

    /*! set up inversion */
    RInversion inv( b, f, *transData, *transModel, verbose, dosave );
    inv.setRecalcJacobian( false ); //! no need since it is linear
    inv.setLambda( lambda );
    inv.setOptimizeLambda( lambdaOpt );
    inv.setRobustData( isRobust );
    inv.setBlockyModel( isBlocky );
    inv.setMaxIter( maxIter );
    inv.setDeltaPhiAbortPercent( 1.0 );

    // absolute error turned into a relative one
    RVector error( errbabs / b);
    vcout << "error: min/max = " << min( error ) << "/" << max( error ) << std::endl;
    inv.setError( error );

    // homogeneous starting/reference model from the mean apparent value
    RVector x( nModel, mean( xapp ) );
    inv.setModel( x );
    inv.setReferenceModel( x );

    if ( optimizeChi1 ) {
        x = inv.runChi1();
    } else {
        x = inv.run();
    }
    vcout << "x = " << x << std::endl;
    save( x, "x.vec" );

    if ( doResolution ) {
        // one resolution column per model cell; keep the diagonal separately
        RVector resolution( nModel );
        RVector resMDiag ( nModel );
        RMatrix resM;
        for ( size_t iModel = 0; iModel < nModel; iModel++ ) {
            resolution = inv.modelCellResolution( iModel );
            resM.push_back( resolution );
            resMDiag[ iModel ] = resolution[ iModel ];
        }
        save( resMDiag, "resMDiag.vec" );
        save( resM, "resolution.matrix" );
    }

    delete transData;
    delete transModel;
    return EXIT_SUCCESS;
}
//** MAIN
// TTMod - travel time modelling using the Dijkstra algorithm: computes first
// arrival times for the shot/geophone pairs of a data container on a loaded
// (or generated) mesh, optionally estimates errors and adds Gaussian noise,
// and writes the result to outFilename.
int main( int argc, char *argv [] )
{
    bool isVelocity = true, isSlowness = false, doError = false, addNoise = false;
    int nSegments = 6, verboseCount = 0;
    double relativeInnerMaxEdgeLength = 0.01, errTime = 0.001, errPerc = 0;
    string meshFilename( NOT_DEFINED ), modelFilename( NOT_DEFINED ), outFilename( "out.dat" );
    string dataFileName(NOT_DEFINED ), attributemap ( NOT_DEFINED );

    OptionMap oMap;
    oMap.setDescription("Description. TTMod - Travel time modelling using Dijkstra algorithm.\n");
    oMap.addLastArg( dataFileName, "Data file" );
    oMap.add( verboseCount, "v" , "verbose", "Verbose mode (2 times for debug mode)." );
    oMap.add( doError, "E" , "doError", "Calculate error as well." );
    oMap.add( isVelocity, "V" , "isVelocity", "Input is velocity" );
    oMap.add( isSlowness, "S" , "isSlowness", "Input is slowness" );
    oMap.add( addNoise, "N" , "addNoise", "Noisify data." );
    oMap.add( meshFilename, "p:" , "meshFilename", "Mesh file." );
    oMap.add( outFilename, "o:" , "outFilename", "Output file name [out.dat]." );
    oMap.add( errPerc, "e:" , "errorPercent", "Percentage error (for -N/-E) [0]." );
    oMap.add( errTime, "t:" , "errorTime", "Absolute traveltime error (for -N/-E) [1ms]." );
    oMap.add( attributemap, "a:" , "attributeMap", "Map file associating velocity/slowness [all 1m/s]." );
    oMap.add( modelFilename, "m:" , "modelFilename", "Model file instead of attribute map." );
    oMap.parse( argc, argv );
    bool verbose = ( verboseCount > 0 );//, debug = ( verboseCount > 1 );
    // NOTE(review): -S forces isVelocity back to true, so slowness input takes
    // the same path as velocity here — confirm this is intended.
    if ( isSlowness ) isVelocity = true;

    DataContainer data( dataFileName );
    if ( verbose ) data.showInfos();

    // load the mesh, or build one around the sensor geometry
    Mesh mesh;
    if ( meshFilename != NOT_DEFINED ) {
        mesh.load( meshFilename );
    } else {
        cerr << WHERE_AM_I << " no mesh given. Creating one. ..." << endl;
        mesh.createClosedGeometryParaMesh( data.sensorPositions(),
                                           nSegments, relativeInnerMaxEdgeLength,
                                           data.additionalPoints() );
    }
    vcout << "Mesh: "; mesh.showInfos( );

    // map file: attribute -> velocity (converted to slowness) or slowness
    if ( attributemap != NOT_DEFINED ) {
        std::map < float, float > attMap( loadFloatMap( attributemap ) );
        if ( isVelocity ) {
            for (std::map < float, float >::iterator it = attMap.begin(); it!=attMap.end(); it ++ ) it->second = 1.0 / it->second;
        }
        mesh.mapCellAttributes( attMap );
    }

    /*! set up TT modeling class; */
    TravelTimeDijkstraModelling f( mesh, data );
    // vcout << "model size=" << model.size() << " min/max=" << min(model) << "/" << max(model) << endl;

    RVector traveltime( data.size() );
    if ( modelFilename != NOT_DEFINED ) {
        // explicit model: traveltimes from the way matrix, plus ray coverage
        RVector model;
        load( model, modelFilename );
        if ( isVelocity ) model = 1.0 / model;
        SparseMapMatrix < double, size_t > W;
        f.createJacobian( W, model );
        traveltime = W * model;
        std::cout << "rms(data, modelResponse) = " << rms( data( "t" ), traveltime ) << "s." << std::endl;
        RVector coverage( model.size() );
        coverage = transMult( W, RVector( data.size(), 1.0 ) );
        save( coverage, "coverage.vec", Ascii );
    // } else {
    //     traveltime = f.response( );
    }

    cout << "min/max traveltime = " << min( traveltime ) << "/" << max( traveltime ) << "s" << endl;
    data.set( "t", traveltime );

    if ( doError ) {
        /*! estimate error by error time and percentage value; */
        vcout << "Estimate error: " << errPerc << "% + " << errTime << "s" << endl;
        data.set( "err", errTime / data("t") + errPerc / 100.0 ); // always relative error
        vcout << "Data error:" << " min = " << min( data("err") ) * 100 << "%" << " max = " << max( data("err") ) * 100 << "%" << endl;
        /*! add Gaussian noise of error level to the data; */
        if ( addNoise ) {
            vcout << "Noisifying data by Gaussian error distribution" << endl;
            RVector noise( data.size() );
            randn( noise ); //! Standard normalized Gaussian distribution
            data.set( "t", data("t") * ( ( noise * data("err") ) + 1.0 ) ); //! a_noise = a * ( 1 + randn * da / a)
        }
        data.save( outFilename, std::string( "a m t err" ) ); // a=shot, m=geophone, t=time
    } else {
        data.save( outFilename, std::string( "a m t" ) ); // a=shot, m=geophone, t=time
    }
    return EXIT_SUCCESS;
}
// Returns the stored value for the option, or an invalid (default-constructed)
// QVariant when the option has not been set.
QVariant QSparqlConnectionOptionsPrivate::option(const QString& name) const
{
    const OptionMap::const_iterator found = map.constFind(name);
    return (found != map.constEnd()) ? *found : QVariant();
}
// Marks every option key present in `with` as enabled in `out`;
// the values stored in `with` are ignored, only the keys matter.
void Settings::SettingsPerModule::merge(OptionMap& out, const OptionMap& with) const
{
    for (OptionMap::const_iterator option = with.begin(); option != with.end(); ++option)
        out[option->first] = true;
}
// DC1dInv - Marquardt-type 1D block inversion of DC resistivity soundings for
// nlay layer thicknesses and resistivities; optional IP inversion (4th data
// column) and resolution analysis. Writes resistivity.vec, thickness.vec and
// response.vec (plus phase.vec/aphase.vec, resMDiag.vec/resM).
int main( int argc, char *argv [] )
{
    bool lambdaOpt = false, doResolution = false, useTan = false, optimizeChi1 = false;
    double lambda = 10.0, lambdaIP = 1.0, lbound = 0.0, ubound = 0.0, errPerc = 3.0;
    int maxIter = 10, nlay = 3, verboseCount = 0, linCount = 0;
    std::string modelFile( NOT_DEFINED ), dataFileName;

    OptionMap oMap;
    oMap.setDescription("Description. DC1dInv - 1D block inversion of dc resistivity data\n");
    oMap.addLastArg( dataFileName, "Data file" );
    oMap.add( verboseCount, "v" , "verbose", "Verbose/debug/dosave mode (multiple use)." );
    oMap.add( lambdaOpt, "O" , "OptimizeLambda", "Optimize model smoothness using L-curve." );
    oMap.add( doResolution, "D" , "doResolution", "Do resolution analysis." );
    oMap.add( optimizeChi1, "C" , "OptimizeChi1", "Optimize lambda subject to chi^2=1." );
    oMap.add( useTan, "T" , "useTan", "Use (co-)Tan instead of Log for LU." );
    oMap.add( linCount, "L" , "dataLin", "Use linear trafo for data, 2x also for model." );
    oMap.add( maxIter, "i:", "maxIter", "Maximum iteration number." );
    oMap.add( lambda, "l:", "lambda", "Regularization strength lambda." );
    oMap.add( lambdaIP, "r:", "lambdaIP", "Regularization strength lambda for IP." );
    oMap.add( errPerc, "e:", "error", "Error percentage" );
    oMap.add( nlay, "n:", "nlay", "Number of layers" );
    oMap.add( lbound, "b:", "lbound", "Lower Resistivity bound" );
    oMap.add( ubound, "u:", "ubound", "Upper Resistivity bound" );
    oMap.add( modelFile, "m:", "modelFile", "Model file for pure modelling. file: thick_i rho_i\\n" );
    oMap.parse( argc, argv );
    bool verbose = ( verboseCount > 0 ), debug = ( verboseCount > 1 ), dosave = ( verboseCount > 2 );
    bool dataLin = ( linCount > 0 ), modelLin = ( linCount > 1 );
    // NOTE(review): modelFile is parsed but never used below.

    /*! Data: read data file from column file */
    RMatrix abmnr;
    loadMatrixCol( abmnr, dataFileName ); //! read data
    RVector ab2( abmnr[ 0 ] ); //! first column
    RVector mn2( abmnr[ 1 ] ); //! second column
    RVector rhoa( abmnr[ 2 ] ); //! third column

    /*! Define discretization according to AB/2 */
    double maxDep = max( ab2 ) / 2; //! rule of thumb
    std::cout << "Maximum depth estimated to " << maxDep << std::endl;

    DC1dModelling f( nlay, ab2, mn2, debug );

    /*! Error estimation: relative voltage error plus percentage error */
    double current = 0.1, errVolt = 0;//1e-4;
    RVector voltage( rhoa * f( RVector( 3, 1.0 ) ) * current );
    RVector error = errVolt / voltage + errPerc / 100.0;
    vcout << "error min/max = " << min( error ) << "/" << max( error ) << std::endl;

    /*! Transformations: log for app. resisivity and thickness, logLU for resistivity */
    // TransLog< RVector > transThk;
    // TransLogLU< RVector > transRho( lbound, ubound );
    // TransLog< RVector > transRhoa;
    // NOTE(review): the transformations below are heap-allocated and never
    // deleted; they are only reclaimed at process exit.
    Trans< RVector > *transThk, *transRho, *transRhoa;
    if ( modelLin ) {
        transThk = new Trans< RVector >;
        transRho = new Trans< RVector >;
    } else {
        transThk = new TransLog< RVector >;
        if ( useTan ) {
            transRho = new TransCotLU< RVector >( lbound, ubound );
        } else {
            transRho = new TransLogLU< RVector >( lbound, ubound );
        }
    }
    if ( dataLin ) {
        transRhoa = new Trans< RVector >;
    } else {
        transRhoa = new TransLog< RVector >;
    }
    f.region( 0 )->setTransModel( *transThk );
    f.region( 1 )->setTransModel( *transRho );

    // starting values: equal thicknesses from a depth rule of thumb,
    // resistivities from the median apparent resistivity
    double paraDepth = max( ab2 ) / 3;
    vcout << "Paradepth = " << paraDepth << std::endl;
    f.region( 0 )->setStartValue( paraDepth / nlay / 2.0 );
    f.region( 1 )->setStartValue( median( rhoa ) );

    RVector model;//( f.createDefaultStartModel() );
    model = f.createStartVector();
    // model[ nlay ] *= 1.5; //! in order to make thicknesses sensitive
    DEBUG save( model, "start.vec" );

    /*! Set up inversion with full matrix, data and forward operator */
    RInversion inv( rhoa, f, verbose, dosave );
    inv.setTransData( *transRhoa );
    inv.setMarquardtScheme( 0.8 ); //! 0th order local decreasing regularization
    inv.setLambda( lambda );
    inv.setOptimizeLambda( lambdaOpt );
    inv.setMaxIter( maxIter );
    inv.setRelativeError( error ); //! error model
    inv.setModel( model ); //! starting model
    inv.setDeltaPhiAbortPercent( 0.5 );

    /*! actual computation: run the inversion */
    if ( optimizeChi1 ) {
        model = inv.runChi1( 0.1 );
    } else {
        model = inv.run();
    }

    // split the model vector into thicknesses and resistivities and store them
    RVector thk( nlay - 1 );
    RVector res( nlay );
    for ( int i = 0 ; i < nlay - 1 ; i++ ) thk[ i ] = model[ i ];
    for ( int i = 0 ; i < nlay ; i++ ) res[ i ] = model[ nlay - 1 + i ];
    save( res, "resistivity.vec" );
    save( thk, "thickness.vec" );
    save( inv.response(), "response.vec" );

    if ( verbose ) {
        // print rounded resistivities and cumulated layer depths
        RVector cumz( thk );
        for ( size_t i = 1 ; i < thk.size() ; i++ ) cumz[i] = cumz[ i-1 ] + thk[i];
        cumz.round(0.1);
        res.round(0.1);
        std::cout << "Res = " << res << std::endl;
        std::cout << " z = " << cumz << std::endl;
    }

    if ( abmnr.cols() > 3 ) { //** IP present
        if ( verbose ) std::cout << "Found ip values, doing ip inversion" << std::endl;
        //! imaginary apparent resistivity
        RVector rhoai( rhoa * sin( abmnr[ 3 ] / 1000. ) );
        //! modelling operator from fixed layer
        // NOTE(review): ipMesh is created but never used below.
        Mesh ipMesh( createMesh1D( nlay ) );
        DC1dRhoModelling fIP( thk, ab2, mn2, verbose );
        fIP.region( 0 )->setTransModel( *transRho );
        //! IP (imaginary resistivity) inversion using fIP
        RInversion invIP( rhoai, fIP, verbose );
        invIP.setAbsoluteError( rhoai / abmnr[ 3 ] * 1.0 );
        //! take jacobian from real valued problems
        invIP.setModel( res );
        invIP.checkJacobian(); //! force jacobian calculation
        invIP.setRecalcJacobian( false );
        invIP.setLambda( lambdaIP );
        invIP.setLocalRegularization( true ); //! Marquardt method
        invIP.setModel( model ); //! starting model
        invIP.stopAtChi1( false );
        RVector ipModel( nlay, median( rhoai ) );
        invIP.setModel( ipModel );
        ipModel = invIP.run();
        // convert inverted imaginary resistivity to phase (mrad)
        RVector phase = atan( ipModel / res ) * 1000.;
        save( phase, "phase.vec" );
        RVector aphase = atan( invIP.response() / rhoa ) * 1000.;
        save( aphase, "aphase.vec" );
        std::cout << " ip = " << phase << std::endl;
    }

    if ( doResolution ) {
        // one resolution column per model parameter; keep the diagonal separately
        size_t nModel( 2 * nlay - 1 );
        RVector resolution( nModel );
        RVector resMDiag ( nModel );
        RMatrix resM;
        for ( size_t iModel = 0; iModel < nModel; iModel++ ) {
            resolution = inv.modelCellResolution( iModel );
            resM.push_back( resolution );
            resMDiag[ iModel ] = resolution[ iModel ];
        }
        save( resMDiag, "resMDiag.vec" );
        save( resM, "resM" );
        vcout << "diagRM = " << resMDiag << std::endl;
    }
    return EXIT_SUCCESS;
}
int main( int argc, char *argv [] ){ bool readVecs = false, allNeumannBoundary = false, saveBin = false, exportVTK = false; bool refineP = false, refineH = false, exportBoundary = false, is2dMesh = false, makeLayers = false; int equiBoundary = 0, prolongBoundary = 0, prolongFactor = 5; std::string meshFile, outFile = NOT_DEFINED, modelFileName = NOT_DEFINED; OptionMap oMap; oMap.setDescription("Description. BMS2VTK - Convert BMS to VTK or create new ones from vectors\n"); oMap.addLastArg( meshFile, "Mesh file or vector basename" ); oMap.add( is2dMesh, "2" , "is2dMesh" , "mesh is 2d instead of 3d (with -R)" ); oMap.add( readVecs, "R" , "readVectors" , "Read vectors .x/y/z from files" ); oMap.add( makeLayers, "L" , "makeLayers" , "Give each layer another boundary" ); oMap.add( allNeumannBoundary,"N" , "allNeumannBoundary", "All boundaries are Neumann" ); oMap.add( refineH, "H" , "refineH" , "Refine mesh spatially" ); oMap.add( exportBoundary, "B" , "exportBoundary" , "Export boundary" ); oMap.add( exportVTK, "V" , "exportVTK" , "Export vtk file" ); oMap.add( refineP, "P" , "refineP" , "Refine mesh polynomially" ); oMap.add( equiBoundary, "e:" , "equiBoundary" , "equidistant boundary around hex mesh" ); oMap.add( prolongBoundary, "p:" , "prolongBoundary" , "prolongated boundary around equiBoundary" ); oMap.add( prolongFactor, "f:" , "prolongFactor" , "prolongation factor for prolongBoundary" ); oMap.add( outFile, "o:" , "outFile" , "filename for output" ); oMap.add( modelFileName, "a:" , "modelFile" , "model file to include" ); oMap.parse( argc, argv ); if ( outFile == NOT_DEFINED ) outFile = meshFile; Mesh mesh; if ( readVecs ) { //! create hexahedral mesh from given x/y/z vectors RVector x, y, z; load( x, meshFile + ".x" ); load( y, meshFile + ".y" ); load( z, meshFile + ".z" ); z = sort( z ); double xmin = x[ 0 ], xmax = x[ x.size() -1 ]; double ymin = y[ 0 ], ymax = y[ y.size() -1 ]; double zmin = z[ 0 ]; if ( equiBoundary > 0 || prolongBoundary > 0 ) { //! 
create prolongation vector RVector addVec( equiBoundary + prolongBoundary , 1.0 ); for ( size_t i = 1 ; i < addVec.size() ; i++ ) addVec[ i ] += addVec[ i - 1 ]; if ( prolongBoundary > 0 ) { double dx = 1; for ( int i = 0 ; i < prolongBoundary ; i ++ ) { dx *= prolongFactor; addVec[ equiBoundary + i ] = addVec[ equiBoundary + i -1 ] + dx; } } std::cout << addVec << std::endl; RVector revVec = addVec; for ( size_t i = 0 ; i < revVec.size() ; i++ ) revVec[ i ] = addVec[ revVec.size() - i - 1 ]; std::cout << revVec << std::endl; x = cat( cat( RVector( revVec * ( x[ 0 ] - x[ 1 ]) + x[ 0 ] ), x ), RVector( addVec * ( x[ x.size() - 1 ] - x[ x.size() - 2 ] ) + x[ x.size()-1 ] ) ); std::cout << x << std::endl; y = cat( cat( RVector( revVec * ( y[ 0 ] - y[ 1 ]) + y[ 0 ] ), y ), RVector( addVec * ( y[ y.size() - 1 ] - y[ y.size() - 2 ] ) + y[ y.size()-1 ] ) ); std::cout << y << std::endl; z = cat( RVector( revVec * ( z[ 0 ] - z[ 1 ] ) + z[ 0 ] ), z ); std::cout << z << std::endl; } if ( is2dMesh ) { mesh = createMesh2D( x, z ); } else { mesh = createMesh3D( x, y, z ); } if ( equiBoundary + prolongBoundary > 0 ) { //! determine cell marker for ( size_t i = 0 ; i < mesh.cellCount() ; i++ ) { int num = 1; RVector3 midpoint = mesh.cell( i ).center(); if ( is2dMesh ) { if ( midpoint[ 0 ] > xmin && midpoint[ 0 ] < xmax && midpoint[ 1 ] > zmin ) { if ( makeLayers ) { //! each layer another number for ( int k = z.size()-1 ; k>=0 ; k-- ) { if ( midpoint[ 1 ] < z[ k ] ) num++; } } else { //! normal 1/2 mesh num = 2; } } } else { //! 
a 3D mesh if ( midpoint[ 0 ] > xmin && midpoint[ 0 ] < xmax && midpoint[ 1 ] > ymin && midpoint[ 1 ] < ymax && midpoint[ 2 ] > zmin ) { num = 2; } } mesh.cell( i ).setMarker( num ); } } if ( allNeumannBoundary ) { setAllNeumannBoundaryConditions( mesh ); } else { setDefaultWorldBoundaryConditions( mesh ); } saveBin = true; } else { // read bms file mesh.load( meshFile ); } std::cout << "Mesh:\t"; mesh.showInfos(); if ( refineH ) { mesh = mesh.createH2(); std::cout << "MeshH:\t"; mesh.showInfos(); saveBin = true; } if ( refineP ) { mesh = mesh.createP2( ); std::cout << "MeshP:\t"; mesh.showInfos(); saveBin = true; } if ( modelFileName != NOT_DEFINED ) { RVector model( modelFileName ); mesh.addExportData( modelFileName, model ); } if ( saveBin ) mesh.saveBinary( outFile ); if ( exportVTK || !saveBin ) mesh.exportVTK( outFile ); if ( exportBoundary ) mesh.exportBoundaryVTU( "boundary.vtu" ); return EXIT_SUCCESS; }