Example #1
Filter* ConstantFilter::combinable(Filter* f, Operator *op)
{
    if (typeid(*f) == typeid(SimpleFilter))
    {
        SimpleFilter *sf = dynamic_cast<SimpleFilter*>(f);
        return sf->combinable(this, op);
    }
    
    // A constant filter may have a NULL fOp when it is equivalent to a pure simple filter
    if (typeid(ConstantFilter) == typeid(*f) &&
        (!fOp || fOp->data().compare(op->data()) == 0 ))
    {
        ConstantFilter *cf = dynamic_cast<ConstantFilter*>(f);
        if (cf && ( !cf->op() || !fOp || fOp->data().compare(cf->op()->data()) == 0 )
            && fCol->sameColumn(cf->col().get()))
        {
            for (unsigned int i = 0; i < cf->filterList().size(); i++)
                fFilterList.push_back(cf->filterList()[i]);
            fOp.reset(op);
            return this;
        }
    }

    return NULL;       
}
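
A minimal, self-contained sketch of the merge rule the method above implements: two constant filters over the same column, glued together by the same operator, can be collapsed into one by concatenating their filter lists. The types below are simplified stand-ins for illustration, not the real execplan classes.

#include <string>
#include <vector>

// Simplified stand-in for a constant filter: one column, a connecting operator
// ("and"/"or"), and a list of simple predicates on that column.
struct MiniConstantFilter
{
    std::string column;
    std::string op;                     // empty plays the role of the NULL fOp case above
    std::vector<std::string> predicates;
};

// Merge 'other' into 'acc' when both apply to the same column and are combined
// by the same operator; return false (leaving 'acc' untouched) otherwise,
// mirroring the NULL return of ConstantFilter::combinable().
bool combineConstantFilters(MiniConstantFilter& acc,
                            const MiniConstantFilter& other,
                            const std::string& op)
{
    if (acc.column != other.column)
        return false;

    if (!acc.op.empty() && acc.op != op)
        return false;

    acc.op = op;
    acc.predicates.insert(acc.predicates.end(),
                          other.predicates.begin(), other.predicates.end());
    return true;
}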
Example #2
// >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
SimpleFilter* StrongMotionParameters::findSimpleFilter(const std::string& publicID) const {
	SimpleFilter* object = SimpleFilter::Cast(PublicObject::Find(publicID));
	if ( object != NULL && object->parent() == this )
		return object;
	
	return NULL;
}
Example #3
void makeAntiJoin(const ParseTree* n)
{
    TreeNode* tn = n->data();
    SimpleFilter* sf = dynamic_cast<SimpleFilter*>(tn);

    if (!sf)
        return;

    uint64_t lJoinInfo = sf->lhs()->joinInfo();

    if (lJoinInfo & JOIN_SEMI)
    {
        lJoinInfo &= ~JOIN_SEMI;
        lJoinInfo |= JOIN_ANTI;

        if (lJoinInfo & JOIN_NULLMATCH_CANDIDATE)
            lJoinInfo |= JOIN_NULL_MATCH;

        sf->lhs()->joinInfo(lJoinInfo);
    }

    uint64_t rJoinInfo = sf->rhs()->joinInfo();

    if (rJoinInfo & JOIN_SEMI)
    {
        rJoinInfo &= ~JOIN_SEMI;
        rJoinInfo |= JOIN_ANTI;

        if (rJoinInfo & JOIN_NULLMATCH_CANDIDATE)
            rJoinInfo |= JOIN_NULL_MATCH;

        sf->rhs()->joinInfo(rJoinInfo);
    }
}
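
The essence of makeAntiJoin() is a bit-flag rewrite applied to each operand's join info. A self-contained sketch of that rewrite follows; the flag values are assumptions for illustration only, the real JOIN_* constants are defined in the execplan headers.

#include <cstdint>

// Assumed flag values for illustration only.
const uint64_t JOIN_SEMI                = 0x01;
const uint64_t JOIN_ANTI                = 0x02;
const uint64_t JOIN_NULLMATCH_CANDIDATE = 0x04;
const uint64_t JOIN_NULL_MATCH          = 0x08;

// Turn a semi-join marking into an anti-join marking, promoting a null-match
// candidate to a real null-match flag, exactly as done for lhs and rhs above.
uint64_t semiToAnti(uint64_t joinInfo)
{
    if (joinInfo & JOIN_SEMI)
    {
        joinInfo &= ~JOIN_SEMI;
        joinInfo |= JOIN_ANTI;

        if (joinInfo & JOIN_NULLMATCH_CANDIDATE)
            joinInfo |= JOIN_NULL_MATCH;
    }

    return joinInfo;
}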
Example #4
void
PidMain::openButterworthAlta()
{
    ImageTabs* current = getCurrentImageWindow();
    if (current != NULL)
    {
        SimpleFilter *filter = new SimpleFilter(this, current, new Butterworth(true));
        filter->setAttribute(Qt::WA_DeleteOnClose);
        filter->exec();
    }
}
Example #5
void
PidMain::openPassaBaixaIdeal()
{
    ImageTabs* current = getCurrentImageWindow();
    if (current != NULL)
    {
        SimpleFilter *filter = new SimpleFilter(this, current, new PassaBaixa);
        filter->setAttribute(Qt::WA_DeleteOnClose);
        filter->exec();
    }
}
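
Examples #4 and #5 differ only in the frequency-domain filter passed to the SimpleFilter dialog, so the pattern could be factored into a single helper. This is a hypothetical sketch: openFilterDialog and FrequencyFilter are illustrative names that are not part of the original project, and it assumes SimpleFilter takes ownership of the filter object.

// Hypothetical helper, not part of the original code.
void PidMain::openFilterDialog(FrequencyFilter* frequencyFilter)
{
    ImageTabs* current = getCurrentImageWindow();
    if (current != NULL)
    {
        SimpleFilter* dialog = new SimpleFilter(this, current, frequencyFilter);
        dialog->setAttribute(Qt::WA_DeleteOnClose);   // free the dialog when it closes
        dialog->exec();
    }
    else
    {
        delete frequencyFilter;   // no image window to apply it to
    }
}

// Usage, mirroring the two slots above:
//   openFilterDialog(new Butterworth(true));
//   openFilterDialog(new PassaBaixa);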
Example #6
// >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
bool StrongMotionParameters::add(SimpleFilter* simpleFilter) {
	if ( simpleFilter == NULL )
		return false;

	// Element already has a parent
	if ( simpleFilter->parent() != NULL ) {
		SEISCOMP_ERROR("StrongMotionParameters::add(SimpleFilter*) -> element has already a parent");
		return false;
	}

	if ( PublicObject::IsRegistrationEnabled() ) {
		SimpleFilter* simpleFilterCached = SimpleFilter::Find(simpleFilter->publicID());
		if ( simpleFilterCached ) {
			if ( simpleFilterCached->parent() ) {
				if ( simpleFilterCached->parent() == this )
					SEISCOMP_ERROR("StrongMotionParameters::add(SimpleFilter*) -> element with same publicID has been added already");
				else
					SEISCOMP_ERROR("StrongMotionParameters::add(SimpleFilter*) -> element with same publicID has been added already to another object");
				return false;
			}
			else
				simpleFilter = simpleFilterCached;
		}
	}

	// Add the element
	_simpleFilters.push_back(simpleFilter);
	simpleFilter->setParent(this);

	// Create the notifiers
	if ( Notifier::IsEnabled() ) {
		NotifierCreator nc(OP_ADD);
		simpleFilter->accept(&nc);
	}

	// Notify registered observers
	childAdded(simpleFilter);
	
	return true;
}
Example #7
SimpleFilter* createSimpleFilter
				(
				CalpontSystemCatalog*& csc,
				const CalpontSystemCatalog::TableColName& tcn,
				const string& opstr,
				ConstantColumn* cc
				)
{
	SimpleFilter* lsf = new SimpleFilter();

	Operator* op = new Operator();
	op->data(opstr);
	CalpontSystemCatalog::ColType ccct;
	ccct = op->resultType();
	ccct.colDataType = cc->resultType().colDataType;
	op->operationType(ccct);

	SOP sop(op);
	lsf->op(sop);

	CalpontSystemCatalog::OID oid = csc->lookupOID(tcn);
	CalpontSystemCatalog::ColType ct = csc->colType(oid);

	SimpleColumn* sc = new SimpleColumn();
	sc->schemaName(tcn.schema);
	sc->tableName(tcn.table);
	sc->tableAlias(tcn.table);
	sc->columnName(tcn.column);
	sc->oid(oid);
	sc->resultType(ct);
	sc->alias(tcn.toString());

	lsf->lhs(sc);
	lsf->rhs(cc);

	return lsf;
}
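
A hedged usage sketch for the factory above, assuming a valid catalog pointer is available and that TableColName exposes its schema/table/column members for assignment (the member names come from the function body; everything else is illustrative).

// Illustrative only: builds the filter "tpch.region.r_regionkey = 3" using the
// factory above. 'csc' is assumed to be a valid catalog pointer obtained elsewhere.
SimpleFilter* buildRegionKeyFilter(CalpontSystemCatalog*& csc)
{
	CalpontSystemCatalog::TableColName tcn;
	tcn.schema = "tpch";
	tcn.table  = "region";
	tcn.column = "r_regionkey";

	// Constant column built from a numeric literal, as in Example #14.
	ConstantColumn* cc = new ConstantColumn("3", ConstantColumn::NUM);

	return createSimpleFilter(csc, tcn, "=", cc);
}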
Example #8
// >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
bool StrongMotionParameters::updateChild(Object* child) {
	SimpleFilter* simpleFilterChild = SimpleFilter::Cast(child);
	if ( simpleFilterChild != NULL ) {
		SimpleFilter* simpleFilterElement
			= SimpleFilter::Cast(PublicObject::Find(simpleFilterChild->publicID()));
		if ( simpleFilterElement && simpleFilterElement->parent() == this ) {
			*simpleFilterElement = *simpleFilterChild;
			return true;
		}
		return false;
	}

	Record* recordChild = Record::Cast(child);
	if ( recordChild != NULL ) {
		Record* recordElement
			= Record::Cast(PublicObject::Find(recordChild->publicID()));
		if ( recordElement && recordElement->parent() == this ) {
			*recordElement = *recordChild;
			return true;
		}
		return false;
	}

	StrongOriginDescription* strongOriginDescriptionChild = StrongOriginDescription::Cast(child);
	if ( strongOriginDescriptionChild != NULL ) {
		StrongOriginDescription* strongOriginDescriptionElement
			= StrongOriginDescription::Cast(PublicObject::Find(strongOriginDescriptionChild->publicID()));
		if ( strongOriginDescriptionElement && strongOriginDescriptionElement->parent() == this ) {
			*strongOriginDescriptionElement = *strongOriginDescriptionChild;
			return true;
		}
		return false;
	}

	return false;
}
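
The three branches above repeat the same cast-find-assign pattern; a hypothetical template helper could express it once. This is a sketch only, relying on the Cast()/Find()/parent()/publicID() calls already shown, and is not part of the generated SeisComP code.

// Hypothetical helper; T is any PublicObject-derived class with the static
// Cast() used above. 'handled' tells the caller whether the type matched at all.
template <class T>
bool updateChildOfType(Object* child, PublicObject* parent, bool& handled) {
	T* typedChild = T::Cast(child);
	handled = (typedChild != NULL);
	if ( !handled )
		return false;

	T* element = T::Cast(PublicObject::Find(typedChild->publicID()));
	if ( element && element->parent() == parent ) {
		*element = *typedChild;   // copy the updated state onto the registered instance
		return true;
	}

	return false;
}

// Possible use inside updateChild():
//   bool handled;
//   if ( updateChildOfType<SimpleFilter>(child, this, handled) ) return true;
//   if ( handled ) return false;
//   ... then try the next child type ...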
Example #9
ParseTree* replaceRefCol(ParseTree*& n, CalpontSelectExecutionPlan::ReturnedColumnList& derivedColList)
{
	ParseTree *lhs = n->left();
	ParseTree *rhs = n->right();

	if (lhs)
		n->left(replaceRefCol(lhs, derivedColList));
	if (rhs)
		n->right(replaceRefCol(rhs, derivedColList));
	SimpleFilter *sf = dynamic_cast<SimpleFilter*>(n->data());
	ConstantFilter *cf = dynamic_cast<ConstantFilter*>(n->data());
	ReturnedColumn *rc = dynamic_cast<ReturnedColumn*>(n->data());
	if (sf)
	{
		sf->replaceRealCol(derivedColList);
	}
	else if (cf)
	{
		cf->replaceRealCol(derivedColList);
	}
	else if (rc)
	{
		SimpleColumn* sc = dynamic_cast<SimpleColumn*>(rc);
		if (sc)
		{
			ReturnedColumn* tmp = derivedColList[sc->colPosition()]->clone();
			delete sc;
			n->data(tmp);
		}
		else
		{
			rc->replaceRealCol(derivedColList);
		}
	}
	return n;
}
Example #10
   // Method that will really process information
void example9::process()
{

      // We will read each section name, which is equivalent to the station name.
      // Station names will be read in alphabetical order.
   string station;
   while ( (station = confReader.getEachSection()) != "" )
   {

         // We will skip the 'DEFAULT' section because we expect a specific
         // section for each receiver. However, if data is missing we will
         // look for it in 'DEFAULT' (see how the 'setFallback2Default()'
         // method of the 'ConfDataReader' object is used in 'spinUp()').
      if( station == "DEFAULT" )
      {
         continue;
      }


         // Show a message indicating that we are starting with this station
      cout << "Starting processing for station: '" << station << "'." << endl;


         // Create input observation file stream
      RinexObsStream rin;

         // Enable exceptions
      rin.exceptions(ios::failbit);

         // Try to open Rinex observations file
      try
      {

            // Open Rinex observations file in read-only mode
         rin.open( confReader("rinexObsFile", station), std::ios::in );

      }
      catch(...)
      {

         cerr << "Problem opening file '"
              << confReader.getValue("rinexObsFile", station)
              << "'." << endl;

         cerr << "Maybe it doesn't exist or you don't have "
              << "proper read permissions."
              << endl;

         cerr << "Skipping receiver '" << station << "'."
              << endl;

            // Close current Rinex observation stream
         rin.close();

         continue;

      }  // End of 'try-catch' block


         // Declare a "SP3EphemerisStore" object to handle precise ephemeris
      SP3EphemerisStore SP3EphList;

         // Set flags to reject satellites with bad or absent positional
         // values or clocks
      SP3EphList.rejectBadPositions(true);
      SP3EphList.rejectBadClocks(true);

         // Read if we should check for data gaps.
      if ( confReader.getValueAsBoolean( "checkGaps", station ) )
      {
         SP3EphList.enableDataGapCheck();
         SP3EphList.setGapInterval(
                     confReader.getValueAsDouble("SP3GapInterval",station) );
      }

         // Read if we should check for too wide interpolation intervals
      if ( confReader.getValueAsBoolean( "checkInterval", station ) )
      {
         SP3EphList.enableIntervalCheck();
         SP3EphList.setMaxInterval(
                     confReader.getValueAsDouble("maxSP3Interval",station) );
      }


         // Load all the SP3 ephemerides files from variable list
      string sp3File;
      while ( (sp3File = confReader.fetchListValue("SP3List",station) ) != "" )
      {

            // Try to load each ephemeris file
         try
         {

            SP3EphList.loadFile( sp3File );

         }
         catch (FileMissingException& e)
         {
               // If file doesn't exist, issue a warning
            cerr << "SP3 file '" << sp3File << "' doesn't exist or you don't "
                 << "have permission to read it. Skipping it." << endl;

            continue;

         }

      }  // End of 'while ( (sp3File = confReader.fetchListValue( ... "


         // Load station nominal position
      double xn(confReader.fetchListValueAsDouble("nominalPosition",station));
      double yn(confReader.fetchListValueAsDouble("nominalPosition",station));
      double zn(confReader.fetchListValueAsDouble("nominalPosition",station));
         // This peculiar code works because each call to a 'fetchListValue'
         // method takes the first element out of the given variable list
         // and removes it from that list.

      Position nominalPos( xn, yn, zn );


         // Create a 'ProcessingList' object where we'll store
         // the processing objects in order
      ProcessingList pList;


         // This object will check that all required observables are present
      RequireObservables requireObs;
      requireObs.addRequiredType(TypeID::P2);
      requireObs.addRequiredType(TypeID::L1);
      requireObs.addRequiredType(TypeID::L2);

         // This object will check that code observations are within
         // reasonable limits
      SimpleFilter pObsFilter;
      pObsFilter.setFilteredType(TypeID::P2);

         // Read if we should use C1 instead of P1
      bool usingC1( confReader.getValueAsBoolean( "useC1", station ) );
      if ( usingC1 )
      {
         requireObs.addRequiredType(TypeID::C1);
         pObsFilter.addFilteredType(TypeID::C1);
      }
      else
      {
         requireObs.addRequiredType(TypeID::P1);
         pObsFilter.addFilteredType(TypeID::P1);
      }

         // Add 'requireObs' to processing list (it is the first)
      pList.push_back(requireObs);

         // IMPORTANT NOTE:
         // It turns out that some receivers don't correct their clocks
         // from drift.
         // When this happens, their code observations may drift well beyond
         // what is usually expected from a pseudorange. In turn, this effect
         // causes "SimpleFilter" objects to start rejecting a lot of
         // satellites.
         // Hence, the "filterCode" option allows you to deactivate the
         // "SimpleFilter" object that filters out C1, P1 and P2, in case you
         // need to.
      bool filterCode( confReader.getValueAsBoolean( "filterCode", station ) );

         // Check if we are going to use this "SimpleFilter" object or not
      if( filterCode )
      {
         pList.push_back(pObsFilter);       // Add to processing list
      }


         // This object defines several handy linear combinations
      LinearCombinations comb;


         // Object to compute linear combinations for cycle slip detection
      ComputeLinear linear1;

         // Read if we should use C1 instead of P1
      if ( usingC1 )
      {
         linear1.addLinear(comb.pdeltaCombWithC1);
         linear1.addLinear(comb.mwubbenaCombWithC1);
      }
      else
      {
         linear1.addLinear(comb.pdeltaCombination);
         linear1.addLinear(comb.mwubbenaCombination);
      }
      linear1.addLinear(comb.ldeltaCombination);
      linear1.addLinear(comb.liCombination);
      pList.push_back(linear1);       // Add to processing list


         // Objects to mark cycle slips
      LICSDetector2 markCSLI2;         // Checks LI cycle slips
      pList.push_back(markCSLI2);      // Add to processing list
      MWCSDetector  markCSMW;          // Checks Melbourne-Wubbena cycle slips
      pList.push_back(markCSMW);       // Add to processing list


         // Object to keep track of satellite arcs
      SatArcMarker markArc;
      markArc.setDeleteUnstableSats(true);
      markArc.setUnstablePeriod(151.0);
      pList.push_back(markArc);       // Add to processing list


         // Object to decimate data
      Decimate decimateData(
               confReader.getValueAsDouble( "decimationInterval", station ),
               confReader.getValueAsDouble( "decimationTolerance", station ),
               SP3EphList.getInitialTime() );
      pList.push_back(decimateData);       // Add to processing list


         // Declare a basic modeler
      BasicModel basic(nominalPos, SP3EphList);

         // Set the minimum elevation
      basic.setMinElev(confReader.getValueAsDouble("cutOffElevation",station));

         // If we are going to use P1 instead of C1, we must reconfigure 'basic'
      if ( !usingC1 )
      {
         basic.setDefaultObservable(TypeID::P1);
      }


         // Add to processing list
      pList.push_back(basic);


         // Object to remove eclipsed satellites
      EclipsedSatFilter eclipsedSV;
      pList.push_back(eclipsedSV);       // Add to processing list


         // Object to compute gravitational delay effects
      GravitationalDelay grDelay(nominalPos);
      pList.push_back(grDelay);       // Add to processing list


         // Vector from monument to antenna ARP [UEN], in meters
      double uARP(confReader.fetchListValueAsDouble( "offsetARP", station ) );
      double eARP(confReader.fetchListValueAsDouble( "offsetARP", station ) );
      double nARP(confReader.fetchListValueAsDouble( "offsetARP", station ) );
      Triple offsetARP( uARP, eARP, nARP );


         // Declare some antenna-related variables
      Triple offsetL1( 0.0, 0.0, 0.0 ), offsetL2( 0.0, 0.0, 0.0 );
      AntexReader antexReader;
      Antenna receiverAntenna;

         // Check if we want to use Antex information
      bool useantex( confReader.getValueAsBoolean( "useAntex", station ) );
      if( useantex )
      {
            // Feed Antex reader object with Antex file
         antexReader.open( confReader.getValue( "antexFile", station ) );

            // Get receiver antenna parameters
         receiverAntenna =
            antexReader.getAntenna( confReader.getValue( "antennaModel",
                                                         station ) );

      }


         // Object to compute satellite antenna phase center effect
      ComputeSatPCenter svPcenter(nominalPos);
      if( useantex )
      {
            // Feed 'ComputeSatPCenter' object with 'AntexReader' object
         svPcenter.setAntexReader( antexReader );
      }

      pList.push_back(svPcenter);       // Add to processing list


         // Declare an object to correct observables to monument
      CorrectObservables corr(SP3EphList);
      corr.setNominalPosition(nominalPos);
      corr.setMonument( offsetARP );

         // Check if we want to use Antex patterns
      bool usepatterns(confReader.getValueAsBoolean("usePCPatterns", station ));
      if( useantex && usepatterns )
      {
         corr.setAntenna( receiverAntenna );

            // Should we use elevation/azimuth patterns or just elevation?
         corr.setUseAzimuth(confReader.getValueAsBoolean("useAzim", station));
      }
      else
      {
            // Fill vector from antenna ARP to L1 phase center [UEN], in meters
         offsetL1[0] = confReader.fetchListValueAsDouble("offsetL1", station);
         offsetL1[1] = confReader.fetchListValueAsDouble("offsetL1", station);
         offsetL1[2] = confReader.fetchListValueAsDouble("offsetL1", station);

            // Vector from antenna ARP to L2 phase center [UEN], in meters
         offsetL2[0] = confReader.fetchListValueAsDouble("offsetL2", station);
         offsetL2[1] = confReader.fetchListValueAsDouble("offsetL2", station);
         offsetL2[2] = confReader.fetchListValueAsDouble("offsetL2", station);

         corr.setL1pc( offsetL1 );
         corr.setL2pc( offsetL2 );

      }

      pList.push_back(corr);       // Add to processing list


         // Object to compute wind-up effect
      ComputeWindUp windup( SP3EphList,
                            nominalPos,
                            confReader.getValue( "satDataFile", station ) );
      pList.push_back(windup);       // Add to processing list


         // Declare a NeillTropModel object, setting its parameters
      NeillTropModel neillTM( nominalPos.getAltitude(),
                              nominalPos.getGeodeticLatitude(),
                              confReader.getValueAsInt("dayOfYear", station) );

         // We will need this value later for printing
      double drytropo( neillTM.dry_zenith_delay() );


         // Object to compute the tropospheric data
      ComputeTropModel computeTropo(neillTM);
      pList.push_back(computeTropo);       // Add to processing list


         // Object to compute ionosphere-free combinations to be used
         // as observables in the PPP processing
      ComputeLinear linear2;

         // Read if we should use C1 instead of P1
      if ( usingC1 )
      {
            // WARNING: When using C1 instead of P1 to compute PC combination,
            //          be aware that instrumental errors will NOT cancel,
            //          introducing a bias that must be taken into account by
            //          other means. This won't be taken into account in this
            //          example.
         linear2.addLinear(comb.pcCombWithC1);
      }
      else
      {
         linear2.addLinear(comb.pcCombination);
      }
      linear2.addLinear(comb.lcCombination);
      pList.push_back(linear2);       // Add to processing list


         // Declare a simple filter object to screen PC
      SimpleFilter pcFilter;
      pcFilter.setFilteredType(TypeID::PC);

         // IMPORTANT NOTE:
         // Like in the "filterCode" case, the "filterPC" option allows you to
         // deactivate the "SimpleFilter" object that filters out PC, in case
         // you need to.
      bool filterPC( confReader.getValueAsBoolean( "filterPC", station ) );

         // Check if we are going to use this "SimpleFilter" object or not
      if( filterPC )
      {
         pList.push_back(pcFilter);       // Add to processing list
      }


         // Object to align phase with code measurements
      PhaseCodeAlignment phaseAlign;
      pList.push_back(phaseAlign);       // Add to processing list


         // Object to compute prefit-residuals
      ComputeLinear linear3(comb.pcPrefit);
      linear3.addLinear(comb.lcPrefit);
      pList.push_back(linear3);       // Add to processing list


         // Declare a base-changing object: From ECEF to North-East-Up (NEU)
      XYZ2NEU baseChange(nominalPos);
         // We always need both ECEF and NEU data for 'ComputeDOP', so add this
      pList.push_back(baseChange);


         // Object to compute DOP values
      ComputeDOP cDOP;
      pList.push_back(cDOP);       // Add to processing list


         // Get if we want results in ECEF or NEU reference system
      bool isNEU( confReader.getValueAsBoolean( "USENEU", station ) );


         // Declare solver objects
      SolverPPP   pppSolver(isNEU);
      SolverPPPFB fbpppSolver(isNEU);


         // Get if we want 'forwards-backwards' or 'forwards' processing only
      int cycles( confReader.getValueAsInt("forwardBackwardCycles", station) );


         // Get if we want to process coordinates as white noise
      bool isWN( confReader.getValueAsBoolean( "coordinatesAsWhiteNoise",
                                               station ) );


         // White noise stochastic model
      WhiteNoiseModel wnM(100.0);      // 100 m of sigma


         // Decide what type of solver we will use for this station
      if ( cycles > 0 )
      {

            // In this case, we will use the 'forwards-backwards' solver

            // Check about coordinates as white noise
         if ( isWN )
         {
               // Reconfigure solver
            fbpppSolver.setCoordinatesModel(&wnM);
         }

            // Add solver to processing list
         pList.push_back(fbpppSolver);

      }
      else
      {

            // In this case, we will use the 'forwards-only' solver

            // Check about coordinates as white noise
         if ( isWN )
         {
               // Reconfigure solver
            pppSolver.setCoordinatesModel(&wnM);
         }

            // Add solver to processing list
         pList.push_back(pppSolver);

      }  // End of 'if ( cycles > 0 )'


         // Object to compute tidal effects
      SolidTides solid;


         // Configure ocean loading model
      OceanLoading ocean;
      ocean.setFilename( confReader.getValue( "oceanLoadingFile", station ) );


         // Numerical values (xp, yp) are pole displacements (arcsec).
      double xp( confReader.fetchListValueAsDouble( "poleDisplacements",
                                                    station ) );
      double yp( confReader.fetchListValueAsDouble( "poleDisplacements",
                                                    station ) );
         // Object to model pole tides
      PoleTides pole;
      pole.setXY( xp, yp );


         // This is the GNSS data structure that will hold all the
         // GNSS-related information
      gnssRinex gRin;


         // Prepare for printing
      int precision( confReader.getValueAsInt( "precision", station ) );


         // Let's open the output file
      string outName(confReader.getValue( "outputFile", station ) );

      ofstream outfile;
      outfile.open( outName.c_str(), ios::out );

         // Let's check if we are going to print the model
      bool printmodel( confReader.getValueAsBoolean( "printModel", station ) );

      string modelName;
      ofstream modelfile;

         // Prepare for model printing
      if( printmodel )
      {
         modelName = confReader.getValue( "modelFile", station );
         modelfile.open( modelName.c_str(), ios::out );
      }


         //// *** Now comes the REAL forwards processing part *** ////


         // Loop over all data epochs
      while(rin >> gRin)
      {

            // Store current epoch
         DayTime time(gRin.header.epoch);

            // Compute solid, oceanic and pole tides effects at this epoch
         Triple tides( solid.getSolidTide( time, nominalPos )  +
                       ocean.getOceanLoading( station, time )  +
                       pole.getPoleTide( time, nominalPos )    );


            // Update observable correction object with tides information
         corr.setExtraBiases(tides);

         try
         {

               // Let's process data. Thanks to 'ProcessingList' this is
               // very simple and compact: Just one line of code!!!.
            gRin >> pList;

         }
         catch(DecimateEpoch& d)
         {
               // If we catch a DecimateEpoch exception, just continue.
            continue;
         }
         catch(Exception& e)
         {
            cerr << "Exception for receiver '" << station <<
                    "' at epoch: " << time << "; " << e << endl;
            continue;
         }
         catch(...)
         {
            cerr << "Unknown exception for receiver '" << station <<
                    " at epoch: " << time << endl;
            continue;
         }


            // Ask if we are going to print the model
         if ( printmodel )
         {
            printModel( modelfile,
                        gRin );

         }

            // Check what type of solver we are using
         if ( cycles < 1 )
         {

               // This is a 'forwards-only' filter. Let's print to output
               // file the results of this epoch
            printSolution( outfile,
                           pppSolver,
                           time,
                           cDOP,
                           isNEU,
                           gRin.numSats(),
                           drytropo,
                           precision );

         }  // End of 'if ( cycles < 1 )'



         // The given epoch has been processed. Let's get the next one

      }  // End of 'while(rin >> gRin)'


         // Close current Rinex observation stream
      rin.close();


         // If we printed the model, we must close the file
      if ( printmodel )
      {
            // Close model file for this station
         modelfile.close();
      }


         // Clear content of SP3 ephemerides object
      SP3EphList.clear();



         //// *** Forwards processing part is over *** ////



         // Now decide what to do: If solver was a 'forwards-only' version,
         // then we are done and should continue with next station.
      if ( cycles < 1 )
      {

            // Close output file for this station
         outfile.close();

            // We are done with this station. Let's show a message
         cout << "Processing finished for station: '" << station
              << "'. Results in file: '" << outName << "'." << endl;

            // Go process next station
         continue;

      }


         //// *** If we got here, it is a 'forwards-backwards' solver *** ////


         // Now, let's do 'forwards-backwards' cycles
      try
      {

         fbpppSolver.ReProcess(cycles);

      }
      catch(Exception& e)
      {

            // If problems arose, issue a message and skip this receiver
         cerr << "Exception at reprocessing phase: " << e << endl;
         cerr << "Skipping receiver '" << station << "'." << endl;

            // Close output file for this station
         outfile.close();

            // Go process next station
         continue;

      }  // End of 'try-catch' block



         // Reprocess is over. Let's finish with the last processing

         // Loop over all data epochs, again, and print results
      while( fbpppSolver.LastProcess(gRin) )
      {

         DayTime time(gRin.header.epoch);

         printSolution( outfile,
                        fbpppSolver,
                        time,
                        cDOP,
                        isNEU,
                        gRin.numSats(),
                        drytropo,
                        precision );

      }  // End of 'while( fbpppSolver.LastProcess(gRin) )'


         // We are done. Close and go for next station

         // Close output file for this station
      outfile.close();


         // We are done with this station. Let's show a message
      cout << "Processing finished for station: '" << station
           << "'. Results in file: '" << outName << "'." << endl;


   }  // end of 'while ( (station = confReader.getEachSection()) != "" )'

   return;

}  // End of 'example9::process()'
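
The role SimpleFilter plays in the long chain above can be isolated into a few lines: it screens one or more code observables and is simply appended to the ProcessingList that later consumes each epoch as "gRin >> pList". A minimal sketch using only the calls already present in the example; as in the example, the filter and the list must live in the same scope as the processing loop.

      // Minimal sketch (same GPSTk calls as in Example #10): screen P2 plus
      // either C1 or P1, then register the filter in the processing chain.
   SimpleFilter codeFilter;
   codeFilter.setFilteredType(TypeID::P2);

   bool usingC1(true);                       // illustrative flag
   if ( usingC1 )
   {
      codeFilter.addFilteredType(TypeID::C1);
   }
   else
   {
      codeFilter.addFilteredType(TypeID::P1);
   }

   ProcessingList pList;
   pList.push_back(codeFilter);              // later consumed as: gRin >> pList;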
Example #11
/**
 * Handle MySQL's plugin functions
 * This is mostly for handling the null-related functions that MySQL adds to the execution plan
 */
void InSub::handleFunc(gp_walk_info* gwip, Item_func* func)
{
    if (func->functype() == Item_func::TRIG_COND_FUNC || func->functype() == Item_func::COND_OR_FUNC)
    {
        // purpose: remove the isnull() function from the parsetree in ptWorkStack.
        // IDB handles the null semantics in the join operation
        // trigcond(or_cond) is the only form we recognize for now
        if (func->argument_count() > 2)
        {
            fGwip.fatalParseError = true;
            fGwip.parseErrorText = "Unsupported item in IN subquery";
            return;
        }

        Item_cond* cond;

        if (func->functype() == Item_func::TRIG_COND_FUNC)
        {
            Item* item;

            if (func->arguments()[0]->type() == Item::REF_ITEM)
                item = (Item_ref*)(func->arguments()[0])->real_item();
            else
                item = func->arguments()[0];

            cond = (Item_cond*)(item);
        }
        else
        {
            cond = (Item_cond*)(func);
        }

        if (cond->functype() == Item_func::COND_OR_FUNC)
        {
            // (cache=item) case. do nothing. ignore trigcond()?
            if (cond->argument_list()->elements == 1)
                return;

            // (cache=item or isnull(item)) case. remove "or isnull()"
            if (cond->argument_list()->elements == 2)
            {
                // don't know how to deal with this. don't think it's a fatal error either.
                if (gwip->ptWorkStack.empty())
                    return;

                ParseTree* pt = gwip->ptWorkStack.top();

                if (!pt->left() || !pt->right())
                    return;

                SimpleFilter* sf = dynamic_cast<SimpleFilter*>(pt->left()->data());

                //assert (sf && sf->op()->op() == execplan::OP_ISNULL);
                if (!sf || sf->op()->op() != execplan::OP_ISNULL)
                    return;

                delete sf;
                sf = dynamic_cast<SimpleFilter*>(pt->right()->data());

                //idbassert(sf && sf->op()->op() == execplan::OP_EQ);
                if (!sf || sf->op()->op() != execplan::OP_EQ)
                    return;

                // Set NULLMATCH for both operands. It's really a setting for the join.
                // NULLMATCH should only be set when the subtype is NOT_IN. For some IN
                // subqueries with an aggregation column, MySQL inefficiently converts to:
                // (cache=item or item is null) and item is not null, which is equivalent to
                // cache = item. Do not set NULLMATCH for this case.
                // Because we don't know yet whether it is IN or NOT IN, set the candidate bit
                // and switch to NULLMATCH later in the handleNot function.
                if (sf->lhs()->joinInfo() & JOIN_CORRELATED)
                    sf->lhs()->joinInfo(sf->lhs()->joinInfo() | JOIN_NULLMATCH_CANDIDATE);

                if (sf->rhs()->joinInfo() & JOIN_CORRELATED)
                    sf->rhs()->joinInfo(sf->rhs()->joinInfo() | JOIN_NULLMATCH_CANDIDATE);

                pt = pt->right();
                gwip->ptWorkStack.pop();
                gwip->ptWorkStack.push(pt);
            }
        }
        else if (cond->functype() == Item_func::EQ_FUNC)
        {
            // not in (select const ...)
            if (gwip->ptWorkStack.empty())
                return;

            ParseTree* pt = gwip->ptWorkStack.top();
            SimpleFilter* sf = dynamic_cast<SimpleFilter*>(pt->data());

            if (!sf || sf->op()->op() != execplan::OP_EQ)
                return;

            if (sf->lhs()->joinInfo() & JOIN_CORRELATED)
                sf->lhs()->joinInfo(sf->lhs()->joinInfo() | JOIN_NULLMATCH_CANDIDATE);

            if (sf->rhs()->joinInfo() & JOIN_CORRELATED)
                sf->rhs()->joinInfo(sf->rhs()->joinInfo() | JOIN_NULLMATCH_CANDIDATE);
        }
    }
}
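
The structural effect of the (cache=item OR isnull(item)) branch above is to drop the isnull() side and keep only the equality subtree on the work stack. Below is a self-contained sketch of that tree surgery with a toy node type; the real ParseTree and ptWorkStack live in the execution-plan layer.

#include <stack>

// Toy binary node standing in for ParseTree: 'left' holds isnull(item),
// 'right' holds cache=item, as in the case handled above.
struct ToyNode
{
    ToyNode* left  = nullptr;
    ToyNode* right = nullptr;
};

// Replace the OR node on top of the stack with its right child (the equality),
// discarding the isnull() branch, mirroring the pop()/push() sequence above.
void dropIsNullBranch(std::stack<ToyNode*>& workStack)
{
    if (workStack.empty())
        return;

    ToyNode* orNode = workStack.top();

    if (!orNode->left || !orNode->right)
        return;

    workStack.pop();
    workStack.push(orNode->right);
    // Ownership of the detached isnull() branch is left to the surrounding
    // plan generator, as in the original code.
}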
Example #12
int main(void)
{

      /////////////////// INITIALIZATION PART /////////////////////

   cout << fixed << setprecision(4);   // Set a proper output format


      // Create the input observation file stream
   RinexObsStream rin("ebre0300.02o");

      // Create the input observation file stream for REFERENCE STATION
   RinexObsStream rinRef("bell0300.02o");


   //// Broadcast ephemeris part

      // Create the input navigation file stream
   RinexNavStream rnavin("brdc0300.02n");

   RinexNavData rNavData;              // Object to store Rinex navigation data
   GPSEphemerisStore bceStore;         // Object to store satellites ephemeris

      // Storing the ephemeris in "bceStore"
   while (rnavin >> rNavData)
   {
      bceStore.addEphemeris(rNavData);
   }

   bceStore.SearchUser();  // This is the default

   ////


      // EBRE station nominal position
   Position nominalPos(4833520.1852, 41537.0453, 4147461.4963);


      // BELL station nominal position
   Position BnominalPos(4775849.4262, 116814.3084, 4213018.9143);



      // Declare a NeillTropModel object, setting the defaults
   NeillTropModel neillTM( nominalPos.getAltitude(),
                           nominalPos.getGeodeticLatitude(), 30);


      // Declare a NeillTropModel object, setting the defaults (Ref. station)
   NeillTropModel BneillTM( BnominalPos.getAltitude(),
                            BnominalPos.getGeodeticLatitude(), 30);


      // This is the GNSS data structure that will hold all the
      // GNSS-related information
   gnssRinex gRin;
   gnssRinex gRef;


      // Declare base-changing objects: From ECEF to North-East-Up (NEU)
   XYZ2NEU baseChange(nominalPos);
   XYZ2NEU BbaseChange(BnominalPos);


      // Declare a simple filter object to screen PC
   SimpleFilter pcFilter;
   pcFilter.setFilteredType(TypeID::PC);


      // Declare a couple of basic modelers
   BasicModel basic(nominalPos, bceStore);
   BasicModel Bbasic(BnominalPos, bceStore);


      // Objects to mark cycle slips
   LICSDetector2 markCSLI;     // Checks LI cycle slips
   MWCSDetector markCSMW;      // Checks Melbourne-Wubbena cycle slips

   LICSDetector2 BmarkCSLI;     // Checks LI cycle slips
   MWCSDetector BmarkCSMW;      // Checks Melbourne-Wubbena cycle slips


      // Object to compute tidal effects
   SolidTides  solid;

      // Ocean loading model
   OceanLoading ocean("OCEAN-GOT00.dat");

      // Numerical values are x,y pole displacements for Jan/30/2002 (arcsec).
   PoleTides   pole(-0.17153, 0.38661);


      // Vector from EBRE antenna ARP to L1 phase center [UEN]
   Triple offsetL1(0.110, 0.000, 0.000);   // Units in meters

      // Vector from EBRE antenna ARP to L2 phase center [UEN]
   Triple offsetL2(0.128, 0.0000, 0.000);    // Units in meters

      // Vector from monument to antenna ARP [UEN] for EBRE station
   Triple offsetARP(0.0, 0.0, 0.0);    // Units in meters


      // Vector from BELL antenna ARP to L1 phase center [UEN] (TRM29659.00)
   Triple BoffsetL1(0.110, 0.000, 0.000);   // Units in meters
      // Vector from BELL antenna ARP to L2 phase center [UEN] (TRM29659.00)
   Triple BoffsetL2(0.128, 0.0000, 0.000);    // Units in meters
      // Vector from monument to antenna ARP [UEN] for BELL station
   Triple BoffsetARP(0.054, 0.0, 0.0);    // Units in meters


      // Declare an object to correct observables to monument
   CorrectObservables corr(bceStore);
   ((corr.setNominalPosition(nominalPos)).setL1pc(offsetL1)).setL2pc(offsetL2);
   corr.setMonument(offsetARP);

      // Declare an object to correct observables to monument (Ref. station)
   CorrectObservables Bcorr(bceStore);
   Bcorr.setNominalPosition(BnominalPos);
   Bcorr.setL1pc(BoffsetL1);
   Bcorr.setL2pc(BoffsetL2);
   Bcorr.setMonument(BoffsetARP);

      // Objects to compute wind-up effect
   ComputeWindUp windup(bceStore, nominalPos, "PRN_GPS");
   ComputeWindUp Bwindup(bceStore, BnominalPos, "PRN_GPS");


      // Objects to compute satellite antenna phase center effect
   ComputeSatPCenter svPcenter(nominalPos);
   ComputeSatPCenter BsvPcenter(BnominalPos);


      // Objects to compute the tropospheric data
   ComputeTropModel computeTropo(neillTM);
   ComputeTropModel BcomputeTropo(BneillTM);


      // This object defines several handy linear combinations
   LinearCombinations comb;

      // Object to compute linear combinations of data
   ComputeLinear linear1(comb.pcCombWithC1);

   linear1.addLinear(comb.lcCombination);
   linear1.addLinear(comb.pdeltaCombWithC1);
   linear1.addLinear(comb.ldeltaCombination);
   linear1.addLinear(comb.mwubbenaCombWithC1);
   linear1.addLinear(comb.liCombination);

      // Let's use a different object to compute prefit residuals
   ComputeLinear linear2(comb.pcPrefit);
   linear2.addLinear(comb.lcPrefit);


      // Declare an object to process the data using PPP. It is set
      // to use a NEU system
   SolverPPP pppSolver(true);

      // The current processing strategy is "static".
      // The real test for a PPP processing program is to handle coordinates
      // as white noise. In such a case, position error should be about 0.25 m or
      // better. Uncomment the following couple of lines to test this.
//   WhiteNoiseModel wnM(100.0);            // 100 m of sigma
//   pppSolver.setCoordinatesModel(&wnM);


      // Object to keep track of satellite arcs
   SatArcMarker markArc;
   markArc.setDeleteUnstableSats(true);
   markArc.setUnstablePeriod(151.0);

      // Objects to compute gravitational delay effects
   GravitationalDelay grDelay(nominalPos);
   GravitationalDelay BgrDelay(BnominalPos);

      // Object to align phase with code measurements
   PhaseCodeAlignment phaseAlign;

      // Object to compute DOP values
   ComputeDOP cDOP;

      // Object to remove eclipsed satellites
   EclipsedSatFilter eclipsedSV;

      // Statistical summary objects
   PowerSum errorVectorStats;


   TypeIDSet tset;
   tset.insert(TypeID::prefitC);
   tset.insert(TypeID::prefitL);

      // Create an object to compute the single differences of prefit residuals
   DeltaOp delta;
      // By default, it will work on code prefit residuals, so we must change
      // the default and provide a set of types to be differenced
   delta.setDiffTypeSet(tset);

      // Create an object to synchronize rover and reference station
      // data streams. This object will take data out from "rinRef" until
      // it is synchronized with data in "gRin". Default synchronization
      // tolerance is 1 s.
   Synchronize synchro(rinRef, gRin);

      /////////////////// PROCESSING PART /////////////////////


      // Use this variable to select between position printing or model printing
   bool printPosition(true);     // By default, print position and associated
                                 // parameters



      // Loop over all data epochs
   while(rin >> gRin)
   {

      CommonTime time(gRin.header.epoch);

         // Compute the effect of solid, oceanic and pole tides
      Triple tides( solid.getSolidTide(time, nominalPos) +
                    ocean.getOceanLoading("EBRE", time)  +
                    pole.getPoleTide(time, nominalPos)     );

         // Compute the effect of solid, oceanic and pole tides
      Triple Btides( solid.getSolidTide(time, BnominalPos) +
                    ocean.getOceanLoading("BELL", time)  +
                    pole.getPoleTide(time, BnominalPos)     );

         // Update observable correction object with tides information
      corr.setExtraBiases(tides);

         // Update observable correction object with tides information
      Bcorr.setExtraBiases(Btides);

      try
      {

            // First, process reference station
         gRef >> synchro         // Synchronize data streams
              >> Bbasic          // Compute the basic components of model
              >> eclipsedSV      // Remove satellites in eclipse
              >> BgrDelay         // Compute gravitational delay
              >> BsvPcenter       // Compute the effect of satellite phase center
              >> Bcorr            // Correct observables from tides, etc.
              >> Bwindup          // Compute wind-up effect
              >> BcomputeTropo    // Compute tropospheric effect
              >> linear1         // Compute common linear combinations
              >> pcFilter        // Filter out spurious data
              >> BmarkCSLI        // Mark cycle slips: LI algorithm
              >> BmarkCSMW        // Mark cycle slips: Melbourne-Wubbena
              >> markArc         // Keep track of satellite arcs
              >> linear2;        // Compute prefit residuals

         delta.setRefData(gRef.body);

      }
      catch(SynchronizeException& e)   // THIS IS VERY IMPORTANT IN ORDER TO
      {                                // MANAGE A POSSIBLE DESYNCHRONIZATION!!
         continue;
      }
      catch(...)
      {
         cerr << "Exception when processing reference station data at epoch: "
              << gRef.header.epoch << endl;
      }


         // Rover data processing is done here:
      try
      {

            // The following lines are indeed just one line
         gRin >> basic           // Compute the basic components of model
              >> eclipsedSV      // Remove satellites in eclipse
              >> grDelay         // Compute gravitational delay
              >> svPcenter       // Compute the effect of satellite phase center
              >> corr            // Correct observables from tides, etc.
              >> windup          // Compute wind-up effect
              >> computeTropo    // Compute tropospheric effect
              >> linear1         // Compute common linear combinations
              >> pcFilter        // Filter out spurious data
              >> markCSLI        // Mark cycle slips: LI algorithm
              >> markCSMW        // Mark cycle slips: Melbourne-Wubbena
              >> markArc         // Keep track of satellite arcs
              >> phaseAlign      // Align phases with codes
              >> linear2         // Compute prefit residuals
              >> delta
              >> baseChange      // Prepare to use North-East-UP reference frame
              >> cDOP            // Compute DOP figures
              >> pppSolver;      // Solve equations with a Kalman filter

      }
      catch(Exception& e)
      {
         cerr << "Exception at epoch: " << time << "; " << e << endl;
         continue;
      }
      catch(...)
      {
         cerr << "Unknown exception at epoch: " << time << endl;
         continue;
      }


         // Check if we want to print model or position
      if(printPosition)
      {
            // Print here the position results
         cout << static_cast<YDSTime>(time).sod      << "  ";     // Epoch - Output field #1

         cout << pppSolver.getSolution(TypeID::dLat) << "  ";    // dLat  - #2
         cout << pppSolver.getSolution(TypeID::dLon) << "  ";    // dLon  - #3
         cout << pppSolver.getSolution(TypeID::dH) << "  ";      // dH    - #4
         cout << pppSolver.getSolution(TypeID::wetMap) << "  ";  // Tropo - #5

         cout << pppSolver.getVariance(TypeID::dLat) << "  "; // Cov dLat - #6
         cout << pppSolver.getVariance(TypeID::dLon) << "  "; // Cov dLon - #7
         cout << pppSolver.getVariance(TypeID::dH) << "  ";   // Cov dH   - #8
         cout << pppSolver.getVariance(TypeID::wetMap) << "  ";//Cov Tropo- #9

         cout << gRin.numSats()        << "  ";       // Satellite number - #10

         cout << cDOP.getGDOP()        << "  ";                   // GDOP - #11
         cout << cDOP.getPDOP()        << "  ";                   // PDOP - #12
         cout << cDOP.getTDOP()        << "  ";                   // TDOP - #13
         cout << cDOP.getHDOP()        << "  ";                   // HDOP - #14
         cout << cDOP.getVDOP()        << "  ";                   // VDOP - #15

         cout << endl;

            // For statistical purposes we discard the first two hours of data
         if (static_cast<YDSTime>(time).sod > 7200.0)
         {
               // Statistical summary
            double errorV( pppSolver.solution[1]*pppSolver.solution[1] +
                           pppSolver.solution[2]*pppSolver.solution[2] +
                           pppSolver.solution[3]*pppSolver.solution[3] );

               // Get module of position error vector
            errorV = std::sqrt(errorV);

               // Add to statistical summary object
            errorVectorStats.add(errorV);
         }

      }  // End of position printing
      else
      {
            // Print here the model results
            // First, define types we want to keep
         TypeIDSet types;
         types.insert(TypeID::L1);
         types.insert(TypeID::L2);
         types.insert(TypeID::P1);
         types.insert(TypeID::P2);
         types.insert(TypeID::PC);
         types.insert(TypeID::LC);
         types.insert(TypeID::rho);
         types.insert(TypeID::dtSat);
         types.insert(TypeID::rel);
         types.insert(TypeID::gravDelay);
         types.insert(TypeID::tropo);
         types.insert(TypeID::dryTropo);
         types.insert(TypeID::dryMap);
         types.insert(TypeID::wetTropo);
         types.insert(TypeID::wetMap);
         types.insert(TypeID::tropoSlant);
         types.insert(TypeID::windUp);
         types.insert(TypeID::satPCenter);
         types.insert(TypeID::satX);
         types.insert(TypeID::satY);
         types.insert(TypeID::satZ);
         types.insert(TypeID::elevation);
         types.insert(TypeID::azimuth);
         types.insert(TypeID::satArc);
         types.insert(TypeID::prefitC);
         types.insert(TypeID::prefitL);
         types.insert(TypeID::dx);
         types.insert(TypeID::dy);
         types.insert(TypeID::dz);
         types.insert(TypeID::dLat);
         types.insert(TypeID::dLon);
         types.insert(TypeID::dH);
         types.insert(TypeID::cdt);

         gRin.keepOnlyTypeID(types);   // Delete the types not in 'types'

            // Iterate through the GNSS Data Structure
         satTypeValueMap::const_iterator it;
         for (it = gRin.body.begin(); it!= gRin.body.end(); it++) 
         {

               // Print epoch
            cout << static_cast<YDSTime>(time).year        << " ";
            cout << static_cast<YDSTime>(time).doy         << " ";
            cout << static_cast<YDSTime>(time).sod  << " ";

            cout << cDOP.getGDOP()        << "  ";  // GDOP #4
            cout << cDOP.getPDOP()        << "  ";  // PDOP #5
            cout << cDOP.getTDOP()        << "  ";  // TDOP #6
            cout << cDOP.getHDOP()        << "  ";  // HDOP #7
            cout << cDOP.getVDOP()        << "  ";  // VDOP #8

               // Print satellite information (system and PRN)
            cout << (*it).first << " ";

               // Print model values
            typeValueMap::const_iterator itObs;
            for( itObs  = (*it).second.begin(); 
                 itObs != (*it).second.end();
                 itObs++ )
            {
               bool printNames(true);  // Whether to print type names or not
               if (printNames)
               {
                  cout << (*itObs).first << " ";
               }

               cout << (*itObs).second << " ";

            }  // End for( itObs = ... )

            cout << endl;

         }  // End for (it = gRin.body.begin(); ... )

      }  // End of model printing

   }  // End of 'while(rin >> gRin)...'



      // Print statistical summary in cerr
   if(printPosition)
   {
      cerr << "Module of error vector: Average = "
           << errorVectorStats.average() << " m    Std. dev. = "
           << std::sqrt(errorVectorStats.variance()) << " m" << endl;
   }



   exit(0);       // End of program

}
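
The statistics block near the end of Example #12 reduces to accumulating the magnitude of the 3-D position error per epoch and reporting its mean and spread. A condensed sketch using the same PowerSum calls (add, average, variance) that appear above; PowerSum itself comes from the GPSTk library used throughout the example.

#include <cmath>

   // Accumulate one epoch's position error magnitude into the running summary.
double accumulateError(PowerSum& stats, double dLat, double dLon, double dH)
{
   double errorV( std::sqrt(dLat*dLat + dLon*dLon + dH*dH) );
   stats.add(errorV);
   return errorV;
}

   // After the processing loop, the summary is reported as in the example:
   //   cerr << "Average = " << errorVectorStats.average() << " m    Std. dev. = "
   //        << std::sqrt(errorVectorStats.variance()) << " m" << endl;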
Example #13
SimpleFilter::SimpleFilter(const SimpleFilter& rhs) :
	fOp(rhs.op()),
	fIndexFlag(rhs.indexFlag()),
	fJoinFlag(rhs.joinFlag())
{
	fLhs = rhs.lhs()->clone();
	fRhs = rhs.rhs()->clone();

	fSimpleColumnList.clear();
	fAggColumnList.clear();
	fWindowFunctionColumnList.clear();

	SimpleColumn *lsc = dynamic_cast<SimpleColumn*>(fLhs);
	FunctionColumn *lfc = dynamic_cast<FunctionColumn*>(fLhs);
	ArithmeticColumn *lac = dynamic_cast<ArithmeticColumn*>(fLhs);
	WindowFunctionColumn *laf = dynamic_cast<WindowFunctionColumn*>(fLhs);
	AggregateColumn *lagc = dynamic_cast<AggregateColumn*>(fLhs);
	SimpleColumn *rsc = dynamic_cast<SimpleColumn*>(fRhs);
	FunctionColumn *rfc = dynamic_cast<FunctionColumn*>(fRhs);
	ArithmeticColumn *rac = dynamic_cast<ArithmeticColumn*>(fRhs);
	AggregateColumn *ragc = dynamic_cast<AggregateColumn*>(fRhs);
	WindowFunctionColumn *raf = dynamic_cast<WindowFunctionColumn*>(fRhs);

	if (lsc)
	{
		fSimpleColumnList.push_back(lsc);
	}
	else if (lagc)
	{
		fAggColumnList.push_back(lagc);
	}
	else if (lfc)
	{
		fSimpleColumnList.insert(fSimpleColumnList.end(), lfc->simpleColumnList().begin(), lfc->simpleColumnList().end());
		fAggColumnList.insert(fAggColumnList.end(), lfc->aggColumnList().begin(), lfc->aggColumnList().end());
		fWindowFunctionColumnList.insert
		  (fWindowFunctionColumnList.end(), lfc->windowfunctionColumnList().begin(), lfc->windowfunctionColumnList().end());
	}
	else if (lac)
	{
		fSimpleColumnList.insert(fSimpleColumnList.end(), lac->simpleColumnList().begin(), lac->simpleColumnList().end());
		fAggColumnList.insert(fAggColumnList.end(), lac->aggColumnList().begin(), lac->aggColumnList().end());
		fWindowFunctionColumnList.insert
		  (fWindowFunctionColumnList.end(), lac->windowfunctionColumnList().begin(), lac->windowfunctionColumnList().end());
	}
	else if (laf)
	{
		fWindowFunctionColumnList.push_back(laf);
	}

	if (rsc)
	{
		fSimpleColumnList.push_back(rsc);
	}
	else if (ragc)
	{
		fAggColumnList.push_back(ragc);
	}
	else if (rfc)
	{
		fSimpleColumnList.insert
		  (fSimpleColumnList.end(), rfc->simpleColumnList().begin(), rfc->simpleColumnList().end());
		fAggColumnList.insert
		  (fAggColumnList.end(), rfc->aggColumnList().begin(), rfc->aggColumnList().end());
		fWindowFunctionColumnList.insert
		  (fWindowFunctionColumnList.end(), rfc->windowfunctionColumnList().begin(), rfc->windowfunctionColumnList().end());
	}
	else if (rac)
	{
		fSimpleColumnList.insert(fSimpleColumnList.end(), rac->simpleColumnList().begin(), rac->simpleColumnList().end());
		fAggColumnList.insert(fAggColumnList.end(), rac->aggColumnList().begin(), rac->aggColumnList().end());
		fWindowFunctionColumnList.insert
		  (fWindowFunctionColumnList.end(), rac->windowfunctionColumnList().begin(), rac->windowfunctionColumnList().end());
	}
	else if (raf)
	{
		fWindowFunctionColumnList.push_back(raf);
	}
}
Example #14
	void serializeCSEP()
	{
	   /*
		* erydbSelectExecutionPlan
		* This is a large class; it makes more sense to write == operators
		* for everything than to write a giant equivalence test here.
		* For now this is mostly a regression test.
		*/

		erydbSelectExecutionPlan csep1, csep2;
		erydbSelectExecutionPlan::ReturnedColumnList colList;
		ParseTree* filterList;
		erydbExecutionPlan *cep;
		ByteStream b;
		
		cep = &csep2;
		
		CPPUNIT_ASSERT(csep1 == csep2);
		CPPUNIT_ASSERT(!(csep1 != csep2));
		CPPUNIT_ASSERT(csep1 == cep);
		CPPUNIT_ASSERT(!(csep1 != cep));
		
        // returned columns
		SimpleColumn *sc = new SimpleColumn("tpch.region.r_regionkey");
		colList.push_back(sc);
               
        // filters
		erydbSelectExecutionPlan::Parser parser;
		std::vector<Token> tokens;
		Token t;
        
		SimpleFilter *sf = new SimpleFilter();
		SimpleColumn *lhs = new SimpleColumn(*sc);       
		SimpleColumn *rhs = new SimpleColumn("tpch.nation.n_regionkey");
		Operator *op = new Operator("=");
        
		sf->op(op);
		sf->lhs(lhs);
		sf->rhs(rhs);
        
		t.value = sf;
		tokens.push_back(t);
        
		Operator *op1 = new Operator ("and");
		t.value = op1;
		tokens.push_back(t);
        
		SimpleFilter *sf1 = new SimpleFilter();
		SimpleColumn *lhs1 = new SimpleColumn (*rhs);       
		ConstantColumn *constCol = new ConstantColumn("3", ConstantColumn::NUM);        
		Operator *op2 = new Operator("!=");
        
		sf1->op(op2);
		sf1->lhs(lhs1);
		sf1->rhs(constCol);

		t.value = sf1;
		tokens.push_back(t);
        
		filterList = parser.parse(tokens.begin(), tokens.end());
        
        // draw filterList tree
		filterList->drawTree("selectExecutionPlan_1.dot");
                     
        // erydb execution plan        
		csep1.returnedCols (colList);
		csep1.filters (filterList);
		
		CPPUNIT_ASSERT(csep1 != csep2);
		CPPUNIT_ASSERT(!(csep1 == csep2));
		CPPUNIT_ASSERT(csep1 != cep);
		CPPUNIT_ASSERT(!(csep1 == cep));
		
		csep1.serialize(b);
		csep2.unserialize(b);
		CPPUNIT_ASSERT(b.length() == 0);
		
		CPPUNIT_ASSERT(csep1 == csep2);
		CPPUNIT_ASSERT(!(csep1 != csep2));
		CPPUNIT_ASSERT(csep1 == cep);
		CPPUNIT_ASSERT(!(csep1 != cep));
		
		erydbSelectExecutionPlan csep3, csep4;
        
        // subselect
		erydbSelectExecutionPlan *subselect = new erydbSelectExecutionPlan;
		subselect->location(erydbSelectExecutionPlan::WHERE);
		subselect->dependent (false);
		CPPUNIT_ASSERT (subselect->location() == erydbSelectExecutionPlan::WHERE);
		CPPUNIT_ASSERT (subselect->dependent() == false);
		erydbSelectExecutionPlan::SelectList selectList;
		selectList.push_back(subselect);
		csep3.subSelects(selectList);
        
        // exist filter
		erydbSelectExecutionPlan* cep1 = new erydbSelectExecutionPlan();
		ExistsFilter *filter = new ExistsFilter();
		delete filter;
		filter = new ExistsFilter(cep1);        
		filter->exists(cep1);
		//erydbSelectExecutionPlan* cep2 = const_cast<erydbSelectExecutionPlan*>(filter->exists());

		erydbSelectExecutionPlan::Parser parser1;
		std::vector<Token> tokens1;
		Token t1;
		t1.value = filter;
		tokens1.push_back(t1);
		csep3.filters(parser1.parse(tokens1.begin(), tokens1.end()));
		
		csep3.serialize(b);
		csep4.unserialize(b);
		
		CPPUNIT_ASSERT(csep3 == csep4);
		CPPUNIT_ASSERT(!(csep3 != csep4));
		
		
	}
Example #15
    void selectExecutionPlan_1() {
        cout << "SQL: select region.r_regionkey from region, nation where nation.n_regionkey = region.r_regionkey and nation.n_regionkey != 3;" << endl;

        erydbSelectExecutionPlan csep;
        CPPUNIT_ASSERT (csep.location() == erydbSelectExecutionPlan::MAIN);
        CPPUNIT_ASSERT (csep.dependent() == false);
        CPPUNIT_ASSERT (csep.subSelects().size() == 0);
        
        // returned columns
        erydbSelectExecutionPlan::ReturnedColumnList colList;        
        SimpleColumn *sc = new SimpleColumn("tpch.region.r_regionkey", 0);
        colList.push_back(sc);        
        ArithmeticColumn *ac = new ArithmeticColumn("a+sum(r_regionkey)", 0);
        colList.push_back(ac);
        csep.returnedCols (colList);   
        CPPUNIT_ASSERT(csep.returnedCols().size() == 2);     
               
        // filters
        erydbSelectExecutionPlan::FilterTokenList filterTokenList;
       
        SimpleFilter *sf = new SimpleFilter();
        SimpleColumn *lhs = new SimpleColumn();
        *lhs = *sc;       
        SimpleColumn *rhs = new SimpleColumn("tpch.nation.n_regionkey", 0);
        CPPUNIT_ASSERT (*lhs == *sc);
        CPPUNIT_ASSERT (*rhs != *lhs);
        Operator *op = new Operator("=");
        
        sf->op(op);
        sf->lhs(lhs);
        sf->rhs(rhs);
        filterTokenList.push_back (sf);
        
        filterTokenList.push_back( new Operator ("And") );   
        SimpleFilter *sf1 = new SimpleFilter (new Operator("="), sc->clone(), ac->clone());
        
        filterTokenList.push_back (sf1);
               
        csep.filterTokenList (filterTokenList);
        ParseTree *filterList = const_cast<ParseTree*> (csep.filters());
        
        // draw filterList tree
        filterList->drawTree("selectExecutionPlan_1.dot");                     
        csep.filters (filterList);

        // Group by
	    erydbSelectExecutionPlan::GroupByColumnList groupByList;
        groupByList.push_back(sc->clone());        
        csep.groupByCols (groupByList);
        CPPUNIT_ASSERT(csep.groupByCols().size() == 1);
        
        // Having
        erydbSelectExecutionPlan::FilterTokenList havingTokenList;
        SimpleFilter *having = new SimpleFilter( new Operator("="),
                                                 new ArithmeticColumn("sum(volumn)", 0),
                                                 new ConstantColumn(8));
        havingTokenList.push_back (having);
        csep.havingTokenList (havingTokenList);
        CPPUNIT_ASSERT (*sf1 != *having);
        CPPUNIT_ASSERT (csep.havingTokenList().size() == 1);                                                         
        
        // Order by                                                
	    erydbSelectExecutionPlan::OrderByColumnList orderByList;
	    ArithmeticColumn *o1 = new ArithmeticColumn(*ac);
	    o1->asc(false);
	    orderByList.push_back(o1);
        csep.orderByCols(orderByList);
        CPPUNIT_ASSERT(csep.orderByCols().size() == 1);        
        
        // another csep
        erydbSelectExecutionPlan *newcsep = new erydbSelectExecutionPlan(erydbSelectExecutionPlan::FROM);
        erydbSelectExecutionPlan::ReturnedColumnList ncolList;        
        SimpleColumn *newsc = new SimpleColumn("tpch.region.r_regionkey", 0);
        ncolList.push_back(newsc);        
        newcsep->returnedCols (ncolList);  
        erydbSelectExecutionPlan::FilterTokenList nfilterTokenList; 
        SimpleFilter *newsf = new SimpleFilter ( new Operator (">"),
                                    sc->clone(),
                                    newsc->clone());
        nfilterTokenList.push_back(newsf);
        newcsep->filterTokenList (nfilterTokenList);
        erydbSelectExecutionPlan::FilterTokenList nhavingTokenList;
        SimpleFilter *newhaving = new SimpleFilter ( new Operator (">"),
                                    sc->clone(),
                                    newsc->clone());  
        CPPUNIT_ASSERT (*newsf == *newhaving);                                    
        nhavingTokenList.push_back(newhaving);
        newcsep->havingTokenList (nhavingTokenList);
        CPPUNIT_ASSERT (*newcsep != csep); 
        CPPUNIT_ASSERT (*newcsep->filters() == *newcsep->having());
        ByteStream b;
        csep.serialize (b);
        newcsep->unserialize (b);
        CPPUNIT_ASSERT (csep == *newcsep);
        erydbSelectExecutionPlan::SelectList selectList;
        selectList.push_back(newcsep);
        csep.subSelects(selectList);        
        cout << "\nerydb Execution Plan:" << endl;
        cout << csep;
        cout << " --- end of test 1 ---" << endl;
    }