SampleRateConverter:: SampleRateConverter( Float newSampleRate )
	:	input( NULL ),
		sampleRate( math::max( newSampleRate, Float(0) ) ),
		subSampleOffset( 0 )
{
}
Example #2
0
int main(int argc, char* argv[])
{
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
	glutInitWindowSize(800, 600);
	glutInitWindowPosition(100,100);
	glutCreateWindow("OGLplus+GLUT+GLEW");

	if(glewInit() == GLEW_OK) try
	{
		glGetError();
		namespace se = oglplus::smart_enums;
		oglplus::Context gl;
		oglplus::AMD_performance_monitor apm;

		const char* f1[2] = {"  +--", "  `--"};
		const char* f2[2] = {"  |", "   "};
		const char* f3[2] = {"  +--", "  `--"};
		const char* f4[2] = {"  |", ""};

		auto groups = apm.GetGroups();
		auto gi=groups.begin(), ge=groups.end();

		std::cout << "--+-{Performance monitor groups}" << std::endl;
		if(gi != ge) std::cout << "  |" << std::endl;

		while(gi != ge)
		{
			auto group = *gi;
			++gi;
			const int fgi = (gi == ge)?1:0;

			std::cout << f1[fgi] << "+-[";
			std::cout << group.GetString();
			std::cout << "]" << std::endl;
			std::cout << f2[fgi] << f4[0] << std::endl;

			GLint max;
			auto counters = group.GetCounters(max);
			auto ci=counters.begin(), ce=counters.end();

			while(ci != ce)
			{
				auto counter = *ci;
				++ci;
				const int fci = (ci == ce)?1:0;
				std::cout << f2[fgi] << f3[fci] << "(";
				std::cout << counter.GetString();
				std::cout << ") [";
				std::cout << EnumValueName(counter.Type());
				std::cout << "]" << std::endl;
				std::cout << f2[fgi] << f4[fci] << std::endl;
			}
			if(fgi != 0)
			{
				oglplus::PerfMonitorAMD mon;
				mon.SelectCounters(true, counters);
				mon.Begin();
				// Imagine some heavy duty rendering here
				gl.Clear().ColorBuffer();
				mon.End();

				if(mon.ResultAvailable())
				{
					std::vector<oglplus::PerfMonitorAMDResult>
						values;
					mon.Result(values);

					auto ri=values.begin(), re=values.end();
					while(ri != re)
					{
						auto counter = ri->Counter();
						auto type = counter.Type();

						std::cout << counter.GetString();
						std::cout << " [";
						std::cout << EnumValueName(type);
						std::cout << "] = ";
						if(type == se::UnsignedInt())
							std::cout << ri->Low();
						else if(type == se::Float())
							std::cout << ri->Float();
						else if(type == se::Percentage())
							std::cout << ri->Float() << "%";
						else if(type == se::UnsignedInt64())
							std::cout << "too big";
						std::cout << std::endl;
						++ri;
					}
				}
			}
		}
		return 0;
	}
	catch(oglplus::Error& err)
	{
		std::cerr
			<< "Error (in "
			<< err.GLFunc()
			<< "'): "
			<< err.what()
			<< " ["
			<< err.SourceFile()
			<< ":"
			<< err.SourceLine()
			<< "] "
			<< std::endl;
	}
	return 1;
}
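A minimal sketch of factoring the monitoring block above into a helper. It reuses only the oglplus calls shown in this example (PerfMonitorAMD, SelectCounters, Begin/End, ResultAvailable, Result, the smart-enum type checks); the helper name and template parameter are illustrative, not part of the original code.

// Sketch: sample an already-enumerated counter set once and print the results.
template <typename Counters>
void SampleCounters(oglplus::Context& gl, const Counters& counters)
{
	namespace se = oglplus::smart_enums;

	oglplus::PerfMonitorAMD mon;
	mon.SelectCounters(true, counters);

	mon.Begin();
	gl.Clear().ColorBuffer();   // stand-in for the real rendering workload
	mon.End();

	if(!mon.ResultAvailable()) return;

	std::vector<oglplus::PerfMonitorAMDResult> values;
	mon.Result(values);
	for(auto ri = values.begin(); ri != values.end(); ++ri)
	{
		auto counter = ri->Counter();
		auto type = counter.Type();
		std::cout << counter.GetString() << " = ";
		if(type == se::Float() || type == se::Percentage())
			std::cout << ri->Float();
		else
			std::cout << ri->Low();
		std::cout << std::endl;
	}
}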
Example #3
0
bool GMM::train_(ClassificationData &trainingData){
    
    //Clear any old models
	clear();
    
    if( trainingData.getNumSamples() == 0 ){
        errorLog << "train_(ClassificationData &trainingData) - Training data is empty!" << std::endl;
        return false;
    }
    
    //Set the number of features and number of classes and resize the models buffer
    numInputDimensions = trainingData.getNumDimensions();
    numClasses = trainingData.getNumClasses();
    models.resize(numClasses);
    
    if( numInputDimensions >= 6 ){
        warningLog << "train_(ClassificationData &trainingData) - The number of features in your training data is high (" << numInputDimensions << ").  The GMMClassifier does not work well with high dimensional data, you might get better results from one of the other classifiers." << std::endl;
    }
    
    //Get the ranges of the training data if the training data is going to be scaled
	ranges = trainingData.getRanges();
    if( !trainingData.scale(GMM_MIN_SCALE_VALUE, GMM_MAX_SCALE_VALUE) ){
        errorLog << "train_(ClassificationData &trainingData) - Failed to scale training data!" << std::endl;
        return false;
    }

    //Fit a Mixture Model to each class (independently)
    for(UINT k=0; k<numClasses; k++){
        UINT classLabel = trainingData.getClassTracker()[k].classLabel;
        ClassificationData classData = trainingData.getClassData( classLabel );
        
        //Train the Mixture Model for this class
        GaussianMixtureModels gaussianMixtureModel;
        gaussianMixtureModel.setNumClusters( numMixtureModels );
        gaussianMixtureModel.setMinChange( minChange );
        gaussianMixtureModel.setMaxNumEpochs( maxIter );
        
        if( !gaussianMixtureModel.train( classData.getDataAsMatrixFloat() ) ){
            errorLog << "train_(ClassificationData &trainingData) - Failed to train Mixture Model for class " << classLabel << std::endl;
            return false;
        }
        
        //Setup the model container
        models[k].resize( numMixtureModels );
        models[k].setClassLabel( classLabel );
        
        //Store the mixture model in the container
        for(UINT j=0; j<numMixtureModels; j++){
            models[k][j].mu = gaussianMixtureModel.getMu().getRowVector(j);
            models[k][j].sigma = gaussianMixtureModel.getSigma()[j];
            
            //Compute the determinant and invSigma for the realtime prediction
            LUDecomposition ludcmp( models[k][j].sigma );
            if( !ludcmp.inverse( models[k][j].invSigma ) ){
                models.clear();
                errorLog << "train_(ClassificationData &trainingData) - Failed to invert Matrix for class " << classLabel << "!" << std::endl;
                return false;
            }
            models[k][j].det = ludcmp.det();
        }
        
        //Compute the normalization factor
        models[k].recomputeNormalizationFactor();
        
        //Compute the rejection thresholds
        Float mu = 0;
        Float sigma = 0;
        VectorFloat predictionResults(classData.getNumSamples(),0);
        for(UINT i=0; i<classData.getNumSamples(); i++){
            VectorFloat sample = classData[i].getSample();
            predictionResults[i] = models[k].computeMixtureLikelihood( sample );
            mu += predictionResults[i];
        }
        
        //Update mu
        mu /= Float( classData.getNumSamples() );
        
        //Calculate the standard deviation
        for(UINT i=0; i<classData.getNumSamples(); i++) 
            sigma += grt_sqr( (predictionResults[i]-mu) );
        sigma = grt_sqrt( sigma / (Float(classData.getNumSamples())-1.0) );
        
        //Set the models training mu and sigma 
        models[k].setTrainingMuAndSigma(mu,sigma);
        
        if( !models[k].recomputeNullRejectionThreshold(nullRejectionCoeff) && useNullRejection ){
            warningLog << "train_(ClassificationData &trainingData) - Failed to recompute rejection threshold for class " << classLabel << " - the nullRjectionCoeff value is too high!" << std::endl;
        }
        
        //cout << "Training Mu: " << mu << " TrainingSigma: " << sigma << " RejectionThreshold: " << models[k].getNullRejectionThreshold() << std::endl;
        //models[k].printModelValues();
    }
    
    //Reset the class labels
    classLabels.resize(numClasses);
    for(UINT k=0; k<numClasses; k++){
        classLabels[k] = models[k].getClassLabel();
    }
    
    //Resize the rejection thresholds
    nullRejectionThresholds.resize(numClasses);
    for(UINT k=0; k<numClasses; k++){
        nullRejectionThresholds[k] = models[k].getNullRejectionThreshold();
    }
    
    //Flag that the models have been trained
    trained = true;
    
    return true;
}
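The rejection-threshold statistics computed above (the mean and unbiased standard deviation of the per-class mixture likelihoods) can be expressed as a small standalone helper. A minimal sketch in plain C++, independent of the GRT types; the function name is illustrative.

// Sketch: mean and (unbiased) standard deviation of a vector of likelihoods,
// mirroring the mu/sigma computation in train_ above.
#include <cmath>
#include <vector>

static void likelihoodStats(const std::vector<double> &likelihoods,
                            double &mu, double &sigma){
    const std::size_t N = likelihoods.size();
    mu = 0; sigma = 0;
    if( N < 2 ) return; // need at least two samples for the N-1 divisor
    for(std::size_t i=0; i<N; i++) mu += likelihoods[i];
    mu /= double(N);
    for(std::size_t i=0; i<N; i++) sigma += (likelihoods[i]-mu)*(likelihoods[i]-mu);
    sigma = std::sqrt( sigma / double(N-1) );
}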
Example #4
0
TemporaryRef<ID2D1Brush>
DrawTargetD2D1::CreateBrushForPattern(const Pattern &aPattern, Float aAlpha)
{
  if (!IsPatternSupportedByD2D(aPattern)) {
    RefPtr<ID2D1SolidColorBrush> colBrush;
    mDC->CreateSolidColorBrush(D2D1::ColorF(1.0f, 1.0f, 1.0f, 1.0f), byRef(colBrush));
    return colBrush;
  }

  if (aPattern.GetType() == PatternType::COLOR) {
    RefPtr<ID2D1SolidColorBrush> colBrush;
    Color color = static_cast<const ColorPattern*>(&aPattern)->mColor;
    mDC->CreateSolidColorBrush(D2D1::ColorF(color.r, color.g,
                                            color.b, color.a),
                               D2D1::BrushProperties(aAlpha),
                               byRef(colBrush));
    return colBrush;
  } else if (aPattern.GetType() == PatternType::LINEAR_GRADIENT) {
    RefPtr<ID2D1LinearGradientBrush> gradBrush;
    const LinearGradientPattern *pat =
      static_cast<const LinearGradientPattern*>(&aPattern);

    GradientStopsD2D *stops = static_cast<GradientStopsD2D*>(pat->mStops.get());

    if (!stops) {
      gfxDebug() << "No stops specified for gradient pattern.";
      return nullptr;
    }

    if (pat->mBegin == pat->mEnd) {
      RefPtr<ID2D1SolidColorBrush> colBrush;
      uint32_t stopCount = stops->mStopCollection->GetGradientStopCount();
      vector<D2D1_GRADIENT_STOP> d2dStops(stopCount);
      stops->mStopCollection->GetGradientStops(&d2dStops.front(), stopCount);
      mDC->CreateSolidColorBrush(d2dStops.back().color,
                                 D2D1::BrushProperties(aAlpha),
                                 byRef(colBrush));
      return colBrush;
    }

    mDC->CreateLinearGradientBrush(D2D1::LinearGradientBrushProperties(D2DPoint(pat->mBegin),
                                                                       D2DPoint(pat->mEnd)),
                                   D2D1::BrushProperties(aAlpha, D2DMatrix(pat->mMatrix)),
                                   stops->mStopCollection,
                                   byRef(gradBrush));
    return gradBrush;
  } else if (aPattern.GetType() == PatternType::RADIAL_GRADIENT) {
    RefPtr<ID2D1RadialGradientBrush> gradBrush;
    const RadialGradientPattern *pat =
      static_cast<const RadialGradientPattern*>(&aPattern);

    GradientStopsD2D *stops = static_cast<GradientStopsD2D*>(pat->mStops.get());

    if (!stops) {
      gfxDebug() << "No stops specified for gradient pattern.";
      return nullptr;
    }

    // This will not be a complex radial gradient brush.
    mDC->CreateRadialGradientBrush(
      D2D1::RadialGradientBrushProperties(D2DPoint(pat->mCenter2),
                                          D2DPoint(pat->mCenter1 - pat->mCenter2),
                                          pat->mRadius2, pat->mRadius2),
      D2D1::BrushProperties(aAlpha, D2DMatrix(pat->mMatrix)),
      stops->mStopCollection,
      byRef(gradBrush));

    return gradBrush;
  } else if (aPattern.GetType() == PatternType::SURFACE) {
    const SurfacePattern *pat =
      static_cast<const SurfacePattern*>(&aPattern);

    if (!pat->mSurface) {
      gfxDebug() << "No source surface specified for surface pattern";
      return nullptr;
    }


    Matrix mat = pat->mMatrix;
    
    RefPtr<ID2D1ImageBrush> imageBrush;
    RefPtr<ID2D1Image> image = GetImageForSurface(pat->mSurface, mat, pat->mExtendMode);
    mDC->CreateImageBrush(image,
                          D2D1::ImageBrushProperties(D2D1::RectF(0, 0,
                                                                  Float(pat->mSurface->GetSize().width),
                                                                  Float(pat->mSurface->GetSize().height)),
                                  D2DExtend(pat->mExtendMode), D2DExtend(pat->mExtendMode),
                                  D2DInterpolationMode(pat->mFilter)),
                          D2D1::BrushProperties(aAlpha, D2DMatrix(mat)),
                          byRef(imageBrush));
    return imageBrush;
  }

  gfxWarning() << "Invalid pattern type detected.";
  return nullptr;
}
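A hypothetical usage sketch, as called from inside another DrawTargetD2D1 method. ColorPattern, Color, and RefPtr are the Moz2D types referenced above; the specific color and alpha values are made up for illustration.

// Hypothetical usage: build a half-transparent solid-color brush for a fill.
ColorPattern pattern(Color(0.0f, 0.5f, 1.0f, 1.0f));
RefPtr<ID2D1Brush> brush = CreateBrushForPattern(pattern, 0.5f);
if (brush) {
  // hand the brush to whatever D2D fill/stroke call is being prepared
}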
Example #5
0
 Expr::Expr(float val) : contents(new ExprContents(makeFloatImm(val), Float(32))) {
     contents->isImmediate = true;
 }
Example #6
0
void
ImageHost::Composite(EffectChain& aEffectChain,
                     float aOpacity,
                     const gfx::Matrix4x4& aTransform,
                     const gfx::Filter& aFilter,
                     const gfx::Rect& aClipRect,
                     const nsIntRegion* aVisibleRegion)
{
  if (!GetCompositor()) {
    // should only happen when a tab is dragged to another window and
    // async-video is still sending frames but we haven't set the
    // new compositor yet.
    return;
  }
  if (!mFrontBuffer) {
    return;
  }

  // Make sure the front buffer has a compositor
  mFrontBuffer->SetCompositor(GetCompositor());

  AutoLockCompositableHost autoLock(this);
  if (autoLock.Failed()) {
    NS_WARNING("failed to lock front buffer");
    return;
  }

  if (!mFrontBuffer->BindTextureSource(mTextureSource)) {
    return;
  }

  if (!mTextureSource) {
    // BindTextureSource above should have returned false!
    MOZ_ASSERT(false);
    return;
  }

  bool isAlphaPremultiplied = !(mFrontBuffer->GetFlags() & TextureFlags::NON_PREMULTIPLIED);
  RefPtr<TexturedEffect> effect = CreateTexturedEffect(mFrontBuffer->GetFormat(),
                                                       mTextureSource.get(),
                                                       aFilter,
                                                       isAlphaPremultiplied);
  if (!effect) {
    return;
  }

  aEffectChain.mPrimaryEffect = effect;
  IntSize textureSize = mTextureSource->GetSize();
  gfx::Rect gfxPictureRect
    = mHasPictureRect ? gfx::Rect(0, 0, mPictureRect.width, mPictureRect.height)
                      : gfx::Rect(0, 0, textureSize.width, textureSize.height);

  gfx::Rect pictureRect(0, 0,
                        mPictureRect.width,
                        mPictureRect.height);
  BigImageIterator* it = mTextureSource->AsBigImageIterator();
  if (it) {

    // This iteration does not work if we have multiple texture sources here
    // (e.g. 3 YCbCr textures). There's nothing preventing the different
    // planes from having different resolutions or tile sizes. For example, a
    // YCbCr frame could have Cb and Cr planes that are half the resolution of
    // the Y plane, in such a way that the Y plane overflows the maximum
    // texture size and the Cb and Cr planes do not. Then the Y plane would be
    // split into multiple tiles and the Cb and Cr planes would just be one
    // tile each.
    // To handle the general case correctly, we'd have to create a grid of
    // intersected tiles over all planes, and then draw each grid tile using
    // the corresponding source tiles from all planes, with appropriate
    // per-plane per-tile texture coords.
    // DrawQuad currently assumes that all planes use the same texture coords.
    MOZ_ASSERT(it->GetTileCount() == 1 || !mTextureSource->GetNextSibling(),
               "Can't handle multi-plane BigImages");

    it->BeginBigImageIteration();
    do {
      nsIntRect tileRect = it->GetTileRect();
      gfx::Rect rect(tileRect.x, tileRect.y, tileRect.width, tileRect.height);
      if (mHasPictureRect) {
        rect = rect.Intersect(pictureRect);
        effect->mTextureCoords = Rect(Float(rect.x - tileRect.x)/ tileRect.width,
                                      Float(rect.y - tileRect.y) / tileRect.height,
                                      Float(rect.width) / tileRect.width,
                                      Float(rect.height) / tileRect.height);
      } else {
        effect->mTextureCoords = Rect(0, 0, 1, 1);
      }
      if (mFrontBuffer->GetFlags() & TextureFlags::ORIGIN_BOTTOM_LEFT) {
        effect->mTextureCoords.y = effect->mTextureCoords.YMost();
        effect->mTextureCoords.height = -effect->mTextureCoords.height;
      }
      GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                                aOpacity, aTransform);
      GetCompositor()->DrawDiagnostics(DiagnosticFlags::IMAGE | DiagnosticFlags::BIGIMAGE,
                                       rect, aClipRect, aTransform, mFlashCounter);
    } while (it->NextTile());
    it->EndBigImageIteration();
    // layer border
    GetCompositor()->DrawDiagnostics(DiagnosticFlags::IMAGE,
                                     gfxPictureRect, aClipRect,
                                     aTransform, mFlashCounter);
  } else {
    IntSize textureSize = mTextureSource->GetSize();
    gfx::Rect rect;
    if (mHasPictureRect) {
      effect->mTextureCoords = Rect(Float(mPictureRect.x) / textureSize.width,
                                    Float(mPictureRect.y) / textureSize.height,
                                    Float(mPictureRect.width) / textureSize.width,
                                    Float(mPictureRect.height) / textureSize.height);
      rect = pictureRect;
    } else {
      effect->mTextureCoords = Rect(0, 0, 1, 1);
      rect = gfx::Rect(0, 0, textureSize.width, textureSize.height);
    }

    if (mFrontBuffer->GetFlags() & TextureFlags::ORIGIN_BOTTOM_LEFT) {
      effect->mTextureCoords.y = effect->mTextureCoords.YMost();
      effect->mTextureCoords.height = -effect->mTextureCoords.height;
    }

    GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                              aOpacity, aTransform);
    GetCompositor()->DrawDiagnostics(DiagnosticFlags::IMAGE,
                                     rect, aClipRect,
                                     aTransform, mFlashCounter);
  }
}
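The ORIGIN_BOTTOM_LEFT handling above flips the normalized texture rect vertically by moving y to YMost() and negating the height. The same idea in isolation, as a sketch rather than an actual Gecko helper (gfx::Rect stands in for the effect's mTextureCoords):

// Sketch: flip a normalized texture-coordinate rect vertically, as done for
// TextureFlags::ORIGIN_BOTTOM_LEFT above.
static gfx::Rect FlipTexCoordsVertically(gfx::Rect coords)
{
  coords.y = coords.YMost();       // start sampling from the bottom edge
  coords.height = -coords.height;  // a negative height walks back up the texture
  return coords;
}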
Example #7
0
    void ParameterOptimizationServer<  SamplePointT, SamplePointGrid >::Process( )
    {
      typedef GeometricOptimizationBase<SamplePointT> Base;
      //      typedef ParameterOptimizationServer< Reconstructor, SamplePointT, SamplePointGrid > Self;
      RUNTIME_ASSERT( LocalSetup.InputParameters().fThermalizeFraction >= 0, "ParallelReconstruction:  fThermalizeFraction < 0" );
      RUNTIME_ASSERT( LocalSetup.InputParameters().fCoolingFraction >= 0,    "ParallelReconstruction:  fCoolingFraction < 0" );
      RUNTIME_ASSERT( ( LocalSetup.InputParameters().fThermalizeFraction + LocalSetup.InputParameters().fCoolingFraction <= 1),
                      "ParallelReconstruction: fThermalizeFraction + fCoolingFraction > 1 ");
      
      vector<SStepSizeInfo>   vGlobalMaxDeviation = LocalSetup.ExperimentalSetup().GetOptimizationInfo();
      vector<SDetParamMsg>      vCurrentDetParams = LocalSetup.ExperimentalSetup().GetExperimentalParameters();
      const vector<SStepSizeInfo> & vMinStepSizes = LocalSetup.ExperimentalSetup().GetDetectorSensitivity();
      //-----------------------------------
      // reduce step size by the cube root of the number of processors
      // (This is the approximate volume taken by each processor)
      //-----------------------------------
      Float fScale = Float ( 1 )
                   / ( pow( (nProcessingElements - 1)  * LocalSetup.InputParameters().nNumParamOptSteps,
                            Float( 1 ) / Float ( 3 ) ) );
    
      GET_LOG( osLogFile ) << "Scaling Factor for parameter MC: " << fScale << " " << nProcessingElements << std::endl;
    
      vector<SStepSizeInfo> vClientStepSizeInfo = vGlobalMaxDeviation;
      Base::ScaleParamOptMsg( vClientStepSizeInfo, vMinStepSizes, fScale );

      Int                  nIter           = 0;
      Float                fGlobalBestCost = 2;
      
      const Float fReductionScale = std::min( LocalSetup.InputParameters().fSearchVolReductionFactor /
                                              pow( Float( nProcessingElements -1 ), Float(1) / Float(3) ),
                                              Float( 0.8 ) );  // 4 is the fudge factor
 
      vector< SParamOptMsg<SamplePointT> > vSamplePoints;    
      VoxelQueue.RandomizeReset();
      SEnergyOpt oEnergyLoc;
      oEnergyLoc.fBeamEnergy = LocalSetup.ExperimentalSetup().GetBeamEnergy();
      oEnergyLoc.fEnergyStep = LocalSetup.InputParameters().BeamEnergyWidth / Float(2) * fScale;
      
      SEnergyOpt            oBestEnergyLoc     = oEnergyLoc;
      vector<SDetParamMsg>   vBestParams        = vCurrentDetParams;

      Int                   nLocalRestarts     = 0;
      Int                   nLargeStepRestarts = 0;
      bool                  bFitNewVoxels      = true;
    
      while ( nIter < LocalSetup.InputParameters().nParameterRefinements )
      {
        Bool bFitSuccess = false;
        if( bFitNewVoxels )
        {
          boost::tie(vSamplePoints, bFitSuccess) = GetNConvergedElements( );
          VoxelQueue.RandomizeReset();
          bFitNewVoxels = false;
        }
        Bool bNeedRestart = true;
        if( bFitSuccess || (!bFitNewVoxels) )
        {
          // Run LocalParamOptimization
          SEnergyOpt            OptimizedEnergyLoc;            
          vector<SDetParamMsg>   OptimizedParams;
          Float                 fCurrentCost = 2;
          boost::tie( OptimizedParams, fCurrentCost, OptimizedEnergyLoc ) = 
            LocalParamOptimization( oEnergyLoc, vCurrentDetParams,
                                    vSamplePoints, vClientStepSizeInfo,
                                    vGlobalMaxDeviation );
        
          GET_LOG( osLogFile ) << "[Best Cost, New Cost] " << fGlobalBestCost << " " << fCurrentCost << std::endl;
          if( fCurrentCost < fGlobalBestCost )   // if something's found, refine
          {
            fGlobalBestCost       = fCurrentCost;
            vCurrentDetParams     = OptimizedParams;
            vBestParams           = OptimizedParams;
            oBestEnergyLoc        = OptimizedEnergyLoc;
            oEnergyLoc            = OptimizedEnergyLoc;
            GET_LOG( osLogFile ) << "Decided to refine, reduced by: " << fReductionScale << std::endl;
            oEnergyLoc.fEnergyStep *= fReductionScale;
            Base::ScaleParamOptMsg( vGlobalMaxDeviation, vMinStepSizes, fReductionScale );
            Base::ScaleParamOptMsg( vClientStepSizeInfo, vMinStepSizes, fReductionScale );
            bNeedRestart = false;
          }
        }

        if( bNeedRestart )   // is this even useful?
        {
          bFitNewVoxels = true;
          VoxelQueue.RandomizeReset();
          if(  nLocalRestarts < LocalSetup.InputParameters().nMaxParamMCLocalRestarts )
          {
            vCurrentDetParams      = Base::RandomMoveDet( vCurrentDetParams, vClientStepSizeInfo );
            oEnergyLoc.fBeamEnergy = oEnergyLoc.fBeamEnergy
                                   + oRandomReal( -oEnergyLoc.fEnergyStep, oEnergyLoc.fEnergyStep );
            SetClientParameters( oEnergyLoc, vCurrentDetParams );
            nLocalRestarts ++;
          }
          else if ( nLargeStepRestarts < LocalSetup.InputParameters().nMaxParamMCGlobalRestarts )
          {
            nLocalRestarts       = 0;
            Bool bCandidateFound = false;
            while( nLargeStepRestarts < LocalSetup.InputParameters().nMaxParamMCGlobalRestarts && ! bCandidateFound )
            {
              nLargeStepRestarts ++;
              vector< SLargeScaleOpt > oCandidateList =
                GetLargeVolumeCandidates( oEnergyLoc, vCurrentDetParams, vGlobalMaxDeviation );
              if( oCandidateList.size() > 0 )
              {
                bCandidateFound = true;
                std::sort( oCandidateList.begin(), oCandidateList.end() );  // sort ascending order by cost
                vCurrentDetParams      = oCandidateList[0].vDetParams;
                oEnergyLoc.fBeamEnergy = oCandidateList[0].fEnergy;
                SetClientParameters( oEnergyLoc, vCurrentDetParams );
              }
            }
          }
        }
        nIter ++;
      }  //----------------------------------------
    
      //-----------------------------------
      // save mic file
      //-----------------------------------

      RUNTIME_ASSERT( 0, "Need to rewrite the part that saves to mic");
      
      VoxelQueue.ClearSolution();
      for( Size_Type i = 0; i < vSamplePoints.size(); i ++ )
        VoxelQueue.Push( vSamplePoints[i].oVoxel );
      
      //      Base::WriteFitResult( VoxelQueue.Solution(), ".opt");
      Base::SetExperimentalParameters( oBestEnergyLoc.fBeamEnergy, vBestParams );
      
      GET_LOG( osLogFile ) << "Best Energy: " << oBestEnergyLoc.fBeamEnergy << std::endl; 
      Base::Comm.SendCommand( nMyID, 1, nProcessingElements -1, XDMParallel::PROCESS_DONE );
      //      VoxelQueue.ClearSolution();
      
      
    }
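The step-size scaling near the top of Process() shrinks the Monte Carlo steps by the cube root of (number of clients × steps per client), i.e. by each client's share of the 3-D search volume. A standalone arithmetic sketch of that factor; the helper name is illustrative.

// Sketch: the per-client step scale used above,
//   fScale = 1 / cbrt( (nProcessingElements - 1) * nNumParamOptSteps )
#include <cmath>

static float ParamStepScale(int nProcessingElements, int nNumParamOptSteps)
{
  const float nVolumes = float(nProcessingElements - 1) * float(nNumParamOptSteps);
  return 1.0f / std::cbrt(nVolumes);   // e.g. 16 clients, 4 steps -> 1 / cbrt(60) ~ 0.256
}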
Example #8
0
Matrix4x4& Matrix4x4::zero() {
  _11 = Float(0);  _12 = Float(0);  _13 = Float(0);  _14 = Float(0);
  _21 = Float(0);  _22 = Float(0);  _23 = Float(0);  _24 = Float(0);
  _31 = Float(0);  _32 = Float(0);  _33 = Float(0);  _34 = Float(0);
  _41 = Float(0);  _42 = Float(0);  _43 = Float(0);  _44 = Float(0);
  return *this;
}
Example #9
0
DataSourceSurfaceD2D::DataSourceSurfaceD2D(SourceSurfaceD2D* aSourceSurface)
  : mTexture(nullptr)
  , mFormat(aSourceSurface->mFormat)
  , mSize(aSourceSurface->mSize)
  , mMapped(false)
{
  // We allocate ourselves a regular D3D surface (sourceTexture) and paint the
  // D2D bitmap into it via a DXGI render target. Then we need to copy
  // sourceTexture into a staging texture (mTexture), which we will lazily map
  // to get the data.

  CD3D10_TEXTURE2D_DESC desc(DXGIFormat(mFormat), mSize.width, mSize.height);
  desc.MipLevels = 1;
  desc.Usage = D3D10_USAGE_DEFAULT;
  desc.BindFlags = D3D10_BIND_RENDER_TARGET | D3D10_BIND_SHADER_RESOURCE;
  RefPtr<ID3D10Texture2D> sourceTexture;
  HRESULT hr = aSourceSurface->mDevice->CreateTexture2D(&desc, nullptr,
                                                        byRef(sourceTexture));
  if (FAILED(hr)) {
    gfxWarning() << "Failed to create texture. Code: " << hr;
    return;
  }

  RefPtr<IDXGISurface> dxgiSurface;
  hr = sourceTexture->QueryInterface((IDXGISurface**)byRef(dxgiSurface));
  if (FAILED(hr)) {
    gfxWarning() << "Failed to create DXGI surface. Code: " << hr;
    return;
  }

  D2D1_RENDER_TARGET_PROPERTIES rtProps = D2D1::RenderTargetProperties(
            D2D1_RENDER_TARGET_TYPE_DEFAULT,
            D2D1::PixelFormat(DXGI_FORMAT_UNKNOWN, D2D1_ALPHA_MODE_PREMULTIPLIED));

  RefPtr<ID2D1RenderTarget> renderTarget;
  hr = DrawTargetD2D::factory()->CreateDxgiSurfaceRenderTarget(dxgiSurface,
                                                               &rtProps,
                                                               byRef(renderTarget));
  if (FAILED(hr)) {
    gfxWarning() << "Failed to create render target. Code: " << hr;
    return;
  }

  renderTarget->BeginDraw();
  renderTarget->DrawBitmap(aSourceSurface->mBitmap,
                           D2D1::RectF(0, 0,
                                       Float(mSize.width),
                                       Float(mSize.height)));
  renderTarget->EndDraw();

  desc.CPUAccessFlags = D3D10_CPU_ACCESS_READ;
  desc.Usage = D3D10_USAGE_STAGING;
  desc.BindFlags = 0;
  hr = aSourceSurface->mDevice->CreateTexture2D(&desc, nullptr, byRef(mTexture));
  if (FAILED(hr)) {
    gfxWarning() << "Failed to create staging texture. Code: " << hr;
    mTexture = nullptr;
    return;
  }

  aSourceSurface->mDevice->CopyResource(mTexture, sourceTexture);
}
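The constructor's comment says the staging texture (mTexture) is lazily mapped later to read the pixels back. A hedged sketch of what that map step could look like with the plain D3D10 API; this is illustrative, not the class's actual Map() implementation.

// Sketch: map a D3D10 staging texture for CPU reads, as the comment above describes.
bool MapStagingTexture(ID3D10Texture2D* aTexture, D3D10_MAPPED_TEXTURE2D* aMap)
{
  HRESULT hr = aTexture->Map(0, D3D10_MAP_READ, 0, aMap);
  if (FAILED(hr)) {
    gfxWarning() << "Failed to map staging texture. Code: " << hr;
    return false;
  }
  // aMap->pData now points at the pixels; aMap->RowPitch is the row stride in bytes.
  return true;
}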
Example #10
0
/* The commutate function uses the High, Low and Float functions to commutate
 * the motor. The input to the function is the motor state from the hall effect 
 * sensors. Depending on the value of the global variable "direction" the
 * function sets the motor outputs according to the motor's commutation pattern.
 * The commutation pattern is derived from the commutation diagram found on p32
 * of Maxon's E-Paper Catalog and is shown below.
 *
 * Hall Sen 321 | 101 | 001 | 011 | 010 | 110 | 100 |
 *     State    |  5  |  1  |  3  |  2  |  6  |  4  |
 * -------------|-----|-----|-----|-----|-----|-----|
 * 1 -   Red    |  +  |  +  |  0  |  -  |  -  |  0  |
 * 2 -  Black   |  -  |  0  |  +  |  +  |  0  |  -  |
 * 3 -  White   |  0  |  -  |  -  |  0  |  +  |  +  |
 *
 * An input of 0 to the commutate function will turn off the motor.
 */
void commutate(int state){
    switch(state){
        case 0:
            Float(1); Float(2); Float(3);
            break;
        case 1:
            if(direction){
                High(1); Float(2); Low(3);
            } else{
                Low(1); Float(2); High(3);
            }
            break;
        case 2:
            if(direction){
                Low(1); High(2); Float(3);
            } else{
                High(1); Low(2); Float(3);
            }
            break;
        case 3:
            if(direction){
                Float(1); High(2); Low(3);
            } else{
                Float(1); Low(2); High(3);
            }
            break;
        case 4:
            if(direction){
                Float(1); Low(2); High(3);
            } else{
                Float(1); High(2); Low(3);
            }
            break;
        case 5:
            if(direction){
                High(1); Low(2); Float(3);
            } else{
                Low(1); High(2); Float(3);
            }
            break;
        case 6:
            if(direction){
                Low(1); Float(2); High(3);
            } else{
                High(1); Float(2); Low(3);
            }
            break;
        //case 7:
          //  Low(1); High(2);
            //break;
        default:
            Float(1); Float(2); Float(3);
            break;
    }
}
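The state argument is the three hall-effect bits packed in the "Hall Sen 321" order of the table above. A hedged sketch of how a caller might build it; readHall1/2/3 are hypothetical digital-input reads, not functions from this code.

/* Hypothetical digital-input reads, one per hall sensor, returning 0 or 1. */
int readHall1(void);
int readHall2(void);
int readHall3(void);

/* Pack the sensors into the "Hall Sen 321" bit order used by the table above
 * (sensor 3 is the most significant bit) and commutate. */
void commutateFromSensors(void){
    int state = (readHall3() << 2) | (readHall2() << 1) | readHall1();
    commutate(state);   /* state 0 or an invalid 7 falls through to float-all */
}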
Example #11
0
Expr lower_lerp(Expr zero_val, Expr one_val, Expr weight) {

    Expr result;

    internal_assert(zero_val.type() == one_val.type());
    internal_assert(weight.type().is_uint() || weight.type().is_float());

    Type result_type = zero_val.type();

    Expr bias_value = make_zero(result_type);
    Type computation_type = result_type;

    if (zero_val.type().is_int()) {
        computation_type = UInt(zero_val.type().bits(), zero_val.type().lanes());
        bias_value = result_type.min();
    }

    // For signed integer types, just convert everything to unsigned
    // and then back at the end to ensure proper rounding, etc.
    // There is likely a better way to handle this.
    if (result_type != computation_type) {
        zero_val = Cast::make(computation_type, zero_val - bias_value);
        one_val =  Cast::make(computation_type, one_val  - bias_value);
    }

    if (result_type.is_bool()) {
        Expr half_weight;
        if (weight.type().is_float())
            half_weight = 0.5f;
        else {
            half_weight = weight.type().max() / 2;
        }

        result = select(weight > half_weight, one_val, zero_val);
    } else {
        Expr typed_weight;
        Expr inverse_typed_weight;

        if (weight.type().is_float()) {
            typed_weight = weight;
            if (computation_type.is_uint()) {
                // TODO: Verify this reduces to efficient code or
                // figure out a better way to express a multiply
                // of unsigned 2^32-1 by a double promoted weight
                if (computation_type.bits() == 32) {
                    typed_weight =
                        Cast::make(computation_type,
                                   cast<double>(Expr(65535.0f)) * cast<double>(Expr(65537.0f)) *
                                   Cast::make(Float(64, typed_weight.type().lanes()), typed_weight));
                } else {
                    typed_weight =
                        Cast::make(computation_type,
                                   computation_type.max() * typed_weight);
                }
                inverse_typed_weight = computation_type.max() - typed_weight;
            } else {
                inverse_typed_weight = 1.0f - typed_weight;
            }

        } else {
            if (computation_type.is_float()) {
                int weight_bits = weight.type().bits();
                if (weight_bits == 32) {
                    // Should use ldexp, but can't make Expr from result
                    // that is double
                    typed_weight =
                        Cast::make(computation_type,
                                   cast<double>(weight) / (pow(cast<double>(2), 32) - 1));
                } else {
                    typed_weight =
                        Cast::make(computation_type,
                                   weight / ((float)ldexp(1.0f, weight_bits) - 1));
                }
                inverse_typed_weight = 1.0f - typed_weight;
            } else {
                // This code rescales integer weights to the right number of bits.
                // It takes advantage of (2^n - 1) == (2^(n/2) - 1)(2^(n/2) + 1)
                // e.g. 65535 = 255 * 257. (Ditto for the 32-bit equivalent.)
                // To rescale a weight of m bits to be n bits, we need to do:
                //     scaled_weight = (weight / (2^m - 1)) * (2^n - 1)
                // which, for power-of-two values of m and n, results in a series
                // like so:
                //     (2^(m/2) + 1) * (2^(m/4) + 1) ... (2^(n*2) + 1)
                // The loop below computes a scaling constant and either multiplies
                // or divides by the constant and relies on lowering and llvm to
                // generate efficient code for the operation.
                int bit_size_difference = weight.type().bits() - computation_type.bits();
                if (bit_size_difference == 0) {
                    typed_weight = weight;
                } else {
                    typed_weight = Cast::make(computation_type, weight);

                    int bits_left = ::abs(bit_size_difference);
                    int shift_amount = std::min(computation_type.bits(), weight.type().bits());
                    uint64_t scaling_factor = 1;
                    while (bits_left != 0) {
                        internal_assert(bits_left > 0);
                        scaling_factor = scaling_factor + (scaling_factor << shift_amount);
                        bits_left -= shift_amount;
                        shift_amount *= 2;
                    }
                    if (bit_size_difference < 0) {
                        typed_weight =
                            Cast::make(computation_type, weight) *
                            cast(computation_type, (int32_t)scaling_factor);
                    } else {
                        typed_weight =
                            Cast::make(computation_type,
                                       weight / cast(weight.type(), (int32_t)scaling_factor));
                    }
                }
                inverse_typed_weight =
                    Cast::make(computation_type,
                               computation_type.max() - typed_weight);
            }
        }

        if (computation_type.is_float()) {
            result = zero_val * inverse_typed_weight +
                one_val * typed_weight;
        } else {
            int32_t bits = computation_type.bits();
            switch (bits) {
            case 1:
                result = select(typed_weight, one_val, zero_val);
                break;
            case 8:
            case 16:
            case 32: {
                Expr zero_expand = Cast::make(UInt(2 * bits, computation_type.lanes()),
                                              zero_val);
                Expr  one_expand = Cast::make(UInt(2 * bits, one_val.type().lanes()),
                                              one_val);

                Expr rounding = Cast::make(UInt(2 * bits), 1) << Cast::make(UInt(2 * bits), (bits - 1));
                Expr divisor  = Cast::make(UInt(2 * bits), 1) << Cast::make(UInt(2 * bits), bits);

                Expr prod_sum = zero_expand * inverse_typed_weight +
                    one_expand * typed_weight + rounding;
                Expr divided = ((prod_sum / divisor) + prod_sum) / divisor;

                result = Cast::make(UInt(bits, computation_type.lanes()), divided);
                break;
            }
            case 64:
                // TODO: 64-bit lerp is not supported as current approach
                // requires double-width multiply.
                // There is an informative error message in IROperator.h.
                internal_error << "Can't do a 64-bit lerp.\n";
                break;
            default:
                break;
            }
        }

        if (!is_zero(bias_value)) {
            result = Cast::make(result_type, result) + bias_value;
        }
    }

    return simplify(result);
}
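The 8/16/32-bit branch above multiplies into a double-width integer, adds a rounding bias of 2^(bits-1), and divides by 2^bits - 1 via ((x / 2^bits) + x) / 2^bits. A small standalone check of that arithmetic for the 8-bit case, using plain integers rather than Halide Exprs:

// Sketch: the widening multiply + rounded division by 255 used for the 8-bit lerp above.
#include <cstdint>

static uint8_t lerp_u8(uint8_t zero_val, uint8_t one_val, uint8_t weight)
{
    const uint32_t w  = weight;
    const uint32_t iw = 255 - weight;                       // inverse weight
    uint32_t prod_sum = uint32_t(zero_val) * iw +
                        uint32_t(one_val)  * w + 128;       // rounding = 1 << (bits - 1)
    uint32_t divided  = ((prod_sum >> 8) + prod_sum) >> 8;  // ~ prod_sum / 255
    return uint8_t(divided);                                // e.g. lerp_u8(0, 255, 128) == 128
}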
Example #12
0
//--------------------------------------------------------------
void ofApp::setup() {

	ofSetFrameRate(60);
	ofSetVerticalSync(true);
	//ofSetLogLevel("Pd", OF_LOG_VERBOSE); // see verbose info inside

	// double check where we are ...
	cout << ofFilePath::getCurrentWorkingDirectory() << endl;

	// the number of libpd ticks per buffer,
	// used to compute the audio buffer len: tpb * blocksize (always 64)
	#ifdef TARGET_LINUX_ARM
		// longer latency for Raspberry PI
		int ticksPerBuffer = 32; // 32 * 64 = buffer len of 2048
		int numInputs = 0; // no built in mic
	#else
		int ticksPerBuffer = 8; // 8 * 64 = buffer len of 512
		int numInputs = 1;
	#endif

	// setup OF sound stream
	ofSoundStreamSetup(2, numInputs, this, 44100, ofxPd::blockSize()*ticksPerBuffer, 3);

	// setup Pd
	//
	// set 4th arg to true for queued message passing using an internal ringbuffer,
	// this is useful if you need to control where and when the message callbacks
	// happen (ie. within a GUI thread)
	//
	// note: you won't see any message prints until update() is called since
	// the queued messages are processed there, this is normal
	//
	if(!pd.init(2, numInputs, 44100, ticksPerBuffer, false)) {
		OF_EXIT_APP(1);
	}

	midiChan = 1; // midi channels are 1-16

	// subscribe to receive source names
	pd.subscribe("toOF");
	pd.subscribe("env");

	// add message receiver, required if you want to receive messages
	pd.addReceiver(*this); // automatically receives from all subscribed sources
	pd.ignoreSource(*this, "env");        // don't receive from "env"
	//pd.ignoreSource(*this);             // ignore all sources
	//pd.receiveSource(*this, "toOF");	  // receive only from "toOF"

	// add midi receiver, required if you want to receive midi messages
	pd.addMidiReceiver(*this); // automatically receives from all channels
	//pd.ignoreMidiChannel(*this, 1);     // ignore midi channel 1
	//pd.ignoreMidiChannel(*this);        // ignore all channels
	//pd.receiveMidiChannel(*this, 1);    // receive only from channel 1

	// add the data/pd folder to the search path
	pd.addToSearchPath("pd/abs");

	// audio processing on
	pd.start();

	// -----------------------------------------------------
	cout << endl << "BEGIN Patch Test" << endl;

	// open patch
	Patch patch = pd.openPatch("pd/test.pd");
	cout << patch << endl;

	// close patch
	pd.closePatch(patch);
	cout << patch << endl;

	// open patch again
	patch = pd.openPatch(patch);
	cout << patch << endl;
	
	cout << "FINISH Patch Test" << endl;

	// -----------------------------------------------------
	cout << endl << "BEGIN Message Test" << endl;

	// test basic atoms
	pd.sendBang("fromOF");
	pd.sendFloat("fromOF", 100);
	pd.sendSymbol("fromOF", "test string");

	// stream interface
	pd << Bang("fromOF")
	   << Float("fromOF", 100)
	   << Symbol("fromOF", "test string");

	// send a list
	pd.startMessage();
	pd.addFloat(1.23);
	pd.addSymbol("a symbol");
	pd.finishList("fromOF");

	// send a message to the $0 receiver ie $0-fromOF
	pd.startMessage();
	pd.addFloat(1.23);
	pd.addSymbol("a symbol");
	pd.finishList(patch.dollarZeroStr()+"-fromOF");

	// send a list using the List object
	List testList;
	testList.addFloat(1.23);
	testList.addSymbol("sent from a List object");
	pd.sendList("fromOF", testList);
	pd.sendMessage("fromOF", "msg", testList);

	// stream interface for list
	pd << StartMessage() << 1.23 << "sent from a streamed list" << FinishList("fromOF");

	cout << "FINISH Message Test" << endl;

	// -----------------------------------------------------
	cout << endl << "BEGIN MIDI Test" << endl;

	// send functions
	pd.sendNoteOn(midiChan, 60);
	pd.sendControlChange(midiChan, 0, 64);
	pd.sendProgramChange(midiChan, 100);    // note: pgm num range is 1 - 128
	pd.sendPitchBend(midiChan, 2000);   // note: ofxPd uses -8192 - 8192 while [bendin] returns 0 - 16383,
										// so sending a val of 2000 gives 10192 in pd
	pd.sendAftertouch(midiChan, 100);
	pd.sendPolyAftertouch(midiChan, 64, 100);
	pd.sendMidiByte(0, 239);    // note: pd adds +2 to the port number from [midiin], [sysexin], & [realtimein]
	pd.sendSysex(0, 239);       // so sending to port 0 gives port 2 in pd
	pd.sendSysRealTime(0, 239);

	// stream
	pd << NoteOn(midiChan, 60) << ControlChange(midiChan, 100, 64)
	   << ProgramChange(midiChan, 100) << PitchBend(midiChan, 2000)
	   << Aftertouch(midiChan, 100) << PolyAftertouch(midiChan, 64, 100)
	   << StartMidi(0) << 239 << Finish()
	   << StartSysex(0) << 239 << Finish()
	   << StartSysRealTime(0) << 239 << Finish();

	cout << "FINISH MIDI Test" << endl;

	// -----------------------------------------------------
	cout << endl << "BEGIN Array Test" << endl;

	// array check length
	cout << "array1 len: " << pd.arraySize("array1") << endl;

	// read array
	std::vector<float> array1;
	pd.readArray("array1", array1);	// sets array to correct size
	cout << "array1 ";
	for(int i = 0; i < array1.size(); ++i)
		cout << array1[i] << " ";
	cout << endl;

	// write array
	for(int i = 0; i < array1.size(); ++i)
		array1[i] = i;
	pd.writeArray("array1", array1);

	// read the array back
	pd.readArray("array1", array1);
	cout << "array1 ";
	for(int i = 0; i < array1.size(); ++i)
		cout << array1[i] << " ";
	cout << endl;

	// clear array
	pd.clearArray("array1", 10);

	// read the array back
	pd.readArray("array1", array1);
	cout << "array1 ";
	for(int i = 0; i < array1.size(); ++i)
		cout << array1[i] << " ";
	cout << endl;

	cout << "FINISH Array Test" << endl;

	// -----------------------------------------------------
	cout << endl << "BEGIN PD Test" << endl;

	pd.sendSymbol("fromOF", "test");

	cout << "FINISH PD Test" << endl << endl;

	// -----------------------------------------------------
	cout << endl << "BEGIN Instance Test" << endl;

	// open 10 instances
	for(int i = 0; i < 10; ++i) {
		Patch p = pd.openPatch("pd/instance.pd");
		instances.push_back(p);
	}

	// send a hello bang to each instance individually using the dollarZero
	// to [r $0-instance] which should print the instance dollarZero unique id
	// and a unique random number
	for(int i = 0; i < instances.size(); ++i) {
		pd.sendBang(instances[i].dollarZeroStr()+"-instance");
	}

	// send a random float between 0 and 100
	for(int i = 0; i < instances.size(); ++i) {
		pd.sendFloat(instances[i].dollarZeroStr()+"-instance", int(ofRandom(0, 100)));
	}

	// send a symbol
	for(int i = 0; i < instances.size(); ++i) {
		pd.sendSymbol(instances[i].dollarZeroStr()+"-instance", "howdy dude");
	}

	// close all instances
	for(int i = 0; i < instances.size(); ++i) {
		pd.closePatch(instances[i]);
	}
	instances.clear();

	cout << "FINISH Instance Test" << endl;

	// -----------------------------------------------------
	// play a tone by sending a list
	// [list tone pitch 72 (
	pd.startMessage();
	pd.addSymbol("pitch");
	pd.addFloat(72);
	pd.finishList("tone");
	pd.sendBang("tone");
}
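setup() only opens the sound stream; the samples themselves move in the openFrameworks audio callbacks, which this example presumably defines elsewhere in ofApp. A hedged sketch of those callbacks using ofxPd's audioIn/audioOut, matching the legacy ofSoundStreamSetup signature used above:

//--------------------------------------------------------------
// Sketch: audio callbacks that feed libpd (assumed, not verbatim from this example).
void ofApp::audioReceived(float * input, int bufferSize, int nChannels) {
	pd.audioIn(input, bufferSize, nChannels);
}

void ofApp::audioRequested(float * output, int bufferSize, int nChannels) {
	pd.audioOut(output, bufferSize, nChannels);
}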
Example #13
0
  //--------------------------------------------------------------------------------------------------------
  //  Public:   RandomRestartZeroTemp
  //
  //  Parameter:  fAngularBoxSideLength is the side of the box of the local angular search space.
  //
  //--------------------------------------------------------------------------------------------------------
  std::pair<SMatrix3x3, Float>
  COrientationMC::RandomRestartZeroTemp( const SMatrix3x3 & oInitialOrientation,
                                         Float fAngularStepSize, Float fAngularBoxSideLength, 
                                         COverlapFunction & oObjectiveFunction,
                                         Int nMaxMCStep,
                                         Int nMaxFailedRestarts,
                                         Float fMaxConvergenceCost )
  {
    SMatrix3x3 oGlobalOptimalState = oInitialOrientation;
    SMatrix3x3 oCurrentState =  oInitialOrientation;
    COverlapFunction::ValueType fGlobalMinCost = oObjectiveFunction( oInitialOrientation );
    
    
    Float fCurAngularStepSize = fAngularStepSize;
    
    Int nTotalStepTaken = 0;
    Int nSuccessiveRestarts = 0;
    Int nMinErgodicSteps = 2 * pow( fAngularBoxSideLength / fCurAngularStepSize, 3 );  // Number of steps required to be ergodic
    Int nOptimizationSteps = nMinErgodicSteps;
    
    while(  nTotalStepTaken < nMaxMCStep )
    {
     
      nOptimizationSteps = std::min( nOptimizationSteps, ( nMaxMCStep - nTotalStepTaken ) );
      nOptimizationSteps = std::max( nOptimizationSteps, 0 );
      
      Float fCurrentCost;
      SMatrix3x3 oTmpResOrient;
      boost::tie( oTmpResOrient, fCurrentCost ) =  ZeroTemperatureOptimization( oCurrentState, fCurAngularStepSize, 
                                                                                oObjectiveFunction, nOptimizationSteps );
      nTotalStepTaken += nOptimizationSteps;
      
      if( fCurrentCost >=  fGlobalMinCost )           // restart with new position
      {
        Float fRadius     = tan( fAngularBoxSideLength ) / sqrt( 48.0 ); // sqrt(48) = 4 * sqrt(3)   ( random start radius )
        Float fX =  GetRandomVariable( -fRadius, fRadius );
        Float fY =  GetRandomVariable( -fRadius, fRadius );
        Float fZ =  GetRandomVariable( -fRadius, fRadius );
        SQuaternion q = oUniformGridGen.GetNearIdentityPoint( fX, fY, fZ );
        SMatrix3x3 oDelta = q.GetRotationMatrix3x3();
        oCurrentState = oDelta * oInitialOrientation;

        //----------
        // Reset search parameters
        //----------
        fCurAngularStepSize = fAngularStepSize;  
        nSuccessiveRestarts ++;
        nOptimizationSteps = nMinErgodicSteps;
      }
      else                                   // continuation of search, with narrowing of steps size
      {
        nSuccessiveRestarts = 0;
        fGlobalMinCost      = fCurrentCost;
        oGlobalOptimalState = oTmpResOrient;
        
        oCurrentState        = oTmpResOrient;
        fCurAngularStepSize *= Float( 0.5 );    // reduce step size
      }

      if( fGlobalMinCost < fMaxConvergenceCost )    // convergence  -- move criterion to user defined
        break;                                      // cheating -- should really allow convergence instead
      
      if( nSuccessiveRestarts > nMaxFailedRestarts )   // search failed change this to user defined
        break;
    }
    
    // Named return value optimization
    std::pair<SMatrix3x3, Float> oRes = std::make_pair( oGlobalOptimalState, fGlobalMinCost ); 
    return oRes;
  }
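The restart budget above is driven by a rough ergodicity estimate of 2 * (box side / step size)^3 steps, i.e. twice the number of step-sized cells in the 3-D angular search box. As a standalone computation (illustrative only):

// Sketch: the minimum "ergodic" step count used above.
#include <cmath>

static int MinErgodicSteps(float fAngularBoxSideLength, float fAngularStepSize)
{
  return int( 2 * std::pow( fAngularBoxSideLength / fAngularStepSize, 3 ) );
}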
Example #14
0
  //--------------------------------------------------------------------------------------------------------
  //  Public:   AdaptiveSamplingZeroTemp
  //
  //   This is essentially a depth-first search, non-recursive
  //
  //--------------------------------------------------------------------------------------------------------
  std::pair<SMatrix3x3, Float>
  COrientationMC::AdaptiveSamplingZeroTemp( const SMatrix3x3 & oInitialOrientation,
                                            Float fCostFnAngularResolution,
                                            Float fSearchRegionAngularSideLength,
                                            COverlapFunction & oObjectiveFunction,
                                            Int nMaxMCStep,
                                            Int NumMaxStratifiedSamples,
                                            Float fConvergenceVariance,
                                            Float fMaxConvergenceCost )
  {
    SMatrix3x3 oGlobalOptimalState = oInitialOrientation;
    SMatrix3x3 oCurrentState =  oInitialOrientation;
    COverlapFunction::ValueType fGlobalMinCost = oObjectiveFunction( oInitialOrientation );
    
    Float InitialSubregionRadius  = tan( fSearchRegionAngularSideLength ) / sqrt( 48.0 ); // sqrt(48) = 4 * sqrt(3)   ( random start radius )
    
    Int nTotalStepTaken = 0;
    Int NumGlobalPointsTaken = 0;
    Float fCurrentVariance;
    Float SubregionRadius = InitialSubregionRadius;
    
    while( nTotalStepTaken < nMaxMCStep )
    {
      Int NumSubregionSteps = Int( pow( ceil(SubregionRadius / fCostFnAngularResolution), 2.7 ) ); // Number of steps it takes to adequately
                                                                                                 // sample each subregion without missing
                                                                                                 // a feature of the cost function

      NumSubregionSteps = std::max( NumSubregionSteps, 10 ); // at least 10 steps
      Float fCurrentCost;
      SMatrix3x3 oTmpResOrient;
      
      boost::tie( oTmpResOrient, fCurrentCost, fCurrentVariance )
        =  ZeroTemperatureOptimizationWithVariance( oCurrentState, SubregionRadius, 
                                                    oObjectiveFunction, NumSubregionSteps );
      if( fCurrentVariance > fConvergenceVariance )
        nMaxMCStep += NumSubregionSteps;
      
      nTotalStepTaken += NumSubregionSteps;
      if( fCurrentCost >=  fGlobalMinCost )           // restart with new position
      {
        //----------
        // Backtrack/Reset search parameters
        //----------
        SubregionRadius = std::min( static_cast<Float>( 2.0 * SubregionRadius), fSearchRegionAngularSideLength );
        NumGlobalPointsTaken ++;
        
        Float fX =  GetRandomVariable( -SubregionRadius, SubregionRadius );
        Float fY =  GetRandomVariable( -SubregionRadius, SubregionRadius );
        Float fZ =  GetRandomVariable( -SubregionRadius, SubregionRadius );
        SQuaternion q = oUniformGridGen.GetNearIdentityPoint( fX, fY, fZ );
        SMatrix3x3 oDelta = q.GetRotationMatrix3x3();
        oCurrentState = oDelta * oInitialOrientation;
      }
      else                                   // continuation of search, with narrowing of steps size
      {
        fGlobalMinCost      = fCurrentCost;
        oGlobalOptimalState = oTmpResOrient;
        oCurrentState       = oTmpResOrient;
        SubregionRadius     *= Float( 0.5 );    // reduce step size
      }
      
      if( (fGlobalMinCost < fMaxConvergenceCost)  && fabs( fCurrentVariance ) < fConvergenceVariance )    // convergence  -- move criterion to user defined
        break;
    }
    
        
    std::cout << RADIAN_TO_DEGREE( LatticeSymmetry::GetMisorientation( LatticeSymmetry::CCubicSymmetry::Get(), oGlobalOptimalState, oInitialOrientation )  )
	      << "\t|R_0| " << RADIAN_TO_DEGREE( InitialSubregionRadius )
	      << "\t|Step | " << nTotalStepTaken 
	      << "\t|GlbPts| " << NumGlobalPointsTaken
	      << "\t|R_c| " << RADIAN_TO_DEGREE( SubregionRadius )
	      << "\t|MCS| " << nMaxMCStep 
	      << "\t|Var| " << fCurrentVariance 
	      << "\t|C_i| " << oObjectiveFunction( oInitialOrientation )
	      << "\t|C_f|" << fGlobalMinCost 
	      << "\t[ " << RadianToDegree( oInitialOrientation.GetEulerAngles() ) << "]"
	      << std::endl; 
    
    // Named return value optimization
    std::pair<SMatrix3x3, Float> oRes = std::make_pair( oGlobalOptimalState, fGlobalMinCost ); 
    return oRes;
  }
Example #15
0
int LuSolve(
	size_t             n      ,
	size_t             m      ,
	const FloatVector &A      ,
	const FloatVector &B      ,
	FloatVector       &X      ,
	Float        &logdet      )
{
	// check numeric type specifications
	CheckNumericType<Float>();

	// check simple vector class specifications
	CheckSimpleVector<Float, FloatVector>();

	size_t        p;       // index of pivot element (diagonal of L)
	int     signdet;       // sign of the determinant
	Float     pivot;       // pivot element

	// the value zero
	const Float zero(0);

	// pivot row and column order in the matrix
	std::vector<size_t> ip(n);
	std::vector<size_t> jp(n);

	// -------------------------------------------------------
	CPPAD_ASSERT_KNOWN(
		size_t(A.size()) == n * n,
		"Error in LuSolve: A must have size equal to n * n"
	);
	CPPAD_ASSERT_KNOWN(
		size_t(B.size()) == n * m,
		"Error in LuSolve: B must have size equal to n * m"
	);
	CPPAD_ASSERT_KNOWN(
		size_t(X.size()) == n * m,
		"Error in LuSolve: X must have size equal to n * m"
	);
	// -------------------------------------------------------

	// copy A so that it does not change
	FloatVector Lu(A);

	// copy B so that it does not change
	X = B;

	// Lu factor the matrix A
	signdet = LuFactor(ip, jp, Lu);

	// compute the log of the determinant
	logdet  = Float(0);
	for(p = 0; p < n; p++)
	{	// pivot using the max absolute element
		pivot   = Lu[ ip[p] * n + jp[p] ];

		// check for determinant equal to zero
		if( pivot == zero )
		{	// abort the mission
			logdet = Float(0);
			return   0;
		}

		// update the determinant
		if( LeqZero ( pivot ) )
		{	logdet += log( - pivot );
			signdet = - signdet;
		}
		else	logdet += log( pivot );

	}

	// solve the linear equations
	LuInvert(ip, jp, Lu, X);

	// return the sign factor for the determinant
	return signdet;
}
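A minimal usage sketch for LuSolve with a concrete 2x2 system, assuming std::vector<double> as the simple-vector type and that the function is visible as declared above (in CppAD it lives in the CppAD namespace):

// Sketch: solve A x = b for A = [[4, 3], [6, 3]], b = [10, 12].
// The exact solution is x = [1, 2]; det(A) = -6, so signdet is -1 and logdet = log(6).
#include <vector>

void lu_solve_example()
{
	std::vector<double> A = { 4., 3.,
	                          6., 3. };
	std::vector<double> B = { 10., 12. };
	std::vector<double> X(2);
	double logdet;
	int signdet = LuSolve(2, 1, A, B, X, logdet);
	// X[0] ~ 1.0, X[1] ~ 2.0, signdet == -1
	(void)signdet;
}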
Example #16
0
void
PathBuilderD2D::Arc(const Point &aOrigin, Float aRadius, Float aStartAngle,
                 Float aEndAngle, bool aAntiClockwise)
{
  MOZ_ASSERT(aRadius >= 0);

  if (aAntiClockwise && aStartAngle < aEndAngle) {
    // D2D does things a little differently, and draws the arc by specifying a
    // beginning and an end point. This means the circle will be the wrong way
    // around if the start angle is smaller than the end angle. It might seem
    // tempting to invert aAntiClockwise but that would change the sweeping
    // direction of the arc, so instead we exchange start/end.
    Float oldStart = aStartAngle;
    aStartAngle = aEndAngle;
    aEndAngle = oldStart;
  }

  // XXX - Workaround for now, D2D does not appear to do the desired thing when
  // the angle sweeps a complete circle.
  bool fullCircle = false;
  if (aEndAngle - aStartAngle >= 2 * M_PI) {
    fullCircle = true;
    aEndAngle = Float(aStartAngle + M_PI * 1.9999);
  } else if (aStartAngle - aEndAngle >= 2 * M_PI) {
    fullCircle = true;
    aStartAngle = Float(aEndAngle + M_PI * 1.9999);
  }

  Point startPoint;
  startPoint.x = aOrigin.x + aRadius * cos(aStartAngle);
  startPoint.y = aOrigin.y + aRadius * sin(aStartAngle);

  if (!mFigureActive) {
    EnsureActive(startPoint);
  } else {
    mSink->AddLine(D2DPoint(startPoint));
  }

  Point endPoint;
  endPoint.x = aOrigin.x + aRadius * cosf(aEndAngle);
  endPoint.y = aOrigin.y + aRadius * sinf(aEndAngle);

  D2D1_ARC_SIZE arcSize = D2D1_ARC_SIZE_SMALL;
  D2D1_SWEEP_DIRECTION direction =
    aAntiClockwise ? D2D1_SWEEP_DIRECTION_COUNTER_CLOCKWISE :
                     D2D1_SWEEP_DIRECTION_CLOCKWISE;

  // if startPoint and endPoint of our circle are too close there are D2D issues
  // with drawing the circle as a single arc
  const Float kEpsilon = 1e-5f;
  if (!fullCircle ||
      (std::abs(startPoint.x - endPoint.x) +
       std::abs(startPoint.y - endPoint.y) > kEpsilon)) {

    if (aAntiClockwise) {
      if (aStartAngle - aEndAngle > M_PI) {
        arcSize = D2D1_ARC_SIZE_LARGE;
      }
    } else {
      if (aEndAngle - aStartAngle > M_PI) {
        arcSize = D2D1_ARC_SIZE_LARGE;
      }
    }

    mSink->AddArc(D2D1::ArcSegment(D2DPoint(endPoint),
                                   D2D1::SizeF(aRadius, aRadius),
                                   0.0f,
                                   direction,
                                   arcSize));
  }
  else {
    // our first workaround attempt didn't work, so instead draw the circle as
    // two half-circles
    Float midAngle = aEndAngle > aStartAngle ?
      Float(aStartAngle + M_PI) : Float(aEndAngle + M_PI);
    Point midPoint;
    midPoint.x = aOrigin.x + aRadius * cosf(midAngle);
    midPoint.y = aOrigin.y + aRadius * sinf(midAngle);

    mSink->AddArc(D2D1::ArcSegment(D2DPoint(midPoint),
                                   D2D1::SizeF(aRadius, aRadius),
                                   0.0f,
                                   direction,
                                   arcSize));

    // if the adjusted endPoint computed above is used here and endPoint !=
    // startPoint then this half of the circle won't render...
    mSink->AddArc(D2D1::ArcSegment(D2DPoint(startPoint),
                                   D2D1::SizeF(aRadius, aRadius),
                                   0.0f,
                                   direction,
                                   arcSize));
  }

  mCurrentPoint = endPoint;
}
Example #17
0
bool RegressionTree::computeBestSpiltBestIterativeSpilt( const RegressionData &trainingData, const Vector< UINT > &features, UINT &featureIndex, Float &threshold, Float &minError ){
    
    const UINT M = trainingData.getNumSamples();
    const UINT N = (UINT)features.size();
    
    if( N == 0 ) return false;
    
    minError = grt_numeric_limits< Float >::max();
    UINT bestFeatureIndex = 0;
    UINT groupID = 0;
    Float bestThreshold = 0;
    Float error = 0;
    Float minRange = 0;
    Float maxRange = 0;
    Float step = 0;
    Vector< UINT > groupIndex(M);
    VectorFloat groupCounter(2,0);
    VectorFloat groupMean(2,0);
    VectorFloat groupMSE(2,0);
    Vector< MinMax > ranges = trainingData.getInputRanges();
    
    //Loop over each feature and try and find the best split point
    for(UINT n=0; n<N; n++){
        minRange = ranges[n].minValue;
        maxRange = ranges[n].maxValue;
        step = (maxRange-minRange)/Float(numSplittingSteps);
        threshold = minRange;
        featureIndex = features[n];
        while( threshold <= maxRange ){
            
            //Reset the group accumulators for this candidate threshold
            groupCounter[0] = groupCounter[1] = 0;
            groupMean[0] = groupMean[1] = 0;
            groupMSE[0] = groupMSE[1] = 0;
            
            //Iterate over each sample and work out what group it falls into
            for(UINT i=0; i<M; i++){
                groupID = trainingData[i].getInputVector()[featureIndex] >= threshold ? 1 : 0;
                groupIndex[i] = groupID;
                groupMean[ groupID ] += trainingData[i].getInputVector()[featureIndex];
                groupCounter[ groupID ]++;
            }
            groupMean[0] /= groupCounter[0] > 0 ? groupCounter[0] : 1;
            groupMean[1] /= groupCounter[1] > 0 ? groupCounter[1] : 1;
            
            //Compute the MSE for each group
            for(UINT i=0; i<M; i++){
                groupMSE[ groupIndex[i] ] += grt_sqr( groupMean[ groupIndex[i] ] - trainingData[ i ].getInputVector()[features[n]] );
            }
            groupMSE[0] /= groupCounter[0] > 0 ? groupCounter[0] : 1;
            groupMSE[1] /= groupCounter[1] > 0 ? groupCounter[1] : 1;
            
            error = sqrt( groupMSE[0] + groupMSE[1] );
            
            //Store the best threshold and feature index
            if( error < minError ){
                minError = error;
                bestThreshold = threshold;
                bestFeatureIndex = featureIndex;
            }
            
            //Update the threshold
            threshold += step;
        }
    }
    
    //Set the best feature index and threshold
    featureIndex = bestFeatureIndex;
    threshold = bestThreshold;
    
    return true;
}
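// A small self-contained sketch (plain std::vector instead of the GRT types)
// of the per-threshold scoring used above: split the samples into two groups,
// compute each group's mean and MSE, and report sqrt(mse0 + mse1). The
// accumulators are reset on every call, which is what the threshold loop
// above relies on. ScoreThreshold is an illustrative name, not a GRT API.
#include <cmath>
#include <vector>

static float ScoreThreshold(const std::vector<float>& values, float threshold)
{
    float mean[2] = {0, 0}, mse[2] = {0, 0}, count[2] = {0, 0};
    std::vector<int> group(values.size());
    
    //Assign each sample to a group and accumulate the group means
    for (size_t i = 0; i < values.size(); ++i) {
        group[i] = values[i] >= threshold ? 1 : 0;
        mean[group[i]] += values[i];
        count[group[i]]++;
    }
    for (int g = 0; g < 2; ++g) mean[g] /= count[g] > 0 ? count[g] : 1;
    
    //Accumulate the squared error of each group about its mean
    for (size_t i = 0; i < values.size(); ++i) {
        const float d = mean[group[i]] - values[i];
        mse[group[i]] += d * d;
    }
    for (int g = 0; g < 2; ++g) mse[g] /= count[g] > 0 ? count[g] : 1;
    
    return std::sqrt(mse[0] + mse[1]);
}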
Example #18
0
void
ImageHost::Composite(EffectChain& aEffectChain,
                     float aOpacity,
                     const gfx::Matrix4x4& aTransform,
                     const gfx::Point& aOffset,
                     const gfx::Filter& aFilter,
                     const gfx::Rect& aClipRect,
                     const nsIntRegion* aVisibleRegion,
                     TiledLayerProperties* aLayerProperties)
{
  if (!GetCompositor()) {
    // This should only happen when a tab is dragged to another window and
    // async-video is still sending frames but we haven't attached the new
    // compositor yet.
    return;
  }
  if (!mFrontBuffer) {
    return;
  }
  if (!mFrontBuffer->Lock()) {
    NS_WARNING("failed to lock front buffer");
    return;
  }
  RefPtr<NewTextureSource> source = mFrontBuffer->GetTextureSources();
  if (!source) {
    return;
  }
  RefPtr<TexturedEffect> effect = CreateTexturedEffect(mFrontBuffer->GetFormat(),
                                                       source,
                                                       aFilter);
  aEffectChain.mPrimaryEffect = effect;
  IntSize textureSize = source->GetSize();
  gfx::Rect gfxPictureRect
    = mHasPictureRect ? gfx::Rect(0, 0, mPictureRect.width, mPictureRect.height)
                      : gfx::Rect(0, 0, textureSize.width, textureSize.height);

  gfx::Rect pictureRect(0, 0,
                        mPictureRect.width,
                        mPictureRect.height);
  //XXX: We might have multiple texture sources here (e.g. 3 YCbCr textures), and we're
  // only iterating over the tiles of the first one. Are we assuming that the tiling
  // will be identical? Can we ensure that somehow?
  TileIterator* it = source->AsTileIterator();
  if (it) {
    it->BeginTileIteration();
    do {
      nsIntRect tileRect = it->GetTileRect();
      gfx::Rect rect(tileRect.x, tileRect.y, tileRect.width, tileRect.height);
      if (mHasPictureRect) {
        rect = rect.Intersect(pictureRect);
        effect->mTextureCoords = Rect(Float(rect.x - tileRect.x)/ tileRect.width,
                                      Float(rect.y - tileRect.y) / tileRect.height,
                                      Float(rect.width) / tileRect.width,
                                      Float(rect.height) / tileRect.height);
      } else {
        effect->mTextureCoords = Rect(0, 0, 1, 1);
      }
      GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                                aOpacity, aTransform, aOffset);
      GetCompositor()->DrawDiagnostics(DIAGNOSTIC_IMAGE|DIAGNOSTIC_BIGIMAGE,
                                       rect, aClipRect, aTransform, aOffset);
    } while (it->NextTile());
    it->EndTileIteration();
    // layer border
    GetCompositor()->DrawDiagnostics(DIAGNOSTIC_IMAGE,
                                     gfxPictureRect, aClipRect,
                                     aTransform, aOffset);    
  } else {
    IntSize textureSize = source->GetSize();
    gfx::Rect rect;
    if (mHasPictureRect) {
      effect->mTextureCoords = Rect(Float(mPictureRect.x) / textureSize.width,
                                    Float(mPictureRect.y) / textureSize.height,
                                    Float(mPictureRect.width) / textureSize.width,
                                    Float(mPictureRect.height) / textureSize.height);
      rect = pictureRect;
    } else {
      effect->mTextureCoords = Rect(0, 0, 1, 1);
      rect = gfx::Rect(0, 0, textureSize.width, textureSize.height);
    }

    if (mFrontBuffer->GetFlags() & TEXTURE_NEEDS_Y_FLIP) {
      effect->mTextureCoords.y = effect->mTextureCoords.YMost();
      effect->mTextureCoords.height = -effect->mTextureCoords.height;
    }

    GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                              aOpacity, aTransform, aOffset);
    GetCompositor()->DrawDiagnostics(DIAGNOSTIC_IMAGE,
                                     rect, aClipRect,
                                     aTransform, aOffset);
  }
  mFrontBuffer->Unlock();
}
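// A minimal sketch of the texture-coordinate mapping used above: a picture
// rectangle given in pixels is converted into the [0,1] texture-coordinate
// range so that only the valid sub-region of the texture is sampled. The
// PixelRect/TexCoords structs are illustrative stand-ins for the gfx types.
struct PixelRect { float x, y, width, height; };
struct TexCoords { float x, y, width, height; };

static TexCoords PictureRectToTexCoords(const PixelRect& aPicture,
                                        float aTextureWidth, float aTextureHeight)
{
  TexCoords tc;
  tc.x = aPicture.x / aTextureWidth;
  tc.y = aPicture.y / aTextureHeight;
  tc.width = aPicture.width / aTextureWidth;
  tc.height = aPicture.height / aTextureHeight;
  return tc;
}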
Example #19
0
void
ContentHostIncremental::Composite(EffectChain& aEffectChain,
                                  float aOpacity,
                                  const gfx::Matrix4x4& aTransform,
                                  const Filter& aFilter,
                                  const Rect& aClipRect,
                                  const nsIntRegion* aVisibleRegion)
{
  NS_ASSERTION(aVisibleRegion, "Requires a visible region");

  AutoLockCompositableHost lock(this);
  if (lock.Failed()) {
    return;
  }

  if (!mSource) {
    return;
  }

  RefPtr<TexturedEffect> effect = CreateTexturedEffect(mSource.get(),
                                                       mSourceOnWhite.get(),
                                                       aFilter, true);
  if (!effect) {
    return;
  }

  aEffectChain.mPrimaryEffect = effect;

  nsIntRegion tmpRegion;
  const nsIntRegion* renderRegion;
  if (PaintWillResample()) {
    // If we're resampling, then the texture image will contain exactly the
    // entire visible region's bounds, and we should draw it all in one quad
    // to avoid unexpected aliasing.
    tmpRegion = aVisibleRegion->GetBounds();
    renderRegion = &tmpRegion;
  } else {
    renderRegion = aVisibleRegion;
  }

  nsIntRegion region(*renderRegion);
  nsIntPoint origin = GetOriginOffset();
  // translate into TexImage space, buffer origin might not be at texture (0,0)
  region.MoveBy(-origin);

  // Figure out the intersecting draw region
  gfx::IntSize texSize = mSource->GetSize();
  nsIntRect textureRect = nsIntRect(0, 0, texSize.width, texSize.height);
  textureRect.MoveBy(region.GetBounds().TopLeft());
  nsIntRegion subregion;
  subregion.And(region, textureRect);
  if (subregion.IsEmpty()) {
    // Region is empty, nothing to draw
    return;
  }

  nsIntRegion screenRects;
  nsIntRegion regionRects;

  // Collect texture/screen coordinates for drawing
  nsIntRegionRectIterator iter(subregion);
  while (const nsIntRect* iterRect = iter.Next()) {
    nsIntRect regionRect = *iterRect;
    nsIntRect screenRect = regionRect;
    screenRect.MoveBy(origin);

    screenRects.Or(screenRects, screenRect);
    regionRects.Or(regionRects, regionRect);
  }

  BigImageIterator* bigImgIter = mSource->AsBigImageIterator();
  BigImageIterator* iterOnWhite = nullptr;
  if (bigImgIter) {
    bigImgIter->BeginBigImageIteration();
  }

  if (mSourceOnWhite) {
    iterOnWhite = mSourceOnWhite->AsBigImageIterator();
    MOZ_ASSERT(!bigImgIter || bigImgIter->GetTileCount() == iterOnWhite->GetTileCount(),
               "Tile count mismatch on component alpha texture");
    if (iterOnWhite) {
      iterOnWhite->BeginBigImageIteration();
    }
  }

  bool usingTiles = (bigImgIter && bigImgIter->GetTileCount() > 1);
  do {
    if (iterOnWhite) {
      MOZ_ASSERT(iterOnWhite->GetTileRect() == bigImgIter->GetTileRect(),
                 "component alpha textures should be the same size.");
    }

    nsIntRect texRect = bigImgIter ? bigImgIter->GetTileRect()
                                   : nsIntRect(0, 0,
                                               texSize.width,
                                               texSize.height);

    // Draw texture. If we're using tiles, we do repeating manually, as texture
    // repeat would cause each individual tile to repeat instead of the
    // compound texture as a whole. This involves drawing at most 4 sections,
    // 2 for each axis that has texture repeat.
    for (int y = 0; y < (usingTiles ? 2 : 1); y++) {
      for (int x = 0; x < (usingTiles ? 2 : 1); x++) {
        nsIntRect currentTileRect(texRect);
        currentTileRect.MoveBy(x * texSize.width, y * texSize.height);

        nsIntRegionRectIterator screenIter(screenRects);
        nsIntRegionRectIterator regionIter(regionRects);

        const nsIntRect* screenRect;
        const nsIntRect* regionRect;
        while ((screenRect = screenIter.Next()) &&
               (regionRect = regionIter.Next())) {
          nsIntRect tileScreenRect(*screenRect);
          nsIntRect tileRegionRect(*regionRect);

          // When we're using tiles, find the intersection between the tile
          // rect and this region rect. Tiling is then handled by the
          // outer for-loops and modifying the tile rect.
          if (usingTiles) {
            tileScreenRect.MoveBy(-origin);
            tileScreenRect = tileScreenRect.Intersect(currentTileRect);
            tileScreenRect.MoveBy(origin);

            if (tileScreenRect.IsEmpty())
              continue;

            tileRegionRect = regionRect->Intersect(currentTileRect);
            tileRegionRect.MoveBy(-currentTileRect.TopLeft());
          }
          gfx::Rect rect(tileScreenRect.x, tileScreenRect.y,
                         tileScreenRect.width, tileScreenRect.height);

          effect->mTextureCoords = Rect(Float(tileRegionRect.x) / texRect.width,
                                        Float(tileRegionRect.y) / texRect.height,
                                        Float(tileRegionRect.width) / texRect.width,
                                        Float(tileRegionRect.height) / texRect.height);
          GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain, aOpacity, aTransform);
          if (usingTiles) {
            DiagnosticFlags diagnostics = DiagnosticFlags::CONTENT | DiagnosticFlags::BIGIMAGE;
            if (iterOnWhite) {
              diagnostics |= DiagnosticFlags::COMPONENT_ALPHA;
            }
            GetCompositor()->DrawDiagnostics(diagnostics, rect, aClipRect,
                                             aTransform, mFlashCounter);
          }
        }
      }
    }

    if (iterOnWhite) {
      iterOnWhite->NextTile();
    }
  } while (usingTiles && bigImgIter->NextTile());

  if (bigImgIter) {
    bigImgIter->EndBigImageIteration();
  }
  if (iterOnWhite) {
    iterOnWhite->EndBigImageIteration();
  }

  DiagnosticFlags diagnostics = DiagnosticFlags::CONTENT;
  if (iterOnWhite) {
    diagnostics |= DiagnosticFlags::COMPONENT_ALPHA;
  }
  GetCompositor()->DrawDiagnostics(diagnostics, nsIntRegion(mBufferRect), aClipRect,
                                   aTransform, mFlashCounter);
}
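// A standalone sketch (simplified rect type, not the Mozilla one) of the
// manual repeat performed above: rather than relying on GL texture repeat,
// which would repeat each tile individually, the tile rect is shifted by the
// full texture size along each axis and intersected with the draw rect,
// yielding at most four sub-draws per region rect.
#include <algorithm>
#include <vector>

struct SimpleIntRect {
  int x, y, width, height;
  SimpleIntRect Intersect(const SimpleIntRect& o) const {
    const int x0 = std::max(x, o.x), y0 = std::max(y, o.y);
    const int x1 = std::min(x + width, o.x + o.width);
    const int y1 = std::min(y + height, o.y + o.height);
    SimpleIntRect r = { x0, y0, std::max(0, x1 - x0), std::max(0, y1 - y0) };
    return r;
  }
  bool IsEmpty() const { return width <= 0 || height <= 0; }
};

static std::vector<SimpleIntRect> WrappedTileDraws(const SimpleIntRect& aTileRect,
                                                   int aTexWidth, int aTexHeight,
                                                   const SimpleIntRect& aDrawRect)
{
  std::vector<SimpleIntRect> draws;
  for (int y = 0; y < 2; ++y) {
    for (int x = 0; x < 2; ++x) {
      SimpleIntRect shifted = aTileRect;
      shifted.x += x * aTexWidth;        // wrap the tile across each axis
      shifted.y += y * aTexHeight;
      SimpleIntRect clipped = shifted.Intersect(aDrawRect);
      if (!clipped.IsEmpty())
        draws.push_back(clipped);
    }
  }
  return draws;
}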
Example #20
0
bool BAG::predict_(VectorFloat &inputVector){
    
    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }
    
    predictedClassLabel = 0;
    maxLikelihood = -10000;
    
    if( !trained ) return false;
    
    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << std::endl;
        return false;
    }
    
    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }
    
    if( classLikelihoods.getSize() != numClasses ) classLikelihoods.resize(numClasses);
    if( classDistances.getSize() != numClasses ) classDistances.resize(numClasses);
    
    //Reset the likelihoods and distances
    for(UINT k=0; k<numClasses; k++){
        classLikelihoods[k] = 0;
        classDistances[k] = 0;
    }
    
    //Run the prediction for each classifier
    Float sum = 0;
    UINT ensembleSize = ensemble.getSize();
    for(UINT i=0; i<ensembleSize; i++){
        
        if( !ensemble[i]->predict(inputVector) ){
            errorLog << "predict_(VectorFloat &inputVector) - The " << i << " classifier in the ensemble failed prediction!" << std::endl;
            return false;
        }
        
        classLikelihoods[ getClassLabelIndexValue( ensemble[i]->getPredictedClassLabel() ) ] += weights[i];
        classDistances[ getClassLabelIndexValue( ensemble[i]->getPredictedClassLabel() ) ] += ensemble[i]->getMaximumLikelihood() * weights[i];
        
        sum += weights[i];
    }
    
    //Set the predicted class label as the most common class
    Float maxCount = 0;
    UINT maxIndex = 0;
    for(UINT i=0; i<numClasses; i++){
        if( classLikelihoods[i] > maxCount ){
            maxIndex = i;
            maxCount = classLikelihoods[i];
        }
        classLikelihoods[i] /= sum;
        classDistances[i] /= Float(ensembleSize);
    }
    
    predictedClassLabel = classLabels[ maxIndex ];
    maxLikelihood = classLikelihoods[ maxIndex ];
    
    return true;
}
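// A minimal sketch of the weighted vote performed above, stripped of the GRT
// classifier machinery: each ensemble member votes for one class index with
// its weight, the class with the largest accumulated weight wins, and the
// accumulated weights are then normalized into likelihoods. WeightedVote is
// an illustrative helper, not a GRT API; likelihoods must be pre-sized to the
// number of classes and zeroed by the caller.
#include <vector>

static size_t WeightedVote(const std::vector<size_t>& predictedIndices,
                           const std::vector<float>& weights,
                           std::vector<float>& likelihoods)
{
    if (likelihoods.empty() || predictedIndices.size() != weights.size()) return 0;
    
    float sum = 0;
    for (size_t i = 0; i < predictedIndices.size(); ++i) {
        likelihoods[predictedIndices[i]] += weights[i];
        sum += weights[i];
    }
    
    size_t best = 0;
    for (size_t k = 1; k < likelihoods.size(); ++k) {
        if (likelihoods[k] > likelihoods[best]) best = k;
    }
    for (size_t k = 0; k < likelihoods.size(); ++k) {
        likelihoods[k] /= sum > 0 ? sum : 1;
    }
    return best;
}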
    vector< SLargeScaleOpt >
    ParameterOptimizationServer< SamplePointT, SamplePointGrid >
    ::GetLargeVolumeCandidates( const SEnergyOpt            &oStartEnergyLoc,
                                const vector<SDetParamMsg>  &vDetParams,
                                const vector<SStepSizeInfo> & vOptMaxDeviation )
    {
      typedef GeometricOptimizationBase<SamplePointT> Base;
      VoxelQueue.RandomizeReset();
      vector<SamplePointT> oLargeSearchList;
      
      VoxelQueue.Get( LocalSetup.InputParameters().nParamMCGlobalSearchElements, std::back_inserter( oLargeSearchList ) ); 
      vector<SLargeScaleOpt>        vCandidates;
      vector<vector<SDetParamMsg> > oDetParamList( nProcessingElements );
      vector<Float>                 oEnergyList  ( nProcessingElements );

      GET_LOG( osLogFile ) << "Large Scale Volume Candidate Search " << std::endl;
      GET_LOG( osLogFile ) << "Beam Energy " << oStartEnergyLoc.fBeamEnergy << std::endl;

      for( Size_Type i = 0; i < vOptMaxDeviation.size(); i ++ )
      {
        GET_LOG( osLogFile ) << "---------------------" << std::endl;
        GET_LOG( osLogFile ) << "Euler Angles Steps:         " << vOptMaxDeviation[i].oEulerSteps << std::endl;
        GET_LOG( osLogFile ) << "vOptMaxDeviation[i].fBeamCenterJ : " << vOptMaxDeviation[i].fBeamCenterJ << std::endl;
        GET_LOG( osLogFile ) << "vOptMaxDeviation[i].fBeamCenterK : " << vOptMaxDeviation[i].fBeamCenterK << std::endl;
        GET_LOG( osLogFile ) << "vOptMaxDeviation[i].xPos :         " << vOptMaxDeviation[i].oDetectorPos.m_fX << std::endl;
        GET_LOG( osLogFile ) << "---------------------" << std::endl;
      }

  
      for( Int nClientID = 1; nClientID < nProcessingElements; nClientID ++ )
      {
        vector<SDetParamMsg> vNewDetectorLoc;
        SEnergyOpt oNewEnergyLoc;
        if ( nClientID > 1 ) // have one of them start from the origin
        {
          vNewDetectorLoc           = Base::RandomMoveDet( vDetParams, vOptMaxDeviation );
          oNewEnergyLoc.fBeamEnergy = oStartEnergyLoc.fBeamEnergy
                                    + oRandomReal( -oStartEnergyLoc.fEnergyStep, oStartEnergyLoc.fEnergyStep );
        }
        else
        {
          vNewDetectorLoc           = vDetParams;
          oNewEnergyLoc.fBeamEnergy = oStartEnergyLoc.fBeamEnergy;
        }
      
        oEnergyList  [ nClientID ] = oNewEnergyLoc.fBeamEnergy;
        oDetParamList[ nClientID ] = vNewDetectorLoc;
      
        Base::Comm.SendCommand( 0, nClientID, XDMParallel::SET_EXP_PARAM );
        Base::SendExpParameters( nClientID, oNewEnergyLoc, vNewDetectorLoc );
        GET_LOG( osLogFile ) << " Sending " << oLargeSearchList.size() << " voxels to client " << nClientID << std::endl;
        Base::Comm.SendCommand( 0, nClientID, XDMParallel::FIT_MC_LIST );
        Base::Comm.SendWorkUnitList( nClientID, oLargeSearchList );
      }
    
      // listen for result
      Int nClientsLeft = nProcessingElements - 1;
      vector<SLargeScaleOpt> oCandidateList;
      while( nClientsLeft > 0 )
      {
        Int nCommand, nClientID;
        Base::Comm.RecvCommand( &nClientID, &nCommand );
        RUNTIME_ASSERT( nCommand == XDMParallel::REPORT_MC_LIST, "Server ERROR!  Wrong command recv'd \n");
        vector< SParamOptMsg<SamplePointT> > vOptResults;
        Base::Comm.RecvWorkUnitList( nClientID, vOptResults );
      
        // calculate cost
        Float fCost   = 0;
        Int nFitted   = 0;
        Int nUnfitted = 0;
        for( Size_Type i = 0; i < vOptResults.size(); i ++ )
        {
          if( vOptResults[i].bConverged )
          {
            fCost += Float(1) - vOptResults[i].oOverlapInfo.fQuality;
            nFitted ++;
          }
          else
          {
            fCost += Float(1);
            nUnfitted ++;
          }
        }
        GET_LOG( osLogFile ) << "Client " << nClientID << " Fitted "
                             << nFitted << " Unfitted " << nUnfitted << " cost = " << fCost << std::endl; 
        if( fCost < static_cast<Float>( oLargeSearchList.size() ) )
        {
          SLargeScaleOpt oNewPoint;
          oNewPoint.fCost      = fCost / static_cast<Float>( oLargeSearchList.size() );
          oNewPoint.fEnergy    = oEnergyList[ nClientID ];
          oNewPoint.vDetParams = oDetParamList[ nClientID ];
          vCandidates.push_back( oNewPoint );
        }
        nClientsLeft --;
      }
      GET_LOG( osLogFile ) << " Finished Large Scale Optimization: Num Candidates = " << vCandidates.size() << std::endl;
      return vCandidates;
    }
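// A small sketch of the candidate cost used above: each converged voxel
// contributes (1 - quality) and each unconverged voxel contributes 1, and a
// parameter candidate is kept only if its total cost beats the trivial
// all-unfitted cost. OptResult and EvaluateCandidate are illustrative
// stand-ins, not part of the original code.
#include <vector>

struct OptResult { bool bConverged; float fQuality; };

static bool EvaluateCandidate(const std::vector<OptResult>& results,
                              float& normalizedCost)
{
  float cost = 0;
  for (size_t i = 0; i < results.size(); ++i)
    cost += results[i].bConverged ? (1.0f - results[i].fQuality) : 1.0f;

  if (results.empty() || cost >= static_cast<float>(results.size()))
    return false;   // no better than fitting nothing at all
  normalizedCost = cost / static_cast<float>(results.size());
  return true;
}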
Example #22
0
bool BernoulliRBM::train_(MatrixFloat &data){
    
    const UINT numTrainingSamples = data.getNumRows();
    numInputDimensions = data.getNumCols();
    numOutputDimensions = numHiddenUnits;
    numVisibleUnits = numInputDimensions;
    
    trainingLog << "NumInputDimensions: " << numInputDimensions << std::endl;
    trainingLog << "NumOutputDimensions: " << numOutputDimensions << std::endl;
    
    if( randomizeWeightsForTraining ){
        
        //Init the weights matrix
        weightsMatrix.resize(numHiddenUnits, numVisibleUnits);
        
        Float a = 1.0 / numVisibleUnits;
        for(UINT i=0; i<numHiddenUnits; i++) {
            for(UINT j=0; j<numVisibleUnits; j++) {
                weightsMatrix[i][j] = rand.getRandomNumberUniform(-a, a);
            }
        }
        
        //Init the bias units
        visibleLayerBias.resize( numVisibleUnits );
        hiddenLayerBias.resize( numHiddenUnits );
        std::fill(visibleLayerBias.begin(),visibleLayerBias.end(),0);
        std::fill(hiddenLayerBias.begin(),hiddenLayerBias.end(),0);
        
    }else{
        if( weightsMatrix.getNumRows() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix row size does not match the number of hidden units!" << std::endl;
            return false;
        }
        if( weightsMatrix.getNumCols() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Weights matrix row size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( visibleLayerBias.size() != numVisibleUnits ){
            errorLog << "train_(MatrixFloat &data) - Visible layer bias size does not match the number of visible units!" << std::endl;
            return false;
        }
        if( hiddenLayerBias.size() != numHiddenUnits ){
            errorLog << "train_(MatrixFloat &data) - Hidden layer bias size does not match the number of hidden units!" << std::endl;
            return false;
        }
    }
    
    //Flag that the model has been trained in case the user wants to save the model during a training iteration using an observer
    trained = true;
    
    //Make sure the data is scaled between [0 1]
    ranges = data.getRanges();
    if( useScaling ){
        for(UINT i=0; i<numTrainingSamples; i++){
            for(UINT j=0; j<numInputDimensions; j++){
                data[i][j] = grt_scale(data[i][j], ranges[j].minValue, ranges[j].maxValue, 0.0, 1.0);
            }
        }
    }
    
    
    const UINT numBatches = static_cast<UINT>( ceil( Float(numTrainingSamples)/batchSize ) );
    
    //Set up the batch indices
    Vector< BatchIndexs > batchIndexs( numBatches );
    UINT startIndex = 0;
    for(UINT i=0; i<numBatches; i++){
        batchIndexs[i].startIndex = startIndex;
        batchIndexs[i].endIndex = startIndex + batchSize;
        
        //Make sure the last batch end index is not larger than the number of training examples
        if( batchIndexs[i].endIndex >= numTrainingSamples ){
            batchIndexs[i].endIndex = numTrainingSamples;
        }
        
        //Get the batch size
        batchIndexs[i].batchSize = batchIndexs[i].endIndex - batchIndexs[i].startIndex;
        
        //Set the start index for the next batch
        startIndex = batchIndexs[i].endIndex;
    }
    
    Timer timer;
    UINT i,j,n,epoch,noChangeCounter = 0;
    Float startTime = 0;
    Float alpha = learningRate;
    Float error = 0;
    Float err = 0;
    Float delta = 0;
    Float lastError = 0;
    Vector< UINT > indexList(numTrainingSamples);
    TrainingResult trainingResult;
    MatrixFloat wT( numVisibleUnits, numHiddenUnits );       //Stores a transposed copy of the weights vector
    MatrixFloat vW( numHiddenUnits, numVisibleUnits );       //Stores the weight velocity updates
    MatrixFloat tmpW( numHiddenUnits, numVisibleUnits );     //Stores the weight values that will be used to update the main weights matrix at each batch update
    MatrixFloat v1( batchSize, numVisibleUnits );            //Stores the real batch data during a batch update
    MatrixFloat v2( batchSize, numVisibleUnits );            //Stores the sampled batch data during a batch update
    MatrixFloat h1( batchSize, numHiddenUnits );             //Stores the hidden states given v1 and the current weightsMatrix
    MatrixFloat h2( batchSize, numHiddenUnits );             //Stores the sampled hidden states given v2 and the current weightsMatrix
    MatrixFloat c1( numHiddenUnits, numVisibleUnits );       //Stores h1' * v1
    MatrixFloat c2( numHiddenUnits, numVisibleUnits );       //Stores h2' * v2
    MatrixFloat vDiff( batchSize, numVisibleUnits );         //Stores the difference between v1-v2
    MatrixFloat hDiff( batchSize, numHiddenUnits );          //Stores the difference between h1-h2
    MatrixFloat cDiff( numHiddenUnits, numVisibleUnits );    //Stores the difference between c1-c2
    VectorFloat vDiffSum( numVisibleUnits );                 //Stores the column sum of vDiff
    VectorFloat hDiffSum( numHiddenUnits );                  //Stores the column sum of hDiff
    VectorFloat visibleLayerBiasVelocity( numVisibleUnits ); //Stores the velocity update of the visibleLayerBias
    VectorFloat hiddenLayerBiasVelocity( numHiddenUnits );   //Stores the velocity update of the hiddenLayerBias
    
    //Set all the velocity weights to zero
    vW.setAllValues( 0 );
    std::fill(visibleLayerBiasVelocity.begin(),visibleLayerBiasVelocity.end(),0);
    std::fill(hiddenLayerBiasVelocity.begin(),hiddenLayerBiasVelocity.end(),0);
    
    //Randomize the order that the training samples will be used in
    for(UINT i=0; i<numTrainingSamples; i++) indexList[i] = i;
    if( randomiseTrainingOrder ){
        std::random_shuffle(indexList.begin(), indexList.end());
    }
    
    //Start the main training loop
    timer.start();
    for(epoch=0; epoch<maxNumEpochs; epoch++) {
        startTime = timer.getMilliSeconds();
        error = 0;
        
        //Randomize the batch order
        std::random_shuffle(batchIndexs.begin(),batchIndexs.end());
        
        //Run each of the batch updates
        for(UINT k=0; k<numBatches; k+=batchStepSize){
            
            //Resize the data matrices; they will only be resized if the number of rows/cols differs
            v1.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h1.resize( batchIndexs[k].batchSize, numHiddenUnits );
            v2.resize( batchIndexs[k].batchSize, numVisibleUnits );
            h2.resize( batchIndexs[k].batchSize, numHiddenUnits );
            
            //Setup the data pointers, using data pointers saves a few ms on large matrix updates
            Float **w_p = weightsMatrix.getDataPointer();
            Float **wT_p = wT.getDataPointer();
            Float **vW_p = vW.getDataPointer();
            Float **data_p = data.getDataPointer();
            Float **v1_p = v1.getDataPointer();
            Float **v2_p = v2.getDataPointer();
            Float **h1_p = h1.getDataPointer();
            Float **h2_p = h2.getDataPointer();
            Float *vlb_p = &visibleLayerBias[0];
            Float *hlb_p = &hiddenLayerBias[0];
            
            //Get the batch data
            UINT index = 0;
            for(i=batchIndexs[k].startIndex; i<batchIndexs[k].endIndex; i++){
                for(j=0; j<numVisibleUnits; j++){
                    v1_p[index][j] = data_p[ indexList[i] ][j];
                }
                index++;
            }
            
            //Copy a transposed version of the weights matrix, this is used to compute h1 and h2
            for(i=0; i<numHiddenUnits; i++)
            for(j=0; j<numVisibleUnits; j++)
            wT_p[j][i] = w_p[i][j];
            
            //Compute h1
            h1.multiple(v1, wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h1_p[n][i] = sigmoidRandom( h1_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute v2
            v2.multiple(h1, weightsMatrix);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numVisibleUnits; i++){
                    v2_p[n][i] = sigmoidRandom( v2_p[n][i] + vlb_p[i] );
                }
            }
            
            //Compute h2
            h2.multiple(v2,wT);
            for(n=0; n<batchIndexs[k].batchSize; n++){
                for(i=0; i<numHiddenUnits; i++){
                    h2_p[n][i] = grt_sigmoid( h2_p[n][i] + hlb_p[i] );
                }
            }
            
            //Compute c1, c2 and the difference between v1-v2
            c1.multiple(h1,v1,true);
            c2.multiple(h2,v2,true);
            vDiff.subtract(v1, v2);
            
            //Compute the sum of vdiff
            for(j=0; j<numVisibleUnits; j++){
                vDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    vDiffSum[j] += vDiff[i][j];
                }
            }
            
            //Compute the difference between h1 and h2
            hDiff.subtract(h1, h2);
            for(j=0; j<numHiddenUnits; j++){
                hDiffSum[j] = 0;
                for(i=0; i<batchIndexs[k].batchSize; i++){
                    hDiffSum[j] += hDiff[i][j];
                }
            }
            
            //Compute the difference between c1 and c2
            cDiff.subtract(c1,c2);
            
            //Update the weight velocities
            for(i=0; i<numHiddenUnits; i++){
                for(j=0; j<numVisibleUnits; j++){
                    vW_p[i][j] = ((momentum * vW_p[i][j]) + (alpha * cDiff[i][j])) / batchIndexs[k].batchSize;
                }
            }
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBiasVelocity[i] = ((momentum * visibleLayerBiasVelocity[i]) + (alpha * vDiffSum[i])) / batchIndexs[k].batchSize;
            }
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBiasVelocity[i] = ((momentum * hiddenLayerBiasVelocity[i]) + (alpha * hDiffSum[i])) / batchIndexs[k].batchSize;
            }
            
            //Update the weights
            weightsMatrix.add( vW );
            
            //Update the bias for the visible layer
            for(i=0; i<numVisibleUnits; i++){
                visibleLayerBias[i] += visibleLayerBiasVelocity[i];
            }
            
            //Update the bias for the hidden layer
            for(i=0; i<numHiddenUnits; i++){
                hiddenLayerBias[i] += hiddenLayerBiasVelocity[i];
            }
            
            //Compute the reconstruction error
            err = 0;
            for(i=0; i<batchIndexs[k].batchSize; i++){
                for(j=0; j<numVisibleUnits; j++){
                    err += SQR( v1[i][j] - v2[i][j] );
                }
            }
            
            error += err / batchIndexs[k].batchSize;
        }
        error /= numBatches;
        delta = lastError - error;
        lastError = error;
        
        trainingLog << "Epoch: " << epoch+1 << "/" << maxNumEpochs;
        trainingLog << " Epoch time: " << (timer.getMilliSeconds()-startTime)/1000.0 << " seconds";
        trainingLog << " Learning rate: " << alpha;
        trainingLog << " Momentum: " << momentum;
        trainingLog << " Average reconstruction error: " << error;
        trainingLog << " Delta: " << delta << std::endl;
        
        //Update the learning rate
        alpha *= learningRateUpdate;
        
        trainingResult.setClassificationResult(epoch, error, this);
        trainingResults.push_back(trainingResult);
        trainingResultsObserverManager.notifyObservers( trainingResult );
        
        //Check for convergence
        if( fabs(delta) < minChange ){
            if( ++noChangeCounter >= minNumEpochs ){
                trainingLog << "Stopping training. MinChange limit reached!" << std::endl;
                break;
            }
        }else noChangeCounter = 0;
        
    }
    trainingLog << "Training complete after " << epoch << " epochs. Total training time: " << timer.getMilliSeconds()/1000.0 << " seconds" << std::endl;
    
    trained = true;
    
    return true;
}
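// A self-contained sketch of the batch partitioning done at the top of the
// training loop above: split numSamples into ceil(numSamples / batchSize)
// contiguous batches, clamping the final batch to the number of samples.
// BatchRange and MakeBatches are illustrative names only.
#include <algorithm>
#include <cmath>
#include <vector>

struct BatchRange { unsigned startIndex, endIndex, batchSize; };

static std::vector<BatchRange> MakeBatches(unsigned numSamples, unsigned batchSize)
{
    std::vector<BatchRange> batches;
    if (numSamples == 0 || batchSize == 0) return batches;
    
    const unsigned numBatches =
        static_cast<unsigned>( std::ceil( float(numSamples) / float(batchSize) ) );
    batches.resize(numBatches);
    unsigned start = 0;
    for (unsigned i = 0; i < numBatches; ++i) {
        const unsigned end = std::min(start + batchSize, numSamples);
        batches[i].startIndex = start;
        batches[i].endIndex = end;
        batches[i].batchSize = end - start;
        start = end;
    }
    return batches;
}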
Example #23
0
/* Find the inflection points of a bezier curve. The number of inflection
 * points found is returned in aCount; a curve that is degenerate in such a way
 * that it is best approximated by a straight line yields no inflection points.
 *
 * The below algorithm was written by Jeff Muizelaar <*****@*****.**>, explanation follows:
 *
 * The lower inflection point is returned in aT1, the higher one in aT2. In the
 * case of a single inflection point this will be in aT1.
 *
 * The method is inspired by the algorithm in "analysis of inflection points for planar cubic bezier curve"
 *
 * Here are some differences between this algorithm and versions discussed elsewhere in the literature:
 *
 * Zhang et al. compute a0, d0 and e0 incrementally using the following formula:
 *
 * Point a0 = CP2 - CP1
 * Point a1 = CP3 - CP2
 * Point a2 = CP4 - CP1
 *
 * Point d0 = a1 - a0
 * Point d1 = a2 - a1
 *
 * Point e0 = d1 - d0
 *
 * this avoids any multiplications and may or may not be faster than the approach take below.
 *
 * "fast, precise flattening of cubic bezier path and ofset curves" by hain et. al
 * Point a = CP1 + 3 * CP2 - 3 * CP3 + CP4
 * Point b = 3 * CP1 - 6 * CP2 + 3 * CP3
 * Point c = -3 * CP1 + 3 * CP2
 * Point d = CP1
 * the a, b, c, d can be expressed in terms of a0, d0 and e0 defined above as:
 * c = 3 * a0
 * b = 3 * d0
 * a = e0
 *
 *
 * a = 3a = a.y * b.x - a.x * b.y
 * b = 3b = a.y * c.x - a.x * c.y
 * c = 9c = b.y * c.x - b.x * c.y
 *
 * The additional multiples of 3 cancel each other out as shown below:
 *
 * x = (-b + sqrt(b * b - 4 * a * c)) / (2 * a)
 * x = (-3 * b + sqrt(3 * b * 3 * b - 4 * a * 3 * 9 * c / 3)) / (2 * 3 * a)
 * x = 3 * (-b + sqrt(b * b - 4 * a * c)) / (2 * 3 * a)
 * x = (-b + sqrt(b * b - 4 * a * c)) / (2 * a)
 *
 * I haven't looked into whether the formulation of the quadratic formula in
 * Hain has any numerical advantages over the one used below.
 */
static inline void
FindInflectionPoints(const BezierControlPoints &aControlPoints,
                     Float *aT1, Float *aT2, uint32_t *aCount)
{
  // Find inflection points.
  // See www.faculty.idc.ac.il/arik/quality/appendixa.html for an explanation
  // of this approach.
  Point A = aControlPoints.mCP2 - aControlPoints.mCP1;
  Point B = aControlPoints.mCP3 - (aControlPoints.mCP2 * 2) + aControlPoints.mCP1;
  Point C = aControlPoints.mCP4 - (aControlPoints.mCP3 * 3) + (aControlPoints.mCP2 * 3) - aControlPoints.mCP1;

  Float a = Float(B.x) * C.y - Float(B.y) * C.x;
  Float b = Float(A.x) * C.y - Float(A.y) * C.x;
  Float c = Float(A.x) * B.y - Float(A.y) * B.x;

  if (a == 0) {
    // Not a quadratic equation.
    if (b == 0) {
      // Instead of a linear acceleration change we have a constant
      // acceleration change. This means the equation has no solution
      // and there are no inflection points, unless the constant is 0.
      // In that case the curve is a straight line, but we'll let
      // FlattenBezierCurveSegment deal with this.
      *aCount = 0;
      return;
    }
    *aT1 = -c / b;
    *aCount = 1;
    return;
  } else {
    Float discriminant = b * b - 4 * a * c;

    if (discriminant < 0) {
      // No inflection points.
      *aCount = 0;
    } else if (discriminant == 0) {
      *aCount = 1;
      *aT1 = -b / (2 * a);
    } else {
      /* Use the following formula for computing the roots:
       *
       * q = -1/2 * (b + sign(b) * sqrt(b^2 - 4ac))
       * t1 = q / a
       * t2 = c / q
       */
      Float q = sqrtf(discriminant);
      if (b < 0) {
        q = b - q;
      } else {
        q = b + q;
      }
      q *= Float(-1./2);

      *aT1 = q / a;
      *aT2 = c / q;
      if (*aT1 > *aT2) {
        std::swap(*aT1, *aT2);
      }
      *aCount = 2;
    }
  }

  return;
}
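// A standalone sketch of the root formulation described in the comment above:
// computing q = -1/2 * (b + sign(b) * sqrt(b^2 - 4ac)) and then t1 = q / a,
// t2 = c / q avoids the cancellation the textbook quadratic formula suffers
// when b^2 is much larger than 4ac. Assumes a != 0; StableQuadraticRoots is an
// illustrative name, not part of the original source.
#include <algorithm>
#include <cmath>

static int StableQuadraticRoots(float a, float b, float c, float& t1, float& t2)
{
  const float discriminant = b * b - 4 * a * c;
  if (discriminant < 0) {
    return 0;                        // no real roots
  }
  if (discriminant == 0) {
    t1 = -b / (2 * a);
    return 1;
  }

  float q = sqrtf(discriminant);
  q = (b < 0) ? (b - q) : (b + q);   // add sqrt with the sign of b: no cancellation
  q *= -0.5f;

  t1 = q / a;
  t2 = c / q;
  if (t1 > t2) {
    std::swap(t1, t2);
  }
  return 2;
}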
void
DrawTargetSkia::DrawSurfaceWithShadow(SourceSurface *aSurface,
                                      const Point &aDest,
                                      const Color &aColor,
                                      const Point &aOffset,
                                      Float aSigma,
                                      CompositionOp aOperator)
{
  MarkChanged();
  mCanvas->save(SkCanvas::kMatrix_SaveFlag);
  mCanvas->resetMatrix();

  uint32_t blurFlags = SkBlurMaskFilter::kHighQuality_BlurFlag |
                       SkBlurMaskFilter::kIgnoreTransform_BlurFlag;
  const SkBitmap& bitmap = static_cast<SourceSurfaceSkia*>(aSurface)->GetBitmap();
  SkShader* shader = SkShader::CreateBitmapShader(bitmap, SkShader::kClamp_TileMode, SkShader::kClamp_TileMode);
  SkMatrix matrix;
  matrix.reset();
  matrix.setTranslateX(SkFloatToScalar(aDest.x));
  matrix.setTranslateY(SkFloatToScalar(aDest.y));
  shader->setLocalMatrix(matrix);
  SkLayerDrawLooper* dl = new SkLayerDrawLooper;
  SkLayerDrawLooper::LayerInfo info;
  info.fPaintBits |= SkLayerDrawLooper::kShader_Bit;
  SkPaint *layerPaint = dl->addLayer(info);
  layerPaint->setShader(shader);

  info.fPaintBits = 0;
  info.fPaintBits |= SkLayerDrawLooper::kMaskFilter_Bit;
  info.fPaintBits |= SkLayerDrawLooper::kColorFilter_Bit;
  info.fColorMode = SkXfermode::kDst_Mode;
  info.fOffset.set(SkFloatToScalar(aOffset.x), SkFloatToScalar(aOffset.y));
  info.fPostTranslate = true;

  SkMaskFilter* mf = SkBlurMaskFilter::Create(aSigma, SkBlurMaskFilter::kNormal_BlurStyle, blurFlags);
  SkColor color = ColorToSkColor(aColor, 1);
  SkColorFilter* cf = SkColorFilter::CreateModeFilter(color, SkXfermode::kSrcIn_Mode);


  layerPaint = dl->addLayer(info);
  SkSafeUnref(layerPaint->setMaskFilter(mf));
  SkSafeUnref(layerPaint->setColorFilter(cf));
  layerPaint->setColor(color);
  
  // TODO: This is using the rasterizer to calculate an alpha mask
  // on both the shadow and normal layers. We should fix this
  // properly so it only happens for the shadow layer
  SkLayerRasterizer *raster = new SkLayerRasterizer();
  SkPaint maskPaint;
  SkSafeUnref(maskPaint.setShader(shader));
  raster->addLayer(maskPaint, 0, 0);
  
  SkPaint paint;
  paint.setAntiAlias(true);
  SkSafeUnref(paint.setRasterizer(raster));
  paint.setXfermodeMode(GfxOpToSkiaOp(aOperator));
  SkSafeUnref(paint.setLooper(dl));

  SkRect rect = RectToSkRect(Rect(Float(aDest.x), Float(aDest.y),
                                  Float(bitmap.width()), Float(bitmap.height())));
  mCanvas->drawRect(rect, paint);
  mCanvas->restore();
}
Example #25
0
 Expr::Expr(double val) : contents(new ExprContents(makeCast(Float(64).mlval, makeFloatImm(val)), Float(64))) {
     contents->isImmediate = true;
 }
Example #26
0
// Resize function
void OGLViewer::resizeGL(int w, int h)
{
	// Widget resize operations
	mViewCamera->resizeViewport(width() / Float(height()));
}
Example #27
0
void
AppendRoundedRectToPath(PathBuilder* aPathBuilder,
                        const Rect& aRect,
                        const RectCornerRadii& aRadii,
                        bool aDrawClockwise)
{
  // For CW drawing, this looks like:
  //
  //  ...******0**      1    C
  //              ****
  //                  ***    2
  //                     **
  //                       *
  //                        *
  //                         3
  //                         *
  //                         *
  //
  // Where 0, 1, 2, 3 are the control points of the Bezier curve for
  // the corner, and C is the actual corner point.
  //
  // At the start of the loop, the current point is assumed to be
  // the point adjacent to the top left corner on the top
  // horizontal.  Note that corner indices start at the top left and
  // continue clockwise, whereas in our loop i = 0 refers to the top
  // right corner.
  //
  // When going CCW, the control points are swapped, and the first
  // corner that's drawn is the top left (along with the top segment).
  //
  // There is considerable latitude in how one chooses the four
  // control points for a Bezier curve approximation to an ellipse.
  // For the overall path to be continuous and show no corner at the
  // endpoints of the arc, points 0 and 3 must be at the ends of the
  // straight segments of the rectangle; points 0, 1, and C must be
  // collinear; and points 3, 2, and C must also be collinear.  This
  // leaves only two free parameters: the ratio of the line segments
  // 01 and 0C, and the ratio of the line segments 32 and 3C.  See
  // the following papers for extensive discussion of how to choose
  // these ratios:
  //
  //   Dokken, Tor, et al. "Good approximation of circles by
  //      curvature-continuous Bezier curves."  Computer-Aided
  //      Geometric Design 7(1990) 33--41.
  //   Goldapp, Michael. "Approximation of circular arcs by cubic
  //      polynomials." Computer-Aided Geometric Design 8(1991) 227--238.
  //   Maisonobe, Luc. "Drawing an elliptical arc using polylines,
  //      quadratic, or cubic Bezier curves."
  //      http://www.spaceroots.org/documents/ellipse/elliptical-arc.pdf
  //
  // We follow the approach in section 2 of Goldapp (least-error,
  // Hermite-type approximation) and make both ratios equal to
  //
  //          2   2 + n - sqrt(2n + 28)
  //  alpha = - * ---------------------
  //          3           n - 4
  //
  // where n = 3( cbrt(sqrt(2)+1) - cbrt(sqrt(2)-1) ).
  //
  // This is the result of Goldapp's equation (10b) when the angle
  // swept out by the arc is pi/2, and the parameter "a-bar" is the
  // expression given immediately below equation (21).
  //
  // Using this value, the maximum radial error for a circle, as a
  // fraction of the radius, is on the order of 0.2 x 10^-3.
  // Neither Dokken nor Goldapp discusses error for a general
  // ellipse; Maisonobe does, but his choice of control points
  // follows different constraints, and Goldapp's expression for
  // 'alpha' gives much smaller radial error, even for very flat
  // ellipses, than Maisonobe's equivalent.
  //
  // For the various corners and for each axis, the sign of this
  // constant changes, or it might be 0 -- it's multiplied by the
  // appropriate multiplier from the list before using.

  const Float alpha = Float(0.55191497064665766025);

  typedef struct { Float a, b; } twoFloats;

  twoFloats cwCornerMults[4] = { { -1,  0 },    // cw == clockwise
                                 {  0, -1 },
                                 { +1,  0 },
                                 {  0, +1 } };
  twoFloats ccwCornerMults[4] = { { +1,  0 },   // ccw == counter-clockwise
                                  {  0, -1 },
                                  { -1,  0 },
                                  {  0, +1 } };

  twoFloats *cornerMults = aDrawClockwise ? cwCornerMults : ccwCornerMults;

  Point cornerCoords[] = { aRect.TopLeft(), aRect.TopRight(),
                           aRect.BottomRight(), aRect.BottomLeft() };

  Point pc, p0, p1, p2, p3;

  if (aDrawClockwise) {
    aPathBuilder->MoveTo(Point(aRect.X() + aRadii[RectCorner::TopLeft].width,
                               aRect.Y()));
  } else {
    aPathBuilder->MoveTo(Point(aRect.X() + aRect.Width() - aRadii[RectCorner::TopRight].width,
                               aRect.Y()));
  }

  for (int i = 0; i < 4; ++i) {
    // the corner index -- either 1 2 3 0 (cw) or 0 3 2 1 (ccw)
    int c = aDrawClockwise ? ((i+1) % 4) : ((4-i) % 4);

    // i+2 and i+3 respectively.  These are used to index into the corner
    // multiplier table, and were deduced by calculating out the long form
    // of each corner and finding a pattern in the signs and values.
    int i2 = (i+2) % 4;
    int i3 = (i+3) % 4;

    pc = cornerCoords[c];

    if (aRadii[c].width > 0.0 && aRadii[c].height > 0.0) {
      p0.x = pc.x + cornerMults[i].a * aRadii[c].width;
      p0.y = pc.y + cornerMults[i].b * aRadii[c].height;

      p3.x = pc.x + cornerMults[i3].a * aRadii[c].width;
      p3.y = pc.y + cornerMults[i3].b * aRadii[c].height;

      p1.x = p0.x + alpha * cornerMults[i2].a * aRadii[c].width;
      p1.y = p0.y + alpha * cornerMults[i2].b * aRadii[c].height;

      p2.x = p3.x - alpha * cornerMults[i3].a * aRadii[c].width;
      p2.y = p3.y - alpha * cornerMults[i3].b * aRadii[c].height;

      aPathBuilder->LineTo(p0);
      aPathBuilder->BezierTo(p1, p2, p3);
    } else {
      aPathBuilder->LineTo(pc);
    }
  }

  aPathBuilder->Close();
}
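// A small sketch verifying the hard-coded corner constant above against the
// closed form from Goldapp quoted in the comment:
//   alpha = 2/3 * (2 + n - sqrt(2n + 28)) / (n - 4),
//   n = 3 * (cbrt(sqrt(2) + 1) - cbrt(sqrt(2) - 1)).
// Evaluating this yields approximately 0.55191, matching the literal
// 0.55191497064665766025 used in AppendRoundedRectToPath.
#include <cmath>
#include <cstdio>

int main()
{
  const double n = 3.0 * (std::cbrt(std::sqrt(2.0) + 1.0) -
                          std::cbrt(std::sqrt(2.0) - 1.0));
  const double alpha =
      (2.0 / 3.0) * (2.0 + n - std::sqrt(2.0 * n + 28.0)) / (n - 4.0);
  std::printf("alpha = %.20f\n", alpha);
  return 0;
}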
Example #28
0
inline bool LeqZero(const Float &x)
{	return x <= Float(0); }
Example #29
0
void
DeprecatedImageHostSingle::Composite(EffectChain& aEffectChain,
                                     float aOpacity,
                                     const gfx::Matrix4x4& aTransform,
                                     const gfx::Point& aOffset,
                                     const gfx::Filter& aFilter,
                                     const gfx::Rect& aClipRect,
                                     const nsIntRegion* aVisibleRegion,
                                     TiledLayerProperties* aLayerProperties)
{
  if (!mDeprecatedTextureHost) {
    NS_WARNING("Can't composite an invalid or null DeprecatedTextureHost");
    return;
  }

  if (!mDeprecatedTextureHost->IsValid()) {
    NS_WARNING("Can't composite an invalid DeprecatedTextureHost");
    return;
  }

  if (!GetCompositor()) {
    // should only happen during tabswitch if async-video is still sending frames.
    return;
  }

  if (!mDeprecatedTextureHost->Lock()) {
    NS_ASSERTION(false, "failed to lock texture host");
    return;
  }

  RefPtr<TexturedEffect> effect =
    CreateTexturedEffect(mDeprecatedTextureHost, aFilter);

  aEffectChain.mPrimaryEffect = effect;

  TileIterator* it = mDeprecatedTextureHost->AsTileIterator();
  if (it) {
    it->BeginTileIteration();
    do {
      nsIntRect tileRect = it->GetTileRect();
      gfx::Rect rect(tileRect.x, tileRect.y, tileRect.width, tileRect.height);
      GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                                aOpacity, aTransform, aOffset);
      GetCompositor()->DrawDiagnostics(gfx::Color(0.5,0.0,0.0,1.0),
                                       rect, aClipRect, aTransform, aOffset);
    } while (it->NextTile());
    it->EndTileIteration();
  } else {
    IntSize textureSize = mDeprecatedTextureHost->GetSize();
    gfx::Rect rect(0, 0,
                   mPictureRect.width,
                   mPictureRect.height);
    if (mHasPictureRect) {
      effect->mTextureCoords = Rect(Float(mPictureRect.x) / textureSize.width,
                                    Float(mPictureRect.y) / textureSize.height,
                                    Float(mPictureRect.width) / textureSize.width,
                                    Float(mPictureRect.height) / textureSize.height);
    } else {
      effect->mTextureCoords = Rect(0, 0, 1, 1);
      rect = gfx::Rect(0, 0, textureSize.width, textureSize.height);
    }

    if (mDeprecatedTextureHost->GetFlags() & NeedsYFlip) {
      effect->mTextureCoords.y = effect->mTextureCoords.YMost();
      effect->mTextureCoords.height = -effect->mTextureCoords.height;
    }

    GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                              aOpacity, aTransform, aOffset);
    GetCompositor()->DrawDiagnostics(gfx::Color(1.0,0.1,0.1,1.0),
                                     rect, aClipRect, aTransform, aOffset);
  }

  mDeprecatedTextureHost->Unlock();
}
Size SampleRateConverter:: fillBuffer( SoundStream& outputStream, Index startIndex, Size numSamples )
{
	// Write no samples to the output stream if the input is either NULL or has no audio data remaining.
	if ( input == NULL || !input->hasOutputRemaining() )
		return 0;
	
	// Acquire the mutex which indicates that rendering parameters are either being used or changed.
	renderMutex.acquire();
	
	Float inputSampleRate = input->getSampleRate();
	
	// If the input sample rate is the same as the output sample rate,
	// pass the sound directly to the output and return.
	if ( sampleRate == inputSampleRate )
	{
		Size numSamplesRead = input->getSamples( outputStream, startIndex, numSamples );
		
		// Release the mutex which indicates that rendering parameters are either being used or changed.
		renderMutex.release();
		
		return numSamplesRead;
	}
	
	// If the input or output sample rate is zero, return that no samples were written.
	// This protects the rest of the sample interpolation method from divide-by-zero errors
	// and infinite loops, and is physically correct.
	if ( sampleRate == Float(0) || inputSampleRate == Float(0) )
	{
		// Release the mutex which indicates that rendering parameters are either being used or changed.
		renderMutex.release();
		
		return 0;
	}
	
	// Get the number of outputs and channels from the input object.
	Size numOutputs = input->getNumberOfOutputs();
	Size numChannels = input->getNumberOfChannels();
	
	// Compute the ratio of the input to output sample rates.
	Float sampleRateRatio = inputSampleRate/sampleRate;
	
	// Compute the number of samples to read from the input (to maintain real-time performance).
	Size numSamplesToRead = Size(Float(numSamples)*sampleRateRatio + subSampleOffset);
	
	// Get the audio from the input object in an internal audio stream.
	Size numSamplesRead = input->getSamples( inputStream, 2, numSamplesToRead );
	
	// Compute the number of samples that will be placed in the output stream.
	Size numOutputSamples;
	
	if ( numSamplesRead == numSamplesToRead )
		numOutputSamples = numSamples;
	else
		numOutputSamples = Size(Float(numSamplesRead)/sampleRateRatio);
	
	for ( Index i = 0; i < numOutputs; i++ )
	{
		SoundBuffer& inputBuffer = inputStream.getBuffer(i);
		SoundBuffer& outputBuffer = outputStream.getBuffer(i);
		
		for ( Index c = 0; c < numChannels; c++ )
		{
			Sample* firstInputSample = inputBuffer.getChannelStart(c);
			Sample* secondInputSample = firstInputSample + 1;
			const Sample* inputSample = secondInputSample;
			const Sample* const inputEnd = firstInputSample + 2 + numSamplesRead;
			Sample* outputSample = outputBuffer.getChannelStart(c) + startIndex;
			const Sample* const outputEnd = outputSample + numOutputSamples;
			
			// The interpolated input sample offset which will probably not be a whole number.
			Float currentInputSample = subSampleOffset;
			
			// The mathematical floor of the currentInputSample value.
			Float currentInputSampleIndex = Float(0);
			
			// Setup the input sample rate state.
			Sample lastInputSample = *firstInputSample;
			
			while ( outputSample != outputEnd )
			{
				// A value indicating how far the interpolation is between the last and current input samples.
				Float a = currentInputSample - currentInputSampleIndex;
				
				// Compute the output sample by linearly interpolating between the current and
				// previous input samples.
				*outputSample = sample::mix( sample::scale( *inputSample, a ), sample::scale( lastInputSample, (Float(1) - a) ) );
				
				// Increment the input sample position.
				currentInputSample += sampleRateRatio;
				
				// Update the current input sample state if necessary.
				while ( currentInputSample - currentInputSampleIndex >= Float(1) && inputSample + 1 < inputEnd )
				{
					currentInputSampleIndex += 1.0f;
					lastInputSample = *inputSample;
					inputSample++;
				}
				
				outputSample++;
			}
			
			*firstInputSample = lastInputSample;
			*secondInputSample = *inputSample;
		}
	}
	
	// Update the sub-sample offset value and normalize it to between 0 and 1.
	Float sampleOffset = subSampleOffset + Float(numOutputSamples)*sampleRateRatio;
	subSampleOffset = sampleOffset - math::floor(sampleOffset);
	
	// Release the mutex which indicates that rendering parameters are either being used or changed.
	renderMutex.release();
	
	return numOutputSamples;
}
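// A minimal, standalone sketch of the linear-interpolation resampling loop
// above, operating on a plain float buffer: each output sample blends the two
// neighbouring input samples at a fractional read position that advances by
// inputRate/outputRate per output sample. The buffer-edge state carrying and
// sub-sample offset bookkeeping of the full implementation are omitted;
// ResampleLinear is an illustrative name only.
#include <cstddef>
#include <vector>

static std::vector<float> ResampleLinear( const std::vector<float>& input,
										  float inputRate, float outputRate )
{
	std::vector<float> output;
	if ( input.size() < 2 || inputRate <= 0 || outputRate <= 0 )
		return output;
	
	const float ratio = inputRate / outputRate;
	const std::size_t numOutput = std::size_t( float(input.size() - 1) / ratio );
	output.resize( numOutput );
	
	float position = 0;
	for ( std::size_t i = 0; i < numOutput; i++ )
	{
		const std::size_t index = std::size_t( position );	// floor of the read position
		const float a = position - float(index);			// fractional part
		output[i] = input[index]*(1.0f - a) + input[index + 1]*a;
		position += ratio;
	}
	return output;
}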