Example #1
void testSoftICACostFunction()
{
  const int numPatches = 500; // 10000
  const int patchWidth = 9;
  MNISTSamplePatchesDataFunction mnistdf(numPatches, patchWidth);
  Config config;

  config.setValue("debugMode", true);
  config.setValue("addBiasTerm", false);
  config.setValue("meanStddNormalize", false);
  config.setValue("configurePolicyTesting", false);
  config.setValue("trainingMeanAndStdd", false);

  updateMNISTConfig(config);
  mnistdf.configure(&config);

  const int numFeatures = 5; // 50
  const double lambda = 0.5;
  const double epsilon = 1e-2;

  SoftICACostFunction sf(numFeatures, lambda, epsilon);

  Vector_t theta = sf.configure(mnistdf.getTrainingX(), mnistdf.getTrainingY());

  std::cout << "theta: " << theta.size() << std::endl;
  Vector_t grad;
  double cost = sf.evaluate(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY(), grad);

  std::cout << "cost: " << cost << std::endl;
  std::cout << "grad: " << grad.size() << std::endl;

  double error = sf.getNumGrad(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY(), 10);
  std::cout << "error: " << error << std::endl;

}
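A note on the check above: getNumGrad presumably compares the analytic gradient returned by evaluate() against a finite-difference estimate. A minimal sketch of the usual central-difference check, assuming a cost callable f(theta) (the names below are illustrative, not part of the library):

#include <Eigen/Dense>
#include <functional>

// Relative error between an analytic gradient and its central-difference
// estimate; ~1e-9 or less usually indicates a correct analytic gradient.
double numGradError(const std::function<double(const Eigen::VectorXd&)>& f,
                    const Eigen::VectorXd& theta, const Eigen::VectorXd& grad,
                    const double eps = 1e-4)
{
  Eigen::VectorXd numGrad(theta.size());
  for (int i = 0; i < theta.size(); ++i)
  {
    Eigen::VectorXd e = Eigen::VectorXd::Zero(theta.size());
    e(i) = eps;
    // f'(theta_i) ~ (f(theta + eps*e_i) - f(theta - eps*e_i)) / (2*eps)
    numGrad(i) = (f(theta + e) - f(theta - e)) / (2.0 * eps);
  }
  return (numGrad - grad).norm() / (numGrad + grad).norm();
}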
Example #2
void VectorTest::testResize()
{
	static const size_type staticSize = 10;
	static const size_type maxSize = staticSize * 2;
	typedef Vector<size_type, staticSize> Vector_t;

	Vector_t mutableVector;

	// Upsize.
	for (size_type i = 0; i < maxSize; ++i) {
		mutableVector.resize(i);
		TS_ASSERT_EQUALS(i, mutableVector.size());

		for (size_type j = 0; j < i; ++j) {
			TS_ASSERT_EQUALS(0U, mutableVector[j]);
		}
	}

	// Downsize.
	for (size_type i = maxSize - 1; i > 0; --i) {
		mutableVector.resize(i);
		TS_ASSERT_EQUALS(i, mutableVector.size());

		for (size_type j = 0; j < i; ++j) {
			TS_ASSERT_EQUALS(0U, mutableVector[j]);
		}
	}
}
Example #3
void testConvolutionalNeuralNetworkCostFunction()
{
  MNISTDataFunction mnistdf;
  Config config;
  updateMNISTConfig(config);
  config.setValue("addBiasTerm", false);
  config.setValue("meanStddNormalize", false);
  config.setValue("debugMode", true);
  mnistdf.configure(&config);

  const int imageDim = 28; // height/width of image
  const int filterDim = 9; // dimension of convolutional filter
  const int numFilters = 2; // number of convolutional filters
  const int poolDim = 5; // dimension of pooling area
  const int numClasses = 10; // number of classes to predict

  ConvolutionalNeuralNetworkCostFunction cnn(imageDim, filterDim, numFilters, poolDim, numClasses);
  Vector_t theta = cnn.configure(mnistdf.getTrainingX(), mnistdf.getTrainingY());

  std::cout << "theta: " << theta.size() << std::endl;
  Vector_t grad;
  double cost = cnn.evaluate(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY(), grad);

  std::cout << "cost: " << cost << std::endl;
  std::cout << "grad: " << grad.size() << std::endl;

  double error = cnn.getNumGrad(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY());
  std::cout << "error: " << error << std::endl;
}
Example #4
virtual void run()
{
    // Copy the shared vector and assign it back, then record the elapsed time.
    Vector_t copy;
    copy = *vector;
    *vector = copy;
    TEST( copy.size() >= vector->size( ));
    cTime_ = _clock.getTimef();
}
Example #5
void testEigenConstMap()
{
  Vector_t x(4);
  x << 1, 2, 3, 4;
  std::cout << x << std::endl;

  const Vector_t y = x;

  std::cout << y << std::endl;

  Matrix_t X = Eigen::Map<const Matrix_t>(y.data(), 2, 2);

  std::cout << X << std::endl;
}
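Worth noting: Eigen's default storage order is column-major, so (assuming Matrix_t is a plain dynamic Eigen matrix) the map fills columns first:

  // X ends up as
  //   1 3
  //   2 4
  // not the row-major [1 2; 3 4].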
Example #6
void DataMapping::deleteLaterFrom(View_t * view, Vector_t & vector)
{
    auto ownedViewIt = findUnique(vector, view);
    if (ownedViewIt == vector.end())
    {
        // Re-entered this function while calling view->dockWidgetParent()->close().
        return;
    }

    auto viewOwnership = std::move(*ownedViewIt);
    vector.erase(ownedViewIt);

    // Hand deletion to the Qt event loop. If the event loop is no longer running
    // (application shutdown), deletion will be handled in ~DataMapping.
    viewOwnership.release()->deleteLater();
}
Example #7
inline NT dot( const Vector_t& a, const Vector_t& b )
{
  const int d = a.size();
  NT result = 0;
  for ( int i = 0; i < d; ++i ) {
    result += a[i] * b[i];
  }
  return result;
}
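A minimal usage sketch, assuming NT is a scalar typedef and Vector_t is any container with size() and operator[] (both typedefs below are illustrative, not the file's real ones):

#include <cassert>
#include <vector>

typedef double NT;                 // illustrative scalar type
typedef std::vector<NT> Vector_t;  // illustrative; anything with size()/operator[] works

void dotUsageSketch()
{
  Vector_t a { 1, 2, 3 };
  Vector_t b { 4, 5, 6 };
  assert(dot(a, b) == 32); // 1*4 + 2*5 + 3*6
}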
Example #8
void testEigenMap()
{
  Vector_t theta;
  theta.setZero(4 * 5);
  for (int i = 0; i < theta.size(); ++i)
    theta(i) = i;

  std::cout << theta << std::endl;

  Eigen::Map<Matrix_t> Theta(theta.data(), 4, 5); // reshape

  std::cout << Theta << std::endl;

  Vector_t theta2(Eigen::Map<Vector_t>(Theta.data(), 5 * 4));

  std::cout << theta2 << std::endl;

}
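Note that Eigen::Map does not copy: Theta aliases theta's buffer, so writes through the map are visible in the vector (theta2 above, by contrast, is a genuine copy, made when the Map is assigned into a Vector_t). For instance, appended to the body above (assert from <cassert>):

  Theta(0, 0) = 100;        // a write through the map...
  assert(theta(0) == 100);  // ...is visible in the underlying vector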
Example #9
	Vector_s& ShellBuilder::createShells(size_t shellNO, int spaceDimension)
	{
		Vector_s * shells = new Vector_s();
		// shellIdx is only a candidate shell index, based on Fermat's theorem on sums
		// of two squares and Legendre's three-square theorem.
		int shellIdx = 1;
		while (shells->size() < shellNO)
		{
			Vector_t currentIntTuples = backtrackingMethod.decomposeByBacktracking(shellIdx,
				spaceDimension);
			if (currentIntTuples.size() != 0)
			{
				// Allocate the Shell only once the index is known to be representable,
				// so no Shell is leaked for non-representable indices.
				Shell * currentShell = new Shell();
				currentShell->setIntTuplesWithSwapsAndSignChange(currentIntTuples);
				shells->push_back(currentShell);
			}
			shellIdx++;
		}
		return *shells;
	}
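For reference, the two theorems the comment relies on, which make shellIdx only a candidate (not every integer is representable in the given dimension):

% Fermat: n is a sum of two squares iff every prime p = 3 (mod 4)
% divides n to an even power.
n = x^2 + y^2 \iff v_p(n) \text{ is even for every prime } p \mid n \text{ with } p \equiv 3 \pmod{4}

% Legendre: n is a sum of three squares iff n is not of the form 4^a(8b+7).
n = x^2 + y^2 + z^2 \iff n \ne 4^a(8b + 7) \text{ for all integers } a, b \ge 0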
Example #10
void VectorTest::testReserve()
{
	static const size_type staticSize = 10;
	static const size_type maxSize = staticSize * 2;
	typedef Vector<int, staticSize> Vector_t;

	Vector_t mutableVector;

	TS_ASSERT_EQUALS(staticSize, mutableVector.capacity());
	
	mutableVector.reserve(staticSize + 1);
	TS_ASSERT_EQUALS(staticSize + 1, mutableVector.capacity());

	mutableVector.reserve(maxSize);
	TS_ASSERT_EQUALS(maxSize, mutableVector.capacity());
}
Example #11
int main( int, char** )
{
#ifdef LUNCHBOX_USE_OPENMP
    const size_t nThreads = lunchbox::OMP::getNThreads() * 3;
#else
    const size_t nThreads = 16;
#endif

    std::cout << "       read,       write,        push,      copy,     erase, "
              << " flush/ms,  rd, other #threads" << std::endl;
    _runSerialTest< std::vector< size_t >, size_t >();
    _runSerialTest< Vector_t, size_t >();

    std::vector< Reader > readers(nThreads);
    std::vector< Writer > writers(nThreads);
    std::vector< Pusher > pushers(nThreads);

    stage_ = 1;
    size_t stage = 0;

    for( size_t l = 0; l < nThreads; ++l )
    {
        readers[l].start();
        writers[l].start();
        pushers[l].start();
    }
    lunchbox::sleep( 10 );

    for( size_t i = 1; i <= nThreads; i = i<<1 )
        for( size_t j = 1; j <= nThreads; j = j<<1 )
        {
            // concurrent read, write, push
            Vector_t vector;
            for( size_t k = 0; k < nThreads; ++k )
            {
                readers[k].vector = k < i ? &vector : 0;
                writers[k].vector = k < j ? &vector : 0;
                pushers[k].vector = k < j ? &vector : 0;
            }

            const size_t nextStage = ++stage * STAGESIZE;

            _clock.reset();
            stage_ = nextStage;
            stage_.waitEQ( nextStage + (3 * nThreads) );
            TEST( vector.size() >= LOOPSIZE );

            // multi-threaded copy
            std::vector< Copier > copiers(j);

            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                copiers[k].vector = &vector;
                copiers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                copiers[k].join();

            for( size_t k = 0; k < vector.size(); ++k )
                TEST( vector[k] == k || vector[k] == 0 );

            // multi-threaded erase
            std::vector< Eraser > erasers(j);

            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                erasers[k].vector = &vector;
                erasers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                erasers[k].join();

            for( size_t k = 0; k < vector.size(); ++k )
            {
                if( vector[k] == 0 )
                    break;
                if( k > vector.size() / 2 )
                {
                    TEST( vector[k] > vector[k-1] );
                }
                else
                {
                    TEST( vector[k] == k );
                }
            }

            // multi-threaded pop_back
            const size_t fOps = vector.size();
            std::vector< Flusher > flushers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                flushers[k].vector = &vector;
                flushers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                flushers[k].join();
            const float fTime = _clock.getTimef();
            TEST( vector.empty( ));

            std::cerr << std::setw(11) << float(i*LOOPSIZE)/rTime_ << ", "
                      << std::setw(11) << float(j*LOOPSIZE)/wTime_ << ", "
                      << std::setw(11) << float(LOOPSIZE)/pTime_ << ", "
                      << std::setw(9) << float(j)/cTime_ << ", "
                      << std::setw(9) << float(j)/eTime_ << ", "
                      << std::setw(9) << float(fOps)/fTime << ", "
                      << std::setw(3) << i << ", " << std::setw(3) << j
                      << std::endl;
        }

    stage_ = std::numeric_limits< size_t >::max();
    for( size_t k = 0; k < nThreads; ++k )
    {
        readers[k].join();
        writers[k].join();
        pushers[k].join();
    }

    return EXIT_SUCCESS;
}
Example #12
void VectorTest::testAddAndRead()
{
	static const size_type staticSize = 10;
	static const int maxSize = static_cast<int>(staticSize * 2);
	typedef Vector<int, staticSize> Vector_t;

	Vector_t mutableVector;

	TS_ASSERT(mutableVector.empty());
	TS_ASSERT_EQUALS(staticSize, mutableVector.capacity());

	for (int i = 0; i < maxSize; ++i) {
		mutableVector.push_back(i);

		const Vector_t constVector = mutableVector;

		TS_ASSERT(! mutableVector.empty());
		TS_ASSERT(! constVector.empty());

		TS_ASSERT_EQUALS(0, mutableVector.front());
		TS_ASSERT_EQUALS(i, mutableVector.back());

		TS_ASSERT_EQUALS(0, constVector.front());
		TS_ASSERT_EQUALS(i, constVector.back());

		for (int j = 0; j <= i; ++j) {
			TS_ASSERT_EQUALS(j, mutableVector[j]);
			TS_ASSERT_EQUALS(j, constVector[j]);
		}

		int ix = 0;
		Vector_t::const_iterator ii = constVector.begin();
		for (Vector_t::iterator jj = mutableVector.begin(); jj != mutableVector.end(); ++jj, ++ii) {
			TS_ASSERT_EQUALS(ix, *jj);
			TS_ASSERT_EQUALS(ix, *ii);
			++ix;
		}
	}
}
Example #13
void testStlDriver()
{
  const int numPatches = 200000;
  const int patchWidth = 9;

  const int numFeatures = 50;
  const double lambda = 0.0005;
  const double epsilon = 1e-2;
  Config config;
  config.setValue("addBiasTerm", false);
  config.setValue("meanStddNormalize", false);
  config.setValue("configurePolicyTesting", false);
  config.setValue("trainingMeanAndStdd", false);
  updateMNISTConfig(config);

  if (false)
  {
    MNISTSamplePatchesUnlabeledDataFunction mnistUnlabeled(numPatches, patchWidth);
    SoftICACostFunction sfc(numFeatures, lambda, epsilon);

    LIBLBFGSOptimizer lbfgs(200); // 1000
    Driver drv1(&config, &mnistUnlabeled, &sfc, &lbfgs);
    const Vector_t optThetaRica = drv1.drive();

    Matrix_t Wrica(
        Eigen::Map<const Matrix_t>(optThetaRica.data(), numFeatures, patchWidth * patchWidth));

    std::ofstream ofs_wrica("../W2.txt");
    ofs_wrica << Wrica << std::endl;
  }

  Matrix_t Wrica;
  // debug: read off the values
  std::ifstream in("/home/sam/School/online/stanford_dl_ex/W2.txt");
  if (in.is_open())
  {
    std::string str;
    int nbRows = 0;
    while (std::getline(in, str))
    {
      if (str.size() == 0)
        continue;
      std::istringstream iss(str);
      std::vector<double> tokens //
      { std::istream_iterator<double> { iss }, std::istream_iterator<double> { } };
      Wrica.conservativeResize(nbRows + 1, tokens.size());
      for (size_t i = 0; i < tokens.size(); ++i)
        Wrica(nbRows, i) = tokens[i];
      ++nbRows;
    }
  }
  else
  {
    std::cerr << "file W.txt failed" << std::endl;
    exit(EXIT_FAILURE);
  }

  const int imageDim = 28;
  Eigen::Vector2i imageConfig;
  imageConfig << imageDim, imageDim;

  const int numFilters = numFeatures;
  const int poolDim = 5;
  const int filterDim = patchWidth;
  const int convDim = (imageDim - filterDim + 1);
  assert(convDim % poolDim == 0);
  const int outputDim = (convDim / poolDim);
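  // With the values above: convDim = 28 - 9 + 1 = 20, which is divisible by
  // poolDim = 5, so the assert holds and outputDim = 20 / 5 = 4.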

  StlFilterFunction stlFilterFunction(filterDim, Wrica);
  SigmoidFunction sigmoidFunction;
  ConvolutionFunction convolutionFunction(&stlFilterFunction, &sigmoidFunction);
  MeanPoolFunction meanPoolFunction(numFilters, outputDim);

  MNISTSamplePatchesLabeledDataFunction mnistLabeled(&convolutionFunction, &meanPoolFunction,
      imageConfig, numFilters, poolDim, outputDim);

  SoftmaxCostFunction mnistcf(0.01f);
  LIBLBFGSOptimizer lbfgs2(300);
  config.setValue("configurePolicyTesting", false);
  config.setValue("trainingMeanAndStdd", true);
  config.setValue("meanStddNormalize", true);
  config.setValue("addBiasTerm", true);

  config.setValue("numGrd", true);
  config.setValue("training_accuracy", true);
  config.setValue("testing_accuracy", true);
  //config.setValue("addBiasTerm", false);
  Driver drv2(&config, &mnistLabeled, &mnistcf, &lbfgs2);
  drv2.drive();

}
Example #14
void testConvolutionAndPool()
{
  if (true)
  {
    const int filterDim = 8;
    const int imageDim = 28;
    const int poolDim = 3;
    const int numFilters = 100;
    const int outputDim = (imageDim - filterDim + 1) / poolDim;
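    // With the values above: outputDim = (28 - 8 + 1) / 3 = 21 / 3 = 7
    // (exact integer division), matching the asserts on the pooled output below.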
    RandomFilterFunction rff(filterDim, numFilters);
    rff.configure();

    SigmoidFunction sf;
    ConvolutionFunction cf(&rff, &sf);
    MeanPoolFunction pf(numFilters, outputDim);
    //MeanPoolFunction mpf;
    Eigen::Vector2i configImageDim;
    configImageDim << imageDim, imageDim;

    MNISTDataFunction mnistdf;
    Config config;
    updateMNISTConfig(config);
    config.setValue("addBiasTerm", false);
    config.setValue("meanStddNormalize", false);

    mnistdf.configure(&config);

    Convolutions* convolvedFeatures = nullptr;
    for (int i = 0; i < 13; ++i)
      convolvedFeatures = cf.conv(mnistdf.getTrainingX().topRows<199>(), configImageDim);

    assert(convolvedFeatures->unordered_map.size() == 199);
    for (auto i = convolvedFeatures->unordered_map.begin();
        i != convolvedFeatures->unordered_map.end(); ++i)
      assert((int )i->second.size() == rff.getWeights().cols());

    // Validate convolutions
    Matrix_t ConvImages = mnistdf.getTrainingX().topRows<8>();
    for (int i = 0; i < 1000; ++i)
    {
      const int filterNum = rand() % rff.getWeights().cols();
      const int imageNum = rand() % 8;
      const int imageRow = rand() % (configImageDim(0) - rff.getConfig()(0) + 1);
      const int imageCol = rand() % (configImageDim(1) - rff.getConfig()(1) + 1);

      Vector_t im = ConvImages.row(imageNum);
      Eigen::Map<Matrix_t> Image(im.data(), configImageDim(0), configImageDim(1));

      Matrix_t Patch = Image.block(imageRow, imageCol, rff.getConfig()(0), rff.getConfig()(1));

      // Reshape this filter's weights to 2-D and recompute the feature by hand
      Eigen::Map<Matrix_t> W(rff.getWeights().col(filterNum).data(), rff.getConfig()(0),
          rff.getConfig()(1));
      const double b = rff.getBiases()(filterNum);

      double feature = Patch.cwiseProduct(W).sum() + b;
      feature = 1.0 / (1.0 + exp(-feature));

      if (fabs(
          feature - convolvedFeatures->unordered_map[imageNum][filterNum]->X(imageRow, imageCol))
          > 1e-9)
      {
        std::cout << "Convolved feature does not match test feature: " << i << std::endl;
        std::cout << "Filter Number: " << filterNum << std::endl;
        std::cout << "Image Number: " << imageNum << std::endl;
        std::cout << "Image Row: " << imageRow << std::endl;
        std::cout << "Image Col: " << imageCol << std::endl;
        std::cout << "Convolved feature: "
            << convolvedFeatures->unordered_map[imageNum][filterNum]->X(imageRow, imageCol)
            << std::endl;
        std::cout << "Test feature: " << feature << std::endl;
        std::cout << "Convolved feature does not match test feature" << std::endl;
        exit(EXIT_FAILURE);
      }

    }

    // Pool
    Poolings* pooling = nullptr;
    for (int i = 0; i < 13; ++i)
      pooling = pf.pool(convolvedFeatures, poolDim);

    assert((int )pooling->unordered_map.size() == 199);
    for (auto iter = pooling->unordered_map.begin(); iter != pooling->unordered_map.end(); ++iter)
    {
      assert(iter->second.size() == (size_t )rff.getWeights().cols());
      for (auto iter2 = iter->second.begin(); iter2 != iter->second.end(); ++iter2)
      {
        assert(iter2->second->X.rows() == (configImageDim(0) - rff.getConfig()(0) + 1) / 3);
        assert(iter2->second->X.rows() == 7);
        assert(iter2->second->X.cols() == (configImageDim(0) - rff.getConfig()(0) + 1) / 3);
        assert(iter2->second->X.cols() == 7);
      }
    }

  }

  if (true)
  {
    // test pool function

    Vector_t testVec(64);
    for (int i = 0; i < testVec.size(); ++i)
      testVec(i) = i + 1;
    Eigen::Map<Matrix_t> TestMatrix(testVec.data(), 8, 8);

    std::cout << "TestMatrix: " << std::endl;
    std::cout << TestMatrix << std::endl;

    Matrix_t ExpectedMatrix(2, 2);
    ExpectedMatrix(0, 0) = TestMatrix.block(0, 0, 4, 4).array().mean();
    ExpectedMatrix(0, 1) = TestMatrix.block(0, 4, 4, 4).array().mean();
    ExpectedMatrix(1, 0) = TestMatrix.block(4, 0, 4, 4).array().mean();
    ExpectedMatrix(1, 1) = TestMatrix.block(4, 4, 4, 4).array().mean();
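    // The map is column-major, so TestMatrix(r, c) = 8*c + r + 1; e.g. the
    // top-left 4x4 block sums to 232, so ExpectedMatrix(0, 0) = 232/16 = 14.5.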

    std::cout << "Expected: " << std::endl;
    std::cout << ExpectedMatrix << std::endl;

    Convolutions cfs;
    Convolution xcf;
    xcf.X = TestMatrix;
    cfs.unordered_map[0].insert(std::make_pair(0, (&xcf)));

    MeanPoolFunction testMpf(1, 2);
    Poolings* pfs = testMpf.pool(&cfs, 4);

    assert(pfs->unordered_map.size() == 1);
    assert(pfs->unordered_map[0].size() == 1);
    Matrix_t PX = pfs->unordered_map[0][0]->X;

    std::cout << "Obtain: " << std::endl;
    std::cout << PX << std::endl;
  }

}
Example #15
short orient3d_triangle  (const GenericPointT& p1, 
			  const GenericPointT& p2, 
			  const GenericPointT& p3, 
			  const GenericPointT& ptn ) 
{

  // possible bounding box check
  // [RH] TODO
  //

  typedef GenericPointT Vector_t;

  Vector_t p12(p2 - p1);
  Vector_t p13(p3 - p1);
  Vector_t useNormal = p12.ex(p13);
  useNormal.normalize();
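  // Strategy (as read from the code below): for each triangle edge, take the
  // plane spanned by that edge and the (cross-product) normal direction, and
  // test whether ptn lies on the same side of it as the opposite vertex.
  // ptn is inside the infinite prism over the triangle iff all three
  // same-side tests agree; RH_ON flags the degenerate on-boundary case.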
        
  short ori1, ori2;

  // Test line p1 - p2
  GenericPointT temppoint (p1[0] + useNormal[0],
			   p1[1] + useNormal[1],
			   p1[2] + useNormal[2]);
  SwkMatrix3x3<GenericPointT, double>  swk_matrix1 (p1, p2,  temppoint, ptn);
  SwkMatrix3x3<GenericPointT, double>  swk_matrix2 (p1, p2,  temppoint, p3);
  ori1 = swk_matrix1.orient3D_short();
  ori2 = swk_matrix2.orient3D_short();
  //std::cout << "p12: ori1: " << ori1 << std::endl;
  //std::cout << "p12: ori2: " << ori2 << std::endl;
  if (ori1 == RH_ON || ori2 == RH_ON)
    return RH_ON;
  if (ori1 != ori2)
    return (RH_OUT);

  // Test line p1 - p3
  SwkMatrix3x3<GenericPointT, double>  swk_matrix3 (p1, p3,  temppoint, ptn);
  SwkMatrix3x3<GenericPointT, double>  swk_matrix4 (p1, p3,  temppoint, p2);
  ori1 = swk_matrix3.orient3D_short();
  ori2 = swk_matrix4.orient3D_short();
  //std::cout << "p13: ori1: " << ori1 << std::endl;
  //std::cout << "p13: ori2: " << ori2 << std::endl;
  if (ori1 == RH_ON || ori2 == RH_ON)
    return RH_ON;
  if (ori1 != ori2)
    return (RH_OUT);

  // Test line p2 - p3
  GenericPointT temppoint2 (p2[0] + useNormal[0],
			    p2[1] + useNormal[1],
			    p2[2] + useNormal[2]);
  SwkMatrix3x3<GenericPointT, double>  swk_matrix5 (p2, p3,  temppoint2, ptn);
  SwkMatrix3x3<GenericPointT, double>  swk_matrix6 (p2, p3,  temppoint2, p1);
  ori1 = swk_matrix5.orient3D_short();
  ori2 = swk_matrix6.orient3D_short();
  //std::cout << "p23: ori1: " << ori1 << std::endl;
  //std::cout << "p23: ori2: " << ori2 << std::endl;
  if (ori1 == RH_ON || ori2 == RH_ON)
    return RH_ON;
  if (ori1 != ori2)
    return (RH_OUT);

  
  return (RH_IN);
}