void VectorTest::testResize()
{
    static const size_type staticSize = 10;
    static const size_type maxSize = staticSize * 2;
    typedef Vector<size_type, staticSize> Vector_t;
    Vector_t mutableVector;

    // Upsize.
    for (size_type i = 0; i < maxSize; ++i)
    {
        mutableVector.resize(i);
        TS_ASSERT_EQUALS(i, mutableVector.size());
        for (size_type j = 0; j < i; ++j)
        {
            TS_ASSERT_EQUALS(0U, mutableVector[j]);
        }
    }

    // Downsize.
    for (size_type i = maxSize - 1; i > 0; --i)
    {
        mutableVector.resize(i);
        TS_ASSERT_EQUALS(i, mutableVector.size());
        for (size_type j = 0; j < i; ++j)
        {
            TS_ASSERT_EQUALS(0U, mutableVector[j]);
        }
    }
}
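// For reference, the semantics testResize() exercises mirror std::vector::resize:
// growing value-initializes the new tail elements to zero, shrinking keeps the
// surviving prefix. A minimal sketch against std::vector (hypothetical helper,
// assuming Vector is meant to match the standard behaviour):
#include <cassert>
#include <vector>

static void referenceResizeSemantics()
{
    std::vector< size_t > v;
    v.resize( 10 );                        // grow: new elements are zero
    for( size_t j = 0; j < v.size(); ++j )
        assert( v[j] == 0 );
    v[3] = 42;
    v.resize( 5 );                         // shrink: the prefix survives
    assert( v.size() == 5 && v[3] == 42 );
}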
void testSoftICACostFunction()
{
    const int numPatches = 500; // 10000
    const int patchWidth = 9;
    MNISTSamplePatchesDataFunction mnistdf(numPatches, patchWidth);

    Config config;
    config.setValue("debugMode", true);
    config.setValue("addBiasTerm", false);
    config.setValue("meanStddNormalize", false);
    config.setValue("configurePolicyTesting", false);
    config.setValue("trainingMeanAndStdd", false);
    updateMNISTConfig(config);
    mnistdf.configure(&config);

    const int numFeatures = 5; // 50
    const double lambda = 0.5;
    const double epsilon = 1e-2;
    SoftICACostFunction sf(numFeatures, lambda, epsilon);

    Vector_t theta = sf.configure(mnistdf.getTrainingX(), mnistdf.getTrainingY());
    std::cout << "theta: " << theta.size() << std::endl;

    Vector_t grad;
    double cost = sf.evaluate(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY(), grad);
    std::cout << "cost: " << cost << std::endl;
    std::cout << "grad: " << grad.size() << std::endl;

    double error = sf.getNumGrad(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY(), 10);
    std::cout << "error: " << error << std::endl;
}
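// getNumGrad() above presumably compares the analytic gradient with a
// finite-difference estimate and reports the deviation; its implementation is
// not shown here. A minimal central-difference sketch, assuming a cost functor
// double f(const Eigen::VectorXd&) (hypothetical names):
#include <Eigen/Dense>
#include <functional>

Eigen::VectorXd numericalGradient(
    const std::function< double( const Eigen::VectorXd& ) >& f,
    const Eigen::VectorXd& theta, const double h = 1e-4)
{
    Eigen::VectorXd grad = Eigen::VectorXd::Zero(theta.size());
    Eigen::VectorXd x = theta;
    for (int i = 0; i < theta.size(); ++i)
    {
        x(i) = theta(i) + h;
        const double fPlus = f(x);
        x(i) = theta(i) - h;
        const double fMinus = f(x);
        x(i) = theta(i); // restore before the next coordinate
        grad(i) = (fPlus - fMinus) / (2.0 * h); // central difference, O(h^2) error
    }
    return grad;
}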
void testConvolutionalNeuralNetworkCostFunction()
{
    MNISTDataFunction mnistdf;
    Config config;
    updateMNISTConfig(config);
    config.setValue("addBiasTerm", false);
    config.setValue("meanStddNormalize", false);
    config.setValue("debugMode", true);
    mnistdf.configure(&config);

    const int imageDim = 28;   // height/width of image
    const int filterDim = 9;   // dimension of convolutional filter
    const int numFilters = 2;  // number of convolutional filters
    const int poolDim = 5;     // dimension of pooling area
    const int numClasses = 10; // number of classes to predict
    ConvolutionalNeuralNetworkCostFunction cnn(imageDim, filterDim, numFilters, poolDim, numClasses);

    Vector_t theta = cnn.configure(mnistdf.getTrainingX(), mnistdf.getTrainingY());
    std::cout << "theta: " << theta.size() << std::endl;

    Vector_t grad;
    double cost = cnn.evaluate(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY(), grad);
    std::cout << "cost: " << cost << std::endl;
    std::cout << "grad: " << grad.size() << std::endl;

    double error = cnn.getNumGrad(theta, mnistdf.getTrainingX(), mnistdf.getTrainingY());
    std::cout << "error: " << error << std::endl;
}
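// With the parameters above, the layer sizes follow from a "valid" convolution
// and non-overlapping pooling; a sketch of the arithmetic, assuming that is
// what ConvolutionalNeuralNetworkCostFunction implements:
constexpr int convolvedDim( int imageDim, int filterDim )
{
    return imageDim - filterDim + 1; // "valid" convolution output size
}
constexpr int pooledOutputDim( int convDim, int poolDim )
{
    return convDim / poolDim; // poolDim is assumed to divide convDim evenly
}
static_assert( convolvedDim( 28, 9 ) == 20, "convolved feature map is 20x20" );
static_assert( pooledOutputDim( 20, 5 ) == 4, "pooled feature map is 4x4 per filter" );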
virtual void run()
{
    // Time a full copy round-trip of the shared vector: copy out, assign back.
    Vector_t copy;
    copy = *vector;
    *vector = copy;
    TEST( copy.size() >= vector->size( ));
    cTime_ = _clock.getTimef();
}
// Dot product of two vectors; assumes a and b have the same dimension.
inline NT dot( const Vector_t& a, const Vector_t& b )
{
    const int d = a.size();
    NT result = 0;
    for ( int i = 0; i < d; ++i )
    {
        result += a[i] * b[i];
    }
    return result;
}
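// An equivalent formulation via the standard library, behaviourally identical
// to the loop above assuming Vector_t exposes contiguous storage and both
// vectors share the same dimension:
#include <numeric>

inline NT dotStd( const Vector_t& a, const Vector_t& b )
{
    return std::inner_product( &a[0], &a[0] + a.size(), &b[0], NT( 0 ));
}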
void testEigenMap()
{
    Vector_t theta;
    theta.setZero(4 * 5);
    for (int i = 0; i < theta.size(); ++i)
        theta(i) = i;
    std::cout << theta << std::endl;

    Eigen::Map<Matrix_t> Theta(theta.data(), 4, 5); // reshape: view theta's storage as 4x5
    std::cout << Theta << std::endl;

    Vector_t theta2(Eigen::Map<Vector_t>(Theta.data(), 5 * 4)); // flatten back
    std::cout << theta2 << std::endl;
}
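// Eigen::Map creates a view, not a copy: writes through the map are visible in
// the mapped vector. A short aliasing check (assuming Matrix_t uses Eigen's
// default column-major storage):
#include <cassert>

void testEigenMapAliasing()
{
    Vector_t theta;
    theta.setZero(4 * 5);
    Eigen::Map<Matrix_t> Theta(theta.data(), 4, 5);
    Theta(1, 2) = 7.0; // write through the matrix view
    // Column-major layout: element (row=1, col=2) lives at index 2 * 4 + 1 = 9.
    assert(theta(2 * 4 + 1) == 7.0);
}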
Vector_s& ShellBuilder::createShells(size_t shellNO, int spaceDimension)
{
    // The caller takes ownership of the returned heap-allocated container.
    Vector_s* shells = new Vector_s();

    // The shellIdx is only a candidate shell index, based on Fermat's theorem
    // on sums of two squares and Legendre's three-square theorem.
    int shellIdx = 1;
    while (shells->size() < shellNO)
    {
        Vector_t currentIntTuples =
            backtrackingMethod.decomposeByBacktracking(shellIdx, spaceDimension);
        // Only indices that decompose into at least one integer tuple form a
        // shell; allocate the Shell only when it will actually be stored.
        if (currentIntTuples.size() != 0)
        {
            Shell* currentShell = new Shell();
            currentShell->setIntTuplesWithSwapsAndSignChange(currentIntTuples);
            shells->push_back(currentShell);
        }
        shellIdx++;
    }
    return *shells;
}
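// createShells() hands ownership of the heap-allocated container to the
// caller, which is easy to leak. A sketch of an alternative that returns the
// container by value instead (hypothetical signature, not the interface used
// above):
Vector_s buildShellsByValue(BacktrackingMethod& backtracker, size_t shellNO, int spaceDimension)
{
    Vector_s shells;
    int shellIdx = 1;
    while (shells.size() < shellNO)
    {
        Vector_t tuples = backtracker.decomposeByBacktracking(shellIdx, spaceDimension);
        if (tuples.size() != 0)
        {
            Shell* shell = new Shell(); // Shell ownership still rests with the caller
            shell->setIntTuplesWithSwapsAndSignChange(tuples);
            shells.push_back(shell);
        }
        shellIdx++;
    }
    return shells;
}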
int main( int, char** )
{
#ifdef LUNCHBOX_USE_OPENMP
    const size_t nThreads = lunchbox::OMP::getNThreads() * 3;
#else
    const size_t nThreads = 16;
#endif

    std::cout << " read, write, push, copy, erase, "
              << " flush/ms, rd, other #threads" << std::endl;
    _runSerialTest< std::vector< size_t >, size_t >();
    _runSerialTest< Vector_t, size_t >();

    std::vector< Reader > readers(nThreads);
    std::vector< Writer > writers(nThreads);
    std::vector< Pusher > pushers(nThreads);
    stage_ = 1;
    size_t stage = 0;

    for( size_t l = 0; l < nThreads; ++l )
    {
        readers[l].start();
        writers[l].start();
        pushers[l].start();
    }
    lunchbox::sleep( 10 );

    for( size_t i = 1; i <= nThreads; i = i<<1 )
        for( size_t j = 1; j <= nThreads; j = j<<1 )
        {
            // concurrent read, write, push
            Vector_t vector;
            for( size_t k = 0; k < nThreads; ++k )
            {
                readers[k].vector = k < i ? &vector : 0;
                writers[k].vector = k < j ? &vector : 0;
                pushers[k].vector = k < j ? &vector : 0;
            }

            const size_t nextStage = ++stage * STAGESIZE;
            _clock.reset();
            stage_ = nextStage;
            stage_.waitEQ( nextStage + (3 * nThreads) );
            TEST( vector.size() >= LOOPSIZE );

            // multi-threaded copy
            std::vector< Copier > copiers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                copiers[k].vector = &vector;
                copiers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                copiers[k].join();
            for( size_t k = 0; k < vector.size(); ++k )
                TEST( vector[k] == k || vector[k] == 0 );

            // multi-threaded erase
            std::vector< Eraser > erasers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                erasers[k].vector = &vector;
                erasers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                erasers[k].join();
            for( size_t k = 0; k < vector.size(); ++k )
            {
                if( vector[k] == 0 )
                    break;
                if( k > vector.size() / 2 )
                {
                    TEST( vector[k] > vector[k-1] );
                }
                else
                {
                    TEST( vector[k] == k );
                }
            }

            // multi-threaded pop_back
            const size_t fOps = vector.size();
            std::vector< Flusher > flushers(j);
            _clock.reset();
            for( size_t k = 0; k < j; ++k )
            {
                flushers[k].vector = &vector;
                flushers[k].start();
            }
            for( size_t k = 0; k < j; ++k )
                flushers[k].join();
            const float fTime = _clock.getTimef();
            TEST( vector.empty( ));

            std::cerr << std::setw(11) << float(i*LOOPSIZE)/rTime_ << ", "
                      << std::setw(11) << float(j*LOOPSIZE)/wTime_ << ", "
                      << std::setw(11) << float(LOOPSIZE)/pTime_ << ", "
                      << std::setw(9) << float(j)/cTime_ << ", "
                      << std::setw(9) << float(j)/eTime_ << ", "
                      << std::setw(9) << float(fOps)/fTime << ", "
                      << std::setw(3) << i << ", "
                      << std::setw(3) << j << std::endl;
        }

    stage_ = std::numeric_limits< size_t >::max();
    for( size_t k = 0; k < nThreads; ++k )
    {
        readers[k].join();
        writers[k].join();
        pushers[k].join();
    }
    return EXIT_SUCCESS;
}