// Printf-style console output: packages the variadic arguments into a
// va_list and forwards them to voutput().
inline void iuConsole::output(const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    voutput(fmt, args);
    va_end(args);
}
// Report a fatal printf-style error via voutput() and terminate the
// process with a failure status. Does not return.
void error(const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    voutput(fmt, args);
    va_end(args);
    exit(EXIT_FAILURE);
}
inline void iuConsole::color_output_impl(Color color, const char* fmt, va_list va) { (void)(fmt); (void)(va); #if defined(IUTEST_OS_WINDOWS) && !defined(IUTEST_OS_WINDOWS_MOBILE) \ && !defined(IUTEST_OS_WINDOWS_PHONE) && !defined(IUTEST_OS_WINDOWS_RT) if( !IsColorModeAnsi() ) { const WORD attr[] = { 0, FOREGROUND_RED, FOREGROUND_GREEN, FOREGROUND_GREEN | FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_RED | FOREGROUND_BLUE, FOREGROUND_GREEN | FOREGROUND_BLUE, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE }; const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); if( stdout_handle != INVALID_HANDLE_VALUE ) { CONSOLE_SCREEN_BUFFER_INFO csbi; if( ::GetConsoleScreenBufferInfo(stdout_handle, &csbi) ) { const WORD wAttributes = csbi.wAttributes; fflush(stdout); ::SetConsoleTextAttribute(stdout_handle, attr[color] | FOREGROUND_INTENSITY); voutput(fmt, va); fflush(stdout); ::SetConsoleTextAttribute(stdout_handle, wAttributes); return; } } } #endif { output("\033[1;3%cm", '0' + color); voutput(fmt, va); output("\033[m"); } }
// Report a printf-style warning via voutput(). Once the global warning
// counter reaches MAX_ERROR_COUNT, the calling thread is parked forever so
// that subsequent failures stop producing error output.
void warning(const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    voutput(fmt, args);
    va_end(args);

    ++g_warning_count;
    if (g_warning_count >= MAX_ERROR_COUNT)
    {
        // Deliberate: block forever to stop error output from subsequent failures.
        Sleep(INFINITE);
    }
}
// Colored printf-style output. When color output is enabled the call is
// routed through color_output_impl(); otherwise the text is written plain.
inline void iuConsole::color_output(Color color, const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);

    if( IsShouldUseColor(true) )
    {
        color_output_impl(color, fmt, args);
    }
    else
    {
        voutput(fmt, args);
    }

    va_end(args);
}
int main(int argc, char* argv[]) { if(argc != 2) { std::cerr << "Usage : " << argv[0] << "<0,1>" <<std::endl; std::cerr << "with : " << std::endl; std::cerr << "0 : quadratic loss" << std::endl; std::cerr << "1 : cross entropy loss" << std::endl; return -1; } bool quadratic_loss = (atoi(argv[1]) == 0); srand(time(NULL)); // We compare our computation of the gradient to // a finite difference approximation // The loss is also involved std::cout << "---------------------------------" << std::endl; std::cout << "Comparing the analytical gradient and numerical approximation " << std::endl; auto input = gaml::mlp::input<X>(INPUT_DIM, fillInput); auto l1 = gaml::mlp::layer(input, HIDDEN_LAYER_SIZE, gaml::mlp::mlp_sigmoid(), gaml::mlp::mlp_dsigmoid()); auto l2 = gaml::mlp::layer(l1, HIDDEN_LAYER_SIZE, gaml::mlp::mlp_identity(), gaml::mlp::mlp_didentity()); auto l3 = gaml::mlp::layer(l2, HIDDEN_LAYER_SIZE, gaml::mlp::mlp_tanh(), gaml::mlp::mlp_dtanh()); auto l4 = gaml::mlp::layer(l3, OUTPUT_DIM, gaml::mlp::mlp_sigmoid(), gaml::mlp::mlp_dsigmoid()); auto mlp = gaml::mlp::perceptron(l4, output_of); std::cout << "We use the following architecture : " << std::endl; std::cout << mlp << std::endl; std::cout << "which has a total of " << mlp.psize() << " parameters"<< std::endl; gaml::mlp::parameters_type params(mlp.psize()); gaml::mlp::parameters_type paramsph(mlp.psize()); gaml::mlp::values_type derivatives(mlp.psize()); gaml::mlp::values_type forward_sweep(mlp.size()); X x; auto loss_ce = gaml::mlp::loss::CrossEntropy(); auto loss_quadratic = gaml::mlp::loss::Quadratic(); auto f = [&mlp, ¶ms] (const typename decltype(mlp)::input_type& x) -> gaml::mlp::values_type { auto output = mlp(x, params); gaml::mlp::values_type voutput(mlp.output_size()); fillOutput(voutput.begin(), output); return voutput; }; auto df = [&mlp, &forward_sweep, ¶ms] (const typename decltype(mlp)::input_type& x, unsigned int parameter_dim) -> gaml::mlp::values_type { return mlp.deriv(x, params, 
forward_sweep, parameter_dim); }; unsigned int nbtrials = 100; unsigned int nbfails = 0; std::cout << "I will compare " << nbtrials << " times a numerical approximation and the analytical gradient we compute" << std::endl; for(unsigned int t = 0 ; t < nbtrials ; ++t) { randomize_data(params, -1.0, 1.0); randomize_data(x, -1.0, 1.0); // Compute the output at params auto output = mlp(x, params); gaml::mlp::values_type raw_output(OUTPUT_DIM); fillOutput(raw_output.begin(), output); gaml::mlp::values_type raw_outputph(OUTPUT_DIM); // For computing the loss, we need a target gaml::mlp::values_type raw_target(OUTPUT_DIM); randomize_data(raw_target); double norm_dh = 0.0; for(unsigned int i = 0 ; i < mlp.psize() ; ++i) { // Let us compute params + h*[0 0 0 0 0 0 1 0 0 0 0 0], the 1 at the ith position std::copy(params.begin(), params.end(), paramsph.begin()); double dh = (sqrt(DBL_EPSILON) * paramsph[i]); paramsph[i] += dh; norm_dh += dh*dh; // Compute the output at params + h auto outputph = mlp(x, paramsph); fillOutput(raw_outputph.begin(), outputph); // We now compute the approximation of the derivative if(quadratic_loss) derivatives[i] = (loss_quadratic(raw_target, raw_outputph) - loss_quadratic(raw_target, raw_output))/dh; else derivatives[i] = (loss_ce(raw_target, raw_outputph) - loss_ce(raw_target, raw_output))/dh; } // We now compute the analytical derivatives mlp(x, params); std::copy(mlp.begin(), mlp.end(), forward_sweep.begin()); gaml::mlp::values_type our_derivatives(mlp.psize()); for(unsigned int i = 0 ; i < mlp.psize() ; ++i) { if(quadratic_loss) our_derivatives[i] = loss_quadratic.deriv(x, raw_target, forward_sweep, f, df, i); else our_derivatives[i] = loss_ce.deriv(x, raw_target, forward_sweep, f, df, i); } // We finally compute the norm of the difference double error = 0.0; auto diter = derivatives.begin(); for(auto& ourdi : our_derivatives) { error = (ourdi - *diter) * (ourdi - *diter); diter++; } error = sqrt(error); std::cout << "Error between the 
analytical and numerical gradients " << error << " with a step size of " << sqrt(norm_dh) << " in norm" << std::endl; if(error > 1e-7) ++nbfails; /* std::cout << "numerical " << std::endl; for(auto & di : derivatives) std::cout << di << " "; std::cout << std::endl; std::cout << "our :" << std::endl; for(auto& di : our_derivatives) std::cout << di << " "; std::cout << std::endl; */ } std::cout << nbfails << " / " << nbtrials << " with an error higher than 1e-7" << std::endl; }
int main(int argc, char* argv[]) { // get test image Image<byte> input = Raster::ReadGray(argv[1]); Image<float> finput = input; // create operating objects //configIn.openFile("NPclassify.conf"); // convert test image to vector format LINFO("COUNTING SAMPLES"); int vCount = 0; for(int x = 0; x < finput.getWidth(); x++) { for(int y = 0; y < finput.getHeight();y++) { if(finput.getVal(x,y) < 1.0F) { // find sample size from image vCount++; } } } std::vector<TYPE> _vinput(vCount,0); TYPE t = 0.0F; TYPE* tfloat = &t; std::vector<TYPE*> _vTinput(vCount,tfloat); std::vector<std::vector<TYPE> > vinput(2,_vinput); std::vector<std::vector<TYPE*> > vinputP(2,_vTinput); std::vector<std::vector<TYPE> > voutput(2,_vinput); LINFO("ASSEMBLING SAMPLE VECTOR SIZE %" ZU ,vinput[0].size()); vCount = 0; for(int x = 0; x < finput.getWidth(); x++) { for(int y = 0; y < finput.getHeight();y++) { if(finput.getVal(x,y) < 1.0F) { // insert x and y into vector vinput[0][vCount] = 255-x; vinputP[0][vCount] = &vinput[0][vCount]; vinput[1][vCount] = y; vinputP[1][vCount] = &vinput[1][vCount]; vCount++; } } } LINFO("RUNNING COVESTIMATE"); Timer tim; tim.reset(); int t1,t2; int t0 = tim.get(); // to measure display time covHolder<TYPE> chold; chold.resize(2,vCount,0.0F); covEstimate<TYPE> CE(vinputP,chold); t1 = tim.get(); t2 = t1 - t0; std::cout << ">\t(0) TIME: " << t2 << "ms\n"; //CE.printDebug(); t1 = tim.get(); t2 = t1 - t0; std::cout << ">\tTIME: " << t2 << "ms\n"; // (1) find mean value (centroid) in matrix CE.run(); t1 = tim.get(); t2 = t1 - t0; std::cout << ">\tTIME: " << t2 << "ms\n"; Image<float> final = CE.returnCovSlice(0,1,300); //Raster::VisuFloat(translate,0,sformat("translate-%s.pgm",argv[1])); //Raster::VisuFloat(rotate,0,sformat("rotate-%s.pgm",argv[1])); Raster::VisuFloat(final,0,sformat("final-%s.pgm",argv[1])); };