void TestWeightMatrix_Backward() { double epsilon = 1e-3; int n_outputs = 2; int n_inputs = 3; InputLayer input_layer(n_inputs); OutputLayer output_layer(n_outputs); WeightMatrix weights(input_layer, output_layer); weights.set(0, 0, 0.5); weights.set(1, 0, -2.0); weights.set(2, 0, 1.5); weights.set(0, 1, 1.0); weights.set(1, 1, 0.7); weights.set(2, 1, -1.0); weights.setBias(0, 0.8); weights.setBias(1, -0.3); std::vector<double> inputs; inputs.push_back(-2.0); inputs.push_back(1.0); inputs.push_back(3.0); input_layer.receiveInput(inputs); std::vector<double> transition = weights.fire(input_layer); output_layer.receiveInput(transition); assert(output_layer.getInput(0) == 2.3); assert(output_layer.getInput(1) == -4.6); // backpropagation step std::vector<double> actual_outputs; actual_outputs.push_back(1.0); actual_outputs.push_back(1.0); output_layer.computeGradient(actual_outputs); weights.computeGradient(input_layer, output_layer); assert(weights.getPartialDerivative(0, 0) > -1.3 - epsilon && weights.getPartialDerivative(0, 0) < -1.3 + epsilon); assert(weights.getPartialDerivative(1, 0) > 0.65 - epsilon && weights.getPartialDerivative(1, 0) < 0.65 + epsilon); assert(weights.getPartialDerivative(2, 0) > 1.95 - epsilon && weights.getPartialDerivative(2, 0) < 1.95 + epsilon); assert(weights.getPartialDerivative(0, 1) > 5.6 - epsilon && weights.getPartialDerivative(0, 1) < 5.6 + epsilon); assert(weights.getPartialDerivative(1, 1) > -2.8 - epsilon && weights.getPartialDerivative(1, 1) < -2.8 + epsilon); assert(weights.getPartialDerivative(2, 1) > -8.4 - epsilon && weights.getPartialDerivative(2, 1) < -8.4 + epsilon); assert(weights.getBiasPartialDerivative(0) < 0.65 + epsilon && weights.getBiasPartialDerivative(0) > 0.65 - epsilon); assert(weights.getBiasPartialDerivative(1) < -2.8 + epsilon && weights.getBiasPartialDerivative(1) > -2.8 - epsilon); printPass("TestWeightMatrix_Backward()"); }
//------------------ANN construction------------------------------------------ void ANN (int N,float In, float * w_ji, float * w_kj, float b, float * y_pj,float & y_pk) { float x_pi; input_layer(In,x_pi); hidden_layer(N,x_pi, w_ji, y_pj); output_layer(N,y_pj,w_kj,b,y_pk); }
// Verifies the forward pass of WeightMatrix on a small 3-input / 2-output
// network with fixed weights and biases: fire() must produce the expected
// pre-activations, and the output layer must report them via both getInput
// and getOutput (output equals input here — identity activation expected).
void TestWeightMatrix_Forward() {
  double epsilon = 1e-3;  // BUGFIX: tolerance for floating-point comparisons
  int n_outputs = 2;
  InputLayer input_layer(3);
  OutputLayer output_layer(n_outputs);
  WeightMatrix weights(input_layer, output_layer);

  // Fixed test weights and biases.
  weights.set(0, 0, 0.5);
  weights.set(1, 0, -2.0);
  weights.set(2, 0, 1.5);
  weights.set(0, 1, 1.0);
  weights.set(1, 1, 0.7);
  weights.set(2, 1, -1.0);
  weights.setBias(0, 0.8);
  weights.setBias(1, -0.3);

  std::vector<double> inputs;
  inputs.push_back(-2.0);
  inputs.push_back(1.0);
  inputs.push_back(3.0);
  input_layer.receiveInput(inputs);

  // Expected: 2.3 = -2*0.5 + 1*-2 + 3*1.5 + 0.8 and -4.6 = -2*1 + 1*0.7 + 3*-1 - 0.3.
  std::vector<double> transition = weights.fire(input_layer);
  assert(transition.size() == 2);
  // BUGFIX: compare with a tolerance instead of operator== — the sums are
  // accumulated in floating point, so exact equality is not guaranteed.
  assert(transition[0] > 2.3 - epsilon && transition[0] < 2.3 + epsilon);
  assert(transition[1] > -4.6 - epsilon && transition[1] < -4.6 + epsilon);

  output_layer.receiveInput(transition);
  assert(output_layer.getInput(0) > 2.3 - epsilon && output_layer.getInput(0) < 2.3 + epsilon);
  assert(output_layer.getInput(1) > -4.6 - epsilon && output_layer.getInput(1) < -4.6 + epsilon);
  assert(output_layer.getOutput(0) > 2.3 - epsilon && output_layer.getOutput(0) < 2.3 + epsilon);
  assert(output_layer.getOutput(1) > -4.6 - epsilon && output_layer.getOutput(1) < -4.6 + epsilon);

  printPass("TestWeightMatrix_Forward()");
}
int main (int argc, char* argv[]) { // Initialize CN24 Conv::System::Init(); // Capture command line arguments std::string net_config_fname; if(argc > 1) { net_config_fname = std::string(argv[1]); LOGDEBUG << "Using user specified net: " << net_config_fname; } unsigned int CLASSES = 10; unsigned int INPUTMAPS = 3; unsigned int BENCHMARK_PASSES_FWD = 30; unsigned int BENCHMARK_PASSES_BWD = 15; std::istream* net_config_stream; if(argc > 1) { // Open network and dataset configuration files std::ifstream* net_config_file = new std::ifstream(net_config_fname,std::ios::in); if(!net_config_file->good()) { FATAL("Cannot open net configuration file!"); } net_config_stream = net_config_file; } else { LOGINFO << "Using hardcoded net."; std::stringstream* ss = new std::stringstream(hardcoded_net); net_config_stream = ss; } // Parse network configuration file Conv::ConfigurableFactory* factory = new Conv::ConfigurableFactory(*net_config_stream, 238238, false); // Set image dimensions unsigned int width = 512; unsigned int height = 512; Conv::Tensor data_tensor(factory->optimal_settings().pbatchsize, width, height, INPUTMAPS); data_tensor.Clear(); // Assemble net Conv::NetGraph graph; Conv::InputLayer input_layer(data_tensor); Conv::NetGraphNode input_node(&input_layer); input_node.is_input = true; graph.AddNode(&input_node); bool complete = factory->AddLayers(graph, Conv::NetGraphConnection(&input_node), CLASSES); if (!complete) FATAL("Failed completeness check, inspect model!"); factory->InitOptimalSettings(); LOGINFO << "Initializing net, this may take a while..." 
<< std::flush; graph.Initialize(); graph.SetIsTesting(true); graph.FeedForward(); graph.BackPropagate(); LOGINFO << "Benchmark information"; LOGINFO << "====================="; LOGINFO << "Input width : " << width; LOGINFO << "Input height : " << height; LOGINFO << "Parallel inputs: " << factory->optimal_settings().pbatchsize; LOGINFO << "====================="; LOGINFO << "Running forward benchmark...\n" << std::flush; { auto t_begin = std::chrono::system_clock::now(); for(unsigned int p = 0; p < BENCHMARK_PASSES_FWD; p++) { graph.FeedForward(); std::cout << "." << std::flush; } std::cout << "\n"; auto t_end = std::chrono::system_clock::now(); std::chrono::duration<double> t_diff = t_end - t_begin; double total_pixels = (double)width * (double)height * (double)(factory->optimal_settings().pbatchsize) * (double)BENCHMARK_PASSES_FWD; double total_frames = (double)BENCHMARK_PASSES_FWD * (double)(factory->optimal_settings().pbatchsize); double pixels_per_second = total_pixels / t_diff.count(); double frames_per_second = total_frames / t_diff.count(); LOGINFO << "Forward speed: " << pixels_per_second << " pixel/s"; LOGINFO << "Forward speed: " << frames_per_second << " fps"; LOGINFO << "====================="; } graph.SetIsTesting(false); LOGINFO << "Running forward+backward benchmark...\n" << std::flush; { auto t_begin = std::chrono::system_clock::now(); for(unsigned int p = 0; p < BENCHMARK_PASSES_BWD; p++) { graph.FeedForward(); graph.BackPropagate(); std::cout << "." 
<< std::flush; } std::cout << "\n"; auto t_end = std::chrono::system_clock::now(); std::chrono::duration<double> t_diff = t_end - t_begin; double total_pixels = (double)width * (double)height * (double)(factory->optimal_settings().pbatchsize) * (double)BENCHMARK_PASSES_BWD; double total_frames = (double)BENCHMARK_PASSES_BWD * (double)(factory->optimal_settings().pbatchsize); double pixels_per_second = total_pixels / t_diff.count(); double frames_per_second = total_frames / t_diff.count(); LOGINFO << "F+B speed : " << pixels_per_second << " pixel/s"; LOGINFO << "F+B speed : " << frames_per_second << " fps"; LOGINFO << "====================="; } Conv::Tensor* net_output_tensor = &graph.GetDefaultOutputNode()->output_buffers[0].combined_tensor->data; Conv::Tensor image_output_tensor(1, net_output_tensor->width(), net_output_tensor->height(), 3); //LOGINFO << "Colorizing..." << std::flush; //dataset->Colorize(*net_output_tensor, image_output_tensor); LOGINFO << "DONE!"; LOGEND; return 0; }
int main (int argc, char* argv[]) { if (argc < 6) { LOGERROR << "USAGE: " << argv[0] << " <dataset config file> <net config file> <net parameter tensor> <input image file> <output image file>"; LOGEND; return -1; } // Capture command line arguments std::string output_image_fname (argv[5]); std::string input_image_fname (argv[4]); std::string param_tensor_fname (argv[3]); std::string net_config_fname (argv[2]); std::string dataset_config_fname (argv[1]); // Initialize CN24 Conv::System::Init(); // Open network and dataset configuration files std::ifstream param_tensor_file(param_tensor_fname,std::ios::in | std::ios::binary); std::ifstream net_config_file(net_config_fname,std::ios::in); std::ifstream dataset_config_file(dataset_config_fname,std::ios::in); if(!param_tensor_file.good()) { FATAL("Cannot open param tensor file!"); } if(!net_config_file.good()) { FATAL("Cannot open net configuration file!"); } if(!dataset_config_file.good()) { FATAL("Cannot open dataset configuration file!"); } // Parse network configuration file Conv::ConfigurableFactory* factory = new Conv::ConfigurableFactory(net_config_file, 238238, false); // Parse dataset configuration file Conv::TensorStreamDataset* dataset = Conv::TensorStreamDataset::CreateFromConfiguration(dataset_config_file, true); unsigned int CLASSES = dataset->GetClasses(); // Load image Conv::Tensor original_data_tensor(input_image_fname); // Rescale image unsigned int width = original_data_tensor.width(); unsigned int height = original_data_tensor.height(); if(width & 1) width++; if(height & 1) height++; if(width & 2) width+=2; if(height & 2) height+=2; if(width & 4) width+=4; if(height & 4) height+=4; Conv::Tensor data_tensor(1, width, height, original_data_tensor.maps()); data_tensor.Clear(); Conv::Tensor::CopySample(original_data_tensor, 0, data_tensor, 0); // Assemble net Conv::NetGraph graph; Conv::InputLayer input_layer(data_tensor); Conv::NetGraphNode input_node(&input_layer); input_node.is_input = true; 
graph.AddNode(&input_node); bool complete = factory->AddLayers(graph, Conv::NetGraphConnection(&input_node), CLASSES); if (!complete) FATAL("Failed completeness check, inspect model!"); graph.Initialize(); // Load network parameters graph.DeserializeParameters(param_tensor_file); graph.SetIsTesting(true); LOGINFO << "Classifying..." << std::flush; graph.FeedForward(); Conv::Tensor* net_output_tensor = &graph.GetDefaultOutputNode()->output_buffers[0].combined_tensor->data; // &net.buffer(output_layer_id)->data; Conv::Tensor image_output_tensor(1, net_output_tensor->width(), net_output_tensor->height(), 3); LOGINFO << "Colorizing..." << std::flush; dataset->Colorize(*net_output_tensor, image_output_tensor); // Recrop image down Conv::Tensor small(1, original_data_tensor.width(), original_data_tensor.height(), 3); for(unsigned int m = 0; m < 3; m++) for(unsigned int y = 0; y < small.height(); y++) for(unsigned int x = 0; x < small.width(); x++) *small.data_ptr(x,y,m,0) = *image_output_tensor.data_ptr_const(x,y,m,0); small.WriteToFile(output_image_fname); LOGINFO << "DONE!"; LOGEND; return 0; }