/* Dump the complete network topology to stdout: the input layer, every
 * hidden layer in order, then the output layer, framed by banner lines. */
void print_network(Network* network)
{
    printf("####### NETWORK CONFIGURATION #######\n");
    print_layer(network->input_layer);

    size_t idx = 0;
    while (idx < network->hidden_layers_count) {
        print_layer(network->hidden_layers[idx]);
        ++idx;
    }

    print_layer(network->output_layer);
    printf("#####################################\n\n");
}
// Write every edge of |aGraph| to stderr, one per line, in the form
// "From: <layer>, To: <layer>".
static void DumpEdgeList(DirectedGraph<Layer*>& aGraph)
{
  nsTArray<DirectedGraph<Layer*>::Edge> edgeList = aGraph.GetEdgeList();
  for (uint32_t idx = 0; idx < edgeList.Length(); ++idx) {
    const DirectedGraph<Layer*>::Edge& edge = edgeList.ElementAt(idx);
    fprintf(stderr, "From: ");
    print_layer(stderr, edge.mFrom);
    fprintf(stderr, ", To: ");
    print_layer(stderr, edge.mTo);
    fprintf(stderr, "\n");
  }
}
/*
 * Print everything above the draft layers.
 *
 * c selects the mode: 'F' sets the flyback flag; 'N' suppresses the
 * layer-printing loop; any other value prints layers draftlayer..top_layer.
 * In the NOWRITE build this aborts with a preview-only message instead.
 */
void print(int c) /* show everything (above draft layers) */
{
	int n;
#ifndef NOWRITE
#if 0 /* 4/23/91--DBK */
	/* Added 10/15/90--DBK. Is there any reason NOT to do this? */
	redo_gbox = 1;
#else
	redogbox(); /* just do it here */
#endif
	/* If eqn output was being piped, close the pipe and reopen its
	 * result file for reading; failure to open is fatal. */
	if (pipefp) {
		pclose(pipefp);
		pipefp = NULL;
		if ((eqnfp = fopen(psfname,"r")) == NULL)
			fatal("cannot read eqn output");
	}
	flyback = (c == 'F');
	if (c != 'N')
		for (n = draftlayer; n <= top_layer; ++n)
			print_layer(n);
	/* Clean up the temporary eqn PostScript file once printing is done. */
	if (eqnfp) {
		fclose(eqnfp);
		eqnfp = NULL;
		eqn_count = 0;
		unlink(psfname);
	}
#else
	fprintf(stderr, "This version of picasso is for on-screen previewing only.\n");
	exit(1);
#endif
}
// Write all layers in |aLayers| to stderr on a single space-separated line,
// terminated by a newline.
static void DumpLayerList(nsTArray<Layer*>& aLayers)
{
  const uint32_t count = aLayers.Length();
  for (uint32_t idx = 0; idx < count; ++idx) {
    print_layer(stderr, aLayers.ElementAt(idx));
    fprintf(stderr, " ");
  }
  fprintf(stderr, "\n");
}
/*
 * Draw a "sastantua" (pyramid) of the given size.
 *
 * find_base() presumably computes the base width for the requested size and
 * print_layer() renders from it — TODO confirm against their definitions.
 *
 * Fix: the original compared print_layer's return value against size in an
 * if-statement with an empty body; that comparison had no effect (dead code)
 * and has been removed.
 */
void sastantua(int size)
{
	int base;

	base = find_base(size);
	print_layer(base);
}
/*
 * Build the sites of a domain layer by layer.
 *
 * Steps through the dom->q layers of the domain: for each one, computes its
 * z coordinate from dom->z1 and the spacing dom->dz, sets the layer up,
 * prints it, and places a ring of sites into |model| via do_ring().
 *
 * Returns the last value returned by do_ring(), or -1 if dom->q == 0
 * (no layers were processed).
 *
 * Fix: removed the stray semicolons that followed the for-loop's closing
 * brace and the function's closing brace (the latter is ill-formed in
 * strict C90).
 */
int do_layer (struct domain *dom, struct pdb_ATOM *model)
{
	struct layer curr_layer;
	int i, new_site = -1;
	double z;

	/* loop over layers in domain */
	for (i = 0; i < dom->q; i++) {
		z = dom->z1 + i * dom->dz;
		curr_layer = setup_layer(z, dom);
		print_layer(&curr_layer);
		new_site = do_ring(&curr_layer, model);
	}

	return new_site;
}
int main() { LOG_LEVEL = 0; // ============== // = Unit Tests = // ============== if (!do_unittests()) { printf("UNIT TESTS FAILED, ABORTING."); return -1; }; // ================= // = Setup Network = // ================= // Generate + Load Network Config from network.hpp/network.cpp network_t *net_CPU; net_CPU = get_network_config(); // Assert that layer_t fits into a multiple of bus transactions: // ONLY NECESSARY IF WE CAN MAP LAYER_T TRANSFER ONTO BUS_T AXI MASTER // printf("size of layer_t: %d, size of bus_t: %d", (int)sizeof(layer_t), // (int)sizeof(bus_t)); // assert((sizeof(layer_t) % sizeof(bus_t) == 0) && // "layert_t is not multiple of bus size. adjust size of // layer_t.dummy!"); // ========================== // = Setup FPGA Accelerator = // ========================== // Allocate Shared Memory for Config, Weights, Data. // Copy Layer Config + Weights to FPGA. setup_FPGA(net_CPU); // =========================== // = Load + Copy Input Image = // =========================== /* Structured: generate_structured_input_image(input_image,win,hin,chin); PseudoRandom: generate_random_input_image(input_image, win, hin, chin, 1); ReallyRandom: generate_random_input_image(input_image, win, hin, chin -1); Prepared Input File (convert_image.py): load_prepared_input_image(input_image, "./indata.bin", win, hin, chin); JPG/PNG Input File (!not implemented!): load_image_file(input_image, "./puppy-500x350.jpg", win, hin, chin); do_preprocess(input_image, win, hin, chin); */ // Allocate Memory on CPU Side: layer_t layer0 = net_CPU->layers[0]; int win = layer0.width; int hin = layer0.height; int chin = layer0.channels_in; data_t *input_image = (data_t *)malloc(win * hin * chin * sizeof(data_t)); // Load Input Image load_prepared_input_image(input_image, "./indata.bin", win, hin, chin); // Copy onto FPGA copy_input_image_to_FPGA(net_CPU, input_image); // ============================ // = Execute FPGA Accelerator = // ============================ L_LAYERS: for (int layer_id = 
0; layer_id < net_CPU->num_layers; layer_id++) { layer_t layer = net_CPU->layers[layer_id]; LOG("Layer %2d: <%s>\n", layer_id, layer.name); LOG_LEVEL_INCR; // Calculate Memory Pointers LOG("SHARED_DRAM is at address: %lu\n", (long)SHARED_DRAM); int weights_offset = ((long)SHARED_DRAM_WEIGHTS - (long)SHARED_DRAM) / sizeof(data_t); int input_offset = ((long)SHARED_DRAM_DATA - (long)SHARED_DRAM) / sizeof(data_t); numfilterelems_t weights_per_filter = (layer.kernel == 3) ? 9 : 1; weightaddr_t num_weights = layer.channels_in * layer.channels_out * weights_per_filter; // Print some Info on this Layer printf("CPU: Offload CONV Layer "); print_layer(&layer); fflush(stdout); // Offload Layer Calculation to FPGA fpga_top(layer, (data_t *)SHARED_DRAM, weights_offset, num_weights, input_offset); LOG_LEVEL_DECR; } LOG_LEVEL = 0; // =============================== // = Copy Results back from FPGA = // =============================== layer_t *final = &net_CPU->layers[net_CPU->num_layers - 1]; int ch_out = (final->is_second_split_layer ? 
2 : 1) * final->channels_out; data_t *results = (data_t *)malloc(ch_out * sizeof(data_t)); copy_results_from_FPGA(net_CPU, results, ch_out); // ===================== // = Calculate Softmax = // ===================== std::vector<std::pair<data_t, int> > probabilities(ch_out); calculate_softmax(net_CPU, results, probabilities); // ================== // = Report Results = // ================== printf("\nResult (top-5):\n====================\n"); for (int i = 0; i < std::min(5, ch_out); i++) { printf(" %5.2f%%: class %3d (output %6.2f)\n", 100 * probabilities[i].first, probabilities[i].second, results[probabilities[i].second]); } // ==================== // = TestBench Result = // ==================== // Check if output is AS EXPECTED (+- 0.5%) (defined in network.hpp) if (fabs(100 * probabilities[0].first - TEST_RESULT_EXPECTED) < 0.1) { printf("\nTestBench Result: SUCCESS\n"); return 0; } else { printf("\nTestBench Result: FAILURE\n"); printf("Actual: %5.2f, Expected: %5.2f\n", 100 * probabilities[0].first, TEST_RESULT_EXPECTED); return -1; } }
int main(int argc, char **argv) { // Load database into memory. The file is not needed after this point. rodb::Database db("example.rodb"); // Database::dump_yaml is helpful to see what's inside a compiled database. if (argc == 2 && strcmp(argv[1], "--dump") == 0) { db.dump_yaml(std::cout); } // It's ok to make copies of values, they are very cheap. rodb::Value root = db.root(); // Walk though an array. rodb::Value layers = root["world"]["layers"]; for (size_t i = 0; i < layers.size(); ++i) { print_layer(layers[i]); } // Map element access is O(log N). If you don't care too much about performance it's // ok to access same elements multiple times. It's fast enough. Otherwise it makes // sense to store root["ball"] and root["ball"]["start_position"] into a temporary // variable. Point p1(root["ball"]["start_position"]["x"], root["ball"]["start_position"]["y"]); Point p2(root["ball"]["start_position"]); assert(p1.x == p2.x && p1.y == p2.y); std::cout << "Point: {" << p1.x << ", " << p1.y << "}\n"; // Boolean values. if (root["game"]["debug"]) { std::cout << "Debug mode is on\n"; } // Arrays don't have be uniform. They can contain mixed type values, including // other arrays and maps. rodb::Value allowed_levels = root["game"]["allowed_levels"]; for (size_t i = 0; i < allowed_levels.size(); ++i) { if (allowed_levels[i].is_int()) { std::cout << "Level #" << (int)allowed_levels[i] << " is allowed\n"; } else if (allowed_levels[i].is_string()) { std::cout << "Level '" << (char const *)allowed_levels[i] << "' is allowed\n"; } else if (allowed_levels[i].is_array() && allowed_levels[i].size() > 0) { std::cout << "Levels "; rodb::Value group = allowed_levels[i]; for (size_t j = 0; j < group.size(); ++j) { if (j == group.size() - 1) { std::cout << " and "; } else if (j > 0) { std::cout << ","; } std::cout << (int)group[j]; } std::cout << " are allowed\n"; } } return 0; }