int main(int argc, const char * argv[]) { // remember the time in order to calculate processing time at the end time_t startTime = time(NULL); // clear screen of terminal window clearScreen(); printf(" MNIST-3LNN: a simple 3-layer neural network processing the MNIST handwritten digit images\n\n"); // Create neural network using a manually allocated memory space Network *nn = createNetwork(MNIST_IMG_HEIGHT*MNIST_IMG_WIDTH, 20, 10); // displayNetworkWeightsForDebugging(nn); // exit(1); // Training the network by adjusting the weights based on error using the TRAINING dataset trainNetwork(nn); // Testing the during training derived network using the TESTING dataset testNetwork(nn); // Free the manually allocated memory for this network free(nn); locateCursor(36, 5); // Calculate and print the program's total execution time time_t endTime = time(NULL); double executionTime = difftime(endTime, startTime); printf("\n DONE! Total execution time: %.1f sec\n\n",executionTime); return 0; }
int main(int argc, char const *argv[]) {
    // Pipeline: load the dataset, initialize the BP network,
    // train it, then evaluate it.
    readData();
    initBPNework();
    trainNetwork();
    testNetwork();

    // Keep the console window open (Windows) until the user presses a key.
    system("pause");

    return 0;
}
static void runTest(const std::string& imagePath, float noiseMagnitude, size_t iterations, size_t batchSize, bool seedWithTime, size_t xPixels, size_t yPixels, size_t colors, const std::string& outputPath) { std::default_random_engine randomNumberGenerator; if(seedWithTime) { randomNumberGenerator.seed(std::time(nullptr)); } // create network /// one convolutional layer /// one output layer auto neuralNetwork = createNeuralNetwork(xPixels, yPixels, colors, randomNumberGenerator); // load image // create random image Image image(imagePath, "reference"); image.load(); image = image.downsample(xPixels, yPixels, colors); // iterate /// select default or random image /// add noise to image /// train trainNetwork(neuralNetwork, image, noiseMagnitude, iterations, batchSize, randomNumberGenerator); // test the network's predition ability float accuracy = testNetwork(neuralNetwork, image, noiseMagnitude, iterations, batchSize, randomNumberGenerator); std::cout << "Test accuracy was " << accuracy << "%\n"; if(accuracy < 95.0) { std::cout << "Test Failed! Accuracy is too low.\n"; } // visualize the output float visualizationAccuracy = visualizeNetwork(neuralNetwork, image, outputPath); std::cout << "Visualization accuracy was " << visualizationAccuracy << "%\n"; }
/*
 * Command-line driver for black-box tests of the GGH and network modules.
 * Run with -h for usage.
 *
 * Fixes: the "-nl" branch was a standalone `if` followed by a fresh
 * `if/else` chain, so a successful "-nl" run ALSO fell into the final
 * `else` and printed the "Run testLargeM -h" hint; argv[2]/argv[3] were
 * read without checking argc; the closing quote of the "-nc" usage line
 * was printed after the newlines; "prublic" typo; and the program always
 * returned 1, which shells and CI harnesses interpret as failure.
 */
int main(int argc, char **argv) {
    if (argc == 1) {
        printf("Run testLargeM -h for test options.\n\n");
        return 0;
    }

    if (strcmp(argv[1], "-nl") == 0) {
        /* Network listen test: requires a port argument. */
        if (argc < 3) {
            printf("Usage: ./testLargeM -nl <port>\n");
            return 1;
        }
        printf("_Simulating a Network test_\n");
        testNetwork("l", atoi(argv[2]), "null");
    } else if (strcmp(argv[1], "-nc") == 0) {
        /* Network client test: requires port and hostname. */
        if (argc < 4) {
            printf("Usage: ./testLargeM -nc <port> <hostname>\n");
            return 1;
        }
        printf("_Simulating a Network test_\n");
        testNetwork("c", atoi(argv[2]), argv[3]);
    } else if (strcmp(argv[1], "-g") == 0) {
        /* Single detailed GGH key-generation test. */
        printf("_Testing GGH Module_\n");
        testGGH(1);
    } else if (strcmp(argv[1], "-gm") == 0) {
        /* Repeated GGH test: requires an iteration count. */
        if (argc < 3) {
            printf("Usage: ./testLargeM -gm <count>\n");
            return 1;
        }
        printf("_Testing GGH Module_\n");
        testGGH(atoi(argv[2]));
    } else if (strcmp(argv[1], "-h") == 0) {
        printf("This module tests the ggh and network implementations. This is black box testing.\n");
        printf("_GGH TESTING_\nAim: Create a private and public key. Check properties of keys for correctness.\n");
        printf("To test the GGH implementation once in detail: './testLargeM -g'\n");
        printf("To test the GGH implementation multiple times: './testLargeM -gm 100'\n\n");
        printf("_NETWORK TESTING_\nAim: Create an unencrypted network simulation.\n");
        printf("To test network port listening: './testLargeM -nl <port>'\n");
        printf("To test network server connection: './testLargeM -nc <port> <hostname>'\n\n");
    } else {
        printf("Run testLargeM -h for test options.\n");
    }

    return 0;
}
int main(int argc, char *argv[]) { /* * Initialize the VBox runtime without loading * the support driver. */ int rc = RTR3InitExe(argc, &argv, 0); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: RTR3InitExe() -> %d\n", rc); return 1; } if (argc > 1 && !strcmp(argv[1], "-child")) { /* We have spawned ourselves as a child process -- scratch the leg */ RTThreadSleep(1000000); return 1; } #ifdef RT_OS_WINDOWS HRESULT hRes = CoInitialize(NULL); /* * Need to initialize security to access performance enumerators. */ hRes = CoInitializeSecurity( NULL, -1, NULL, NULL, RPC_C_AUTHN_LEVEL_NONE, RPC_C_IMP_LEVEL_IMPERSONATE, NULL, EOAC_NONE, 0); #endif pm::CollectorHAL *collector = pm::createHAL(); if (!collector) { RTPrintf("tstCollector: createMetricFactory() failed\n", rc); return 1; } #if 1 pm::CollectorHints hints; hints.collectHostCpuLoad(); hints.collectHostRamUsage(); hints.collectProcessCpuLoad(RTProcSelf()); hints.collectProcessRamUsage(RTProcSelf()); uint64_t start; uint64_t hostUserStart, hostKernelStart, hostIdleStart; uint64_t hostUserStop, hostKernelStop, hostIdleStop, hostTotal; uint64_t processUserStart, processKernelStart, processTotalStart; uint64_t processUserStop, processKernelStop, processTotalStop; RTPrintf("tstCollector: TESTING - CPU load, sleeping for 5 sec\n"); rc = collector->preCollect(hints, 0); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc); return 1; } rc = collector->getRawHostCpuLoad(&hostUserStart, &hostKernelStart, &hostIdleStart); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc); return 1; } rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStart, &processKernelStart, &processTotalStart); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc); return 1; } RTThreadSleep(5000); // Sleep for 5 seconds rc = collector->preCollect(hints, 0); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc); return 1; } rc = 
collector->getRawHostCpuLoad(&hostUserStop, &hostKernelStop, &hostIdleStop); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc); return 1; } rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStop, &processKernelStop, &processTotalStop); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc); return 1; } hostTotal = hostUserStop - hostUserStart + hostKernelStop - hostKernelStart + hostIdleStop - hostIdleStart; /*printf("tstCollector: host cpu user = %f sec\n", (hostUserStop - hostUserStart) / 10000000.); printf("tstCollector: host cpu kernel = %f sec\n", (hostKernelStop - hostKernelStart) / 10000000.); printf("tstCollector: host cpu idle = %f sec\n", (hostIdleStop - hostIdleStart) / 10000000.); printf("tstCollector: host cpu total = %f sec\n", hostTotal / 10000000.);*/ RTPrintf("tstCollector: host cpu user = %u.%u %%\n", (unsigned)((hostUserStop - hostUserStart) * 100 / hostTotal), (unsigned)((hostUserStop - hostUserStart) * 10000 / hostTotal % 100)); RTPrintf("tstCollector: host cpu kernel = %u.%u %%\n", (unsigned)((hostKernelStop - hostKernelStart) * 100 / hostTotal), (unsigned)((hostKernelStop - hostKernelStart) * 10000 / hostTotal % 100)); RTPrintf("tstCollector: host cpu idle = %u.%u %%\n", (unsigned)((hostIdleStop - hostIdleStart) * 100 / hostTotal), (unsigned)((hostIdleStop - hostIdleStart) * 10000 / hostTotal % 100)); RTPrintf("tstCollector: process cpu user = %u.%u %%\n", (unsigned)((processUserStop - processUserStart) * 100 / (processTotalStop - processTotalStart)), (unsigned)((processUserStop - processUserStart) * 10000 / (processTotalStop - processTotalStart) % 100)); RTPrintf("tstCollector: process cpu kernel = %u.%u %%\n\n", (unsigned)((processKernelStop - processKernelStart) * 100 / (processTotalStop - processTotalStart)), (unsigned)((processKernelStop - processKernelStart) * 10000 / (processTotalStop - processTotalStart) % 100)); RTPrintf("tstCollector: TESTING - CPU load, 
looping for 5 sec\n"); rc = collector->preCollect(hints, 0); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc); return 1; } rc = collector->getRawHostCpuLoad(&hostUserStart, &hostKernelStart, &hostIdleStart); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc); return 1; } rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStart, &processKernelStart, &processTotalStart); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc); return 1; } start = RTTimeMilliTS(); while(RTTimeMilliTS() - start < 5000) ; // Loop for 5 seconds rc = collector->preCollect(hints, 0); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: preCollect() -> %Rrc\n", rc); return 1; } rc = collector->getRawHostCpuLoad(&hostUserStop, &hostKernelStop, &hostIdleStop); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawHostCpuLoad() -> %Rrc\n", rc); return 1; } rc = collector->getRawProcessCpuLoad(RTProcSelf(), &processUserStop, &processKernelStop, &processTotalStop); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getRawProcessCpuLoad() -> %Rrc\n", rc); return 1; } hostTotal = hostUserStop - hostUserStart + hostKernelStop - hostKernelStart + hostIdleStop - hostIdleStart; RTPrintf("tstCollector: host cpu user = %u.%u %%\n", (unsigned)((hostUserStop - hostUserStart) * 100 / hostTotal), (unsigned)((hostUserStop - hostUserStart) * 10000 / hostTotal % 100)); RTPrintf("tstCollector: host cpu kernel = %u.%u %%\n", (unsigned)((hostKernelStop - hostKernelStart) * 100 / hostTotal), (unsigned)((hostKernelStop - hostKernelStart) * 10000 / hostTotal % 100)); RTPrintf("tstCollector: host cpu idle = %u.%u %%\n", (unsigned)((hostIdleStop - hostIdleStart) * 100 / hostTotal), (unsigned)((hostIdleStop - hostIdleStart) * 10000 / hostTotal % 100)); RTPrintf("tstCollector: process cpu user = %u.%u %%\n", (unsigned)((processUserStop - processUserStart) * 100 / (processTotalStop - processTotalStart)), (unsigned)((processUserStop - 
processUserStart) * 10000 / (processTotalStop - processTotalStart) % 100)); RTPrintf("tstCollector: process cpu kernel = %u.%u %%\n\n", (unsigned)((processKernelStop - processKernelStart) * 100 / (processTotalStop - processTotalStart)), (unsigned)((processKernelStop - processKernelStart) * 10000 / (processTotalStop - processTotalStart) % 100)); RTPrintf("tstCollector: TESTING - Memory usage\n"); ULONG total, used, available, processUsed; rc = collector->getHostMemoryUsage(&total, &used, &available); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getHostMemoryUsage() -> %Rrc\n", rc); return 1; } rc = collector->getProcessMemoryUsage(RTProcSelf(), &processUsed); if (RT_FAILURE(rc)) { RTPrintf("tstCollector: getProcessMemoryUsage() -> %Rrc\n", rc); return 1; } RTPrintf("tstCollector: host mem total = %lu kB\n", total); RTPrintf("tstCollector: host mem used = %lu kB\n", used); RTPrintf("tstCollector: host mem available = %lu kB\n", available); RTPrintf("tstCollector: process mem used = %lu kB\n\n", processUsed); #endif #if 1 rc = testNetwork(collector); #endif #if 1 rc = testFsUsage(collector); #endif #if 1 rc = testDisk(collector); #endif #if 1 RTPrintf("tstCollector: TESTING - Performance\n\n"); measurePerformance(collector, argv[0], 100); #endif delete collector; printf ("\ntstCollector FINISHED.\n"); return rc; }
/*
 * MainWindow constructor: builds the complete UI shell — the exclusive
 * view-mode action group, the central stacked widget, the train/test
 * toolbars, the dock widgets (input data, train parameters, neuron
 * properties), the View menu entries, and all signal/slot wiring between
 * the NetworkManager and the various widgets.
 */
MainWindow::MainWindow( QWidget *parent, Qt::WindowFlags flags )
    : QMainWindow( parent, flags ), Ui::MainWindow()
{
    setupUi( this );

    /* QActionGroup for exclusive QAction */
    /* Design/Train/Test view actions are mutually exclusive. */
    m_modalityGroup = new QActionGroup( this );
    m_modalityGroup->addAction( actionDesign_View );
    m_modalityGroup->addAction( actionTrain_View );
    m_modalityGroup->addAction( actionTest_View );

    /* Main QStackedWidget */
    m_stackedWidget = new MainStackedWidget( centralwidget );
    vboxLayout->addWidget( m_stackedWidget );

    /* NetworkManager */
    /* Drives the network model; operates on the stack's graph widget. */
    m_netManager = new NetworkManager( m_stackedWidget->graphWidget() );

    /* ToolBar for train view */
    /* Hidden until the train view is activated. */
    m_trainToolBar = new QToolBar( this );
    m_trainToolBar->setOrientation( Qt::Horizontal );
    m_trainToolBar->setWindowTitle( tr( "Train Bar" ) );
    m_trainToolBar->addAction( actionStart_Train );
    m_trainToolBar->addAction( actionStop_Train );
    m_trainToolBar->setVisible( false );
    addToolBar( Qt::TopToolBarArea, m_trainToolBar );

    /* ToolBar for test view */
    /* Hidden until the test view is activated. */
    m_testToolBar = new QToolBar( this );
    m_testToolBar->setOrientation( Qt::Horizontal );
    m_testToolBar->setWindowTitle( tr( "Test Bar" ) );
    m_testToolBar->addAction( actionTest_Network );
    m_testToolBar->setVisible( false );
    addToolBar( Qt::TopToolBarArea, m_testToolBar );

    /* QDockWidget for choosing input data */
    /* tmpWidget is the dock's content; tmpDataWidget holds the data-group
     * entries laid out in m_inputDockLayout, above a disabled edit button
     * and an expanding spacer. */
    m_inputDock = new QDockWidget( tr( "Input Data" ), this );
    QWidget *tmpWidget = new QWidget( m_inputDock );
    QWidget *tmpDataWidget = new QWidget( tmpWidget );
    m_inputDockLayout = new QVBoxLayout( tmpDataWidget );
    QVBoxLayout *inputDockLayout = new QVBoxLayout( tmpWidget );
    QPushButton *button = new QPushButton( tr("Edit Data Group"), tmpWidget );
    button->setEnabled( false );  /* NOTE(review): disabled at startup — presumably enabled elsewhere once data exists; confirm */
    inputDockLayout->addWidget( button );
    inputDockLayout->addWidget( tmpDataWidget );
    inputDockLayout->addItem( new QSpacerItem( 20, 40, QSizePolicy::Minimum, QSizePolicy::Expanding ) );
    m_inputDock->setAllowedAreas( Qt::LeftDockWidgetArea | Qt::RightDockWidgetArea );
    m_inputDock->setWidget( tmpWidget );
    m_inputDock->setVisible( false );
    addDockWidget( Qt::LeftDockWidgetArea, m_inputDock );

    /* QDockWidget for choosing train parameters */
    m_trainDock = new QDockWidget( tr( "Train Parameters" ), this );
    m_trainPropertyWidget = new TrainPropertyWidget( m_trainDock );
    m_trainDock->setAllowedAreas( Qt::LeftDockWidgetArea | Qt::RightDockWidgetArea );
    m_trainDock->setWidget( m_trainPropertyWidget );
    m_trainDock->setVisible( false );
    addDockWidget( Qt::RightDockWidgetArea, m_trainDock );

    /* QDockWidget for choosing output property */
    /* (Currently disabled; kept for a future output-property view.) */
    // m_outputDock = new QDockWidget( tr( "Output Property" ), this );
    // m_outputPropertyWidget = new OutputPropertyWidget( m_outputDock );
    // m_outputDock->setAllowedAreas( Qt::LeftDockWidgetArea | Qt::RightDockWidgetArea );
    // m_outputDock->setWidget( m_outputPropertyWidget );
    // m_outputDock->setVisible( false );
    // addDockWidget( Qt::RightDockWidgetArea, m_outputDock );

    /* QDockWidget for editing neurons property */
    m_propertyDock = new QDockWidget( tr( "Neuron Parameters" ), this );
    m_neuronPropertyWidget = new NeuronPropertyWidget( m_propertyDock );
    m_propertyDock->setAllowedAreas( Qt::AllDockWidgetAreas );
    m_propertyDock->setVisible( false );
    m_propertyDock->setWidget( m_neuronPropertyWidget );
    addDockWidget( Qt::RightDockWidgetArea, m_propertyDock );

    /* Populate View Menu */
    /* Each dock contributes its show/hide toggle action. */
    menuView->addAction( m_inputDock->toggleViewAction() );
    menuView->addAction( m_trainDock->toggleViewAction() );
    // menuView->addAction( m_outputDock->toggleViewAction() );
    menuView->addAction( m_propertyDock->toggleViewAction() );

    /* Pre-check some actions */
    /* Show the neuron-property dock by default; remember the train dock's
     * toggle so it can be restored with the train view. */
    m_propertyDock->toggleViewAction()->activate( QAction::Trigger );
    trainViewStatus << m_trainDock->toggleViewAction();

    /* Set connections */
    /* NetworkManager -> property/plot widgets: keep displays in sync. */
    connect( m_netManager, SIGNAL( neuronSelectionChanged( DS::Neuron * ) ),
             m_neuronPropertyWidget, SLOT( updateValues( DS::Neuron * ) ) );
    connect( m_netManager, SIGNAL( neuronTypeChanged( DS::Neuron * ) ),
             m_neuronPropertyWidget, SLOT( updateValues( DS::Neuron * ) ) );
    connect( m_netManager, SIGNAL( trainingParametersChanged( DS::TrainingParameters * ) ),
             m_trainPropertyWidget, SLOT( updateValues( DS::TrainingParameters * ) ) );
    connect( m_netManager, SIGNAL( epochTrained( DS::Network::TrainEpochValues ) ),
             m_stackedWidget->trainWidget(), SLOT( updateTrainEpochValues( DS::Network::TrainEpochValues ) ) );
    connect( m_netManager, SIGNAL( trainDataChanged( DS::TrainData * ) ),
             m_stackedWidget->testWidget(), SLOT( changeTrainData( DS::TrainData * ) ) );
    connect( m_netManager, SIGNAL( networkTested( DS::Network::TestNetworkValues ) ),
             m_stackedWidget->testWidget(), SLOT( showNetworkTest( DS::Network::TestNetworkValues ) ) );
    /* Both a user stop and natural completion route through the same slot. */
    connect( m_netManager, SIGNAL( trainStopped() ),
             this, SLOT( on_actionStop_Train_triggered() ) );
    connect( m_netManager, SIGNAL( trainFinished() ),
             this, SLOT( on_actionStop_Train_triggered() ) );
    /* Property widget -> NetworkManager: apply user edits to the model. */
    connect( m_neuronPropertyWidget, SIGNAL( changeNeuronTypeRequested( DS::Neuron *, const QString & ) ),
             m_netManager, SLOT( changeNeuronType( DS::Neuron *, const QString & ) ) );
    /* Toolbar actions -> NetworkManager operations. */
    connect( actionStart_Train, SIGNAL( triggered() ),
             m_netManager, SLOT( startTraining() ) );
    connect( actionStop_Train, SIGNAL( triggered() ),
             m_netManager, SLOT( stopTraining() ) );
    connect( actionTest_Network, SIGNAL( triggered() ),
             m_netManager, SLOT( testNetwork() ) );
    /* Application-level actions. */
    connect( actionExit, SIGNAL( triggered() ), qApp, SLOT( quit() ) );
    connect( actionAbout_Qt, SIGNAL( triggered() ), qApp, SLOT( aboutQt() ) );
}
int main( int argc, char *argv[] ) { nn_type nn; double atof(); FILE *log_file; FILE *out_file; if( argc != 6 ) { fprintf( stderr, "Usage: nn learning_rate k hidden log_file out_file < digits_train.txt\n" ); fprintf( stderr, " log_file - file to record progress of training\n"); fprintf( stderr, " out_file - file to record final network\n"); exit(0); } nn.learning_rate = atof( argv[1] ); nn.k = atof( argv[2] ); nn.n_hidden = atoi( argv[3] ); if( (log_file = fopen( argv[4], "w" )) == NULL ) { fprintf( stderr, "Could not open file %s\n", argv[4] ); exit( 0 ); } if( (out_file = fopen( argv[5], "w" )) == NULL ) { fprintf( stderr, "Could not open file %s\n", argv[5] ); exit( 0 ); } fprintf( log_file, "learning rate: %0.2f\n", nn.learning_rate ); fprintf( log_file, "multiplicative constant (k): %0.1f\n", nn.k ); fprintf( log_file, "hidden units: %d\n", nn.n_hidden ); /* * Number of input lines. * NO NEED TO CHANGE THIS. */ nn.n_input = 64; /* * Number of output lines. * NO NEED TO CHANGE THIS. */ nn.n_output = 10; /* * Total amount of data. * YOU MAY WISH TO CHANGE THIS. */ all_data.n = N_EXAMPLES; /* total amount of data */ if( all_data.n > N_EXAMPLES ) { fprintf( stderr, "Too many examples; increase N_EXAMPLES\n" ); exit( 0 ); } readData( nn.n_input, nn.n_output, &all_data ); splitData( nn.n_input, nn.n_output, &all_data, &training_data, &test_data ); trainNetwork( log_file, &nn, &training_data ); testNetwork( log_file, &nn, &test_data ); printNetwork( out_file, &nn ); fclose( log_file ); fclose( out_file ); }