//----------------------------------------
// main
//----------------------------------------
int main(int /*argc*/, char** /*argv*/)
{
    PrepareConsoleLogger logger(Poco::Logger::ROOT, Poco::Message::PRIO_INFORMATION);

    const std::string::size_type kNumKeys = 262144;
    std::vector<Poco::UInt32> intVector(kNumKeys);
    std::vector<std::string> strVector(kNumKeys);
    Poco::Random random;
    for (std::size_t i = 0; i < kNumKeys; ++i)
    {
        intVector[i] = random.next();
        strVector[i] = Poco::format("%08x", intVector[i]);
    }

    std::cout << "------------------------------------" << std::endl;
    std::cout << "Comparison for key type Poco::UInt32" << std::endl;
    std::cout << "------------------------------------" << std::endl;
    TestAll(intVector);

    std::cout << "----------------------------------------------" << std::endl;
    std::cout << "Comparison for key type std::string (length=8)" << std::endl;
    std::cout << "----------------------------------------------" << std::endl;
    TestAll(strVector);

    return 0;
}
int main(int argc, char **argv)
{
    PIX          *pixs;
    L_REGPARAMS  *rp;

    if (regTestSetup(argc, argv, &rp))
        return 1;

    pixs = pixRead("feyn-fract.tif");
    TestAll(rp, pixs, FALSE);
    TestAll(rp, pixs, TRUE);
    pixDestroy(&pixs);
    return regTestCleanup(rp);
}
void Solver<Dtype>::Solve(const char* resume_file) {
  LOG(INFO) << "Solving " << net_->name();
  LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy();

  if (resume_file) {
    LOG(INFO) << "Restoring previous solver status from " << resume_file;
    Restore(resume_file);
  }

  // For a network that is trained by the solver, no bottom or top vecs
  // should be given, and we will just provide dummy vecs.
  Step(param_.max_iter() - iter_);
  // If we haven't already, save a snapshot after optimization, unless
  // overridden by setting snapshot_after_train := false
  if (param_.snapshot_after_train()
      && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) {
    Snapshot();
  }
  // After the optimization is done, run an additional train and test pass to
  // display the train and test loss/outputs if appropriate (based on the
  // display and test_interval settings, respectively).  Unlike in the rest of
  // training, for the train net we only run a forward pass as we've already
  // updated the parameters "max_iter" times -- this final pass is only done to
  // display the loss, which is computed in the forward pass.
  if (param_.display() && iter_ % param_.display() == 0) {
    Dtype loss;
    net_->ForwardPrefilled(&loss);
    LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss;
  }
  if (param_.test_interval() && iter_ % param_.test_interval() == 0) {
    TestAll();
  }
  LOG(INFO) << "Optimization Done.";
}
bool Solver::Solve(const char* resume_file) {
  LOG(INFO) << "Solving " << net_->name();
  LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy();

  // Initialize to false every time we start solving.
  requested_early_exit_ = false;

  if (resume_file != nullptr) {
    LOG(INFO) << "Restoring previous solver status from " << resume_file;
    Restore(resume_file);
  }
  callback_soft_barrier();
  if (Caffe::restored_iter() != -1) {
    iter_ = Caffe::restored_iter();
    iterations_restored_ = iter_;  // for correct benchmarking
    iterations_last_ = -1;
  }

  // For a network that is trained by the solver, no bottom or top vecs
  // should be given, and we will just provide dummy vecs.
  int start_iter = iter_;
  Step(param_.max_iter() - iter_);
  // If we haven't already, save a snapshot after optimization, unless
  // overridden by setting snapshot_after_train := false
  if (param_.snapshot_after_train()
      && (!param_.snapshot() || iter_ % param_.snapshot() != 0)) {
    if (Caffe::root_solver()) {
      Snapshot();
    }
  }
  Caffe::set_restored_iter(-1);
  iterations_restored_ = 0;
  iterations_last_ = 0;
  if (requested_early_exit_) {
    LOG(INFO) << "Optimization stopped early.";
    return true;
  }
  // After the optimization is done, run an additional train and test pass to
  // display the train and test loss/outputs if appropriate (based on the
  // display and test_interval settings, respectively).  Unlike in the rest of
  // training, for the train net we only run a forward pass as we've already
  // updated the parameters "max_iter" times -- this final pass is only done to
  // display the loss, which is computed in the forward pass.
  if (this->display()) {
    int average_loss = this->param_.average_loss();
    float loss;
    net_->Forward(&loss);
    UpdateSmoothedLoss(loss, start_iter, average_loss);
    LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
        << ", loss = " << smoothed_loss_;
  }
  if (param_.test_interval() && iter_ % param_.test_interval() == 0) {
    bool use_multi_gpu_testing = Caffe::solver_count() > 1;
    TestAll(0, use_multi_gpu_testing);
    callback_soft_barrier();
  }
  return false;
}
void Solver<Dtype>::Step(int iters) {
  vector<Blob<Dtype>*> bottom_vec;
  const int start_iter = iter_;
  const int stop_iter = iter_ + iters;
  int average_loss = this->param_.average_loss();
  vector<Dtype> losses;
  Dtype smoothed_loss = 0;

  for (; iter_ < stop_iter; ++iter_) {
    Messenger::SendMessage("SOLVER_ITER_CHANGED", &iter_);
    if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && (iter_ > 0 || param_.test_initialization())) {
      TestAll();
    }

    const bool display = param_.display() && iter_ % param_.display() == 0;
    net_->set_debug_info(display && param_.debug_info());
    Dtype loss = net_->ForwardBackward(bottom_vec);
    if (losses.size() < average_loss) {
      losses.push_back(loss);
      int size = losses.size();
      smoothed_loss = (smoothed_loss * (size - 1) + loss) / size;
    } else {
      int idx = (iter_ - start_iter) % average_loss;
      smoothed_loss += (loss - losses[idx]) / average_loss;
      losses[idx] = loss;
    }
    if (display) {
      LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss;
      const vector<Blob<Dtype>*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const Dtype loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight
                            << " = " << loss_weight * result_vec[k] << " loss)";
          }
          LOG(INFO) << "    Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
    }
    ComputeUpdateValue();
    net_->Update();

    // Save a snapshot if needed.
    if (param_.snapshot() && (iter_ + 1) % param_.snapshot() == 0) {
      Snapshot();
    }
  }
}
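The smoothing logic above keeps a window of the last average_loss losses and updates the mean incrementally instead of re-summing the window every iteration. Below is a minimal standalone sketch of that rolling average; the sample losses and the window size are made up for illustration, while the two update formulas are taken verbatim from the snippet.

#include <iostream>
#include <vector>

// Rolling mean over the last `window` values, updated incrementally as in
// Solver::Step: grow the buffer until it is full, then replace the oldest
// entry and adjust the mean by the scaled difference.
int main() {
    const int window = 3;                 // corresponds to param_.average_loss()
    std::vector<double> losses;           // circular buffer of recent losses
    double smoothed = 0.0;
    const double samples[] = {4.0, 2.0, 3.0, 9.0, 1.0};  // hypothetical losses
    for (int iter = 0; iter < 5; ++iter) {
        double loss = samples[iter];
        if (static_cast<int>(losses.size()) < window) {
            losses.push_back(loss);
            int size = static_cast<int>(losses.size());
            smoothed = (smoothed * (size - 1) + loss) / size;  // mean while filling
        } else {
            int idx = iter % window;                    // slot of the oldest sample
            smoothed += (loss - losses[idx]) / window;  // swap oldest for newest
            losses[idx] = loss;
        }
        std::cout << "iter " << iter << " smoothed = " << smoothed << "\n";
    }
}

Once the buffer is full, each update costs O(1): the oldest sample's contribution to the mean is exchanged for the newest one's without touching the rest of the window.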
/*
 =============================================================================
 Function:	Rmalloc_test	// external //
 Author:	Rammi
 Date:		04/11/1995

 Return:	---

 Parameter:	file	called from

 Purpose:	Explicitly test all blocks for integrity
 =============================================================================
*/
void Rmalloc_test(const char *file)
{
#if RM_TEST_DEPTH > 0
  TestAll(file);
#else
  fprintf(stderr,
	  HEAD __FILE__
	  " not compiled with RM_TEST_DEPTH > 0, call in %s senseless.\n",
	  file);
#endif
}
int main(int argc, char **argv)
{
  plan_tests(728);

  task_behaviour.SetDefaults();

  TestAll();

  glide_polar.SetMC(fixed(1));
  TestAll();

  glide_polar.SetMC(fixed(2));
  TestAll();

  glide_polar.SetMC(fixed(4));
  TestAll();

  return exit_status();
}
int main(int argc, char **argv)
{
  plan_tests(717);

  task_behaviour.SetDefaults();
  ordered_task_settings.SetDefaults();

  TestAll();

  return exit_status();
}
int main(int argc, char **argv)
{
  plan_tests(2095);

  glide_settings.SetDefaults();

  TestAll();

  glide_polar.SetMC(fixed(0.1));
  TestAll();

  glide_polar.SetMC(fixed(1));
  TestAll();

  glide_polar.SetMC(fixed(4));
  TestAll();

  glide_polar.SetMC(fixed(10));
  TestAll();

  return exit_status();
}
void Solver<Dtype>::Step(int iters) {
  vector<Blob<Dtype>*> bottom_vec;
  const int start_iter = iter_;
  const int stop_iter = iter_ + iters;
  int average_loss = this->param_.average_loss();
  vector<Dtype> losses;
  Dtype smoothed_loss = 0;

  for (; iter_ < stop_iter; ++iter_) {
    DLOG(INFO) << "current iteration = " << iter_;
    if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && (iter_ > 0 || param_.test_initialization())) {
      TestAll();
    }

    const bool display = param_.display() && iter_ % param_.display() == 0;
    net_->set_debug_info(display && param_.debug_info());
    Dtype loss;
    TIME("ForwardBackward()", {
      loss = net_->ForwardBackward(bottom_vec);
    });
/** Call all tests */
GLDEF_C void CallTestsL()
{
    TInt r = client.CreateLocal(0);
    FailIfError(r);

    gFileSize = 8;

    CSelectionBox* TheSelector = CSelectionBox::NewL(test.Console());

    // Each test case of the suite has an identifier for parsing purposes of the results
    gTestHarness = 6;
    gTestCase = 1;

    PrintHeaders(1, _L("t_fsrmkdir. Mkdir"));

    if (gMode == 0)
        { // Manual
        gSessionPath = _L("?:\\");
        TCallBack createFiles(TestFileCreate, TheSelector);
        TCallBack MkDir(TestMake, TheSelector);
        TCallBack makeMultSame(TestMakeMultSame, TheSelector);
        TCallBack makeMultDif(TestMakeMultDif, TheSelector);
        TCallBack makeAll(TestAll, TheSelector);

        TheSelector->AddDriveSelectorL(TheFs);
        TheSelector->AddLineL(_L("Create all files"), createFiles);
        TheSelector->AddLineL(_L("Mkdir "), MkDir);
        TheSelector->AddLineL(_L("Mkdir mult clients same dir "), makeMultSame);
        TheSelector->AddLineL(_L("Mkdir mult clients dif dir"), makeMultDif);
        TheSelector->AddLineL(_L("Execute all options"), makeAll);
        TheSelector->Run();
        }
    else
        { // Automatic
        TestAll(TheSelector);
        }

    client.Close();

    test.Printf(_L("#~TestEnd_%d\n"), gTestHarness);

    delete TheSelector;
}
/** Call all tests */
GLDEF_C void CallTestsL()
{
    TInt r = client.CreateLocal(0);
    FailIfError(r);

    CSelectionBox* TheSelector = CSelectionBox::NewL(test.Console());

    // Each test case of the suite has an identifier for parsing purposes of the results
    gTestHarness = 3;
    gTestCase = 1;

    CreateDirWithNFiles(300, 3);

    PrintHeaders(1, _L("t_fsrdirscan. Directory scanning"));

    if (gMode == 0)
        { // Manual
        gSessionPath = _L("?:\\");
        TCallBack createFiles(TestFileCreate, TheSelector);
        TCallBack findFile(TestFindEntry, TheSelector);
        TCallBack findFileMC(TestFindEntryMultipleClients, TheSelector);
        TCallBack findFileMCDD(TestFindEntryMultipleClientsDD, TheSelector);
        TCallBack findFilePattern(TestFileFindPattern, TheSelector);

        TheSelector->AddDriveSelectorL(TheFs);
        TheSelector->AddLineL(_L("Create all files"), createFiles);
        TheSelector->AddLineL(_L("Find filename"), findFile);
        TheSelector->AddLineL(_L("Find with mult clients same directory"), findFileMC);
        TheSelector->AddLineL(_L("Find with mult clients dif directories"), findFileMCDD);
        TheSelector->AddLineL(_L("All using glob patterns"), findFilePattern);
        TheSelector->Run();
        }
    else
        { // Automatic
        TestAll(TheSelector);
        }

    client.Close();

    test.Printf(_L("#~TestEnd_%d\n"), gTestHarness);

    delete TheSelector;
}
/** Call all tests */
GLDEF_C void CallTestsL()
{
    CSelectionBox* TheSelector = CSelectionBox::NewL(test.Console());

    // Each test case of the suite has an identifier for parsing purposes of the results
    gTestHarness = 0;

    if (gMode == 0)
        { // Manual
        gSessionPath = _L("?:\\");
        TCallBack createFiles(TestFileCreate, TheSelector);

        TheSelector->AddDriveSelectorL(TheFs);
        TheSelector->AddLineL(_L("Create all files"), createFiles);
        TheSelector->Run();
        }
    else
        { // Automatic
        TestAll(TheSelector);
        }

    delete TheSelector;
}
void main(void)
{
    u8 i = 0;
    char c = 0;   /* initialized so the first pass through the switch is defined */

    MCU_INIT();
    LED = 0;

    while (1)
    {
        if (UARTDataAvailable() > 0)
            c = UARTReadData();

        switch (c)
        {
            case 'h': Menu();                  break;
            case 's': SHA256_Test();   Menu(); break;
            case 'c': CS_Test();       Menu(); break;
            case 'C': CS_All_Test();   Menu(); break;
            case 't': TestOne();       Menu(); break;
            case 'T': TestAll();       Menu(); break;
        }
        c = 0;
    }
}
/*
 =============================================================================
 Function:	AddBlk		// local //
 Author:	Rammi
 Date:		16.11.1995

 Return:	---

 Parameter:	Blk	new block (original pos.)
		file	called from

 Purpose:	Add new block to the list
 =============================================================================
*/
static void AddBlk(begin *Blk, const char *file)
{
  int hash = HASH(Blk);		/* hash val */

  /* make sure everything is initialized */
  if (!Global.isInitialized) {
    Initialize();
  }

#if RM_TEST_DEPTH > 1
  TestAll(file);
#else
  /* prevent compiler warnings about unused variables */
  file = NULL;
#endif

  /* --- insert it --- */
  Blk->Next = Chain[hash].Next;
  Blk->Prev = &Chain[hash];
  Chain[hash].Next->Prev = Blk;
  Chain[hash].Next = Blk;

  Global.BlockCount++;
}
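AddBlk splices the block into a circular doubly linked hash chain whose head, Chain[hash], is a sentinel node, so an empty chain needs no special case. A minimal sketch of the same four-pointer splice, using a hypothetical Node type in place of rmalloc's begin header:

#include <cassert>

struct Node {       // stand-in for rmalloc's `begin` block header
    Node* Next;
    Node* Prev;
};

// Insert blk right after the sentinel, mirroring AddBlk's splice order.
void InsertAfterSentinel(Node& sentinel, Node* blk) {
    blk->Next = sentinel.Next;
    blk->Prev = &sentinel;
    sentinel.Next->Prev = blk;
    sentinel.Next = blk;
}

int main() {
    Node chain;                      // sentinel points at itself when empty
    chain.Next = chain.Prev = &chain;
    Node a, b;
    InsertAfterSentinel(chain, &a);  // list: sentinel <-> a
    InsertAfterSentinel(chain, &b);  // list: sentinel <-> b <-> a
    assert(chain.Next == &b && b.Next == &a && a.Next == &chain);
    assert(chain.Prev == &a && a.Prev == &b && b.Prev == &chain);
}

Removal (as in DelBlk further down) is the mirror image: Blk->Next->Prev = Blk->Prev; Blk->Prev->Next = Blk->Next; again with no empty- or end-of-list special cases.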
void Solver<Dtype>::Step(int iters) {
  const int start_iter = iter_;
  const int stop_iter = iter_ + iters;
  int average_loss = this->param_.average_loss();
  losses_.clear();
  smoothed_loss_ = 0;

  while (iter_ < stop_iter) {
    // zero-init the params
    net_->ClearParamDiffs();
    if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && (iter_ > 0 || param_.test_initialization())
        && Caffe::root_solver()) {
      TestAll();
      if (requested_early_exit_) {
        // Break out of the while loop because stop was requested while testing.
        break;
      }
    }

    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_start();
    }
    const bool display = param_.display() && iter_ % param_.display() == 0;
    net_->set_debug_info(display && param_.debug_info());
    // accumulate the loss and gradient
    Dtype loss = 0;
    for (int i = 0; i < param_.iter_size(); ++i) {
      loss += net_->ForwardBackward();
    }
    loss /= param_.iter_size();
    // average the loss across iterations for smoothed reporting
    UpdateSmoothedLoss(loss, start_iter, average_loss);
    if (display) {
      LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
          << ", loss = " << smoothed_loss_;
      const vector<Blob<Dtype>*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const Dtype loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight
                            << " = " << loss_weight * result_vec[k] << " loss)";
          }
          LOG_IF(INFO, Caffe::root_solver()) << "    Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
    }
    for (int i = 0; i < callbacks_.size(); ++i) {
      callbacks_[i]->on_gradients_ready();
    }
    ApplyUpdate();

    // Increment the internal iter_ counter -- its value should always indicate
    // the number of times the weights have been updated.
    ++iter_;

    SolverAction::Enum request = GetRequestedAction();

    // Save a snapshot if needed.
    if ((param_.snapshot() && iter_ % param_.snapshot() == 0
         && Caffe::root_solver()) ||
        (request == SolverAction::SNAPSHOT)) {
      Snapshot();
    }
    if (SolverAction::STOP == request) {
      requested_early_exit_ = true;
      // Break out of training loop.
      break;
    }
  }
}
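This version accumulates gradients over param_.iter_size() forward/backward passes before a single ApplyUpdate, which emulates a batch iter_size times larger than what fits in memory, and reports the loss averaged over those passes. A toy sketch of that accumulation loop follows; the stubbed ForwardBackward and its returned values are invented for illustration.

#include <iostream>

// Stub: pretend each pass returns a loss and adds its gradient into a
// persistent accumulator (Caffe accumulates param diffs across passes).
double ForwardBackwardStub(double& grad_accum) {
    grad_accum += 0.5;   // hypothetical per-pass gradient contribution
    return 1.0;          // hypothetical per-pass loss
}

int main() {
    const int iter_size = 4;   // corresponds to param_.iter_size()
    double grad_accum = 0.0;   // zeroed once per effective iteration
    double loss = 0.0;
    for (int i = 0; i < iter_size; ++i) {
        loss += ForwardBackwardStub(grad_accum);
    }
    loss /= iter_size;  // report the average loss over the accumulated passes
    // A real solver also scales the accumulated diff by 1/iter_size before
    // the weight update (Caffe does this in its normalization step).
    std::cout << "loss = " << loss
              << ", grad = " << grad_accum / iter_size << "\n";
}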
/*
 =============================================================================
 Function:	Rmalloc_stat	// extern //
 Author:	Rammi
 Date:		04/15/1995

 Return:	---

 Parameter:	file	called from

 Purpose:	Show statistics
 =============================================================================
*/
void Rmalloc_stat(const char *file)
{
#if RM_TEST_DEPTH > 0
  TestAll(file);

#define STAT_HEAD "<MALLOC_STATS>\t"

  fprintf(stderr, STAT_HEAD "============ STATISTICS (%s) =============\n",
	  file);
  if (!Global.BlockCount) {
    fprintf(stderr, STAT_HEAD "Nothing allocated.\n");
  }
  else {
    const begin **BlockVec;

    if ((BlockVec = (const begin **)malloc(Global.BlockCount*sizeof(begin *))) == NULL) {
      fprintf(stderr,
	      STAT_HEAD "Couldn't allocate enough memory for statistics. Going on...\n");
    }
    else {
      unsigned i = 0;
      unsigned j;
      begin   *B;
      unsigned count;
      size_t   Mem = 0;
      unsigned nrBlocks;
#ifdef WITH_FLAGS
      size_t   StaticMem = 0;
#endif
#ifdef GENERATIONS
      unsigned gen;
#endif

      /* add all blocks to vector */
      for (j = 0; j < HASHSIZE; j++) {
	for (B = Chain[j].Next; B != &Chain[j]; B = B->Next) {
#ifdef WITH_FLAGS
	  if (B->Flags & RM_STATIC) {
	    StaticMem += B->Size;
	  }
	  else {
	    BlockVec[i++] = B;
	  }
#else
	  BlockVec[i++] = B;
#endif
	}
      }
#ifdef WITH_FLAGS
      assert(i <= Global.BlockCount);
#else
      assert(i == Global.BlockCount);
#endif
      nrBlocks = i;

      /* --- sort --- */
#ifdef GENERATIONS
      qsort(BlockVec, nrBlocks, sizeof(begin *),
	    (int (*)(const void *, const void *))BlockSortGenerations);
#else
      qsort(BlockVec, nrBlocks, sizeof(begin *),
	    (int (*)(const void *, const void *))BlockSort);
#endif

      for (i = 0; i < nrBlocks; i = j) {
	count = 1;
	for (j = i+1; j < nrBlocks; j++) {
	  if (BlockSort(BlockVec+i, BlockVec+j) != 0) {
	    break;
	  }
	  /* are equal */
	  count++;
	}
#ifdef GENERATIONS
	fprintf(stderr, STAT_HEAD "%6d x %8u Bytes in %s, generations:",
		count, (unsigned) BlockVec[i]->Size, BlockVec[i]->File);
	for (gen = 0; gen < count; gen++) {
	  if (gen == MAX_STAT_GENERATIONS) {
	    fprintf(stderr, " ...");
	    break;
	  }
	  fprintf(stderr, " %d", BlockVec[gen+i]->Generation);
	}
	fprintf(stderr, "\n");
#else
	fprintf(stderr, STAT_HEAD "%6d x %8u Bytes in %s\n",
		count, (unsigned) BlockVec[i]->Size, BlockVec[i]->File);
#endif
	Mem += count*BlockVec[i]->Size;
      }

      /* and give free */
      free(BlockVec);

#ifdef WITH_FLAGS
      fprintf(stderr, STAT_HEAD "*Variable*\t%12u Bytes\n", (unsigned) Mem);
      fprintf(stderr, STAT_HEAD "*Static*  \t%12u Bytes\n", (unsigned) StaticMem);
      fprintf(stderr, STAT_HEAD "*Total*   \t%12u Bytes\n", (unsigned) (Mem+StaticMem));
#else
      fprintf(stderr, STAT_HEAD "*Total*\t%u Bytes\n", (unsigned) Mem);
#endif
    }
  }
  fprintf(stderr, STAT_HEAD "============ END OF STATISTICS =============\n");
#else
  fprintf(stderr,
	  HEAD __FILE__
	  " not compiled with RM_TEST_DEPTH > 0, call in %s senseless.\n",
	  file);
#endif
}
void Solver<Dtype>::Solve(const char* resume_file) {
  Caffe::set_phase(Caffe::TRAIN);
  LOG(INFO) << "Solving " << net_->name();
  PreSolve();

  iter_ = 0;
  if (resume_file) {
    LOG(INFO) << "Restoring previous solver status from " << resume_file;
    Restore(resume_file);
  }
  // Remember the initial iter_ value; will be non-zero if we loaded from a
  // resume_file above.
  const int start_iter = iter_;

  // For a network that is trained by the solver, no bottom or top vecs
  // should be given, and we will just provide dummy vecs.
  vector<Blob<Dtype>*> bottom_vec;
  for (; iter_ < param_.max_iter(); ++iter_) {
    // Save a snapshot if needed.
    if (param_.snapshot() && iter_ > start_iter &&
        iter_ % param_.snapshot() == 0) {
      Snapshot();
    }

    if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && (iter_ > 0 || param_.test_initialization())) {
      TestAll();
    }

    const bool display = param_.display() && iter_ % param_.display() == 0;
    net_->set_debug_info(display && param_.debug_info());
    net_->set_sample_print(display && param_.debug_info()
        && param_.sample_print());
    Dtype loss = net_->ForwardBackward(bottom_vec);
    if (display) {
      LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss;
      const vector<Blob<Dtype>*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const Dtype* result_vec = result[j]->cpu_data();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const Dtype loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight
                            << " = " << loss_weight * result_vec[k] << " loss)";
          }
          LOG(INFO) << "    Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
    }

    ComputeUpdateValue();
    net_->Update();
  }
  // Always save a snapshot after optimization, unless overridden by setting
  // snapshot_after_train := false.
  if (param_.snapshot_after_train()) {
    Snapshot();
  }
  // After the optimization is done, run an additional train and test pass to
  // display the train and test loss/outputs if appropriate (based on the
  // display and test_interval settings, respectively).  Unlike in the rest of
  // training, for the train net we only run a forward pass as we've already
  // updated the parameters "max_iter" times -- this final pass is only done to
  // display the loss, which is computed in the forward pass.
  if (param_.display() && iter_ % param_.display() == 0) {
    Dtype loss;
    net_->Forward(bottom_vec, &loss);
    LOG(INFO) << "Iteration " << iter_ << ", loss = " << loss;
  }
  if (param_.test_interval() && iter_ % param_.test_interval() == 0) {
    TestAll();
  }
  LOG(INFO) << "Optimization Done.";
}
int Board::GameOver(void)
{
    gameover = 0;
    TestAll(Check);
    return gameover;
}
/*
 =============================================================================
 Function:	DelBlk		// local //
 Author:	Rammi
 Date:		16.11.1995

 Return:	---

 Parameter:	Blk	block to remove
		file	called from

 Purpose:	Remove block from list.
		React angrily if block is unknown
 =============================================================================
*/
static void DelBlk(begin *Blk, const char *file)
{
  begin *B;			/* run var  */
  int    hash = HASH(Blk);	/* hash val */

  if (!Global.isInitialized) {
    fprintf(stderr,
	    HEAD "Calling free without having allocated block via rmalloc\n"
	    "in call from %s", file);
    abort();
  }

  /* look if block is known */
  for (B = Chain[hash].Next; B != &Chain[hash]; B = B->Next) {
    if (B == Blk) {
      goto found_actual_block;	/* friendly goto */
    }
  }

  /* not found */
  fprintf(stderr,
	  HEAD "Double or false delete\n"
	  "\tHeap address of block: %p\n"
	  "\tDetected in %s\n",
	  ((char *)Blk)+START_SPACE, file);
  {
    void (*old_sigsegv_handler)(int) = SIG_DFL;
    void (*old_sigbus_handler)(int)  = SIG_DFL;

    if (setjmp(errorbuf)) {
      /* uh oh, we got a kick in the ass */
      signal(SIGSEGV, old_sigsegv_handler);
      signal(SIGBUS,  old_sigbus_handler);
    }
    else {
      /* --- the following is dangerous! So catch signals --- */
      old_sigsegv_handler = signal(SIGSEGV, FatalSignal);
      old_sigbus_handler  = signal(SIGBUS,  FatalSignal);

      if (IsPossibleFilePos(Blk->File, Blk->Size)) {
	fprintf(stderr,
		"\tTrying identification (may be incorrect!):\n"
		"\t\tAllocated in %s [%u Bytes]\n",
		Blk->File, (unsigned) Blk->Size);
      }
      signal(SIGSEGV, old_sigsegv_handler);
      signal(SIGBUS,  old_sigbus_handler);
    }
  }
  abort();			/* die loud */

found_actual_block:
#if RM_TEST_DEPTH > 1
  /* check everything */
  TestAll(file);
#else
  /* test integrity of actual block */
  ControlBlock(Blk, file);
#endif

  /* remove: */
  Blk->Next->Prev = Blk->Prev;
  Blk->Prev->Next = Blk->Next;

  Global.BlockCount--;

#ifdef ELOQUENT
  fprintf(stderr,
	  HEAD "Delete: %d Bytes allocated in %s (from %s)\n",
	  Blk->Size, Blk->File, file);
#ifdef WITH_FLAGS
  if (Blk->Flags & RM_STRING) {
    char *c;

    /* look for eos; offset is measured from the payload start
       (parenthesized, so START_SPACE is subtracted, not added) */
    for (c = (char *)Blk + START_SPACE;
	 c - ((char *)Blk + START_SPACE) < Blk->Size;
	 c++) {
      if (!*c) {
	fprintf(stderr,
		HEAD "\tContains string: \"%s\"\n",
		(char *)Blk + START_SPACE);
	goto found_old_block;
      }
    }
    /* not found */
    fprintf(stderr, HEAD "\tContains string without null byte\n");
  found_old_block:
    ;
  }
#endif /* WITH_FLAGS */
#endif /* ELOQUENT */
#ifdef WITH_FLAGS
  if (Blk->Flags & RM_STATIC) {
    fprintf(stderr, HEAD "WARNING: freeing block marked as STATIC (in %s)\n",
	    file);
  }
#endif /* WITH_FLAGS */
}
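The identification path in DelBlk dereferences a header that is already known to be suspect, so it installs temporary SIGSEGV/SIGBUS handlers and setjmps first; if the probe faults, the handler (rmalloc's FatalSignal, which presumably longjmps to errorbuf) gets control and the old handlers are restored. A stripped-down sketch of that guard pattern, assuming a handler that does nothing but longjmp:

#include <csetjmp>
#include <csignal>
#include <cstdio>

static std::jmp_buf errorbuf;

extern "C" void OnFatalSignal(int) {
    std::longjmp(errorbuf, 1);   // escape from the faulting access
}

// Try to print a string that may live in a trashed block header.
// Returns false if reading it faulted.
bool TryPrint(const char* maybe_bad) {
    void (*old_segv)(int) = std::signal(SIGSEGV, OnFatalSignal);
    bool ok;
    if (setjmp(errorbuf)) {
        ok = false;                            // handler longjmp'd back here
    } else {
        std::printf("file: %s\n", maybe_bad);  // may fault on a bad pointer
        ok = true;
    }
    std::signal(SIGSEGV, old_segv);            // always restore the old handler
    return ok;
}

int main() {
    const char* good = "rmalloc.c";            // a valid pointer: no fault
    if (!TryPrint(good)) std::puts("unreadable header");
    return 0;
}

Note that longjmp out of an asynchronous signal handler is only conditionally safe; portable modern code would use sigsetjmp/siglongjmp, which this mid-90s allocator predates.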
void Solver::Step(int iters) {
  const int start_iter = iter_;
  const int stop_iter = iter_ + iters;
  int average_loss = this->param_.average_loss();
  losses_.clear();
  smoothed_loss_ = 0;
  const Caffe::Brew mode = Caffe::mode();
  const int solver_count = Caffe::solver_count();
  const bool root_solver = this->is_root();

  net_->set_solver(this);

#ifndef CPU_ONLY
  for (const shared_ptr<Blob>& param : net_->learnable_params()) {
    // To prevent allocations inside on_start call:
    param->allocate_data(mode == Caffe::GPU);
  }
  net_->InitializeLearnableDiffSpace();

  if (solver_count > 1) {
    // we need to sync all threads before starting, otherwise some cuda init,
    // malloc or other cuda stuff could interlock with in-loop cuda GPU sync
    // called in on_start.
    callback_soft_barrier();
    {
      unique_ptr<unique_lock<shared_mutex>> lock;
      if (root_solver) {
        lock.reset(new unique_lock<shared_mutex>(GPUMemory::read_write_mutex()));
      }
      callback_soft_barrier();
      callback_->on_start(net_->learnable_params());
    }
    callback_soft_barrier();
    LOG(INFO) << "Starting Optimization on GPU " << Caffe::current_device();
  }
  const bool use_multi_gpu_testing = Caffe::solver_count() > 1;
  const string mgpu_str = use_multi_gpu_testing ? "[MultiGPU] " : "";
#else
  const bool use_multi_gpu_testing = false;
  const string mgpu_str;
#endif

  uint64_t random_seed = param_.random_seed() >= 0 ?
      static_cast<uint64_t>(param_.random_seed()) : Caffe::next_seed();
  reduce_thread_.reset(new boost::thread(&Solver::Reduce, this,
      Caffe::current_device(), mode, random_seed, solver_count, root_solver));

  while (iter_ < stop_iter) {
    if (param_.snapshot_diff()) {
      net_->ClearParamDiffs();
    }  // we clean them in ApplyUpdate otherwise

    // Just started or restored?
    const bool first_loop = iter_ == 0 || iterations_last_ < 0;

    if (iter_ == 0) {
      if (TestAll(1, use_multi_gpu_testing)) {
        break;
      }
      callback_soft_barrier();
      LOG_IF(INFO, Caffe::root_solver()) << mgpu_str << "Initial Test completed";
    } else if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && iterations_last_ >= 0) {
      test_timer_.Start();
      if (TestAll(0, use_multi_gpu_testing)) {
        break;
      }
      callback_soft_barrier();
      float lapse = test_timer_.Seconds();
      LOG_IF(INFO, Caffe::root_solver()) << mgpu_str << "Tests completed in "
          << lapse << "s";
    }
    if (requested_early_exit_) {
      // Break out of the while loop because stop was requested while testing.
      break;
    }

    const bool display = this->display();
    net_->set_debug_info(display && param_.debug_info());

    // accumulate the loss and gradient
    float loss = 0.F;
    if (first_loop) {
      iterations_last_ = iter_;
      iteration_timer_.Start();
      init_flag_.set();
    }
    iteration_start_signal();
    for (int i = 0; i < param_.iter_size(); ++i) {
      loss += net_->ForwardBackward(i + 1 == param_.iter_size());
      if (i == 0) {
        if (first_loop) {
          iter0_flag_.set();
          net_->wait_layers_init();
        }
        iter_size_complete_ = true;
      }
    }
    loss /= param_.iter_size();
    iteration_wait();
    if (requested_early_exit_) {
      total_lapse_ += iteration_timer_.Seconds();
      break;
    }

    // average the loss across iterations for smoothed reporting
    UpdateSmoothedLoss(loss, start_iter, average_loss);

    if (display || iter_ <= 2 || iter_ + 1 >= stop_iter) {
      float lapse = iteration_timer_.Seconds();
      if (iter_ >= 2) {  // we skip 0th and 1st for correct benchmarking
        total_lapse_ += lapse;
        float per_s = (iter_ - iterations_last_) / (lapse > 0.F ? lapse : 1.F);
        LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
            << " (" << per_s << " iter/s, " << lapse << "s/"
            << param_.display() << " iter), loss = " << smoothed_loss_;
      } else {
        LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
            << " (" << lapse << " s), loss = " << smoothed_loss_;
      }
      const vector<Blob*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const float* result_vec = result[j]->cpu_data<float>();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const float loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight << " = "
                            << (loss_weight * result_vec[k]) << " loss)";
          }
          LOG_IF(INFO, Caffe::root_solver()) << "    Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
      PrintRate();
      iterations_last_ = iter_;
      iteration_timer_.Start();
    }

    // Increment the internal iter_ counter -- its value should always indicate
    // the number of times the weights have been updated.
    ++iter_;

    SolverAction::Enum request = GetRequestedAction();

    // Save a snapshot if needed.
    if ((param_.snapshot() && iter_ % param_.snapshot() == 0
         && Caffe::root_solver()) ||
        (request == SolverAction::SNAPSHOT)) {
      Snapshot();
    }
    if (SolverAction::STOP == request) {
      requested_early_exit_ = true;
      total_lapse_ += iteration_timer_.Seconds();
      // Break out of training loop.
      break;
    }
  }
  Finalize();
}
int main()
{
    TestAll();
    return 0;
}