void PumpRate(void) { LCD_SetTextSize(4); LCD_SetTextColor(BLACK,WHITE); while(1) { if(!Read_UP_Button()) { while(!Read_UP_Button); PumpingRate += 10; if(PumpingRate>300) PumpingRate=300; PWM_Freq(PumpingRate); Print_Text_On(Line4,Position4); //LCD_Printf("UP"); //PrintDecimal(PumpingRate); PrintRate(PumpingRate); } else if(!Read_DOWN_Button()) { while(!Read_DOWN_Button()); PumpingRate -= 10; if(PumpingRate<10) PumpingRate=10; PWM_Freq(PumpingRate); Print_Text_On(Line4,Position4); //LCD_Printf("DN"); //PrintDecimal(PumpingRate); PrintRate(PumpingRate); } else if(!Read_OK_Button()) { while(!Read_OK_Button()); LCD_SetCursor(2,Line7); LCD_SetTextSize(2); LCD_SetTextColor(BLUE,WHITE); LCD_Printf(" "); HAL_Delay(1000); //return; break; } } }
/**
 * @brief Run `iters` training iterations starting from the current iter_.
 *
 * Per iteration: optional testing, iter_size() accumulating forward/backward
 * passes, smoothed-loss bookkeeping, optional display of net outputs,
 * snapshotting, and early-exit handling. Gradient reduction/apply runs on a
 * separate thread (Solver::Reduce) synchronized through init_flag_ /
 * iter0_flag_ / iteration_start_signal() / iteration_wait().
 *
 * NOTE(review): statement order here is synchronization-sensitive (barriers,
 * flag sets, signal/wait pairing with the reduce thread) — do not reorder.
 */
void Solver::Step(int iters) {
  const int start_iter = iter_;
  const int stop_iter = iter_ + iters;
  int average_loss = this->param_.average_loss();
  losses_.clear();
  smoothed_loss_ = 0;
  const Caffe::Brew mode = Caffe::mode();
  const int solver_count = Caffe::solver_count();
  const bool root_solver = this->is_root();
  net_->set_solver(this);
#ifndef CPU_ONLY
  for (const shared_ptr<Blob>& param : net_->learnable_params()) {
    // To prevent allocations inside on_start call:
    param->allocate_data(mode == Caffe::GPU);
  }
  net_->InitializeLearnableDiffSpace();
  if (solver_count > 1) {
    // we need to sync all threads before starting, otherwise some cuda init,
    // malloc or other cuda stuff could interlock with in-loop cuda GPU sync
    // called in on_start.
    callback_soft_barrier();
    {
      // Only the root solver takes the GPU-memory write lock while all
      // solvers run on_start; the extra barrier keeps them in lock-step.
      unique_ptr<unique_lock<shared_mutex>> lock;
      if (root_solver) {
        lock.reset(new unique_lock<shared_mutex>(GPUMemory::read_write_mutex()));
      }
      callback_soft_barrier();
      callback_->on_start(net_->learnable_params());
    }
    callback_soft_barrier();
    LOG(INFO) << "Starting Optimization on GPU " << Caffe::current_device();
  }
  const bool use_multi_gpu_testing = Caffe::solver_count() > 1;
  const string mgpu_str = use_multi_gpu_testing ? "[MultiGPU] " : "";
#else
  const bool use_multi_gpu_testing = false;
  const string mgpu_str;
#endif
  // Seed forwarded to the reduce thread so its RNG is reproducible when
  // param_.random_seed() is set, otherwise drawn from the global sequence.
  uint64_t random_seed = param_.random_seed() >= 0 ?
      static_cast<uint64_t>(param_.random_seed()) : Caffe::next_seed();
  // Background thread that reduces/applies gradients for this solver.
  reduce_thread_.reset(new boost::thread(&Solver::Reduce, this,
      Caffe::current_device(), mode, random_seed, solver_count, root_solver));
  while (iter_ < stop_iter) {
    if (param_.snapshot_diff()) {
      net_->ClearParamDiffs();
    }  // we clean them in ApplyUpdate otherwise
    // Just started or restored?
    const bool first_loop = iter_ == 0 || iterations_last_ < 0;
    if (iter_ == 0) {
      // Initial test pass before any training; a nonzero return aborts.
      if (TestAll(1, use_multi_gpu_testing)) {
        break;
      }
      callback_soft_barrier();
      LOG_IF(INFO, Caffe::root_solver()) << mgpu_str << "Initial Test completed";
    } else if (param_.test_interval() && iter_ % param_.test_interval() == 0
        && iterations_last_ >= 0) {
      // Periodic test every test_interval() iterations (skipped right after
      // a restore, while iterations_last_ < 0).
      test_timer_.Start();
      if (TestAll(0, use_multi_gpu_testing)) {
        break;
      }
      callback_soft_barrier();
      float lapse = test_timer_.Seconds();
      LOG_IF(INFO, Caffe::root_solver()) << mgpu_str << "Tests completed in "
          << lapse << "s";
    }
    if (requested_early_exit_) {
      // Break out of the while loop because stop was requested while testing.
      break;
    }
    const bool display = this->display();
    net_->set_debug_info(display && param_.debug_info());
    // accumulate the loss and gradient
    float loss = 0.F;
    if (first_loop) {
      // First pass (fresh start or restore): reset the benchmark baseline
      // and release the reduce thread's init gate.
      iterations_last_ = iter_;
      iteration_timer_.Start();
      init_flag_.set();
    }
    iteration_start_signal();
    for (int i = 0; i < param_.iter_size(); ++i) {
      // `true` on the last sub-iteration — presumably tells the net this
      // pass completes the accumulated batch (confirm in ForwardBackward).
      loss += net_->ForwardBackward(i + 1 == param_.iter_size());
      if (i == 0) {
        if (first_loop) {
          iter0_flag_.set();
          net_->wait_layers_init();
        }
        iter_size_complete_ = true;
      }
    }
    loss /= param_.iter_size();
    iteration_wait();
    if (requested_early_exit_) {
      total_lapse_ += iteration_timer_.Seconds();
      break;
    }
    // average the loss across iterations for smoothed reporting
    UpdateSmoothedLoss(loss, start_iter, average_loss);
    // Display on schedule, plus always for the first iterations and the last.
    if (display || iter_ <= 2 || iter_ + 1 >= stop_iter) {
      float lapse = iteration_timer_.Seconds();
      if (iter_ >= 2) {
        // we skip 0th and 1st for correct benchmarking
        total_lapse_ += lapse;
        float per_s = (iter_ - iterations_last_) / (lapse > 0.F ? lapse : 1.F);
        LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
            << " (" << per_s << " iter/s, " << lapse << "s/"
            << param_.display() << " iter), loss = " << smoothed_loss_;
      } else {
        LOG_IF(INFO, Caffe::root_solver()) << "Iteration " << iter_
            << " (" << lapse << " s), loss = " << smoothed_loss_;
      }
      // Log every scalar of every output blob, annotated with its loss
      // weight when it contributes to the objective.
      const vector<Blob*>& result = net_->output_blobs();
      int score_index = 0;
      for (int j = 0; j < result.size(); ++j) {
        const float* result_vec = result[j]->cpu_data<float>();
        const string& output_name =
            net_->blob_names()[net_->output_blob_indices()[j]];
        const float loss_weight =
            net_->blob_loss_weights()[net_->output_blob_indices()[j]];
        for (int k = 0; k < result[j]->count(); ++k) {
          ostringstream loss_msg_stream;
          if (loss_weight) {
            loss_msg_stream << " (* " << loss_weight << " = "
                << (loss_weight * result_vec[k]) << " loss)";
          }
          LOG_IF(INFO, Caffe::root_solver()) << " Train net output #"
              << score_index++ << ": " << output_name << " = "
              << result_vec[k] << loss_msg_stream.str();
        }
      }
      PrintRate();
      // Reset the benchmark window to this display point.
      iterations_last_ = iter_;
      iteration_timer_.Start();
    }
    // Increment the internal iter_ counter -- its value should always indicate
    // the number of times the weights have been updated.
    ++iter_;
    SolverAction::Enum request = GetRequestedAction();
    // Save a snapshot if needed.
    if ((param_.snapshot() && iter_ % param_.snapshot() == 0
        && Caffe::root_solver()) || (request == SolverAction::SNAPSHOT)) {
      Snapshot();
    }
    if (SolverAction::STOP == request) {
      requested_early_exit_ = true;
      total_lapse_ += iteration_timer_.Seconds();
      // Break out of training loop.
      break;
    }
  }
  // Joins/cleans up the reduce thread and any per-run state (defined elsewhere).
  Finalize();
}