XCamReturn
DnnSemanticSegmentation::get_model_input_info (DnnInferInputOutputInfo& info)
{
    if (!_model_created) {
        XCAM_LOG_ERROR ("Please create the model first!");
        return XCAM_RETURN_ERROR_ORDER;
    }

    int id = 0;
    InputsDataMap inputs_info (_network.getInputsInfo ());

    for (auto& in : inputs_info) {
        auto& input = in.second;
        const InferenceEngine::SizeVector input_dims = input->getDims ();

        info.width[id] = input_dims[0];
        info.height[id] = input_dims[1];
        info.channels[id] = input_dims[2];
        info.object_size[id] = input_dims[3];
        info.precision[id] = convert_precision_type (input->getPrecision ());
        info.layout[id] = convert_layout_type (input->getLayout ());

        input->setPrecision (Precision::U8);
        id++;
    }
    info.batch_size = get_batch_size ();
    info.numbers = inputs_info.size ();

    return XCAM_RETURN_NO_ERROR;
}
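
// Usage sketch (illustrative, not from the original source): query the input
// geometry once the model exists. The DnnInferInputOutputInfo field names are
// taken from the function above; their exact types, and dump_input_info
// itself, are assumptions.
#include <cstdio>

void dump_input_info (DnnSemanticSegmentation& seg)
{
    DnnInferInputOutputInfo info;
    if (seg.get_model_input_info (info) != XCAM_RETURN_NO_ERROR)
        return;

    for (uint32_t i = 0; i < info.numbers; ++i) {
        printf ("input %u: %ux%u, %u channels, batch %u\n", i,
                (unsigned) info.width[i], (unsigned) info.height[i],
                (unsigned) info.channels[i], (unsigned) info.batch_size);
    }
}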
void init_training(RBM& rbm, Iterator input_first, Iterator input_last) {
    rbm.momentum = rbm.initial_momentum;

    if (EnableWatcher) {
        watcher.training_begin(rbm);
    }

    // Get the size of each batch
    batch_size = get_batch_size(rbm);

    if (std::is_same<typename std::iterator_traits<Iterator>::iterator_category,
                     std::random_access_iterator_tag>::value) {
        auto size = std::distance(input_first, input_last);

        // TODO: Better handling of an incomplete last batch would solve this problem
        // (this could be done by cleaning the data before the last batch)
        if (size % batch_size != 0) {
#ifndef DLL_SILENT
            std::cout << "WARNING: The number of samples should be divisible by the batch size" << std::endl;
            std::cout << "         This may cause discrepancies in the results." << std::endl;
#endif
        }

        // Only used for debugging purposes, no need to be precise
        total_batches = size / batch_size;
    } else {
        total_batches = 0;
    }

    last_error = 0.0;
}
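
// Minimal sketch of the iterator-category dispatch used above: std::distance
// is O(1) for random-access iterators, so the batch count can be precomputed
// there; for weaker categories it is left at 0 and counted while training.
// count_batches is an illustrative name, not part of the original code.
#include <iterator>
#include <type_traits>

template <typename Iterator>
long count_batches(Iterator first, Iterator last, long batch_size) {
    if (std::is_same<typename std::iterator_traits<Iterator>::iterator_category,
                     std::random_access_iterator_tag>::value) {
        return std::distance(first, last) / batch_size;
    }
    return 0; // unknown up front; count lazily during training
}
// count_batches(v.begin(), v.end(), 64) yields v.size() / 64 for a
// std::vector, but 0 for a std::forward_list.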
bool burst_result::mix(const burst_result& w) {
    if (!has_same_start_pos_to(w.get_start_pos()) ||
        !has_same_batch_interval(w) ||
        get_batch_size() != w.get_batch_size()) {
        return false;
    }

    if (p_ != w.p_) {
        if (accumulate_d_vec(*this) < accumulate_d_vec(w)) {
            p_ = w.p_;
        }
    }
    return true;
}
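
// Usage sketch (hypothetical): merging the same analysis window computed
// twice. mix() rejects windows that differ in start position, batch interval,
// or batch count; otherwise it keeps whichever result has the larger
// accumulated d vector, presumably the one backed by more observed data.
void merge_windows(burst_result& local, const burst_result& remote) {
    if (!local.mix(remote)) {
        // Incompatible windows: the local result is kept unchanged.
    }
}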
void train(Eigen::MatrixBase<Derived> const &input) {
    if (!reuse_layer_) {
        layers_.clear();
    }

    std::random_device rd;
    std::default_random_engine re(rd());
    int const Batch = get_batch_size(input.cols());
    int const RandomSize = input.cols() != Batch ? input.cols() - Batch - 1 : 0;
    std::uniform_int_distribution<int> uni_int(0, RandomSize);

#ifdef OCV_TEST_AUTOENCODER
    gradient_check();
#endif

    for (size_t i = 0; i < params_.hidden_size_.size(); ++i) {
        Eigen::MatrixBase<Derived> const &TmpInput = i == 0 ? input : eactivation_;
        if (!reuse_layer_) {
            layer es(TmpInput.rows(), params_.hidden_size_[i]);
            reduce_cost(uni_int, re, Batch, TmpInput, es);
            generate_activation(es, TmpInput, i == 0);
            layers_.push_back(es);
        } else {
            if (layers_.size() <= i) {
                layers_.emplace_back(static_cast<int>(TmpInput.rows()),
                                     params_.hidden_size_[i]);
            }
            reduce_cost(uni_int, re, Batch, TmpInput, layers_[i]);
            generate_activation(layers_[i], TmpInput, i == 0);
        }
    }
    act_.clear();
    buffer_.clear();
}
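
// Minimal sketch of the mini-batch sampling scheme shared by this trainer and
// the softmax trainer below: draw a random start column in
// [0, cols - batch - 1] and read a contiguous batch-wide block. All names
// here are illustrative, not from the original code.
#include <Eigen/Dense>
#include <random>

inline Eigen::MatrixXd sample_batch(const Eigen::MatrixXd &data, int batch,
                                    std::default_random_engine &re) {
    int const cols = static_cast<int>(data.cols());
    int const random_size = cols != batch ? cols - batch - 1 : 0;
    std::uniform_int_distribution<int> uni_int(0, random_size);
    // Copy out `batch` contiguous columns starting at a random offset.
    return data.block(0, uni_int(re), data.rows(), batch);
}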
void softmax<T>::train(const Eigen::Ref<const EigenMat> &train,
                       const std::vector<int> &labels)
{
#ifdef OCV_TEST_SOFTMAX
    gradient_check();
#endif

    auto const UniqueLabels = get_unique_labels(labels);
    auto const NumClass = UniqueLabels.size();
    weight_ = EigenMat::Random(NumClass, train.rows());
    grad_ = EigenMat::Zero(NumClass, train.rows());
    auto const TrainCols = static_cast<int>(train.cols());
    EigenMat const GroundTruth = get_ground_truth(static_cast<int>(NumClass), TrainCols,
                                                  UniqueLabels, labels);

    std::random_device rd;
    std::default_random_engine re(rd());
    int const Batch = get_batch_size(TrainCols);
    int const RandomSize = TrainCols != Batch ? TrainCols - Batch - 1 : 0;
    std::uniform_int_distribution<int> uni_int(0, RandomSize);

    for (size_t i = 0; i != params_.max_iter_; ++i) {
        auto const Cols = uni_int(re);
        auto const &TrainBlock = train.block(0, Cols, train.rows(), Batch);
        auto const &GTBlock = GroundTruth.block(0, Cols, NumClass, Batch);
        auto const Cost = compute_cost(TrainBlock, weight_, GTBlock);
        if (std::abs(params_.cost_ - Cost) < params_.epsillon_ || Cost < 0) {
            break;
        }
        params_.cost_ = Cost;
        compute_gradient(TrainBlock, weight_, GTBlock);
        weight_.array() -= grad_.array() * params_.lrate_;
    }
}
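
// Generic sketch of the descent loop above: compute the cost, stop early once
// the change drops below the tolerance, otherwise take a plain gradient step.
// (The original additionally bails out when the cost goes negative.)
// cost_fn/grad_fn stand in for compute_cost and compute_gradient; nothing
// here is taken from the original implementation.
#include <cmath>
#include <cstddef>
#include <limits>

template <typename Mat, typename CostFn, typename GradFn>
void gradient_descent(Mat &weight, CostFn cost_fn, GradFn grad_fn,
                      double lrate, double epsilon, std::size_t max_iter) {
    double last_cost = std::numeric_limits<double>::max();
    for (std::size_t i = 0; i != max_iter; ++i) {
        double const cost = cost_fn(weight);
        if (std::abs(last_cost - cost) < epsilon) {
            break; // converged: cost barely changed
        }
        last_cost = cost;
        weight.array() -= grad_fn(weight).array() * lrate;
    }
}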