// Constructs the animated object for a ready batch: discovers all skeletal
// animation directories for the batch's skeleton and kicks off loading of
// the keyframes of the first discovered animation.
gfx_animated_object::gfx_animated_object(qtgl::batch const  batch_)
    : m_batch(batch_)
    , m_animation_names()
    , m_animation_index(0U)
    , m_animation_time(0.0f)
    , m_keyframes()
{
    TMPROF_BLOCK();

    ASSUMPTION(get_batch().ready());
    ASSUMPTION(get_batch().get_available_resources().skeletal() != nullptr);

    // Each sub-directory of this root holds one skeletal animation of the
    // batch's skeleton.
    boost::filesystem::path const  animations_root =
            canonical_path(get_batch().get_available_resources().data_root_dir())
                / "animations"
                / "skeletal"
                / get_batch().get_available_resources().skeletal()->skeleton_name();

    for (boost::filesystem::directory_entry const&  dir_entry :
            boost::filesystem::directory_iterator(animations_root))
    {
        if (boost::filesystem::is_directory(dir_entry.path()))
            m_animation_names.push_back(dir_entry.path().string());
    }

    // No animations found for the skeleton -> nothing to load.
    if (m_animation_names.empty())
        return;

    m_keyframes.insert_load_request(m_animation_names.at(m_animation_index));
    ASSUMPTION(!m_keyframes.empty());
}
// Hands out `num_batches` consecutive LL_Batch slots from the pool and
// advances the pool cursor.  When the pool cannot satisfy the request, the
// pending work is flushed via run_dl() and the request is retried.
LL_Batch *VngoClear3D::get_batch(int num_batches)
{
    // Fast path: enough free slots remain below the pool limit.
    if ((BatchCount + num_batches) < CLEAR3D_MAX_BATCH - 1)
    {
        LL_Batch *first_slot = pBatch;   // request starts at the current cursor
        BatchCount += num_batches;
        pBatch += num_batches;
        return first_slot;
    }

    // Pool exhausted: flush and retry.
    // NOTE(review): this assumes run_dl() resets BatchCount/pBatch; if it does
    // not, or if num_batches alone exceeds the pool capacity, the recursion
    // never terminates -- verify run_dl()'s contract.
    run_dl();
    return get_batch(num_batches);
}
void network::learning(const train_couple* training_data, int data_count, int batch_size, double eta, int epoch_number) { train_couple* data_copy=new train_couple[data_count]; //data to shuffle later in order to have no affect on original training_data for (int i=0; i<data_count; i++) data_copy[i]=training_data[i]; int nbatches=data_count/batch_size; for (int i=0; i<epoch_number; i++) { shuffle(data_copy,data_count); for (int batch_number=0; batch_number<nbatches; batch_number++){ train_couple* mini_batch=get_batch(data_copy,batch_size,batch_number); SGD(mini_batch,batch_size,eta,data_count); delete [] mini_batch; } cout<<"epoch # "<<i<<" has been finished\n"; } delete [] data_copy; }
/* Requests a batch via get_batch (0); on a nonzero (successful) result,
   records the fact in the global saved_batch flag.  */
void
get_batch_nwg ()
{
  if (get_batch (0))
    saved_batch = 1;
}
/* Requests a batch via get_batch (1) and deliberately discards the result;
   the cast to void silences unused-result warnings.  */
void
get_batch_wg ()
{
  (void) get_batch (1);
}