template <typename Dtype>
void ResizeImageTransformer<Dtype>::SampleTransformParams(const vector<int>& in_shape) {
  ImageTransformer<Dtype>::SampleTransformParams(in_shape);
  CHECK_GE(in_shape.size(), 2);
  CHECK_LE(in_shape.size(), 4);
  int in_width = in_shape[in_shape.size() - 1];
  int in_height = in_shape[in_shape.size() - 2];

  // Dispatch on which resize parameters were specified.
  if (param_.width_size()) {
    SampleFixedIndependent();
  } else if (param_.size_size()) {
    SampleFixedTied();
  } else if (param_.width_perc_size()) {
    SamplePercIndependent(in_width, in_height);
  } else if (param_.size_perc_size()) {
    SamplePercTied(in_width, in_height);
  } else {
    CHECK(0) << "Invalid resize param";
  }
  PrintParams();
}
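// Standalone sketch (not part of the transformer source): illustrates the shape
// convention the CHECKs above enforce. SampleTransformParams accepts any blob shape
// with 2 to 4 axes and always treats the last axis as width and the next-to-last as
// height; the shapes below are made-up example values.
#include <cassert>
#include <iostream>
#include <vector>

int main() {
  std::vector<std::vector<int>> shapes = {
      {240, 320},         // (H, W)
      {3, 240, 320},      // (C, H, W)
      {1, 3, 240, 320}};  // (N, C, H, W)
  for (const auto& in_shape : shapes) {
    assert(in_shape.size() >= 2 && in_shape.size() <= 4);
    int in_width = in_shape[in_shape.size() - 1];   // same indexing as the code above
    int in_height = in_shape[in_shape.size() - 2];
    std::cout << "height " << in_height << ", width " << in_width << std::endl;
  }
  return 0;
}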
//---------------------------------------------------------
bool MPI_Solver :: MPI_ConseqSolve( int argc, char **argv )
{
// read all cnf in current dir and then start them in consecutive mode
	std::string dir = std::string(".");
	int cnf_count = 0;
	std::vector<std::string> files;
	std::vector<std::string> cnf_files;
	std::fstream out_file;
	int current_obj_val = -1;
	unsigned long long process_sat_count = 0;
	double cnf_time_from_node;
	//PBSolver_cut pbs_cut;
	std::stringstream solve_sstream;
	double start_sec;
	double final_sec;
	Solver *S;
	std::vector<std::vector<bool>> interrupted_problems_var_values_from_process,
	                               sat_assignments_from_process;

	// MPI start
	//MPI_Request request;
	int corecount = 10, rank = 0;
	/*MPI_Init( &argc, &argv );
	MPI_Comm_size( MPI_COMM_WORLD, &corecount );
	MPI_Comm_rank( MPI_COMM_WORLD, &rank );*/

	files = std::vector<std::string>( );
	cnf_files = std::vector<std::string>( );
	// get all files in current dir
	getdir( dir, files );
	for ( unsigned i = 0; i < files.size( ); i++ ) {
		if ( files[i].find( ".cnf" ) != std::string::npos ) {
			cnf_count++;
			cnf_files.push_back( files[i] );
			if ( rank == 0 )
				std::cout << std::endl << "found cnf " << files[i].c_str( );
		}
	}

	if ( cnf_count > corecount ) {
		if ( rank == 0 ) {
			std::cout << std::endl << "Warning. Count of cnf-files > corecount";
			std::cout << std::endl << "Only the first " << corecount << " cnf will be processed";
			std::cout << std::endl << "cnf_count changed to corecount";
		}
		cnf_count = corecount;
	}
	else if ( cnf_count == 0 ) {
		if ( rank == 0 )
			std::cout << std::endl << "Error. No cnf-files in dir";
		return false;
	}

	if ( rank > cnf_count - 1 )
		std::cout << std::endl << "core # " << rank << " with no job";
	else { // do job
		std::stringstream sstream;
		sstream << "answer_" << rank + 1;
		std::string out_file_name = sstream.str( );
		sstream.str( "" );
		sstream.clear();

		start_sec = MPI_Wtime( ); // get init time
		input_cnf_name = &cnf_files[rank][0]; // set current name of file
		std::cout << std::endl << "input_cnf_name is " << input_cnf_name;

		unsigned int zero_mask[FULL_MASK_LEN];
		for ( int i = 0; i < FULL_MASK_LEN; i++ )
			zero_mask[i] = 0;

		if ( !ReadIntCNF( ) ) { // Read original CNF
			std::cout << "\n Error in ReadIntCNF" << std::endl;
			return false;
		}
		std::cout << std::endl << "end of ReadIntCNF";
		if ( rank == 0 )
			PrintParams( );

		if ( !IsPB ) {
			int current_task_index = 0;
			std::cout << std::endl << std::endl << "Standard mode of SAT solving";
			if ( !SolverRun( S, process_sat_count, cnf_time_from_node, current_task_index,
			                 interrupted_problems_var_values_from_process,
			                 sat_assignments_from_process ) ) {
				std::cout << std::endl << "Error in SolverRun";
				return false;
			}
			if ( process_sat_count ) {
				if ( !AnalyzeSATset( cnf_time_from_node ) ) {
					// no need to deallocate memory - MPI_Abort will do it
					std::cout << "\n Error in Analyzer" << std::endl;
					MPI_Abort( MPI_COMM_WORLD, 0 );
					return false;
				}
			}
		}

		final_sec = MPI_Wtime( ) - start_sec;
		sstream << input_cnf_name << " " << final_sec << " sec" << std::endl;
		out_file.open( out_file_name.c_str( ), std::ios_base :: out ); // open and clear out file
		out_file << sstream.rdbuf( );
		out_file << solve_sstream.rdbuf( );
		std::cout << std::endl << "*** sstream " << sstream.str( );
		std::cout << std::endl << "*** solve_sstream " << solve_sstream.str( );
		out_file.close( );
	}

	MPI_Finalize( ); // MPI end
	std::cout << std::endl << "End of ControlConseqProcessSolve";
	return true;
}
int main(int argc, char* argv[]){

	// Start timing!
	boost::timer::cpu_timer myTimer;

	cout << endl;
	cout << "BEGIN" << endl;

	// BEGIN: setup
	FIELDCONTAINER field;
	DATA params;
	GRIDINFO grid;
	LAPLACIANSTENCIL stencil;

	// Read in parameter files & populate "params"
	GetParams(argc, argv, &params);
	CheckParams(&params);

	if( params.flag == 0 ){

		// Use info to setup "grid" and "field" struct
		SetupGrid(&grid, &params);
		SetupField(&params, &field);
		SetupLaplacianStencil(&params, &stencil);

		// Print params to screen & logfile
		ofstream logout;
		logout.open(params.OutDir + params.RunID + "_log.dat");
		PrintParams(cout, &params, &stencil, 0);
		PrintParams(logout, &params, &stencil, 0);
		logout.close();
		// END: setup

		// BEGIN: solving
		// Setup initial conditions
		InitialConditions(&params, &grid, &field);

		// Solve field equation
		SolveKG3D(&params, &grid, &field, &stencil);

		// Delete arrays
		field.CleanField(&field);
		// END: solving

		// BEGIN: feedback
		myTimer.stop();
		params.TotalRunTime = myTimer.elapsed().wall / 1e6;
		logout.open(params.OutDir + params.RunID + "_log.dat", std::ofstream::app);
		PrintParams(cout, &params, &stencil, 1);
		PrintParams(logout, &params, &stencil, 1);
		logout.close();
		// END: feedback

	} // END if( params.flag == 0 ){}

} // end main()
bool MPI_Solver :: ControlProcessSolve( std::vector<int> extern_var_choose_order,
                                        std::vector<std::vector<bool>> &interrupted_problems_var_values,
                                        std::vector<satisfying_assignment> &satisfying_assignments )
{
	interrupted_problems_var_values.clear();
	std::vector<bool> cur_interrupted_problems_var_values;
	satisfying_assignment cur_satisfying_assignment;

	std::cout << std::endl << "ControlProcessSolve is running" << std::endl;
	std::cout << "solving_iteration_count " << solving_iteration_count << std::endl;

	if ( solving_iteration_count == 0 ) {
		if ( !ReadIntCNF() ) { // Read original CNF
			std::cerr << "Error in ReadIntCNF" << std::endl;
			MPI_Abort( MPI_COMM_WORLD, 0 );
		}
		if ( !MakeVarChoose() ) {
			std::cerr << "Error in MakeVarChoose" << std::endl;
			MPI_Abort( MPI_COMM_WORLD, 0 );
		}
	}

	if ( extern_var_choose_order.size() > 0 )
		var_choose_order = extern_var_choose_order;

	std::cout << "var_choose_order " << std::endl;
	for ( auto &x : var_choose_order )
		std::cout << x << " ";
	std::cout << std::endl;

	// log(a)/log(b) = log_b(a)
	unsigned max_possible_tasks_count =
		(unsigned)(pow( 2, ceil( log(corecount - 1)/log(2) ))) * (unsigned)(pow( 2, koef_val ));
	std::cout << "max_possible_tasks_count " << max_possible_tasks_count << std::endl;
	std::cout << "current part_mask_var_count " << part_mask_var_count << std::endl;
	part_mask_var_count = (unsigned)(log(max_possible_tasks_count)/log(2));
	if ( part_mask_var_count > var_choose_order.size() )
		part_mask_var_count = var_choose_order.size();

	// change batch size to threshold value if needed
	if ( var_choose_order.size() - part_mask_var_count > RECOMMEND_BATCH_VAR_COUNT ) {
		part_mask_var_count = var_choose_order.size() - RECOMMEND_BATCH_VAR_COUNT;
		std::cout << "part_mask_var_count changed to " << part_mask_var_count << std::endl;
	}
	if ( part_mask_var_count > MAX_PART_MASK_VAR_COUNT )
		part_mask_var_count = MAX_PART_MASK_VAR_COUNT;

	if ( var_choose_order.size() - part_mask_var_count > MAX_BATCH_VAR_COUNT ) {
		std::cerr << "Error. var_choose_order.size() - part_mask_var_count > MAX_BATCH_VAR_COUNT" << std::endl;
		std::cerr << var_choose_order.size() - part_mask_var_count << " > " << MAX_BATCH_VAR_COUNT << std::endl;
		MPI_Abort( MPI_COMM_WORLD, 0 );
	}
	std::cout << "part_mask_var_count " << part_mask_var_count << std::endl;

	// get default count of tasks = power of part_mask_var_count
	unsigned part_var_power = ( 1 << part_mask_var_count );
	std::cout << "part_var_power " << part_var_power << std::endl;
	// TODO add extended tasks counting
	all_tasks_count = part_var_power;
	std::cout << "all_tasks_count " << all_tasks_count << std::endl;

	if ( (int)all_tasks_count < corecount-1 ) {
		std::cerr << "Error. all_tasks_count < corecount-1" << std::endl;
		std::cerr << all_tasks_count << " < " << corecount-1 << std::endl;
		MPI_Abort( MPI_COMM_WORLD, 0 );
	}

	if ( solving_iteration_count == 0 )
		PrintParams( );

	if ( skip_tasks >= all_tasks_count ) {
		std::cerr << "skip_tasks >= all_tasks_count " << std::endl;
		std::cerr << skip_tasks << " >= " << all_tasks_count << std::endl;
		MPI_Abort( MPI_COMM_WORLD, 0 );
	}

	values_arr.resize( all_tasks_count );
	for ( unsigned i = 0; i < values_arr.size(); ++i )
		values_arr[i].resize( FULL_MASK_LEN );

	if ( !MakeStandardMasks( part_var_power ) ) {
		std::cerr << "Error in MakeStandardMasks" << std::endl;
		MPI_Abort( MPI_COMM_WORLD, 0 );
	}
	std::cout << "Correct end of MakeStandardMasks" << std::endl;

	unsigned solved_tasks_count = 0;
	// write init info
	WriteSolvingTimeInfo( solving_times, solved_tasks_count );

	int *var_choose_order_int = new int[MAX_CORE_LEN];
	for( unsigned i=0; i < MAX_CORE_LEN; ++i ) {
		if ( i < var_choose_order.size() )
			var_choose_order_int[i] = var_choose_order[i];
		else
			var_choose_order_int[i] = -1;
	}

	std::cout << "before sending configuration info" << std::endl;
	// send configuration info once to every compute process
	for ( int i=0; i < corecount-1; ++i ) {
		MPI_Send( &core_len,                1, MPI_INT,      i + 1, 0, MPI_COMM_WORLD );
		MPI_Send( &all_tasks_count,         1, MPI_UNSIGNED, i + 1, 0, MPI_COMM_WORLD );
		MPI_Send( &solving_iteration_count, 1, MPI_INT,      i + 1, 0, MPI_COMM_WORLD );
		MPI_Send( &max_solving_time,        1, MPI_DOUBLE,   i + 1, 0, MPI_COMM_WORLD );
		MPI_Send( &start_activity,          1, MPI_DOUBLE,   i + 1, 0, MPI_COMM_WORLD );
		MPI_Send( var_choose_order_int, MAX_CORE_LEN, MPI_INT, i + 1, 0, MPI_COMM_WORLD );
	}
	delete[] var_choose_order_int;

	int next_task_index = 0;
	//unsigned start_tasks_count = min( corecount - 1, (int)all_tasks_count );
	unsigned start_tasks_count = ((corecount - 1) < (int)all_tasks_count) ?
		(unsigned)(corecount - 1) : all_tasks_count;
	int char_arr_len;
	char *char_arr;
	unsigned elem_index;
	std::cout << "start_tasks_count " << start_tasks_count << std::endl;

	// send to all cores (except # 0) tasks from 1st range
	for ( int i = 0; i < (int)start_tasks_count; ++i ) {
		// send new index of task for reading tasks from file
		MPI_Send( &next_task_index, 1, MPI_INT, i + 1, 0, MPI_COMM_WORLD );
		if ( ( verbosity > 1 ) && ( i == 0 ) )
			std::cout << "sent next_task_index " << next_task_index << std::endl;
		copy( values_arr[i].begin(), values_arr[i].end(), mask_value );
		MPI_Send( full_mask,  FULL_MASK_LEN, MPI_UNSIGNED, i + 1, 0, MPI_COMM_WORLD );
		MPI_Send( part_mask,  FULL_MASK_LEN, MPI_UNSIGNED, i + 1, 0, MPI_COMM_WORLD );
		MPI_Send( mask_value, FULL_MASK_LEN, MPI_UNSIGNED, i + 1, 0, MPI_COMM_WORLD );
		next_task_index++;
	}
	std::cout << "after sending start_tasks_count" << std::endl;

	total_solving_times[0] = 1 << 30; // start min len
	for ( unsigned i = 1; i < total_solving_times.size(); ++i )
		total_solving_times[i] = 0;
	process_sat_count = 0;

	MPI_Status status, current_status;
	while ( solved_tasks_count < all_tasks_count ) {
		// receive from a core a message about a solved task
		MPI_Recv( &process_sat_count, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
		if ( verbosity > 0 )
			std::cout << "received process_sat_count " << process_sat_count << std::endl;
		current_status = status;
		MPI_Recv( solving_times, SOLVING_TIME_LEN, MPI_DOUBLE, current_status.MPI_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
		if ( verbosity > 0 )
			std::cout << "received solving_times " << std::endl;

		// get interrupted tasks if such exist
		MPI_Probe( current_status.MPI_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
		MPI_Get_count( &status, MPI_CHAR, &char_arr_len );
		if ( ( char_arr_len > 1 ) && ( char_arr_len % var_choose_order.size() != 0 ) ) {
			std::cerr << "char_arr_len % var_choose_order.size() != 0" << std::endl;
			MPI_Abort( MPI_COMM_WORLD, 0 );
		}
		if ( char_arr_len > 0 ) {
			char_arr = new char[char_arr_len];
			MPI_Recv( char_arr, char_arr_len, MPI_CHAR, current_status.MPI_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
			if ( char_arr_len > 1 ) {
				// read several interrupted problems from one array
				//std::cout << "received char_arr_len " << char_arr_len << std::endl;
				cur_interrupted_problems_var_values.resize( var_choose_order.size() );
				elem_index = 0;
				for ( int j=0; j < char_arr_len; j++ ) {
					cur_interrupted_problems_var_values[elem_index++] = ( char_arr[j] == '1' );
					if ( (j+1) % var_choose_order.size() == 0 ) {
						interrupted_problems_var_values.push_back( cur_interrupted_problems_var_values );
						elem_index = 0;
					}
				}
			}
			delete[] char_arr;
		}

		// get satisfying assignments if such exist
		MPI_Probe( current_status.MPI_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
		MPI_Get_count( &status, MPI_CHAR, &char_arr_len );
		if ( ( char_arr_len > 1 ) && ( char_arr_len % var_count != 0 ) ) {
			std::cerr << "char_arr_len % var_count != 0" << std::endl;
			std::cerr << char_arr_len << " % " << var_count << " != 0 " << std::endl;
			MPI_Abort( MPI_COMM_WORLD, 0 );
		}
		if ( char_arr_len > 0 ) {
			cur_satisfying_assignment.solving_time = solving_times[3]; // sat solving time
			char_arr = new char[char_arr_len];
			MPI_Recv( char_arr, char_arr_len, MPI_CHAR, current_status.MPI_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status );
			if ( char_arr_len > 1 ) {
				// read several assignments from one array
				std::cout << "received char_arr_len " << char_arr_len << std::endl;
				elem_index = 0;
				cur_satisfying_assignment.str.resize( var_count );
				for ( int j=0; j < char_arr_len; j++ ) {
					cur_satisfying_assignment.str[elem_index++] = char_arr[j];
					if ( (j+1) % var_count == 0 ) {
						satisfying_assignments.push_back( cur_satisfying_assignment );
						elem_index = 0;
					}
				}
			}
			delete[] char_arr;
		}

		if ( char_arr_len > 1 )
			std::cout << "interrupted_problems_var_values.size() " << interrupted_problems_var_values.size() << std::endl;

		/*char_send_array = new char[interrupted_problems_var_values_from_process.size() * var_choose_order.size()];
		char_send_array_index = 0;
		for ( auto &x : interrupted_problems_var_values_from_process )
			for ( auto &y : x )
				char_send_array[char_send_array_index++] = (y == true ? '1' : '0');
		MPI_Send( char_send_array, char_send_array_index, MPI_CHAR, 0, 0, MPI_COMM_WORLD );
		delete[] char_send_array;*/

		solved_tasks_count++;
		if ( verbosity > 0 )
			std::cout << "solved_tasks_count " << solved_tasks_count << std::endl;

		if ( process_sat_count ) {
			sat_count += process_sat_count;
			std::cout << "sat_count " << sat_count << std::endl;
			if ( finding_first_sat_time == 0 ) // first time only
				finding_first_sat_time = MPI_Wtime() - total_start_time;
		}

		WriteSolvingTimeInfo( solving_times, solved_tasks_count );

		if ( sat_count && !IsSolveAll )
			break; // exit if SAT set found

		if ( next_task_index < (int)all_tasks_count ) {
			// send new index of task
			MPI_Send( &next_task_index, 1, MPI_INT, current_status.MPI_SOURCE, 0, MPI_COMM_WORLD );
			// send to the freed core a new task in the format of minisat input masks
			copy( values_arr[next_task_index].begin(), values_arr[next_task_index].end(), mask_value );
			MPI_Send( mask_value, FULL_MASK_LEN, MPI_UNSIGNED, current_status.MPI_SOURCE, 0, MPI_COMM_WORLD );
			next_task_index++;
		}
	} // while ( solved_tasks_count < all_tasks_count )

	solving_iteration_count++;

	return true;
}
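//---------------------------------------------------------
// Standalone sketch (not part of MPI_Solver): reproduces the task-count arithmetic
// from ControlProcessSolve above in isolation, using integer arithmetic in place of
// the pow/log expressions. corecount, koef_val and the var_choose_order size are
// made-up example values; RECOMMEND_BATCH_VAR_COUNT and MAX_PART_MASK_VAR_COUNT only
// mirror the project's constants by name, their values here are assumptions.
#include <iostream>

int main() {
	// Hypothetical example values:
	const int corecount = 17;                       // 1 control + 16 compute processes
	const int koef_val = 4;                         // oversubscription exponent
	const unsigned var_choose_order_size = 40;      // size of var_choose_order
	const unsigned RECOMMEND_BATCH_VAR_COUNT = 30;  // assumed value of the project constant
	const unsigned MAX_PART_MASK_VAR_COUNT = 26;    // assumed value of the project constant

	// Smallest power of two >= corecount-1, scaled by 2^koef_val.
	unsigned max_possible_tasks_count = 1;
	while ( max_possible_tasks_count < (unsigned)(corecount - 1) )
		max_possible_tasks_count <<= 1;
	max_possible_tasks_count <<= koef_val;

	// part_mask_var_count = log2(max_possible_tasks_count), then clamped as in the code above.
	unsigned part_mask_var_count = 0;
	while ( (1u << (part_mask_var_count + 1)) <= max_possible_tasks_count )
		++part_mask_var_count;
	if ( part_mask_var_count > var_choose_order_size )
		part_mask_var_count = var_choose_order_size;
	if ( var_choose_order_size - part_mask_var_count > RECOMMEND_BATCH_VAR_COUNT )
		part_mask_var_count = var_choose_order_size - RECOMMEND_BATCH_VAR_COUNT;
	if ( part_mask_var_count > MAX_PART_MASK_VAR_COUNT )
		part_mask_var_count = MAX_PART_MASK_VAR_COUNT;

	unsigned all_tasks_count = 1u << part_mask_var_count; // one task per assignment of the masked vars
	std::cout << "max_possible_tasks_count " << max_possible_tasks_count << std::endl; // 256
	std::cout << "part_mask_var_count "      << part_mask_var_count      << std::endl; // 10
	std::cout << "all_tasks_count "          << all_tasks_count          << std::endl; // 1024
	return 0;
}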
void FNeuralNetLMBase::TrainLM(const string &validationfile,
                               const string &outbase,
                               bool nce_ppl) {
  // =============
  // Prepare for the training
  // Equivalent to ReadLM
  word_vocab_.ReadVocabFromTxt(word_vocab_filename_);
  if (word_vocab_.empty()) {
    cerr << "empty word vocabulary!" << endl;
    exit(EXIT_FAILURE);
  }
  factor_vocab_.ReadVocabFromTxt(factor_vocab_filename_);
  if (factor_vocab_.empty()) {
    cerr << "empty factor vocabulary!" << endl;
    exit(EXIT_FAILURE);
  }
  ReadDecompFromTxt(decomp_filename_);

  PrintParams();
  CheckParams();
  AllocateModel();
  InitializeNeuralNet();
  // ==== END ====

  // Read the data
  FNNLMDataReader train_data(train_filenames_, &word_vocab_, &factor_vocab_,
                             shuffle_datafiles_, shuffle_sentences_);
  vector<string> validation_filenames = { validationfile };
  FNNLMDataReader validation_data(validation_filenames, &word_vocab_, &factor_vocab_, false, false);

  // Set NCE sampling.
  if (nce_) {
    // TODO: flatten noise_distribution_?
    vector<int> word_count(word_vocab_.size(), 0);
    int num_word_tokens = 0;
    const size_t eos_widx = word_vocab().eos_idx();
    vector<int> factor_count(factor_vocab_.size(), 0);
    int num_factor_tokens = 0;
    const size_t eos_fidx = factor_vocab().eos_idx();

    vector<pair<size_t, vector<size_t>>> sentence;
    train_data.StartEpoch();
    while (train_data.GetSentence(sentence)) {
      for (vector<pair<size_t, vector<size_t>>>::const_iterator it = sentence.begin();
           it != sentence.end(); ++it) {
        word_count[it->first]++;
        num_word_tokens++;
        if (weight_factor_output_ > 0) {
          for (size_t p = 0; p < it->second.size(); p++) {
            factor_count[it->second[p]]++;
            num_factor_tokens++;
          }
        }
      }
      word_count[eos_widx]++;
      num_word_tokens++;
      if (weight_factor_output_ > 0) {
        factor_count[eos_fidx]++;
        num_factor_tokens++;
      }
    }

    word_noise_distribution_ = Distribution(word_count.begin(), word_count.end());
    word_noise_pdf_ = word_noise_distribution_.param().probabilities();
    if (weight_factor_output_ > 0) {
      factor_noise_distribution_ = Distribution(factor_count.begin(), factor_count.end());
      factor_noise_pdf_ = factor_noise_distribution_.param().probabilities();
    }
    NCECheckSampling();
    log_num_negative_samples_ = log(num_negative_samples_);
  }

  BatchSGDTrain(train_data, validation_data, outbase, nce_ppl);

  cout << "================================================================================" << endl;
  cout << "Log-likelihood (base e) on validation is: "
       << EvalLM(validation_data, false) << endl;
}
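// Standalone sketch (not part of FNeuralNetLMBase): shows how the NCE noise
// distribution above can be built from raw token counts with the standard library.
// `Distribution` in TrainLM is assumed to behave like std::discrete_distribution;
// the counts and seed below are made-up placeholders.
#include <iostream>
#include <random>
#include <vector>

int main() {
  std::vector<int> word_count = {5, 1, 3, 1};  // hypothetical unigram counts

  // Same pattern as word_noise_distribution_ / word_noise_pdf_ in TrainLM:
  std::discrete_distribution<int> noise(word_count.begin(), word_count.end());
  std::vector<double> pdf = noise.param().probabilities();  // normalized to sum to 1

  for (size_t w = 0; w < pdf.size(); ++w)
    std::cout << "P(noise = " << w << ") = " << pdf[w] << std::endl;

  // Drawing a negative sample for NCE is then just a call to the distribution.
  std::mt19937 gen(12345);  // fixed seed for reproducibility
  std::cout << "example noise sample: " << noise(gen) << std::endl;
  return 0;
}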