void out_action ( char const * const action, char const * const target, char const * const command, char const * const out_data, char const * const err_data, int const exit_reason ) { /* Print out the action + target line, if the action is quiet the action * should be null. */ if ( action ) fprintf( bjam_out, "%s %s\n", action, target ); /* Print out the command executed if given -d+2. */ if ( DEBUG_EXEC ) { fputs( command, bjam_out ); fputc( '\n', bjam_out ); } /* Print out the command executed to the command stream. */ if ( globs.cmdout ) fputs( command, globs.cmdout ); /* If the process expired, make user aware with an explicit message, but do * this only for non-quiet actions. */ if ( exit_reason == EXIT_TIMEOUT && action ) fprintf( bjam_out, "%ld second time limit exceeded\n", globs.timeout ); /* Print out the command output, if requested, or if the program failed, but * only output for non-quiet actions. */ if ( action || exit_reason != EXIT_OK ) { if ( out_data && ( ( globs.pipe_action & 1 /* STDOUT_FILENO */ ) || ( globs.pipe_action == 0 ) ) ) out_( out_data, bjam_out ); if ( err_data && ( globs.pipe_action & 2 /* STDERR_FILENO */ ) ) out_( err_data, bjam_err ); } fflush( bjam_out ); fflush( bjam_err ); fflush( globs.cmdout ); }
//-----------------------------------------------------------------// bool service() noexcept { if(limit_ == 0) return false; for(uint8_t i = 0; i < limit_; ++i) { out_(static_cast<command>(static_cast<uint8_t>(command::DIGIT_0) + i), data_[i]); } return true; }
// Round-trips 'data_size' random bytes through a HashingWriteBuffer and
// back through a HashingReadBuffer at several read-block sizes, checking
// that the bytes survive intact and that both running hashes equal the
// reference hash of the raw data.  Calls FAIL() on any mismatch.
void test(size_t data_size)
{
    std::vector<char> vec(data_size);
    char * data = &vec[0];

    // Pseudo-random payload; seeding is left to the caller's rand() state.
    for (size_t i = 0; i < data_size; ++i)
        data[i] = rand() & 255;

    uint128 reference = referenceHash(data, data_size);

    // Block sizes straddle typical internal-buffer boundaries (powers of
    // two and their off-by-one neighbours) to exercise partial blocks.
    std::vector<size_t> block_sizes = {56, 128, 513, 2048, 3055, 4097, 4096};
    for (size_t read_buffer_block_size : block_sizes)
    {
        std::cout << "block size " << read_buffer_block_size << std::endl;

        // Write path: data -> HashingWriteBuffer -> stringstream.
        std::stringstream io;
        DB::WriteBufferFromOStream out_(io);
        DB::HashingWriteBuffer out(out_);
        out.write(data, data_size);
        out.next();

        //std::cout.write(data, data_size);
        //std::cout << std::endl;
        //std::cout << io.str() << std::endl;

        // Read path: stringstream -> HashingReadBuffer, chunked by
        // read_buffer_block_size.
        DB::ReadBufferFromIStream source(io, read_buffer_block_size);
        DB::HashingReadBuffer buf(source);

        std::vector<char> read_buf(data_size);
        buf.read(read_buf.data(), data_size);

        // Byte-for-byte comparison of what came back against the source.
        bool failed_to_read = false;
        for (size_t i = 0; i < data_size; ++i)
            if (read_buf[i] != vec[i])
            {
                failed_to_read = true;
            }

        if (failed_to_read)
        {
            // Dump both buffers to aid debugging before failing.
            std::cout.write(data, data_size);
            std::cout << std::endl;
            std::cout.write(read_buf.data(), data_size);
            std::cout << std::endl;

            FAIL("Fail to read data");
        }

        // The read-side hash must match the precomputed reference ...
        if (buf.getHash() != reference)
        {
            //std::cout << uint128ToString(buf.getHash()) << " " << uint128ToString(reference) << std::endl;
            FAIL("failed on data size " << data_size << " reading by blocks of size " << read_buffer_block_size);
        }
        // ... and also the write-side hash (both should hash identical bytes).
        if (buf.getHash() != out.getHash())
            FAIL("Hash of HashingReadBuffer doesn't match with hash of HashingWriteBuffer on data size " << data_size << " reading by blocks of size " << read_buffer_block_size);
    }
}
// Accumulate one entry into the current bundle.  An entry that does not
// overlap the current region closes it: the pending bundle is emitted
// (bracketed by beginFunc_/endFunc_) and a new region is started from
// this entry.  The region end is then extended and the entry stored.
void operator()(ValuePtr entry)
{
    const bool startsNewRegion = !overlaps(*entry);
    if (startsNewRegion) {
        assignRegion(*entry);
        if (!bundle_.empty()) {
            beginFunc_();
            out_(std::move(bundle_));
            endFunc_();
            bundle_.clear();
        }
    }
    region_.end = std::max(region_.end, coordView_.stop(*entry));
    bundle_.push_back(std::move(entry));
}
//-----------------------------------------------------------------// bool start(uint8_t limit = (CHAIN * 8)) noexcept { if(limit_ > (8 * CHAIN) || limit == 0) { return false; } limit_ = limit; SELECT::DIR = 1; // output; SELECT::PU = 0; // pull-up disable SELECT::P = 1; // /CS = H for(uint8_t i = 0; i < sizeof(data_); ++i) { data_[i] = 0; } out_(command::SHUTDOWN, 0x01); // ノーマル・モード out_(command::DECODE_MODE, 0x00); // デコード・モード out_(command::SCAN_LIMIT, limit - 1); // 表示桁設定 set_intensity(0); // 輝度(最低) service(); return true; }
// 线程函数 UINT threadEncryptFunc(LPVOID lpParam) { CString* arr = (CString*)lpParam; CString cs_fileName = arr[0]; CString cs_savePath = arr[1]; CString cs_key = arr[2]; CT2CA pszCAS_key(cs_key); // 将 TCHAR 转换为 LPCSTR string keyStr(pszCAS_key); // 从 LPCSTR 构造 string CT2CA pszCAS_filename(cs_fileName); string fileName(pszCAS_filename); CT2CA pszCAS_savepath(cs_savePath); string savePath(pszCAS_savepath); Aes aes; ifstream in_(fileName.c_str(), ios::binary); ofstream out_(savePath.c_str(), ios::binary); Byte key[16]; aes.charToByte(key, keyStr.c_str()); // 密钥扩展 Word w[4*(Nr+1)]; aes.KeyExpansion(key, w); bitset<128> data; // 临时存放读取的数据 Byte plain[16]; // 加密矩阵 while(in_.read((char*)&data, sizeof(data))) { aes.divideByte(plain, data); aes.encrypt(plain, w); data = aes.mergeByte(plain); out_.write((char*)&data, sizeof(data)); data.reset(); // 置0 } in_.close(); out_.close(); return 0; }
/* err_data() - Write the string 's' to the bjam error stream and, when
 * an output file has been configured, duplicate it there as well. */
void err_data(char const * const s)
{
    out_( s, bjam_err );
    if ( globs.out )
    {
        out_( s, globs.out );
    }
}
// Emit any pending bundle and reset it; a no-op when nothing is queued.
void flush()
{
    if (bundle_.empty())
        return;
    out_(std::move(bundle_));
    bundle_.clear();
}
//-----------------------------------------------------------------// bool set_intensity(uint8_t inten) noexcept { if(limit_ == 0) return false; out_(command::INTENSITY, inten); return true; }
//#define VERBOSE
// Trains a 2-2-1 feed-forward network on the XOR truth table using
// matrix operations throughout, with momentum (beta) and an adaptive
// learning rate (alpha).  Stops early once an epoch's sum of squared
// errors drops below 0.001, printing the elapsed time.
void Compute_Simple_XOR_network_version_5(int num_iterations)
{
    Timer timer;
    // TRAINING SET FOR EXCLUSIVE OR GATE
    vector<vector2d > training;
    training.push_back(vector2d{ { 0.f, 0.f } });
    training.push_back(vector2d{ { 0.f, 1.f } });
    training.push_back(vector2d{ { 1.f, 0.f } });
    training.push_back(vector2d{ { 1.f, 1.f } });
    float desired_output[4] = { 0.f, 1.f, 1.f, 0.f };
    int input_data_size = 1;  // NOTE(review): unused
    int num_inputs = 2;
    int num_hidden = 2;
    int num_outputs = 1;
    // ==========================================
    // The "+ 1" slots carry the bias (theta) terms inside the matrices.
    matrix input_matrix( 1, num_inputs + 1 );
    matrix w_m_1_2_(num_inputs + 1, num_hidden + 1);
    matrix hidden_layer_(num_hidden+1, 1);
    matrix w_m_2_3_(num_hidden + 1, num_outputs);
    matrix out_(num_outputs, num_outputs);
    matrix del_3_2_(num_outputs, num_outputs);
    matrix del_2_1_(1, num_hidden + 1);
    /*for (int i = 0; i < num_inputs+1; i++) { for (int j = 0; j < num_hidden + 1; j++) { w_m_1_2_(i, j) = RandomFloat(-1.2, 1.2); } } */
    // Fixed initial weights (random init above left commented out).
    w_m_1_2_(0, 0) = 0.5f; w_m_1_2_(0, 1) = 0.9f; w_m_1_2_(0, 2) = 0.0f;
    w_m_1_2_(1, 0) = 0.4f; w_m_1_2_(1, 1) = 1.0f; w_m_1_2_(1, 2) = 0.0f;
    w_m_1_2_(2, 0) = 0.8f;// theta 1
    w_m_1_2_(2, 1) = -0.1f;// theta 2
    w_m_1_2_(2, 2) = 1.0f;
    /* for (int i = 0; i < num_hidden + 1; i++) { w_m_2_3_(i, 0) = RandomFloat(-1.2, 1.2); } */
    w_m_2_3_(0, 0) = -1.2f; w_m_2_3_(1, 0) = 1.1f; w_m_2_3_(2, 0) = 0.3f; // theta for output
    float output_error = 0.0f;
    matrix w_m_delta_1_(3, 3);
    matrix w_m_delta_2_(3, 1);
    float alpha = 0.1f;   // learning rate (adapted below)
    float beta = 0.95f;   // momentum coefficient
    float sum_squared_errors = 0.0f;
    timer.Start();
    // Sleep(2000);
    float last_sum_squared_errors = 0.0f;
    int positive_error_delta_count = 0;
    int negative_error_delta_count = 0;
    int alternation_count = 0;
    for (int p = 0; p < num_iterations; p++)
    {
        sum_squared_errors = 0.0f;
        // One epoch over the four XOR patterns.
        for (int q = 0; q < 4; q++)
        {
            input_matrix(0, 0) = training[q].v[0];
            input_matrix(0, 1) = training[q].v[1];
            input_matrix(0, 2) = -1.0f; // bias is always -1
            float sum[3] = { 0.0f, 0.0f, 0.0f };  // NOTE(review): unused leftover
            // Forward pass: bias term is folded into the weight matrix.
            hidden_layer_ = input_matrix * w_m_1_2_;// -theta_1_;
            sigmoid(hidden_layer_, hidden_layer_);
            // OVERWRITE 3rd INPUT (restore the bias slot after sigmoid)
            hidden_layer_(0, 2) = -1.0f;
            out_ = hidden_layer_ * w_m_2_3_;
            sigmoid(out_, out_);
#ifdef VERBOSE
            if (p % 250 == 0)
            {
                hidden_layer_.print(); cout<<endl;
                out_.print(); cout<<endl;
            }
#endif
            output_error = desired_output[q] - out_(0, 0);
            sum_squared_errors += output_error * output_error;
            // back propagate
            anti_sigmoid(del_3_2_, out_);
            del_3_2_ = del_3_2_ * output_error;
            anti_sigmoid(del_2_1_, hidden_layer_);
            // put the vector on the diagonal for next operation ...
            matrix ident_22(3, 3);
            for (int i = 0; i < 3; i++)
            {
                for (int h = 0; h < 3; h++)
                {
                    if (i == h) ident_22(i, h) = del_2_1_(0, i);
                    else ident_22(i, h) = 0.0f;
                }
            }
            del_2_1_ = ident_22 * w_m_2_3_ * del_3_2_(0, 0);
            // weight deltas: new delta = beta * old delta + alpha * gradient
            w_m_delta_2_.transpose();
            w_m_delta_2_ = w_m_delta_2_ * beta + del_3_2_* hidden_layer_ * alpha ;
            w_m_delta_2_.transpose();
#ifdef VERBOSE
            if (p % 250 == 0)
            {
                del_2_1_.print(); cout << endl;
                del_3_2_.print(); cout << endl;
            }
#endif
            w_m_delta_1_.transpose();
            w_m_delta_1_ = w_m_delta_1_ * beta + del_2_1_ * input_matrix * alpha;
            w_m_delta_1_.transpose();
#ifdef VERBOSE
            if (p % 250 == 0)
            {
                w_m_delta_1_.print(); cout << endl;
                w_m_delta_2_.print(); cout << endl;
            }
#endif
            // update weights
            w_m_1_2_ = w_m_1_2_ + w_m_delta_1_;
            w_m_2_3_ = w_m_2_3_ + w_m_delta_2_;
#ifdef VERBOSE
            if (p % 250 == 0)
            {
                w_m_1_2_.print(); cout << endl;
                w_m_2_3_.print(); cout << endl;
            }
#endif
        }
        // Bold-driver style rate adaptation: shrink on a >4% error rise,
        // grow gently while the error keeps falling.
        if (sum_squared_errors > last_sum_squared_errors*1.04) alpha *= 0.7;
        if (sum_squared_errors < last_sum_squared_errors) alpha *= 1.05;
        // calculate the change in sum_squared_errors
        float delta_sum_square_errors = sum_squared_errors - last_sum_squared_errors;
        last_sum_squared_errors = sum_squared_errors;
        // Track consecutive rises/falls and sign alternations of the error.
        if (delta_sum_square_errors > 0.0f)
        {
            if (positive_error_delta_count == 0)
            {
                alternation_count++;
            }
            else{
                alternation_count = 0;
            }
            positive_error_delta_count++;
            negative_error_delta_count = 0;
        }
        else
        {
            if (negative_error_delta_count == 0)
            {
                alternation_count++;
            }
            else{
                alternation_count = 0;
            }
            negative_error_delta_count++;
            positive_error_delta_count = 0;
        }
        // determine change in learning rate: a steady trend speeds up,
        // oscillation slows down.
        if (positive_error_delta_count >= 2 || negative_error_delta_count >= 2)
        {
            alpha += 0.1;
            if (alpha > 1.0f) alpha = 1.0f;
        }
        else if (alternation_count >= 2)
        {
            alpha -= 0.1;
            if (alpha < 0.0f) alpha = 0.01;
        }
        //cout << sum_squared_errors << endl;
        if (sum_squared_errors < 0.001)
        {
            timer.Update();
            timer.Stop();
            cout << "Finished on iteration: " << p << ", with sum squared errors less than 0.001" << endl << "Total calculation performed in " << timer.GetTimeDelta() << " seconds" << endl;
            break;
        }
    }
}
void Compute_Simple_XOR_network_version_4(int num_iterations) { Timer timer; // TRAINING SET FOR EXCLUSIVE OR GATE vector<vector2d > training; training.push_back(vector2d{ { 0.f, 0.f } }); training.push_back(vector2d{ { 0.f, 1.f } }); training.push_back(vector2d{ { 1.f, 0.f } }); training.push_back(vector2d{ { 1.f, 1.f } }); float desired_output[4] = { 0.f, 1.f, 1.f, 0.f }; // ========================================== matrix input_matrix(1, 3); matrix w_m_1_2_(3, 3); matrix hidden_layer_(3, 1); matrix w_m_2_3_(3, 1); matrix out_(1, 1); matrix del_3_2_(1, 1); matrix del_2_1_(1, 3); matrix theta_1_(1, 2); matrix theta_2_(1, 1); w_m_1_2_(0, 0) = 0.5f; w_m_1_2_(0, 1) = 0.9f; w_m_1_2_(0, 2) = 0.0f; w_m_1_2_(1, 0) = 0.4f; w_m_1_2_(1, 1) = 1.0f; w_m_1_2_(1, 2) = 0.0f; w_m_1_2_(2, 0) = 0.8f;// theta 1 w_m_1_2_(2, 1) = -0.1f;//// theta 2 w_m_1_2_(2, 2) = 1.0f; w_m_2_3_(0, 0) = -1.2f; w_m_2_3_(1, 0) = 1.1f; w_m_2_3_(2, 0) = 0.3f; // theta for output theta_1_(0, 0) = 0.8f; theta_1_(0, 1) = -0.1f; theta_2_(0, 0) = 0.3f; float output_error = 0.0f; matrix w_m_delta_1_(3, 3); matrix w_m_delta_2_(3, 1); float alpha = 0.3; float sum_squared_errors = 0.0f; timer.Start(); // Sleep(2000); float last_sum_squared_errors = 0.0f; int positive_error_delta_count = 0; int negative_error_delta_count = 0; for (int p = 0; p < num_iterations; p++) { sum_squared_errors = 0.0f; for (int q = 0; q < 4; q++) { input_matrix(0, 0) = training[q].v[0]; input_matrix(0, 1) = training[q].v[1]; input_matrix(0, 2) = -1.0f; // bias is always -1 float sum[3] = { 0.0f, 0.0f, 0.0f }; hidden_layer_ = input_matrix * w_m_1_2_;// -theta_1_; sigmoid(hidden_layer_, hidden_layer_); // OVERWRITE 3rd INPUT hidden_layer_(0, 2) = -1.0f; out_ = hidden_layer_ * w_m_2_3_; out_(0, 0) = sigmoid(out_(0, 0)); #ifdef VERBOSE if (p % 250 == 0) { hidden_layer_.print(); cout<<endl; out_(0, 0).print(); cout<<endl; } #endif output_error = desired_output[q] - out_(0, 0); sum_squared_errors += output_error * output_error; // back 
propogate anti_sigmoid(del_3_2_, out_); del_3_2_ = del_3_2_ * output_error; anti_sigmoid(del_2_1_, hidden_layer_); // put the vector on the diagonal for next operation ... matrix ident_22(3, 3); for (int i = 0; i < 3; i++) { for (int h = 0; h < 3; h++) { if (i == h) ident_22(i, h) = del_2_1_(0, i); else ident_22(i, h) = 0.0f; } } del_2_1_ = ident_22 * w_m_2_3_ * del_3_2_(0, 0); // weight deltas w_m_delta_2_ = hidden_layer_ * alpha * del_3_2_(0, 0); w_m_delta_2_.transpose(); #ifdef VERBOSE if (p % 250 == 0) { del_2_1_.print(); cout<<endl; de_3_2_.print(); cout << endl; } #endif w_m_delta_1_ = del_2_1_ * input_matrix * alpha; w_m_delta_1_.transpose(); #ifdef VERBOSE if (p % 250 == 0) { w_m_delta_1_.print(); cout<<endl; w_m_delta_2_.print(); cout << endl; } #endif // update weights w_m_1_2_ = w_m_1_2_ + w_m_delta_1_;// w_m_2_3_ = w_m_2_3_ + w_m_delta_2_; #ifdef VERBOSE if (p % 250 == 0) { w_m_1_2_.print(); cout<<endl; w_m_2_3_.print(); cout << endl; } #endif } //cout << sum_squared_errors << endl; if (sum_squared_errors < 0.03) { timer.Update(); timer.Stop(); cout << "Finished on iteration: " << p << ", with sum squared errors less than 0.03" << endl << "Total calculation performed in " << timer.GetTimeDelta() << " seconds" << endl; break; } } }
// Trains a 2-2-1 feed-forward network on the XOR truth table with the
// bias (theta) terms kept in separate matrices and the weight updates
// written out element by element.  Stops early once an epoch's sum of
// squared errors drops below 0.03, printing the elapsed time.
void Compute_Simple_XOR_network_version_3(int num_iterations)
{
    Timer timer;
    // TRAINING SET FOR EXCLUSIVE OR GATE
    vector<vector2d > training;
    training.push_back(vector2d{ { 0.f, 0.f } });
    training.push_back(vector2d{ { 0.f, 1.f } });
    training.push_back(vector2d{ { 1.f, 0.f } });
    training.push_back(vector2d{ { 1.f, 1.f } });
    float desired_output[4] = { 0.f, 1.f, 1.f, 0.f };
    // ==========================================
    matrix input_matrix(1, 2);
    matrix w_m_1_2_(2, 2);
    matrix hidden_layer_(2, 1);
    matrix w_m_2_3_(2, 1);
    matrix out_(1, 1);
    matrix del_3_2_(1, 1);
    matrix del_2_1_(1, 2);
    matrix theta_1_(1, 2);  // hidden-layer biases, held outside the weights
    matrix theta_2_(1, 1);  // output bias
    // Fixed initial weights and biases.
    w_m_1_2_(0,0) = 0.5f; w_m_1_2_(0, 1) = 0.9f;
    w_m_1_2_(1, 0) = 0.4f; w_m_1_2_(1, 1) = 1.0f;
    w_m_2_3_(0, 0) = -1.2f; w_m_2_3_(1, 0) = 1.1f;
    theta_1_(0, 0) = 0.8f; theta_1_(0,1) = -0.1f;
    theta_2_(0,0) = 0.3f;
    float output_error = 0.0f;
    // Row 2 of each delta matrix holds the bias update.
    matrix w_m_delta_1_(3, 2);
    matrix w_m_delta_2_(3, 1);
    float alpha = 0.3f;  // fixed learning rate
    float sum_squared_errors = 0.0f;
    timer.Start();
    // Sleep(2000);
    for (int p = 0; p < num_iterations; p++)
    {
        sum_squared_errors = 0.0f;
        // One epoch over the four XOR patterns.
        for (int q = 0; q < 4; q++)
        {
            input_matrix(0,0)= training[q].v[0];
            input_matrix(0, 1)= training[q].v[1];
            // theta's must be in the weight matrix
            hidden_layer_ = input_matrix * w_m_1_2_ - theta_1_;
            // computes the elementwise sigmoid of the sum
            sigmoid(hidden_layer_, hidden_layer_);
            out_ = hidden_layer_ * w_m_2_3_ - theta_2_;
            out_(0, 0) = sigmoid(out_(0, 0));
//#define VERBOSE
#ifdef VERBOSE
            if (p % 250 == 0)
            {
                cout << "hidden layer: " << endl;
                hidden_layer_.print();
                cout << "output: " << endl;
                out_.print(); cout << endl;
            }
#endif
            output_error = desired_output[q] - out_(0, 0);
            sum_squared_errors += output_error * output_error;
            // back propagate: output delta = y * (1 - y) * error
            del_3_2_(0, 0) = out_(0, 0) * (1 - out_(0, 0)) * output_error;
            // calculation of the debug values.
            // NOTE(review): correct_val_1/2 are unused cross-check values.
            float correct_val_1 = hidden_layer_(0, 0)*(1 - hidden_layer_(0, 0)) * w_m_2_3_(0, 0) * del_3_2_(0, 0);
            float correct_val_2 = hidden_layer_(0, 1)*(1 - hidden_layer_(0, 1)) * w_m_2_3_(1, 0) * del_3_2_(0, 0);
            // computes the elementwise differentiation of the sigmoid function
            anti_sigmoid( del_2_1_, hidden_layer_ );
            // the del_2_1_ vector is expanded to inhabit the diagonal of the identity
            // matrix for the next matrix operation
            matrix ident_22(2, 2);
            for (int i = 0; i < 2; i++)
            {
                for (int h = 0; h < 2; h++)
                {
                    if (i == h) ident_22(i, h) = del_2_1_(0, i);
                    else ident_22(i, h) = 0.0f;
                }
            }
            del_2_1_ = ident_22 * w_m_2_3_ * del_3_2_(0, 0);
            del_2_1_.transpose();
            // Output-layer weight deltas; row 2 is the bias (input -1).
            w_m_delta_2_(0, 0) = alpha * hidden_layer_(0, 0) * del_3_2_(0, 0);
            w_m_delta_2_(1, 0) = alpha * hidden_layer_(0, 1) * del_3_2_(0, 0);
            w_m_delta_2_(2, 0) = alpha * (-1.0f) * del_3_2_(0, 0);
#ifdef VERBOSE
            if (p % 250 == 0)
            {
                cout << "deltas: " << endl;
                del_2_1_.print(); cout << endl;
                del_3_2_.print(); cout << endl;
            }
#endif
#undef VERBOSE
            // this operation could be bunched up into a matrix operation, this
            // shall be left to the next function
            w_m_delta_1_(0, 0) = alpha * input_matrix(0, 0) * del_2_1_(0, 0);
            w_m_delta_1_(1, 0) = alpha * input_matrix(0, 1) * del_2_1_(0, 0);
            w_m_delta_1_(2, 0) = alpha * (-1.0f) * del_2_1_(0, 0);
            w_m_delta_1_(0, 1) = alpha * input_matrix(0, 0) * del_2_1_(0, 1);
            w_m_delta_1_(1, 1) = alpha * input_matrix(0, 1) * del_2_1_(0, 1);
            w_m_delta_1_(2, 1) = alpha * (-1.0f) * del_2_1_(0, 1);
            // weight_mat_2_delta[2] = alpha * (-1) * deltas[2];
#ifdef VERBOSE
            if (p % 250 == 0)
            {
                w_m_delta_1_.print(); // untested
            }
#endif
            // update weights
            for (int i = 0; i < 2; i++)
            {
                for (int j = 0; j < 2; j++)
                {
                    w_m_1_2_(i, j) = w_m_1_2_(i, j) + w_m_delta_1_(i, j);// weight_mat_1[i][j] = weight_mat_1[i][j] + weight_mat_1_delta[i][j];
                }
            }
            // it is clear that the operation above is an elementwise matrix addition
            // but the w_m_1_2_ matrix is not the same size as the w_m_delta_1_
            // because of the storage of theta bias values ... this is the correct
            // place to store the theta bias values however I have left this stage of
            // development to the next function, where i will attempt to further generalize
            // the matrix operations to facilitate an arbitrary number of inputs, outputs,
            // hidden layer neurons and number of hidden layers
            w_m_2_3_(0, 0) = w_m_2_3_(0, 0) + w_m_delta_2_(0, 0);
            w_m_2_3_(1, 0) = w_m_2_3_(1, 0) + w_m_delta_2_(1, 0);
            //weight_mat_2[1] = weight_mat_2[1] + weight_mat_2_delta[1];
            // Bias updates come from row 2 of the delta matrices.
            theta_1_(0, 0) = theta_1_(0, 0) + w_m_delta_1_(2, 0);
            theta_1_(0, 1) = theta_1_(0, 1) + w_m_delta_1_(2, 1);
            theta_2_(0, 0) = theta_2_(0, 0) + w_m_delta_2_(2, 0);
#ifdef VERBOSE
            if (p % 250 == 0)
            {
            }
#endif
        }
        //cout << sum_squared_errors << endl;
        if (sum_squared_errors < 0.03)
        {
            timer.Update();
            timer.Stop();
            cout << "Finished on iteration: " << p << ", with sum squared errors less than 0.03" << endl << "Total calculation performed in " << timer.GetTimeDelta() << " seconds" << endl;
            break;
        }
    }
}
// Stream every stored point registered at 'level' to 'o'.
void out( std::ostream& o, double level )
{
    auto& level_points = points_[level];
    for ( const auto& point : level_points )
    {
        out_( o, point );
    }
}
// Before evolving the cloud: re-locate every parcel with the search
// engine, log/count parcels that are outside the mesh or in the wrong
// cell, correct the wrong ones, and report per-processor and global
// totals.
void Foam::CorrectParticleCell<CloudType>::preEvolve()
{
    // A changing mesh invalidates the cell search structures.
    if( this->owner().mesh().changing() ) {
        Info << this->modelName() << ":" << this->owner().name() << ":"
            << this->modelType() << ": Mesh moving" << endl;
        search_.correct();
    }
    label cnt=0;     // parcels not found in the mesh at all
    label outCnt=0;  // parcels found, but in a different cell than recorded
    forAllIter(typename CloudType,this->owner(),iter)
    {
        parcelType &p=iter();
        label oldCellI=p.cell();
        // Search starting from the recorded cell for speed.
        label cellI=search_.findCell(
            p.position(),
            oldCellI
        );
        if(
            cellI<0
            // ||
            // (cellI % 4)==0
        ) {
            // Position is outside the mesh: count and optionally log it.
            cnt++;
            if(logCorrected_) {
                out_("logOutsideParticles") << p << endl;
            }
            // Info << "Not in Mesh" << endl;
            // label tetC=-1,tetP=-1,newCell=-1;
            // this->owner().mesh().findCellFacePt(
            //     p.position(),
            //     newCell,
            //     tetC,
            //     tetP
            // );
            // Info << p.position() << " " << newCell << " " << tetC << " " << tetP << endl;
            // Info << "Old: " << oldCellI << " " << p.tetFace() << " " << p.tetPt() << endl;
        } else if(cellI!=oldCellI) {
            // Parcel is in the mesh but its stored cell is stale.
            if(logCorrected_) {
                out_("logCorrectedParticles") << p << endl;
            }
            // Info << "Cell: " << cellI << " old: " << oldCellI << endl;
            // Recompute the tet decomposition info for the new position.
            label tetC=-1,tetP=-1,newCell=-1;
            this->owner().mesh().findCellFacePt(
                p.position(),
                newCell,
                tetC,
                tetP
            );
            // Info << "Corrected: " << p.position() << " " << newCell << " " << tetC << " " << tetP << endl;
            // Info << "Old: " << p.cell() << " "<< p.tetFace() << " " << p.tetPt() << endl;
            outCnt++;
            p.cell()=newCell;
            p.tetFace()=tetC;
            p.tetPt()=tetP;
            // NOTE(review): the cell set just above from findCellFacePt is
            // immediately overwritten with the search engine's result —
            // presumably intentional (tetFace/tetPt still come from
            // findCellFacePt), but verify newCell vs cellI agreement.
            p.cell()=cellI;
            p.initCellFacePt();
        }
    }
    if(outCnt>0) {
        Pout << outCnt << " particles not in the right cell" << endl;
    }
    // Per-processor tallies before the global reduction.
    if(Pstream::parRun()) {
        out_["correctedCellProc"+name(Pstream::myProcNo())] << outCnt << tab << cnt << endl;
    }
    reduce(cnt,plusOp<label>());
    reduce(outCnt,plusOp<label>());
    if(Pstream::master()) {
        out_["correctedCellTotal"] << outCnt << tab << cnt << endl;
    }
    if(outCnt>0) {
        Info << this->modelName() << ":" << this->owner().name() << ":"
            << this->modelType() << "Corrected " << outCnt << " particles" << endl;
    }
    if(cnt>0) {
        Info << this->modelName() << ":" << this->owner().name() << ":"
            << this->modelType() << "Not in mesh " << cnt << " particles" << endl;
    }
}
/* out_data() - Write the string 's' to the bjam output stream and, when
 * an output file has been configured, duplicate it there as well. */
void out_data(char const * const s)
{
    out_( s, bjam_out );
    if ( globs.out )
    {
        out_( s, globs.out );
    }
}
/* out_action() - Report one action: the "action target" announcement
 * line, the command run (under -d+2), a mirror of the command to the
 * command stream, a timeout notice, and the captured stdout/stderr.
 * A null 'action' marks a quiet action: its announcement and timeout
 * notice are suppressed and its output is shown only when the command
 * failed.
 */
void out_action
(
    char const * action,
    char const * target,
    char const * command,
    char const * out_data,
    char const * err_data,
    int exit_reason
)
{
    /* Print out the action + target line; if the action is quiet the
     * action should be null. */
    if ( action )
    {
        fprintf( bjam_out, "%s %s\n", action, target );
    }

    /* Print out the command executed if given -d+2. */
    if ( DEBUG_EXEC )
    {
        fputs( command, bjam_out );
        fputc( '\n', bjam_out );
    }

    /* Print out the command executed to the command stream. */
    if ( globs.cmdout )
    {
        fputs( command, globs.cmdout );
    }

    switch ( exit_reason )
    {
        case EXIT_OK:
            break;
        case EXIT_FAIL:
            break;
        case EXIT_TIMEOUT:
        {
            /* Process expired, make user aware with explicit message. */
            if ( action )
            {
                /* But only output for non-quiet actions. */
                fprintf( bjam_out, "%ld second time limit exceeded\n",
                    globs.timeout );
            }
            break;
        }
        default:
            break;
    }

    /* Print out the command output, if requested, or if the program failed. */
    if ( action || exit_reason != EXIT_OK)
    {
        /* But only output for non-quiet actions; pipe_action routes
         * stdout/stderr, 0 meaning "everything to stdout". */
        if ( ( 0 != out_data ) &&
            ( ( globs.pipe_action & 1 /* STDOUT_FILENO */ ) ||
              ( globs.pipe_action == 0 ) ) )
        {
            out_( out_data, bjam_out );
        }
        if ( ( 0 != err_data ) &&
            ( globs.pipe_action & 2 /* STDERR_FILENO */ ) )
        {
            out_( err_data, bjam_err );
        }
    }

    fflush( bjam_out );
    fflush( bjam_err );
    fflush( globs.cmdout );
}
// Qt auto-connected slot for pushButton_6: forwards the click to the
// out_() handler.
void MainWindow::on_pushButton_6_clicked()
{
    out_ ();
}