dgMatrix dgDynamicBody::CalculateInertiaMatrix() const
{
    dgMatrix matrix(m_principalAxis * m_matrix);
    matrix.m_posit = dgVector::m_wOne;
    dgMatrix diagonal(dgGetIdentityMatrix());
    diagonal[0][0] = m_mass[0];
    diagonal[1][1] = m_mass[1];
    diagonal[2][2] = m_mass[2];
    return matrix * diagonal * matrix.Inverse();
}
template <class ValueType>
double
KernelMatrix<ValueType>::calculate(const ExampleSet& test, const ExampleSet& train,
                                   const Kernel& kernel, bool norm_test,
                                   bool normalize, uint n_th)
{
  double elapsed = 0.0;
#ifdef HAVE_MPI
  if (MPI::COMM_WORLD.Get_rank()==0) {
#endif
    resize(test.size(), train.size());
    //label_.resize(test.size());
    //matrix_.resize(boost::extents[test.size()][train.size()]);
    if (norm_test||normalize) self_.resize(train.size());
#ifdef HAVE_MPI
  }
#endif
  for (uint i=0; i!=test.size(); ++i) {
    std::vector<value_type> v(train.size());
    if (norm_test||normalize)
      elapsed += calculate(v, test[i], train, kernel, n_th, &self_[i]);
    else
      elapsed += calculate(v, test[i], train, kernel, n_th);
#ifdef HAVE_MPI
    if (MPI::COMM_WORLD.Get_rank()==0) {
#endif
      label_[i] = test[i].first;
      for (uint j=0; j!=train.size(); ++j)
        matrix_[i][j] = v[j];
#ifdef HAVE_MPI
    }
#endif
  }
  if (normalize) {
    std::vector<value_type> diag(train.size());
    elapsed += diagonal(diag, train, kernel, n_th);
#ifdef HAVE_MPI
    if (MPI::COMM_WORLD.Get_rank()==0) {
#endif
      boost::timer tm;
      for (uint i=0; i!=test.size(); ++i) {
        for (uint j=0; j!=train.size(); ++j) {
          matrix_[i][j] /= sqrt(self_[i]*diag[j]);
        }
      }
      elapsed += tm.elapsed();
#ifdef HAVE_MPI
    }
#endif
  }
  return elapsed;
}
/* duma (Bulgarian: "word") is the word being searched for;
   sreshtano (Bulgarian: "found") is set by the search helpers on a match. */
int check()
{
    int i, j;
    for (i = 0; i < size; i++)
        for (j = 0; j < size; j++) {
            if (j <= size - length && board[i][j] == duma[0]) {  /* room toward larger j */
                horizontal(1, i, j);
                if (sreshtano) { print(); return -1; }
            }
            if (j >= length - 1 && board[i][j] == duma[0]) {     /* room toward smaller j */
                horizontal(0, i, j);
                if (sreshtano) { print(); return -1; }
            }
            if (i >= length - 1 && board[i][j] == duma[0]) {     /* room toward smaller i */
                vertical(0, i, j);
                if (sreshtano) { print(); return -1; }
            }
            if (i <= size - length && board[i][j] == duma[0]) {  /* room toward larger i */
                vertical(1, i, j);
                if (sreshtano) { print(); return -1; }
            }
            if (board[i][j] == duma[0]) {
                diagonal(i, j);
                if (sreshtano) { print(); return -1; }
            }
        }
    return 0;
}
void SparseMatrix::initialize_diagonal_index() const
{
  if (diagonal_index.size())
    return;
  Array<int> diagonal(min(rows(), columns()), uninit);
  for (int i = 0; i < diagonal.size(); i++) {
    diagonal[i] = find_entry(i, i);
    GEODE_ASSERT(diagonal[i] >= 0);
    diagonal[i] += A.offsets[i];
  }
  diagonal_index = diagonal;
}
static int evaluate(char grid[SIZE][SIZE], Index2d pos, char player)
{
    int rank = 0;
    rank = max(rank, horizontal(grid, pos, player));
    rank = max(rank, vertical(grid, pos, player));
    rank = max(rank, diagonal(grid, pos, player));
    rank = max(rank, anti_diagonal(grid, pos, player));
    return rank;
}
void bi::cov(const GammaPdf& q, M1 Sigma) {
  /* pre-condition */
  BI_ASSERT(Sigma.size1() == q.size());
  BI_ASSERT(Sigma.size2() == q.size());

  real alpha = q.shape();
  real beta = q.scale();
  real sigma = alpha*std::pow(beta, 2);

  Sigma.clear();
  set_elements(diagonal(Sigma), sigma);
}
void bi::cov(const InverseGammaPdf& q, M1 Sigma) {
  /* pre-condition */
  BI_ASSERT(Sigma.size1() == q.size());
  BI_ASSERT(Sigma.size2() == q.size());
  BI_ASSERT(q.shape() > 2.0);

  real alpha = q.shape();
  real beta = q.scale();
  real sigma = std::pow(beta, 2)/(std::pow(alpha - 1.0, 2)*(alpha - 2.0));

  Sigma.clear();
  set_elements(diagonal(Sigma), sigma);
}
void bi::cov(const UniformPdf<V1>& q, M1 Sigma) {
  /* pre-condition */
  BI_ASSERT(Sigma.size1() == q.size());
  BI_ASSERT(Sigma.size2() == q.size());

  temp_host_vector<real>::type diff(q.size());
  diff = q.upper();
  sub_elements(diff, q.lower(), diff);
  sq_elements(diff, diff);

  Sigma.clear();
  axpy(1.0/12.0, diff, diagonal(Sigma));
}
int main()
{
    /* Enter your code here. Read input from STDIN. Print output to STDOUT */
    int t;
    scanf("%d", &t);
    int answer = diagonal(t);
    printf("%d", answer);
    return 0;
}
void RectangularMatrixTest::diagonal() {
    Vector3 diagonal(-1.0f, 5.0f, 11.0f);

    Matrix4x3 a(Vector3(-1.0f,  1.0f,  3.0f),
                Vector3( 4.0f,  5.0f,  7.0f),
                Vector3( 8.0f,  9.0f, 11.0f),
                Vector3(12.0f, 13.0f, 15.0f));
    CORRADE_COMPARE(a.diagonal(), diagonal);

    Matrix3x4 b(Vector4(-1.0f, 4.0f,  8.0f, 12.0f),
                Vector4( 1.0f, 5.0f,  9.0f, 13.0f),
                Vector4( 3.0f, 7.0f, 11.0f, 15.0f));
    CORRADE_COMPARE(b.diagonal(), diagonal);
}
void RectangularMatrixTest::constructFromDiagonal() {
    Vector3 diagonal(-1.0f, 5.0f, 11.0f);

    Matrix3x4 expectedA(Vector4(-1.0f, 0.0f,  0.0f, 0.0f),
                        Vector4( 0.0f, 5.0f,  0.0f, 0.0f),
                        Vector4( 0.0f, 0.0f, 11.0f, 0.0f));
    CORRADE_COMPARE(Matrix3x4::fromDiagonal(diagonal), expectedA);

    Matrix4x3 expectedB(Vector3(-1.0f, 0.0f,  0.0f),
                        Vector3( 0.0f, 5.0f,  0.0f),
                        Vector3( 0.0f, 0.0f, 11.0f),
                        Vector3( 0.0f, 0.0f,  0.0f));
    CORRADE_COMPARE(Matrix4x3::fromDiagonal(diagonal), expectedB);
}
int main(void)
{
    int i;
    COORD pos;
    pos.X = 79;
    pos.Y = 24;

    for (i = 1; i <= 52; i++) {
        pos = diagonal(pos, UP);
        if ((pos.X == 79) && (pos.Y != 0)) {
            pos.Y--;
        }
        if (pos.Y == 0) {
            pos.X--;
        }
        pos = diagonal(pos, DOWN);
        if ((pos.Y == 24) && (pos.X != 0)) {
            pos.X--;
        }
        if (pos.X == 0) {
            pos.Y--;
        }
    }
    getchar();
    return EXIT_SUCCESS;
}
/*
 * Function: calculate_g_score
 *
 * @Parameters: Point current ~ current location
 *              Point target  ~ neighboring point being scored
 *
 * Calculates the g score for the neighboring points of the current
 * point. The cost of a diagonal move is increased, since diagonal
 * movement on a grid is sqrt(2) times the horizontal/vertical movement.
 * Elevation change is also factored into the cost.
 *
 * @returns: int calculated movement cost
 */
int calculate_g_score(Point current, Point target)
{
    if (equals(current, target)) {
        return 0;
    } else if (diagonal(current, target)) {
        return 14 + current.g_score + abs(current.height - target.height);
        //return 50 + current.g_score + abs(current.height - target.height);
    } else {
        return 10 + current.g_score + abs(current.height - target.height);
    }
}
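/*
 * The diagonal() predicate used by calculate_g_score() is not part of this
 * listing. A minimal sketch, assuming Point carries integer x/y grid
 * coordinates (an assumption about the struct layout): a move is diagonal
 * when both coordinates change by exactly one.
 */
static int diagonal(Point a, Point b)
{
    int dx = abs(a.x - b.x);   /* abs() from <stdlib.h> */
    int dy = abs(a.y - b.y);
    return dx == 1 && dy == 1;
}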
Peice verify(Board b) {
    for (unsigned y = 0; y < Board::Y; ++y) {
        auto h = horizontal(b, y);
        if (h != Peice::EMPTY) {
            return h;
        }
    }
    for (unsigned x = 0; x < Board::X; ++x) {
        auto h = vertical(b, x);
        if (h != Peice::EMPTY) {
            return h;
        }
    }
    auto d = diagonal(b);
    return d;
}
Matrix4::Matrix4(const Any& any) {
    any.verifyNameBeginsWith("Matrix4", "CFrame", "CoordinateFrame");
    any.verifyType(Any::ARRAY);

    const std::string& name = any.name();
    if (name == "Matrix4") {
        any.verifySize(16);
        for (int r = 0; r < 4; ++r) {
            for (int c = 0; c < 4; ++c) {
                elt[r][c] = any[r * 4 + c];
            }
        }
    } else if (name == "Matrix4::scale") {
        if (any.size() == 1) {
            *this = scale(any[0].floatValue());
        } else if (any.size() == 3) {
            *this = scale(any[0], any[1], any[2]);
        } else {
            any.verify(false, "Matrix4::scale() takes either 1 or 3 arguments");
        }
    } else if (name == "Matrix4::rollDegrees") {
        any.verifySize(1);
        *this = rollDegrees(any[0].floatValue());
    } else if (name == "Matrix4::yawDegrees") {
        any.verifySize(1);
        *this = yawDegrees(any[0].floatValue());
    } else if (name == "Matrix4::pitchDegrees") {
        any.verifySize(1);
        *this = pitchDegrees(any[0].floatValue());
    } else if (name == "Matrix4::translation") {
        if (any.size() == 3) {
            *this = translation(any[0], any[1], any[2]);
        } else {
            any.verify(false, "Matrix4::translation() requires 3 arguments");
        }
    } else if (name == "Matrix4::diagonal") {
        any.verifySize(4);
        *this = diagonal(any[0], any[1], any[2], any[3]);
    } else if (name == "Matrix4::identity") {
        *this = identity();
    } else if (beginsWith(name, "CFrame") || beginsWith(name, "CoordinateFrame")) {
        *this = CFrame(any);
    } else {
        any.verify(false, "Expected Matrix4 constructor");
    }
}
Eigen::MatrixXd Reconstruction3D::computeE(const std::pair<Eigen::MatrixXd, Eigen::MatrixXd>& K,
                                           const Eigen::MatrixXd& F, bool applyConstraint)
{
    Eigen::MatrixXd E = K.second.transpose() * F * K.first;
    //E /= E(2, 2);  // the error increases

    if (applyConstraint)
    {
        Eigen::JacobiSVD<Eigen::MatrixXd, Eigen::FullPivHouseholderQRPreconditioner>
            svd(E, Eigen::ComputeFullU | Eigen::ComputeFullV);
        Eigen::VectorXd singularValues = svd.singularValues();

        // Enforce the essential-matrix constraint: two equal singular values, third equal to zero
        Eigen::DiagonalMatrix<double, 3, 3> diagonal(1, 1, 0);
        Eigen::MatrixXd D = diagonal.toDenseMatrix();
        Eigen::MatrixXd constrainedMat = svd.matrixU() * D * svd.matrixV().transpose();
        E = constrainedMat;
    }

    //std::cout << std::fixed
    //          << "E In determinant  : " << F.determinant() << std::endl
    //          << "E Out determinant : " << constrainedMat.determinant() << std::endl << std::endl;

    return E;
}
player_bullet player_bullet_init(player p)
{
    player_bullet b;
    b.a = p.a;
    b.rot = 2 * pi * rand() / RAND_MAX;
    b.h = p.h + rskala() * 10 * 2.0;
    b.obraz = load_bitmap("media/pocisk.bmp", NULL);   /* obraz = image, pocisk = bullet */
    if (!b.obraz) {
        set_gfx_mode(GFX_TEXT, 0, 0, 0, 0);
        /* Polish: "Cannot load the graphic (bullet)" */
        allegro_message("Nie moge zaladowac grafiki ( pocisk )");
        allegro_exit();
    }
    int d = diagonal(b.obraz->w, b.obraz->h);
    b.obraz_rot = create_bitmap(d, d);
    return b;
}
/**
 * The main program is now as follows:
 **/
int main()
{
  typedef float NumericT;

  bool bResult = false;
  unsigned int mat_size = 30;

  /**
   * Create STL-vectors holding the diagonal, the superdiagonal, and the computed eigenvalues:
   **/
  std::vector<NumericT> diagonal(mat_size);
  std::vector<NumericT> superdiagonal(mat_size);
  std::vector<NumericT> eigenvalues_bisect(mat_size);

  /**
   * Initialize the data with the helper routine defined earlier:
   **/
  initInputData(diagonal, superdiagonal, mat_size);

  /**
   * Run the bisection algorithm for the provided input
   **/
  std::cout << "Start the bisection algorithm" << std::endl;
  bResult = viennacl::linalg::bisect(diagonal, superdiagonal, eigenvalues_bisect);
  std::cout << std::endl << "---TUTORIAL COMPLETED---" << std::endl;

  /**
   * Uncomment the following code to also have the results printed:
   **/
  /*
  // ------------Print the results---------------
  std::cout << "mat_size = " << mat_size << std::endl;
  for (unsigned int i = 0; i < mat_size; ++i)
  {
    std::cout << "Eigenvalue " << i << ": " << std::setprecision(8) << eigenvalues_bisect[i] << std::endl;
  }
  */

  exit(bResult == true ? EXIT_SUCCESS : EXIT_FAILURE);
}
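/**
 * initInputData() is only referenced in this excerpt ("defined earlier" in the
 * tutorial). A plausible stand-in, assuming the routine merely fills the
 * tridiagonal entries with simple test values (an assumption, not the
 * tutorial's actual implementation):
 **/
template<typename NumericT>
void initInputData(std::vector<NumericT> & diagonal,
                   std::vector<NumericT> & superdiagonal,
                   unsigned int mat_size)
{
  for (unsigned int i = 0; i < mat_size; ++i)
  {
    diagonal[i]      = NumericT(i % 9) + NumericT(1);  // arbitrary nonzero test values
    superdiagonal[i] = NumericT(i % 5) + NumericT(1);
  }
  superdiagonal[0] = NumericT(0);  // no entry above the first row
}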
/**
 * Compute standardized residuals.
 * res_{by} = [log(O_{by}/\tilde{O}_y) - log(E_{by}/\tilde{E}_y)] / (W_y G_y^{0.5})
 * where \tilde{O}_y and \tilde{E}_y are the geometric means of the observed
 * and expected proportions in year y.
 **/
void logistic_normal::compute_standardized_residuals()
{
	// Assumes the covariance matrix (m_V) has already been calculated
	// when the likelihood was called.
	m_residual.allocate(m_O);
	m_residual.initialize();

	int i,j,k;
	double n;
	for( i = m_y1; i <= m_y2; i++ )
	{
		n = m_nB2(i);
		double gOmean = pow(prod(m_Op(i),n),1./n);
		dvector t1 = log(m_Op(i)/gOmean);

		double gEmean = pow(prod(value(m_Ep(i)),n),1./n);
		dvector t2 = log(value(m_Ep(i))/gEmean);

		dmatrix I = identity_matrix(m_b1,n-1);
		dmatrix tF(m_b1,n,m_b1,n-1);
		tF.sub(m_b1,n-1) = I;
		tF(n) = 1;

		dmatrix Hinv = inv(I + 1);
		dmatrix FHinv = tF * Hinv;
		dmatrix G = FHinv * value(m_V(i)) * trans(FHinv);

		dvector sd = sqrt(diagonal(G));
		for( j = m_b1; j <= m_nB2(i); j++ )
		{
			k = m_nAgeIndex(i)(j);
			m_residual(i)(k) = (t1(j)-t2(j)) / (sd(j)*m_dWy(i));
		}
	}
}
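/*
 * For reference, the residual computed above with the geometric means written
 * out explicitly (n_y bins in year y; G is the matrix formed in the loop and
 * G_{bb} its diagonal entry for bin b):
 *
 *   r_{by} = [ log(O_{by} / \tilde{O}_y) - log(E_{by} / \tilde{E}_y) ] / ( W_y * sqrt(G_{bb}) )
 *
 *   \tilde{O}_y = ( prod_{b=1..n_y} O_{by} )^{1/n_y}
 *   \tilde{E}_y = ( prod_{b=1..n_y} E_{by} )^{1/n_y}
 */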
void ear_init ( )

/******************************************************************************/
/*
  Purpose:

    EAR_INIT initializes the data structures, and calls Triangulate2 to clip ears.

  Modified:

    30 April 2007

  Author:

    Joseph O'Rourke

  Local Parameters:

    Local, tVertex V0, V1, V2, three consecutive vertices of the polygon.
*/
{
  tVertex v0;
  tVertex v1;
  tVertex v2;
/*
  Initialize v1->ear for all vertices.
*/
  v1 = vertices;

  do
  {
    v2 = v1->next;
    v0 = v1->prev;
    v1->ear = diagonal ( v0, v2 );
    v1 = v1->next;
  } while ( v1 != vertices );

  return;
}
INLINE Matrix<dim, dim> intersect_metrics(
    Matrix<dim, dim> m1, Matrix<dim, dim> m2) {
  auto n = invert(m1) * m2;
  auto n_decomp = decompose_eigen(n);
  bool all_above_one = true;
  bool all_below_one = true;
  for (Int i = 0; i < dim; ++i) {
    if (n_decomp.l[i] > 1) all_below_one = false;
    if (n_decomp.l[i] < 1) all_above_one = false;
  }
  if (all_below_one) return m1;
  if (all_above_one) return m2;
  auto p = n_decomp.q;
  Vector<dim> w;
  for (Int i = 0; i < dim; ++i) {
    Real u = metric_product(m1, p[i]);
    Real v = metric_product(m2, p[i]);
    w[i] = max2(u, v);
  }
  auto ip = invert(p);
  auto m = transpose(ip) * diagonal(w) * ip;
  return m;
}
int main()
{
    int i, j, count;
    int r = SIZE, c = SIZE;

    int **arr = (int **)malloc(r * sizeof(int *));
    for (i = 0; i < r; i++)
        arr[i] = (int *)malloc(c * sizeof(int));

    // Note that arr[i][j] is same as *(*(arr+i)+j)
    count = 0;
    for (i = 0; i < r; i++)
        for (j = 0; j < c; j++)
            arr[i][j] = ++count;  // OR *(*(arr+i)+j) = ++count

    printMat(arr, SIZE);

    /* Code for further processing and free the dynamically allocated memory */
    int len = 0;
    int **sol = diagonal(arr, SIZE, SIZE, &len);
    printSolMat(sol, len);

    return 0;
}
void multialign(MultiFasta *mf, int kmersize, int mindiaglen)
{
    unsigned int i, j;

    /* Allocate memory for plasmid statistics */
    plasmidst.ps = malloc(sizeof(PosStat) * mf->num);
    if (plasmidst.ps == NULL) {
        fprintf(stderr, "Not enough memory for this number of plasmids");
        return;
    }
    plasmidst.plasmidnum = mf->num;
    for (i = 0; i < mf->num; i++) {
        j = strlen(mf->fasta[i].seq);
        plasmidst.ps[i].stat = calloc(j, sizeof(int));
        plasmidst.ps[i].numpos = j;
    }

    /* Calculate diagonals and vote for tile positions */
    for (i = 0; i < mf->num - 1; i++) {
        for (j = i + 1; j < mf->num; j++) {
            printf("*******\n%s VS. %s\n*******\n", mf->fasta[i].header, mf->fasta[j].header);
            index1 = i;
            index2 = j;
            diagonal(mf->fasta[i].seq, mf->fasta[j].seq, kmersize, mindiaglen);
        }
    }

    /* Select best positions from plasmid statistics */
    tileplasmids(mf);

    /* Release resources */
    for (i = 0; i < mf->num; i++) {
        free(plasmidst.ps[i].stat);
    }
    free(plasmidst.ps);
}
void cantor(long ip)
{
    int diag = diagonal(ip);
    int p, q, offset;

    if (diag % 2 == 0) {
        /* going down the diagonal */
        p = diag;
        q = 1;
        offset = offset_from_diag(ip, diag - 1);
        p -= offset;
        q += offset;
    } else {
        /* going up the diagonal */
        p = 1;
        q = diag;
        offset = offset_from_diag(ip, diag - 1);
        p += offset;
        q -= offset;
    }
    printf("%d/%d", q, p);
}
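/*
 * diagonal() and offset_from_diag() are not shown here. Under the usual
 * Cantor zig-zag enumeration (an assumption about their intended behaviour),
 * the ip-th pair (1-based) lies on the smallest anti-diagonal d whose
 * triangular number d*(d+1)/2 reaches ip, and the offset counts positions
 * from the start of that diagonal:
 */
static int diagonal(long ip)
{
    int d = 1;
    while ((long)d * (d + 1) / 2 < ip)
        ++d;
    return d;
}

static int offset_from_diag(long ip, int prev_diag)
{
    /* pairs on all diagonals before the current one: 1 + 2 + ... + prev_diag */
    return (int)(ip - (long)prev_diag * (prev_diag + 1) / 2 - 1);
}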
static int triangulate(int n, const int* verts, int* indices, int* tris)
{
    int ntris = 0;
    int* dst = tris;

    // The last bit of the index is used to indicate if the vertex can be removed.
    for (int i = 0; i < n; i++)
    {
        int i1 = next(i, n);
        int i2 = next(i1, n);
        if (diagonal(i, i2, n, verts, indices))
            indices[i1] |= 0x80000000;
    }

    while (n > 3)
    {
        int minLen = -1;
        int mini = -1;
        for (int i = 0; i < n; i++)
        {
            int i1 = next(i, n);
            if (indices[i1] & 0x80000000)
            {
                const int* p0 = &verts[(indices[i] & 0x0fffffff) * 4];
                const int* p2 = &verts[(indices[next(i1, n)] & 0x0fffffff) * 4];

                int dx = p2[0] - p0[0];
                int dy = p2[2] - p0[2];
                int len = dx*dx + dy*dy;

                if (minLen < 0 || len < minLen)
                {
                    minLen = len;
                    mini = i;
                }
            }
        }

        if (mini == -1)
        {
            // Should not happen.
            /* printf("mini == -1 ntris=%d n=%d\n", ntris, n);
            for (int i = 0; i < n; i++)
            {
                printf("%d ", indices[i] & 0x0fffffff);
            }
            printf("\n"); */
            return -ntris;
        }

        int i = mini;
        int i1 = next(i, n);
        int i2 = next(i1, n);

        *dst++ = indices[i] & 0x0fffffff;
        *dst++ = indices[i1] & 0x0fffffff;
        *dst++ = indices[i2] & 0x0fffffff;
        ntris++;

        // Removes P[i1] by copying P[i+1]...P[n-1] left one index.
        n--;
        for (int k = i1; k < n; k++)
            indices[k] = indices[k+1];

        if (i1 >= n) i1 = 0;
        i = prev(i1, n);
        // Update diagonal flags.
        if (diagonal(prev(i, n), i1, n, verts, indices))
            indices[i] |= 0x80000000;
        else
            indices[i] &= 0x0fffffff;

        if (diagonal(i, next(i1, n), n, verts, indices))
            indices[i1] |= 0x80000000;
        else
            indices[i1] &= 0x0fffffff;
    }

    // Append the remaining triangle.
    *dst++ = indices[0] & 0x0fffffff;
    *dst++ = indices[1] & 0x0fffffff;
    *dst++ = indices[2] & 0x0fffffff;
    ntris++;

    return ntris;
}
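// The next() and prev() index helpers used throughout triangulate() are not
// included above. A minimal sketch of the usual wrap-around versions over a
// polygon of n vertices (an assumption about their definitions):
inline int prev(int i, int n) { return i - 1 >= 0 ? i - 1 : n - 1; }
inline int next(int i, int n) { return i + 1 < n ? i + 1 : 0; }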
void gridpack::dynamic_simulation_r::DSAppModule::solve( gridpack::dynamic_simulation_r::DSBranch::Event fault) { gridpack::utility::CoarseTimer *timer = gridpack::utility::CoarseTimer::instance(); int t_total = timer->createCategory("Dynamic Simulation: Total Application"); #if 0 timer->start(t_total); int t_matset = timer->createCategory("Dynamic Simulation: Matrix Setup"); timer->start(t_matset); p_factory->setMode(YBUS); gridpack::mapper::FullMatrixMap<DSNetwork> ybusMap(p_network); boost::shared_ptr<gridpack::math::Matrix> orgYbus = ybusMap.mapToMatrix(); ///p_branchIO.header("\n=== orginal ybus: ============\n"); ///orgYbus->print(); // Form constant impedance load admittance yl for all buses and add it to // system Y matrix: ybus = ybus + yl p_factory->setMode(YL); boost::shared_ptr<gridpack::math::Matrix> ybus = ybusMap.mapToMatrix(); ///p_branchIO.header("\n=== ybus after added yl: ============\n"); ///ybus->print(); // Construct permutation matrix perm by checking for multiple generators at a bus p_factory->setMode(PERM); gridpack::mapper::FullMatrixMap<DSNetwork> permMap(p_network); boost::shared_ptr<gridpack::math::Matrix> perm = permMap.mapToMatrix(); timer->stop(t_matset); ///p_busIO->header("\n=== perm: ============\n"); ///perm->print(); // Form a transposed matrix of perm int t_trans = timer->createCategory("Dynamic Simulation: Matrix Transpose"); timer->start(t_trans); boost::shared_ptr<gridpack::math::Matrix> permTrans(transpose(*perm)); timer->stop(t_trans); ///p_busIO->header("\n=== permTrans: ============\n"); ///permTrans->print(); // Construct matrix Y_a using extracted xd and ra from gen data, // and construct its diagonal matrix diagY_a timer->start(t_matset); p_factory->setMode(YA); gridpack::mapper::FullMatrixMap<DSNetwork> yaMap(p_network); boost::shared_ptr<gridpack::math::Matrix> diagY_a = yaMap.mapToMatrix(); ///p_busIO->header("\n=== diagY_a: ============\n"); ///diagY_a->print(); // Convert diagY_a from sparse matrix to dense matrix Y_a so that SuperLU_DIST can solve gridpack::math::MatrixStorageType denseType = gridpack::math::Dense; boost::shared_ptr<gridpack::math::Matrix> Y_a(gridpack::math::storageType(*diagY_a, denseType)); timer->stop(t_matset); // Construct matrix Ymod: Ymod = diagY_a * permTrans int t_matmul = timer->createCategory("Dynamic Simulation: Matrix Multiply"); timer->start(t_matmul); boost::shared_ptr<gridpack::math::Matrix> Ymod(multiply(*diagY_a, *permTrans)); timer->stop(t_matmul); ///p_busIO->header("\n=== Ymod: ============\n"); ///Ymod->print(); // Form matrix Y_b: Y_b(1:ngen, jg) = -Ymod, where jg represents the // corresponding index sets of buses that the generators are connected to. // Then construct Y_b's transposed matrix Y_c: Y_c = Y_b' // This two steps can be done by using a P matrix to get Y_c directly. 
timer->start(t_matset); p_factory->setMode(PMatrix); gridpack::mapper::FullMatrixMap<DSNetwork> pMap(p_network); boost::shared_ptr<gridpack::math::Matrix> P = pMap.mapToMatrix(); ///p_busIO->header("\n=== P: ============\n"); ///P->print(); p_factory->setMode(YC); gridpack::mapper::FullMatrixMap<DSNetwork> cMap(p_network); boost::shared_ptr<gridpack::math::Matrix> Y_c = cMap.mapToMatrix(); Y_c->scale(-1.0); ///p_busIO->header("\n=== Y_c: ============\n"); ///Y_c->print(); p_factory->setMode(YB); gridpack::mapper::FullMatrixMap<DSNetwork> bMap(p_network); boost::shared_ptr<gridpack::math::Matrix> Y_b = bMap.mapToMatrix(); timer->stop(t_matset); ///p_busIO->header("\n=== Y_b: ============\n"); ///Y_b->print(); Y_c->scale(-1.0); // scale Y_c by -1.0 for linear solving // Convert Y_c from sparse matrix to dense matrix Y_cDense so that SuperLU_DIST can solve //gridpack::math::Matrix::StorageType denseType = gridpack::math::Matrix::Dense; timer->start(t_matset); boost::shared_ptr<gridpack::math::Matrix> Y_cDense(gridpack::math::storageType(*Y_c, denseType)); // Form matrix permYmod p_factory->setMode(permYMOD); gridpack::mapper::FullMatrixMap<DSNetwork> pymMap(p_network); boost::shared_ptr<gridpack::math::Matrix> permYmod= pymMap.mapToMatrix(); ///p_busIO->header("\n=== permYmod: ============\n"); ///permYmod->print(); // Update ybus: ybus = ybus+permYmod (different dimension) => prefy11ybus p_factory->setMode(updateYbus); boost::shared_ptr<gridpack::math::Vector> vPermYmod(diagonal(*permYmod)); ///p_busIO->header("\n=== vPermYmod: ============\n"); ///vPermYmod->print(); gridpack::mapper::BusVectorMap<DSNetwork> permYmodMap(p_network); permYmodMap.mapToBus(vPermYmod); boost::shared_ptr<gridpack::math::Matrix> prefy11ybus = ybusMap.mapToMatrix(); timer->stop(t_matset); ///p_branchIO.header("\n=== prefy11ybus: ============\n"); ///prefy11ybus->print(); // Solve linear equations of ybus * X = Y_c //gridpack::math::LinearSolver solver1(*prefy11ybus); //solver1.configure(cursor); int t_solve = timer->createCategory("Dynamic Simulation: Solve Linear Equation"); timer->start(t_solve); gridpack::math::LinearMatrixSolver solver1(*prefy11ybus); boost::shared_ptr<gridpack::math::Matrix> prefy11X(solver1.solve(*Y_cDense)); timer->stop(t_solve); ///p_branchIO.header("\n=== prefy11X: ============\n"); ///prefy11X->print(); //----------------------------------------------------------------------- // Compute prefy11 //----------------------------------------------------------------------- // Form reduced admittance matrix prefy11: prefy11 = Y_b * X timer->start(t_matmul); boost::shared_ptr<gridpack::math::Matrix> prefy11(multiply(*Y_b, *prefy11X)); timer->stop(t_matmul); // Update prefy11: prefy11 = Y_a + prefy11 prefy11->add(*Y_a); ///p_branchIO.header("\n=== Reduced Ybus: prefy11: ============\n"); ///prefy11->print(); //----------------------------------------------------------------------- // Compute fy11 // Update ybus values at fault stage //----------------------------------------------------------------------- // Read the switch info from faults Event from input.xml int sw2_2 = fault.from_idx - 1; int sw3_2 = fault.to_idx - 1; boost::shared_ptr<gridpack::math::Matrix> fy11ybus(prefy11ybus->clone()); ///p_branchIO.header("\n=== fy11ybus(original): ============\n"); ///fy11ybus->print(); gridpack::ComplexType x(0.0, -1e7); #if 0 fy11ybus->setElement(sw2_2, sw2_2, -x); fy11ybus->ready(); #else timer->start(t_matset); p_factory->setEvent(fault); p_factory->setMode(onFY); ybusMap.overwriteMatrix(fy11ybus); 
timer->stop(t_matset); #endif ///p_branchIO.header("\n=== fy11ybus: ============\n"); ///fy11ybus->print(); // Solve linear equations of fy11ybus * X = Y_c timer->start(t_solve); gridpack::math::LinearMatrixSolver solver2(*fy11ybus); boost::shared_ptr<gridpack::math::Matrix> fy11X(solver2.solve(*Y_cDense)); timer->stop(t_solve); ///p_branchIO.header("\n=== fy11X: ============\n"); ///fy11X->print(); // Form reduced admittance matrix fy11: fy11 = Y_b * X timer->start(t_matmul); boost::shared_ptr<gridpack::math::Matrix> fy11(multiply(*Y_b, *fy11X)); timer->stop(t_matmul); // Update fy11: fy11 = Y_a + fy11 fy11->add(*Y_a); ///p_branchIO.header("\n=== Reduced Ybus: fy11: ============\n"); ///fy11->print(); //----------------------------------------------------------------------- // Compute posfy11 // Update ybus values at clear fault stage //----------------------------------------------------------------------- // Get the updating factor for posfy11 stage ybus #if 0 gridpack::ComplexType myValue = p_factory->setFactor(sw2_2, sw3_2); #endif timer->start(t_matset); boost::shared_ptr<gridpack::math::Matrix> posfy11ybus(prefy11ybus->clone()); timer->stop(t_matset); ///p_branchIO.header("\n=== posfy11ybus (original): ============\n"); ///posfy11ybus->print(); #if 0 gridpack::ComplexType big(0.0, 1e7); gridpack::ComplexType x11 = big - myValue; posfy11ybus->addElement(sw2_2, sw2_2, x+x11); gridpack::ComplexType x12 = myValue; posfy11ybus->addElement(sw2_2, sw3_2, x12); gridpack::ComplexType x21 = myValue; posfy11ybus->addElement(sw3_2, sw2_2, x21); gridpack::ComplexType x22 = -myValue; posfy11ybus->addElement(sw3_2, sw3_2, x22); posfy11ybus->ready(); #else timer->start(t_matset); p_factory->setMode(posFY); ybusMap.incrementMatrix(posfy11ybus); timer->stop(t_matset); #endif ///p_branchIO.header("\n=== posfy11ybus: ============\n"); ///posfy11ybus->print(); // Solve linear equations of posfy11ybus * X = Y_c timer->start(t_solve); gridpack::math::LinearMatrixSolver solver3(*posfy11ybus); boost::shared_ptr<gridpack::math::Matrix> posfy11X(solver3.solve(*Y_cDense)); timer->stop(t_solve); ///p_branchIO.header("\n=== posfy11X: ============\n"); ///posfy11X->print(); // Form reduced admittance matrix posfy11: posfy11 = Y_b * X timer->start(t_matmul); boost::shared_ptr<gridpack::math::Matrix> posfy11(multiply(*Y_b, *posfy11X)); timer->stop(t_matmul); // Update posfy11: posfy11 = Y_a + posfy11 posfy11->add(*Y_a); ///p_branchIO.header("\n=== Reduced Ybus: posfy11: ============\n"); ///posfy11->print(); //----------------------------------------------------------------------- // Integration implementation (Modified Euler Method) //----------------------------------------------------------------------- // Map to create vector pelect int t_vecset = timer->createCategory("Dynamic Simulation: Setup Vector"); timer->start(t_vecset); p_factory->setMode(init_pelect); gridpack::mapper::BusVectorMap<DSNetwork> XMap1(p_network); boost::shared_ptr<gridpack::math::Vector> pelect = XMap1.mapToVector(); ///p_busIO->header("\n=== pelect: ===\n"); ///pelect->print(); // Map to create vector eprime_s0 p_factory->setMode(init_eprime); gridpack::mapper::BusVectorMap<DSNetwork> XMap2(p_network); boost::shared_ptr<gridpack::math::Vector> eprime_s0 = XMap2.mapToVector(); ///p_busIO->header("\n=== eprime: ===\n"); ///eprime_s0->print(); // Map to create vector mac_ang_s0 p_factory->setMode(init_mac_ang); gridpack::mapper::BusVectorMap<DSNetwork> XMap3(p_network); boost::shared_ptr<gridpack::math::Vector> mac_ang_s0 = 
XMap3.mapToVector(); ///p_busIO->header("\n=== mac_ang_s0: ===\n"); ///mac_ang_s0->print(); // Map to create vector mac_spd_s0 p_factory->setMode(init_mac_spd); gridpack::mapper::BusVectorMap<DSNetwork> XMap4(p_network); boost::shared_ptr<gridpack::math::Vector> mac_spd_s0 = XMap4.mapToVector(); ///p_busIO->header("\n=== mac_spd_s0: ===\n"); ///mac_spd_s0->print(); // Map to create vector eqprime p_factory->setMode(init_eqprime); gridpack::mapper::BusVectorMap<DSNetwork> XMap5(p_network); boost::shared_ptr<gridpack::math::Vector> eqprime = XMap5.mapToVector(); ///p_busIO->header("\n=== eqprime: ===\n"); ///eqprime->print(); // Map to create vector pmech p_factory->setMode(init_pmech); gridpack::mapper::BusVectorMap<DSNetwork> XMap6(p_network); boost::shared_ptr<gridpack::math::Vector> pmech = XMap6.mapToVector(); ///p_busIO->header("\n=== pmech: ===\n"); ///pmech->print(); // Map to create vector mva p_factory->setMode(init_mva); gridpack::mapper::BusVectorMap<DSNetwork> XMap7(p_network); boost::shared_ptr<gridpack::math::Vector> mva = XMap7.mapToVector(); ///p_busIO->header("\n=== mva: ===\n"); ///mva->print(); // Map to create vector d0 p_factory->setMode(init_d0); gridpack::mapper::BusVectorMap<DSNetwork> XMap8(p_network); boost::shared_ptr<gridpack::math::Vector> d0 = XMap8.mapToVector(); ///p_busIO->header("\n=== d0: ===\n"); ///d0->print(); // Map to create vector h p_factory->setMode(init_h); gridpack::mapper::BusVectorMap<DSNetwork> XMap9(p_network); boost::shared_ptr<gridpack::math::Vector> h = XMap9.mapToVector(); ///p_busIO->header("\n=== h: ===\n"); ///h->print(); ///p_busIO->header("\n============Start of Simulation:=====================\n"); // Declare vector mac_ang_s1, mac_spd_s1 boost::shared_ptr<gridpack::math::Vector> mac_ang_s1(mac_ang_s0->clone()); boost::shared_ptr<gridpack::math::Vector> mac_spd_s1(mac_spd_s0->clone()); // Declare vector eprime_s1 boost::shared_ptr<gridpack::math::Vector> eprime_s1(mac_ang_s0->clone()); // Declare vector dmac_ang_s0, dmac_spd_s0, dmac_ang_s1, dmac_spd_s1 boost::shared_ptr<gridpack::math::Vector> dmac_ang_s0(mac_ang_s0->clone()); boost::shared_ptr<gridpack::math::Vector> dmac_ang_s1(mac_ang_s0->clone()); boost::shared_ptr<gridpack::math::Vector> dmac_spd_s0(mac_spd_s0->clone()); boost::shared_ptr<gridpack::math::Vector> dmac_spd_s1(mac_spd_s0->clone()); // Declare vector curr boost::shared_ptr<gridpack::math::Vector> curr(mac_ang_s0->clone()); timer->stop(t_vecset); timer->start(t_trans); boost::shared_ptr<gridpack::math::Matrix> trans_prefy11(transpose(*prefy11)); boost::shared_ptr<gridpack::math::Matrix> trans_fy11(transpose(*fy11)); boost::shared_ptr<gridpack::math::Matrix> trans_posfy11(transpose(*posfy11)); timer->stop(t_trans); timer->start(t_vecset); boost::shared_ptr<gridpack::math::Vector> vecTemp(mac_ang_s0->clone()); timer->stop(t_vecset); // Simulation related variables int simu_k; int t_step[20]; double t_width[20]; int flagF; int S_Steps; int last_S_Steps; int steps3, steps2, steps1; double h_sol1, h_sol2; int flagF1, flagF2; int I_Steps; const double sysFreq = 60.0; double pi = 4.0*atan(1.0); const double basrad = 2.0 * pi * sysFreq; gridpack::ComplexType jay(0.0, 1.0); // switch info is set up here int nswtch = 4; static double sw1[4]; static double sw7[4]; sw1[0] = 0.0; sw1[1] = fault.start; sw1[2] = fault.end; sw1[3] = p_sim_time; sw7[0] = p_time_step; sw7[1] = fault.step; sw7[2] = p_time_step; sw7[3] = p_time_step; simu_k = 0; for (int i = 0; i < nswtch-1; i++) { t_step[i] = (int) ((sw1[i+1] -sw1[i]) / sw7[i]); 
t_width[i] = (sw1[i+1] - sw1[i]) / t_step[i]; simu_k += t_step[i]; } simu_k++; steps3 = t_step[0] + t_step[1] + t_step[2] - 1; steps2 = t_step[0] + t_step[1] - 1; steps1 = t_step[0] - 1; h_sol1 = t_width[0]; h_sol2 = h_sol1; flagF1 = 0; flagF2 = 0; p_S_Steps = 1; last_S_Steps = -1; if (p_generatorWatch) { p_generatorIO->header("t"); p_generatorIO->write("watch_header"); p_generatorIO->header("\n"); } for (I_Steps = 0; I_Steps < simu_k+1; I_Steps++) { if (I_Steps < steps1) { p_S_Steps = I_Steps; flagF1 = 0; flagF2 = 0; } else if (I_Steps == steps1) { p_S_Steps = I_Steps; flagF1 = 0; flagF2 = 1; } else if (I_Steps == steps1+1) { p_S_Steps = I_Steps; flagF1 = 1; flagF2 = 1; } else if ((I_Steps>steps1+1) && (I_Steps<steps2+1)) { p_S_Steps = I_Steps - 1; flagF1 = 1; flagF2 = 1; } else if (I_Steps==steps2+1) { p_S_Steps = I_Steps - 1; flagF1 = 1; flagF2 = 2; } else if (I_Steps==steps2+2) { p_S_Steps = I_Steps - 1; flagF1 = 2; flagF2 = 2; } else if (I_Steps>steps2+2) { p_S_Steps = I_Steps - 2; flagF1 = 2; flagF2 = 2; } if (I_Steps !=0 && last_S_Steps != p_S_Steps) { mac_ang_s0->equate(*mac_ang_s1); mac_spd_s0->equate(*mac_spd_s1); eprime_s0->equate(*eprime_s1); } vecTemp->equate(*mac_ang_s0); vecTemp->scale(jay); vecTemp->exp(); vecTemp->elementMultiply(*eqprime); eprime_s0->equate(*vecTemp); // ---------- CALL i_simu_innerloop(k,p_S_Steps,flagF1): ---------- int t_trnsmul = timer->createCategory("Dynamic Simulation: Transpose Multiply"); timer->start(t_trnsmul); if (flagF1 == 0) { curr.reset(multiply(*trans_prefy11, *eprime_s0)); //MatMultTranspose(prefy11, eprime_s0, curr); //transposeMultiply(*prefy11,*eprime_s0,*curr); } else if (flagF1 == 1) { curr.reset(multiply(*trans_fy11, *eprime_s0)); //MatMultTranspose(fy11, eprime_s0, curr); //transposeMultiply(*fy11,*eprime_s0,*curr); } else if (flagF1 == 2) { curr.reset(multiply(*trans_posfy11, *eprime_s0)); //MatMultTranspose(posfy11, eprime_s0, curr); //transposeMultiply(*posfy11,*eprime_s0,*curr); } timer->stop(t_trnsmul); // ---------- CALL mac_em2(k,p_S_Steps): ---------- // ---------- pelect: ---------- curr->conjugate(); vecTemp->equate(*eprime_s0); vecTemp->elementMultiply(*curr); pelect->equate(*vecTemp); pelect->real(); //Get the real part of pelect // ---------- dmac_ang: ---------- vecTemp->equate(*mac_spd_s0); vecTemp->add(-1.0); vecTemp->scale(basrad); dmac_ang_s0->equate(*vecTemp); // ---------- dmac_spd: ---------- vecTemp->equate(*pelect); vecTemp->elementMultiply(*mva); dmac_spd_s0->equate(*pmech); dmac_spd_s0->add(*vecTemp, -1.0); vecTemp->equate(*mac_spd_s0); vecTemp->add(-1.0); vecTemp->elementMultiply(*d0); dmac_spd_s0->add(*vecTemp, -1.0); vecTemp->equate(*h); vecTemp->scale(2.0); dmac_spd_s0->elementDivide(*vecTemp); mac_ang_s1->equate(*mac_ang_s0); vecTemp->equate(*dmac_ang_s0); mac_ang_s1->add(*dmac_ang_s0, h_sol1); mac_spd_s1->equate(*mac_spd_s0); vecTemp->equate(*dmac_spd_s0); mac_spd_s1->add(*vecTemp, h_sol1); vecTemp->equate(*mac_ang_s1); vecTemp->scale(jay); vecTemp->exp(); vecTemp->elementMultiply(*eqprime); eprime_s1->equate(*vecTemp); // ---------- CALL i_simu_innerloop2(k,p_S_Steps+1,flagF2): ---------- timer->start(t_trnsmul); if (flagF2 == 0) { curr.reset(multiply(*trans_prefy11, *eprime_s1)); //MatMultTranspose(prefy11, eprime_s1, curr); //transposeMultiply(*prefy11,*eprime_s1,*curr); } else if (flagF2 == 1) { curr.reset(multiply(*trans_fy11, *eprime_s1)); //MatMultTranspose(fy11, eprime_s1, curr); //transposeMultiply(*fy11,*eprime_s1,*curr); } else if (flagF2 == 2) { curr.reset(multiply(*trans_posfy11, 
*eprime_s1)); //MatMultTranspose(posfy11, eprime_s1, curr); //transposeMultiply(*posfy11,*eprime_s1,*curr); } timer->stop(t_trnsmul); // ---------- CALL mac_em2(k,p_S_Steps+1): ---------- // ---------- pelect: ---------- curr->conjugate(); vecTemp->equate(*eprime_s1); vecTemp->elementMultiply(*curr); pelect->equate(*vecTemp); pelect->real(); //Get the real part of pelect // ---------- dmac_ang: ---------- vecTemp->equate(*mac_spd_s1); vecTemp->add(-1.0); vecTemp->scale(basrad); dmac_ang_s1->equate(*vecTemp); // ---------- dmac_spd: ---------- vecTemp->equate(*pelect); vecTemp->elementMultiply(*mva); dmac_spd_s1->equate(*pmech); dmac_spd_s1->add(*vecTemp, -1.0); vecTemp->equate(*mac_spd_s1); vecTemp->add(-1.0); vecTemp->elementMultiply(*d0); dmac_spd_s1->add(*vecTemp, -1.0); vecTemp->equate(*h); vecTemp->scale(2.0); dmac_spd_s1->elementDivide(*vecTemp); vecTemp->equate(*dmac_ang_s0); vecTemp->add(*dmac_ang_s1); vecTemp->scale(0.5); mac_ang_s1->equate(*mac_ang_s0); mac_ang_s1->add(*vecTemp, h_sol2); vecTemp->equate(*dmac_spd_s0); vecTemp->add(*dmac_spd_s1); vecTemp->scale(0.5); mac_spd_s1->equate(*mac_spd_s0); mac_spd_s1->add(*vecTemp, h_sol2); if (p_generatorWatch && I_Steps%p_watchFrequency == 0) { p_factory->setMode(init_mac_ang); XMap3.mapToBus(mac_ang_s1); p_factory->setMode(init_mac_spd); XMap3.mapToBus(mac_spd_s1); char tbuf[32]; sprintf(tbuf,"%8.4f",static_cast<double>(I_Steps)*p_time_step); p_generatorIO->header(tbuf); p_generatorIO->write("watch"); p_generatorIO->header("\n"); } // Print to screen if (last_S_Steps != p_S_Steps) { //sprintf(ioBuf, "\n========================S_Steps = %d=========================\n", p_S_Steps); //p_busIO->header(ioBuf); //mac_ang_s0->print(); //mac_spd_s0->print(); //pmech->print(); //pelect->print(); //sprintf(ioBuf, "========================End of S_Steps = %d=========================\n\n", p_S_Steps); //p_busIO->header(ioBuf); } if (I_Steps == simu_k) { p_factory->setMode(init_mac_ang); XMap3.mapToBus(mac_ang_s1); p_factory->setMode(init_mac_spd); XMap3.mapToBus(mac_spd_s1); p_factory->setMode(init_pmech); XMap6.mapToBus(pmech); p_factory->setMode(init_pelect); XMap1.mapToBus(pelect); //mac_ang_s1->print(); //mac_spd_s1->print(); //pmech->print(); //pelect->print(); } // End of Print to screen last_S_Steps = p_S_Steps; } closeGeneratorWatchFile(); timer->stop(t_total); #else timer->start(t_total); int t_matset = timer->createCategory("Matrix Setup"); timer->start(t_matset); p_factory->setMode(YBUS); gridpack::mapper::FullMatrixMap<DSNetwork> ybusMap(p_network); timer->stop(t_matset); int t_trans = timer->createCategory("Matrix Transpose"); // Construct matrix diagY_a using xd and ra extracted from gen data, timer->start(t_matset); p_factory->setMode(YA); gridpack::mapper::FullMatrixMap<DSNetwork> yaMap(p_network); boost::shared_ptr<gridpack::math::Matrix> diagY_a = yaMap.mapToMatrix(); // Convert diagY_a from sparse matrix to dense matrix Y_a so that SuperLU_DIST can solve gridpack::math::MatrixStorageType denseType = gridpack::math::Dense; boost::shared_ptr<gridpack::math::Matrix> Y_a(gridpack::math::storageType(*diagY_a, denseType)); timer->stop(t_matset); int t_matmul = timer->createCategory("Matrix Multiply"); // Construct Y_c as a dense matrix timer->start(t_matset); p_factory->setMode(YC); gridpack::mapper::FullMatrixMap<DSNetwork> cMap(p_network); boost::shared_ptr<gridpack::math::Matrix> Y_cDense = cMap.mapToMatrix(true); // Construct Y_b p_factory->setMode(YB); gridpack::mapper::FullMatrixMap<DSNetwork> bMap(p_network); 
boost::shared_ptr<gridpack::math::Matrix> Y_b = bMap.mapToMatrix(); timer->stop(t_matset); timer->start(t_matset); p_factory->setMode(updateYbus); boost::shared_ptr<gridpack::math::Matrix> prefy11ybus = ybusMap.mapToMatrix(); timer->stop(t_matset); // Solve linear equations ybus * X = Y_c int t_solve = timer->createCategory("Solve Linear Equation"); timer->start(t_solve); gridpack::math::LinearMatrixSolver solver1(*prefy11ybus); boost::shared_ptr<gridpack::math::Matrix> prefy11X(solver1.solve(*Y_cDense)); timer->stop(t_solve); //----------------------------------------------------------------------- // Compute prefy11 //----------------------------------------------------------------------- // Form reduced admittance matrix prefy11: prefy11 = Y_b * X timer->start(t_matmul); boost::shared_ptr<gridpack::math::Matrix> prefy11(multiply(*Y_b, *prefy11X)); timer->stop(t_matmul); // Update prefy11: prefy11 = Y_a + prefy11 prefy11->add(*Y_a); //----------------------------------------------------------------------- // Compute fy11 // Update ybus values at beginning of fault //----------------------------------------------------------------------- // Read the switch info from fault. Event from input.xml int sw2_2 = fault.from_idx - 1; int sw3_2 = fault.to_idx - 1; boost::shared_ptr<gridpack::math::Matrix> fy11ybus(prefy11ybus->clone()); gridpack::ComplexType x(0.0, -1e7); timer->start(t_matset); p_factory->setEvent(fault); p_factory->setMode(onFY); ybusMap.overwriteMatrix(fy11ybus); timer->stop(t_matset); // Solve linear equations of fy11ybus * X = Y_c timer->start(t_solve); gridpack::math::LinearMatrixSolver solver2(*fy11ybus); boost::shared_ptr<gridpack::math::Matrix> fy11X(solver2.solve(*Y_cDense)); timer->stop(t_solve); // Form reduced admittance matrix fy11: fy11 = Y_b * X timer->start(t_matmul); boost::shared_ptr<gridpack::math::Matrix> fy11(multiply(*Y_b, *fy11X)); timer->stop(t_matmul); // Update fy11: fy11 = Y_a + fy11 fy11->add(*Y_a); //----------------------------------------------------------------------- // Compute posfy11 // Update ybus values at clear fault stage //----------------------------------------------------------------------- // Get the updating factor for posfy11 stage ybus timer->start(t_matset); boost::shared_ptr<gridpack::math::Matrix> posfy11ybus(prefy11ybus->clone()); timer->stop(t_matset); timer->start(t_matset); p_factory->setMode(posFY); ybusMap.incrementMatrix(posfy11ybus); timer->stop(t_matset); // Solve linear equations of posfy11ybus * X = Y_c timer->start(t_solve); gridpack::math::LinearMatrixSolver solver3(*posfy11ybus); boost::shared_ptr<gridpack::math::Matrix> posfy11X(solver3.solve(*Y_cDense)); timer->stop(t_solve); // Form reduced admittance matrix posfy11: posfy11 = Y_b * X timer->start(t_matmul); boost::shared_ptr<gridpack::math::Matrix> posfy11(multiply(*Y_b, *posfy11X)); timer->stop(t_matmul); // Update posfy11: posfy11 = Y_a + posfy11 posfy11->add(*Y_a); //----------------------------------------------------------------------- // Integration implementation (Modified Euler Method) //----------------------------------------------------------------------- p_factory->setDSParams(); p_factory->setMode(Eprime0); // create vectors used in solution method gridpack::mapper::BusVectorMap<DSNetwork> Emap(p_network); boost::shared_ptr<gridpack::math::Vector> eprime_s0 = Emap.mapToVector(); boost::shared_ptr<gridpack::math::Vector> eprime_s1(eprime_s0->clone()); boost::shared_ptr<gridpack::math::Vector> curr(eprime_s0->clone()); timer->start(t_trans); 
boost::shared_ptr<gridpack::math::Matrix> trans_prefy11(transpose(*prefy11)); boost::shared_ptr<gridpack::math::Matrix> trans_fy11(transpose(*fy11)); boost::shared_ptr<gridpack::math::Matrix> trans_posfy11(transpose(*posfy11)); timer->stop(t_trans); // Simulation related variables int simu_k; int t_step[20]; double t_width[20]; int flagF; int S_Steps; int last_S_Steps; int steps3, steps2, steps1; double h_sol1, h_sol2; int flagF1, flagF2; int I_Steps; const double sysFreq = 60.0; double pi = 4.0*atan(1.0); const double basrad = 2.0 * pi * sysFreq; gridpack::ComplexType jay(0.0, 1.0); // switch info is set up here int nswtch = 4; static double sw1[4]; static double sw7[4]; sw1[0] = 0.0; sw1[1] = fault.start; sw1[2] = fault.end; sw1[3] = p_sim_time; sw7[0] = p_time_step; sw7[1] = fault.step; sw7[2] = p_time_step; sw7[3] = p_time_step; simu_k = 0; for (int i = 0; i < nswtch-1; i++) { t_step[i] = (int) ((sw1[i+1] -sw1[i]) / sw7[i]); t_width[i] = (sw1[i+1] - sw1[i]) / t_step[i]; simu_k += t_step[i]; } simu_k++; steps3 = t_step[0] + t_step[1] + t_step[2] - 1; steps2 = t_step[0] + t_step[1] - 1; steps1 = t_step[0] - 1; h_sol1 = t_width[0]; h_sol2 = h_sol1; flagF1 = 0; flagF2 = 0; S_Steps = 1; last_S_Steps = -1; if (p_generatorWatch) { p_generatorIO->header("t"); if (p_generatorWatch) p_generatorIO->write("watch_header"); p_generatorIO->header("\n"); } for (I_Steps = 0; I_Steps < simu_k+1; I_Steps++) { if (I_Steps < steps1) { S_Steps = I_Steps; flagF1 = 0; flagF2 = 0; } else if (I_Steps == steps1) { S_Steps = I_Steps; flagF1 = 0; flagF2 = 1; } else if (I_Steps == steps1+1) { S_Steps = I_Steps; flagF1 = 1; flagF2 = 1; } else if ((I_Steps>steps1+1) && (I_Steps<steps2+1)) { S_Steps = I_Steps - 1; flagF1 = 1; flagF2 = 1; } else if (I_Steps==steps2+1) { S_Steps = I_Steps - 1; flagF1 = 1; flagF2 = 2; } else if (I_Steps==steps2+2) { S_Steps = I_Steps - 1; flagF1 = 2; flagF2 = 2; } else if (I_Steps>steps2+2) { S_Steps = I_Steps - 2; flagF1 = 2; flagF2 = 2; } if (I_Steps !=0 && last_S_Steps != S_Steps) { p_factory->initDSStep(false); } else { p_factory->initDSStep(true); } p_factory->setMode(Eprime0); Emap.mapToVector(eprime_s0); // ---------- CALL i_simu_innerloop(k,S_Steps,flagF1): ---------- int t_trnsmul = timer->createCategory("Transpose Multiply"); timer->start(t_trnsmul); if (flagF1 == 0) { curr.reset(multiply(*trans_prefy11, *eprime_s0)); //MatMultTranspose(prefy11, eprime_s0, curr); //transposeMultiply(*prefy11,*eprime_s0,*curr); } else if (flagF1 == 1) { curr.reset(multiply(*trans_fy11, *eprime_s0)); //MatMultTranspose(fy11, eprime_s0, curr); //transposeMultiply(*fy11,*eprime_s0,*curr); } else if (flagF1 == 2) { curr.reset(multiply(*trans_posfy11, *eprime_s0)); //MatMultTranspose(posfy11, eprime_s0, curr); //transposeMultiply(*posfy11,*eprime_s0,*curr); } timer->stop(t_trnsmul); p_factory->setMode(Current); Emap.mapToBus(curr); p_factory->predDSStep(h_sol1); p_factory->setMode(Eprime1); Emap.mapToVector(eprime_s1); // ---------- CALL i_simu_innerloop2(k,S_Steps+1,flagF2): ---------- timer->start(t_trnsmul); if (flagF2 == 0) { curr.reset(multiply(*trans_prefy11, *eprime_s1)); //MatMultTranspose(prefy11, eprime_s1, curr); //transposeMultiply(*prefy11,*eprime_s1,*curr); } else if (flagF2 == 1) { curr.reset(multiply(*trans_fy11, *eprime_s1)); //MatMultTranspose(fy11, eprime_s1, curr); //transposeMultiply(*fy11,*eprime_s1,*curr); } else if (flagF2 == 2) { curr.reset(multiply(*trans_posfy11, *eprime_s1)); //MatMultTranspose(posfy11, eprime_s1, curr); //transposeMultiply(*posfy11,*eprime_s1,*curr); } 
timer->stop(t_trnsmul); p_factory->setMode(Current); Emap.mapToBus(curr); p_factory->corrDSStep(h_sol2); if (p_generatorWatch && I_Steps%p_watchFrequency == 0) { char tbuf[32]; sprintf(tbuf,"%8.4f",static_cast<double>(I_Steps)*p_time_step); p_generatorIO->header(tbuf); p_generatorIO->write("watch"); p_generatorIO->header("\n"); } // Print to screen if (I_Steps == simu_k) { char ioBuf[128]; sprintf(ioBuf, "\n========================S_Steps = %d=========================\n", S_Steps+1); p_busIO->header(ioBuf); sprintf(ioBuf, "\n Bus ID Generator ID" " mac_ang mac_spd mech elect\n\n"); p_busIO->header(ioBuf); p_busIO->write(); sprintf(ioBuf, "\n========================End of S_Steps = %d=========================\n\n", S_Steps+1); p_busIO->header(ioBuf); } // End of Print to screen last_S_Steps = S_Steps; } closeGeneratorWatchFile(); timer->stop(t_total); #endif }
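// The time loop above advances each generator with the modified Euler (Heun)
// scheme named in the comments: a predictor step using the derivative at the
// current state, then a corrector using the average of the two derivatives.
// A scalar sketch of the same idea; the State struct, the Deriv functor, and
// the function name are illustrative assumptions, not part of the GridPACK API.
struct State { double mac_ang, mac_spd; };

template <class Deriv>
State modified_euler_step(const State& s0, double h, Deriv deriv)
{
  State d0 = deriv(s0);                        // derivative at the current state
  State s1 { s0.mac_ang + h * d0.mac_ang,      // predictor
             s0.mac_spd + h * d0.mac_spd };
  State d1 = deriv(s1);                        // derivative at the predicted state
  return State { s0.mac_ang + h * 0.5 * (d0.mac_ang + d1.mac_ang),   // corrector
                 s0.mac_spd + h * 0.5 * (d0.mac_spd + d1.mac_spd) };
}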
void Foam::lduMatrix::operator-=(const lduMatrix& A)
{
    if (A.diagPtr_)
    {
        diag() -= A.diag();
    }

    if (symmetric() && A.symmetric())
    {
        upper() -= A.upper();
    }
    else if (symmetric() && A.asymmetric())
    {
        if (upperPtr_)
        {
            lower();
        }
        else
        {
            upper();
        }

        upper() -= A.upper();
        lower() -= A.lower();
    }
    else if (asymmetric() && A.symmetric())
    {
        if (A.upperPtr_)
        {
            lower() -= A.upper();
            upper() -= A.upper();
        }
        else
        {
            lower() -= A.lower();
            upper() -= A.lower();
        }
    }
    else if (asymmetric() && A.asymmetric())
    {
        lower() -= A.lower();
        upper() -= A.upper();
    }
    else if (diagonal())
    {
        if (A.upperPtr_)
        {
            upper() = -A.upper();
        }
        if (A.lowerPtr_)
        {
            lower() = -A.lower();
        }
    }
    else if (A.diagonal())
    {
    }
    else
    {
        FatalErrorIn("lduMatrix::operator-=(const lduMatrix& A)")
            << "Unknown matrix type combination"
            << abort(FatalError);
    }
}
glm::vec3 Node::scale_get()
{
  glm::vec3 diagonal(transform_scale[0][0],
                     transform_scale[1][1],
                     transform_scale[2][2]);
  return diagonal;
}
int main(int argc, char* argv[]) { // Initialization of the network, the communicator and the allocation of // the GPU is done as in previous tutorials. agile::NetworkEnvironment environment(argc, argv); typedef agile::GPUCommunicator<unsigned, float, float> communicator_type; communicator_type com; com.allocateGPU(); agile::GPUEnvironment::printInformation(std::cout); std::cout << std::endl; // We are interested in solving the linear problem \f$ Ax = y \f$, with a // given matrix \f$ A \f$ and a right-hand side vector \f$ y \f$. The unknown // is the vector \f$ x \f$. // Now, we can generate a matrix that shall be inverted (actually we do not // invert the matrix but use the CG algorithm). Note that CG requires a // symmetric positive definite (SPD) matrix and it is not too trivial to // write down a SPD matrix. If you fail to provide a SPD matrix to the CG // algorithm there is no guarantee that it will converge. You might be lucky, // you might be not... const unsigned SIZE = 20; float A_host[SIZE][SIZE]; for (unsigned row = 0; row < SIZE; ++row) for (unsigned column = 0; column <= row; ++column) { A_host[row][column] = (float(SIZE) - float(row) + float(SIZE) / 2.0) * (float(column) + 1.0); A_host[column][row] = A_host[row][column]; if (row == column) A_host[row][column] = 2.0 * float(SIZE) + float(row) + float(column); } // The matrix is still in the host's memory and has to be transfered to the // GPU. This is done automatically by the constructor of \p GPUMatrixPitched. agile::GPUMatrixPitched<float> A(SIZE, SIZE, (float*)A_host); // Next we need a reference solution. We can create any vector we like at // this place. std::vector<float> x_reference_host(SIZE); for (unsigned counter = 0; counter < SIZE; ++counter) x_reference_host[counter] = float(SIZE) - float(counter) + float(SIZE/3); // This vector has to be transfered to the GPU memory too. For vectors, this // can be achieved by the member function \p assignFromHost. agile::GPUVector<float> x_reference; x_reference.assignFromHost(x_reference_host.begin(), x_reference_host.end()); // We wrap the GPU matrix from above into a forward operator called // \p ForwardMatrix. Forward operators are simply objects that implement // the parenthesis-operator \p operator() which takes an // \p accumulated vector and returns a \p distributed one. In all other // respects the operator is a black box for us. // The \p ForwardMatrix operator requires a reference to the communicator // when constructing the object so that it has access to the network. typedef agile::ForwardMatrix<communicator_type, agile::GPUMatrixPitched<float> > forward_type; forward_type forward(com, A); // What we also want to use a preconditioner, which means that we change from // the original problem \f$ Ax = y \f$ to the equivalent one // \f$ PAx = Py \f$, where \f$ P \f$ is a preconditioner. The rationale is // that most often the matrix \f$ A \f$ is ill-conditioned and the CG algorithm // does not converge properly at all or it needs many iterations. The use of // a preconditioner makes the whole system better conditioned. The simplest // choice is to use the identity \f$ P = I \f$ (which means no preconditioning // at all). The best choice would be \f$ P = A^{-1} \f$ as we would have the // solution for \f$ x \f$ in the first step already (but then we need again // to find the inverse of \f$ A \f$ which we wanted to avoid). An // 'intermediate' possibility is to take \f$ P = diag(A)^{-1} \f$ which is // easy and fast to invert and gives better results than the identity. 
// A preconditioner belongs to the inverse operator. All inverse operators // implement a parenthesis-operator which takes a \p distributed vector // as input and returns an \p accumulated one (opposite to the forward // operators, thus). #if JACOBI_PRECONDITIONER typedef agile::JacobiPreconditioner<communicator_type, float> preconditioner_type; std::vector<float> diagonal(SIZE); for (unsigned row = 0; row < SIZE; ++row) diagonal[row] = A_host[row][row]; preconditioner_type preconditioner(com, diagonal); #else typedef agile::InverseIdentity<communicator_type> preconditioner_type; preconditioner_type preconditioner(com); #endif // The last operator needed is a measure. A measure operator has again // a parenthesis-operator. This timeis takes an \p accumulated vector as first // input and a \p distributed one as second input and returns a scalar // measuring somehow the size of the vectors. An example is the scalar // product operator. typedef agile::ScalarProductMeasure<communicator_type> measure_type; measure_type scalar_product(com); // Finally, generate the PCG solver. It needs the absolute and relative // tolerances as input so that it knows when the solution is good enough for // our purposes. Furthermore it requires the maximum amount of iterations // after which it simply capitulates without having found a solution. const double REL_TOLERANCE = 1e-12; const double ABS_TOLERANCE = 1e-6; const unsigned MAX_ITERATIONS = 100; agile::PreconditionedConjugateGradient<communicator_type, forward_type, preconditioner_type, measure_type> pcg(com, forward, preconditioner, scalar_product, REL_TOLERANCE, ABS_TOLERANCE, MAX_ITERATIONS); // What we have not generated, yet, is the right hand side \f$ y \f$. This is // simply one call to our forward operator. agile::GPUVector<float> y(SIZE); forward(x_reference, y); // We need one more vector to hold the result of the CG algorithm. Note that // we also supply the initial guess for the solution via this vector. agile::GPUVector<float> x(SIZE); // Finally, we have constructed, initialized, wrapped... everything. The only // thing left to do is to call the CG operator. pcg(y, x); // Print some statistics (and hope that the operator actually converged). if (pcg.convergence()) std::cout << "CG converged in "; else std::cout << "Error: CG did not converge in "; std::cout << pcg.getIteration() + 1 << " iterations." << std::endl; std::cout << "Initial residual = " << pcg.getRho0() << std::endl; std::cout << "Final residual = " << pcg.getRho() << std::endl; std::cout << "Ratio rho_k / rho_0 = " << pcg.getRho() / pcg.getRho0() << std::endl; // As the vectors in this example were quite small we can even print them to // standard output. std::cout << "Reference: " << std::endl << " "; for (unsigned counter = 0; counter < x_reference_host.size(); ++counter) std::cout << x_reference_host[counter] << " "; std::cout << std::endl; // The solution is still on the GPU and has to be transfered to the CPU memory. // This is accomplished using \p copyToHost. std::vector<float> x_host; x.copyToHost(x_host); // Output the solution, too. std::cout << "CG solution: " << std::endl << " "; for (unsigned counter = 0; counter < x_host.size(); ++counter) std::cout << x_host[counter] << " "; std::cout << std::endl; // Finally, we also compute the difference between the reference solution and // the true solution (of course, we do this on the GPU). 
  agile::GPUVector<float> difference(SIZE);
  subVector(x_reference, x, difference);

  // To measure the distance, we use the scalar product measure we have
  // introduced above. Note that this operator wants the first vector in
  // accumulated format and the second one in distributed format. The solution
  // we got from the CG algorithm is accumulated (because CG is an inverse
  // operator). This means we have to distribute the solution to have mixed
  // formats.
  agile::GPUVector<float> difference_dist(difference);
  com.distribute(difference_dist);
  std::cout << "L2 of difference: "
            << std::sqrt(std::abs(scalar_product(difference, difference_dist)))
            << std::endl;

  // So, that's it.
  return 0;
}
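// As the commentary above explains, the Jacobi preconditioner is simply
// division by the matrix diagonal, P = diag(A)^{-1}. A standalone sketch of
// that action, independent of the agile API (the function name and the plain
// std::vector interface are illustrative assumptions):
#include <cstddef>
#include <vector>

std::vector<float> apply_jacobi(const std::vector<float>& diag_of_A,
                                const std::vector<float>& r)
{
  // z = P * r  with  z_i = r_i / A_ii
  std::vector<float> z(r.size());
  for (std::size_t i = 0; i < r.size(); ++i)
    z[i] = r[i] / diag_of_A[i];
  return z;
}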
OMEGA_H_INLINE Matrix<dim, dim> compose_metric(
    Matrix<dim, dim> r, Vector<dim> h) {
  auto l = metric_eigenvalues(h);
  return r * diagonal(l) * transpose(r);
}