double Newton::solucao(double x) {
    double ff, dff;   // residual f(x) and derivative f'(x); double, to match the return type
    int n;            // iteration counter
    ff = 1;           // any value with |ff| > e, so the loop is entered
    n = 0;
    while ((std::abs(ff) > e) && (n < 1000)) {
        ff  = FH(x);
        dff = dF(x);
        x = x - ff / dff;   // Newton step; assumes dff != 0
        n++;
    }
    return x;
}
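// The routine above divides by dF(x) without guarding against a vanishing
// derivative and relies on a hard-coded cap of 1000 iterations. A minimal,
// self-contained sketch of the same iteration with an explicit guard is shown
// below; f, df, tol and max_iter are illustrative stand-ins for FH, dF, e and
// the iteration cap, not members of the original Newton class.
#include <cmath>
#include <functional>
#include <limits>

double newton_guarded(const std::function<double(double)>& f,
                      const std::function<double(double)>& df,
                      double x, double tol = 1e-10, int max_iter = 1000)
{
    for (int n = 0; n < max_iter; ++n) {
        const double fx = f(x);
        if (std::abs(fx) <= tol) break;                      // converged
        const double dfx = df(x);
        if (std::abs(dfx) < std::numeric_limits<double>::epsilon())
            break;                                           // derivative too small: stop instead of dividing
        x -= fx / dfx;                                       // Newton update
    }
    return x;
}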
std::vector<double> TSPsolver::calcFunc(std::vector<double> const &X) {
    std::vector<double> dX(X.size());
    std::vector<double> chaoticValue;

    // Advance every chaotic generator one step and keep its fourth state component.
    for (auto &elem : chaos) {
        for (auto *p_chaos : elem) {
            std::vector<double> tmp = p_chaos->solve_get_next();
            chaoticValue.push_back(tmp[3]);
        }
    }

    // The first component is the independent variable; the remaining components
    // are a gradient step perturbed by the chaotic signal.
    dX[0] = 1;
    for (unsigned i = 1; i < X.size(); ++i) {
        dX[i] = chaotic_coeff * chaoticValue[i - 1] - alpha * dF(X, i - 1);
    }

    // Damp the chaotic contribution for the next call.
    chaotic_coeff *= chaotic_reduce_coeff;
    return dX;
}
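// calcFunc returns a derivative vector (dX[0] = 1 plays the role of dt/dt), so
// it is presumably consumed by some time-stepping loop. A minimal sketch of such
// a driver using a plain explicit Euler step is given below; integrate_euler,
// steps and h are hypothetical names, not part of the original TSPsolver API.
#include <cstddef>
#include <vector>

std::vector<double> integrate_euler(TSPsolver& solver, std::vector<double> x,
                                    int steps, double h)
{
    for (int s = 0; s < steps; ++s) {
        const std::vector<double> dx = solver.calcFunc(x);  // dX[0] == 1 advances the first component
        for (std::size_t i = 0; i < x.size(); ++i)
            x[i] += h * dx[i];                              // forward Euler update
    }
    return x;
}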
template <class FluxModel, class Model>
void GravityColumnSolverPolymer<FluxModel, Model>::solveSingleColumn(const std::vector<int>& column_cells,
                                                                     const double dt,
                                                                     std::vector<double>& s,
                                                                     std::vector<double>& c,
                                                                     std::vector<double>& cmax,
                                                                     std::vector<double>& sol_vec)
{
    // This is written only to work with SinglePointUpwindTwoPhase,
    // not with arbitrary problem models.
    int col_size = column_cells.size();
    // if (col_size == 1) {
    //     sol_vec[2*column_cells[0] + 0] = 0.0;
    //     sol_vec[2*column_cells[0] + 1] = 0.0;
    //     return;
    // }
    StateWithZeroFlux state(s, c, cmax); // This holds s by reference.

    // Assemble.
    const int kl = 3;                    // number of lower diagonals
    const int ku = 3;                    // number of upper diagonals
    const int nrow = 2*kl + ku + 1;      // leading dimension required by dgbsv
    const int N = 2*col_size;            // N unknowns: s and c for each cell.
    std::vector<double> hm(nrow*N, 0.0); // band matrix with 3 upper and 3 lower diagonals.
    std::vector<double> rhs(N, 0.0);
    const BandMatrixCoeff bmc(N, ku, kl);

    for (int ci = 0; ci < col_size; ++ci) {
        std::vector<double> F(2, 0.);
        std::vector<double> dFd1(4, 0.);
        std::vector<double> dFd2(4, 0.);
        std::vector<double> dF(4, 0.);
        const int cell = column_cells[ci];
        const int prev_cell = (ci == 0) ? -999 : column_cells[ci - 1];            // -999: no neighbour
        const int next_cell = (ci == col_size - 1) ? -999 : column_cells[ci + 1];
        // model_.initResidual(cell, F);
        for (int j = grid_.cell_facepos[cell]; j < grid_.cell_facepos[cell+1]; ++j) {
            const int face = grid_.cell_faces[j];
            const int c1 = grid_.face_cells[2*face + 0];
            const int c2 = grid_.face_cells[2*face + 1];
            if (c1 == prev_cell || c2 == prev_cell || c1 == next_cell || c2 == next_cell) {
                F.assign(2, 0.);
                dFd1.assign(4, 0.);
                dFd2.assign(4, 0.);
                fmodel_.fluxConnection(state, grid_, dt, cell, face, &F[0], &dFd1[0], &dFd2[0]);
                if (c1 == prev_cell || c2 == prev_cell) {
                    hm[bmc(2*ci + 0, 2*(ci - 1) + 0)] += dFd2[0];
                    hm[bmc(2*ci + 0, 2*(ci - 1) + 1)] += dFd2[1];
                    hm[bmc(2*ci + 1, 2*(ci - 1) + 0)] += dFd2[2];
                    hm[bmc(2*ci + 1, 2*(ci - 1) + 1)] += dFd2[3];
                } else {
                    ASSERT(c1 == next_cell || c2 == next_cell);
                    hm[bmc(2*ci + 0, 2*(ci + 1) + 0)] += dFd2[0];
                    hm[bmc(2*ci + 0, 2*(ci + 1) + 1)] += dFd2[1];
                    hm[bmc(2*ci + 1, 2*(ci + 1) + 0)] += dFd2[2];
                    hm[bmc(2*ci + 1, 2*(ci + 1) + 1)] += dFd2[3];
                }
                hm[bmc(2*ci + 0, 2*ci + 0)] += dFd1[0];
                hm[bmc(2*ci + 0, 2*ci + 1)] += dFd1[1];
                hm[bmc(2*ci + 1, 2*ci + 0)] += dFd1[2];
                hm[bmc(2*ci + 1, 2*ci + 1)] += dFd1[3];
                rhs[2*ci + 0] += F[0];
                rhs[2*ci + 1] += F[1];
            }
        }
        F.assign(2, 0.);
        dF.assign(4, 0.);
        fmodel_.accumulation(grid_, cell, &F[0], &dF[0]);
        hm[bmc(2*ci + 0, 2*ci + 0)] += dF[0];
        hm[bmc(2*ci + 0, 2*ci + 1)] += dF[1];
        hm[bmc(2*ci + 1, 2*ci + 0)] += dF[2];
        if (std::abs(dF[3]) < 1e-12) {
            hm[bmc(2*ci + 1, 2*ci + 1)] += 1e-12;   // keep the diagonal nonsingular
        } else {
            hm[bmc(2*ci + 1, 2*ci + 1)] += dF[3];
        }
        rhs[2*ci + 0] += F[0];
        rhs[2*ci + 1] += F[1];
    }
    // model_.sourceTerms(); // Not needed

    // Solve.
    const int num_rhs = 1;
    int info = 0;
    std::vector<int> ipiv(N, 0);
    // Solution will be written to rhs.
    dgbsv_(&N, &kl, &ku, &num_rhs, &hm[0], &nrow, &ipiv[0], &rhs[0], &N, &info);
    if (info != 0) {
        std::cerr << "Failed column cells: ";
        std::copy(column_cells.begin(), column_cells.end(), std::ostream_iterator<int>(std::cerr, " "));
        std::cerr << "\n";
        THROW("Lapack reported error in dgbsv: " << info);
    }
    for (int ci = 0; ci < col_size; ++ci) {
        sol_vec[2*column_cells[ci] + 0] = -rhs[2*ci + 0];
        sol_vec[2*column_cells[ci] + 1] = -rhs[2*ci + 1];
    }
}
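// BandMatrixCoeff is not shown in this snippet. Given the call
// dgbsv_(&N, &kl, &ku, ..., &hm[0], &nrow, ...) with nrow = 2*kl + ku + 1, it
// presumably maps a dense (row, column) pair into LAPACK's column-major banded
// storage, where entry A(i,j) is stored at AB(kl + ku + i - j, j) in 0-based
// terms. A minimal sketch of such a mapping, under that assumption (the real
// helper may differ in naming and bounds checking):
struct BandIndex {
    int n, ku, kl, ldab;   // matrix order, upper/lower bandwidths, leading dimension
    BandIndex(int n_, int ku_, int kl_)
        : n(n_), ku(ku_), kl(kl_), ldab(2*kl_ + ku_ + 1) {}
    // Dense 0-based (i, j) -> offset into the packed band array;
    // valid only when -kl <= i - j <= ku.
    int operator()(int i, int j) const { return j*ldab + (kl + ku + i - j); }
};
// With two unknowns per cell and coupling restricted to the neighbouring cells
// of the column, the farthest entry in any row sits three columns off the
// diagonal, which is why kl = ku = 3 is sufficient above.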
const Vector& PFEMElement2DBubble::getResistingForceSensitivity(int gradnumber)
{
    // resize P
    int ndf = this->getNumDOF();
    P.resize(ndf);
    P.Zero();

    Vector dF(6), dFp(3), vdot(6), v(6), p(3), du(6);

    // Gather nodal accelerations, velocities, pressures and displacement sensitivities.
    for (int i = 0; i < 3; i++) {
        const Vector& accel = nodes[2*i]->getTrialAccel();
        vdot(2*i) = accel(0);
        vdot(2*i+1) = accel(1);

        const Vector& vel = nodes[2*i]->getTrialVel();
        v(2*i) = vel(0);
        v(2*i+1) = vel(1);

        const Vector& vel2 = nodes[2*i+1]->getTrialVel();
        // pressure
        p(i) = vel2(0);

        du(2*i) = nodes[2*i]->getDispSensitivity(1, gradnumber);
        du(2*i+1) = nodes[2*i]->getDispSensitivity(2, gradnumber);
    }

    // conditional sensitivity
    getdF(dF);
    double dm = getdM();
    dF.addVector(-1.0, vdot, dm);

    getdFp(dFp);
    Matrix dl;
    getdL(dl);
    dFp.addMatrixVector(-1.0, dl, p, 1.0);

    // geometric sensitivity
    Matrix dM, dg, df;
    getdM(vdot, dM);
    getdG(p, dg);
    getdF(df);
    dF.addMatrixVector(1.0, dM, du, 1.0);
    dF.addMatrixVector(1.0, dg, du, -1.0);
    dF.addMatrixVector(1.0, df, du, -1.0);

    Matrix dgt, dL, dfp;
    getdGt(v, dgt);
    getdL(p, dL);
    getdFp(dfp);
    dFp.addMatrixVector(1.0, dgt, du, 1.0);
    dFp.addMatrixVector(1.0, dL, du, 1.0);
    dFp.addMatrixVector(1.0, dfp, du, -1.0);

    // copy into the element force vector
    for (int i = 0; i < 3; i++) {
        P(numDOFs(2*i)) += dF(2*i);
        P(numDOFs(2*i)+1) += dF(2*i+1);
        P(numDOFs(2*i+1)) += dFp(i);
    }

    return P;
}
/** Purpose ------- SLAQPS computes a step of QR factorization with column pivoting of a real M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments --------- @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] n INTEGER The number of columns of the matrix A. N >= 0 @param[in] offset INTEGER The number of rows of A that have been factorized in previous steps. @param[in] nb INTEGER The number of columns to factorize. @param[out] kb INTEGER The number of columns actually factorized. @param[in,out] A REAL array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,M). @param[in,out] jpvt INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. @param[out] tau REAL array, dimension (KB) The scalar factors of the elementary reflectors. @param[in,out] vn1 REAL array, dimension (N) The vector with the partial column norms. @param[in,out] vn2 REAL array, dimension (N) The vector with the exact column norms. @param[in,out] auxv REAL array, dimension (NB) Auxiliar vector. @param[in,out] F REAL array, dimension (LDF,NB) Matrix F' = L*Y'*A. @param[in] ldf INTEGER The leading dimension of the array F. LDF >= max(1,N). @ingroup magma_sgeqp3_aux ********************************************************************/ extern "C" magma_int_t magma_slaqps(magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, float *A, magma_int_t lda, float *dA, magma_int_t ldda, magma_int_t *jpvt, float *tau, float *vn1, float *vn2, float *auxv, float *F, magma_int_t ldf, float *dF, magma_int_t lddf) { #define A(i, j) (A + (i) + (j)*(lda )) #define dA(i, j) (dA + (i) + (j)*(ldda)) #define F(i, j) (F + (i) + (j)*(ldf )) #define dF(i, j) (dF + (i) + (j)*(lddf)) float c_zero = MAGMA_S_MAKE( 0.,0.); float c_one = MAGMA_S_MAKE( 1.,0.); float c_neg_one = MAGMA_S_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; float d__1; float z__1; magma_int_t j, k, rk; float Akk; magma_int_t pvt; float temp, temp2, tol3z; magma_int_t itemp; magma_int_t lsticc; magma_int_t lastrk; lastrk = min( m, n + offset ); tol3z = magma_ssqrt( lapackf77_slamch("Epsilon")); magma_queue_t stream; magma_queue_create( &stream ); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ // subtract 1 from Fortran isamax; pvt, k are 0-based. i__1 = n-k; pvt = k + blasf77_isamax( &i__1, &vn1[k], &ione ) - 1; if (pvt != k) { if (pvt >= nb) { /* 1. 
Start copy from GPU */ magma_sgetmatrix_async( m - offset - nb, 1, dA(offset + nb, pvt), ldda, A (offset + nb, pvt), lda, stream ); } /* F gets swapped so F must be sent at the end to GPU */ i__1 = k; blasf77_sswap( &i__1, F(pvt,0), &ldf, F(k,0), &ldf ); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; vn1[pvt] = vn1[k]; vn2[pvt] = vn2[k]; if (pvt < nb) { /* no need of transfer if pivot is within the panel */ blasf77_sswap( &m, A(0, pvt), &ione, A(0, k), &ione ); } else { /* 1. Finish copy from GPU */ magma_queue_sync( stream ); /* 2. Swap as usual on CPU */ blasf77_sswap(&m, A(0, pvt), &ione, A(0, k), &ione); /* 3. Restore the GPU */ magma_ssetmatrix_async( m - offset - nb, 1, A (offset + nb, pvt), lda, dA(offset + nb, pvt), ldda, stream); } } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { #if defined(PRECISION_c) || defined(PRECISION_z) for (j = 0; j < k; ++j) { *F(k,j) = MAGMA_S_CNJG( *F(k,j) ); } #endif i__1 = m - rk; i__2 = k; blasf77_sgemv( MagmaNoTransStr, &i__1, &i__2, &c_neg_one, A(rk, 0), &lda, F(k, 0), &ldf, &c_one, A(rk, k), &ione ); #if defined(PRECISION_c) || defined(PRECISION_z) for (j = 0; j < k; ++j) { *F(k,j) = MAGMA_S_CNJG( *F(k,j) ); } #endif } /* Generate elementary reflector H(k). */ if (rk < m-1) { i__1 = m - rk; lapackf77_slarfg( &i__1, A(rk, k), A(rk + 1, k), &ione, &tau[k] ); } else { lapackf77_slarfg( &ione, A(rk, k), A(rk, k), &ione, &tau[k] ); } Akk = *A(rk, k); *A(rk, k) = c_one; /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1) { i__1 = m - rk; i__2 = n - k - 1; /* Send the vector to the GPU */ magma_ssetmatrix( i__1, 1, A(rk, k), lda, dA(rk,k), ldda ); /* Multiply on GPU */ // was CALL SGEMV( 'Conjugate transpose', M-RK+1, N-K, // TAU( K ), A( RK, K+1 ), LDA, // A( RK, K ), 1, // CZERO, F( K+1, K ), 1 ) magma_int_t i__3 = nb-k-1; magma_int_t i__4 = i__2 - i__3; magma_int_t i__5 = nb-k; magma_sgemv( MagmaConjTrans, i__1 - i__5, i__2 - i__3, tau[k], dA(rk +i__5, k+1+i__3), ldda, dA(rk +i__5, k ), ione, c_zero, dF(k+1+i__3, k ), ione ); magma_sgetmatrix_async( i__2-i__3, 1, dF(k + 1 +i__3, k), i__2, F (k + 1 +i__3, k), i__2, stream ); blasf77_sgemv( MagmaConjTransStr, &i__1, &i__3, &tau[k], A(rk, k+1), &lda, A(rk, k ), &ione, &c_zero, F(k+1, k ), &ione ); magma_queue_sync( stream ); blasf77_sgemv( MagmaConjTransStr, &i__5, &i__4, &tau[k], A(rk, k+1+i__3), &lda, A(rk, k ), &ione, &c_one, F(k+1+i__3, k ), &ione ); } /* Padding F(1:K,K) with zeros. */ for (j = 0; j < k; ++j) { *F(j, k) = c_zero; } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). */ if (k > 0) { i__1 = m - rk; i__2 = k; z__1 = MAGMA_S_NEGATE( tau[k] ); blasf77_sgemv( MagmaConjTransStr, &i__1, &i__2, &z__1, A(rk, 0), &lda, A(rk, k), &ione, &c_zero, auxv, &ione ); i__1 = k; blasf77_sgemv( MagmaNoTransStr, &n, &i__1, &c_one, F(0,0), &ldf, auxv, &ione, &c_one, F(0,k), &ione ); } /* Optimization: On the last iteration start sending F back to the GPU */ /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; blasf77_sgemm( MagmaNoTransStr, MagmaConjTransStr, &ione, &i__1, &i__2, &c_neg_one, A(rk, 0 ), &lda, F(k+1,0 ), &ldf, &c_one, A(rk, k+1), &lda ); } /* Update partial column norms. */ if (rk < lastrk) { for (j = k + 1; j < n; ++j) { if (vn1[j] != 0.) 
{ /* NOTE: The following 4 lines follow from the analysis in Lapack Working Note 176. */ temp = MAGMA_S_ABS( *A(rk,j) ) / vn1[j]; temp = max( 0., ((1. + temp) * (1. - temp)) ); d__1 = vn1[j] / vn2[j]; temp2 = temp * (d__1 * d__1); if (temp2 <= tol3z) { vn2[j] = (float) lsticc; lsticc = j; } else { vn1[j] *= magma_ssqrt(temp); } } } } *A(rk, k) = Akk; ++k; } // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; /* Send F to the GPU */ magma_ssetmatrix( i__2, *kb, F (*kb, 0), ldf, dF(*kb, 0), i__2 ); magma_sgemm( MagmaNoTrans, MagmaConjTrans, i__1, i__2, *kb, c_neg_one, dA(rk+1, 0 ), ldda, dF(*kb, 0 ), i__2, c_one, dA(rk+1, *kb), ldda ); } /* Recomputation of difficult columns. */ while( lsticc > 0 ) { itemp = (magma_int_t)(vn2[lsticc] >= 0. ? floor(vn2[lsticc] + .5) : -floor(.5 - vn2[lsticc])); i__1 = m - rk - 1; if (lsticc <= nb) vn1[lsticc] = magma_cblas_snrm2( i__1, A(rk+1,lsticc), ione ); else { /* Where is the data, CPU or GPU ? */ float r1, r2; r1 = magma_cblas_snrm2( nb-k, A(rk+1,lsticc), ione ); r2 = magma_snrm2(m-offset-nb, dA(offset + nb + 1, lsticc), ione); //vn1[lsticc] = magma_snrm2(i__1, dA(rk + 1, lsticc), ione); vn1[lsticc] = magma_ssqrt(r1*r1 + r2*r2); } /* NOTE: The computation of VN1( LSTICC ) relies on the fact that SNRM2 does not fail on vectors with norm below the value of SQRT(SLAMCH('S')) */ vn2[lsticc] = vn1[lsticc]; lsticc = itemp; } magma_queue_destroy( stream ); return MAGMA_SUCCESS; } /* magma_slaqps */
void CLargeStrainElasticity::flux_matrix(const FEMesh *mesh, const Element *element,
                                         const ElementFuncNodeIterator &node,
                                         const Flux *flux, const MasterPosition &pt,
                                         double time, SmallSystem *fluxmtx) const
{
    int (*ij2voigt)(int,int) = &SymTensorIndex::ij2voigt; // shorter func name
    SmallMatrix dU(3);    // gradient of displacement
    double Fval;          // the value of the shape function (for node)
    DoubleVec dF(3);      // and its derivative at the given pt
    bool inplane = false; // needed both in 2D & 3D versions regardless,
                          // passed to contract_C_dU_dF

#if DIM==2
    // in 2D, check if it is an in-plane eqn or a plane-flux eqn.
    static CompoundField *displacement =
        dynamic_cast<CompoundField*>(Field::getField("Displacement"));
    inplane = displacement->in_plane( mesh );
#endif

    // check for unexpected flux; flux should be a stress flux
    if (*flux != *stress_flux) {
        throw ErrProgrammingError("Unexpected flux", __FILE__, __LINE__);
    }

    // evaluate the shape function and its gradient (of node) at the given pt
    Fval = node.shapefunction( pt );       // value of the shape function
    dF[0] = node.dshapefunction( 0, pt );  // x-deriv of the shape function
    dF[1] = node.dshapefunction( 1, pt );  // y-deriv of the shape function
#if DIM==3
    dF[2] = node.dshapefunction( 2, pt );  // z-deriv of the shape function
#endif

    computeDisplacementGradient( mesh, element, pt, dU );
    const Cijkl CC = cijkl( mesh, element, pt ); // elasticity modulus

    // add the flux contributions to the stiffness matrix element;
    // k_indx is needed for the fluxmtx->stiffness_matrix_element function,
    // which does not take int k as argument
    VectorFieldIndex k_indx;

    for (SymTensorIterator ij_iter; !ij_iter.end(); ++ij_iter) {
        int k0, k1, k2, ij = ij_iter.integer();
        double nonlinear_part; // to store the sum from the nonlinear terms
        // TODO: Use tensor iterators for k0, k1, k2.

#if DIM==2
        // sum CC(i,j,k,l)*dF(l), k=0 over l=0,1, then add to stiffness_mtx
        k_indx.set( 0 );
        k0 = ij2voigt( 0,0 );
        k1 = ij2voigt( 0,1 );
        nonlinear_part = contract_C_dU_dF( CC, dU, dF, ij, 0, inplane ); // at ij, k=0
        fluxmtx->stiffness_matrix_element( ij_iter, displacement, k_indx, node )
            += CC( ij,k0 ) * dF[0] + CC( ij,k1 ) * dF[1] + nonlinear_part;

        // sum CC(i,j,k,l)*dF(l), k=1 over l=0,1, then add to stiffness_mtx
        k_indx.set( 1 );
        k0 = ij2voigt( 1,0 );
        k1 = ij2voigt( 1,1 );
        nonlinear_part = contract_C_dU_dF( CC, dU, dF, ij, 1, inplane ); // at ij, k=1
        fluxmtx->stiffness_matrix_element( ij_iter, displacement, k_indx, node )
            += CC( ij,k0 ) * dF[0] + CC( ij,k1 ) * dF[1] + nonlinear_part;
#elif DIM==3
        // sum CC(i,j,k,l)*dF(l), k=0 over l=0,1,2 then add to stiffness_mtx
        k_indx.set( 0 );
        k0 = ij2voigt( 0,0 ); k1 = ij2voigt( 0,1 ); k2 = ij2voigt( 0,2 );
        nonlinear_part = contract_C_dU_dF( CC, dU, dF, ij, 0, inplane ); // at ij, k=0
        fluxmtx->stiffness_matrix_element( ij_iter, displacement, k_indx, node )
            += CC( ij,k0 ) * dF[0] + CC( ij,k1 ) * dF[1] + CC( ij,k2 ) * dF[2] + nonlinear_part;

        // sum CC(i,j,k,l)*dF(l), k=1 over l=0,1,2 then add to stiffness_mtx
        k_indx.set( 1 );
        k0 = ij2voigt( 1,0 ); k1 = ij2voigt( 1,1 ); k2 = ij2voigt( 1,2 );
        nonlinear_part = contract_C_dU_dF( CC, dU, dF, ij, 1, inplane ); // at ij, k=1
        fluxmtx->stiffness_matrix_element( ij_iter, displacement, k_indx, node )
            += CC( ij,k0 ) * dF[0] + CC( ij,k1 ) * dF[1] + CC( ij,k2 ) * dF[2] + nonlinear_part;

        // sum CC(i,j,k,l)*dF(l), k=2 over l=0,1,2 then add to stiffness_mtx
        k_indx.set( 2 );
        k0 = ij2voigt( 2,0 ); k1 = ij2voigt( 2,1 ); k2 = ij2voigt( 2,2 );
        nonlinear_part = contract_C_dU_dF( CC, dU, dF, ij, 2, inplane ); // at ij, k=2
        fluxmtx->stiffness_matrix_element( ij_iter, displacement, k_indx, node )
            += CC( ij,k0 ) * dF[0] + CC( ij,k1 ) * dF[1] + CC( ij,k2 ) * dF[2] + nonlinear_part;
#endif

#if DIM==2
        if ( !inplane ) // now contributions from z-deriv of displacement field
        {
            Field *disp_z_deriv = displacement->out_of_plane();
            for (IteratorP k_iter = disp_z_deriv->iterator( ALL_INDICES ); !k_iter.end(); ++k_iter) {
                double diag_factor = ( k_iter.integer()==2 ? 1.0 : 0.5 );
                k2 = ij2voigt( 2, k_iter.integer() );
                fluxmtx->stiffness_matrix_element( ij_iter, disp_z_deriv, k_iter, node )
                    += diag_factor * Fval * CC( ij,k2 );
            }
        } // end of 'if (!inplane)'
#endif
    } // end of loop over ij
} // end of 'CLargeStrainElasticity::flux_matrix'
void Integration::SODE()
{
    const int NOR = Global::NOR;   // order of the Radau scheme
    double eps = Global::eps;
    double t0 = Global::t0;
    double te = Global::te;
    double step = Global::step;

    double SVi[6];
    for (int i = 0; i < 6; i++) SVi[i] = Global::SV(i, 0);

    char lineRad[300];
    vector<double> Rado(NOR);
    vector<double> h(NOR);
    vector<vector<double> > C(NOR, vector<double>(NOR));
    vector<triple> X1(NOR);
    vector<triple> V1(NOR);
    //vector<triple> Fi(NOR);
    vector<triple> A1(NOR), A2(NOR);
    vector<vector<double> > Dh(NOR, vector<double>(NOR));
    vector<triple> Alp(NOR);
    vector<triple> dF(NOR);
    double tout = t0;
    triple F0, X(SVi[0], SVi[1], SVi[2]), V(SVi[3], SVi[4], SVi[5]), Fi;

    FILE* f = fopen("Radau.txt", "r");
    fosv = fopen("sv_J2000.out", "w");
    //fprintf(fosv,"year month day hms(UTC) TDB(sec) interval(days) X Y Z Vx Vy Vz \n");
    foel = fopen("elts_J2000.out", "w");
    //fprintf(foel,"year month day hms(UTC) TDB(sec) interval(days) A E I NODE W M \n");
    foSvEcl = fopen("sv_ECLIPJ2000.out", "w");
    foelEcl = fopen("elts_ECLIPJ2000.out", "w");
    fosvR = fopen("sv_IAUplanet.out", "w");
    //fprintf(fosvR,"year month day hms(UTC) TDB(sec) interval(days) X Y Z Vx Vy Vz \n");
    foBL = fopen("BL.out", "w");
    //fprintf(foBL,"year month day hms(UTC) TDB(sec) interval(days) L B H \n");
    foNEU = fopen("NEU.out", "w");
    //fprintf(foNEU,"year month day hms(UTC) TDB(sec) interval(days) N E U \n");
    fvisi = fopen("visibility.out", "w");
    fo3bg = fopen("3body_geodetic.out", "w");

    // Find the longitude of the ascending node at t0.
    // if(Global::b_out_elts_planet || Global::b_out_sv_planet || Global::b_out_el_IAUPlanet==true){
    double poss[6];
    double lt1, dlt;
    triple Zorb;
    double Zpl[3];
    double TimeNode = t0;
    spkacs_c(Global::IDC, t0, "J2000", "NONE", 10, poss, &lt1, &dlt);
    triple Xv = triple(poss[0], poss[1], poss[2]);
    triple Vv = triple(poss[3], poss[4], poss[5]);
    Xv = Xv / Xv.getAbs();
    Vv = Vv / Vv.getAbs();
    Zorb = Xv&Vv / sin(triple::getAngle(Xv, Vv));
    double Z[3] = { Zorb[0], Zorb[1], Zorb[2] };
    trpos(t0, 1, Global::IDC, Z, Zpl);
    triple ZorbP = triple(Zpl);
    triple PolP = triple(0.0, 0.0, 1.0);
    triple Node = ZorbP&PolP;
    Node = Node / Node.getAbs();
    double NodeA = atan2(Node[1], Node[0]);
    if (NodeA < 0.0) NodeA = NodeA + 2 * pi;

    // Planetocentric gravitational constant.
    double mu = Global::mu;

    // Read the Radau spacings for the chosen order from Radau.txt.
    int ii = 1;
    int jj = 0;
    while (!feof(f)) {
        fscanf(f, "%s\n", lineRad);
        if (ii > Misc::sum(NOR - 1) && ii <= Misc::sum(NOR)) {
            Rado[jj] = atof(lineRad);
            jj++;
        }
        ii++;
    }

    // Integrate backwards in time if the end epoch precedes the start epoch.
    if (te < t0) {
        step = -step;
        Global::Discr = -Global::Discr;
    }
    //System::Threading::Thread Worker ^ = gcnew System::Threading::Thread(delegate() { });

    // The integrator proper:
    while (abs(te - t0) != 0e0) {
        if (abs(te - t0) < 1e-12) break;
        // Do not step past the end epoch.
        if (step > 0) { if (t0 + step > te) step = te - t0; }
        else          { if (t0 + step < te) step = te - t0; }

        for (int i = 0; i < NOR; i++) h[i] = step*Rado[i];
        for (int k = 0; k < NOR; k++) {
            Dh[k][k] = h[k];
            for (int j = 0; j < k; j++) { Dh[k][j] = h[k] - h[j]; }
        }

        // Stirling numbers (conversion from divided differences to polynomial coefficients).
        for (int i = 0; i < NOR; i++) {
            C[i][i] = 1.0;
            if (i > 0) C[i][0] = -h[i - 1] * C[i - 1][0];
            for (int j = 1; j < i; j++) C[i][j] = C[i - 1][j - 1] - h[i - 1] * C[i - 1][j];
        }

        // Predictor: evaluate the force at the substeps and form divided differences.
        F0 = Force::force_SODE(t0, X, V);
        for (int j = 0; j < NOR; j++) {
            X1[j] = X + V*h[j] + F0*h[j] * h[j] / 2;
            V1[j] = V + F0*h[j];
            Fi = Force::force_SODE(t0 + h[j], X1[j], V1[j]);
            dF[j] = Fi - F0;
        }
        Alp[0] = dF[0] / h[0];
        for (int k = 1; k < NOR; k++) {
            Alp[k] = dF[k] / h[k];
            for (int j = 0; j < k; j++) { Alp[k] = (Alp[k] - Alp[j]) / Dh[k][j]; }
        }
        for (int k = 0; k < NOR; k++) {
            A1[k] = triple(0., 0., 0.);
            for (int i = k; i < NOR; i++) A1[k] = A1[k] + Alp[i] * C[i][k];
        }

        // Corrector iterations.
        int ij = 0;
        for (;;) {
            for (int k = 0; k < NOR; k++) {
                triple v2 = A1[0] * 0.5;
                triple x2 = A1[0] * (1.0 / 6.0);
                for (int i = 1; i < NOR; i++) {
                    v2 = v2 + A1[i] * (pow(h[k], i) / (i + 2));
                    x2 = x2 + A1[i] * (pow(h[k], i) / (i + 2) / (i + 3));
                }
                v2 = V1[k] + v2*(pow(h[k], 2));
                x2 = X1[k] + x2*(pow(h[k], 3));
                Fi = Force::force_SODE(t0 + h[k], x2, v2);
                dF[k] = Fi - F0;
            }
            Alp[0] = dF[0] / h[0];
            for (int k = 1; k < NOR; k++) {
                Alp[k] = dF[k] / h[k];
                for (int j = 0; j < k; j++) { Alp[k] = (Alp[k] - Alp[j]) / Dh[k][j]; }
            }
            for (int k = 0; k < NOR; k++) {
                A2[k] = triple(0.0, 0., 0.);
                for (int i = k; i < NOR; i++) A2[k] = A2[k] + Alp[i] * C[i][k];
            }

            // Convergence check on the change of the coefficients.
            vector<triple> dV(NOR);
            double max = 0;
            vector<double> mdV(NOR);
            for (int i = 0; i < NOR; i++) {
                dV[i] = A2[i] - A1[i];
                mdV[i] = dV[i].getAbs();
                if (max < abs(mdV[i])) max = abs(mdV[i]);
            }
            if (max < eps && ij > 2) goto exit;
            if (ij > 7) goto exit;
            A1 = A2;
            ij++;
        }
    exit:
        triple dv = A1[0] * 0.5;
        triple dx = A1[0] * (1.0 / 6.0);
        for (int i = 1; i < NOR; i++) {
            dv = dv + A1[i] * (pow(step, i) / (i + 2.0));
            dx = dx + A1[i] * (pow(step, i) / (i + 2.0) / (i + 3.0));
        }

        // Write out intermediate results at the output cadence Global::Discr.
        //int ik=int(t0-ts)%Global::Discr;
        if (Global::Discr != 0) {
            while (abs(tout - t0) < abs(step)) {
                double hout = tout - t0;
                triple dvo = A1[0] * 0.5;
                triple dxo = A1[0] * (1.0 / 6.0);
                for (int i = 1; i < NOR; i++) {
                    dvo = dvo + A1[i] * (pow(hout, i) / (i + 2.0));
                    dxo = dxo + A1[i] * (pow(hout, i) / (i + 2.0) / (i + 3.0));
                }
                triple Xout = X + V*hout + F0*(hout*hout / 2.0) + dxo*(hout*hout*hout);
                triple Vout = V + F0*hout + dvo*(hout*hout);
                write(tout, Xout, Vout);
                tout += Global::Discr;
            }
        }

        // Advance the state by one step.
        X = X + V*step + F0*(step*step / 2.0) + dx*(step*step*step);
        V = V + F0*step + dv*(step*step);
        t0 += step;
    }

    write(t0, X, V);

    fclose(fosv);
    fclose(foel);
    fclose(foSvEcl);
    fclose(foelEcl);
    fclose(fosvR);
    //fclose(foelR);
    //fclose(fosvp);
    //fclose(foelp);
    fclose(foBL);
    fclose(foNEU);
    fclose(fvisi);
    fclose(fo3bg);
}
extern "C" magma_int_t magma_slaqps(magma_int_t m, magma_int_t n, magma_int_t offset, magma_int_t nb, magma_int_t *kb, float *A, magma_int_t lda, float *dA, magma_int_t ldda, magma_int_t *jpvt, float *tau, float *vn1, float *vn2, float *auxv, float *F, magma_int_t ldf, float *dF, magma_int_t lddf) { /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 Purpose ======= SLAQPS computes a step of QR factorization with column pivoting of a real M-by-N matrix A by using Blas-3. It tries to factorize NB columns from A starting from the row OFFSET+1, and updates all of the matrix with Blas-3 xGEMM. In some cases, due to catastrophic cancellations, it cannot factorize NB columns. Hence, the actual number of factorized columns is returned in KB. Block A(1:OFFSET,1:N) is accordingly pivoted, but not factorized. Arguments ========= M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0 OFFSET (input) INTEGER The number of rows of A that have been factorized in previous steps. NB (input) INTEGER The number of columns to factorize. KB (output) INTEGER The number of columns actually factorized. A (input/output) REAL array, dimension (LDA,N) On entry, the M-by-N matrix A. On exit, block A(OFFSET+1:M,1:KB) is the triangular factor obtained and block A(1:OFFSET,1:N) has been accordingly pivoted, but no factorized. The rest of the matrix, block A(OFFSET+1:M,KB+1:N) has been updated. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). JPVT (input/output) INTEGER array, dimension (N) JPVT(I) = K <==> Column K of the full matrix A has been permuted into position I in AP. TAU (output) REAL array, dimension (KB) The scalar factors of the elementary reflectors. VN1 (input/output) DOUBLE PRECISION array, dimension (N) The vector with the partial column norms. VN2 (input/output) DOUBLE PRECISION array, dimension (N) The vector with the exact column norms. AUXV (input/output) REAL array, dimension (NB) Auxiliar vector. F (input/output) REAL array, dimension (LDF,NB) Matrix F' = L*Y'*A. LDF (input) INTEGER The leading dimension of the array F. LDF >= max(1,N). ===================================================================== */ #define A(i, j) (A + (i) + (j)*(lda )) #define dA(i, j) (dA + (i) + (j)*(ldda)) #define F(i, j) (F + (i) + (j)*(ldf )) #define dF(i, j) (dF + (i) + (j)*(lddf)) float c_zero = MAGMA_S_MAKE( 0.,0.); float c_one = MAGMA_S_MAKE( 1.,0.); float c_neg_one = MAGMA_S_MAKE(-1.,0.); magma_int_t ione = 1; magma_int_t i__1, i__2; float d__1; float z__1; magma_int_t j, k, rk; float Akk; magma_int_t pvt; float temp, temp2, tol3z; magma_int_t itemp; magma_int_t lsticc; magma_int_t lastrk; lastrk = min( m, n + offset ); tol3z = magma_ssqrt( lapackf77_slamch("Epsilon")); magma_queue_t stream; magma_queue_create( &stream ); lsticc = 0; k = 0; while( k < nb && lsticc == 0 ) { rk = offset + k; /* Determine ith pivot column and swap if necessary */ // Fortran: pvt, k, isamax are all 1-based; subtract 1 from k. // C: pvt, k, isamax are all 0-based; don't subtract 1. pvt = k + cblas_isamax( n-k, &vn1[k], ione ); if (pvt != k) { if (pvt >= nb) { /* 1. 
Start copy from GPU */ magma_sgetmatrix_async( m - offset - nb, 1, dA(offset + nb, pvt), ldda, A (offset + nb, pvt), lda, stream ); } /* F gets swapped so F must be sent at the end to GPU */ i__1 = k; blasf77_sswap( &i__1, F(pvt,0), &ldf, F(k,0), &ldf ); itemp = jpvt[pvt]; jpvt[pvt] = jpvt[k]; jpvt[k] = itemp; vn1[pvt] = vn1[k]; vn2[pvt] = vn2[k]; if (pvt < nb){ /* no need of transfer if pivot is within the panel */ blasf77_sswap( &m, A(0, pvt), &ione, A(0, k), &ione ); } else { /* 1. Finish copy from GPU */ magma_queue_sync( stream ); /* 2. Swap as usual on CPU */ blasf77_sswap(&m, A(0, pvt), &ione, A(0, k), &ione); /* 3. Restore the GPU */ magma_ssetmatrix_async( m - offset - nb, 1, A (offset + nb, pvt), lda, dA(offset + nb, pvt), ldda, stream); } } /* Apply previous Householder reflectors to column K: A(RK:M,K) := A(RK:M,K) - A(RK:M,1:K-1)*F(K,1:K-1)'. Optimization: multiply with beta=0; wait for vector and subtract */ if (k > 0) { #if defined(PRECISION_c) || defined(PRECISION_z) for (j = 0; j < k; ++j){ *F(k,j) = MAGMA_S_CNJG( *F(k,j) ); } #endif i__1 = m - rk; i__2 = k; blasf77_sgemv( MagmaNoTransStr, &i__1, &i__2, &c_neg_one, A(rk, 0), &lda, F(k, 0), &ldf, &c_one, A(rk, k), &ione ); #if defined(PRECISION_c) || defined(PRECISION_z) for (j = 0; j < k; ++j) { *F(k,j) = MAGMA_S_CNJG( *F(k,j) ); } #endif } /* Generate elementary reflector H(k). */ if (rk < m-1) { i__1 = m - rk; lapackf77_slarfg( &i__1, A(rk, k), A(rk + 1, k), &ione, &tau[k] ); } else { lapackf77_slarfg( &ione, A(rk, k), A(rk, k), &ione, &tau[k] ); } Akk = *A(rk, k); *A(rk, k) = c_one; /* Compute Kth column of F: Compute F(K+1:N,K) := tau(K)*A(RK:M,K+1:N)'*A(RK:M,K) on the GPU */ if (k < n-1) { i__1 = m - rk; i__2 = n - k - 1; /* Send the vector to the GPU */ magma_ssetmatrix( i__1, 1, A(rk, k), lda, dA(rk,k), ldda ); /* Multiply on GPU */ // was CALL SGEMV( 'Conjugate transpose', M-RK+1, N-K, // TAU( K ), A( RK, K+1 ), LDA, // A( RK, K ), 1, // CZERO, F( K+1, K ), 1 ) magma_int_t i__3 = nb-k-1; magma_int_t i__4 = i__2 - i__3; magma_int_t i__5 = nb-k; magma_sgemv( MagmaTrans, i__1 - i__5, i__2 - i__3, tau[k], dA(rk +i__5, k+1+i__3), ldda, dA(rk +i__5, k ), ione, c_zero, dF(k+1+i__3, k ), ione ); magma_sgetmatrix_async( i__2-i__3, 1, dF(k + 1 +i__3, k), i__2, F (k + 1 +i__3, k), i__2, stream ); blasf77_sgemv( MagmaTransStr, &i__1, &i__3, &tau[k], A(rk, k+1), &lda, A(rk, k ), &ione, &c_zero, F(k+1, k ), &ione ); magma_queue_sync( stream ); blasf77_sgemv( MagmaTransStr, &i__5, &i__4, &tau[k], A(rk, k+1+i__3), &lda, A(rk, k ), &ione, &c_one, F(k+1+i__3, k ), &ione ); } /* Padding F(1:K,K) with zeros. */ for (j = 0; j < k; ++j) { *F(j, k) = c_zero; } /* Incremental updating of F: F(1:N,K) := F(1:N,K) - tau(K)*F(1:N,1:K-1)*A(RK:M,1:K-1)'*A(RK:M,K). */ if (k > 0) { i__1 = m - rk; i__2 = k; z__1 = MAGMA_S_NEGATE( tau[k] ); blasf77_sgemv( MagmaTransStr, &i__1, &i__2, &z__1, A(rk, 0), &lda, A(rk, k), &ione, &c_zero, auxv, &ione ); i__1 = k; blasf77_sgemv( MagmaNoTransStr, &n, &i__1, &c_one, F(0,0), &ldf, auxv, &ione, &c_one, F(0,k), &ione ); } /* Optimization: On the last iteration start sending F back to the GPU */ /* Update the current row of A: A(RK,K+1:N) := A(RK,K+1:N) - A(RK,1:K)*F(K+1:N,1:K)'. */ if (k < n-1) { i__1 = n - k - 1; i__2 = k + 1; blasf77_sgemm( MagmaNoTransStr, MagmaTransStr, &ione, &i__1, &i__2, &c_neg_one, A(rk, 0 ), &lda, F(k+1,0 ), &ldf, &c_one, A(rk, k+1), &lda ); } /* Update partial column norms. */ if (rk < lastrk) { for (j = k + 1; j < n; ++j) { if (vn1[j] != 0.) 
{ /* NOTE: The following 4 lines follow from the analysis in Lapack Working Note 176. */ temp = MAGMA_S_ABS( *A(rk,j) ) / vn1[j]; temp = max( 0., ((1. + temp) * (1. - temp)) ); d__1 = vn1[j] / vn2[j]; temp2 = temp * (d__1 * d__1); if (temp2 <= tol3z) { vn2[j] = (float) lsticc; lsticc = j; } else { vn1[j] *= magma_ssqrt(temp); } } } } *A(rk, k) = Akk; ++k; } // leave k as the last column done --k; *kb = k + 1; rk = offset + *kb - 1; /* Apply the block reflector to the rest of the matrix: A(OFFSET+KB+1:M,KB+1:N) := A(OFFSET+KB+1:M,KB+1:N) - A(OFFSET+KB+1:M,1:KB)*F(KB+1:N,1:KB)' */ if (*kb < min(n, m - offset)) { i__1 = m - rk - 1; i__2 = n - *kb; /* Send F to the GPU */ magma_ssetmatrix( i__2, *kb, F (*kb, 0), ldf, dF(*kb, 0), i__2 ); magma_sgemm( MagmaNoTrans, MagmaTrans, i__1, i__2, *kb, c_neg_one, dA(rk+1, 0 ), ldda, dF(*kb, 0 ), i__2, c_one, dA(rk+1, *kb), ldda ); } /* Recomputation of difficult columns. */ while( lsticc > 0 ) { itemp = (magma_int_t)(vn2[lsticc] >= 0. ? floor(vn2[lsticc] + .5) : -floor(.5 - vn2[lsticc])); i__1 = m - rk - 1; if (lsticc <= nb) vn1[lsticc] = cblas_snrm2(i__1, A(rk + 1, lsticc), ione); else { /* Where is the data, CPU or GPU ? */ float r1, r2; r1 = cblas_snrm2(nb-k, A(rk + 1, lsticc), ione); r2 = magma_snrm2(m-offset-nb, dA(offset + nb + 1, lsticc), ione); //vn1[lsticc] = magma_snrm2(i__1, dA(rk + 1, lsticc), ione); vn1[lsticc] = magma_ssqrt(r1*r1+r2*r2); } /* NOTE: The computation of VN1( LSTICC ) relies on the fact that SNRM2 does not fail on vectors with norm below the value of SQRT(SLAMCH('S')) */ vn2[lsticc] = vn1[lsticc]; lsticc = itemp; } magma_queue_destroy( stream ); return MAGMA_SUCCESS; } /* magma_slaqps */
static void cmd_cp (char *args) { bool onepass; get_option (args, onepass); char *src, *dst; if (!get2args ("cp", args, src, dst)) return; csRef<iStringArray> fl (VFS->FindFiles (src)); size_t i; for (i = 0; i < fl->GetSize () ; i++) { char destname [VFS_MAX_PATH_LEN + 1]; src = (char *)fl->Get (i); if (fl->GetSize () > 1) { size_t dirlen = strlen (src); if (dirlen) dirlen--; while (dirlen && src [dirlen - 1] != VFS_PATH_SEPARATOR) dirlen--; strcpy (destname, dst); if (destname [0]) if (destname [strlen (destname) - 1] != VFS_PATH_SEPARATOR) strcat (destname, "/"); strcat (destname, src + dirlen); csPrintf ("%s -> %s\n", src, destname); dst = destname; } if (onepass) { csRef<iDataBuffer> data (VFS->ReadFile (src)); if (!data) { csPrintfErr ("cp: cannot read file \"%s\"\n", src); return; } if (!VFS->WriteFile (dst, **data, data->GetSize ())) csPrintfErr ("cp: error writing to file \"%s\"\n", dst); } else { csRef<iFile> dF (VFS->Open (dst, VFS_FILE_WRITE)); if (!dF) { csPrintfErr ("cp: cannot open destination file \"%s\"\n", dst); return; } csRef<iFile> sF (VFS->Open (src, VFS_FILE_READ)); if (!sF) { csPrintfErr ("cp: cannot open source file \"%s\"\n", src); return; } while (!sF->AtEOF ()) { char buff [123]; size_t len = sF->Read (buff, sizeof (buff)); if (dF->Write (buff, len) != len) { csPrintfErr ("cp: error writing to file \"%s\"\n", dst); break; } } } } }
int Look_txt() { TCHAR filter[] = TEXT("Ghemical MD results File (*.txt)\0*.txt\0") TEXT("All Files (*.*)\0*.*\0"); TCHAR fpath[1024]; TCHAR filename[1024]; sprintf(filename, "\0"); { DWORD nFilterIndex; vector<string> names; vector<string> *pnames = &names; vector<vector<double> > vectors; vectors.reserve(2000000); while (OpenFileDlg(0, filter, fpath, nFilterIndex) == S_OK) { ReadDatFile(NULL, fpath, filename, &vectors, pnames); pnames = NULL; printf("\nfilename %s\n\n", filename); int cols = names.size(); int rows = vectors.size(); #if WRITE_LOCKED_FORCES int cMom = 4 - 1; int cVx = 5 - 1; int cFxup = 14 - 1; int cFxdw = 17 - 1; int cVxup = 8 - 1; int cVxdw = 11 - 1; #endif #if WRITE_WORKED_FORCES int cMom = 4 - 1; int cVx = 5 - 1; int cVxup = 14 - 1; int cVxdw = 17 - 1; int cVx_wk_up = 8 - 1; int cVx_wk_dw = 11 - 1; int cFx_wk_up = 20 - 1; int cFx_wk_dw = 23 - 1; #endif vector<double> means(cols, 0.0); printf("vectors.size() = %d\n",rows); printf("names.size() = %d\n", cols); for (vector<vector<double> >::iterator it = vectors.begin(); it != vectors.end(); it++) { for (int c = 0; c < cols; c++) { means[c] += (*it).operator [](c); } } for (int c = 0; c < cols; c++) { means[c] /= rows; printf("mean(%s) = %f\n", names[c].c_str(), means[c]); } #if WRITE_LOCKED_FORCES || WRITE_WORKED_FORCES int r0 = 0; cout << "enter r0\n"; cin >> r0; #endif #if WRITE_LOCKED_FORCES vector<double> dF(rows-r0); for (int r = r0; r < rows; r++) { dF[r-r0] = vectors[r][cFxup] - vectors[r][cFxdw]; } Statistika (dF, "dF"); vector<double> Mom(rows-r0); for (r = r0; r < rows; r++) { Mom[r-r0] = vectors[r][cMom]; } Statistika (Mom, "Mom"); vector<double> dV(rows-r0); for (r = r0; r < rows; r++) { dV[r-r0] = vectors[r][cVxup] - vectors[r][cVxdw]; } Statistika (dV, "dV"); vector<double> Vx(rows-r0); for (r = r0; r < rows; r++) { Vx[r-r0] = vectors[r][cVx]; } Statistika (Vx, "Vx"); #endif #if WRITE_WORKED_FORCES vector<double> dF_wk(rows-r0); for (int r = r0; r < rows; r++) { dF_wk[r-r0] = vectors[r][cFx_wk_up] - vectors[r][cFx_wk_dw]; } Statistika (dF_wk, "dF_wk"); vector<double> dV_wk(rows-r0); for (r = r0; r < rows; r++) { dV_wk[r-r0] = vectors[r][cVx_wk_up] - vectors[r][cVx_wk_dw]; } Statistika (dV_wk, "dV_wk"); //if (!worked[n1]) vector<double> Mom(rows-r0); for (r = r0; r < rows; r++) { Mom[r-r0] = vectors[r][cMom]; } Statistika (Mom, "Mom"); vector<double> dV(rows-r0); for (r = r0; r < rows; r++) { dV[r-r0] = vectors[r][cVxup] - vectors[r][cVxdw]; } Statistika (dV, "dV"); vector<double> Vx(rows-r0); for (r = r0; r < rows; r++) { Vx[r-r0] = vectors[r][cVx]; } Statistika (Vx, "Vx"); #endif } } /*else { DWORD nFilterIndex; if (SaveFileDlg(0, filename, filter, nFilterIndex) == S_OK) { SetDlgItemText(ref->hDlg,IDC_EDIT_TRAJFILE2, filename); } }*/ printf("Hello World!\n"); return 0; }