int RestrictedMultiVectorWrapper::restrict_comm(Teuchos::RCP<Epetra_MultiVector> input_mv){
  input_mv_=input_mv;

  /* Pull the Input Matrix Info */
  const Epetra_MpiComm *InComm = dynamic_cast<const Epetra_MpiComm*>(& input_mv_->Comm());
  const Epetra_BlockMap *InMap = dynamic_cast<const Epetra_BlockMap*>(& input_mv_->Map());

  if(!InComm || !InMap) return -1;

  if(!subcomm_is_set){
    /* Build the Split Communicators, If Needed */
    int color;
    if(InMap->NumMyElements()) color=1;
    else color=MPI_UNDEFINED;
    MPI_Comm_split(InComm->Comm(),color,InComm->MyPID(),&MPI_SubComm_);
  }
  else{
    /* Sanity check the user-provided subcomm - return an error if the
       MPI_SubComm_ does not include a processor with data. */
    if (input_mv->MyLength() && MPI_SubComm_ == MPI_COMM_NULL) return(-2);
  }

  /* Mark active processors */
  if(MPI_SubComm_ == MPI_COMM_NULL) proc_is_active=false;
  else proc_is_active=true;

  int Nrows=InMap->NumGlobalElements();

  if(proc_is_active){
    RestrictedComm_=new Epetra_MpiComm(MPI_SubComm_);

    /* Build the Restricted Maps */
    ResMap_ = new Epetra_BlockMap(Nrows,InMap->NumMyElements(),InMap->MyGlobalElements(),
                                  InMap->ElementSizeList(),InMap->IndexBase(),*RestrictedComm_);

    /* Allocate the restricted multivector as a view of the original data */
    double *A;
    int LDA;
    input_mv_->ExtractView(&A,&LDA);
    restricted_mv_ = Teuchos::rcp(new Epetra_MultiVector(View,*ResMap_,A,LDA,input_mv_->NumVectors()));
  }
  return 0; // declared int, so report success explicitly
}/*end restrict_comm*/
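The heart of restrict_comm is the color-based MPI_Comm_split: ranks that own data pass color 1, while ranks with no local elements pass MPI_UNDEFINED and get MPI_COMM_NULL back, so the resulting subcommunicator contains only the active processors. A minimal standalone sketch of that pattern follows; the has_data predicate is hypothetical, standing in for InMap->NumMyElements().

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Hypothetical predicate: pretend only even ranks own data.
  const bool has_data = (rank % 2 == 0);

  // Ranks without data pass MPI_UNDEFINED and receive MPI_COMM_NULL,
  // so the subcommunicator spans only the "active" processors.
  const int color = has_data ? 1 : MPI_UNDEFINED;
  MPI_Comm sub_comm;
  MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub_comm);

  if (sub_comm != MPI_COMM_NULL) {
    int sub_rank, sub_size;
    MPI_Comm_rank(sub_comm, &sub_rank);
    MPI_Comm_size(sub_comm, &sub_size);
    std::printf("world rank %d -> sub rank %d of %d\n", rank, sub_rank, sub_size);
    MPI_Comm_free(&sub_comm);
  }
  MPI_Finalize();
  return 0;
}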
void Albany::ModelEvaluator::evalModel(const InArgs& inArgs, const OutArgs& outArgs) const
{
  Teuchos::TimeMonitor Timer(*timer); //start timer

  //
  // Get the input arguments
  //
  Teuchos::RCP<const Epetra_Vector> x = inArgs.get_x();
  Teuchos::RCP<const Epetra_Vector> x_dot;
  Teuchos::RCP<const Epetra_Vector> x_dotdot;
  double alpha = 0.0;
  double omega = 0.0;
  double beta = 1.0;
  double curr_time = 0.0;
  x_dot = inArgs.get_x_dot();
  x_dotdot = inArgs.get_x_dotdot();
  if (x_dot != Teuchos::null || x_dotdot != Teuchos::null) {
    alpha = inArgs.get_alpha();
    omega = inArgs.get_omega();
    beta = inArgs.get_beta();
    curr_time = inArgs.get_t();
  }
  for (int i=0; i<num_param_vecs; i++) {
    Teuchos::RCP<const Epetra_Vector> p = inArgs.get_p(i);
    if (p != Teuchos::null) {
      for (unsigned int j=0; j<sacado_param_vec[i].size(); j++)
        sacado_param_vec[i][j].baseValue = (*p)[j];
    }
  }
  for (int i=0; i<num_dist_param_vecs; i++) {
    Teuchos::RCP<const Epetra_Vector> p = inArgs.get_p(i+num_param_vecs);
    if (p != Teuchos::null) {
      *(distParamLib->get(dist_param_names[i])->vector()) = *p;
    }
  }

  //
  // Get the output arguments
  //
  EpetraExt::ModelEvaluator::Evaluation<Epetra_Vector> f_out = outArgs.get_f();
  Teuchos::RCP<Epetra_Operator> W_out = outArgs.get_W();

  // Cast W to a CrsMatrix, throw an exception if this fails
  Teuchos::RCP<Epetra_CrsMatrix> W_out_crs;
  if (W_out != Teuchos::null)
    W_out_crs = Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(W_out, true);

  int test_var = 0;
  if(test_var != 0){
    std::cout << "The current solution length is: " << x->MyLength() << std::endl;
    x->Print(std::cout);
  }

  // Get preconditioner operator, if requested
  Teuchos::RCP<Epetra_Operator> WPrec_out;
  if (outArgs.supports(OUT_ARG_WPrec)) WPrec_out = outArgs.get_WPrec();

  //
  // Compute the functions
  //
  bool f_already_computed = false;

  // W matrix
  if (W_out != Teuchos::null) {
    app->computeGlobalJacobian(alpha, beta, omega, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, f_out.get(), *W_out_crs);
    f_already_computed=true;
    if(test_var != 0){
      //std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
      //f_out->Print(std::cout);
      std::cout << "The current Jacobian length is: " << W_out_crs->NumGlobalRows() << std::endl;
      W_out_crs->Print(std::cout);
    }
  }

  if (WPrec_out != Teuchos::null) {
    app->computeGlobalJacobian(alpha, beta, omega, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, f_out.get(), *Extra_W_crs);
    f_already_computed=true;
    if(test_var != 0){
      //std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
      //f_out->Print(std::cout);
      std::cout << "The current preconditioner length is: " << Extra_W_crs->NumGlobalRows() << std::endl;
      Extra_W_crs->Print(std::cout);
    }
    app->computeGlobalPreconditioner(Extra_W_crs, WPrec_out);
  }

  // scalar df/dp
  for (int i=0; i<num_param_vecs; i++) {
    Teuchos::RCP<Epetra_MultiVector> dfdp_out = outArgs.get_DfDp(i).getMultiVector();
    if (dfdp_out != Teuchos::null) {
      Teuchos::Array<int> p_indexes = outArgs.get_DfDp(i).getDerivativeMultiVector().getParamIndexes();
      Teuchos::RCP<ParamVec> p_vec;
      if (p_indexes.size() == 0)
        p_vec = Teuchos::rcp(&sacado_param_vec[i],false);
      else {
        p_vec = Teuchos::rcp(new ParamVec);
        for (int j=0; j<p_indexes.size(); j++)
          p_vec->addParam(sacado_param_vec[i][p_indexes[j]].family,
                          sacado_param_vec[i][p_indexes[j]].baseValue);
      }
      app->computeGlobalTangent(0.0, 0.0, 0.0, curr_time, false, x_dot.get(), x_dotdot.get(), *x,
                                sacado_param_vec, p_vec.get(),
                                NULL, NULL, NULL, NULL, f_out.get(), NULL, dfdp_out.get());
      f_already_computed=true;
      if(test_var != 0){
        std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
        f_out->Print(std::cout);
      }
    }
  }

  // distributed df/dp
  for (int i=0; i<num_dist_param_vecs; i++) {
    Teuchos::RCP<Epetra_Operator> dfdp_out = outArgs.get_DfDp(i+num_param_vecs).getLinearOp();
    if (dfdp_out != Teuchos::null) {
      Teuchos::RCP<DistributedParameterDerivativeOp> dfdp_op =
        Teuchos::rcp_dynamic_cast<DistributedParameterDerivativeOp>(dfdp_out);
      dfdp_op->set(curr_time, x_dot, x_dotdot, x, Teuchos::rcp(&sacado_param_vec,false));
    }
  }

  // f
  if (app->is_adjoint) {
    Derivative f_deriv(f_out, DERIV_TRANS_MV_BY_ROW);
    int response_index = 0; // need to add capability for sending this in
    app->evaluateResponseDerivative(response_index, curr_time, x_dot.get(), x_dotdot.get(), *x,
                                    sacado_param_vec, NULL,
                                    NULL, f_deriv, Derivative(), Derivative(), Derivative());
  }
  else {
    if (f_out != Teuchos::null && !f_already_computed) {
      app->computeGlobalResidual(curr_time, x_dot.get(), x_dotdot.get(), *x,
                                 sacado_param_vec, *f_out);
      if(test_var != 0){
        std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
        f_out->Print(std::cout);
      }
    }
  }

  // Response functions
  for (int i=0; i<outArgs.Ng(); i++) {
    Teuchos::RCP<Epetra_Vector> g_out = outArgs.get_g(i);
    bool g_computed = false;

    Derivative dgdx_out = outArgs.get_DgDx(i);
    Derivative dgdxdot_out = outArgs.get_DgDx_dot(i);
    Derivative dgdxdotdot_out = outArgs.get_DgDx_dotdot(i);

    // dg/dx, dg/dxdot
    if (!dgdx_out.isEmpty() || !dgdxdot_out.isEmpty() || !dgdxdotdot_out.isEmpty() ) {
      app->evaluateResponseDerivative(i, curr_time, x_dot.get(), x_dotdot.get(), *x,
                                      sacado_param_vec, NULL,
                                      g_out.get(), dgdx_out,
                                      dgdxdot_out, dgdxdotdot_out, Derivative());
      g_computed = true;
    }

    // dg/dp
    for (int j=0; j<num_param_vecs; j++) {
      Teuchos::RCP<Epetra_MultiVector> dgdp_out = outArgs.get_DgDp(i,j).getMultiVector();
      if (dgdp_out != Teuchos::null) {
        Teuchos::Array<int> p_indexes = outArgs.get_DgDp(i,j).getDerivativeMultiVector().getParamIndexes();
        Teuchos::RCP<ParamVec> p_vec;
        if (p_indexes.size() == 0)
          p_vec = Teuchos::rcp(&sacado_param_vec[j],false);
        else {
          p_vec = Teuchos::rcp(new ParamVec);
          for (int k=0; k<p_indexes.size(); k++)
            p_vec->addParam(sacado_param_vec[j][p_indexes[k]].family,
                            sacado_param_vec[j][p_indexes[k]].baseValue);
        }
        app->evaluateResponseTangent(i, alpha, beta, omega, curr_time, false,
                                     x_dot.get(), x_dotdot.get(), *x,
                                     sacado_param_vec, p_vec.get(),
                                     NULL, NULL, NULL, NULL, g_out.get(), NULL, dgdp_out.get());
        g_computed = true;
      }
    }

    // Need to handle dg/dp for distributed p
    if (g_out != Teuchos::null && !g_computed)
      app->evaluateResponse(i, curr_time, x_dot.get(), x_dotdot.get(), *x, sacado_param_vec, *g_out);
  }

  //
  // Stochastic Galerkin
  //
#ifdef ALBANY_SG_MP
  InArgs::sg_const_vector_t x_sg = inArgs.get_x_sg();
  if (x_sg != Teuchos::null) {
    app->init_sg(inArgs.get_sg_basis(), inArgs.get_sg_quadrature(),
                 inArgs.get_sg_expansion(), x_sg->productComm());
    InArgs::sg_const_vector_t x_dot_sg = inArgs.get_x_dot_sg();
    InArgs::sg_const_vector_t x_dotdot_sg = inArgs.get_x_dotdot_sg();
    if (x_dot_sg != Teuchos::null || x_dotdot_sg != Teuchos::null) {
      alpha = inArgs.get_alpha();
      omega = inArgs.get_omega();
      beta = inArgs.get_beta();
      curr_time = inArgs.get_t();
    }

    InArgs::sg_const_vector_t epetra_p_sg = inArgs.get_p_sg(0);
    Teuchos::Array<int> p_sg_index;
    for (int i=0; i<num_param_vecs; i++) {
      InArgs::sg_const_vector_t p_sg = inArgs.get_p_sg(i);
      if (p_sg != Teuchos::null) {
        p_sg_index.push_back(i);
        for (int j=0; j<p_sg_vals[i].size(); j++) {
          int num_sg_blocks = p_sg->size();
          p_sg_vals[i][j].reset(app->getStochasticExpansion(), num_sg_blocks);
          p_sg_vals[i][j].copyForWrite();
          for (int l=0; l<num_sg_blocks; l++) {
            p_sg_vals[i][j].fastAccessCoeff(l) = (*p_sg)[l][j];
          }
        }
      }
    }

    OutArgs::sg_vector_t f_sg = outArgs.get_f_sg();
    OutArgs::sg_operator_t W_sg = outArgs.get_W_sg();
    bool f_sg_computed = false;

    // W_sg
    if (W_sg != Teuchos::null) {
      Stokhos::VectorOrthogPoly<Epetra_CrsMatrix> W_sg_crs(W_sg->basis(), W_sg->map());
      for (int i=0; i<W_sg->size(); i++)
        W_sg_crs.setCoeffPtr(i, Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(W_sg->getCoeffPtr(i)));
      app->computeGlobalSGJacobian(alpha, beta, omega, curr_time,
                                   x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                   sacado_param_vec, p_sg_index, p_sg_vals,
                                   f_sg.get(), W_sg_crs);
      f_sg_computed = true;
    }

    // df/dp_sg
    for (int i=0; i<num_param_vecs; i++) {
      Teuchos::RCP< Stokhos::EpetraMultiVectorOrthogPoly > dfdp_sg = outArgs.get_DfDp_sg(i).getMultiVector();
      if (dfdp_sg != Teuchos::null) {
        Teuchos::Array<int> p_indexes = outArgs.get_DfDp_sg(i).getDerivativeMultiVector().getParamIndexes();
        Teuchos::RCP<ParamVec> p_vec;
        if (p_indexes.size() == 0)
          p_vec = Teuchos::rcp(&sacado_param_vec[i],false);
        else {
          p_vec = Teuchos::rcp(new ParamVec);
          for (int j=0; j<p_indexes.size(); j++)
            p_vec->addParam(sacado_param_vec[i][p_indexes[j]].family,
                            sacado_param_vec[i][p_indexes[j]].baseValue);
        }
        app->computeGlobalSGTangent(0.0, 0.0, 0.0, curr_time, false,
                                    x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                    sacado_param_vec, p_sg_index, p_sg_vals, p_vec.get(),
                                    NULL, NULL, NULL, NULL, f_sg.get(), NULL, dfdp_sg.get());
        f_sg_computed = true;
      }
    }

    if (f_sg != Teuchos::null && !f_sg_computed)
      app->computeGlobalSGResidual(curr_time, x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                   sacado_param_vec, p_sg_index, p_sg_vals, *f_sg);

    // Response functions
    for (int i=0; i<outArgs.Ng(); i++) {
      OutArgs::sg_vector_t g_sg = outArgs.get_g_sg(i);
      bool g_sg_computed = false;

      SGDerivative dgdx_sg = outArgs.get_DgDx_sg(i);
      SGDerivative dgdxdot_sg = outArgs.get_DgDx_dot_sg(i);
      SGDerivative dgdxdotdot_sg = outArgs.get_DgDx_dotdot_sg(i);

      // dg/dx, dg/dxdot
      if (!dgdx_sg.isEmpty() || !dgdxdot_sg.isEmpty() || !dgdxdotdot_sg.isEmpty()) {
        app->evaluateSGResponseDerivative(i, curr_time, x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                          sacado_param_vec, p_sg_index, p_sg_vals, NULL,
                                          g_sg.get(), dgdx_sg, dgdxdot_sg, dgdxdotdot_sg, SGDerivative());
        g_sg_computed = true;
      }

      // dg/dp
      for (int j=0; j<num_param_vecs; j++) {
        Teuchos::RCP< Stokhos::EpetraMultiVectorOrthogPoly > dgdp_sg = outArgs.get_DgDp_sg(i,j).getMultiVector();
        if (dgdp_sg != Teuchos::null) {
          Teuchos::Array<int> p_indexes = outArgs.get_DgDp_sg(i,j).getDerivativeMultiVector().getParamIndexes();
          Teuchos::RCP<ParamVec> p_vec;
          if (p_indexes.size() == 0)
            p_vec = Teuchos::rcp(&sacado_param_vec[j],false);
          else {
            p_vec = Teuchos::rcp(new ParamVec);
            for (int k=0; k<p_indexes.size(); k++)
              p_vec->addParam(sacado_param_vec[j][p_indexes[k]].family,
                              sacado_param_vec[j][p_indexes[k]].baseValue);
          }
          app->evaluateSGResponseTangent(i, alpha, beta, omega, curr_time, false,
                                         x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                         sacado_param_vec, p_sg_index, p_sg_vals, p_vec.get(),
                                         NULL, NULL, NULL, NULL, g_sg.get(), NULL, dgdp_sg.get());
          g_sg_computed = true;
        }
      }

      if (g_sg != Teuchos::null && !g_sg_computed)
        app->evaluateSGResponse(i, curr_time, x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                sacado_param_vec, p_sg_index, p_sg_vals, *g_sg);
    }
  }

  //
  // Multi-point evaluation
  //
  mp_const_vector_t x_mp = inArgs.get_x_mp();
  if (x_mp != Teuchos::null) {
    mp_const_vector_t x_dot_mp = inArgs.get_x_dot_mp();
    mp_const_vector_t x_dotdot_mp = inArgs.get_x_dotdot_mp();
    if (x_dot_mp != Teuchos::null || x_dotdot_mp != Teuchos::null) {
      alpha = inArgs.get_alpha();
      omega = inArgs.get_omega();
      beta = inArgs.get_beta();
      curr_time = inArgs.get_t();
    }
    Teuchos::Array<int> p_mp_index;
    for (int i=0; i<num_param_vecs; i++) {
      mp_const_vector_t p_mp = inArgs.get_p_mp(i);
      if (p_mp != Teuchos::null) {
        p_mp_index.push_back(i);
        for (int j=0; j<p_mp_vals[i].size(); j++) {
          int num_mp_blocks = p_mp->size();
          p_mp_vals[i][j].reset(num_mp_blocks);
          p_mp_vals[i][j].copyForWrite();
          for (int l=0; l<num_mp_blocks; l++) {
            p_mp_vals[i][j].fastAccessCoeff(l) = (*p_mp)[l][j];
          }
        }
      }
    }

    mp_vector_t f_mp = outArgs.get_f_mp();
    mp_operator_t W_mp = outArgs.get_W_mp();
    bool f_mp_computed = false;

    // W_mp
    if (W_mp != Teuchos::null) {
      Stokhos::ProductContainer<Epetra_CrsMatrix> W_mp_crs(W_mp->map());
      for (int i=0; i<W_mp->size(); i++)
        W_mp_crs.setCoeffPtr(i, Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(W_mp->getCoeffPtr(i)));
      app->computeGlobalMPJacobian(alpha, beta, omega, curr_time,
                                   x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                   sacado_param_vec, p_mp_index, p_mp_vals,
                                   f_mp.get(), W_mp_crs);
      f_mp_computed = true;
    }

    // df/dp_mp
    for (int i=0; i<num_param_vecs; i++) {
      Teuchos::RCP< Stokhos::ProductEpetraMultiVector > dfdp_mp = outArgs.get_DfDp_mp(i).getMultiVector();
      if (dfdp_mp != Teuchos::null) {
        Teuchos::Array<int> p_indexes = outArgs.get_DfDp_mp(i).getDerivativeMultiVector().getParamIndexes();
        Teuchos::RCP<ParamVec> p_vec;
        if (p_indexes.size() == 0)
          p_vec = Teuchos::rcp(&sacado_param_vec[i],false);
        else {
          p_vec = Teuchos::rcp(new ParamVec);
          for (int j=0; j<p_indexes.size(); j++)
            p_vec->addParam(sacado_param_vec[i][p_indexes[j]].family,
                            sacado_param_vec[i][p_indexes[j]].baseValue);
        }
        app->computeGlobalMPTangent(0.0, 0.0, 0.0, curr_time, false,
                                    x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                    sacado_param_vec, p_mp_index, p_mp_vals, p_vec.get(),
                                    NULL, NULL, NULL, NULL, f_mp.get(), NULL, dfdp_mp.get());
        f_mp_computed = true;
      }
    }

    if (f_mp != Teuchos::null && !f_mp_computed)
      app->computeGlobalMPResidual(curr_time, x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                   sacado_param_vec, p_mp_index, p_mp_vals, *f_mp);

    // Response functions
    for (int i=0; i<outArgs.Ng(); i++) {
      mp_vector_t g_mp = outArgs.get_g_mp(i);
      bool g_mp_computed = false;

      MPDerivative dgdx_mp = outArgs.get_DgDx_mp(i);
      MPDerivative dgdxdot_mp = outArgs.get_DgDx_dot_mp(i);
      MPDerivative dgdxdotdot_mp = outArgs.get_DgDx_dotdot_mp(i);

      // dg/dx, dg/dxdot
      if (!dgdx_mp.isEmpty() || !dgdxdot_mp.isEmpty() || !dgdxdotdot_mp.isEmpty() ) {
        app->evaluateMPResponseDerivative(i, curr_time, x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                          sacado_param_vec, p_mp_index, p_mp_vals, NULL,
                                          g_mp.get(), dgdx_mp, dgdxdot_mp, dgdxdotdot_mp, MPDerivative());
        g_mp_computed = true;
      }

      // dg/dp
      for (int j=0; j<num_param_vecs; j++) {
        Teuchos::RCP< Stokhos::ProductEpetraMultiVector > dgdp_mp = outArgs.get_DgDp_mp(i,j).getMultiVector();
        if (dgdp_mp != Teuchos::null) {
          Teuchos::Array<int> p_indexes = outArgs.get_DgDp_mp(i,j).getDerivativeMultiVector().getParamIndexes();
          Teuchos::RCP<ParamVec> p_vec;
          if (p_indexes.size() == 0)
            p_vec = Teuchos::rcp(&sacado_param_vec[j],false);
          else {
            p_vec = Teuchos::rcp(new ParamVec);
            for (int k=0; k<p_indexes.size(); k++)
              p_vec->addParam(sacado_param_vec[j][p_indexes[k]].family,
                              sacado_param_vec[j][p_indexes[k]].baseValue);
          }
          app->evaluateMPResponseTangent(i, alpha, beta, omega, curr_time, false,
                                         x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                         sacado_param_vec, p_mp_index, p_mp_vals, p_vec.get(),
                                         NULL, NULL, NULL, NULL, g_mp.get(), NULL, dgdp_mp.get());
          g_mp_computed = true;
        }
      }

      if (g_mp != Teuchos::null && !g_mp_computed)
        app->evaluateMPResponse(i, curr_time, x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                sacado_param_vec, p_mp_index, p_mp_vals, *g_mp);
    }
  }
#endif //ALBANY_SG_MP
}
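For reference, the alpha, beta, and omega coefficients pulled from inArgs above follow the usual transient ModelEvaluator convention: the operator filled by computeGlobalJacobian is a weighted sum of the residual's partial derivatives. Stated as a formula (this is background on the convention these coefficients normally carry in Albany-style evaluators, not something this file asserts explicitly):

  W = alpha * df/dx_dot + beta * df/dx + omega * df/dx_dotdot

With alpha = 1 and beta = omega = 0, W reduces to df/dx_dot, the mass matrix; that is exactly what the computeGlobalJacobian(1.0, 0.0, 0.0, ...) call inside the WRITE_MASS_MATRIX_TO_MM_FILE block of the next version exploits.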
void Albany::ModelEvaluator::evalModel(const InArgs& inArgs, const OutArgs& outArgs) const
{
  Teuchos::TimeMonitor Timer(*timer); //start timer

  //
  // Get the input arguments
  //
  Teuchos::RCP<const Epetra_Vector> x = inArgs.get_x();
  Teuchos::RCP<const Epetra_Vector> x_dot;
  Teuchos::RCP<const Epetra_Vector> x_dotdot;

  //create comm and node objects for Epetra -> Tpetra conversions
  Teuchos::RCP<const Teuchos::Comm<int> > commT = app->getComm();
  Teuchos::RCP<Epetra_Comm> comm = Albany::createEpetraCommFromTeuchosComm(commT);

  //Create Tpetra copy of x, call it xT
  Teuchos::RCP<const Tpetra_Vector> xT;
  if (x != Teuchos::null)
    xT = Petra::EpetraVector_To_TpetraVectorConst(*x, commT);

  double alpha = 0.0;
  double omega = 0.0;
  double beta = 1.0;
  double curr_time = 0.0;

  if(num_time_deriv > 0)
    x_dot = inArgs.get_x_dot();
  if(num_time_deriv > 1)
    x_dotdot = inArgs.get_x_dotdot();

  //Declare and create Tpetra copy of x_dot, call it x_dotT
  Teuchos::RCP<const Tpetra_Vector> x_dotT;
  if (Teuchos::nonnull(x_dot))
    x_dotT = Petra::EpetraVector_To_TpetraVectorConst(*x_dot, commT);

  //Declare and create Tpetra copy of x_dotdot, call it x_dotdotT
  Teuchos::RCP<const Tpetra_Vector> x_dotdotT;
  if (Teuchos::nonnull(x_dotdot))
    x_dotdotT = Petra::EpetraVector_To_TpetraVectorConst(*x_dotdot, commT);

  if (Teuchos::nonnull(x_dot)){
    alpha = inArgs.get_alpha();
    beta = inArgs.get_beta();
    curr_time = inArgs.get_t();
  }
  if (Teuchos::nonnull(x_dotdot)) {
    omega = inArgs.get_omega();
  }

  for (int i=0; i<num_param_vecs; i++) {
    Teuchos::RCP<const Epetra_Vector> p = inArgs.get_p(i);
    if (p != Teuchos::null) {
      for (unsigned int j=0; j<sacado_param_vec[i].size(); j++) {
        sacado_param_vec[i][j].baseValue = (*p)[j];
      }
    }
  }
  for (int i=0; i<num_dist_param_vecs; i++) {
    Teuchos::RCP<const Epetra_Vector> p = inArgs.get_p(i+num_param_vecs);
    //create Tpetra copy of p
    Teuchos::RCP<const Tpetra_Vector> pT;
    if (p != Teuchos::null) {
      pT = Petra::EpetraVector_To_TpetraVectorConst(*p, commT);
      //*(distParamLib->get(dist_param_names[i])->vector()) = *p;
      *(distParamLib->get(dist_param_names[i])->vector()) = *pT;
    }
  }

  //
  // Get the output arguments
  //
  EpetraExt::ModelEvaluator::Evaluation<Epetra_Vector> f_out = outArgs.get_f();
  Teuchos::RCP<Epetra_Operator> W_out = outArgs.get_W();

  // Cast W to a CrsMatrix, throw an exception if this fails
  Teuchos::RCP<Epetra_CrsMatrix> W_out_crs;
#ifdef WRITE_MASS_MATRIX_TO_MM_FILE
  //IK, 7/15/14: adding object to hold mass matrix to be written to matrix market file
  Teuchos::RCP<Epetra_CrsMatrix> Mass;
  //IK, 7/15/14: needed for writing mass matrix out to matrix market file
  EpetraExt::ModelEvaluator::Evaluation<Epetra_Vector> ftmp = outArgs.get_f();
#endif

  if (W_out != Teuchos::null) {
    W_out_crs = Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(W_out, true);
#ifdef WRITE_MASS_MATRIX_TO_MM_FILE
    //IK, 7/15/14: adding object to hold mass matrix to be written to matrix market file
    Mass = Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(W_out, true);
#endif
  }

  int test_var = 0;
  if(test_var != 0){
    std::cout << "The current solution length is: " << x->MyLength() << std::endl;
    x->Print(std::cout);
  }

  // Get preconditioner operator, if requested
  Teuchos::RCP<Epetra_Operator> WPrec_out;
  if (outArgs.supports(OUT_ARG_WPrec)) WPrec_out = outArgs.get_WPrec();

  //
  // Compute the functions
  //
  bool f_already_computed = false;

  // W matrix
  if (W_out != Teuchos::null) {
    app->computeGlobalJacobian(alpha, beta, omega, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, f_out.get(), *W_out_crs);
#ifdef WRITE_MASS_MATRIX_TO_MM_FILE
    //IK, 7/15/14: write mass matrix to matrix market file
    //Warning: to read this in to MATLAB correctly, code must be run in serial.
    //Otherwise Mass will have a distributed Map which would also need to be read in to MATLAB for proper
    //reading in of Mass.
    app->computeGlobalJacobian(1.0, 0.0, 0.0, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, ftmp.get(), *Mass);
    EpetraExt::RowMatrixToMatrixMarketFile("mass.mm", *Mass);
    EpetraExt::BlockMapToMatrixMarketFile("rowmap.mm", Mass->RowMap());
    EpetraExt::BlockMapToMatrixMarketFile("colmap.mm", Mass->ColMap());
    Teuchos::RCP<Teuchos::FancyOStream> out = Teuchos::VerboseObjectBase::getDefaultOStream();
#endif
    f_already_computed=true;
    if(test_var != 0){
      //std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
      //f_out->Print(std::cout);
      std::cout << "The current Jacobian length is: " << W_out_crs->NumGlobalRows() << std::endl;
      W_out_crs->Print(std::cout);
    }
  }

  if (WPrec_out != Teuchos::null) {
    app->computeGlobalJacobian(alpha, beta, omega, curr_time, x_dot.get(), x_dotdot.get(), *x,
                               sacado_param_vec, f_out.get(), *Extra_W_crs);
    f_already_computed=true;
    if(test_var != 0){
      //std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
      //f_out->Print(std::cout);
      std::cout << "The current preconditioner length is: " << Extra_W_crs->NumGlobalRows() << std::endl;
      Extra_W_crs->Print(std::cout);
    }
    app->computeGlobalPreconditioner(Extra_W_crs, WPrec_out);
  }

  // scalar df/dp
  for (int i=0; i<num_param_vecs; i++) {
    Teuchos::RCP<Epetra_MultiVector> dfdp_out = outArgs.get_DfDp(i).getMultiVector();
    if (dfdp_out != Teuchos::null) {
      Teuchos::Array<int> p_indexes = outArgs.get_DfDp(i).getDerivativeMultiVector().getParamIndexes();
      Teuchos::RCP<ParamVec> p_vec;
      if (p_indexes.size() == 0)
        p_vec = Teuchos::rcp(&sacado_param_vec[i],false);
      else {
        p_vec = Teuchos::rcp(new ParamVec);
        for (int j=0; j<p_indexes.size(); j++)
          p_vec->addParam(sacado_param_vec[i][p_indexes[j]].family,
                          sacado_param_vec[i][p_indexes[j]].baseValue);
      }
      app->computeGlobalTangent(0.0, 0.0, 0.0, curr_time, false, x_dot.get(), x_dotdot.get(), *x,
                                sacado_param_vec, p_vec.get(),
                                NULL, NULL, NULL, NULL, f_out.get(), NULL, dfdp_out.get());
      f_already_computed=true;
      if(test_var != 0){
        std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
        f_out->Print(std::cout);
      }
    }
  }

  // distributed df/dp
  for (int i=0; i<num_dist_param_vecs; i++) {
    Teuchos::RCP<Epetra_Operator> dfdp_out = outArgs.get_DfDp(i+num_param_vecs).getLinearOp();
    if (dfdp_out != Teuchos::null) {
      Teuchos::RCP<DistributedParameterDerivativeOp> dfdp_op =
        Teuchos::rcp_dynamic_cast<DistributedParameterDerivativeOp>(dfdp_out);
      dfdp_op->set(curr_time, x_dotT, x_dotdotT, xT, Teuchos::rcp(&sacado_param_vec,false));
    }
  }

  // f
  if (app->is_adjoint) {
    Derivative f_deriv(f_out, DERIV_TRANS_MV_BY_ROW);
    int response_index = 0; // need to add capability for sending this in
    app->evaluateResponseDerivative(response_index, curr_time, x_dot.get(), x_dotdot.get(), *x,
                                    sacado_param_vec, NULL,
                                    NULL, f_deriv, Derivative(), Derivative(), Derivative());
  }
  else {
    if (f_out != Teuchos::null && !f_already_computed) {
      app->computeGlobalResidual(curr_time, x_dot.get(), x_dotdot.get(), *x,
                                 sacado_param_vec, *f_out);
      if(test_var != 0){
        std::cout << "The current rhs length is: " << f_out->MyLength() << std::endl;
        f_out->Print(std::cout);
      }
    }
  }

  // Response functions
  for (int i=0; i<outArgs.Ng(); i++) {
    //Set curr_time to final time at which response occurs.
    if(num_time_deriv > 0)
      curr_time = inArgs.get_t();
    Teuchos::RCP<Epetra_Vector> g_out = outArgs.get_g(i);
    //Declare Tpetra_Vector copy of g_out
    Teuchos::RCP<Tpetra_Vector> g_outT;
    bool g_computed = false;

    Derivative dgdx_out = outArgs.get_DgDx(i);
    Derivative dgdxdot_out = outArgs.get_DgDx_dot(i);
    Derivative dgdxdotdot_out = outArgs.get_DgDx_dotdot(i);

    // dg/dx, dg/dxdot
    if (!dgdx_out.isEmpty() || !dgdxdot_out.isEmpty() || !dgdxdotdot_out.isEmpty() ) {
      app->evaluateResponseDerivative(i, curr_time, x_dot.get(), x_dotdot.get(), *x,
                                      sacado_param_vec, NULL,
                                      g_out.get(), dgdx_out,
                                      dgdxdot_out, dgdxdotdot_out, Derivative());
      g_computed = true;
    }

    // dg/dp
    for (int j=0; j<num_param_vecs; j++) {
      Teuchos::RCP<Epetra_MultiVector> dgdp_out = outArgs.get_DgDp(i,j).getMultiVector();
      //Declare Tpetra copy of dgdp_out
      Teuchos::RCP<Tpetra_MultiVector> dgdp_outT;
      if (dgdp_out != Teuchos::null) {
        Teuchos::Array<int> p_indexes = outArgs.get_DgDp(i,j).getDerivativeMultiVector().getParamIndexes();
        Teuchos::RCP<ParamVec> p_vec;
        if (p_indexes.size() == 0)
          p_vec = Teuchos::rcp(&sacado_param_vec[j],false);
        else {
          p_vec = Teuchos::rcp(new ParamVec);
          for (int k=0; k<p_indexes.size(); k++)
            p_vec->addParam(sacado_param_vec[j][p_indexes[k]].family,
                            sacado_param_vec[j][p_indexes[k]].baseValue);
        }
        //create Tpetra copy of g_out, call it g_outT
        if (g_out != Teuchos::null)
          g_outT = Petra::EpetraVector_To_TpetraVectorNonConst(*g_out, commT);
        //create Tpetra copy of dgdp_out, call it dgdp_outT
        if (dgdp_out != Teuchos::null)
          dgdp_outT = Petra::EpetraMultiVector_To_TpetraMultiVector(*dgdp_out, commT);
        app->evaluateResponseTangentT(i, alpha, beta, omega, curr_time, false,
                                      x_dotT.get(), x_dotdotT.get(), *xT,
                                      sacado_param_vec, p_vec.get(),
                                      NULL, NULL, NULL, NULL, g_outT.get(), NULL,
                                      dgdp_outT.get());
        //convert g_outT to Epetra_Vector g_out
        if (g_out != Teuchos::null)
          Petra::TpetraVector_To_EpetraVector(g_outT, *g_out, comm);
        //convert dgdp_outT to Epetra_MultiVector dgdp_out
        if (dgdp_out != Teuchos::null)
          Petra::TpetraMultiVector_To_EpetraMultiVector(dgdp_outT, *dgdp_out, comm);
        g_computed = true;
      }
    }

    // Need to handle dg/dp for distributed p
    for(int j=0; j<num_dist_param_vecs; j++) {
      Derivative dgdp_out = outArgs.get_DgDp(i,j+num_param_vecs);
      if (!dgdp_out.isEmpty()) {
        dgdp_out.getMultiVector()->PutScalar(0.);
        app->evaluateResponseDistParamDeriv(i, curr_time, x_dot.get(), x_dotdot.get(), *x,
                                            sacado_param_vec, dist_param_names[j],
                                            dgdp_out.getMultiVector().get());
      }
    }

    if (g_out != Teuchos::null && !g_computed) {
      //create Tpetra copy of g_out, call it g_outT
      g_outT = Petra::EpetraVector_To_TpetraVectorNonConst(*g_out, commT);
      app->evaluateResponseT(i, curr_time, x_dotT.get(), x_dotdotT.get(), *xT,
                             sacado_param_vec, *g_outT);
      //convert g_outT to Epetra_Vector g_out
      Petra::TpetraVector_To_EpetraVector(g_outT, *g_out, comm);
    }
  }

  //
  // Stochastic Galerkin
  //
#ifdef ALBANY_SG
  InArgs::sg_const_vector_t x_sg = inArgs.get_x_sg();
  if (x_sg != Teuchos::null) {
    app->init_sg(inArgs.get_sg_basis(), inArgs.get_sg_quadrature(),
                 inArgs.get_sg_expansion(), x_sg->productComm());
    InArgs::sg_const_vector_t x_dot_sg = Teuchos::null;
    InArgs::sg_const_vector_t x_dotdot_sg = Teuchos::null;
    if(num_time_deriv > 0)
      x_dot_sg = inArgs.get_x_dot_sg();
    if(num_time_deriv > 1)
      x_dotdot_sg = inArgs.get_x_dotdot_sg();
    if (x_dot_sg != Teuchos::null || x_dotdot_sg != Teuchos::null) {
      alpha = inArgs.get_alpha();
      beta = inArgs.get_beta();
      curr_time = inArgs.get_t();
    }
    if (x_dotdot_sg != Teuchos::null) {
      omega = inArgs.get_omega();
    }

    InArgs::sg_const_vector_t epetra_p_sg = inArgs.get_p_sg(0);
    Teuchos::Array<int> p_sg_index;
    for (int i=0; i<num_param_vecs; i++) {
      InArgs::sg_const_vector_t p_sg = inArgs.get_p_sg(i);
      if (p_sg != Teuchos::null) {
        p_sg_index.push_back(i);
        for (int j=0; j<p_sg_vals[i].size(); j++) {
          int num_sg_blocks = p_sg->size();
          p_sg_vals[i][j].reset(app->getStochasticExpansion(), num_sg_blocks);
          p_sg_vals[i][j].copyForWrite();
          for (int l=0; l<num_sg_blocks; l++) {
            p_sg_vals[i][j].fastAccessCoeff(l) = (*p_sg)[l][j];
          }
        }
      }
    }

    OutArgs::sg_vector_t f_sg = outArgs.get_f_sg();
    OutArgs::sg_operator_t W_sg = outArgs.get_W_sg();
    bool f_sg_computed = false;

    // W_sg
    if (W_sg != Teuchos::null) {
      Stokhos::VectorOrthogPoly<Epetra_CrsMatrix> W_sg_crs(W_sg->basis(), W_sg->map());
      for (int i=0; i<W_sg->size(); i++)
        W_sg_crs.setCoeffPtr(i, Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(W_sg->getCoeffPtr(i)));
      app->computeGlobalSGJacobian(alpha, beta, omega, curr_time,
                                   x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                   sacado_param_vec, p_sg_index, p_sg_vals,
                                   f_sg.get(), W_sg_crs);
      f_sg_computed = true;
    }

    // df/dp_sg
    for (int i=0; i<num_param_vecs; i++) {
      Teuchos::RCP< Stokhos::EpetraMultiVectorOrthogPoly > dfdp_sg = outArgs.get_DfDp_sg(i).getMultiVector();
      if (dfdp_sg != Teuchos::null) {
        Teuchos::Array<int> p_indexes = outArgs.get_DfDp_sg(i).getDerivativeMultiVector().getParamIndexes();
        Teuchos::RCP<ParamVec> p_vec;
        if (p_indexes.size() == 0)
          p_vec = Teuchos::rcp(&sacado_param_vec[i],false);
        else {
          p_vec = Teuchos::rcp(new ParamVec);
          for (int j=0; j<p_indexes.size(); j++)
            p_vec->addParam(sacado_param_vec[i][p_indexes[j]].family,
                            sacado_param_vec[i][p_indexes[j]].baseValue);
        }
        app->computeGlobalSGTangent(0.0, 0.0, 0.0, curr_time, false,
                                    x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                    sacado_param_vec, p_sg_index, p_sg_vals, p_vec.get(),
                                    NULL, NULL, NULL, NULL, f_sg.get(), NULL, dfdp_sg.get());
        f_sg_computed = true;
      }
    }

    if (f_sg != Teuchos::null && !f_sg_computed)
      app->computeGlobalSGResidual(curr_time, x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                   sacado_param_vec, p_sg_index, p_sg_vals, *f_sg);

    // Response functions
    for (int i=0; i<outArgs.Ng(); i++) {
      OutArgs::sg_vector_t g_sg = outArgs.get_g_sg(i);
      bool g_sg_computed = false;

      SGDerivative dgdx_sg = outArgs.get_DgDx_sg(i);
      SGDerivative dgdxdot_sg = outArgs.get_DgDx_dot_sg(i);
      SGDerivative dgdxdotdot_sg = outArgs.get_DgDx_dotdot_sg(i);

      // dg/dx, dg/dxdot
      if (!dgdx_sg.isEmpty() || !dgdxdot_sg.isEmpty() || !dgdxdotdot_sg.isEmpty()) {
        app->evaluateSGResponseDerivative(i, curr_time, x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                          sacado_param_vec, p_sg_index, p_sg_vals, NULL,
                                          g_sg.get(), dgdx_sg, dgdxdot_sg, dgdxdotdot_sg, SGDerivative());
        g_sg_computed = true;
      }

      // dg/dp
      for (int j=0; j<num_param_vecs; j++) {
        Teuchos::RCP< Stokhos::EpetraMultiVectorOrthogPoly > dgdp_sg = outArgs.get_DgDp_sg(i,j).getMultiVector();
        if (dgdp_sg != Teuchos::null) {
          Teuchos::Array<int> p_indexes = outArgs.get_DgDp_sg(i,j).getDerivativeMultiVector().getParamIndexes();
          Teuchos::RCP<ParamVec> p_vec;
          if (p_indexes.size() == 0)
            p_vec = Teuchos::rcp(&sacado_param_vec[j],false);
          else {
            p_vec = Teuchos::rcp(new ParamVec);
            for (int k=0; k<p_indexes.size(); k++)
              p_vec->addParam(sacado_param_vec[j][p_indexes[k]].family,
                              sacado_param_vec[j][p_indexes[k]].baseValue);
          }
          app->evaluateSGResponseTangent(i, alpha, beta, omega, curr_time, false,
                                         x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                         sacado_param_vec, p_sg_index, p_sg_vals, p_vec.get(),
                                         NULL, NULL, NULL, NULL, g_sg.get(), NULL, dgdp_sg.get());
          g_sg_computed = true;
        }
      }

      if (g_sg != Teuchos::null && !g_sg_computed)
        app->evaluateSGResponse(i, curr_time, x_dot_sg.get(), x_dotdot_sg.get(), *x_sg,
                                sacado_param_vec, p_sg_index, p_sg_vals, *g_sg);
    }
  }
#endif
#ifdef ALBANY_ENSEMBLE

  //
  // Multi-point evaluation
  //
  mp_const_vector_t x_mp = inArgs.get_x_mp();
  if (x_mp != Teuchos::null) {
    mp_const_vector_t x_dot_mp = Teuchos::null;
    mp_const_vector_t x_dotdot_mp = Teuchos::null;
    if(num_time_deriv > 0)
      x_dot_mp = inArgs.get_x_dot_mp();
    if(num_time_deriv > 1)
      x_dotdot_mp = inArgs.get_x_dotdot_mp();
    if (x_dot_mp != Teuchos::null || x_dotdot_mp != Teuchos::null) {
      alpha = inArgs.get_alpha();
      //omega = inArgs.get_omega();
      beta = inArgs.get_beta();
      curr_time = inArgs.get_t();
    }
    if (x_dotdot_mp != Teuchos::null) {
      omega = inArgs.get_omega();
    }

    Teuchos::Array<int> p_mp_index;
    for (int i=0; i<num_param_vecs; i++) {
      mp_const_vector_t p_mp = inArgs.get_p_mp(i);
      if (p_mp != Teuchos::null) {
        p_mp_index.push_back(i);
        for (int j=0; j<p_mp_vals[i].size(); j++) {
          int num_mp_blocks = p_mp->size();
          p_mp_vals[i][j].reset(num_mp_blocks);
          p_mp_vals[i][j].copyForWrite();
          for (int l=0; l<num_mp_blocks; l++) {
            p_mp_vals[i][j].fastAccessCoeff(l) = (*p_mp)[l][j];
          }
        }
      }
    }

    mp_vector_t f_mp = outArgs.get_f_mp();
    mp_operator_t W_mp = outArgs.get_W_mp();
    bool f_mp_computed = false;

    // W_mp
    if (W_mp != Teuchos::null) {
      Stokhos::ProductContainer<Epetra_CrsMatrix> W_mp_crs(W_mp->map());
      for (int i=0; i<W_mp->size(); i++)
        W_mp_crs.setCoeffPtr(i, Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(W_mp->getCoeffPtr(i)));
      app->computeGlobalMPJacobian(alpha, beta, omega, curr_time,
                                   x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                   sacado_param_vec, p_mp_index, p_mp_vals,
                                   f_mp.get(), W_mp_crs);
      f_mp_computed = true;
    }

    // df/dp_mp
    for (int i=0; i<num_param_vecs; i++) {
      Teuchos::RCP< Stokhos::ProductEpetraMultiVector > dfdp_mp = outArgs.get_DfDp_mp(i).getMultiVector();
      if (dfdp_mp != Teuchos::null) {
        Teuchos::Array<int> p_indexes = outArgs.get_DfDp_mp(i).getDerivativeMultiVector().getParamIndexes();
        Teuchos::RCP<ParamVec> p_vec;
        if (p_indexes.size() == 0)
          p_vec = Teuchos::rcp(&sacado_param_vec[i],false);
        else {
          p_vec = Teuchos::rcp(new ParamVec);
          for (int j=0; j<p_indexes.size(); j++)
            p_vec->addParam(sacado_param_vec[i][p_indexes[j]].family,
                            sacado_param_vec[i][p_indexes[j]].baseValue);
        }
        app->computeGlobalMPTangent(0.0, 0.0, 0.0, curr_time, false,
                                    x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                    sacado_param_vec, p_mp_index, p_mp_vals, p_vec.get(),
                                    NULL, NULL, NULL, NULL, f_mp.get(), NULL, dfdp_mp.get());
        f_mp_computed = true;
      }
    }

    if (f_mp != Teuchos::null && !f_mp_computed)
      app->computeGlobalMPResidual(curr_time, x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                   sacado_param_vec, p_mp_index, p_mp_vals, *f_mp);

    // Response functions
    for (int i=0; i<outArgs.Ng(); i++) {
      mp_vector_t g_mp = outArgs.get_g_mp(i);
      bool g_mp_computed = false;

      MPDerivative dgdx_mp = outArgs.get_DgDx_mp(i);
      MPDerivative dgdxdot_mp = outArgs.get_DgDx_dot_mp(i);
      MPDerivative dgdxdotdot_mp = outArgs.get_DgDx_dotdot_mp(i);

      // dg/dx, dg/dxdot
      if (!dgdx_mp.isEmpty() || !dgdxdot_mp.isEmpty() || !dgdxdotdot_mp.isEmpty() ) {
        app->evaluateMPResponseDerivative(i, curr_time, x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                          sacado_param_vec, p_mp_index, p_mp_vals, NULL,
                                          g_mp.get(), dgdx_mp, dgdxdot_mp, dgdxdotdot_mp, MPDerivative());
        g_mp_computed = true;
      }

      // dg/dp
      for (int j=0; j<num_param_vecs; j++) {
        Teuchos::RCP< Stokhos::ProductEpetraMultiVector > dgdp_mp = outArgs.get_DgDp_mp(i,j).getMultiVector();
        if (dgdp_mp != Teuchos::null) {
          Teuchos::Array<int> p_indexes = outArgs.get_DgDp_mp(i,j).getDerivativeMultiVector().getParamIndexes();
          Teuchos::RCP<ParamVec> p_vec;
          if (p_indexes.size() == 0)
            p_vec = Teuchos::rcp(&sacado_param_vec[j],false);
          else {
            p_vec = Teuchos::rcp(new ParamVec);
            for (int k=0; k<p_indexes.size(); k++)
              p_vec->addParam(sacado_param_vec[j][p_indexes[k]].family,
                              sacado_param_vec[j][p_indexes[k]].baseValue);
          }
          app->evaluateMPResponseTangent(i, alpha, beta, omega, curr_time, false,
                                         x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                         sacado_param_vec, p_mp_index, p_mp_vals, p_vec.get(),
                                         NULL, NULL, NULL, NULL, g_mp.get(), NULL, dgdp_mp.get());
          g_mp_computed = true;
        }
      }

      if (g_mp != Teuchos::null && !g_mp_computed)
        app->evaluateMPResponse(i, curr_time, x_dot_mp.get(), x_dotdot_mp.get(), *x_mp,
                                sacado_param_vec, p_mp_index, p_mp_vals, *g_mp);
    }
  }
#endif
}
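The Epetra/Tpetra shim in this version follows a copy-in/copy-out pattern: convert the Epetra inputs to Tpetra, call the Tpetra-based kernel, then convert the outputs back so EpetraExt callers are unaffected. Condensed to its essentials below; this is a sketch assuming the Petra utilities and app object used above, with response index 0 and a time t as placeholders.

// Copy-in: Epetra -> Tpetra.
Teuchos::RCP<const Teuchos::Comm<int> > commT = app->getComm();
Teuchos::RCP<Epetra_Comm> comm = Albany::createEpetraCommFromTeuchosComm(commT);
Teuchos::RCP<const Tpetra_Vector> xT = Petra::EpetraVector_To_TpetraVectorConst(*x, commT);
Teuchos::RCP<Tpetra_Vector> gT = Petra::EpetraVector_To_TpetraVectorNonConst(*g, commT);

// Evaluate with the Tpetra-based kernel (steady case: no x_dot/x_dotdot).
app->evaluateResponseT(0, t, NULL, NULL, *xT, sacado_param_vec, *gT);

// Copy-out: Tpetra -> Epetra, so the EpetraExt::ModelEvaluator caller sees the result.
Petra::TpetraVector_To_EpetraVector(gT, *g, comm);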
void InitialConditions(const Teuchos::RCP<Epetra_Vector>& soln,
                       const Teuchos::ArrayRCP<Teuchos::ArrayRCP<Teuchos::ArrayRCP<Teuchos::ArrayRCP<int> > > >& wsElNodeEqID,
                       const Teuchos::ArrayRCP<std::string>& wsEBNames,
                       const Teuchos::ArrayRCP<Teuchos::ArrayRCP<Teuchos::ArrayRCP<double*> > > coords,
                       const int neq, const int numDim,
                       Teuchos::ParameterList& icParams, const bool hasRestartSolution)
{
  // Called twice, with x and xdot. Different param lists are sent in.
  icParams.validateParameters(*AAdapt::getValidInitialConditionParameters(wsEBNames), 0);

  // Default function is Constant, unless a Restart solution vector
  // was used, in which case the initial condition defaults to Restart.
  std::string name;
  if(!hasRestartSolution) name = icParams.get("Function", "Constant");
  else name = icParams.get("Function", "Restart");

  if(name == "Restart") return;

  // Handle element block specific constant data
  if(name == "EBPerturb" || name == "EBPerturbGaussian" || name == "EBConstant") {
    bool perturb_values = false;
    Teuchos::Array<double> defaultData(neq);
    Teuchos::Array<double> perturb_mag;

    // Only perturb if the user has told us by how much to perturb
    if(name != "EBConstant" && icParams.isParameter("Perturb IC")) {
      perturb_values = true;
      perturb_mag = icParams.get("Perturb IC", defaultData);
    }

    /* The element block-based IC specification here is currently a hack. It assumes the initial value is constant
     * within each element across the element block (or optionally perturbed somewhat element by element). The
     * proper way to do this would be to project the element integration point values to the nodes using the basis
     * functions and a consistent mass matrix.
     *
     * The current implementation uses a single integration point per element - this integration point value for this
     * element within the element block is specified in the input file (and optionally perturbed). An approximation
     * of the load vector is obtained by accumulating the resulting (possibly perturbed) value into the nodes. Then,
     * a lumped version of the mass matrix is inverted and used to solve for the approximate nodal point initial
     * conditions.
     */

    // Use an Epetra_Vector to hold the lumped mass matrix (has entries only on the diagonal). Zero-ed out.
    Epetra_Vector lumpedMM(soln->Map(), true);

    // Make sure soln is zeroed - we are accumulating into it
    for(int i = 0; i < soln->MyLength(); i++)
      (*soln)[i] = 0;

    // Loop over all worksets, elements, all local nodes: compute soln as a function of coord and wsEBName
    Teuchos::RCP<AAdapt::AnalyticFunction> initFunc;

    for(int ws = 0; ws < wsElNodeEqID.size(); ws++) { // loop over worksets
      Teuchos::Array<double> data = icParams.get(wsEBNames[ws], defaultData);
      // Call factory method from library of initial condition functions
      if(perturb_values) {
        if(name == "EBPerturb")
          initFunc = Teuchos::rcp(new AAdapt::ConstantFunctionPerturbed(neq, numDim, ws, data, perturb_mag));
        else // name == EBPerturbGaussian
          initFunc = Teuchos::rcp(new AAdapt::ConstantFunctionGaussianPerturbed(neq, numDim, ws, data, perturb_mag));
      }
      else
        initFunc = Teuchos::rcp(new AAdapt::ConstantFunction(neq, numDim, data));

      std::vector<double> X(neq);
      std::vector<double> x(neq);

      for(int el = 0; el < wsElNodeEqID[ws].size(); el++) { // loop over elements in workset
        for(int i = 0; i < neq; i++) X[i] = 0;
        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) // loop over nodes local to the element
          for(int i = 0; i < neq; i++) X[i] += coords[ws][el][ln][i]; // nodal coords
        for(int i = 0; i < neq; i++) X[i] /= (double)neq;

        initFunc->compute(&x[0], &X[0]);

        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) { // loop over nodes local to the element
          Teuchos::ArrayRCP<int> lid = wsElNodeEqID[ws][el][ln]; // local node ids
          for(int i = 0; i < neq; i++) {
            (*soln)[lid[i]] += x[i];
            // (*soln)[lid[i]] += X[i]; // Test with coord values
            lumpedMM[lid[i]] += 1.0;
          }
        }
      }
    }

    // Apply the inverted lumped mass matrix to get the final nodal projection
    for(int i = 0; i < soln->MyLength(); i++)
      (*soln)[i] /= lumpedMM[i];

    return;
  }

  if(name == "Coordinates") {
    // Place the coordinate locations of the nodes into the solution vector for an initial guess
    int numDOFsPerDim = neq / numDim;

    for(int ws = 0; ws < wsElNodeEqID.size(); ws++) {
      for(int el = 0; el < wsElNodeEqID[ws].size(); el++) {
        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) {
          const double* X = coords[ws][el][ln];
          Teuchos::ArrayRCP<int> lid = wsElNodeEqID[ws][el][ln];
          /* numDim = 3; numDOFsPerDim = 2 (coord soln, tgt soln)
             X[0] = x; X[1] = y; X[2] = z;
             lid[0] = DOF[0],eq[0] (x eqn)  lid[1] = DOF[0],eq[1] (y eqn)  lid[2] = DOF[0],eq[2] (z eqn)
             lid[3] = DOF[1],eq[0] (x eqn)  lid[4] = DOF[1],eq[1] (y eqn)  lid[5] = DOF[1],eq[2] (z eqn) */
          for(int j = 0; j < numDOFsPerDim; j++)
            for(int i = 0; i < numDim; i++)
              (*soln)[lid[j * numDim + i]] = X[i];
        }
      }
    }
  }
  else {
    Teuchos::Array<double> defaultData(neq);
    Teuchos::Array<double> data = icParams.get("Function Data", defaultData);

    // Call factory method from library of initial condition functions
    Teuchos::RCP<AAdapt::AnalyticFunction> initFunc = createAnalyticFunction(name, neq, numDim, data);

    // Loop over all worksets, elements, all local nodes: compute soln as a function of coord
    std::vector<double> x(neq);

    for(int ws = 0; ws < wsElNodeEqID.size(); ws++) {
      for(int el = 0; el < wsElNodeEqID[ws].size(); el++) {
        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) {
          const double* X = coords[ws][el][ln];
          Teuchos::ArrayRCP<int> lid = wsElNodeEqID[ws][el][ln];
          for(int i = 0; i < neq; i++) x[i] = (*soln)[lid[i]];
          initFunc->compute(&x[0], X);
          for(int i = 0; i < neq; i++) (*soln)[lid[i]] = x[i];
        }
      }
    }
  }
}
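Stripped of the Trilinos containers, the element-block branch above is a lumped-mass (counting) average: every element adds its constant value into each of its nodes, and each nodal sum is then divided by the number of contributions that landed there. A self-contained toy version of that kernel on a hypothetical 1D mesh (4 elements, 5 nodes, element e touching nodes e and e+1):

#include <cstdio>
#include <vector>

int main() {
  const int numElems = 4, numNodes = 5;
  std::vector<double> elemValue = {1.0, 2.0, 3.0, 4.0}; // per-element IC value
  std::vector<double> soln(numNodes, 0.0);              // accumulated nodal values
  std::vector<double> lumpedMM(numNodes, 0.0);          // lumped "mass": contribution counts

  for (int e = 0; e < numElems; ++e) {
    for (int ln = 0; ln < 2; ++ln) {   // two local nodes per 1D element
      const int node = e + ln;
      soln[node] += elemValue[e];      // accumulate the element value into the node
      lumpedMM[node] += 1.0;           // one unit of lumped mass per contribution
    }
  }
  for (int n = 0; n < numNodes; ++n)
    soln[n] /= lumpedMM[n];            // invert the diagonal (lumped) mass matrix

  // Interior nodes end up with the average of their two adjacent elements.
  for (int n = 0; n < numNodes; ++n)
    std::printf("node %d: %g\n", n, soln[n]);
  return 0;
}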
int RestrictedMultiVectorWrapper::
restrict_comm (Teuchos::RCP<Epetra_MultiVector> input_mv)
{
  using Teuchos::rcp;
  input_mv_ = input_mv;

  // Extract the input MV's communicator and Map.
  const Epetra_MpiComm *InComm = dynamic_cast<const Epetra_MpiComm*> (& (input_mv_->Comm ()));
  const Epetra_BlockMap *InMap = dynamic_cast<const Epetra_BlockMap*> (& (input_mv_->Map ()));

  if (! InComm || ! InMap) {
    return -1; // At least one dynamic cast failed.
  }

  if (! subcomm_is_set) {
    /* Build the Split Communicators, If Needed */
    int color;
    if (InMap->NumMyElements()) {
      color = 1;
    }
    else {
      color = MPI_UNDEFINED;
    }
    const int err = MPI_Comm_split (InComm->Comm(), color, InComm->MyPID(), &MPI_SubComm_);
    if (err != MPI_SUCCESS) {
      return -2;
    }
  }
  else {
    /* Sanity check user-provided subcomm - drop an error if the
       MPISubComm does not include a processor with data. */
    if (input_mv->MyLength() && MPI_SubComm_ == MPI_COMM_NULL) {
      return -2;
    }
  }

  /* Mark active processors */
  if (MPI_SubComm_ == MPI_COMM_NULL) {
    proc_is_active = false;
  }
  else {
    proc_is_active = true;
  }

  if (proc_is_active) {
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
    if(InMap->GlobalIndicesInt()) {
      int Nrows = InMap->NumGlobalElements ();
      RestrictedComm_ = new Epetra_MpiComm (MPI_SubComm_);
      // Build the restricted Maps
      ResMap_ = new Epetra_BlockMap (Nrows, InMap->NumMyElements(), InMap->MyGlobalElements(),
                                     InMap->ElementSizeList(), InMap->IndexBase(), *RestrictedComm_);
    }
    else
#endif
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
    if(InMap->GlobalIndicesLongLong()) {
      long long Nrows = InMap->NumGlobalElements64 ();
      RestrictedComm_ = new Epetra_MpiComm (MPI_SubComm_);
      // Build the restricted Maps
      ResMap_ = new Epetra_BlockMap (Nrows, InMap->NumMyElements(), InMap->MyGlobalElements64(),
                                     InMap->ElementSizeList(), InMap->IndexBase64(), *RestrictedComm_);
    }
    else
#endif
      throw "EpetraExt::RestrictedMultiVectorWrapper::restrict_comm ERROR: GlobalIndices type unknown";

    // Allocate the restricted matrix
    double *A;
    int LDA;
    input_mv_->ExtractView (&A,&LDA);
    restricted_mv_ = rcp (new Epetra_MultiVector (View, *ResMap_, A, LDA, input_mv_->NumVectors ()));
  }
  return 0; // Success!
}/*end restrict_comm*/
TEUCHOS_UNIT_TEST(initial_condition_control, control)
{
  using Teuchos::RCP;
  using Teuchos::rcp;

  Teuchos::RCP<const Epetra_Comm> comm = Teuchos::rcp(new Epetra_MpiComm(MPI_COMM_WORLD));

  // setup mesh
  /////////////////////////////////////////////
  RCP<panzer_stk_classic::STK_Interface> mesh;
  {
    RCP<Teuchos::ParameterList> pl = rcp(new Teuchos::ParameterList);
    pl->set<int>("X Elements",2);
    pl->set<int>("Y Elements",2);
    pl->set<int>("X Blocks",2);
    pl->set<int>("Y Blocks",1);
    panzer_stk_classic::SquareQuadMeshFactory mesh_factory;
    mesh_factory.setParameterList(pl);
    mesh = mesh_factory.buildMesh(MPI_COMM_WORLD);
    mesh->writeToExodus("test.exo");
  }

  RCP<const shards::CellTopology> ct = mesh->getCellTopology("eblock-0_0");
  panzer::CellData cellData(4,ct);
  RCP<panzer::IntegrationRule> int_rule = rcp(new panzer::IntegrationRule(2,cellData));

  ICFieldDescriptor densDesc;
  densDesc.fieldName  = "DENSITY";
  densDesc.basisName  = "Const";
  densDesc.basisOrder = 0;

  ICFieldDescriptor condDesc;
  condDesc.fieldName  = "CONDUCTIVITY";
  condDesc.basisName  = "HGrad";
  condDesc.basisOrder = 1;

  RCP<panzer::PureBasis> const_basis = rcp(new panzer::PureBasis(densDesc.basisName,densDesc.basisOrder,cellData));
  RCP<panzer::PureBasis> hgrad_basis = rcp(new panzer::PureBasis(condDesc.basisName,condDesc.basisOrder,cellData));

  RCP<const panzer::FieldPattern> constFP = rcp(new panzer::Intrepid2FieldPattern(const_basis->getIntrepid2Basis()));
  RCP<const panzer::FieldPattern> hgradFP = rcp(new panzer::Intrepid2FieldPattern(hgrad_basis->getIntrepid2Basis()));

  // setup DOF manager
  /////////////////////////////////////////////
  RCP<panzer::ConnManager<int,int> > conn_manager = Teuchos::rcp(new panzer_stk_classic::STKConnManager<int>(mesh));
  RCP<panzer::DOFManager<int,int> > dofManager = rcp(new panzer::DOFManager<int,int>(conn_manager,MPI_COMM_WORLD));
  dofManager->addField(densDesc.fieldName, constFP);
  dofManager->addField(condDesc.fieldName, hgradFP);
  dofManager->buildGlobalUnknowns();

  Teuchos::RCP<panzer::EpetraLinearObjFactory<panzer::Traits,int> > elof
    = Teuchos::rcp(new panzer::EpetraLinearObjFactory<panzer::Traits,int>(comm.getConst(),dofManager));
  Teuchos::RCP<panzer::LinearObjFactory<panzer::Traits> > lof = elof;

  // setup worksets
  /////////////////////////////////////////////
  std::map<std::string,panzer::WorksetNeeds> needs;
  needs["eblock-0_0"].cellData = cellData;
  needs["eblock-0_0"].int_rules.push_back(int_rule);
  needs["eblock-0_0"].bases          = { const_basis, hgrad_basis};
  needs["eblock-0_0"].rep_field_name = { densDesc.fieldName, condDesc.fieldName};

  needs["eblock-1_0"].cellData = cellData;
  needs["eblock-1_0"].int_rules.push_back(int_rule);
  needs["eblock-1_0"].bases          = { const_basis, hgrad_basis};
  needs["eblock-1_0"].rep_field_name = { densDesc.fieldName, condDesc.fieldName};

  Teuchos::RCP<panzer_stk_classic::WorksetFactory> wkstFactory
    = Teuchos::rcp(new panzer_stk_classic::WorksetFactory(mesh)); // build STK workset factory
  Teuchos::RCP<panzer::WorksetContainer> wkstContainer             // attach it to a workset container (uses lazy evaluation)
    = Teuchos::rcp(new panzer::WorksetContainer(wkstFactory,needs));

  // setup field manager builder
  /////////////////////////////////////////////

  // Add in the application specific closure model factory
  panzer::ClosureModelFactory_TemplateManager<panzer::Traits> cm_factory;
  user_app::STKModelFactory_TemplateBuilder cm_builder;
  cm_factory.buildObjects(cm_builder);

  Teuchos::ParameterList user_data("User Data");

  Teuchos::ParameterList ic_closure_models("Initial Conditions");
  ic_closure_models.sublist("eblock-0_0").sublist(densDesc.fieldName).set<double>("Value",3.0);
  ic_closure_models.sublist("eblock-0_0").sublist(condDesc.fieldName).set<double>("Value",9.0);
  ic_closure_models.sublist("eblock-1_0").sublist(densDesc.fieldName).set<double>("Value",3.0);
  ic_closure_models.sublist("eblock-1_0").sublist(condDesc.fieldName).set<double>("Value",9.0);

  std::map<std::string,Teuchos::RCP<const shards::CellTopology> > block_ids_to_cell_topo;
  block_ids_to_cell_topo["eblock-0_0"] = mesh->getCellTopology("eblock-0_0");
  block_ids_to_cell_topo["eblock-1_0"] = mesh->getCellTopology("eblock-1_0");

  std::map<std::string,std::vector<ICFieldDescriptor> > block_ids_to_fields;
  block_ids_to_fields["eblock-0_0"] = {densDesc,condDesc};
  block_ids_to_fields["eblock-1_0"] = {densDesc,condDesc};

  int workset_size = 4;

  Teuchos::RCP<panzer::LinearObjContainer> loc = lof->buildLinearObjContainer();
  lof->initializeContainer(panzer::LinearObjContainer::X,*loc);
  Teuchos::RCP<panzer::EpetraLinearObjContainer> eloc = Teuchos::rcp_dynamic_cast<EpetraLinearObjContainer>(loc);
  Teuchos::RCP<Thyra::VectorBase<double> > vec = eloc->get_x_th();

  // this is the function under test
  panzer::setupControlInitialCondition(block_ids_to_cell_topo,
                                       block_ids_to_fields,
                                       *wkstContainer,
                                       *lof,cm_factory,ic_closure_models,user_data,
                                       workset_size,
                                       0.0, // t0
                                       vec);

  Teuchos::RCP<Epetra_Vector> x = eloc->get_x();
  out << x->GlobalLength() << " " << x->MyLength() << std::endl;
  for (int i=0; i < x->MyLength(); ++i) {
    double v = (*x)[i];
    TEST_ASSERT(v==3.0 || v==9.0);
  }
}
int main(int narg, char *arg[])
{
  using std::cout;
  using std::endl;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&narg,&arg);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  bool verbose = true;
  int verbosity = 1;

  bool testEpetra64 = true;

  // Matrix properties
  bool isHermitian = true;

  // Multivector properties
  std::string initvec = "random";

  // Eigenvalue properties
  std::string which = "SR";
  std::string method = "LOBPCG";
  std::string precond = "none";
  std::string ortho = "SVQB";
  bool lock = true;
  bool relconvtol = false;
  bool rellocktol = false;
  int nev = 5;

  // Block-Arnoldi properties
  int blockSize = -1;
  int numblocks = -1;
  int maxrestarts = -1;
  int maxiterations = -1;
  int extrablocks = 0;
  int gensize = 25; // Needs to be long long to test with > INT_MAX rows
  double tol = 1.0e-5;

  // Echo the command line
  if (MyPID == 0) {
    for (int i = 0; i < narg; i++) cout << arg[i] << " ";
    cout << endl;
  }

  // Command-line processing
  Teuchos::CommandLineProcessor cmdp(false,true);
  cmdp.setOption("Epetra64", "no-Epetra64", &testEpetra64,
                 "Force code to use Epetra64, even if the problem size does "
                 "not require it. (Epetra64 will be used automatically for "
                 "sufficiently large problems, or not used if Epetra does not have built in support.)");
  cmdp.setOption("gen",&gensize, "Generate a simple Laplacian matrix of size n.");
  cmdp.setOption("verbosity", &verbosity, "0=quiet, 1=low, 2=medium, 3=high.");
  cmdp.setOption("method",&method, "Solver method to use: LOBPCG, BD, BKS or IRTR.");
  cmdp.setOption("nev",&nev,"Number of eigenvalues to find.");
  cmdp.setOption("which",&which,"Targeted eigenvalues (SM, LM, SR, or LR).");
  cmdp.setOption("tol",&tol,"Solver convergence tolerance.");
  cmdp.setOption("blocksize",&blockSize,"Block size to use in solver.");
  cmdp.setOption("numblocks",&numblocks,"Number of blocks to allocate.");
  cmdp.setOption("extrablocks",&extrablocks, "Number of extra NEV blocks to allocate in BKS.");
  cmdp.setOption("maxrestarts",&maxrestarts, "Maximum number of restarts in BKS or BD.");
  cmdp.setOption("maxiterations",&maxiterations, "Maximum number of iterations in LOBPCG.");
  cmdp.setOption("lock","no-lock",&lock, "Use Locking parameter (deflate for converged eigenvalues)");
  cmdp.setOption("initvec", &initvec, "Initial vectors (random, unit, zero, random2)");
  cmdp.setOption("ortho", &ortho, "Orthogonalization method (DGKS, SVQB, TSQR).");
  cmdp.setOption("relative-convergence-tol","no-relative-convergence-tol", &relconvtol,
                 "Use Relative convergence tolerance (normalized by eigenvalue)");
  cmdp.setOption("relative-lock-tol","no-relative-lock-tol",&rellocktol,
                 "Use Relative locking tolerance (normalized by eigenvalue)");
  if (cmdp.parse(narg,arg)!=Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
    FINALIZE;
    return -1;
  }

  // Print the most essential options (not in the MyPL parameters later)
  verbose = (verbosity>0);
  if (verbose && MyPID==0){
    cout << "verbosity = " << verbosity << endl;
    cout << "method = " << method << endl;
    cout << "initvec = " << initvec << endl;
    cout << "nev = " << nev << endl;
  }

  // We need blockSize to be set so we can allocate memory with it.
  // If it wasn't set on the command line, set it to the Anasazi defaults here.
  // Defaults are those given in the documentation.
  if (blockSize < 0)
    if (method == "BKS")
      blockSize = 1;
    else // other methods: LOBPCG, BD, IRTR
      blockSize = nev;

  // If Epetra was built without 64-bit global indices, the Epetra64 path cannot be tested.
#ifdef EPETRA_NO_64BIT_GLOBAL_INDICES
  if (testEpetra64) testEpetra64 = false;
#endif

  Epetra_CrsMatrix *K = NULL;

  // Read matrix from file or generate a matrix
  if (gensize > 0 && testEpetra64) {
    // Generate the matrix using long long for global indices
    build_simple_matrix<long long>(Comm, K, (long long)gensize, true, verbose);
  }
  else if (gensize) {
    // Generate the matrix using int for global indices
    build_simple_matrix<int>(Comm, K, gensize, false, verbose);
  }
  else {
    printf("YOU SHOULDN'T BE HERE \n");
    exit(-1);
  }

  if (verbose && (K->NumGlobalRows64() < TINYMATRIX)) {
    if (MyPID == 0) cout << "Input matrix: " << endl;
    K->Print(cout);
  }
  Teuchos::RCP<Epetra_CrsMatrix> rcpK = Teuchos::rcp( K );

  // Set Anasazi verbosity level
  if (MyPID == 0) cout << "Setting up the problem..." << endl;

  int anasazi_verbosity = Anasazi::Errors + Anasazi::Warnings;
  if (verbosity >= 1)  // low
    anasazi_verbosity += Anasazi::FinalSummary + Anasazi::TimingDetails;
  if (verbosity >= 2)  // medium
    anasazi_verbosity += Anasazi::IterationDetails;
  if (verbosity >= 3)  // high
    anasazi_verbosity += Anasazi::StatusTestDetails + Anasazi::OrthoDetails + Anasazi::Debug;

  // Create parameter list to pass into solver
  Teuchos::ParameterList MyPL;
  MyPL.set("Verbosity", anasazi_verbosity);
  MyPL.set("Which", which);
  MyPL.set("Convergence Tolerance", tol);
  MyPL.set("Relative Convergence Tolerance", relconvtol);
  MyPL.set("Orthogonalization", ortho);

  // For the following, use Anasazi's defaults unless explicitly specified.
  if (numblocks > 0) MyPL.set( "Num Blocks", numblocks);
  if (maxrestarts > 0) MyPL.set( "Maximum Restarts", maxrestarts);
  if (maxiterations > 0) MyPL.set( "Maximum Iterations", maxiterations);
  if (blockSize > 0) MyPL.set( "Block Size", blockSize );

  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, MV> MVT;
  typedef Anasazi::OperatorTraits<double, MV, OP> OPT;

  // Create the eigenproblem to be solved.
  // Dummy initial vectors - will be set later.
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp(new Epetra_MultiVector(K->Map(), blockSize));

  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem;
  MyProblem = Teuchos::rcp(new Anasazi::BasicEigenproblem<double, MV, OP>(rcpK, ivec) );

  // Inform the eigenproblem whether K is Hermitian
  MyProblem->setHermitian(isHermitian);

  // Set the number of eigenvalues requested
  MyProblem->setNEV(nev);

  // Loop to solve the same eigenproblem numtrial times (different initial vectors)
  int numfailed = 0;
  int iter = 0;
  double solvetime = 0;

  // Set random seed to have consistent initial vectors between
  // experiments.  Different seed in each loop iteration.
  ivec->SetSeed(2*(MyPID) +1); // Odd seed

  // Set up initial vectors
  // Using random values as the initial guess.
  if (initvec == "random"){
    MVT::MvRandom(*ivec);
  }
  else if (initvec == "zero"){
    // All zero initial vector should be essentially the same,
    // but appears slightly worse in practice.
    ivec->PutScalar(0.);
  }
  else if (initvec == "unit"){
    // Orthogonal unit initial vectors.
    ivec->PutScalar(0.);
    for (int i = 0; i < blockSize; i++)
      ivec->ReplaceGlobalValue(i,i,1.);
  }
  else if (initvec == "random2"){
    // Partially random but orthogonal (0,1) initial vectors.
    // ivec(i,*) is zero in all but one column (for each i)
    // Inefficient implementation but this is only done once...
    double rowmax;
    int col;
    ivec->Random();
    for (int i = 0; i < ivec->MyLength(); i++){
      rowmax = -1;
      col = -1;
      for (int j = 0; j < blockSize; j++){
        // Make ivec(i,j) = 1 for largest random value in row i
        if ((*ivec)[j][i] > rowmax){
          rowmax = (*ivec)[j][i];
          col = j;
        }
        ivec->ReplaceMyValue(i,j,0.);
      }
      ivec->ReplaceMyValue(i,col,1.);
    }
  }
  else
    cout << "ERROR: Unknown value for initial vectors." << endl;

  if (verbose && (ivec->GlobalLength64() < TINYMATRIX))
    ivec->Print(std::cout);

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (verbose && MyPID == 0) {
      cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
    FINALIZE;
    return -1;
  }

  Teuchos::RCP<Anasazi::SolverManager<double, MV, OP> > MySolverMgr;

  if (method == "BKS") {
    // Initialize the Block Arnoldi solver
    MyPL.set("Extra NEV Blocks", extrablocks);
    MySolverMgr = Teuchos::rcp( new Anasazi::BlockKrylovSchurSolMgr<double, MV, OP>(MyProblem,MyPL) );
  }
  else if (method == "BD") {
    // Initialize the Block Davidson solver
    MyPL.set("Use Locking", lock);
    MyPL.set("Relative Locking Tolerance", rellocktol);
    MySolverMgr = Teuchos::rcp( new Anasazi::BlockDavidsonSolMgr<double, MV, OP>(MyProblem, MyPL) );
  }
  else if (method == "LOBPCG") {
    // Initialize the LOBPCG solver
    MyPL.set("Use Locking", lock);
    MyPL.set("Relative Locking Tolerance", rellocktol);
    MySolverMgr = Teuchos::rcp( new Anasazi::LOBPCGSolMgr<double, MV, OP>(MyProblem, MyPL) );
  }
  else if (method == "IRTR") {
    // Initialize the IRTR solver
    MySolverMgr = Teuchos::rcp( new Anasazi::RTRSolMgr<double, MV, OP>(MyProblem, MyPL) );
  }
  else
    cout << "Unknown solver method!" << endl;

  if (verbose && MyPID==0) MyPL.print(cout);

  // Solve the problem to the specified tolerances or length
  if (MyPID == 0) cout << "Beginning the " << method << " solve..." << endl;

  Anasazi::ReturnType returnCode = MySolverMgr->solve();
  if (returnCode != Anasazi::Converged && MyPID==0) {
    ++numfailed;
    cout << "Anasazi::SolverManager::solve() returned unconverged." << endl;
  }
  iter = MySolverMgr->getNumIters();
  solvetime = (MySolverMgr->getTimers()[0])->totalElapsedTime();

  if (MyPID == 0) {
    cout << "Iterations in this solve: " << iter << endl;
    cout << "Solve complete; beginning post-processing..."<< endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;
  std::vector<int> index = sol.index;
  int numev = sol.numVecs;

  // Compute residuals.
  if (numev > 0) {
    Teuchos::LAPACK<int,double> lapack;
    std::vector<double> normR(numev);

    if (MyProblem->isHermitian()) {
      // Get storage
      Epetra_MultiVector Kevecs(K->Map(),numev);
      Teuchos::RCP<Epetra_MultiVector> Mevecs;
      Teuchos::SerialDenseMatrix<int,double> B(numev,numev);
      B.putScalar(0.0);
      for (int i=0; i<numev; i++) {B(i,i) = evals[i].realpart;}

      // Compute A*evecs
      OPT::Apply( *rcpK, *evecs, Kevecs );
      Mevecs = evecs;

      // Compute A*evecs - lambda*evecs and its norm
      MVT::MvTimesMatAddMv( -1.0, *Mevecs, B, 1.0, Kevecs );
      MVT::MvNorm( Kevecs, normR );

      // Scale the norms by the eigenvalue if relative convergence tol was used
      if (relconvtol) {
        for (int i=0; i<numev; i++)
          normR[i] /= Teuchos::ScalarTraits<double>::magnitude(evals[i].realpart);
      }
    }
    else {
      printf("The problem isn't Hermitian; sorry.\n");
      exit(-1);
    }

    if (verbose && MyPID==0) {
      cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      cout<<endl<< "Actual Results"<<endl;
      if (MyProblem->isHermitian()) {
        cout<< std::setw(16) << "Eigenvalue "
            << std::setw(20) << "Direct Residual"
            << (relconvtol?" (normalized by eigenvalue)":" (no normalization)")
            << endl;
        cout<<"--------------------------------------------------------"<<endl;
        for (int i=0; i<numev; i++) {
          cout<< "EV" << i << std::setw(16) << evals[i].realpart
              << std::setw(20) << normR[i] << endl;
        }
        cout<<"--------------------------------------------------------"<<endl;
      }
      else {
        cout<< std::setw(16) << "Real Part"
            << std::setw(16) << "Imag Part"
            << std::setw(20) << "Direct Residual"<< endl;
        cout<<"--------------------------------------------------------"<<endl;
        for (int i=0; i<numev; i++) {
          cout<< std::setw(16) << evals[i].realpart
              << std::setw(16) << evals[i].imagpart
              << std::setw(20) << normR[i] << endl;
        }
        cout<<"--------------------------------------------------------"<<endl;
      }
    }
  }

  // Summarize iteration counts and solve time
  if (MyPID == 0) {
    cout << endl;
    cout << "DRIVER SUMMARY" << endl;
    cout << "Failed to converge: " << numfailed << endl;
    cout << "Solve time: " << solvetime << endl;
  }

  FINALIZE;

  if (numfailed) {
    if (MyPID == 0) {
      cout << "End Result: TEST FAILED" << endl;
    }
    return -1;
  }

  //
  // Default return value
  //
  if (MyPID == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }
  return 0;
}
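The Hermitian branch above checks each returned eigenpair directly: it applies K to the eigenvectors, subtracts evecs*B with B = diag(lambda_0, ..., lambda_{numev-1}) via MvTimesMatAddMv, and takes column norms. In other words, for each eigenpair (lambda_i, x_i) it reports

  normR[i] = || K x_i - lambda_i x_i ||_2

optionally divided by |lambda_i| when the relative convergence tolerance is in effect, so the printed "Direct Residual" column can be compared against the solver's requested tolerance.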
TEUCHOS_UNIT_TEST(PdQuickGridDiscretization_MPI_np2, SimpleTensorProductMeshTest) {

  Teuchos::RCP<Epetra_Comm> comm;
  comm = rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
  int numProcs = comm->NumProc();
  int rank = comm->MyPID();

  TEST_COMPARE(numProcs, ==, 2);
  if(numProcs != 2){
    std::cerr << "Unit test runtime ERROR: utPeridigm_PdQuickGridDiscretization_MPI_np2 only makes sense on 2 processors" << std::endl;
    return;
  }

  RCP<ParameterList> discParams = rcp(new ParameterList);

  // create a 2x2x2 discretization
  // specify a spherical neighbor search with the horizon a tad longer than the mesh spacing
  discParams->set("Type", "PdQuickGrid");
  discParams->set("NeighborhoodType", "Spherical");
  ParameterList& quickGridParams = discParams->sublist("TensorProduct3DMeshGenerator");
  quickGridParams.set("Type", "PdQuickGrid");
  quickGridParams.set("X Origin", 0.0);
  quickGridParams.set("Y Origin", 0.0);
  quickGridParams.set("Z Origin", 0.0);
  quickGridParams.set("X Length", 1.0);
  quickGridParams.set("Y Length", 1.0);
  quickGridParams.set("Z Length", 1.0);
  quickGridParams.set("Number Points X", 2);
  quickGridParams.set("Number Points Y", 2);
  quickGridParams.set("Number Points Z", 2);

  // initialize the horizon manager and set the horizon to 0.501
  ParameterList blockParameterList;
  ParameterList& blockParams = blockParameterList.sublist("My Block");
  blockParams.set("Block Names", "block_1");
  blockParams.set("Horizon", 0.501);
  PeridigmNS::HorizonManager::self().loadHorizonInformationFromBlockParameters(blockParameterList);

  // create the discretization
  RCP<PdQuickGridDiscretization> discretization =
    rcp(new PdQuickGridDiscretization(comm, discParams));

  // sanity check: calling with a dimension other than 1 or 3 should throw an exception
  TEST_THROW(discretization->getGlobalOwnedMap(0), Teuchos::Exceptions::InvalidParameter);
  TEST_THROW(discretization->getGlobalOwnedMap(2), Teuchos::Exceptions::InvalidParameter);
  TEST_THROW(discretization->getGlobalOwnedMap(4), Teuchos::Exceptions::InvalidParameter);

  // basic checks on the 1d map
  Teuchos::RCP<const Epetra_BlockMap> map = discretization->getGlobalOwnedMap(1);
  TEST_ASSERT(map->NumGlobalElements() == 8);
  TEST_ASSERT(map->NumMyElements() == 4);
  TEST_ASSERT(map->ElementSize() == 1);
  TEST_ASSERT(map->IndexBase() == 0);
  TEST_ASSERT(map->UniqueGIDs() == true);
  int* myGlobalElements = map->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
  }

  // check the 1d overlap map
  // for this simple discretization, everything should be ghosted on both processors
  Teuchos::RCP<const Epetra_BlockMap> overlapMap = discretization->getGlobalOverlapMap(1);
  TEST_ASSERT(overlapMap->NumGlobalElements() == 16);
  TEST_ASSERT(overlapMap->NumMyElements() == 8);
  TEST_ASSERT(overlapMap->ElementSize() == 1);
  TEST_ASSERT(overlapMap->IndexBase() == 0);
  TEST_ASSERT(overlapMap->UniqueGIDs() == false);
  myGlobalElements = overlapMap->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
    TEST_ASSERT(myGlobalElements[4] == 1);
    TEST_ASSERT(myGlobalElements[5] == 3);
    TEST_ASSERT(myGlobalElements[6] == 5);
    TEST_ASSERT(myGlobalElements[7] == 7);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
    TEST_ASSERT(myGlobalElements[4] == 0);
    TEST_ASSERT(myGlobalElements[5] == 2);
    TEST_ASSERT(myGlobalElements[6] == 4);
    TEST_ASSERT(myGlobalElements[7] == 6);
  }

  // same checks for the 3d map
  map = discretization->getGlobalOwnedMap(3);
  TEST_ASSERT(map->NumGlobalElements() == 8);
  TEST_ASSERT(map->NumMyElements() == 4);
  TEST_ASSERT(map->ElementSize() == 3);
  TEST_ASSERT(map->IndexBase() == 0);
  TEST_ASSERT(map->UniqueGIDs() == true);
  myGlobalElements = map->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
  }

  // check the 3d overlap map
  // for this simple discretization, everything should be ghosted on both processors
  overlapMap = discretization->getGlobalOverlapMap(3);
  TEST_ASSERT(overlapMap->NumGlobalElements() == 16);
  TEST_ASSERT(overlapMap->NumMyElements() == 8);
  TEST_ASSERT(overlapMap->ElementSize() == 3);
  TEST_ASSERT(overlapMap->IndexBase() == 0);
  TEST_ASSERT(overlapMap->UniqueGIDs() == false);
  myGlobalElements = overlapMap->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
    TEST_ASSERT(myGlobalElements[4] == 1);
    TEST_ASSERT(myGlobalElements[5] == 3);
    TEST_ASSERT(myGlobalElements[6] == 5);
    TEST_ASSERT(myGlobalElements[7] == 7);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
    TEST_ASSERT(myGlobalElements[4] == 0);
    TEST_ASSERT(myGlobalElements[5] == 2);
    TEST_ASSERT(myGlobalElements[6] == 4);
    TEST_ASSERT(myGlobalElements[7] == 6);
  }

  // check the bond map
  // the horizon was chosen such that each point should have three neighbors
  // note that if the NeighborhoodType parameter is not set to Spherical, this will fail
  Teuchos::RCP<const Epetra_BlockMap> bondMap = discretization->getGlobalBondMap();
  TEST_ASSERT(bondMap->NumGlobalElements() == 8);
  TEST_ASSERT(bondMap->NumMyElements() == 4);
  TEST_ASSERT(bondMap->IndexBase() == 0);
  TEST_ASSERT(bondMap->UniqueGIDs() == true);
  myGlobalElements = bondMap->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
  }
  TEST_ASSERT(discretization->getNumBonds() == 4*3);

  // check the initial positions
  // all three coordinates are contained in a single vector
  Teuchos::RCP<Epetra_Vector> initialX = discretization->getInitialX();
  TEST_ASSERT(initialX->MyLength() == 4*3);
  TEST_ASSERT(initialX->GlobalLength() == 8*3);
  if(rank == 0){
    TEST_FLOATING_EQUALITY((*initialX)[0], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[1], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[2], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[3], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[4], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[5], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[6], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[7], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[8], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[9], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[10], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[11], 0.75, 1.0e-16);
  }
  if(rank == 1){
    TEST_FLOATING_EQUALITY((*initialX)[0], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[1], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[2], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[3], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[4], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[5], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[6], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[7], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[8], 0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[9], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[10], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[11], 0.25, 1.0e-16);
  }

  // check cell volumes
  Teuchos::RCP<Epetra_Vector> volume = discretization->getCellVolume();
  TEST_ASSERT(volume->MyLength() == 4);
  TEST_ASSERT(volume->GlobalLength() == 8);
  for(int i=0 ; i<volume->MyLength() ; ++i)
    TEST_FLOATING_EQUALITY((*volume)[i], 0.125, 1.0e-16);

  // check the neighbor lists
  Teuchos::RCP<PeridigmNS::NeighborhoodData> neighborhoodData = discretization->getNeighborhoodData();
  TEST_ASSERT(neighborhoodData->NumOwnedPoints() == 4);
  int* ownedIds = neighborhoodData->OwnedIDs();
  TEST_ASSERT(ownedIds[0] == 0);
  TEST_ASSERT(ownedIds[1] == 1);
  TEST_ASSERT(ownedIds[2] == 2);
  TEST_ASSERT(ownedIds[3] == 3);
  TEST_ASSERT(neighborhoodData->NeighborhoodListSize() == 16);
  int* neighborhood = neighborhoodData->NeighborhoodList();
  int* neighborhoodPtr = neighborhoodData->NeighborhoodPtr();
  // remember, these are local IDs on each processor,
  // which includes both owned and ghost nodes (confusing!)
  if(rank == 0){
    TEST_ASSERT(neighborhoodPtr[0] == 0);
    TEST_ASSERT(neighborhood[0] == 3);
    TEST_ASSERT(neighborhood[1] == 4);
    TEST_ASSERT(neighborhood[2] == 1);
    TEST_ASSERT(neighborhood[3] == 2);
    TEST_ASSERT(neighborhoodPtr[1] == 4);
    TEST_ASSERT(neighborhood[4] == 3);
    TEST_ASSERT(neighborhood[5] == 0);
    TEST_ASSERT(neighborhood[6] == 5);
    TEST_ASSERT(neighborhood[7] == 3);
    TEST_ASSERT(neighborhoodPtr[2] == 8);
    TEST_ASSERT(neighborhood[8] == 3);
    TEST_ASSERT(neighborhood[9] == 0);
    TEST_ASSERT(neighborhood[10] == 6);
    TEST_ASSERT(neighborhood[11] == 3);
    TEST_ASSERT(neighborhoodPtr[3] == 12);
    TEST_ASSERT(neighborhood[12] == 3);
    TEST_ASSERT(neighborhood[13] == 1);
    TEST_ASSERT(neighborhood[14] == 2);
    TEST_ASSERT(neighborhood[15] == 7);
  }
  if(rank == 1){
    TEST_ASSERT(neighborhoodPtr[0] == 0);
    TEST_ASSERT(neighborhood[0] == 3);
    TEST_ASSERT(neighborhood[1] == 2);
    TEST_ASSERT(neighborhood[2] == 6);
    TEST_ASSERT(neighborhood[3] == 1);
    TEST_ASSERT(neighborhoodPtr[1] == 4);
    TEST_ASSERT(neighborhood[4] == 3);
    TEST_ASSERT(neighborhood[5] == 3);
    TEST_ASSERT(neighborhood[6] == 0);
    TEST_ASSERT(neighborhood[7] == 7);
    TEST_ASSERT(neighborhoodPtr[2] == 8);
    TEST_ASSERT(neighborhood[8] == 3);
    TEST_ASSERT(neighborhood[9] == 4);
    TEST_ASSERT(neighborhood[10] == 3);
    TEST_ASSERT(neighborhood[11] == 0);
    TEST_ASSERT(neighborhoodPtr[3] == 12);
    TEST_ASSERT(neighborhood[12] == 3);
    TEST_ASSERT(neighborhood[13] == 2);
    TEST_ASSERT(neighborhood[14] == 5);
    TEST_ASSERT(neighborhood[15] == 1);
  }
}
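// ---------------------------------------------------------------------------
// The neighborhood checks above exercise Peridigm's packed, CSR-like list
// layout: for each owned point p, neighborhoodPtr[p] indexes the slot holding
// that point's neighbor count, immediately followed by that many local IDs
// (which index into the overlap map, so they may refer to ghosted points).
// A minimal sketch of walking that layout; "printNeighborhoods" is a
// hypothetical helper, not part of PeridigmNS.
#include <iostream>

void printNeighborhoods(int numOwnedPoints,
                        const int* neighborhoodPtr,
                        const int* neighborhoodList)
{
  for (int p = 0; p < numOwnedPoints; ++p) {
    const int* entry = neighborhoodList + neighborhoodPtr[p];
    const int numNeighbors = *entry++;   // first slot holds the neighbor count
    std::cout << "point " << p << ":";
    for (int n = 0; n < numNeighbors; ++n)
      std::cout << " " << entry[n];      // local ID of the n-th neighbor
    std::cout << "\n";
  }
}
// With the rank-0 data asserted above (counts of 3 at offsets 0, 4, 8, 12),
// this would print "point 0: 4 1 2", "point 1: 0 5 3", and so on.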
TEUCHOS_UNIT_TEST(initial_condition_builder2, block_structure) {
  using Teuchos::RCP;

  panzer_stk::STK_ExodusReaderFactory mesh_factory;
  Teuchos::RCP<user_app::MyFactory> eqset_factory = Teuchos::rcp(new user_app::MyFactory);
  user_app::BCFactory bc_factory;
  const std::size_t workset_size = 20;

  panzer::FieldManagerBuilder fmb;

  // setup mesh
  /////////////////////////////////////////////
  RCP<panzer_stk::STK_Interface> mesh;
  {
    RCP<Teuchos::ParameterList> pl = rcp(new Teuchos::ParameterList);
    pl->set("File Name","block-decomp.exo");
    mesh_factory.setParameterList(pl);
    mesh = mesh_factory.buildMesh(MPI_COMM_WORLD);
    mesh->writeToExodus("test.exo");
  }

  // setup physics blocks
  /////////////////////////////////////////////
  Teuchos::RCP<Teuchos::ParameterList> ipb = Teuchos::parameterList("Physics Blocks");
  std::vector<panzer::BC> bcs;
  std::vector<Teuchos::RCP<panzer::PhysicsBlock> > physics_blocks;
  {
    testInitialzation_blockStructure(ipb, bcs);

    std::map<std::string,std::string> block_ids_to_physics_ids;
    block_ids_to_physics_ids["eblock-0_0"] = "PB A";
    block_ids_to_physics_ids["eblock-1_0"] = "PB B";

    std::map<std::string,Teuchos::RCP<const shards::CellTopology> > block_ids_to_cell_topo;
    block_ids_to_cell_topo["eblock-0_0"] = mesh->getCellTopology("eblock-0_0");
    block_ids_to_cell_topo["eblock-1_0"] = mesh->getCellTopology("eblock-1_0");

    Teuchos::RCP<panzer::GlobalData> gd = panzer::createGlobalData();
    int default_integration_order = 1;

    panzer::buildPhysicsBlocks(block_ids_to_physics_ids,
                               block_ids_to_cell_topo,
                               ipb,
                               default_integration_order,
                               workset_size,
                               eqset_factory,
                               gd,
                               false,
                               physics_blocks);
  }

  // setup worksets
  /////////////////////////////////////////////
  Teuchos::RCP<panzer_stk::WorksetFactory> wkstFactory
     = Teuchos::rcp(new panzer_stk::WorksetFactory(mesh)); // build STK workset factory
  Teuchos::RCP<panzer::WorksetContainer> wkstContainer     // attach it to a workset container (uses lazy evaluation)
     = Teuchos::rcp(new panzer::WorksetContainer(wkstFactory,physics_blocks,workset_size));

  // get vector of element blocks
  std::vector<std::string> elementBlocks;
  mesh->getElementBlockNames(elementBlocks);

  // build side worksets for the boundary conditions from the container
  std::map<panzer::BC,Teuchos::RCP<std::map<unsigned,panzer::Workset> >,panzer::LessBC> bc_worksets;
  panzer::getSideWorksetsFromContainer(*wkstContainer,bcs,bc_worksets);

  // setup DOF manager
  /////////////////////////////////////////////
  const Teuchos::RCP<panzer::ConnManager<int,int> > conn_manager
     = Teuchos::rcp(new panzer_stk::STKConnManager<int>(mesh));

  Teuchos::RCP<const panzer::UniqueGlobalIndexerFactory<int,int,int,int> > indexerFactory
     = Teuchos::rcp(new panzer::DOFManagerFactory<int,int>);
  const Teuchos::RCP<panzer::UniqueGlobalIndexer<int,int> > dofManager
     = indexerFactory->buildUniqueGlobalIndexer(Teuchos::opaqueWrapper(MPI_COMM_WORLD),physics_blocks,conn_manager);

  // and the linear object factory
  Teuchos::RCP<const Teuchos::MpiComm<int> > tComm = Teuchos::rcp(new Teuchos::MpiComm<int>(MPI_COMM_WORLD));
  Teuchos::RCP<panzer::EpetraLinearObjFactory<panzer::Traits,int> > elof
     = Teuchos::rcp(new panzer::EpetraLinearObjFactory<panzer::Traits,int>(tComm.getConst(),dofManager));
  Teuchos::RCP<panzer::LinearObjFactory<panzer::Traits> > lof = elof;

  // setup field manager builder
  /////////////////////////////////////////////

  // Add in the application-specific closure model factory
  panzer::ClosureModelFactory_TemplateManager<panzer::Traits> cm_factory;
  user_app::STKModelFactory_TemplateBuilder cm_builder;
  cm_factory.buildObjects(cm_builder);

  Teuchos::ParameterList closure_models("Closure Models");
  closure_models.sublist("solid").sublist("SOURCE_TEMPERATURE").set<double>("Value",1.0);
  closure_models.sublist("solid").sublist("SOURCE_ELECTRON_TEMPERATURE").set<double>("Value",1.0);
  closure_models.sublist("ion solid").sublist("SOURCE_ION_TEMPERATURE").set<double>("Value",1.0);

  Teuchos::ParameterList user_data("User Data");
  user_data.sublist("Panzer Data").set("Mesh", mesh);
  user_data.sublist("Panzer Data").set("DOF Manager", dofManager);
  user_data.sublist("Panzer Data").set("Linear Object Factory", lof);

  fmb.setWorksetContainer(wkstContainer);
  fmb.setupVolumeFieldManagers(physics_blocks,cm_factory,closure_models,*elof,user_data);
  fmb.setupBCFieldManagers(bcs,physics_blocks,*eqset_factory,cm_factory,bc_factory,closure_models,*elof,user_data);

  Teuchos::ParameterList ic_closure_models("Initial Conditions");
  ic_closure_models.sublist("eblock-0_0").sublist("TEMPERATURE").set<double>("Value",3.0);
  ic_closure_models.sublist("eblock-0_0").sublist("ELECTRON_TEMPERATURE").set<double>("Value",3.0);
  ic_closure_models.sublist("eblock-1_0").sublist("TEMPERATURE").set<double>("Value",3.0);
  ic_closure_models.sublist("eblock-1_0").sublist("ION_TEMPERATURE").set<double>("Value",3.0);

  std::map<std::string, Teuchos::RCP< PHX::FieldManager<panzer::Traits> > > phx_ic_field_managers;
  panzer::setupInitialConditionFieldManagers(*wkstContainer,
                                             physics_blocks,
                                             cm_factory,
                                             ic_closure_models,
                                             *elof,
                                             user_data,
                                             true,
                                             "initial_condition_test",
                                             phx_ic_field_managers);

  Teuchos::RCP<panzer::LinearObjContainer> loc = elof->buildLinearObjContainer();
  elof->initializeContainer(panzer::EpetraLinearObjContainer::X,*loc);
  Teuchos::RCP<panzer::EpetraLinearObjContainer> eloc = Teuchos::rcp_dynamic_cast<EpetraLinearObjContainer>(loc);
  eloc->get_x()->PutScalar(0.0);

  panzer::evaluateInitialCondition(*wkstContainer, phx_ic_field_managers, loc, *elof, 0.0);

  // every locally owned entry of the solution vector should equal the prescribed value, 3.0
  Teuchos::RCP<Epetra_Vector> x = eloc->get_x();
  out << x->GlobalLength() << " " << x->MyLength() << std::endl;
  for (int i=0; i < x->MyLength(); ++i)
    TEST_FLOATING_EQUALITY((*x)[i], 3.0, 1.0e-10);
}
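// ---------------------------------------------------------------------------
// The loop above checks only the locally owned entries on each rank. A
// minimal alternative sketch, assuming the same uniform initial condition:
// shift the vector by the expected value and take the global infinity norm,
// which reduces over all ranks in a single collective call.
// "initialConditionIsUniform" is a hypothetical helper, not part of Panzer.
#include "Epetra_Vector.h"

bool initialConditionIsUniform(const Epetra_Vector& x, double expected, double tol)
{
  Epetra_Vector shifted(x);                  // deep copy of the solution vector
  for (int i = 0; i < shifted.MyLength(); ++i)
    shifted[i] -= expected;                  // local shift by the expected value
  double norm = 0.0;
  shifted.NormInf(&norm);                    // collective: max over all ranks
  return norm <= tol;
}
// Usage in the test above would be, e.g.:
//   TEST_ASSERT(initialConditionIsUniform(*x, 3.0, 1.0e-10));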