int main(int argc, char* argv[])
{

#ifdef _MPI
	  int myrank; 
	  MPI_Init( &argc, &argv ); 
	  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
#endif  

	  GRID grid("params");

	  TMT tmt("params");

#ifdef _MPI
	  if (myrank != 0)
	  {
	    // MPI SLAVE

	    tmt.SetGrid(&grid);
               
	    tmt.Slave(myrank);
	  }
	  else 
	  {
  
	    // MPI MASTER  
#endif 
            Result result(&grid);
	    grid.assign_omega(result.omega);
            
            InitDOS(DOStypes::SemiCircle, 0.5, grid.get_N(), result.omega, result.NIDOS);

            //for(double T=0.02; T>0.009; T -= 0.02)
            double T=0.02;
            double U=4.0;
            //for(double U=3.0; U<4.1; U+=0.5)
            {
	      InitDelta(DOStypes::SemiCircle, grid.get_N(), 0.5, 0.0, 0.01, 0.5, result.omega, result.Delta);
            
              //for(double W=0.6; W<4.1; W+=0.2)
              double W=0;
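              // the oversized dmu step below makes the loop body run exactly once, at dmu = 0,
              // i.e. mu = U/2 (half filling); W = 0 above corresponds to the clean, disorder-free case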
              for(double dmu=0.0; dmu<2.0; dmu+=1000.05)
              {   
                //InitDelta(DOStypes::SemiCircle, grid.get_N(), 0.5, 0.0, 0.01, 0.5, result.omega, result.Delta);
                
                result.mu = U/2.0+dmu;
            
                result.mu0 = 0;

                tmt.SetWDN(W, Distributions::Uniform, (W>0.0) ? (int) (W/0.05) : 1);
                tmt.SetParams(U, T, 0.5);             
                bool failed = tmt.Run(&result);

                if (failed) 
                  printf ("==== ERROR ==== Solution is INVALID\n");
        
                char FN[50];
                sprintf(FN, "TMT.T%.3f.U%.3f.dmu%.3f%s", tmt.get_T(), tmt.get_U(), dmu, 
                                                       (failed) ? ".FAILED" : "" );
	        result.PrintResult(FN);
             }
           }


#ifdef _MPI
	    tmt.SendExitSignal();   
	  } //end MPI MASTER

	  MPI_Finalize();
#endif

	  return 0;
}
Example #2
int main(int argc, char* argv[])
{

#ifdef _MPI
	  int myrank; 
	  MPI_Init( &argc, &argv ); 
	  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
#endif  

	  GRID grid("params");

	  TMT tmt("params");

#ifdef _MPI
	  if (myrank != 0)
	  {
	    // MPI SLAVE

	    tmt.SetGrid(&grid);
               
	    tmt.Slave(myrank);
	  }
	  else 
	  {
  
	    // MPI MASTER  
#endif 
            Result result(&grid);
	    grid.assign_omega(result.omega);
            
            InitDOS(DOStypes::SemiCircle, 0.5, grid.get_N(), result.omega, result.NIDOS);
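            // the oversized T step makes the temperature loop run exactly once (T = 0.05, matching the
            // hard-coded lambdaUW.T0.050 file name below), while U and W are swept on a grid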

            for(double T=0.05; T<0.06; T += 10000.0)
            for(double U=3.00; U<=7.1; U+=0.5)
            {
	      InitDelta(DOStypes::SemiCircle, grid.get_N(), 0.5, 0.0, 0.01, 0.5, result.omega, result.Delta);
              
              for(double W=0.5; W<=7.1; W+=0.5)
              {   
                //InitDelta(DOStypes::SemiCircle, grid.get_N(), 0.5, 0.0, 0.01, 0.5, result.omega, result.Delta);
                
                result.mu = U/2.0;
            
                result.mu0 = 0;

                // set the disorder: width W with a uniform distribution; the last argument (presumably
                // the number of disorder levels) grows with W, is doubled for U > 2.8, and is 1 when W = 0
                tmt.SetWDN(W, Distributions::Uniform, (W>0.0) ? 20 + (int) ( ((U>2.8) ? 2.0 : 1.0) * W / 0.1 ) : 1);
                tmt.SetParams(U, T, 0.5);

                char OFN[300];
                sprintf(OFN,".T%.3f.U%.3f.W%.3f",T,U,W);             
                tmt.LC->SetOutputFileName(OFN);
                tmt.LC->SetNMT(grid.get_N(), 1000, T);
                //printf("grid.get_N(): %d, half: %d\n",grid.get_N(),grid.get_N()/2) ;
                //tmt.LC->SetOffset(grid.get_N()/2);

                bool failed = tmt.Run(&result);

                FILE* f = fopen("lambdaUW.T0.050", "a");
                if (f != NULL) // guard against a failed open before appending the lambdas for this (U,W) point
                {
                  fprintf(f, "%.15le %.15le %.15le %.15le\n", U, W, tmt.LC->lambdas[0], tmt.LC->continued_lambdas[0]);
                  fclose(f);
                }
                 
                if (failed) 
                  printf ("==== ERROR ==== Solution is INVALID\n");
        
                char FN[50];
                sprintf(FN, "TMT.T%.3f.U%.3f.W%.3f%s", tmt.get_T(), tmt.get_U(), tmt.get_W(), 
                                                       (failed) ? ".FAILED" : "" );
	        result.PrintResult(FN);
             }
           }


#ifdef _MPI
	    tmt.SendExitSignal();   
	  } //end MPI MASTER

	  MPI_Finalize();
#endif

	  return 0;
}
Example #3
SurfaceScalarJump<EvalT, Traits>::
SurfaceScalarJump(const Teuchos::ParameterList& p,
                  const Teuchos::RCP<Albany::Layouts>& dl) :
  cubature      (p.get<Teuchos::RCP<Intrepid2::Cubature<RealType, Intrepid2::FieldContainer_Kokkos<RealType, PHX::Layout,PHX::Device> >>>("Cubature")), 
  intrepidBasis (p.get<Teuchos::RCP<Intrepid2::Basis<RealType, Intrepid2::FieldContainer_Kokkos<RealType, PHX::Layout, PHX::Device>>>>("Intrepid2 Basis"))
//  scalar        (p.get<std::string>("Nodal Scalar Name"),dl->node_scalar),
//  scalarJump    (p.get<std::string>("Scalar Jump Name"),dl->qp_scalar),
 // scalarAverage (p.get<std::string>("Scalar Average Name"),dl->qp_scalar)
{
 // this->addDependentField(scalar);

//  this->addEvaluatedField(scalarJump);
//  this->addEvaluatedField(scalarAverage);

  this->setName("Surface Scalar Jump"+PHX::typeAsString<EvalT>());

  haveTemperature = false;
  haveTransport = false;
  haveHydroStress = false;
  havePorePressure = false;
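
  // each of the scalar fields below (pore pressure, temperature, transport, hydrostatic stress) is
  // optional: the nodal field is registered as a dependent field and the corresponding jump and
  // midplane fields as evaluated fields, but only if the matching name is present in the parameter list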

  if (p.isType<std::string>("Nodal Pore Pressure Name")) {
	havePorePressure = true;
    PHX::MDField<ScalarT,Cell,Vertex>
      tp(p.get<std::string>("Nodal Pore Pressure Name"), dl->node_scalar);
    nodalPorePressure = tp;
    this->addDependentField(nodalPorePressure);

    PHX::MDField<ScalarT,Cell,QuadPoint>
      tjp(p.get<std::string>("Jump of Pore Pressure Name"), dl->qp_scalar);
    jumpPorePressure = tjp;
    this->addEvaluatedField(jumpPorePressure);

    PHX::MDField<ScalarT,Cell,QuadPoint>
      tmpp(p.get<std::string>("MidPlane Pore Pressure Name"), dl->qp_scalar);
    midPlanePorePressure = tmpp;
    this->addEvaluatedField(midPlanePorePressure);
  }

  if (p.isType<std::string>("Nodal Temperature Name")) {
	haveTemperature = true;
    PHX::MDField<ScalarT,Cell,Vertex>
      tf(p.get<std::string>("Nodal Temperature Name"), dl->node_scalar);
    nodalTemperature = tf;
    this->addDependentField(nodalTemperature);

    PHX::MDField<ScalarT,Cell,QuadPoint>
      tt(p.get<std::string>("Jump of Temperature Name"), dl->qp_scalar);
    jumpTemperature = tt;
    this->addEvaluatedField(jumpTemperature);

    PHX::MDField<ScalarT,Cell,QuadPoint>
      tmt(p.get<std::string>("MidPlane Temperature Name"), dl->qp_scalar);
    midPlaneTemperature = tmt;
    this->addEvaluatedField(midPlaneTemperature);
  }

  if (p.isType<std::string>("Nodal Transport Name")) {
	haveTransport = true;
    PHX::MDField<ScalarT,Cell,Vertex>
      ttp(p.get<std::string>("Nodal Transport Name"), dl->node_scalar);
    nodalTransport = ttp;
    this->addDependentField(nodalTransport);

    PHX::MDField<ScalarT,Cell,QuadPoint>
    tjtp(p.get<std::string>("Jump of Transport Name"), dl->qp_scalar);
    jumpTransport= tjtp;
    this->addEvaluatedField(jumpTransport);

    PHX::MDField<ScalarT,Cell,QuadPoint>
    tjtm(p.get<std::string>("MidPlane Transport Name"), dl->qp_scalar);
    midPlaneTransport = tjtm;
    this->addEvaluatedField(midPlaneTransport);
  }

  if (p.isType<std::string>("Nodal HydroStress Name")) {

    haveHydroStress = true;
    PHX::MDField<ScalarT,Cell,Vertex>
      ths(p.get<std::string>("Nodal HydroStress Name"), dl->node_scalar);
    nodalHydroStress = ths;
    this->addDependentField(nodalHydroStress);

    PHX::MDField<ScalarT,Cell,QuadPoint>
    tjths(p.get<std::string>("Jump of HydroStress Name"), dl->qp_scalar);
    jumpHydroStress= tjths;
    this->addEvaluatedField(jumpHydroStress);

    PHX::MDField<ScalarT,Cell,QuadPoint>
    tmpths(p.get<std::string>("MidPlane HydroStress Name"), dl->qp_scalar);
    midPlaneHydroStress= tmpths;
    this->addEvaluatedField(midPlaneHydroStress);
  }

  std::vector<PHX::DataLayout::size_type> dims;
  dl->node_vector->dimensions(dims);
  worksetSize = dims[0];
  numNodes = dims[1];
  numDims = dims[2];

  numQPs = cubature->getNumPoints();

  numPlaneNodes = numNodes / 2;
  numPlaneDims = numDims - 1;
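  // the surface element is made of two coincident planes (top and bottom), so each plane carries
  // half of the element nodes and the in-plane dimension is one less than the spatial dimension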

#ifdef ALBANY_VERBOSE
    std::cout << "in Surface Scalar Jump" << std::endl;
    std::cout << " numPlaneNodes: " << numPlaneNodes << std::endl;
    std::cout << " numPlaneDims: " << numPlaneDims << std::endl;
    std::cout << " numQPs: " << numQPs << std::endl;
    std::cout << " cubature->getNumPoints(): " << cubature->getNumPoints() << std::endl;
    std::cout << " cubature->getDimension(): " << cubature->getDimension() << std::endl;
#endif

  // Allocate Temporary FieldContainers
  refValues.resize(numPlaneNodes, numQPs);
  refGrads.resize(numPlaneNodes, numQPs, numPlaneDims);
  refPoints.resize(numQPs, numPlaneDims);
  refWeights.resize(numQPs);

  // Pre-calculate reference element quantities
  cubature->getCubature(refPoints, refWeights);
  intrepidBasis->getValues(refValues, refPoints, Intrepid2::OPERATOR_VALUE);
  intrepidBasis->getValues(refGrads, refPoints, Intrepid2::OPERATOR_GRAD);
}
Example #4
int main(int argc, char* argv[])
{

#ifdef _MPI
          // MPI initialization. Every MPI process has an ordinal number (myrank):
          // 0 is the master process, and all the others are slave processes.
	  int myrank; 
	  MPI_Init( &argc, &argv ); 
	  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
#endif  


          // initialize grid and tmt from the input file. This is done in both the master and the slave processes.
	  GRID grid("params");

	  TMT tmt("params");
          // NOTE: since TMT is initialized with the same parameters in every process, there is no actual
          // need for the master process to send the values of U and T along with Delta to the slave
          // processes. However, if U and T are changed in an automated manner (using for-loops, for
          // example) to obtain results for different sets of parameters in a single run, then this is
          // needed, since the TMT in the slave processes does not see the change in parameters. Still,
          // things can be implemented a bit differently to avoid sending U and T: for example, put the
          // entire "if else" below inside the for loop instead of just the "else" part. That way the exit
          // signal would have to be sent after each calculation, and Result would have to be initialized
          // in both the master and the slave processes (a sketch of this layout is given after this
          // example). Maybe I'll change this in the future and exclude the sending of U and T, but this
          // way seems "cleaner" to me for now (check TMT::SolveSIAM and TMT::Slave).


#ifdef _MPI
	  if (myrank != 0)
	  {
	    // MPI SLAVE

            // in the case of slave processes, the grid has to be set manually on the tmt object,
            // because no Result object will be passed to its Run method (nor will Run be called)
	    tmt.SetGrid(&grid);
               
            // the Slave method puts tmt in a perpetual loop: it waits for input from the master process,
            // performs the SIAM calculation on the received data, and returns the results to the master
            // process, until the exit signal is received. If an error occurs while solving the SIAM, an
            // error signal is sent to the master process.
	    tmt.Slave(myrank);
	  }
	  else 
	  {
  
	    // MPI MASTER  
#endif 
            Result result(&grid);
	    grid.assign_omega(result.omega);
          
            // create an initial guess for Delta
	    InitDelta(DOStypes::SemiCircle, grid.get_N(), 0.5, 0.0, 0.01, 0.5, result.omega, result.Delta);
            // NIDOS is not needed as input, since tmt only works with the Bethe-lattice-specific self-consistency
    
            // unlike CHM, tmt takes mu as an input parameter rather than n.
            // To use tmt away from half-filling, set mu to a value other than U/2
	    result.mu = tmt.get_U()/2.0;

            // this is just the initial guess for SIAM inside TMT
	    result.mu0 = 0;
             
            // run the calculation and receive the error flag.
	    bool failed = tmt.Run(&result);

	    if (failed) 
	        printf ("==== ERROR ==== Solution is INVALID\n");
        
            // prepare the file name and print out results
            char FN[50];
            sprintf(FN, "TMT.U%.3f.T%.3f.W%.3f%s", tmt.get_U(), tmt.get_T(), tmt.get_W(), 
                                                   (failed) ? ".FAILED" : "" );
	    result.PrintResult(FN);


#ifdef _MPI
            // when finished, send the exit signal to the slave processes to terminate them.
	    tmt.SendExitSignal();   
	  } //end MPI MASTER

          // all processes must reach MPI_Finalize.
	  MPI_Finalize();
#endif

	  return 0;
}
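
The note in the example above suggests an alternative layout that avoids sending U and T to the slaves: put the whole master/slave "if else" inside the parameter loop, so every process calls SetParams itself. The following is only a minimal sketch of that idea, not code from the original project; it reuses the GRID/TMT/Result/InitDelta interfaces seen in the examples above, and the U sweep and the T value 0.05 are illustrative placeholders.

int main(int argc, char* argv[])
{
#ifdef _MPI
	  int myrank;
	  MPI_Init( &argc, &argv );
	  MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
#endif

	  GRID grid("params");

	  TMT tmt("params");

	  for(double U=2.0; U<4.1; U+=0.5)   // illustrative parameter sweep
	  {
	    // every process updates its own parameters, so U and T never have to be sent
	    tmt.SetParams(U, 0.05, 0.5);

	    // Result is now created in every process, once per parameter set
	    Result result(&grid);
	    grid.assign_omega(result.omega);

#ifdef _MPI
	    if (myrank != 0)
	    {
	      // MPI SLAVE: serve the master until it sends the exit signal
	      tmt.SetGrid(&grid);
	      tmt.Slave(myrank);
	    }
	    else
	    {
	      // MPI MASTER
#endif
	      InitDelta(DOStypes::SemiCircle, grid.get_N(), 0.5, 0.0, 0.01, 0.5, result.omega, result.Delta);

	      result.mu = U/2.0;
	      result.mu0 = 0;

	      bool failed = tmt.Run(&result);

	      if (failed)
	        printf ("==== ERROR ==== Solution is INVALID\n");

	      char FN[50];
	      sprintf(FN, "TMT.U%.3f.T%.3f%s", tmt.get_U(), tmt.get_T(), (failed) ? ".FAILED" : "");
	      result.PrintResult(FN);

#ifdef _MPI
	      // the exit signal now has to be sent after every calculation, so the slaves
	      // fall out of Slave() and re-enter it in the next loop iteration
	      tmt.SendExitSignal();
	    } //end MPI MASTER
#endif
	  }

#ifdef _MPI
	  MPI_Finalize();
#endif

	  return 0;
}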