Example 1
void PisdWdF<EvalT, Traits>::
evaluateFields(typename Traits::EvalData workset)
{
    ScalarT kappa;
    ScalarT mu;

    // Leading dimension of 1 added so we can use Intrepid::det
    Intrepid::FieldContainer<EnergyFadType> F(1,numDims,numDims);

    // Allocate F ( = defgrad of derivative types) and seed with identity derivs
    for (std::size_t i=0; i < numDims; ++i)
    {
        for (std::size_t j=0; j < numDims; ++j)
        {
            F(0,i,j) = EnergyFadType(numDims*numDims, 0.0); // 0.0 will be overwritten below
            F(0,i,j).fastAccessDx(i*numDims + j) = 1.0;
        }
    }

    for (std::size_t cell=0; cell < workset.numCells; ++cell) {
        for (std::size_t qp=0; qp < numQPs; ++qp) {
            kappa = elasticModulus(cell,qp) / ( 3. * ( 1. - 2. * poissonsRatio(cell,qp) ) );
            mu    = elasticModulus(cell,qp) / ( 2. * ( 1. + poissonsRatio(cell,qp) ) );

            // Fill F with defgrad for value. Derivs already seeded with identity.
            for (std::size_t i=0; i < numDims; ++i)
                for (std::size_t j=0; j < numDims; ++j)
                    F(0,i,j).val() = defgrad(cell, qp, i, j);

            // Call energy functional (can make a library of these)
            EnergyFadType W = computeEnergy(kappa, mu, F);

            // Extract stress from derivs of energy
            for (std::size_t i=0; i < numDims; ++i)
                for (std::size_t j=0; j < numDims; ++j)
                    P(cell, qp, i, j) = W.fastAccessDx(i*numDims + j);

        }
    }
}
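The pattern above computes the first Piola-Kirchhoff stress as the gradient of a scalar strain-energy density: every entry F(0,i,j) is seeded as the (i*numDims + j)-th independent variable of the forward-AD type EnergyFadType, so after evaluating W the derivative slot W.fastAccessDx(i*numDims + j) holds dW/dF_ij, i.e. P_ij. The sketch below shows the same seed-and-extract idea with a hand-rolled dual-number type instead of the Sacado-based EnergyFadType used in the evaluator; the names Dual and toyEnergy are hypothetical, and the toy energy W(F) = 0.5 F:F is chosen only because its exact derivative is F itself, which makes the printed result easy to check.

// Minimal, self-contained sketch of the seed-and-extract pattern (assumption:
// EnergyFadType behaves like a forward-mode dual number with numDims*numDims
// derivative slots).
#include <array>
#include <cstdio>

constexpr int N = 3;      // spatial dimension (numDims)
constexpr int M = N * N;  // one derivative slot per entry of F

// Value plus M partial derivatives, dx[k] = d(val)/d(F_k).
struct Dual {
  double val = 0.0;
  std::array<double, M> dx{};
};

Dual operator*(const Dual& a, const Dual& b) {
  Dual c;
  c.val = a.val * b.val;
  for (int k = 0; k < M; ++k) c.dx[k] = a.dx[k] * b.val + a.val * b.dx[k];
  return c;
}

Dual& operator+=(Dual& a, const Dual& b) {
  a.val += b.val;
  for (int k = 0; k < M; ++k) a.dx[k] += b.dx[k];
  return a;
}

// Toy energy W(F) = 0.5 * F:F, so dW/dF_ij = F_ij exactly (a hypothetical
// stand-in for computeEnergy above).
Dual toyEnergy(const std::array<Dual, M>& F) {
  Dual W;
  for (int k = 0; k < M; ++k) {
    Dual half;
    half.val = 0.5;           // constant: value 0.5, all derivatives zero
    W += half * F[k] * F[k];
  }
  return W;
}

int main() {
  std::array<Dual, M> F;
  // Seed: entry (i,j) is the (i*N + j)-th independent variable, mirroring
  // F(0,i,j).fastAccessDx(i*numDims + j) = 1.0 in the evaluator.
  for (int i = 0; i < N; ++i)
    for (int j = 0; j < N; ++j) {
      F[i * N + j].val = (i == j) ? 1.1 : 0.1;  // an arbitrary deformation gradient
      F[i * N + j].dx[i * N + j] = 1.0;
    }

  Dual W = toyEnergy(F);

  // Extract: P_ij = dW/dF_ij comes straight out of the derivative array,
  // mirroring P(cell,qp,i,j) = W.fastAccessDx(i*numDims + j).
  for (int i = 0; i < N; ++i)
    for (int j = 0; j < N; ++j)
      std::printf("P(%d,%d) = %g\n", i, j, W.dx[i * N + j]);
  return 0;
}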
Example 2
  void LatticeDefGrad<EvalT, Traits>::
  evaluateFields(typename Traits::EvalData workset)
  {
    // Compute LatticeDefGrad tensor from displacement gradient
    for (int cell=0; cell < workset.numCells; ++cell)
    {
      for (int qp=0; qp < numQPs; ++qp)
      {
        for (int i=0; i < numDims; ++i)
        {
          for (int j=0; j < numDims; ++j)
          {
            latticeDefGrad(cell,qp,i,j) = defgrad(cell,qp,i,j);
          }
        }
        JH(cell,qp) = J(cell,qp);
      }
    }
    // Since Intrepid will later perform calculations on the entire workset size
    // and not just the used portion, we must fill the excess with reasonable 
    // values. Leaving this out leads to inversion of 0 tensors.
    for (int cell=workset.numCells; cell < worksetSize; ++cell) 
      for (int qp=0; qp < numQPs; ++qp) 
        for (int i=0; i < numDims; ++i)
          latticeDefGrad(cell,qp,i,i) = 1.0;

    if (weightedAverage)
    {
      ScalarT Jbar, wJbar, vol;
      for (int cell=0; cell < workset.numCells; ++cell)
      {
        Jbar = 0.0;
        vol = 0.0;
        for (int qp=0; qp < numQPs; ++qp)
        {
          Jbar += weights(cell,qp) * std::log( 1 + VH(cell,qp)*(Ctotal(cell,qp) - CtotalRef(cell,qp)) );
          vol  += weights(cell,qp);
        }
        Jbar /= vol;
        // Jbar = std::exp(Jbar);
        for (int qp=0; qp < numQPs; ++qp)
        {
          // wJbar depends only on (cell,qp); compute it once per quadrature point
          wJbar = std::exp( (1-alpha) * Jbar +
                            alpha * std::log( 1 + VH(cell,qp)*(Ctotal(cell,qp) - CtotalRef(cell,qp)) ) );
          for (int i=0; i < numDims; ++i)
          {
            for (int j=0; j < numDims; ++j)
            {
              latticeDefGrad(cell,qp,i,j) *= std::pow( wJbar, -1./3. );
            }
          }
          JH(cell,qp) *= wJbar;
        }
      }
    } else {
      for (int cell=0; cell < workset.numCells; ++cell)
      {
        for (int qp=0; qp < numQPs; ++qp)
        {
          JH(cell,qp) *= (1 + VH(cell,qp)*(Ctotal(cell,qp) - CtotalRef(cell,qp)));
          for (int i=0; i < numDims; ++i)
          {
            for (int j=0; j < numDims; ++j)
            {
              latticeDefGrad(cell,qp,i,j) *= std::pow(JH(cell,qp), -1./3.);
            }
          }
        }
      }
    }
  }
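Read as equations, the weightedAverage branch forms a quadrature-weighted average of the log of the concentration-induced volume change and blends it back in log space at each quadrature point. The display below is only a transcription of what the loops compute (the symbols mirror the field names weights, VH, Ctotal, CtotalRef, and alpha), not a statement of the model behind them:

\bar{\ell}_c = \frac{\sum_q w_{cq}\,\ln\!\big(1 + V_H(c,q)\,[C_{\mathrm{tot}}(c,q) - C_{\mathrm{tot,ref}}(c,q)]\big)}{\sum_q w_{cq}},
\qquad
\tilde{J}_{cq} = \exp\!\Big((1-\alpha)\,\bar{\ell}_c + \alpha\,\ln\!\big(1 + V_H(c,q)\,[C_{\mathrm{tot}}(c,q) - C_{\mathrm{tot,ref}}(c,q)]\big)\Big),

after which latticeDefGrad(c,q) is scaled by \tilde{J}_{cq}^{-1/3} and JH(c,q) is multiplied by \tilde{J}_{cq}. The commented-out Jbar = std::exp(Jbar) is consistent with this reading: Jbar is deliberately kept as a log average so both terms inside the exponential live on the same scale.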
Example 3
void AssumedStrain<EvalT, Traits>::
evaluateFields(typename Traits::EvalData workset)
{
  // Compute AssumedStrain tensor from displacement gradient
  for (std::size_t cell=0; cell < workset.numCells; ++cell)
  {
    for (std::size_t qp=0; qp < numQPs; ++qp)
    {
      for (std::size_t i=0; i < numDims; ++i)
      {
        for (std::size_t j=0; j < numDims; ++j)
        {
          defgrad(cell,qp,i,j) = GradU(cell,qp,i,j);
        }
        defgrad(cell,qp,i,i) += 1.0;  // F = I + GradU
      }
    }
  }

  Intrepid::RealSpaceTools<ScalarT>::det(J, defgrad);

  if (avgJ)
  {
    ScalarT Jbar;
    for (std::size_t cell=0; cell < workset.numCells; ++cell)
    {
      Jbar = 0.0;
      for (std::size_t qp=0; qp < numQPs; ++qp)
      {
        //TEUCHOS_TEST_FOR_EXCEPTION(J(cell,qp) < 0, std::runtime_error,
        //    " negative volume detected in avgJ routine");
        Jbar += std::log(J(cell,qp));
        //Jbar += J(cell,qp);
      }
      Jbar /= numQPs;
      Jbar = std::exp(Jbar);
      for (std::size_t qp=0; qp < numQPs; ++qp)
      {
        for (std::size_t i=0; i < numDims; ++i)
        {
          for (std::size_t j=0; j < numDims; ++j)
          {
            defgrad(cell,qp,i,j) *= std::pow(Jbar/J(cell,qp), 1./3.);
          }
        }
        J(cell,qp) = Jbar;
      }
    }
  }
  else if (volavgJ)
  {
    ScalarT Jbar, vol;
    for (std::size_t cell=0; cell < workset.numCells; ++cell)
    {
      Jbar = 0.0;
      vol = 0.0;
      for (std::size_t qp=0; qp < numQPs; ++qp)
      {
        //TEUCHOS_TEST_FOR_EXCEPTION(J(cell,qp) < 0, std::runtime_error,
        //    " negative volume detected in volavgJ routine");
        Jbar += weights(cell,qp) * std::log( J(cell,qp) );
        vol  += weights(cell,qp);
      }
      Jbar /= vol;
      Jbar = std::exp(Jbar);
      for (std::size_t qp=0; qp < numQPs; ++qp)
      {
        for (std::size_t i=0; i < numDims; ++i)
        {
          for (std::size_t j=0; j < numDims; ++j)
          {
            defgrad(cell,qp,i,j) *= std::pow(Jbar/J(cell,qp), 1./3.);
          }
        }
        J(cell,qp) = Jbar;
      }
    }
  }
  else if (weighted_Volume_Averaged_J)
  {
    ScalarT Jbar, wJbar, vol;
    ScalarT StabAlpha = 0.5; // This setting needs to change later.
    for (std::size_t cell=0; cell < workset.numCells; ++cell)
    {
      Jbar = 0.0;
      vol = 0.0;
      for (std::size_t qp=0; qp < numQPs; ++qp)
      {
        //TEUCHOS_TEST_FOR_EXCEPTION(J(cell,qp) < 0, std::runtime_error,
        //    " negative volume detected in volavgJ routine");
        Jbar += weights(cell,qp) * std::log( J(cell,qp) );
        vol  += weights(cell,qp);
      }
      Jbar /= vol;
      // Jbar = std::exp(Jbar);  (left as a log average; exponentiated inside wJbar below)
      for (std::size_t qp=0; qp < numQPs; ++qp)
      {
        // wJbar depends only on (cell,qp); compute it once per quadrature point
        wJbar = std::exp( (1-StabAlpha)*Jbar + StabAlpha*std::log(J(cell,qp)) );
        for (std::size_t i=0; i < numDims; ++i)
        {
          for (std::size_t j=0; j < numDims; ++j)
          {
            defgrad(cell,qp,i,j) *= std::pow(wJbar/J(cell,qp), 1./3.);
          }
        }
        J(cell,qp) = wJbar;
      }
    }
  }

  // Since Intrepid will later perform calculations on the entire workset size
  // and not just the used portion, we must fill the excess with reasonable 
  // values. Leaving this out leads to inversion of 0 tensors.
  for (std::size_t cell=workset.numCells; cell < worksetSize; ++cell) 
    for (std::size_t qp=0; qp < numQPs; ++qp) 
      for (std::size_t i=0; i < numDims; ++i)
        defgrad(cell,qp,i,i) = 1.0;


  // assemble assumed strain: E = 0.5*(F + F^T) - I
  for (std::size_t cell=0; cell < workset.numCells; ++cell) {
    for (std::size_t qp=0; qp < numQPs; ++qp) {
      for (std::size_t i=0; i < numDims; ++i) {
        for (std::size_t j=0; j < numDims; ++j) {
          assumedStrain(cell,qp,i,j) = 0.5*(defgrad(cell,qp,i,j) + defgrad(cell,qp,j,i));
          if (i==j) assumedStrain(cell,qp,i,j) -= 1.0;
        }
      }
    }
  }
}
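All three averaging branches apply the same per-cell modification to the deformation gradient and differ only in how the representative Jacobian is formed: an unweighted log average over quadrature points (avgJ), a quadrature-weight average (volavgJ), or a log-space blend controlled by StabAlpha (weighted_Volume_Averaged_J). As a sketch of what the loops compute, with F_q = I + \nabla u and J_q = \det F_q at quadrature point q of a cell:

\bar{J} = \exp\!\Big(\tfrac{1}{n_{qp}} \sum_q \ln J_q\Big)
\quad\text{or}\quad
\bar{J} = \exp\!\Big(\tfrac{\sum_q w_q \ln J_q}{\sum_q w_q}\Big),
\qquad
\bar{F}_q = \Big(\tfrac{\bar{J}}{J_q}\Big)^{1/3} F_q,

so that in three dimensions \det\bar{F}_q = \bar{J}, and the assumed strain assembled at the end is \epsilon = \tfrac{1}{2}\,(\bar{F} + \bar{F}^{\mathsf T}) - I.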