Example #1
0
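// Returns the factor relating the two operator orderings (a x b) -> c and
// (b x a) -> c: a fermionic sign (when both a and b are fermionic quanta)
// times the ratio of the first non-vanishing pair of spin and spatial
// Clebsch-Gordan coefficients, i.e. parity * <a b|c> / <b a|c>.  If no
// non-vanishing pair is found, the inputs are inconsistent and 1.0 is
// returned after a warning.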
double getCommuteParity(SpinQuantum a, SpinQuantum b, SpinQuantum c)
{
  int aspin = a.get_s(), airrep = a.get_symm().getirrep();
  int bspin = b.get_s(), birrep = b.get_symm().getirrep();
  int cspin = c.get_s(), cirrep = c.get_symm().getirrep();

  int an = a.get_n(), bn = b.get_n();
  int parity = IsFermion(a) && IsFermion(b) ? -1 : 1;
  for (int asz = -aspin; asz<aspin+1; asz+=2)
  for (int bsz = -bspin; bsz<bspin+1; bsz+=2)
  for (int al = 0; al<Symmetry::sizeofIrrep(airrep); al++)
  for (int bl = 0; bl<Symmetry::sizeofIrrep(birrep); bl++)
  {
    //double cleb = cleb_(aspin, asz, bspin, bsz, cspin, cspin);
    double cleb = clebsch(aspin, asz, bspin, bsz, cspin, cspin);
    double clebspatial = Symmetry::spatial_cg(airrep, birrep, cirrep, al, bl, 0);
    if (fabs(cleb) <= 1.0e-14 || fabs(clebspatial) <= 1.0e-14)
      continue;
    else
      //return parity*cleb*clebdinfh/cleb_(bspin, bsz, aspin, asz, cspin, cspin)/Symmetry::spatial_cg(birrep, airrep, cirrep, bl, al, 0);
      return parity*cleb*clebspatial/clebsch(bspin, bsz, aspin, asz, cspin, cspin)/Symmetry::spatial_cg(birrep, airrep, cirrep, bl, al, 0);
  }
  cout << "Major trouble, getCommuteParity asked for three inappropriate operators"<<endl;
  cout << a<<"  "<<b<<"  "<<c<<endl;
  return 1.0;
}
Example #2
0
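  // Builds the iterators of the CreCreDesComp operator component on block b:
  // the candidate one-index operators are screened against
  // dmrginp.oneindex_screen_tol() (using the BCS variant of the screening when
  // a BCS Hamiltonian is in use), local storage is allocated for each
  // surviving index, and every operator is flagged as an initialised fermionic
  // operator with its delta quantum number(s) -- four particle-number sectors
  // (+3, +1, -1, -3) for BCS, a single SpinQuantum otherwise.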
  template<> void Op_component<CreCreDesComp>::build_iterators(SpinBlock& b)
    {
      if (b.get_sites().size () == 0) return; // blank construction (used in unset_initialised() Block copy construction, for use with STL)
      const double screen_tol = dmrginp.oneindex_screen_tol();
      vector< int > screened_cdd_ix = (dmrginp.hamiltonian() == BCS) ?
        screened_cddcomp_indices(b.get_complementary_sites(), b.get_sites(), v_1, *b.get_twoInt(), v_cc, v_cccc, v_cccd, screen_tol) :
        screened_cddcomp_indices(b.get_complementary_sites(), b.get_sites(), v_1, *b.get_twoInt(), screen_tol);
      m_op.set_indices(screened_cdd_ix, dmrginp.last_site());      
      std::vector<int> orbs(1);
      for (int i = 0; i < m_op.local_nnz(); ++i)
        {
          orbs[0] = m_op.get_local_indices()[i];
          m_op.get_local_element(i).resize(1);
          m_op.get_local_element(i)[0] = boost::shared_ptr<CreCreDesComp>(new CreCreDesComp);
          SparseMatrix& op = *m_op.get_local_element(i)[0];
          op.set_orbs() = orbs;
          op.set_initialised() = true;
          op.set_fermion() = true;
          //op.set_deltaQuantum() = SpinQuantum(1, SpinOf(orbs[0]), SymmetryOfSpatialOrb(orbs[0]) );
          if (dmrginp.hamiltonian() == BCS) {
            op.resize_deltaQuantum(4);
            SpinQuantum qorb = getSpinQuantum(orbs[0]);
            op.set_deltaQuantum(0) = qorb;
            op.set_deltaQuantum(1) = SpinQuantum(3, qorb.get_s(), qorb.get_symm());
            op.set_deltaQuantum(2) = SpinQuantum(-1, qorb.get_s(), qorb.get_symm());
            op.set_deltaQuantum(3) = SpinQuantum(-3, qorb.get_s(), qorb.get_symm());
          } else {
            op.set_deltaQuantum(1, getSpinQuantum(orbs[0]));
          }
        }
    }
Example #3
0
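// Applies the tensor product of operators a and b (one acting on the left
// block of cblock, the other on the right) to the wavefunction c and
// accumulates the result into v, scaled by "scale".  For each allowed sector
// the left operator is first multiplied into c, and the partial result is then
// contracted with the right operator, with the spin and spatial 9j recoupling
// coefficients and a fermionic parity applied.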
void SpinAdapted::operatorfunctions::TensorMultiply(const SpinBlock *ablock, const Baseoperator<Matrix>& a, const Baseoperator<Matrix>& b, const SpinBlock *cblock, Wavefunction& c, Wavefunction& v, const SpinQuantum opQ, double scale)
{
  // can be used for situation with different bra and ket
  const int leftBraOpSz = cblock->get_leftBlock()->get_braStateInfo().quanta.size ();
  const int leftKetOpSz = cblock->get_leftBlock()->get_ketStateInfo().quanta.size ();
  const int rightBraOpSz = cblock->get_rightBlock()->get_braStateInfo().quanta.size ();
  const int rightKetOpSz = cblock->get_rightBlock()->get_ketStateInfo().quanta.size ();

  const StateInfo* lbraS = cblock->get_braStateInfo().leftStateInfo, *rbraS = cblock->get_braStateInfo().rightStateInfo;
  const StateInfo* lketS = cblock->get_ketStateInfo().leftStateInfo, *rketS = cblock->get_ketStateInfo().rightStateInfo;

  const char conjC = (cblock->get_leftBlock() == ablock) ? 'n' : 't';

  const Baseoperator<Matrix>& leftOp = (conjC == 'n') ? a : b; // an ugly hack to support the release memory optimisation
  const Baseoperator<Matrix>& rightOp = (conjC == 'n') ? b : a;
  const char leftConj = (conjC == 'n') ? a.conjugacy() : b.conjugacy();
  const char rightConj = (conjC == 'n') ? b.conjugacy() : a.conjugacy();


  int totalmem =0;

  for (int lQrQPrime = 0; lQrQPrime<leftBraOpSz*rightKetOpSz; ++lQrQPrime)
  {
    int rQPrime = lQrQPrime%rightKetOpSz, lQ = lQrQPrime/rightKetOpSz;
    for (int lQPrime = 0; lQPrime < leftKetOpSz; lQPrime++)
      if (leftOp.allowed(lQ, lQPrime) && c.allowed(lQPrime, rQPrime))
      {	    
	Matrix m; m.ReSize(lbraS->getquantastates(lQ), rketS->getquantastates(rQPrime));
        
	double factor = leftOp.get_scaling(lbraS->quanta[lQ], lketS->quanta[lQPrime]);
	MatrixMultiply (leftOp.operator_element(lQ, lQPrime), leftConj, c.operator_element(lQPrime, rQPrime), 'n',
			m, factor, 0.);	      
	
	for (int rQ = 0; rQ<rightBraOpSz; rQ++) {
	  if (v.allowed(lQ, rQ) && rightOp.allowed(rQ, rQPrime)) {
	    double factor = scale;
	    
	    factor *= dmrginp.get_ninej()(lketS->quanta[lQPrime].get_s().getirrep(), rketS->quanta[rQPrime].get_s().getirrep() , c.get_deltaQuantum(0).get_s().getirrep(), 
					  leftOp.get_spin().getirrep(), rightOp.get_spin().getirrep(), opQ.get_s().getirrep(),
					  lbraS->quanta[lQ].get_s().getirrep(), rbraS->quanta[rQ].get_s().getirrep() , v.get_deltaQuantum(0).get_s().getirrep());
	    factor *= Symmetry::spatial_ninej(lketS->quanta[lQPrime].get_symm().getirrep() , rketS->quanta[rQPrime].get_symm().getirrep(), c.get_symm().getirrep(), 
					      leftOp.get_symm().getirrep(), rightOp.get_symm().getirrep(), opQ.get_symm().getirrep(),
					      lbraS->quanta[lQ].get_symm().getirrep() , rbraS->quanta[rQ].get_symm().getirrep(), v.get_symm().getirrep());
	    int parity = rightOp.get_fermion() && IsFermion(lketS->quanta[lQPrime]) ? -1 : 1;
	    factor *=  rightOp.get_scaling(rbraS->quanta[rQ], rketS->quanta[rQPrime]);
	    MatrixMultiply (m, 'n', rightOp(rQ, rQPrime), TransposeOf(rightOp.conjugacy()), v.operator_element(lQ, rQ), factor*parity);
	  }
	}
	
      }
  }

}
Example #4
0
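// Variant of TensorMultiply driven directly by bra and ket StateInfo objects
// rather than SpinBlocks; aIsLeftOp selects whether a or b acts on the left
// block.  An intermediate wavefunction u first accumulates the product of the
// left operator with c for every allowed sector, and u is then contracted with
// the right operator, applying the spin and spatial 9j recoupling
// coefficients, the fermionic parity and the operator scaling factors.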
void SpinAdapted::operatorfunctions::TensorMultiply(const Baseoperator<Matrix>& a, const Baseoperator<Matrix>& b, const StateInfo *brastateinfo, const StateInfo *ketstateinfo, const Wavefunction& c, Wavefunction& v, const SpinQuantum opQ, bool aIsLeftOp, double scale)
{
  const int leftBraOpSz = brastateinfo->leftStateInfo->quanta.size ();
  const int leftKetOpSz = ketstateinfo->leftStateInfo->quanta.size ();
  const int rightBraOpSz = brastateinfo->rightStateInfo->quanta.size ();
  const int rightKetOpSz = ketstateinfo->rightStateInfo->quanta.size ();

  const StateInfo* lbraS = brastateinfo->leftStateInfo, *rbraS = brastateinfo->rightStateInfo;
  const StateInfo* lketS = ketstateinfo->leftStateInfo, *rketS = ketstateinfo->rightStateInfo;

  const char conjC = (aIsLeftOp) ? 'n' : 't';

  const Baseoperator<Matrix>& leftOp = (conjC == 'n') ? a : b; // an ugly hack to support the release memory optimisation
  const Baseoperator<Matrix>& rightOp = (conjC == 'n') ? b : a;
  const char leftConj = (conjC == 'n') ? a.conjugacy() : b.conjugacy();
  const char rightConj = (conjC == 'n') ? b.conjugacy() : a.conjugacy();

  Wavefunction u;
  u.resize(leftBraOpSz*leftKetOpSz, rightKetOpSz);

  int totalmem =0;

  {
    for (int lQrQPrime = 0; lQrQPrime<leftBraOpSz*rightKetOpSz; ++lQrQPrime)
    {
      int rQPrime = lQrQPrime%rightKetOpSz, lQ = lQrQPrime/rightKetOpSz;
	for (int lQPrime = 0; lQPrime < leftKetOpSz; lQPrime++)
	  if (leftOp.allowed(lQ, lQPrime) && c.allowed(lQPrime, rQPrime))
	  {
	    int lindex = lQ*leftKetOpSz+lQPrime;
	    u.allowed(lindex, rQPrime) = true;

	    u(lindex,rQPrime).ReSize(lbraS->getquantastates(lQ), rketS->getquantastates(rQPrime));
	    double factor = leftOp.get_scaling(lbraS->quanta[lQ], lketS->quanta[lQPrime]);
	    MatrixMultiply (leftOp.operator_element(lQ, lQPrime), leftConj, c.operator_element(lQPrime, rQPrime), 'n',
			    u.operator_element(lindex, rQPrime), factor, 0.);	      

	  }
    }
  }

  {
    for (int lQrQ = 0; lQrQ<leftBraOpSz*rightBraOpSz; ++lQrQ)
    {
      int rQ = lQrQ%rightBraOpSz, lQ=lQrQ/rightBraOpSz;
	if (v.allowed(lQ, rQ))
	  for (int rQPrime = 0; rQPrime < rightKetOpSz; rQPrime++)
	    if (rightOp.allowed(rQ, rQPrime))
	      for (int lQPrime = 0; lQPrime < leftKetOpSz; lQPrime++)
		if (leftOp.allowed(lQ, lQPrime) && u.allowed(lQ*leftKetOpSz+lQPrime, rQPrime))
		{
		  int lindex = lQ*leftKetOpSz+lQPrime;
		  double factor = scale;
      //if(dmrginp.spinAdapted()){
      //ninej has already considered non spin-adapted
      //it is just 1 in nonspin-adapted

		  factor *= dmrginp.get_ninej()(lketS->quanta[lQPrime].get_s().getirrep(), rketS->quanta[rQPrime].get_s().getirrep() , c.get_deltaQuantum(0).get_s().getirrep(), 
						leftOp.get_spin().getirrep(), rightOp.get_spin().getirrep(), opQ.get_s().getirrep(),
						lbraS->quanta[lQ].get_s().getirrep(), rbraS->quanta[rQ].get_s().getirrep() , v.get_deltaQuantum(0).get_s().getirrep());
      //}
		  factor *= Symmetry::spatial_ninej(lketS->quanta[lQPrime].get_symm().getirrep() , rketS->quanta[rQPrime].get_symm().getirrep(), c.get_symm().getirrep(), 
				       leftOp.get_symm().getirrep(), rightOp.get_symm().getirrep(), opQ.get_symm().getirrep(),
				       lbraS->quanta[lQ].get_symm().getirrep() , rbraS->quanta[rQ].get_symm().getirrep(), v.get_symm().getirrep());
		  int parity = rightOp.get_fermion() && IsFermion(lketS->quanta[lQPrime]) ? -1 : 1;
		  factor *=  rightOp.get_scaling(rbraS->quanta[rQ], rketS->quanta[rQPrime]);
		  MatrixMultiply (u.operator_element(lindex, rQPrime), 'n',
				  rightOp(rQ, rQPrime), TransposeOf(rightOp.conjugacy()), v.operator_element(lQ, rQ), factor*parity);
		}
    }
  }
	      
}
Example #5
0
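// Builds the starting SpinBlock of a sweep.  On restart the block is restored
// from disk for the appropriate sites; otherwise a forward sweep constructs
// the block from the first forward_starting_size sites (optionally summed with
// a dummy block carrying the target spin from dmrginp.molecule_quantum() when
// non-interacting orbitals are to be added in a spin-adapted run), while a
// backward sweep builds a tensor-product block over the last
// backward_starting_size sites.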
void SpinAdapted::InitBlocks::InitStartingBlock (SpinBlock& startingBlock, const bool &forward, int leftState, int rightState,
						 const int & forward_starting_size, const int &backward_starting_size,
						 const int& restartSize, const bool &restart, const bool& warmUp, int integralIndex, const vector<SpinQuantum>& braquanta, const vector<SpinQuantum>& ketquanta)
{
  if (restart && restartSize != 1)
  {
    int len = restart ? restartSize : forward_starting_size;
    vector<int> sites(len);
    if (forward)
      for (int i = 0; i < len; i++)
        sites[i] = i;
    else
      for (int i = 0; i < len; i++)
        sites[i] = dmrginp.last_site() - len + i;
    
    if (restart)
      SpinBlock::restore (forward, sites, startingBlock, leftState, rightState);
    else
      SpinBlock::restore (true, sites, startingBlock, leftState, rightState);
  }
  else if (forward)
  {
    if(startingBlock.nonactive_orb().size()!=0)
      startingBlock = SpinBlock(0, forward_starting_size - 1,startingBlock.nonactive_orb() , true);
    else
      startingBlock = SpinBlock(0, forward_starting_size - 1, integralIndex, leftState==rightState, true);
    if (dmrginp.add_noninteracting_orbs() && dmrginp.molecule_quantum().get_s().getirrep() != 0 && dmrginp.spinAdapted())
    {
      SpinQuantum s = dmrginp.molecule_quantum();
      s = SpinQuantum(s.get_s().getirrep(), s.get_s(), IrrepSpace(0));
      int qs = 1, ns = 1;
      StateInfo addstate(ns, &s, &qs); 
      SpinBlock dummyblock(addstate, integralIndex);
      SpinBlock newstartingBlock;
      newstartingBlock.set_integralIndex() = integralIndex;
      newstartingBlock.default_op_components(false, startingBlock, dummyblock, true, true, leftState==rightState);
      newstartingBlock.setstoragetype(LOCAL_STORAGE);
      if( braquanta.size()!= 0)
        newstartingBlock.BuildSumBlock(NO_PARTICLE_SPIN_NUMBER_CONSTRAINT, startingBlock, dummyblock,braquanta,ketquanta);
      else
        newstartingBlock.BuildSumBlock(NO_PARTICLE_SPIN_NUMBER_CONSTRAINT, startingBlock, dummyblock);
      startingBlock.clear();
      startingBlock = newstartingBlock;
    }
  }
  else
  {
    std::vector<int> backwardSites;
    if(dmrginp.spinAdapted()) {
      for (int i = 0; i < backward_starting_size; ++i) 
	backwardSites.push_back (dmrginp.last_site() - i - 1);
    }
    else {
      for (int i = 0; i < backward_starting_size; ++i) 
	backwardSites.push_back (dmrginp.last_site()/2 - i - 1);
    }
    sort (backwardSites.begin (), backwardSites.end ());
    startingBlock.set_integralIndex() = integralIndex;
    startingBlock.default_op_components(false, leftState==rightState);
    startingBlock.BuildTensorProductBlock (backwardSites);
  }
}
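// Same two-step contraction as the TensorMultiply above, for the case where
// cblock carries a single StateInfo (identical bra and ket spaces): the
// intermediate u = leftOp * c is formed first and then multiplied by the right
// operator sector by sector, with the 9j factors, the fermionic parity and the
// operator scalings applied.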
void SpinAdapted::operatorfunctions::TensorMultiply(const SpinBlock *ablock, const Baseoperator<Matrix>& a, const Baseoperator<Matrix>& b, const SpinBlock *cblock, Wavefunction& c, Wavefunction& v, const SpinQuantum opQ, double scale)
{
  const int leftOpSz = cblock->get_leftBlock()->get_stateInfo().quanta.size ();
  const int rightOpSz = cblock->get_rightBlock()->get_stateInfo().quanta.size ();

  const StateInfo* rS = cblock->get_stateInfo().rightStateInfo, *lS = cblock->get_stateInfo().leftStateInfo;

  assert (cblock->get_leftBlock() == ablock || cblock->get_rightBlock() == ablock);

  const char conjC = (cblock->get_leftBlock() == ablock) ? 'n' : 't';


  const Baseoperator<Matrix>& leftOp = (conjC == 'n') ? a : b; // an ugly hack to support the release memory optimisation
  const Baseoperator<Matrix>& rightOp = (conjC == 'n') ? b : a;
  const char leftConj = (conjC == 'n') ? a.conjugacy() : b.conjugacy();
  const char rightConj = (conjC == 'n') ? b.conjugacy() : a.conjugacy();

  Wavefunction u;
  u.resize(leftOpSz*leftOpSz, rightOpSz);

  int totalmem =0;

  {
    for (int lQrQPrime = 0; lQrQPrime<leftOpSz*rightOpSz; ++lQrQPrime)
    {
      int rQPrime = lQrQPrime%rightOpSz, lQ = lQrQPrime/rightOpSz;
	for (int lQPrime = 0; lQPrime < leftOpSz; lQPrime++)
	  if (leftOp.allowed(lQ, lQPrime) && c.allowed(lQPrime, rQPrime))
	  {
	    int lindex = lQ*leftOpSz+lQPrime;
	    u.allowed(lindex, rQPrime) = true;
	    u(lindex,rQPrime).ReSize(lS->getquantastates(lQ), rS->getquantastates(rQPrime));
	    double factor = leftOp.get_scaling(lS->quanta[lQ], lS->quanta[lQPrime]);
	    MatrixMultiply (leftOp.operator_element(lQ, lQPrime), leftConj, c.operator_element(lQPrime, rQPrime), 'n',
			    u.operator_element(lindex, rQPrime), factor, 0.);	      
	  }
    }
  }

  {
    for (int lQrQ = 0; lQrQ<leftOpSz*rightOpSz; ++lQrQ)
    {
      int rQ = lQrQ%rightOpSz, lQ=lQrQ/rightOpSz;
	if (v.allowed(lQ, rQ))
	  for (int rQPrime = 0; rQPrime < rightOpSz; rQPrime++)
	    if (rightOp.allowed(rQ, rQPrime))
	      for (int lQPrime = 0; lQPrime < leftOpSz; lQPrime++)
		if (leftOp.allowed(lQ, lQPrime) && u.allowed(lQ*leftOpSz+lQPrime, rQPrime))
		{
		  int lindex = lQ*leftOpSz+lQPrime;
		  double factor = scale;
		  factor *= dmrginp.get_ninej()(lS->quanta[lQPrime].get_s(), rS->quanta[rQPrime].get_s() , c.get_deltaQuantum().get_s(), 
						leftOp.get_spin(), rightOp.get_spin(), opQ.get_s(),
						lS->quanta[lQ].get_s(), rS->quanta[rQ].get_s() , v.get_deltaQuantum().get_s());
		  factor *= Symmetry::spatial_ninej(lS->quanta[lQPrime].get_symm().getirrep() , rS->quanta[rQPrime].get_symm().getirrep(), c.get_symm().getirrep(), 
				       leftOp.get_symm().getirrep(), rightOp.get_symm().getirrep(), opQ.get_symm().getirrep(),
				       lS->quanta[lQ].get_symm().getirrep() , rS->quanta[rQ].get_symm().getirrep(), v.get_symm().getirrep());
		  int parity = rightOp.get_fermion() && IsFermion(lS->quanta[lQPrime]) ? -1 : 1;
		  factor *=  rightOp.get_scaling(rS->quanta[rQ], rS->quanta[rQPrime]);
		  MatrixMultiply (u.operator_element(lindex, rQPrime), 'n',
				  rightOp(rQ, rQPrime), TransposeOf(rightOp.conjugacy()), v.operator_element(lQ, rQ), factor*parity);
		}
    }
  }
	      
}