//==============================================================================
int Ifpack2_OverlapFactor::InitValues(const Tpetra_RowMatrix * UserMatrix) {

  if (OverlapGraph_!=0) {

    // If the user matrix is stored as a CrsMatrix its values can be used directly;
    // the cast must preserve the const qualifier to compile.
    const Tpetra_CrsMatrix * CrsMatrix = dynamic_cast<const Tpetra_CrsMatrix *>(UserMatrix);
    if (CrsMatrix!=0) {
      // CrsMatrix-specific value initialization would go here.
    }
  }

  if (!Allocated()) EPETRA_CHK_ERR(-1); // Must be allocated
  if (ValuesInitialized()) EPETRA_CHK_ERR(1); // Values already init'ed, warn caller

  SetValuesInitialized(true); // Values are now in place
  SetFactored(false);         // Any previous factorization is invalidated
  return(0);
}
//==============================================================================
int Ifpack2_OverlapFactor::Factor() {
  
  if (!ValuesInitialized()) EPETRA_CHK_ERR(-1); // Values must be initialized
  if (Factored()) EPETRA_CHK_ERR(1); // Return with a warning that factor already done
  
  EPETRA_CHK_ERR(DerivedFactor()); // Call Derived class factorization
  SetValuesInitialized(false);
  SetFactored(true);
  return(0);
}
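
Both methods above follow the same convention: a pair of state flags (ValuesInitialized/Factored) guards the work, a negative EPETRA_CHK_ERR code signals a hard error, and a positive code is only a warning. The standalone sketch below illustrates that guard pattern with hypothetical names (SimpleFactor, plain int return codes); it is not the Ifpack/Ifpack2 API itself.

#include <iostream>

// Minimal stand-in for the flag-guarded InitValues()/Factor() protocol.
class SimpleFactor {
public:
  SimpleFactor() : valuesInitialized_(false), factored_(false) {}

  int InitValues() {
    // ... copy the user's values into internal storage here ...
    valuesInitialized_ = true;
    factored_ = false;                  // new values invalidate any old factorization
    return 0;
  }

  int Factor() {
    if (!valuesInitialized_) return -1; // error: values must be initialized first
    if (factored_) return 1;            // warning: already factored
    // ... perform the actual factorization here ...
    valuesInitialized_ = false;         // values are consumed by the factorization
    factored_ = true;
    return 0;
  }

private:
  bool valuesInitialized_;
  bool factored_;
};

int main() {
  SimpleFactor f;
  std::cout << f.Factor() << '\n';      // -1: no values yet
  f.InitValues();
  std::cout << f.Factor() << '\n';      //  0: factorization performed
  std::cout << f.Factor() << '\n';      //  1: warning, already factored
  return 0;
}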
//==========================================================================
int Ifpack_CrsRiluk::InitValues(const Epetra_CrsMatrix & A) {

  UserMatrixIsCrs_ = true;

  if (!Allocated()) AllocateCrs();

  Teuchos::RefCountPtr<Epetra_CrsMatrix> OverlapA = Teuchos::rcp( (Epetra_CrsMatrix *) &A, false );

  if (IsOverlapped_) {
  
    OverlapA = Teuchos::rcp( new Epetra_CrsMatrix(Copy, *Graph_.OverlapGraph()) );
    EPETRA_CHK_ERR(OverlapA->Import(A, *Graph_.OverlapImporter(), Insert));
    EPETRA_CHK_ERR(OverlapA->FillComplete());
  }
  
  // Get Maximum Row length
  int MaxNumEntries = OverlapA->MaxNumEntries();

  // Set L range map and U domain map
  U_DomainMap_ = Teuchos::rcp( &(A.DomainMap()), false );
  L_RangeMap_ = Teuchos::rcp( &(A.RangeMap()), false );
  // Do the rest using generic Epetra_RowMatrix interface

  EPETRA_CHK_ERR(InitAllValues(*OverlapA, MaxNumEntries));

  return(0);
}
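
When IsOverlapped_ is true, the overlap handling above (and in the Epetra_VbrMatrix overload further below) always follows the same three steps: build a matrix on the overlap graph, Import the user's rows, then FillComplete. The hedged sketch below isolates that step; MakeOverlapMatrix and its graph/importer parameters are illustrative stand-ins for Graph_.OverlapGraph() and Graph_.OverlapImporter(), and return codes are ignored for brevity.

#include "Epetra_CrsGraph.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_Import.h"
#include "Teuchos_RefCountPtr.hpp"

// Illustrative helper (not part of Ifpack): wrap the user's matrix in a non-owning
// RefCountPtr when no overlap is needed, otherwise build and fill an overlapped copy.
static Teuchos::RefCountPtr<Epetra_CrsMatrix>
MakeOverlapMatrix(const Epetra_CrsMatrix & A,
                  const Epetra_CrsGraph & overlapGraph,
                  const Epetra_Import & overlapImporter,
                  bool isOverlapped)
{
  if (!isOverlapped)
    return Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(&A), false); // view, no ownership

  Teuchos::RefCountPtr<Epetra_CrsMatrix> overlapA =
      Teuchos::rcp(new Epetra_CrsMatrix(Copy, overlapGraph));
  overlapA->Import(A, overlapImporter, Insert); // pull in off-processor rows
  overlapA->FillComplete();                     // finish the fill before use
  return overlapA;
}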
Example #3
//----------------------------------------------------------------------
void GeneralMatrix::initialize(GeneralMatrix &source)
{
  isSparse = source.isSparse;
  nBlock = source.nBlock;
  if (blockStruct) delete[] blockStruct;
  blockStruct = new int[nBlock];
  CheckMemory(blockStruct);
  if (isSparse)
  {
    if (!mat.empty()) mat.clear();
    smat.resize(nBlock);
  }
  else
  {
    if (!smat.empty()) smat.clear();
    mat.resize(nBlock);
  }
  for(int j=0; j<nBlock; j++)
  {
    blockStruct[j] = source.blockStruct[j];
    if (isSparse)
    {
      Resize(smat[j],RowDimension(source.smat[j]),ColDimension(source.smat[j]),Allocated(source.smat[j]));
      for (int i=1; i<=ColDimension(smat[j]); i++) SetCol(smat[j], i, Col(source.smat[j], i));
    }
    else
    {
      mat[j] = source.mat[j];
    }
  }
}
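
initialize() above deep-copies whichever of the two block representations the source uses (sparse smat or dense mat) and clears the other. The snippet below is a hypothetical, much-simplified version of that dual-storage copy using only standard containers, just to make the pattern explicit.

#include <map>
#include <vector>

// Simplified stand-in: each block is stored either as a dense vector or as a
// sparse index->value map, and initialize() copies whichever form the source uses.
struct MiniBlockMatrix {
  bool isSparse = false;
  std::vector<std::vector<double>> dense;     // one dense block per entry
  std::vector<std::map<int, double>> sparse;  // one sparse block per entry

  void initialize(const MiniBlockMatrix &source) {
    isSparse = source.isSparse;
    if (isSparse) {
      dense.clear();            // drop the unused representation
      sparse = source.sparse;   // deep copy of every sparse block
    } else {
      sparse.clear();
      dense = source.dense;     // deep copy of every dense block
    }
  }
};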
Example #4
////////////////////////////////////////////////////////////////////////////////
// vmVariable
void vmVariable::Allocate (vmData& data, vmTypeLibrary& typeLib) {

    // Allocate new data
    assert (!Allocated ());
    m_dataIndex = data.Allocate (typeLib.DataSize (m_type));

    // Initialise it
    data.InitData (m_dataIndex, m_type, typeLib);
}
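
Allocate() reserves a slot in the shared data store, records only an index to it, and then initialises the slot in a second step. The following is a hedged sketch of that allocate-then-initialise split, with simplified, hypothetical stand-ins for vmData and vmTypeLibrary.

#include <cassert>
#include <vector>

// Hypothetical data store: Allocate() reserves a contiguous slot and returns its
// starting index, InitData() zero-fills that slot.
struct MiniDataStore {
  std::vector<int> cells;

  int Allocate(int size) {
    int index = static_cast<int>(cells.size());
    cells.resize(cells.size() + size);
    return index;                     // caller keeps the index, not a raw pointer
  }
  void InitData(int index, int size) {
    for (int i = 0; i < size; ++i) cells[index + i] = 0;
  }
};

struct MiniVariable {
  int dataIndex = -1;
  bool Allocated() const { return dataIndex >= 0; }

  void Allocate(MiniDataStore &data, int size) {
    assert(!Allocated());             // a variable is only ever allocated once
    dataIndex = data.Allocate(size);  // reserve storage...
    data.InitData(dataIndex, size);   // ...then initialise it in a separate step
  }
};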
Example #5
//----------------------------------------------------------------------
void RectangularMatrix::setRow(GeneralVector& vec, int i)
{
  if (isSparse != vec.isSparse)   // logical XOR: the sparse modes must match
  {
    fprintf(stdout, "RectangularMatrix: attempt to assign a vector to a matrix row with a different sparse mode.\n");
    return;
  }
  if (isSparse)
  {
    int vectorNonZeroCount = vec.svec.colStarts[ColDimension(vec.svec)];
    int matrixNonZeroCount = smat.colStarts[ColDimension(smat)];
    int *coln = vec.svec.rowIndices;
    if (matrixNonZeroCount+vectorNonZeroCount > Allocated(smat))
      IncAlloc(smat, matrixNonZeroCount + vectorNonZeroCount - Allocated(smat));
    for (int j=0; j<vectorNonZeroCount; j++)
    {
      BIASINTERVAL buf = vec.svec.theElements[j];
      SetElement(smat, i, coln[j]+1, INTERVAL(BiasInf(&buf), BiasSup(&buf)));
    }
  }
  else
  {
    SetRow(dmat, i, vec.dvec);
  }
}
Example #6
void ff::SmallDict::Reserve(size_t newAllocated, bool allowEmptySpace)
{
	size_t oldAllocated = Allocated();
	if (newAllocated > oldAllocated)
	{
		if (allowEmptySpace)
		{
			newAllocated = std::max<size_t>(NearestPowerOfTwo(newAllocated), 4);
		}

		size_t byteSize = sizeof(Data) + newAllocated * sizeof(Entry) - sizeof(Entry);
		_data = (Data *)_aligned_realloc(_data, byteSize, __alignof(Data));
		_data->allocated = newAllocated;
		_data->size = oldAllocated ? _data->size : 0;
		_data->atomizer = oldAllocated ? _data->atomizer : &ProcessGlobals::Get()->GetStringCache();
	}
}
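
Reserve() grows capacity geometrically: when empty space is allowed, the requested capacity is rounded up by NearestPowerOfTwo (assumed here to round up to the next power of two) and never drops below 4 entries; the byte-size computation subtracts one Entry, presumably because Data already embeds its first Entry. A small sketch of that growth policy, with hypothetical helper names:

#include <algorithm>
#include <cstddef>

// Assumed behaviour of NearestPowerOfTwo(): round up to the next power of two.
static size_t NextPowerOfTwo(size_t n) {
  size_t p = 1;
  while (p < n) p <<= 1;
  return p;
}

// Growth policy matching the branch above: geometric growth with a floor of 4
// entries when slack is allowed, otherwise exactly the requested capacity.
static size_t GrownCapacity(size_t requested, bool allowEmptySpace) {
  return allowEmptySpace ? std::max<size_t>(NextPowerOfTwo(requested), 4) : requested;
}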
//==========================================================================
int Ifpack_CrsRiluk::InitValues(const Epetra_VbrMatrix & A) {

  UserMatrixIsVbr_ = true;

  if (!Allocated()) AllocateVbr();

  //cout << "Original Graph " << endl <<  A.Graph() << endl << flush;
  //A.Comm().Barrier(); 
  //if (A.Comm().MyPID()==0) cout << "*****************************************************" <<endl;
  //cout << "Original Matrix " << endl << A << endl << flush;
  //A.Comm().Barrier(); 
  //if (A.Comm().MyPID()==0) cout << "*****************************************************" <<endl;
  //cout << "Overlap Graph " << endl << *Graph_.OverlapGraph() << endl << flush;
  //A.Comm().Barrier(); 
  //if (A.Comm().MyPID()==0) cout << "*****************************************************" <<endl;

  Teuchos::RefCountPtr<Epetra_VbrMatrix> OverlapA = Teuchos::rcp( (Epetra_VbrMatrix *) &A, false );

  if (IsOverlapped_) {
  
    OverlapA = Teuchos::rcp( new Epetra_VbrMatrix(Copy, *Graph_.OverlapGraph()) );
    EPETRA_CHK_ERR(OverlapA->Import(A, *Graph_.OverlapImporter(), Insert));
    EPETRA_CHK_ERR(OverlapA->FillComplete());
  }
  
  //cout << "Overlap Matrix " << endl << *OverlapA << endl << flush;

  // Get Maximum Row length
  int MaxNumEntries = OverlapA->MaxNumNonzeros();

  // Do the rest using generic Epetra_RowMatrix interface

  EPETRA_CHK_ERR(InitAllValues(*OverlapA, MaxNumEntries));

  return(0);
}
Example #8
char *EMalloc(unsigned long nbytes)
/* storage allocator */
/* Always returns a pointer that has 8-byte alignment (essential for our
   internal representation of an object). */
{
    unsigned char *p;
    unsigned char *temp;
    register struct block_list *list;
    int alignment;
    int min_align;

#ifdef ELINUX
#ifndef EBSD62
	return malloc(nbytes);
#else
	p = malloc( nbytes + 8 );
	if( (unsigned long)p & 7 ){
		*(int *)p = MAGIC_FILLER;
		p += 4;
	}
	else{
	       *(int  *)(p+4) = 0;
	        p += 8;
	}
	return p;
#endif
#else
#ifdef HEAP_CHECK
    long size;

    check_pool();
#endif
    nbytes += align4; // allow for possible 4-aligned malloc pointers

    if (nbytes <= MAX_CACHED_SIZE) {
	/* See if we have a block of this size in our cache.
	   Every block in the cache is 8-aligned. */

	list = pool_map[(nbytes + (RESOLUTION - 1)) >> LOG_RESOLUTION];
#ifdef HEAP_CHECK
	if (list->size < nbytes || list->size > nbytes * 2) {
	    sprintf(msg, "Alloc - size is %d, nbytes is %lu", list->size, nbytes);
	    RTInternal(msg);
	}
#endif
	temp = (char *)list->first;

	if (temp != NULL) {
	    /* a cache hit */

#ifdef EXTRA_STATS
	    a_hit++;
#endif
	    list->first = ((free_block_ptr)temp)->next;
	    cache_size -= 2;

#ifdef HEAP_CHECK
	    if (cache_size > 100000000)
		RTInternal("cache size is bad");
	    p = temp;
	    if (align4 && *(int *)(p-4) == MAGIC_FILLER)
		p = p - 4;
	    if (((unsigned long)temp) & 3)
		RTInternal("unaligned address in storage cache");
	    Allocated(block_size(p));
#endif
	    return temp; /* will be 8-aligned */
	}
	else {
	    nbytes = list->size; /* better to grab bigger size
				    so it can be reused for same purpose */
#ifdef EXTRA_STATS
	    a_miss++;
#endif
	}
    }