Пример #1
0
static void
dofile(		/* plot a file */
	int  optc,
	char  *optv[],
	char  *file
)
{
	char  stmp[256];
	int  i;
						/* start fresh */
	mgclearall();
						/* type options first */
	for (i = 0; i < optc; i++)
		if (istyp(optv[i])) {
						/* snprintf: optv[i] is
						 * command-line input; never
						 * overrun stmp[256] */
			snprintf(stmp, sizeof(stmp), "include=%s.plt", optv[i]+1);
			setmgvar(progname, stdin, stmp);
		} else
			i++;		/* variable option: skip its value */
						/* file next */
	mgload(file);
						/* variable options last */
	for (i = 0; i < optc; i++)
		if (isvar(optv[i])) {
						/* assumes each variable option
						 * is followed by its value in
						 * optv -- caller's contract */
			snprintf(stmp, sizeof(stmp), "%s=%s", optv[i]+1, optv[i+1]);
			setmgvar(progname, stdin, stmp);
			i++;
		}
						/* graph it */
	mgraph();
}
Пример #2
0
// Verify that a connectivity block flagged 'diagonal' yields exactly one
// packed column index per connected ID in the matrix-graph's sparse row graph.
void test_MatrixGraph_test8(MPI_Comm comm, int numProcs, int localProc)
{
  FEI_COUT << "testing matrix-graph with 'diagonal' connectivity block...";

  try {
    fei::SharedPtr<fei::VectorSpace> rowSpace(new fei::VectorSpace(comm));
    fei::SharedPtr<fei::VectorSpace> colSpace; // deliberately empty

    int fieldID = 0;
    int fieldSize = 1;
    int idType = 0;
    rowSpace->defineFields(1, &fieldID, &fieldSize);
    rowSpace->defineIDTypes(1, &idType);

    fei::MatrixGraph_Impl2 mgraph(rowSpace, colSpace);

    const int numIDs = 4;
    int patternID = mgraph.definePattern(numIDs, idType, fieldID);
    fei::Pattern* pattern = mgraph.getPattern(patternID);

    // The pattern spans numIDs ids, each carrying one field of size 1.
    if (pattern->getNumIndices() != numIDs*fieldSize) {
      FEI_OSTRINGSTREAM osstr;
      osstr << "test_MatrixGraph_test8, line "<<__LINE__<<FEI_ENDL;
      throw std::runtime_error(osstr.str());
    }

    const int blockID = 0;
    const int numConnLists = 1;
    const bool diagonal = true;
    mgraph.initConnectivityBlock(blockID, numConnLists, patternID, diagonal);

    // Connect ids 0..numIDs-1 in a single list.
    std::vector<int> ids(numIDs);
    for(std::size_t n=0; n<ids.size(); ++n) {
      ids[n] = static_cast<int>(n);
    }

    mgraph.initConnectivity(blockID, 0, &ids[0]);
    mgraph.initComplete();

    fei::SharedPtr<fei::SparseRowGraph> localSRGraph = mgraph.createGraph(false);

    // Diagonal block => one nonzero per id.
    if ((int)localSRGraph->packedColumnIndices.size() != numIDs) {
      FEI_OSTRINGSTREAM osstr;
      osstr << "test_MatrixGraph_test8, line "<<__LINE__<<FEI_ENDL;
      throw std::runtime_error(osstr.str());
    }
  }
  catch(std::runtime_error& exc) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_MatrixGraph_test8, caught exception: " << exc.what();
    throw std::runtime_error(osstr.str());
  }

  FEI_COUT << "ok" << FEI_ENDL;
}
Пример #3
0
// Check that a pattern defined over a column field of size 3 reports
// numIDs * fieldSize indices.
void test_MatrixGraph_test7(MPI_Comm comm, int numProcs, int localProc)
{
  fei::SharedPtr<fei::VectorSpace> rows(new fei::VectorSpace(comm));
  fei::SharedPtr<fei::VectorSpace> cols(new fei::VectorSpace(comm));

  int rowField = 0, rowFieldSize = 1;
  int colField = 1, colFieldSize = 3;
  rows->defineFields(1, &rowField, &rowFieldSize);
  cols->defineFields(1, &colField, &colFieldSize);

  fei::MatrixGraph_Impl2 mgraph(rows, cols);

  const int numIDs = 4;
  int pID = mgraph.definePattern(numIDs, 0, colField);
  fei::Pattern* pattern = mgraph.getPattern(pID);

  const int expected = numIDs*colFieldSize;
  if (pattern->getNumIndices() != expected) {
    FEI_COUT << "getNumIndices: " << pattern->getNumIndices()<<", colfieldsize: " << colFieldSize<<FEI_ENDL;
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_MatrixGraph_test7, line "<<__LINE__<<FEI_ENDL;
    throw std::runtime_error(osstr.str());
  }
}
Пример #4
0
// Serial-only unit-test for fei::Matrix_Impl: builds a one-block,
// two-ID matrix-graph, creates a matrix through the Trilinos factory,
// sums in a dense block of 1.0 coefficients, assembles, and writes the
// result out both directly and via a FillableMat copy.
void test_Matrix_unit2(MPI_Comm comm, int numProcs, int localProc)
{
  // This test only runs in serial.
  if (numProcs > 1) {
    return;
  }

  FEI_COUT << "testing fei::Matrix_Impl...";

  // Row space with a single scalar field; colspace is left empty
  // (presumably the row space doubles as the column space --
  // confirm against MatrixGraph_Impl2's handling of a null colspace).
  fei::SharedPtr<fei::VectorSpace> rowspace(new fei::VectorSpace(comm));
  fei::SharedPtr<fei::VectorSpace> colspace;

  int rowfield = 0, rowfieldsize = 1;
  int idType = 0;
  rowspace->defineFields(1, &rowfield, &rowfieldsize);
  rowspace->defineIDTypes(1, &idType);

  fei::SharedPtr<fei::MatrixGraph> mgraph(new fei::MatrixGraph_Impl2(rowspace, colspace));

  // One pattern covering 2 ids, each carrying 'rowfield'.
  int patternID1 = mgraph->definePattern(2, idType, rowfield);

  fei::Pattern* rowpattern = mgraph->getPattern(patternID1);

  // Block 0, one connectivity list.
  mgraph->initConnectivityBlock(0, 1, patternID1);

  std::vector<int> ids(2);
  ids[0] = 0; ids[1] = 1;

  int err = mgraph->initConnectivity(0, 0, &ids[0]);
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit2, initConnectivity returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  err = mgraph->initComplete();
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit2, initComplete returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  // The Trilinos factory may not be compiled in; skip quietly if so.
  bool factory_created = false;
  fei::SharedPtr<fei::Factory> factory;
  try {
    factory = fei::create_fei_Factory(comm, "Trilinos");
    factory_created = true;
  }
  catch(...) {}

  if (!factory_created) {
    FEI_COUT << "failed to create Trilinos factory."<<FEI_ENDL;
    return;
  }

  fei::SharedPtr<fei::Matrix> feimat = factory->createMatrix(mgraph);

  int numrowindices = rowpattern->getNumIndices();

  // Dense numrowindices x numrowindices block of 1.0s, exposed through
  // an array of row pointers as sumIn expects.
  std::vector<double> coefs(numrowindices*numrowindices, 1.0);
  std::vector<double*> coefs_2D(numrowindices);
  for(int i=0; i<numrowindices; ++i) {
    coefs_2D[i] = &(coefs[i*numrowindices]);
  }

  err = feimat->sumIn(0, 0, &coefs_2D[0]);
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit2, feimat->sumIn returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  err = feimat->globalAssemble();
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit2, feimat->globalAssemble returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  // Write the assembled matrix directly...
  err = feimat->writeToFile("feimat2.mtx", false);
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit2, feimat->writeToFile returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  // ...and again via a FillableMat copy, exercising the copy helper.
  fei::FillableMat feimat_ss;
  err = fei_test_utils::copy_feiMatrix_to_FillableMat(*feimat, feimat_ss);
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit2, copy_feiMatrix_to_FillableMat returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  fei_test_utils::writeMatrix("feimat_ss2.mtx", feimat_ss);

  FEI_COUT << "ok"<<FEI_ENDL;
}
Пример #5
0
// Serial-only unit-test for fei::Matrix_Impl with FEI_BLOCK_DIAGONAL_ROW:
// sums the same block-diagonal contributions into both a point-entry matrix
// and a block-entry matrix created from one matrix-graph, then writes both.
void test_Matrix_unit4(MPI_Comm comm, int numProcs, int localProc)
{
  // This test only runs in serial.
  if (numProcs > 1) {
    return;
  }

  FEI_COUT << "testing fei::Matrix_Impl with FEI_BLOCK_DIAGONAL_ROW...";

  // Row space with one field of size 2; colspace left empty (presumably
  // the row space serves as the column space -- see MatrixGraph_Impl2).
  fei::SharedPtr<fei::VectorSpace> rowspace(new fei::VectorSpace(comm));
  fei::SharedPtr<fei::VectorSpace> colspace;

  int rowfield = 0, rowfieldsize = 2;
  int idType = 0;
  rowspace->defineFields(1, &rowfield, &rowfieldsize);
  rowspace->defineIDTypes(1, &idType);

  fei::SharedPtr<fei::MatrixGraph> mgraph(new fei::MatrixGraph_Impl2(rowspace, colspace));

  int patternID1 = mgraph->definePattern(2, idType, rowfield);

  fei::Pattern* rowpattern = mgraph->getPattern(patternID1);

  // One connectivity list, flagged 'diagonal'.
  bool diagonal = true;
  mgraph->initConnectivityBlock(0, 1, patternID1, diagonal);

  std::vector<int> ids(2);
  ids[0] = 0; ids[1] = 1;

  int err = mgraph->initConnectivity(0, 0, &ids[0]);
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit4, initConnectivity returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  err = mgraph->initComplete();
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit4, initComplete returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  // Trilinos factory may not be compiled in; skip quietly if so.
  fei::SharedPtr<fei::Factory> factory;
  try {
    factory = fei::create_fei_Factory(comm, "Trilinos");
  }
  catch(...) {
    FEI_COUT << "Trilinos not available."<<FEI_ENDL;
    return;
  }

  fei::Param blktrue("BLOCK_MATRIX", true);
  fei::Param blkfalse("BLOCK_MATRIX", false);

  // Create a block-entry matrix first...
  fei::ParameterSet paramset;
  paramset.add(blktrue);
  factory->parameters(paramset);

  fei::SharedPtr<fei::Matrix> feiblkmat = factory->createMatrix(mgraph);

  // ...then a point-entry matrix from the same graph.
  paramset.add(blkfalse);
  factory->parameters(paramset);

  fei::SharedPtr<fei::Matrix> feimat = factory->createMatrix(mgraph);

  int numrowindices = rowpattern->getNumIndices();

  // One rowfieldsize-wide coefficient row of 1.0s per point-row; sumIn with
  // FEI_BLOCK_DIAGONAL_ROW consumes these as per-block diagonal entries.
  std::vector<double> coefs(numrowindices*rowfieldsize*rowfieldsize, 1.0);
  std::vector<double*> coefs_2D(numrowindices*rowfieldsize);
  int offset = 0;
  for(int i=0; i<numrowindices*rowfieldsize; ++i) {
    coefs_2D[i] = &(coefs[offset]);
    offset += rowfieldsize;
  }

  err = feimat->sumIn(0, 0, &coefs_2D[0], FEI_BLOCK_DIAGONAL_ROW);
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit4, feimat->sumIn returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  err = feiblkmat->sumIn(0, 0, &coefs_2D[0], FEI_BLOCK_DIAGONAL_ROW);
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit4, feiblkmat->sumIn returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  err = feimat->globalAssemble();
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    osstr << "test_Matrix_unit4, feimat->globalAssemble returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  err = feiblkmat->globalAssemble();
  if (err) {
    FEI_OSTRINGSTREAM osstr;
    // Fixed copy-paste bug: this failure is from feiblkmat, not feimat.
    osstr << "test_Matrix_unit4, feiblkmat->globalAssemble returned err="<<err;
    throw std::runtime_error(osstr.str());
  }

  feimat->writeToFile("feimat_blkdiag.mtx");
  feiblkmat->writeToFile("feiblkmat_blkdiag.mtx");

  FEI_COUT << "ok"<<FEI_ENDL;
}
// Selects a view label for every face of 'graph' by running an MRF
// optimization per connected component, then writes the resulting labels
// back into 'graph'. Label 0 is reserved for "undefined".
void
view_selection(ST const & data_costs, UniGraph * graph, Settings const & settings) {

    /* Work on a copy so adjacency queries see unseen faces isolated while
     * labels are still written back to the caller's graph. */
    UniGraph mgraph(*graph);
    isolate_unseen_faces(&mgraph, data_costs);

    /* Number of "large" components (> 1000 faces), reported below when
     * several components are optimized simultaneously. */
    unsigned int num_components = 0;

    std::vector<FaceInfo> face_infos(mgraph.num_nodes());
    std::vector<std::vector<std::size_t> > components;

    // get components in the graph
    mgraph.get_subgraphs(0, &components);
    for (std::size_t i = 0; i < components.size(); ++i) {
        /* BUGFIX: count components whose own size exceeds 1000 faces.
         * This previously tested components.size(), which is loop-invariant
         * and counted every component once there were > 1000 of them. */
        if (components[i].size() > 1000) num_components += 1;
        for (std::size_t j = 0; j < components[i].size(); ++j) {
            face_infos[components[i][j]] = {i, j};
        }
    }

    #ifdef RESEARCH
    mrf::SOLVER_TYPE solver_type = mrf::GCO;
    #else
    mrf::SOLVER_TYPE solver_type = mrf::LBP;
    #endif

    /* Label 0 is undefined. */
    const std::size_t num_labels = data_costs.rows() + 1;
    std::vector<mrf::Graph::Ptr> mrfs(components.size());
    for (std::size_t i = 0; i < components.size(); ++i) {
        mrfs[i] = mrf::Graph::create(components[i].size(), num_labels, solver_type);
    }

    /* Set neighbors must be called prior to set_data_costs (LBP). */
    set_neighbors(mgraph, face_infos, mrfs);

    set_data_costs(face_infos, data_costs, mrfs);

    /* Components may only be optimized in parallel for research builds
     * with OpenMP available. */
    bool multiple_components_simultaneously = false;
    #ifdef RESEARCH
    multiple_components_simultaneously = true;
    #endif
    #ifndef _OPENMP
    multiple_components_simultaneously = false;
    #endif

    if (multiple_components_simultaneously) {
        if (num_components > 0) {
            std::cout << "\tOptimizing " << num_components
                << " components simultaneously." << std::endl;
        }
        std::cout << "\tComp\tIter\tEnergy\t\tRuntime" << std::endl;
    }
    #ifdef RESEARCH
    #pragma omp parallel for schedule(dynamic)
    #endif
    for (std::size_t i = 0; i < components.size(); ++i) {
        switch (settings.smoothness_term) {
            case POTTS:
                /* 'potts' is declared elsewhere in this file. */
                mrfs[i]->set_smooth_cost(*potts);
            break;
        }

        /* Only print per-iteration progress for sizable problems. */
        bool verbose = mrfs[i]->num_sites() > 10000;

        util::WallTimer timer;

        mrf::ENERGY_TYPE const zero = mrf::ENERGY_TYPE(0);
        mrf::ENERGY_TYPE last_energy = zero;
        mrf::ENERGY_TYPE energy = mrfs[i]->compute_energy();
        mrf::ENERGY_TYPE diff = last_energy - energy;
        unsigned int iter = 0;

        std::string const comp = util::string::get_filled(i, 4);

        if (verbose && !multiple_components_simultaneously) {
            std::cout << "\tComp\tIter\tEnergy\t\tRuntime" << std::endl;
        }
        /* Iterate until the energy stops decreasing (or increases). */
        while (diff != zero) {
            #pragma omp critical
            if (verbose) {
                std::cout << "\t" << comp << "\t" << iter << "\t" << energy
                    << "\t" << timer.get_elapsed_sec() << std::endl;
            }
            last_energy = energy;
            ++iter;
            energy = mrfs[i]->optimize(1);
            diff = last_energy - energy;
            if (diff <= zero) break;
        }

        #pragma omp critical
        if (verbose) {
            std::cout << "\t" << comp << "\t" << iter << "\t" << energy << std::endl;
            if (diff == zero) {
                std::cout << "\t" << comp << "\t" << "Converged" << std::endl;
            }
            if (diff < zero) {
                std::cout << "\t" << comp << "\t"
                    << "Increase of energy - stopping optimization" << std::endl;
            }
        }

        /* Extract resulting labeling from MRF. */
        for (std::size_t j = 0; j < components[i].size(); ++j) {
            int label = mrfs[i]->what_label(static_cast<int>(j));
            assert(0 <= label && static_cast<std::size_t>(label) < num_labels);
            graph->set_label(components[i][j], static_cast<std::size_t>(label));
        }
    }
}
Пример #7
0
// Exercise both addBCRecords overloads of fei::DirichletBCManager and
// check the resulting record counts (duplicates replace, new offsets add).
TEUCHOS_UNIT_TEST(DirBC, DirBCManager_addBCRecords)
{
  MPI_Comm comm = MPI_COMM_WORLD;
  int idtype = 0;
  int fieldID = 0;
  int fieldSize = 2;
  int offsetIntoField = 1;

  // Each prescribed value equals its id, so vals can be seeded from ids.
  const int idvals[5] = {2, 1, 3, 4, 0};
  std::vector<int> ids(idvals, idvals+5);
  std::vector<double> vals(ids.begin(), ids.end());

  fei::SharedPtr<fei::VectorSpace> vspace(new fei::VectorSpace(comm));
  vspace->defineFields(1, &fieldID, &fieldSize);
  vspace->defineIDTypes(1, &idtype);

  // Square matrix-graph: same space for rows and columns.
  fei::SharedPtr<fei::MatrixGraph> mgraph(new fei::MatrixGraph_Impl2(vspace, vspace));

  int numIDs = 1;
  int patternID = mgraph->definePattern(numIDs, idtype, fieldID);

  // One connectivity list per id.
  int blockID = 0;
  mgraph->initConnectivityBlock(blockID, ids.size(), patternID);
  for(size_t i = 0; i<ids.size(); ++i) {
    mgraph->initConnectivity(blockID, i, &ids[i]);
  }
  mgraph->initComplete();

  fei::DirichletBCManager bcmgr(mgraph->getRowSpace());

  // 1) single shared offset for all five records
  bcmgr.addBCRecords(5, idtype, fieldID, offsetIntoField,
                     &ids[0], &vals[0]);
  if (bcmgr.getNumBCRecords() != 5) {
    throw std::runtime_error("test_DirBCManager test 1 failed.");
  }

  // 2) same records via per-id offsets -- count should stay at five
  std::vector<int> offsetsIntoField(5, 1);
  bcmgr.addBCRecords(5, idtype, fieldID, &ids[0],
                     &offsetsIntoField[0], &vals[0]);
  if (bcmgr.getNumBCRecords() != 5) {
    throw std::runtime_error("test_DirBCManager test 2 failed.");
  }

  // 3) change the offset for three ids -- three new distinct records
  offsetsIntoField[1] = 0;
  offsetsIntoField[3] = 0;
  offsetsIntoField[4] = 0;
  bcmgr.addBCRecords(5, idtype, fieldID, &ids[0],
                     &offsetsIntoField[0], &vals[0]);
  if (bcmgr.getNumBCRecords() != 8) {
    throw std::runtime_error("test_DirBCManager test 3 failed.");
  }
}
Пример #8
0
// Unit-test for fei::DirichletBCManager::finalizeBCEqns: adds five BC
// records and finalizes them into a FillableMat-backed fei::Matrix_Impl,
// then checks the matrix row counts. Serial-only.
TEUCHOS_UNIT_TEST(DirBC, DirBCManager_finalizeBCEqns)
{
  MPI_Comm comm = MPI_COMM_WORLD;

  // Determine process count (1 when built serial, FEI_SER defined).
  int numProcs = 1;
#ifndef FEI_SER
  MPI_Comm_size(comm, &numProcs);
#endif

  if (numProcs > 1) {
    FEI_COUT << "skipping test of fei::DirichletBCManager::finalizeBCEqn, which only"
     << " runs on 1 proc." << FEI_ENDL;
    return;
  }

  int idtype = 0;
  int fieldID = 0;
  int fieldSize = 2;
  int offsetIntoField = 1;
  std::vector<int> ids(5);
  std::vector<double> vals(5);

  // Five BC records; each prescribed value equals its id.
  ids[0] = 2; vals[0] = 2.0;
  ids[1] = 1; vals[1] = 1.0;
  ids[2] = 3; vals[2] = 3.0;
  ids[3] = 4; vals[3] = 4.0;
  ids[4] = 0; vals[4] = 0.0;


  fei::SharedPtr<fei::VectorSpace> vspace(new fei::VectorSpace(comm));

  vspace->defineFields(1, &fieldID, &fieldSize);
  vspace->defineIDTypes(1, &idtype);

  // Square matrix-graph: same space for rows and columns.
  fei::SharedPtr<fei::MatrixGraph> mgraph(new fei::MatrixGraph_Impl2(vspace, vspace));

  int numIDs = 1;

  // Note: no field argument here, unlike the addBCRecords test above.
  int patternID = mgraph->definePattern(numIDs, idtype);

  int blockID = 0;

  // One connectivity list per id.
  mgraph->initConnectivityBlock(blockID, ids.size(), patternID);

  for(size_t i = 0; i<ids.size(); ++i) {
    mgraph->initConnectivity(blockID, i, &ids[i]);
  }

  mgraph->initComplete();


  fei::DirichletBCManager bcmgr(mgraph->getRowSpace());

  bcmgr.addBCRecords(5, idtype, fieldID, offsetIntoField,
                     &ids[0], &vals[0]);

  // Matrix_Impl wrapping a FillableMat, sized to the five local rows.
  fei::SharedPtr<fei::FillableMat> inner(new fei::FillableMat);
  fei::SharedPtr<fei::Matrix_Impl<fei::FillableMat> > feimat(new fei::Matrix_Impl<fei::FillableMat>(inner, mgraph, ids.size()));

  TEUCHOS_TEST_EQUALITY(bcmgr.finalizeBCEqns(*feimat, false), 0, out, success);

  // Serial run: global row count equals local row count equals 5.
  TEUCHOS_TEST_EQUALITY(feimat->getGlobalNumRows(), feimat->getLocalNumRows(),out,success);
  TEUCHOS_TEST_EQUALITY(feimat->getGlobalNumRows(), (int)ids.size(), out, success);
}
Пример #9
0
// Unit-test for fei::MatrixGraph_Impl2 pattern definition and graph
// creation on a two-element mesh with two id-types (nodes and edges).
// Checks that definePattern reproduces explicitly-built fei::Pattern
// objects and that the assembled graph has the expected sparsity.
TEUCHOS_UNIT_TEST(MatGraph, MatGraph_test1)
{
  // Serial-only test.
  int numprocs = fei::numProcs(MPI_COMM_WORLD);
  if (numprocs > 1) return;

  //two id-types: 0, 1:
  int idT[] = {0, 1};
  snl_fei::RecordCollection* recColls[] = {NULL,NULL};

  //set up a pattern for an element that has 6 ids: 3 nodes and 3 edges.
  const int numIDs = 6;

  //assume 0 is the node-type and 1 is the edge-type:
  int idTypes[] = {0, 0, 0, 1, 1, 1};

  //for the first pattern, only the edge ids will have a field attached:
  int fieldsPerID[] = {0, 0, 0, 1, 1, 1};

  int fieldID = 0;
  int fieldSize = 1;

  // One entry per field-carrying id (the three edges here).
  int fieldIDs[] = {fieldID, fieldID, fieldID};
  int fieldSizes[] = {fieldSize, fieldSize, fieldSize};

  fei::Pattern pattern1(numIDs, &idTypes[0], &recColls[0], &fieldsPerID[0], &fieldIDs[0], &fieldSizes[0]);

  //declare a vector-space, do some rudimentary initializations:

  fei::SharedPtr<fei::VectorSpace> rowspace(new fei::VectorSpace(MPI_COMM_WORLD));
  rowspace->defineIDTypes(2, &idT[0]);
  rowspace->defineFields(1, &fieldID, &fieldSize);

  // Empty column-space: row space is used for columns as well.
  fei::SharedPtr<fei::VectorSpace> colspace;

  //declare a matrix-graph:
  fei::MatrixGraph_Impl2 mgraph(rowspace, colspace);

  int patternID1 = mgraph.definePattern(numIDs, &idTypes[0], &fieldsPerID[0], &fieldIDs[0]);

  //unit-test: make sure the matrix-graph's pattern is the same as our
  //explicitly-declared pattern:
  fei::Pattern* pttn1 = mgraph.getPattern(patternID1);

  TEUCHOS_TEST_EQUALITY(pattern1 == *pttn1, true, out, success);

  //now declare a second pattern which is the same except now fields are
  //attached to nodes instead of edges:
  fieldsPerID[0] = 1; fieldsPerID[1] = 1; fieldsPerID[2] = 1;
  fieldsPerID[3] = 0; fieldsPerID[4] = 0; fieldsPerID[5] = 0;

  fei::Pattern pattern2(numIDs, &idTypes[0], &recColls[0], &fieldsPerID[0], &fieldIDs[0], &fieldSizes[0]);

  int patternID2 = mgraph.definePattern(numIDs, &idTypes[0], &fieldsPerID[0], &fieldIDs[0]);
  fei::Pattern* pttn2 = mgraph.getPattern(patternID2);
  TEUCHOS_TEST_EQUALITY(pattern2 == *pttn2, true, out, success);

  //declare two element-blocks, one for each pattern. each element block will have
  //just one element:
  mgraph.initConnectivityBlock(0, 1, patternID1);
  mgraph.initConnectivityBlock(1, 1, patternID2);

//Two-element mesh, each element has 3 vertex-nodes, and 3 edges:
/*
   0      1
   o---4--o
    \     | \
     \    |  \
      5   6   7
       \  |    \
        \ |     \
          o---8--o
          2      3
*/

  //the first element has nodes 0, 1, 2 and edges 4, 5, 6:
  int ids0[] = {0, 1, 2, 4, 5, 6};

  mgraph.initConnectivity(0, 0, &ids0[0]);

  //the second element has nodes 1, 2, 3 and edges 6, 7, 8:
  int ids1[] = {1, 2, 3, 6, 7, 8};

  mgraph.initConnectivity(1, 0, &ids1[0]);

  mgraph.initComplete();

  fei::SharedPtr<fei::SparseRowGraph> srg = mgraph.createGraph(false);

//The way we set things up, the graph should have 6 rows,
//with 3 nonzeros per row:

  TEUCHOS_TEST_EQUALITY(srg->rowNumbers.size(), 6, out, success);
  TEUCHOS_TEST_EQUALITY(srg->packedColumnIndices.size(), 18, out, success);
}
Пример #10
0
int test_Algebraic::serialtest1()
{
  int i, numRows = 10;
  fei::SharedPtr<fei::VectorSpace> vspace(new fei::VectorSpace(comm_));

  int idType = 0;

  vspace->defineIDTypes(1, &idType);

  std::vector<int> rowNumbers(numRows);
  for(i=0; i<numRows; ++i) {
    rowNumbers[i] = i;
  }

  CHK_ERR( vspace->addDOFs(idType, numRows, &rowNumbers[0]) );

  CHK_ERR( vspace->initComplete() );

  int index = -1;
  CHK_ERR( vspace->getGlobalIndex(idType, rowNumbers[3], index) );

  if (index != 3) {
    ERReturn(-1);
  }

  int numDOF = vspace->getNumDegreesOfFreedom(idType, rowNumbers[3]);

  if (numDOF != 1) {
    ERReturn(-1);
  }

  std::vector<int> globalOffsets;

  vspace->getGlobalIndexOffsets(globalOffsets);

  if (globalOffsets[0] != 0) {
    ERReturn(-1);
  }

  if (globalOffsets[1] != numRows) {
    ERReturn(-1);
  }

  fei::SharedPtr<fei::VectorSpace> dummy;
  fei::MatrixGraph_Impl2 mgraph(vspace, dummy);

  std::vector<int> rowOffsets(numRows+1);
  std::vector<int> packedColumnIDs(numRows);
  for(i=0; i<numRows; ++i) {
    rowOffsets[i] = i;
    packedColumnIDs[i] = i;
  }
  rowOffsets[numRows] = numRows;

  CHK_ERR( mgraph.initConnectivity(idType, numRows,
				   &rowNumbers[0],
				   &rowOffsets[0],
				   &packedColumnIDs[0]) );

  CHK_ERR( mgraph.initComplete() );

  fei::SharedPtr<fei::SparseRowGraph> localgraph = mgraph.createGraph(false);

  int mnumRows = localgraph->rowNumbers.size();
  int* mrowOffsets = &(localgraph->rowOffsets[0]);
  int mnumNonzeros = localgraph->packedColumnIndices.size();
  int* mpackedColumnIndices = &(localgraph->packedColumnIndices[0]);

  if (mnumRows != numRows) {
    ERReturn(-1);
  }

  if (mnumNonzeros != numRows) {
    ERReturn(-1);
  }

  for(i=0; i<numRows; ++i) {
    if ((mrowOffsets[i+1]-mrowOffsets[i]) != 1) {
      ERReturn(-1);
    }
    if (mpackedColumnIndices[i] != packedColumnIDs[i]) {
      ERReturn(-1);
    }
  }

  return(0);
}