Example No. 1
void LocalMultiBlockInfo3D::computePeriodicOverlaps (
        SparseBlockStructure3D const& sparseBlock, plint blockId )
{
    Box3D intersection; // Temporary variable.
    std::vector<plint> neighbors; // Temporary variable.
    SmartBulk3D bulk(sparseBlock, envelopeWidth, blockId);

    for (plint dx=-1; dx<=+1; dx+=1) {
        for (plint dy=-1; dy<=+1; dy+=1) {
            for (plint dz=-1; dz<=+1; dz+=1) {
                if (dx!=0 || dy!=0 || dz!=0) {
                    // The new block is shifted by the length of the full multi block in each space
                    //   direction. Consequently, overlaps between the original multi block and the
                    //   shifted new block are identified as periodic overlaps.
                    plint shiftX = dx*sparseBlock.getBoundingBox().getNx();
                    plint shiftY = dy*sparseBlock.getBoundingBox().getNy();
                    plint shiftZ = dz*sparseBlock.getBoundingBox().getNz();
                    Box3D shiftedBulk(bulk.getBulk().shift(shiftX,shiftY,shiftZ));
                    Box3D shiftedEnvelope(bulk.computeEnvelope().shift(shiftX,shiftY,shiftZ));
                    // Speed optimization: perform the following checks only if the shifted
                    //   domain touches the bounding box.
                    Box3D dummyIntersection;
                    if (intersect(shiftedEnvelope, sparseBlock.getBoundingBox(), dummyIntersection)) {
                        neighbors.clear();
                        sparseBlock.findNeighbors(shiftedBulk, envelopeWidth, neighbors);
                        // Check overlap with each existing block in the neighborhood, including with the newly added one.
                        for (pluint iNeighbor=0; iNeighbor<neighbors.size(); ++iNeighbor) {
                            plint neighborId = neighbors[iNeighbor];
                            SmartBulk3D neighborBulk(sparseBlock, envelopeWidth, neighborId);
                            // Does the envelope of the shifted new block overlap with the bulk of a previous
                            //   block? If yes, add an overlap, in which the previous block has the "original
                            //   position", and the new block has the "overlap position".
                            if (intersect(neighborBulk.getBulk(), shiftedEnvelope, intersection)) {
                                PeriodicOverlap3D overlap (
                                    Overlap3D(neighborId, blockId, intersection, shiftX, shiftY, shiftZ),
                                    dx, dy, dz );
                                periodicOverlaps.push_back(overlap);
                                periodicOverlapWithRemoteData.push_back(overlap);
                            }
                            // Does the bulk of the shifted new block overlap with the envelope of a previous
                            //   block? If yes, add an overlap, in which the new block has the "original position",
                            //   and the previous block has the "overlap position".
                            //   If we are in the situation in which the newly added block is periodic with itself,
                            //   this step must be skipped, because otherwise the overlap is counted twice.
                            if (!(neighborId==blockId) &&
                                intersect(shiftedBulk, neighborBulk.computeEnvelope(), intersection))
                            {
                                intersection = intersection.shift(-shiftX,-shiftY, -shiftZ);
                                periodicOverlaps.push_back (
                                        PeriodicOverlap3D (
                                            Overlap3D(blockId, neighborId, intersection, -shiftX, -shiftY, -shiftZ),
                                            -dx, -dy, -dz ) );
                            }
                        }
                    }
                }
            }
        }
    }
}
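
A minimal, self-contained sketch of the shift-and-intersect idea used above: each of the 26 periodic images of a block is obtained by shifting it by the full domain extent, and an image whose envelope re-enters the bounding box is exactly what computePeriodicOverlaps() registers as a periodic overlap. The plain-int Box3D, the intersect() helper, and the 100^3 extents below are illustrative stand-ins, not the Palabos API.

#include <algorithm>
#include <iostream>

struct Box3D {
    int x0, x1, y0, y1, z0, z1;
    Box3D shift(int dx, int dy, int dz) const {
        return {x0+dx, x1+dx, y0+dy, y1+dy, z0+dz, z1+dz};
    }
};

// Fills 'result' and returns true when boxes a and b overlap.
bool intersect(Box3D const& a, Box3D const& b, Box3D& result) {
    result = { std::max(a.x0,b.x0), std::min(a.x1,b.x1),
               std::max(a.y0,b.y0), std::min(a.y1,b.y1),
               std::max(a.z0,b.z0), std::min(a.z1,b.z1) };
    return result.x0<=result.x1 && result.y0<=result.y1 && result.z0<=result.z1;
}

int main() {
    Box3D boundingBox{0,99, 0,99, 0,99};   // full domain: 100^3 cells
    int w = 2;                             // envelope (ghost-layer) width
    Box3D bulk{90,99, 0,9, 0,9};           // a block touching the x=99 face
    Box3D envelope{bulk.x0-w, bulk.x1+w, bulk.y0-w, bulk.y1+w, bulk.z0-w, bulk.z1+w};
    for (int dx=-1; dx<=1; ++dx)
    for (int dy=-1; dy<=1; ++dy)
    for (int dz=-1; dz<=1; ++dz) {
        if (dx==0 && dy==0 && dz==0) continue;
        Box3D shifted = envelope.shift(dx*100, dy*100, dz*100);
        Box3D overlap;
        if (intersect(shifted, boundingBox, overlap))
            std::cout << "envelope image (" << dx << "," << dy << "," << dz
                      << ") re-enters the domain\n";
    }
    return 0;
}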
Example No. 2
inline void fillBoundingVolumesUsingNodesFromFile(
        MPI_Comm comm, const std::string& sphereFilename, FlaotBoxVector &spheres)
{
    const int spatialDim = 3;
    stk::mesh::MetaData meta(spatialDim);
    stk::mesh::BulkData bulk(meta, comm);

    stk::io::fill_mesh(sphereFilename, bulk);

    stk::mesh::EntityVector nodes;
    stk::mesh::get_selected_entities(meta.locally_owned_part(), bulk.buckets(stk::topology::NODE_RANK), nodes);

    spheres.clear();
    spheres.resize(nodes.size());

    stk::mesh::FieldBase const * coords = meta.coordinate_field();

    for (size_t i=0;i<nodes.size();i++)
    {
        stk::mesh::Entity node = nodes[i];
        double *data = static_cast<double*>(stk::mesh::field_data(*coords, node));

        double x=data[0];
        double y=data[1];
        double z=data[2];

        double radius=1e-5;
        unsigned id = bulk.identifier(node);
        FloatBox box(x-radius, y-radius, z-radius, x+radius, y+radius, z+radius);
        spheres[i] = std::make_pair(box, Ident(id, bulk.parallel_rank()));
    }
}
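
A hedged usage sketch for the helper above; the mesh file name is an assumption, and FlaotBoxVector is the (FloatBox, Ident) pair-vector typedef used throughout these search-test utilities.

#include <mpi.h>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    FlaotBoxVector spheres;
    // Every locally-owned node becomes a tiny box of half-width 1e-5
    // centered on the node's coordinates, tagged with (node id, rank).
    fillBoundingVolumesUsingNodesFromFile(MPI_COMM_WORLD, "spheres.exo", spheres);
    MPI_Finalize();
    return 0;
}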
Example No. 3
void LocalMultiBlockInfo3D::computeNormalOverlaps (
        SparseBlockStructure3D const& sparseBlock, plint blockId )
{
    Box3D intersection;
    SmartBulk3D bulk(sparseBlock, envelopeWidth, blockId);

    std::vector<plint> neighbors;
    sparseBlock.findNeighbors(blockId, envelopeWidth, neighbors);

    for (pluint iNeighbor=0; iNeighbor<neighbors.size(); ++iNeighbor)
    {
        plint neighborId = neighbors[iNeighbor];
        SmartBulk3D neighborBulk(sparseBlock, envelopeWidth, neighborId);
        if (intersect( neighborBulk.getBulk(),
                       bulk.computeNonPeriodicEnvelope(), intersection) )
        {
            normalOverlaps.push_back(Overlap3D(neighborId, blockId, intersection));
        }
        if (intersect( bulk.getBulk(),
                       neighborBulk.computeNonPeriodicEnvelope(), intersection) )
        {
            normalOverlaps.push_back(Overlap3D(blockId, neighborId, intersection));
        }
    }
}
Example No. 4
void UnitTestStkMeshBoundaryAnalysis::test_boundary_analysis_null_topology()
{
  //test on boundary_analysis for closure with a NULL topology - coverage of lines 39-40 of BoundaryAnalysis.cpp

  //create new meta, bulk and boundary for this test
  const int spatial_dimension = 3;
  stk::mesh::MetaData meta( stk::mesh::TopologicalMetaData::entity_rank_names(spatial_dimension) );
  stk::mesh::TopologicalMetaData top_data(meta, spatial_dimension);

  //declare part with topology = NULL
  stk::mesh::Part & quad_part = meta.declare_part("quad_part", top_data.side_rank);
  meta.commit();

  stk::ParallelMachine comm(MPI_COMM_WORLD);
  stk::mesh::BulkData bulk ( meta , comm , 100 );

  stk::mesh::EntitySideVector boundary;
  std::vector<stk::mesh::Entity*> newclosure;

  stk::mesh::PartVector face_parts;
  face_parts.push_back(&quad_part);

  bulk.modification_begin();
  if (m_rank == 0) {
    stk::mesh::Entity & new_face = bulk.declare_entity(top_data.side_rank, 1, face_parts);
    newclosure.push_back(&new_face);
  }

  stk::mesh::boundary_analysis(bulk, newclosure, top_data.side_rank, boundary);
  /*
  STKUNIT_EXPECT_TRUE(!boundary.empty());
  */

  bulk.modification_end();
}
Example No. 5
bool MultiBlockManagement3D::findAllLocalRepresentations (
            plint iX, plint iY, plint iZ, std::vector<plint>& foundId,
            std::vector<plint>& foundX, std::vector<plint>& foundY,
            std::vector<plint>& foundZ ) const
{
    bool hasBulkCell = false;
    // First, search in all blocks which are local to the current processor,
    // including in the envelopes.
    for (pluint iBlock=0; iBlock < localInfo.getBlocks().size(); ++iBlock) {
        plint blockId = localInfo.getBlocks()[iBlock];
        SmartBulk3D bulk(sparseBlock, envelopeWidth, blockId);
        if (contained(iX, iY, iZ, bulk.computeEnvelope()))
        {
            if (contained(iX, iY, iZ, bulk.getBulk())) {
                hasBulkCell = true;
                foundId.insert(foundId.begin(), blockId);
                foundX.insert(foundX.begin(), bulk.toLocalX(iX));
                foundY.insert(foundY.begin(), bulk.toLocalY(iY));
                foundZ.insert(foundZ.begin(), bulk.toLocalZ(iZ));
            }
            else {
                foundId.push_back(blockId);
                foundX.push_back(bulk.toLocalX(iX));
                foundY.push_back(bulk.toLocalY(iY));
                foundZ.push_back(bulk.toLocalZ(iZ));
            }
        }
    }
    // Here's a subtlety: with periodic boundary conditions, one may need to take into
    //   account a cell which is not inside the boundingBox, because it's at the opposite
    //   boundary. Therefore, this loop checks all blocks which overlap with the current
    //   one by periodicity.
    for (pluint iOverlap=0; iOverlap<localInfo.getPeriodicOverlapWithRemoteData().size();
         ++iOverlap)
    {
        Overlap3D overlap = localInfo.getPeriodicOverlapWithRemoteData()[iOverlap].overlap;
        if (contained(iX,iY,iZ, overlap.getOriginalCoordinates())) {
            plint overlapId = overlap.getOverlapId();
            foundId.push_back(overlapId);
            SmartBulk3D bulk(sparseBlock, envelopeWidth, overlapId);
            foundX.push_back(bulk.toLocalX(iX-overlap.getShiftX()));
            foundY.push_back(bulk.toLocalY(iY-overlap.getShiftY()));
            foundZ.push_back(bulk.toLocalZ(iZ-overlap.getShiftZ()));
        }
    }
    return hasBulkCell;
}
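
A self-contained sketch of the coordinate mapping at play here, under the assumption that a block's local coordinates are global coordinates taken relative to the bulk origin and offset by the envelope width; MiniBulk is a hypothetical stand-in for SmartBulk3D. For a cell reached through a periodic overlap, the recorded shift is subtracted first, exactly as in the second loop above.

struct MiniBulk {
    int x0;             // global x-origin of the block's bulk
    int envelopeWidth;  // ghost-layer width
    int toLocalX(int iX) const { return iX - x0 + envelopeWidth; }
};

int main() {
    MiniBulk bulk{90, 2};     // block starting at global x=90, envelope width 2
    int globalX = 0;          // cell sitting on the opposite domain boundary
    int shiftX = -100;        // periodic shift recorded in the overlap
    // Undo the shift before mapping to local coordinates:
    int localX = bulk.toLocalX(globalX - shiftX);  // 0 -> 100 -> local index 12
    return localX == 12 ? 0 : 1;
}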
Example No. 6
inline void fillBoxesUsingSidesetsFromFile(MPI_Comm comm, const std::string& filename, std::vector<FloatBox> &domainBoxes)
{
    stk::mesh::MetaData meta(3);
    stk::mesh::BulkData bulk(meta, comm);
    stk::io::fill_mesh(filename, bulk);

    createBoundingBoxesForSidesInSidesets(bulk, domainBoxes);
}
Example No. 7
inline void fillBoxesUsingElementBlocksFromFile(
        MPI_Comm comm, const std::string& volumeFilename, FlaotBoxVector &domainBoxes)
{
    stk::mesh::MetaData meta(3);
    stk::mesh::BulkData bulk(meta, comm);
    stk::io::fill_mesh(volumeFilename, bulk);

    createBoundingBoxesForElementsInElementBlocks(bulk, domainBoxes);
}
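
The two helpers above follow the same fill-the-mesh-then-extract pattern: read the mesh into a BulkData, then walk it to build bounding boxes. A hedged driver might look like this (the file names are assumptions):

#include <mpi.h>
#include <string>
#include <vector>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    std::vector<FloatBox> sideBoxes;
    FlaotBoxVector elementBoxes;
    fillBoxesUsingSidesetsFromFile(MPI_COMM_WORLD, "surface.exo", sideBoxes);
    fillBoxesUsingElementBlocksFromFile(MPI_COMM_WORLD, "volume.exo", elementBoxes);
    MPI_Finalize();
    return 0;
}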
Example No. 8
TEST(UnitTestKeyhole, NodeParts_case1)
{
  stk::ParallelMachine communicator = MPI_COMM_WORLD;

  int numProcs = stk::parallel_machine_size(communicator);
  if (numProcs != 2) {
    return;
  }

  const unsigned spatialDim = 2;
  stk::mesh::MetaData meta(spatialDim);
  stk::mesh::BulkData bulk(meta, communicator);

  setupKeyholeMesh2D_case1(bulk);

  stk::mesh::Part& shared = meta.globally_shared_part();
  const stk::mesh::BucketVector& shared_node_buckets = bulk.get_buckets(stk::topology::NODE_RANK, shared);
  stk::mesh::PartVector blocksA(2);
  blocksA[0] = meta.get_part("block_1");
  blocksA[1] = meta.get_part("block_2");
  unsigned num_shared_nodes = 0;
  for(size_t i=0; i<shared_node_buckets.size(); ++i) {
    num_shared_nodes += shared_node_buckets[i]->size();
    const stk::mesh::Bucket& bucket = *shared_node_buckets[i];
    std::ostringstream oss;
    oss<<"proc "<<bulk.parallel_rank()<<", shared node ids: ";
    for(size_t j=0; j<bucket.size(); ++j) oss <<bulk.identifier(bucket[j])<<" ";
    std::cerr<<oss.str()<<std::endl;
    bool in_both_blocks = bucket.member_all(blocksA);
    EXPECT_TRUE(in_both_blocks);
  }

  const unsigned expected_num_shared_nodes = 2;
  EXPECT_EQ(expected_num_shared_nodes, num_shared_nodes);

  if (bulk.parallel_rank() == 0) {
    stk::mesh::Entity node8 = bulk.get_entity(stk::topology::NODE_RANK, stk::mesh::EntityId(8));
    stk::mesh::Entity node9 = bulk.get_entity(stk::topology::NODE_RANK, stk::mesh::EntityId(9));

    EXPECT_TRUE(bulk.is_valid(node8));
    EXPECT_TRUE(bulk.is_valid(node9));

    const stk::mesh::Part& aura_part = meta.aura_part();
    EXPECT_TRUE(bulk.bucket(node8).member(aura_part));
    EXPECT_TRUE(bulk.bucket(node9).member(aura_part));

    stk::mesh::Part& block_2 = *meta.get_part("block_2");
    stk::mesh::Part& block_3 = *meta.get_part("block_3");
    stk::mesh::PartVector blocksB(2);
    blocksB[0] = &block_2;
    blocksB[1] = &block_3;
    EXPECT_TRUE(bulk.bucket(node8).member_all(blocksB));
    EXPECT_TRUE(bulk.bucket(node9).member_all(blocksB));
  }
}
Example No. 9
bool MultiBlockManagement3D::findInLocalBulk (
            plint iX, plint iY, plint iZ, plint& foundId,
            plint& localX, plint& localY, plint& localZ ) const
{
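    // locate() reports which block's bulk (if any) contains (iX,iY,iZ); a
    // negative id signals that no bulk contains the cell, hence the return
    // value below.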
    foundId = sparseBlock.locate(iX,iY,iZ);
    SmartBulk3D bulk(sparseBlock, envelopeWidth, foundId);
    localX = bulk.toLocalX(iX);
    localY = bulk.toLocalY(iY);
    localZ = bulk.toLocalZ(iZ);
    return foundId >= 0;
}
Example No. 10
void MultiContainerBlock3D::allocateBlocks() 
{
    for (pluint iBlock=0; iBlock<this->getLocalInfo().getBlocks().size(); ++iBlock)
    {
        plint blockId = this->getLocalInfo().getBlocks()[iBlock];
        SmartBulk3D bulk(this->getMultiBlockManagement(), blockId);
        Box3D envelope = bulk.computeEnvelope();
        AtomicContainerBlock3D* newBlock =
            new AtomicContainerBlock3D (
                    envelope.getNx(), envelope.getNy(), envelope.getNz() );
        newBlock->setLocation(Dot3D(envelope.x0, envelope.y0, envelope.z0));
        blocks[blockId] = newBlock;
    }
}
Example No. 11
/*
  This test calls ABLWallFrictionVelocity::compute_utau() for three cases: neutral, unstable,
  and stable stratification.  The gold values for utau are obtained from a separate
  Matlab code implementation of the ABL friction velocity calculation.
*/
TEST(ABLWallFunction, compute_abl_utau) {

  stk::mesh::MetaData meta(3);
  stk::mesh::BulkData bulk(meta, MPI_COMM_WORLD);
  const double gravity = 9.81;
  const double z0 = 0.1;
  const double Tref = 300.0;
  HelperObjectsABLWallFrictionVelocity helperObjs(bulk, &meta.universal_part(), gravity, z0, Tref);

  const double tolerance = 1.0e-9;
  const double up = 1.563;
  const double zp  = 2.5;

  // Neutral
  const double qsurf_neutral = 0.0;
  NeutralABLProfileFunction NeutralProfFun;
  ABLProfileFunction *ABLProfFun = &NeutralProfFun;
  double utau;
  const double utau_neutral_gold = 0.199085033056820;

  helperObjs.ABLWallFrictionAlgorithm->compute_utau(up, zp, qsurf_neutral, ABLProfFun, utau);

  EXPECT_NEAR(utau, utau_neutral_gold, tolerance);

  // Unstable
  const double beta_m = 16.0;
  const double beta_h = 16.0;
  UnstableABLProfileFunction UnstableProfFun(beta_m, beta_h);
  ABLProfFun = &UnstableProfFun;
  const double qsurf_unstable = 0.281;
  const double utau_unstable_gold = 0.264845587455159;

  helperObjs.ABLWallFrictionAlgorithm->compute_utau(up, zp, qsurf_unstable, ABLProfFun, utau);

  EXPECT_NEAR(utau, utau_unstable_gold, tolerance);

  // Stable
  const double gamma_m = 5.0;
  const double gamma_h = 5.0;
  StableABLProfileFunction StableProfFun(gamma_m, gamma_h);
  ABLProfFun = &StableProfFun;
  const double qsurf_stable = -0.02;
  const double utau_stable_gold = 0.156653826868250;

  helperObjs.ABLWallFrictionAlgorithm->compute_utau(up, zp, qsurf_stable, ABLProfFun, utau);
  
  EXPECT_NEAR(utau, utau_stable_gold, tolerance);

}
Example No. 12
    IndexAccessMethod* BtreeBasedAccessMethod::initiateBulk() {

        if ( _interface->nKeys( _btreeState,
                                _btreeState->head() ) > 0 )
            return NULL;

        auto_ptr<BtreeBulk> bulk( new BtreeBulk( this ) );
        bulk->_phase1.sortCmp.reset( getComparison( _descriptor->version(),
                                                    _descriptor->keyPattern() ) );

        bulk->_phase1.sorter.reset( new BSONObjExternalSorter(bulk->_phase1.sortCmp.get()) );
        bulk->_phase1.sorter->hintNumObjects( _btreeState->collection()->numRecords() );

        return bulk.release();
    }
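
The auto_ptr/release() pairing above is the pre-C++11 way to keep the partially-built bulk object exception-safe until ownership is handed to the caller. A hedged modern sketch of the same pattern, with BtreeBulk reduced to an empty stand-in:

#include <memory>

struct BtreeBulk { /* phase-1 sorter, comparator, ... */ };

BtreeBulk* initiateBulkSketch() {
    std::unique_ptr<BtreeBulk> bulk(new BtreeBulk());
    // ... configure the bulk object; if anything throws here,
    // unique_ptr still frees it ...
    return bulk.release();  // success: transfer ownership to the caller
}

int main() {
    delete initiateBulkSketch();
    return 0;
}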
Example No. 13
TEST(UnitTestKeyhole, EdgeParts_case2)
{
  stk::ParallelMachine communicator = MPI_COMM_WORLD;

  int numProcs = stk::parallel_machine_size(communicator);
  if (numProcs != 2) {
    return;
  }

  const unsigned spatialDim = 2;
  stk::mesh::MetaData meta(spatialDim);
  stk::mesh::BulkData bulk(meta, communicator);

  setupKeyholeMesh2D_case2(bulk);

  stk::mesh::create_edges(bulk);

  //find the edge between nodes 5 and 6.
  stk::mesh::Entity edge = stk::mesh::Entity();
  stk::mesh::EntityId nodeId5 = 5;
  stk::mesh::Entity node5 = bulk.get_entity(stk::topology::NODE_RANK, nodeId5);
  unsigned num_edges = bulk.num_edges(node5);
  const stk::mesh::Entity* edges = bulk.begin_edges(node5);
  for(unsigned i=0; i<num_edges; ++i) {
    stk::mesh::Entity this_edge = edges[i];
    const stk::mesh::Entity* edge_nodes = bulk.begin_nodes(this_edge);
    if (bulk.identifier(edge_nodes[0])==5 && bulk.identifier(edge_nodes[1])==6) {
      edge = this_edge;
      break;
    }
  }

  EXPECT_TRUE(bulk.is_valid(edge));
  std::cerr<<"proc "<<bulk.parallel_rank()<<" found edge id="<<bulk.identifier(edge)<<" between nodes 5 and 6"<<std::endl;

  const stk::mesh::Part& block_2 = *meta.get_part("block_2");
  const stk::mesh::Part& block_3 = *meta.get_part("block_3");
  const stk::mesh::Bucket& edge_bucket = bulk.bucket(edge);
  EXPECT_TRUE(edge_bucket.member(block_2));
  EXPECT_FALSE(edge_bucket.member(block_3));
}
Example No. 14
void test_field() {

  unsigned spatialDim = 3;
  stk::mesh::MetaData meta(spatialDim);
  stk::mesh::BulkData bulk(meta, MPI_COMM_WORLD);
  unsigned dimX = 10;
  unsigned dimY = 10;
  unsigned dimZ = 10;
  std::ostringstream mesh_spec;
  mesh_spec << "generated:"<<dimX<<"x"<<dimY<<"x"<<dimZ;
  stk::io::fill_mesh(mesh_spec.str(), bulk);

  ngp::StaticMesh staticMesh(bulk);
  double initialValue = 9.9;
  ngp::StaticField<double> scalarField(stk::topology::NODE_RANK, initialValue, bulk, meta.locally_owned_part());

  stk::mesh::EntityVector nodes;
  stk::mesh::get_selected_entities(meta.locally_owned_part(), bulk.buckets(stk::topology::NODE_RANK), nodes);
  Kokkos::View<stk::mesh::Entity*> device_nodes("device_nodes", nodes.size());
  Kokkos::View<stk::mesh::Entity*,Kokkos::HostSpace> host_nodes("host_nodes", nodes.size());

  for(size_t i=0; i<nodes.size(); ++i) {
      host_nodes(i) = nodes[i];
  }
  Kokkos::deep_copy(device_nodes, host_nodes);

  struct timeval begin,end;

  gettimeofday(&begin,NULL);

  int nrepeat = 500;
  double result = 0;

  for(int n=0; n<nrepeat; ++n) {

      result = 0;
      Kokkos::parallel_reduce(nodes.size(), KOKKOS_LAMBDA(int i, double& update) {
        update += scalarField.get(staticMesh, device_nodes(i), 0);
      }, result);
  }

  gettimeofday(&end, NULL);
  // The remainder of this listing was cut off; the closing lines below are a
  // reconstruction (an assumption): stop the timer and report the reduction.
  double elapsed = 1.0*(end.tv_sec - begin.tv_sec) + 1.0e-6*(end.tv_usec - begin.tv_usec);
  std::cout << "result=" << result << ", avg time per reduction=" << (elapsed/nrepeat) << "s" << std::endl;
}
Example No. 15
void dumpData( MultiBlock2D& multiBlock, bool dynamicContent,
               std::vector<plint>& offset, std::vector<plint>& myBlockIds,
               std::vector<std::vector<char> >& data )
{
    MultiBlockManagement2D const& management = multiBlock.getMultiBlockManagement();
    std::map<plint,Box2D> const& bulks = management.getSparseBlockStructure().getBulks();

    plint numBlocks = (plint) bulks.size();
    std::map<plint,plint> toContiguousId;
    std::map<plint,Box2D>::const_iterator it = bulks.begin();
    plint pos = 0;
    for (; it != bulks.end(); ++it) {
        toContiguousId[it->first] = pos;
        ++pos;
    }

    std::vector<plint> const& myBlocks = management.getLocalInfo().getBlocks();
    myBlockIds.resize(myBlocks.size());
    data.resize(myBlocks.size());
    std::vector<plint> blockSize(numBlocks);
    std::fill(blockSize.begin(), blockSize.end(), 0);
    for (pluint iBlock=0; iBlock<myBlocks.size(); ++iBlock) {
        plint blockId = myBlocks[iBlock];
        SmartBulk2D bulk(management, blockId);
        Box2D localBulk(bulk.toLocal(bulk.getBulk()));
        AtomicBlock2D const& block = multiBlock.getComponent(blockId);
        modif::ModifT typeOfVariables = dynamicContent ? modif::dataStructure : modif::staticVariables;
        block.getDataTransfer().send(localBulk, data[iBlock], typeOfVariables);
        plint contiguousId = toContiguousId[blockId];
        myBlockIds[iBlock] = contiguousId;
        blockSize[contiguousId] = (plint)data[iBlock].size();
    }
#ifdef PLB_MPI_PARALLEL
    global::mpi().allReduceVect(blockSize, MPI_SUM);
#endif
    offset.resize(numBlocks);
    std::partial_sum(blockSize.begin(), blockSize.end(), offset.begin());
}
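
The tail of dumpData() is easy to miss: once the per-block sizes have been summed across processes, a single inclusive prefix sum turns them into end offsets into the concatenated output stream. A self-contained illustration of that step:

#include <cassert>
#include <numeric>
#include <vector>

int main() {
    // Bytes contributed by each contiguous block id (after the MPI all-reduce).
    std::vector<long> blockSize{40, 0, 16, 24};
    std::vector<long> offset(blockSize.size());
    std::partial_sum(blockSize.begin(), blockSize.end(), offset.begin());
    // Block i occupies [offset[i] - blockSize[i], offset[i]) in the stream.
    assert(offset[0] == 40 && offset[2] == 56 && offset[3] == 80);
    return 0;
}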
Example No. 16
void MimirPersistor::persist_admins() {

    size_t nb_empty_polygons = 0;
    size_t nb_added_admin = 0;
    BulkRubber bulk(rubber);
    for (const auto& relation: data.relations) {
        if (relation.second.polygon.empty()) {
            ++nb_empty_polygons;
            continue;
        }

        js::value val;
        const auto uri = "admin:osm:" + std::to_string(relation.first);
        val["name"] = js::value::string(relation.second.name);

        val["id"] = js::value::string(uri);
        val["zip_codes"] = to_json_array(relation.second.zip_codes);
        val["insee"] = js::value::string(relation.second.insee);
        val["level"] = relation.second.level;

        val["coord"]["lat"] = js::value::number(relation.second.centre.get<0>());
        val["coord"]["lon"] = js::value::number(relation.second.centre.get<1>());

        const auto shape = to_geojson(relation.second.polygon);
        val["shape"] = shape;
        val["admin_shape"] = shape;
        val["weight"] = 0; // TODO

        UpdateAction action(uri, "admin", es_index, val);
        bulk.add(action);
        nb_added_admin++;
    }
    bulk.finish();
    auto logger = log4cplus::Logger::getInstance("log");
    LOG4CPLUS_INFO(logger, nb_added_admin << " admins added, "
    << nb_empty_polygons << " admins ignored because their polygons were empty");
}
Example No. 17
TEST(UnitTestKeyhole, NodeParts_case2)
{
  stk::ParallelMachine communicator = MPI_COMM_WORLD;

  int numProcs = stk::parallel_machine_size(communicator);
  if (numProcs != 2) {
    return;
  }

  const unsigned spatialDim = 2;
  stk::mesh::MetaData meta(spatialDim);
  stk::mesh::BulkData bulk(meta, communicator);

  setupKeyholeMesh2D_case2(bulk);

  if (bulk.parallel_rank() == 0) {
    stk::mesh::Part& aura = meta.aura_part();
    const stk::mesh::BucketVector& aura_node_buckets = bulk.get_buckets(stk::topology::NODE_RANK, aura);
    stk::mesh::PartVector blocks(2);
    blocks[0] = meta.get_part("block_2");
    blocks[1] = meta.get_part("block_3");
    unsigned num_aura_nodes = 0;
    for(size_t i=0; i<aura_node_buckets.size(); ++i) {
      num_aura_nodes += aura_node_buckets[i]->size();
      const stk::mesh::Bucket& bucket = *aura_node_buckets[i];
      std::cerr<<"proc 0, aura node ids: ";
      for(size_t j=0; j<bucket.size(); ++j) std::cerr<<bulk.identifier(bucket[j])<<" ";
      std::cerr<<std::endl;
      bool in_both_blocks = bucket.member_all(blocks);
      EXPECT_TRUE(in_both_blocks);
    }
  
    const unsigned expected_num_aura_nodes = 2;
    EXPECT_EQ(expected_num_aura_nodes, num_aura_nodes);
  }
}
Example No. 18
TEST(UnitTestGhosting, ThreeElemSendElemWithNonOwnedNodes)
{
    stk::ParallelMachine communicator = MPI_COMM_WORLD;

    int numProcs = stk::parallel_machine_size(communicator);
    if (numProcs != 3) {
      return;
    }

    int procId = stk::parallel_machine_rank(communicator);

    unsigned spatialDim = 3;
    stk::mesh::MetaData meta(spatialDim);
    stk::mesh::unit_test::BulkDataTester bulk(meta, communicator);
    const std::string generatedMeshSpecification = "generated:1x1x3";
    stk::unit_test_util::fill_mesh_using_stk_io(generatedMeshSpecification, bulk, communicator);
    bulk.modification_begin();
    stk::mesh::Ghosting& custom_shared_ghosting = bulk.create_ghosting("custom_shared");
    bulk.modification_end();

    stk::mesh::EntityProcVec ownedEntitiesToGhost;

    if (procId == 1)
    {
        stk::mesh::Entity elem2 = bulk.get_entity(stk::topology::ELEM_RANK, 2);
        int destProc = 2;
        ownedEntitiesToGhost.push_back(stk::mesh::EntityProc(elem2, destProc));
    }


    stk::mesh::EntityLess my_less(bulk);
    std::set<stk::mesh::EntityProc,stk::mesh::EntityLess> entitiesWithClosure(my_less);
    bulk.my_add_closure_entities(custom_shared_ghosting, ownedEntitiesToGhost, entitiesWithClosure);

    if (procId == 1)
    {
        std::vector<stk::mesh::EntityKey> gold_keys;
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 5));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 6));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 7));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 8));

        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::ELEM_RANK, 2));

        ASSERT_EQ(gold_keys.size(), entitiesWithClosure.size());

        unsigned i=0;
        int otherProc = 2;

        for(std::set<stk::mesh::EntityProc , stk::mesh::EntityLess>::const_iterator iter = entitiesWithClosure.begin();
                iter != entitiesWithClosure.end(); ++iter)
        {
            EXPECT_EQ(gold_keys[i], bulk.entity_key(iter->first));
            EXPECT_EQ(otherProc, iter->second);
            ++i;
        }
    }
    else
    {
        ASSERT_TRUE(entitiesWithClosure.empty());
    }

    stk::mesh::impl::move_unowned_entities_for_owner_to_ghost(bulk, entitiesWithClosure);

    if (procId==0)
    {
        std::vector<stk::mesh::EntityKey> gold_keys;
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 5));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 6));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 7));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 8));

        ASSERT_EQ(gold_keys.size(), entitiesWithClosure.size());

        unsigned i=0;
        int otherProc = 2;

        for(std::set<stk::mesh::EntityProc , stk::mesh::EntityLess>::const_iterator iter = entitiesWithClosure.begin();
                iter != entitiesWithClosure.end(); ++iter)
        {
            EXPECT_EQ(gold_keys[i], bulk.entity_key(iter->first));
            EXPECT_EQ(otherProc, iter->second);
            ++i;
        }
    }
    else if (procId==1)
    {
        std::vector<stk::mesh::EntityKey> gold_keys;
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::ELEM_RANK, 2));

        ASSERT_EQ(gold_keys.size(), entitiesWithClosure.size());

        unsigned i=0;
        int otherProc = 2;

        for(std::set<stk::mesh::EntityProc , stk::mesh::EntityLess>::const_iterator iter = entitiesWithClosure.begin();
                iter != entitiesWithClosure.end(); ++iter)
        {
            EXPECT_EQ(gold_keys[i], bulk.entity_key(iter->first));
            EXPECT_EQ(otherProc, iter->second);
            ++i;
        }
    }
    else
    {
        ASSERT_TRUE(entitiesWithClosure.empty());
    }

    bulk.modification_begin();

    stk::mesh::Ghosting &ghosting = bulk.create_ghosting("custom ghost unit test");
    bulk.my_ghost_entities_and_fields(ghosting, entitiesWithClosure);
    bulk.my_internal_modification_end_for_change_ghosting();

    if (procId == 0)
    {
        EXPECT_TRUE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 5), 2));
        EXPECT_TRUE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 6), 2));
        EXPECT_TRUE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 7), 2));
        EXPECT_TRUE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 8), 2));
        EXPECT_FALSE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::ELEM_RANK, 2), 2));
    }
    else if (procId == 1)
    {
        EXPECT_FALSE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 5), 2));
        EXPECT_FALSE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 6), 2));
        EXPECT_FALSE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 7), 2));
        EXPECT_FALSE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::NODE_RANK, 8), 2));
        EXPECT_TRUE(bulk.my_in_send_ghost(ghosting, stk::mesh::EntityKey(stk::topology::ELEM_RANK, 2), 2));
    }
    else if (procId == 2)
    {
        std::vector<stk::mesh::EntityKey> gold_keys;
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 5));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 6));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 7));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::NODE_RANK, 8));
        gold_keys.push_back(stk::mesh::EntityKey(stk::topology::ELEM_RANK, 2));

        for (size_t i=0;i<gold_keys.size();++i)
        {
            stk::mesh::Entity entity = bulk.get_entity(gold_keys[i]);
            ASSERT_TRUE(bulk.is_valid(entity));
            ASSERT_TRUE(bulk.in_receive_ghost(ghosting, gold_keys[i]));
        }
    }
}
Example No. 19
TEST(Hex27SCS, area_vec)
{
  stk::mesh::MetaData meta(3);
  stk::mesh::BulkData bulk(meta, MPI_COMM_WORLD);

  stk::mesh::Entity elem = unit_test_utils::create_one_reference_element(bulk, stk::topology::HEXAHEDRON_27);
  const auto* node_rels = bulk.begin_nodes(elem);
  sierra::nalu::Hex27SCS me;
  auto& coordField = *static_cast<const VectorFieldType*>(meta.coordinate_field());
  int dim = me.nDim_;

  std::mt19937 rng;
  rng.seed(std::mt19937::default_seed);
  std::uniform_real_distribution<double> coeff(-1.0, 1.0);
  std::vector<double> coeffs(dim);

  for (int j = 0; j < dim; ++j) {
    coeffs[j] = coeff(rng);
  }

  std::vector<double> polyResult(me.numIntPoints_ * dim);
  for (int j = 0; j < me.numIntPoints_; ++j) {
    for (int d = 0; d < dim; ++d) {
      polyResult[j*dim+d] = coeffs[d];
    }
  }

  Kokkos::View<double**> ws_coords("coords", me.nodesPerElement_, dim);
  for (int j = 0; j < me.nodesPerElement_; ++j) {
    const double* coords = stk::mesh::field_data(coordField, node_rels[j]);
    for (int d = 0; d < dim; ++d) {
      ws_coords(j, d) = coords[d];
    }
  }

  Kokkos::View<double**> meAreav("area_vec", me.numIntPoints_, dim);

  using AlgTraits = sierra::nalu::AlgTraitsHex27;
  using GradViewType = Kokkos::View<double[AlgTraits::numScsIp_][AlgTraits::nodesPerElement_][AlgTraits::nDim_]>;
  GradViewType refGrad = me.copy_deriv_weights_to_view<GradViewType>();

  double duration = 0;
  int nIt = 10000;
  for (int k = 0; k < nIt; ++k) {
    Kokkos::deep_copy(meAreav, 0.0);
    auto start_clock = clock_type::now();
    me.weighted_area_vectors(refGrad, ws_coords, meAreav);
    auto end_clock = clock_type::now();
    duration += 1.0e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(end_clock - start_clock).count();
  }
  std::cout << "Time per iteration: " << (duration/nIt)*1000 << "(ms)" <<std::endl;

  constexpr int nTypes = 3;
  int typeCount[nTypes] = {0,0,0};
  double exactAreaType[nTypes];

  exactAreaType[0] = 0.25 * (1 - std::sqrt(1./3.)) * (1 - std::sqrt(1./3.));
  exactAreaType[1] = 1.0 / 6.0 * (std::sqrt(3.) -1);
  exactAreaType[2] = 1.0 / 3.0;


  for (int ip = 0 ; ip < me.numIntPoints_; ++ip) {
    double mag = meAreav(ip, 0) * meAreav(ip, 0) + meAreav(ip, 1) * meAreav(ip, 1) + meAreav(ip, 2) * meAreav(ip, 2);
    EXPECT_GT(mag, tol);

    for (int i = 0; i < nTypes; ++i) {
      const double Asq = exactAreaType[i] * exactAreaType[i];
      if (std::abs(Asq - mag) < tol) {
        ++typeCount[i];
      }
    }
  }

  EXPECT_EQ(typeCount[0], 96);
  EXPECT_EQ(typeCount[1], 96);
  EXPECT_EQ(typeCount[2], 24);
}
Example No. 20
inline unsigned field_bytes_per_entity(const FieldBase& f, Entity e) {
  BulkData& bulk(f.get_mesh());
  ThrowAssert(f.entity_rank() == bulk.entity_rank(e));
  return field_bytes_per_entity(f, bulk.bucket(e));
}
Example No. 21
inline unsigned field_scalars_per_entity(const FieldBase& f, Entity e) {
  const unsigned bytes_per_scalar = f.data_traits().size_of;
  BulkData& bulk(f.get_mesh());
  ThrowAssert(f.entity_rank() == bulk.entity_rank(e));
  return field_bytes_per_entity(f, bulk.bucket(e))/bytes_per_scalar;
}
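
The division at the end of field_scalars_per_entity() is the whole conversion: per-entity bytes over per-scalar bytes. A self-contained check of that arithmetic, with a 3-component double field as an assumed example:

#include <cassert>

int main() {
    const unsigned bytes_per_entity = 3 * sizeof(double);  // e.g. a 3-vector field
    const unsigned bytes_per_scalar = sizeof(double);
    assert(bytes_per_entity / bytes_per_scalar == 3);
    return 0;
}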
Example No. 22
void MultiProcessing3D<OriginalGenerator,MutableGenerator>::subdivideGenerator()
{
    // To start with, determine which multi-blocks are read and which are written
    std::vector<bool> isWritten(multiBlocks.size());
    generator.getModificationPattern(isWritten);
    PLB_ASSERT( isWritten.size() == multiBlocks.size() );

    // The reference block (the one for which the envelope is included if
    //   the domain generator.appliesTo() includes the envelope) is either the
    //   multi-block which is written, or the first multi-block if all are read-only.
    pluint referenceBlock = 0;
    for (pluint iBlock=0; iBlock<isWritten.size(); ++iBlock) {
        if (isWritten[iBlock]) {
            referenceBlock = iBlock;
            break;
        }
    }

    // In debug mode, make sure that at most one multi-block is written when the envelope is included.
#ifdef PLB_DEBUG
    if ( BlockDomain::usesEnvelope(generator.appliesTo()) ) {
        plint numWritten = 0;
        for (pluint iBlock=0; iBlock<isWritten.size(); ++iBlock) {
            if (isWritten[iBlock]) {
                ++numWritten;
            }
        }
        PLB_ASSERT( numWritten <= 1 );
    }
#endif
    
    // The first step is to access the domains of the atomic blocks, as well
    //   as their IDs in each of the coupled multi blocks. The domain corresponds
    //   to the bulk and/or to the envelope, depending on the value of generator.appliesTo().
    std::vector<std::vector<DomainAndId3D> > domainsWithId(multiBlocks.size());
    for (pluint iMulti=0; iMulti<multiBlocks.size(); ++iMulti) {
        std::vector<plint> const& blocks
            = multiBlocks[iMulti]->getMultiBlockManagement().getLocalInfo().getBlocks();
        for (pluint iBlock=0; iBlock<blocks.size(); ++iBlock) {
            plint blockId = blocks[iBlock];
            SmartBulk3D bulk(multiBlocks[iMulti]->getMultiBlockManagement(), blockId);
            switch (generator.appliesTo()) {
                case BlockDomain::bulk:
                    domainsWithId[iMulti].push_back(DomainAndId3D(bulk.getBulk(),blockId));
                    break;
                case BlockDomain::bulkAndEnvelope:
                    // It's only the reference block that should have the envelope. However, we start
                    //   by assigning bulk and envelope to all of them, and eliminate overlapping
                    //   envelope components further down.
                    domainsWithId[iMulti].push_back(DomainAndId3D(bulk.computeEnvelope(),blockId));
                    break;
                case BlockDomain::envelope:
                    // For the reference block, we restrict ourselves to the envelope, because
                    //   that's the desired domain of application.
                    if (iMulti==referenceBlock) {
                        std::vector<Box3D> envelopeOnly;
                        except(bulk.computeEnvelope(), bulk.getBulk(), envelopeOnly);
                        for (pluint iEnvelope=0; iEnvelope<envelopeOnly.size(); ++iEnvelope) {
                            domainsWithId[iMulti].push_back(DomainAndId3D(envelopeOnly[iEnvelope], blockId));
                        }
                    }
                    // For the other blocks, we need to take bulk and envelope, because all these domains
                    //   potentially intersect with the envelope of the reference block.
                    else {
                        domainsWithId[iMulti].push_back(DomainAndId3D(bulk.computeEnvelope(),blockId));
                    }
                    break;
            }
        }
    }

    // If the multi-blocks are not at the same level of grid refinement, the level
    //   of the first block is taken as reference, and the coordinates of the other
    //   blocks are rescaled accordingly.
    plint firstLevel = multiBlocks[0]->getMultiBlockManagement().getRefinementLevel();
    for (pluint iMulti=1; iMulti<multiBlocks.size(); ++iMulti) {
        plint relativeLevel = firstLevel -
                             multiBlocks[iMulti]->getMultiBlockManagement().getRefinementLevel();
        if (relativeLevel != 0) {
            for (pluint iBlock=0; iBlock<domainsWithId[iMulti].size(); ++iBlock) {
                domainsWithId[iMulti][iBlock].domain =
                    global::getDefaultMultiScaleManager().scaleBox (
                            domainsWithId[iMulti][iBlock].domain, relativeLevel );
            }
        }
    }

    // If the envelopes are included as well, it is assumed that at most one of
    //   the multi blocks has write-access. All others (those that have read-only
    //   access) need to be non-overlapping, to avoid multiple writes on the cells
    //   of the write-access multi-block. Thus, overlaps are now eliminated in
    //   the read-access multi-blocks.
    if ( BlockDomain::usesEnvelope(generator.appliesTo()) ) {
        for (pluint iMulti=0; iMulti<multiBlocks.size(); ++iMulti) {
            if (!isWritten[iMulti]) {
                std::vector<DomainAndId3D> nonOverlapBlocks(getNonOverlapingBlocks(domainsWithId[iMulti]));
                domainsWithId[iMulti].swap(nonOverlapBlocks);
            }
        }
    }

    // This is the heart of the whole procedure: the intersections between the
    //   atomic blocks of all coupled multi blocks are identified.
    std::vector<Box3D> finalDomains;
    std::vector<std::vector<plint> > finalIds;
    intersectDomainsAndIds(domainsWithId, finalDomains, finalIds);

    // And, to end with, re-create processor generators adapted to the
    //   computed domains of intersection.
    if ( BlockDomain::usesEnvelope(generator.appliesTo()) ) {
        // In case the envelope is included, periodicity must be explicitly treated.
        //   Indeed, the user indicates the domain of applicability with respect to
        //   bulk nodes only. The generator is therefore shifted in all space directions
        //   to encompass periodic boundary nodes as well.
        plint shiftX = firstMultiBlock->getNx();
        plint shiftY = firstMultiBlock->getNy();
        plint shiftZ = firstMultiBlock->getNz();
        PeriodicitySwitch3D const& periodicity = firstMultiBlock->periodicity();
        for (plint orientX=-1; orientX<=+1; ++orientX) {
            for (plint orientY=-1; orientY<=+1; ++orientY) {
                for (plint orientZ=-1; orientZ<=+1; ++orientZ) {
                    if (periodicity.get(orientX,orientY,orientZ)) {
                        extractGeneratorOnBlocks( finalDomains, finalIds,
                                                  orientX*shiftX, orientY*shiftY, orientZ*shiftZ );
                    }
                }
            }
        }
    }
    else {
        extractGeneratorOnBlocks(finalDomains, finalIds);
    }
}
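
The "heart of the whole procedure" comment above refers to intersectDomainsAndIds(). The sketch below shows the idea in one dimension, with a hypothetical Interval record standing in for DomainAndId3D: each pairwise intersection is kept together with the ids of the atomic blocks that produced it, which is what the per-intersection processor generators are later built from.

#include <algorithm>
#include <iostream>
#include <vector>

struct Interval { int a, b, id; };  // 1D stand-in for DomainAndId3D

int main() {
    std::vector<Interval> multiA{{0, 9, 0}, {10, 19, 1}};  // blocks of multi-block A
    std::vector<Interval> multiB{{5, 14, 7}};              // blocks of multi-block B
    for (auto const& da : multiA) {
        for (auto const& db : multiB) {
            int lo = std::max(da.a, db.a);
            int hi = std::min(da.b, db.b);
            if (lo <= hi)  // one generator per non-empty intersection
                std::cout << "[" << lo << "," << hi << "] from block ids ("
                          << da.id << "," << db.id << ")\n";
        }
    }
    return 0;
}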
Example No. 23
void UnitTestBulkData::testChangeParts( ParallelMachine pm )
{
  static const char method[] =
    "stk::mesh::UnitTestBulkData::testChangeParts" ;

  std::cout << std::endl << method << std::endl ;

  const unsigned p_size = parallel_machine_size( pm );
  const unsigned p_rank = parallel_machine_rank( pm );

  if ( 1 < p_size ) return ;

  // Single process, no sharing

  // Meta data with entity ranks [0..9]
  std::vector<std::string> entity_names(10);
  for ( size_t i = 0 ; i < 10 ; ++i ) {
    std::ostringstream name ;
    name << "EntityRank_" << i ;
    entity_names[i] = name.str();
  }

  MetaData meta( entity_names );
  BulkData bulk( meta , pm , 100 );

  Part & part_univ = meta.universal_part();
  Part & part_owns = meta.locally_owned_part();

  Part & part_A_0 = meta.declare_part( std::string("A_0") , 0 );
  Part & part_A_1 = meta.declare_part( std::string("A_1") , 1 );
  Part & part_A_2 = meta.declare_part( std::string("A_2") , 2 );
  Part & part_A_3 = meta.declare_part( std::string("A_3") , 3 );

  Part & part_B_0 = meta.declare_part( std::string("B_0") , 0 );
  // Part & part_B_1 = meta.declare_part( std::string("B_1") , 1 );
  Part & part_B_2 = meta.declare_part( std::string("B_2") , 2 );
  // Part & part_B_3 = meta.declare_part( std::string("B_3") , 3 );

  meta.commit();
  bulk.modification_begin();

  PartVector tmp(1);

  tmp[0] = & part_A_0 ;
  Entity & entity_0_1 = bulk.declare_entity(  0 , 1 , tmp );

  tmp[0] = & part_A_1 ;
  Entity & entity_1_1 = bulk.declare_entity(  1 , 1 , tmp );

  tmp[0] = & part_A_2 ;
  Entity & entity_2_1 = bulk.declare_entity(  2 , 1 , tmp );

  tmp[0] = & part_A_3 ;
  Entity & entity_3_1 = bulk.declare_entity( 3 , 1 , tmp );

  entity_0_1.bucket().supersets( tmp );
  STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
  STKUNIT_ASSERT( tmp[0] == & part_univ );
  STKUNIT_ASSERT( tmp[1] == & part_owns );
  STKUNIT_ASSERT( tmp[2] == & part_A_0 );

  entity_1_1.bucket().supersets( tmp );
  STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
  STKUNIT_ASSERT( tmp[0] == & part_univ );
  STKUNIT_ASSERT( tmp[1] == & part_owns );
  STKUNIT_ASSERT( tmp[2] == & part_A_1 );

  entity_2_1.bucket().supersets( tmp );
  STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
  STKUNIT_ASSERT( tmp[0] == & part_univ );
  STKUNIT_ASSERT( tmp[1] == & part_owns );
  STKUNIT_ASSERT( tmp[2] == & part_A_2 );

  entity_3_1.bucket().supersets( tmp );
  STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
  STKUNIT_ASSERT( tmp[0] == & part_univ );
  STKUNIT_ASSERT( tmp[1] == & part_owns );
  STKUNIT_ASSERT( tmp[2] == & part_A_3 );

  {
    tmp.resize(1);
    tmp[0] = & part_A_0 ;
    bulk.change_entity_parts( entity_0_1 , tmp );
    entity_0_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
  }

  { // Add a new part:
    tmp.resize(1);
    tmp[0] = & part_B_0 ;
    bulk.change_entity_parts( entity_0_1 , tmp );
    entity_0_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(4) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
    STKUNIT_ASSERT( tmp[3] == & part_B_0 );
  }

  { // Remove the part just added:
    tmp.resize(1);
    tmp[0] = & part_B_0 ;
    bulk.change_entity_parts( entity_0_1 , PartVector() , tmp );
    entity_0_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
  }

  { // Relationship induced membership:
    bulk.declare_relation( entity_1_1 , entity_0_1 , 0 );
    entity_0_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(4) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
    STKUNIT_ASSERT( tmp[3] == & part_A_1 );
  }

  { // Remove relationship induced membership:
    bulk.destroy_relation( entity_1_1 , entity_0_1 );
    entity_0_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
  }

  { // Add a new part:
    tmp.resize(1);
    tmp[0] = & part_B_2 ;
    bulk.change_entity_parts( entity_2_1 , tmp );
    entity_2_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(4) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_2 );
    STKUNIT_ASSERT( tmp[3] == & part_B_2 );
  }

  { // Relationship induced membership:
    bulk.declare_relation( entity_2_1 , entity_0_1 , 0 );
    entity_0_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(5) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
    STKUNIT_ASSERT( tmp[3] == & part_A_2 );
    STKUNIT_ASSERT( tmp[4] == & part_B_2 );
  }

  { // Remove relationship induced membership:
    bulk.destroy_relation( entity_2_1 , entity_0_1 );
    entity_0_1.bucket().supersets( tmp );
    STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
  }

  bulk.modification_end();

  //------------------------------
  // Now the parallel fun.  Existing entities should be shared
  // by all processes since they have the same identifiers.
  // They should also have the same parts.

  entity_0_1.bucket().supersets( tmp );
  if ( entity_0_1.owner_rank() == p_rank ) {
    STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
  }
  else {
    STKUNIT_ASSERT_EQUAL( size_t(2) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_A_0 );
  }

  entity_2_1.bucket().supersets( tmp );
  if ( entity_2_1.owner_rank() == p_rank ) {
    STKUNIT_ASSERT_EQUAL( size_t(4) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_2 );
    STKUNIT_ASSERT( tmp[3] == & part_B_2 );
  }
  else {
    STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_A_2 );
    STKUNIT_ASSERT( tmp[2] == & part_B_2 );
  }

  if (bulk.parallel_size() > 1) {
    STKUNIT_ASSERT_EQUAL( size_t(p_size - 1) , entity_0_1.sharing().size() );
    STKUNIT_ASSERT_EQUAL( size_t(p_size - 1) , entity_1_1.sharing().size() );
    STKUNIT_ASSERT_EQUAL( size_t(p_size - 1) , entity_2_1.sharing().size() );
    STKUNIT_ASSERT_EQUAL( size_t(p_size - 1) , entity_3_1.sharing().size() );
  }

  bulk.modification_begin();

  // Add a new part on the owning process:

  int ok_to_modify = entity_0_1.owner_rank() == p_rank ;

  try {
    tmp.resize(1);
    tmp[0] = & part_B_0 ;
    bulk.change_entity_parts( entity_0_1 , tmp );
    STKUNIT_ASSERT( ok_to_modify );
  }
  catch( const std::exception & x ) {
    STKUNIT_ASSERT( ! ok_to_modify );
  }

  entity_0_1.bucket().supersets( tmp );
  if ( entity_0_1.owner_rank() == p_rank ) {
    STKUNIT_ASSERT_EQUAL( size_t(4) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
    STKUNIT_ASSERT( tmp[3] == & part_B_0 );
  }
  else {
    STKUNIT_ASSERT_EQUAL( size_t(2) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_A_0 );
  }

  bulk.modification_end();

  entity_0_1.bucket().supersets( tmp );
  if ( entity_0_1.owner_rank() == p_rank ) {
    STKUNIT_ASSERT_EQUAL( size_t(4) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_owns );
    STKUNIT_ASSERT( tmp[2] == & part_A_0 );
    STKUNIT_ASSERT( tmp[3] == & part_B_0 );
  }
  else {
    STKUNIT_ASSERT_EQUAL( size_t(3) , tmp.size() );
    STKUNIT_ASSERT( tmp[0] == & part_univ );
    STKUNIT_ASSERT( tmp[1] == & part_A_0 );
    STKUNIT_ASSERT( tmp[2] == & part_B_0 );
  }
}
Example No. 24
TEST(MasterElementFunctions, generic_grad_op_3d_hex_27)
{
  stk::mesh::MetaData meta(3);
  stk::mesh::BulkData bulk(meta, MPI_COMM_WORLD);

  stk::mesh::Entity elem = unit_test_utils::create_one_reference_element(bulk, stk::topology::HEXAHEDRON_27);
  const auto* node_rels = bulk.begin_nodes(elem);
  sierra::nalu::Hex27SCS me;
  auto& coordField = *static_cast<const VectorFieldType*>(meta.coordinate_field());
  int dim = me.nDim_;

  std::mt19937 rng;
  rng.seed(std::mt19937::default_seed);
  std::uniform_real_distribution<double> coeff(-1.0, 1.0);
  std::vector<double> coeffs(dim);

  double a = coeff(rng);
  for (int j = 0; j < dim; ++j) {
    coeffs[j] = coeff(rng);
  }

  std::vector<double> polyResult(me.numIntPoints_ * dim);
  for (int j = 0; j < me.numIntPoints_; ++j) {
    for (int d = 0; d < dim; ++d) {
      polyResult[j*dim+d] = coeffs[d];
    }
  }

  std::vector<double> ws_field(me.nodesPerElement_);
  Kokkos::View<double**> ws_coords("coords", me.nodesPerElement_, dim);
  for (int j = 0; j < me.nodesPerElement_; ++j) {
    const double* coords = stk::mesh::field_data(coordField, node_rels[j]);
    for (int d = 0; d < dim; ++d) {
      ws_coords(j, d) = coords[d];
    }
    ws_field[j] = linear_scalar_value(dim, a, coeffs.data(), coords);
  }

  Kokkos::View<double***> meGrad("grad", me.numIntPoints_, me.nodesPerElement_, dim);

  using AlgTraits = sierra::nalu::AlgTraitsHex27;
  using GradViewType = Kokkos::View<double[AlgTraits::numScsIp_][AlgTraits::nodesPerElement_][AlgTraits::nDim_]>;
  GradViewType refGrad = me.copy_deriv_weights_to_view<GradViewType>();

  double duration = 0;
  int nIt = 10000;
  for (int k = 0; k < nIt; ++k) {
    Kokkos::deep_copy(meGrad, 0.0);
    auto start_clock = clock_type::now();
    sierra::nalu::generic_grad_op_3d<AlgTraits>(refGrad, ws_coords, meGrad);
    auto end_clock = clock_type::now();
    duration += 1.0e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(end_clock - start_clock).count();
  }
  std::cout << "Time per iteration: " << (duration/nIt)*1000 << "(ms)" <<std::endl;

  std::vector<double> meResult(me.numIntPoints_ * dim, 0.0);
  for (int ip = 0; ip < me.numIntPoints_; ++ip) {
    for (int n = 0; n < me.nodesPerElement_; ++n) {
      for (int d = 0; d < dim; ++d) {
        meResult[ip*dim+d] += meGrad(ip,n,d) * ws_field[n];
      }
    }
  }

  // derivative should be exact to floating point error
  for (unsigned j = 0; j < meResult.size(); ++j) {
    EXPECT_NEAR(meResult[j], polyResult[j], tol);
  }
}
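
The pass/fail criterion of this test rests on one analytic fact: the gradient of the linear field f(x) = a + c·x is the constant vector c, so a consistent discrete gradient operator must reproduce it to round-off. A standalone central-difference check of the same fact (the coefficients and sample point are arbitrary):

#include <cassert>
#include <cmath>

int main() {
    const double a = 0.3, c[3] = {1.5, -0.25, 0.75};
    auto f = [&](const double* p) { return a + c[0]*p[0] + c[1]*p[1] + c[2]*p[2]; };
    const double h = 1e-6, x[3] = {0.1, 0.2, 0.3};
    for (int d = 0; d < 3; ++d) {
        double xp[3] = {x[0], x[1], x[2]}, xm[3] = {x[0], x[1], x[2]};
        xp[d] += h; xm[d] -= h;
        // Central differences are exact for linear fields (up to round-off).
        assert(std::abs((f(xp) - f(xm)) / (2*h) - c[d]) < 1e-8);
    }
    return 0;
}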