Example #1
//BOP
// !IROUTINE:  ESMC::DistDir
//
// !INTERFACE:
DistDir<id_type,hash>::DistDir(
//
// !RETURN VALUE:
//    
//
// !ARGUMENTS:
ESMCI::VM &_vm,           // Virtual machine for this object
UInt ngid,              // number of global id's to register
const id_type gid[],    // array of the global id's
const id_type lid[])    // local id's corresponding to the registered gid's
//
//
// !DESCRIPTION:
//     Create a distributed directory object.
//    
//EOP
//-----------------------------------------------------------------------------
: vm(_vm),
hash_func(),
gmin(0), gmax(0),
npet(0), rank(0),
my_managed()
{
  // Set up the structure by sending my gid's to the pet (processor)
  // that manages them.

  {
    int npet_i, rank_i; // signed to unsigned helpers
    npet_i = vm.getPetCount();
    rank_i = vm.getLocalPet();
    npet = npet_i; rank = rank_i;
  }
  
  // Find local min,max and global
  id_type lmin = std::numeric_limits<id_type>::max(),
       lmax = 0;
  for (UInt i = 0; i < ngid; i++) {
    // If this is a signed type, make sure the value is non-negative.
    if (!is_unsigned<id_type>::value && gid[i] < 0) throw Ex() << "DistDir: gid[" << i << "]=" << gid[i] << " < 0.  Only unsigned values implemented.";
    if (gid[i] < lmin) lmin = gid[i];
    if (gid[i] > lmax) lmax = gid[i];
  }

  {
    int gmin_i, gmax_i;
    // The expensive part of the algorithm: determine the global minimum and
    // maximum of the id's across all pets.
    vm.allreduce(&lmin, &gmin_i, 1, vmI4, vmMIN);
    vm.allreduce(&lmax, &gmax_i, 1, vmI4, vmMAX);
    //MPI_Allreduce(&lmin, &t_gmin, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
    //MPI_Allreduce(&lmax, &t_gmax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    gmin = gmin_i; gmax = gmax_i; // copy the signed allreduce results back into the unsigned members
  }

  // Loop gids, set up sends
  std::vector<UInt> to_pet; // pets I will send to

  std::vector<UInt> send_sizes_all(npet, 0);
  for (UInt i = 0; i < ngid; i++) {
    UInt tpet = hash_func(gid[i], npet, gmin, gmax);
    std::vector<UInt>::iterator lb = std::lower_bound(to_pet.begin(), to_pet.end(), tpet);
    if (lb == to_pet.end() || *lb != tpet)
      to_pet.insert(lb, tpet);
    // gid
    send_sizes_all[tpet] += SparsePack<id_type>::size();
    // lid 
    send_sizes_all[tpet] += SparsePack<id_type>::size();
  }

  UInt nsend = to_pet.size();
  std::vector<UInt> sizes(nsend, 0);
  for (UInt i = 0; i < nsend; i++) sizes[i] = send_sizes_all[to_pet[i]];
  send_sizes_all.clear();  // free memory

  SparseMsgVM msg(vm);
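  // The exchange follows the sparse-message protocol used throughout this code:
  // declare the destination pets (setPattern), declare the per-pet byte counts
  // (setSizes), pack the send buffers, then communicate.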

  msg.setPattern(nsend, nsend > 0 ? &to_pet[0] : NULL);

  msg.setSizes(nsend > 0 ? &sizes[0] : NULL);

  // Now pack
  for (UInt i = 0; i < ngid; i++) {
    UInt pet = hash_func(gid[i], npet, gmin, gmax);
    SparseMsgVM::buffer &b = *msg.getSendBuffer(pet);
    // gid, lid
    SparsePack<id_type>(b, gid[i]);
    SparsePack<id_type>(b, lid[i]);
  }

  if (!msg.filled()) throw Ex() << "Message not filled, P:" << rank << ", DistDir()";

  msg.communicate();

  // Now unpack
  for (UInt *p = msg.inPet_begin(); p != msg.inPet_end(); ++p) {
   UInt pet = *p;
   SparseMsgVM::buffer &b = *msg.getRecvBuffer(pet);

   // Deduce the number of (gid, lid) pairs in the message
   UInt nmsg = b.msg_size() / (2*SparsePack<id_type>::size());
   for (UInt m = 0; m < nmsg; m++) {
     dentry d;
     SparseUnpack<id_type>(b, d.gid);
     SparseUnpack<id_type>(b, d.origin_lid);
     d.origin_pet = pet;

     my_managed.push_back(d);
   }
  }

  if (!msg.empty()) throw Ex() << "DistDir, msg unpack didn't empty buffer!";

  // Now, sort the list
  std::sort(my_managed.begin(), my_managed.end(), std::less<dentry>());
  

}
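
The constructor above routes each gid to a managing pet with hash_func(gid, npet, gmin, gmax); the hash functor itself is a template parameter of DistDir. Below is a minimal sketch of one such functor, assuming a simple linear partition of the id range; the name LinearHash and the whole block are illustrative, not taken from the source.

#include <cassert>

typedef unsigned int UInt;

struct LinearHash {  // hypothetical name; any functor with this call signature works
  template <typename id_type>
  UInt operator()(id_type gid, UInt npet, id_type gmin, id_type gmax) const {
    assert(npet > 0 && gid >= gmin && gid <= gmax);
    // Partition the inclusive range [gmin, gmax] into npet nearly equal slabs;
    // every pet computes the same owner for a given gid.
    unsigned long long off  = static_cast<unsigned long long>(gid - gmin);
    unsigned long long span = static_cast<unsigned long long>(gmax - gmin) + 1ULL;
    return static_cast<UInt>(off * npet / span);  // always in [0, npet) since off < span
  }
};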
Example #2
static void sync(CommRel &comm) {
  Trace __trace("sync(CommRel &comm)");
  UInt csize = Par::Size();

  SparseMsg msg;

  // Sizing loop
  std::vector<UInt> send_sizes_all(csize, 0);
  std::vector<UInt> to_proc;
  std::vector<UInt> to_sizes;

  CommRel::MapType::iterator di = comm.domain_begin(), de = comm.domain_end();
  for (; di != de; ++di) {
    UInt proc = di->processor;

    std::vector<UInt>::iterator lb =
        std::lower_bound(to_proc.begin(), to_proc.end(), proc);
    if (lb == to_proc.end() || *lb != proc) 
      to_proc.insert(lb, proc);

    // Attr
    send_sizes_all[proc] += SparsePack<Attr>::size();

  } //sizes

  UInt nsend = to_proc.size();
  msg.setPattern(nsend, nsend == 0 ? NULL : &to_proc[0]);

  to_sizes.resize(nsend, 0);
  for (UInt i = 0; i < nsend; i++)
    to_sizes[i] = send_sizes_all[to_proc[i]];

  msg.setSizes(nsend == 0 ? NULL : &to_sizes[0]);

  // Pack loop
  di = comm.domain_begin();
  for (; di != de; ++di) {
    UInt proc = di->processor;
    MeshObj &obj = *di->obj;
    SparseMsg::buffer &b = *msg.getSendBuffer(proc);

    // Attr
    SparsePack<Attr>(b, GetAttr(obj));
  }

  if (!msg.filled())
    Throw() << "Message not full in sync attr!";

  msg.communicate();

  // Create an object-to-context map so that objects are
  // only updated in the mesh once.
  typedef std::map<MeshObj*, Context> Obj_To_Ctxt_Type;
  Obj_To_Ctxt_Type obj_to_ctxt;

  // Unpack
  di = comm.domain_begin();
  for (; di != de; ++di) {
    MeshObj &obj = *di->obj;
    UInt proc = di->processor;
    SparseMsg::buffer &b = *msg.getRecvBuffer(proc);

    Attr a;
    SparseUnpack<Attr>(b, a);

    // Now the kernel: OR in all attributes except the shared ones
    // (ownership, etc...)
    const Attr &oa = GetAttr(obj);

    // For sanity:
    ThrowRequire(a.GetType() == oa.GetType());
    ThrowRequire(a.GetBlock() == oa.GetBlock());
    
    // Now merge the contexts
    Context c = a.GetContext();
    const Context &oc = oa.GetContext();
    Context nc(oc);

    // More sanity checks

    //ThrowRequire(c.is_set(Attr::ACTIVE_ID) == oc.is_set(Attr::ACTIVE_ID));
    if (!(c.is_set(Attr::ACTIVE_ID) == oc.is_set(Attr::ACTIVE_ID))) {
      Par::Out() << "Error, ACTIVE_ID incongruence, obj:" << obj;
      Par::Out() << "Incoming ctxt:" << c << std::endl;
      Throw();
    }
    ThrowRequire(c.is_set(Attr::SHARED_ID) && oc.is_set(Attr::SHARED_ID));
    ThrowRequire(c.is_set(Attr::GENESIS_ID) == oc.is_set(Attr::GENESIS_ID));

    // Both can't claim to own object
    //ThrowRequire(!(c.is_set(Attr::OWNED_ID) && oc.is_set(Attr::OWNED_ID)));
    if ((c.is_set(Attr::OWNED_ID) && oc.is_set(Attr::OWNED_ID))) {
      Par::Out() << "Error, OWNED_ID incongruence, obj:" << obj;
      Par::Out() << "Incoming attr:" << a << std::endl;
      Par::Out() << "From processor:" << proc << std::endl;
      Throw();
    }

    // Clear the bits not to merge
    c.clear(Attr::SHARED_ID);
    c.clear(Attr::OWNED_ID);
    c.clear(Attr::ACTIVE_ID);
    c.clear(Attr::GENESIS_ID);

    // OR in the rest
    nc |= c;

    // Add or OR in the new context, depending on whether object is in map.
    std::pair<Obj_To_Ctxt_Type::iterator, bool> otci = 
      obj_to_ctxt.insert(std::make_pair(&obj, nc));
    if (otci.second == false) { // already there
      otci.first->second |= nc;
    }
  }

  // One last loop through the object map, updating the mesh
  Obj_To_Ctxt_Type::iterator oi = obj_to_ctxt.begin(), oe = obj_to_ctxt.end();
  for (; oi != oe; ++oi) {
    MeshObj &obj = *oi->first;
    Context &nc = oi->second;
    if (nc != GetMeshObjContext(obj)) {
      Attr oa(GetAttr(obj), nc);
      comm.DomainMesh()->update_obj(&obj, oa); 
    }
  }
}
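
The heart of sync() is the context merge: the ownership-style bits (SHARED_ID, OWNED_ID, ACTIVE_ID, GENESIS_ID) are cleared from the incoming context, and only the remaining bits are OR'd into a copy of the local context. The fragment below is a self-contained illustration of that merge logic using std::bitset as a simplified stand-in for Attr/Context; the bit positions are made up for the example.

#include <bitset>
#include <iostream>

// Hypothetical bit positions; the real ones are defined by Attr.
enum { SHARED_ID = 0, OWNED_ID = 1, ACTIVE_ID = 2, GENESIS_ID = 3, PENDING_ID = 4 };

typedef std::bitset<8> Ctxt;

// Merge an incoming context into the local one the way sync() does:
// shared/ownership bits are never imported, everything else is OR'd in.
Ctxt merge_contexts(Ctxt incoming, const Ctxt &local) {
  incoming.reset(SHARED_ID);
  incoming.reset(OWNED_ID);
  incoming.reset(ACTIVE_ID);
  incoming.reset(GENESIS_ID);
  Ctxt merged(local);
  merged |= incoming;
  return merged;
}

int main() {
  Ctxt local, incoming;
  local.set(OWNED_ID);        // this processor owns the object
  incoming.set(SHARED_ID);    // the remote copy is shared, not owned
  incoming.set(PENDING_ID);   // the remote side set an unrelated bit
  std::cout << merge_contexts(incoming, local) << std::endl;  // prints 00010010
  return 0;
}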
Example #3
//BOP
// !IROUTINE:  ESMC::DistDir::RemoteGID
//
// !INTERFACE:
void DistDir<id_type,hash>::RemoteGID(
//
// !RETURN VALUE:
//    
//
// !ARGUMENTS:
UInt ngid,                 // number of global id's 
const id_type gid[],       // list of global id's to query
UInt orig_pet[],           // (out) pet of queried indices
id_type lid[],             // (out) local id's of queried indices
bool id_found[])              // (out) true=found, false=not in directory
//
//
// !DESCRIPTION:
//     Query information about a list of global id's.  Return the origin pet
//   and the original local id.
//    
//EOP
//-----------------------------------------------------------------------------
{
  // Cache the local requests (after communication)
  std::vector<dentry> requests;
  UInt req_size;

  { // Encapsulate send phase of requests.
    // Loop gids, set up sends
    std::vector<UInt> to_pet; // pets I will send to
  
    std::vector<UInt> send_sizes_all(npet, 0);
    for (UInt i = 0; i < ngid; i++) {
      UInt tpet = hash_func(gid[i], npet, gmin, gmax);
      std::vector<UInt>::iterator lb = std::lower_bound(to_pet.begin(), to_pet.end(), tpet);
      if (lb == to_pet.end() || *lb != tpet)
        to_pet.insert(lb, tpet);
      // gid
      send_sizes_all[tpet] += SparsePack<id_type>::size();
    }
  
  
    UInt nsend = to_pet.size();
    std::vector<UInt> sizes(nsend, 0);
    for (UInt i = 0; i < nsend; i++) sizes[i] = send_sizes_all[to_pet[i]];
    send_sizes_all.clear();  // free memory
  
    SparseMsgVM msg(vm);
  
    msg.setPattern(nsend, nsend > 0 ? &to_pet[0] : NULL);
  
    msg.setSizes(nsend > 0 ? &sizes[0] : NULL);
  
    // Now pack
    for (UInt i = 0; i < ngid; i++) {
      UInt pet = hash_func(gid[i], npet, gmin, gmax);
      SparseMsgVM::buffer &b = *msg.getSendBuffer(pet);
      // gid (lid and origin_pet come back in the reply message)
      SparsePack<id_type>(b, gid[i]);
    }
  
    if (!msg.filled()) throw Ex() << "RemoteGID, Message not filled, P:" << rank << ", DistDir()";
  
    msg.communicate();
  
    // Now unpack
    for (UInt *p = msg.inPet_begin(); p != msg.inPet_end(); ++p) {
     UInt pet = *p;
     SparseMsgVM::buffer &b = *msg.getRecvBuffer(pet);
  
     // Deduce the number of requested gid's in the message
     UInt nmsg = b.msg_size() / (1*SparsePack<id_type>::size());
     for (UInt m = 0; m < nmsg; m++) {
       dentry d;
       SparseUnpack<id_type>(b, d.gid);
       d.req_pet = pet;
  
       requests.push_back(d);
     }
    }
  
    if (!msg.empty()) throw Ex() << "RemoteGID, DistDir, msg unpack didn't empty buffer!";
  
  } // Phase 1 complete
  
  // Service the requests
  req_size = requests.size();
  for (UInt r = 0; r < req_size; r++) {
    dentry &req = requests[r];
    typename std::vector<dentry>::iterator ei = std::lower_bound(my_managed.begin(), my_managed.end(), req, dentry_less());
    // If we hit the end of the list, or if our first hit is a gid larger than our gid,
    // the directory doesn't contain the requested gid.
    if (ei == my_managed.end() || req.gid != ei->gid) { 
      req.valid = false;
    } else {
      dentry &ser = *ei;
      // valid request; update the info.
      req.origin_lid = ser.origin_lid;
      req.origin_pet = ser.origin_pet;
    }
  }

  // Build the reply.  Important: the ordering (per pet) of requests is the same
  // as the original request gid's.  When we send back, we use this same
  // ordering so that we can unpack straight into the request buffer.
  { // encapsulate the return message
    SparseMsgVM msg(vm);
    std::vector<UInt> to_pet; // pets I will send to

    std::vector<UInt> send_sizes_all(npet, 0);
    for (UInt i = 0; i < req_size; i++) {
      dentry &req = requests[i];
      UInt tpet = req.req_pet; // back to requestor
      std::vector<UInt>::iterator lb = std::lower_bound(to_pet.begin(), to_pet.end(), tpet);
      if (lb == to_pet.end() || *lb != tpet)
        to_pet.insert(lb, tpet);
      // lid
      send_sizes_all[tpet] += SparsePack<id_type>::size();
      // origin pet
      send_sizes_all[tpet] += SparsePack<UInt>::size();
      // Valid == 1, Invalid == 0.  Send as a UInt, since the binary
      // representation of bool is not portable through the pack interface.
      send_sizes_all[tpet] += SparsePack<UInt>::size();
    }

  
    UInt nsend = to_pet.size();
    std::vector<UInt> sizes(nsend, 0);
    for (UInt i = 0; i < nsend; i++) sizes[i] = send_sizes_all[to_pet[i]];
    send_sizes_all.clear();  // free memory

    msg.setPattern(nsend, nsend > 0 ? &to_pet[0] : NULL);
  
    msg.setSizes(nsend > 0 ? &sizes[0] : NULL);
  
    // Now pack
    for (UInt i = 0; i < req_size; i++) {
      dentry &req = requests[i];
      UInt pet = req.req_pet; // we send back to requestor
      SparseMsgVM::buffer &b = *msg.getSendBuffer(pet);
      // lid, origin_pet,valid
      SparsePack<id_type>(b, req.origin_lid);
      SparsePack<UInt>(b, req.origin_pet);
      UInt valid = req.valid ? 1 : 0;
      SparsePack<UInt>(b, valid);
    }
  
    if (!msg.filled()) throw Ex() << "RemoteGID, returning requests, Message not filled, P:" << rank << ", DistDir()";
  
    msg.communicate();

    // Now unpack.  This unpacking is a bit subtle: we loop over the original
    // gid's and recompute the pet number so that we know which buffer to pick
    // from next.  In this way we drain the buffers in the correct order, and
    // the replies line up with the requested gid's.

    for (UInt i = 0; i < ngid; i++) {
      UInt pet = hash_func(gid[i], npet, gmin, gmax);
      SparseMsgVM::buffer &b = *msg.getRecvBuffer(pet);
      // lid
      SparseUnpack<id_type>(b, lid[i]);
      // origin pet
      SparseUnpack<UInt>(b, orig_pet[i]);
      // valid
      UInt valid;
      SparseUnpack<UInt>(b, valid);
      id_found[i] = (valid == 1);
    }

    if (!msg.empty()) throw Ex() << "RemoteGID, returning requests, DistDir, msg unpack didn't empty buffer!";

  } // Done returning the message

}
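
Taken together, the constructor and RemoteGID give the usual create-then-query flow. The sketch below is a hypothetical usage example, not taken from the source: it assumes the DistDir and ESMCI::VM declarations are in scope and reuses the illustrative LinearHash functor sketched after Example #1.

// Hypothetical usage; header inclusion omitted, LinearHash is the sketch above.
void distdir_example(ESMCI::VM &vm) {
  // Register the ids this pet owns: gids 10 and 42 are local ids 0 and 1.
  UInt gids[] = { 10, 42 };
  UInt lids[] = { 0, 1 };
  DistDir<UInt, LinearHash> dir(vm, 2, gids, lids);

  // Ask the directory which pet registered gid 7, and under what local id.
  UInt query[1] = { 7 };
  UInt orig_pet[1];
  UInt qlid[1];
  bool found[1];
  dir.RemoteGID(1, query, orig_pet, qlid, found);

  // If found[0] is true, gid 7 was registered as local id qlid[0] on pet orig_pet[0].
}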