Пример #1
0
/*
 * minheap_add - insert @elem into the fixed-capacity binary min-heap.
 *
 * Returns 0 on success, -ENOMEM when the heap already holds HEAP_SIZE
 * elements.  Restores the heap order by sifting the new element up.
 */
int
minheap_add (heap_t *heap, elem_t elem)
{
	ofs_t loc, par;

	/* backing array is fixed-size; refuse to overflow it */
	if (heap->cnt >= HEAP_SIZE)
		return -ENOMEM;

	/* place new element in the next free spot */
	loc = heap->cnt;
	heap->cnt ++;
	heap->data[loc] = elem;

	/* sift up: while we have a parent, check that order is maintained */
	while ((par = heap_parent_of (heap, loc)) != OFS_INVAL) {

		key_t par_key = elem_key (heap->data[par]);
		key_t loc_key = elem_key (heap->data[loc]);

		/* order holds once the parent is no larger; '<=' (not '<')
		 * so equal keys stop here instead of being swapped all the
		 * way to the root for nothing */
		if (par_key <= loc_key)
			break;

		/* out of order: swap the elements and move up */
		heap_swap (heap, par, loc);
		loc = par;
	}

	return 0;
}
Пример #2
0
/*
 * minheap_get_first - pop the minimum element of the heap into *elem.
 *
 * Returns 0 on success, -ENOENT when the heap is empty.  The last
 * element is moved into the root slot and sifted down to restore the
 * heap order.
 */
int
minheap_get_first (heap_t *heap, elem_t *elem)
{
	elem_t ret;
	ofs_t loc, end;
	key_t key;

	if (heap->cnt == 0)
		return -ENOENT;

	/* this is what we are returning */
	ret = heap->data[0];

	/* move the last element into the first location, shrinking the heap
	 * by one */
	heap->data[0] = heap->data[--heap->cnt];

	/* poison the vacated slot (debug aid; assumes elem_t is assignable
	 * from -1 -- TODO confirm) */
	heap->data[heap->cnt] = -1;

	/* heap emptied: nothing to reorder.  This also keeps the
	 * (heap->cnt - 1) computation below from underflowing when ofs_t
	 * is an unsigned type. */
	if (heap->cnt == 0)
		goto done;

	/* now move down fixing the order */
	loc = 0;
	key = elem_key (heap->data[loc]);

	/* every node strictly below 'end' has both children; the node at
	 * 'end' itself has at most one child and is handled after the loop */
	end = (heap->cnt-1)/2;

	while (loc < end) {
		ofs_t min_ofs;
		ofs_t left_ofs = heap_left (heap, loc);
		ofs_t right_ofs = heap_right (heap, loc);

		key_t min_key;
		key_t left_key = elem_key (heap->data[left_ofs]);
		key_t right_key = elem_key (heap->data[right_ofs]);

		/* pick the smaller of the two children */
		if (left_key < right_key) {
			min_key = left_key;
			min_ofs = left_ofs;
		} else {
			min_key = right_key;
			min_ofs = right_ofs;
		}

		/* parent must be smaller */
		if (key < min_key)
			goto done;

		/* if not then swap the elements */
		heap_swap (heap, loc, min_ofs);

		/* move to the swapped child; after the swap our element
		 * lives at min_ofs, so 'key' still refers to it and must
		 * not be refreshed */
		loc = min_ofs;
	}

	/* it's possible that we have a node on our path with only one child;
	 * in this case the tree count is even and if it is then we must only
	 * compare to the left subtree */
	if ((loc == end) && !(heap->cnt & 1)) {
		ofs_t left_ofs = heap_left (heap, loc);
		key_t left_key;

		if (left_ofs == OFS_INVAL)
			goto done;

		left_key = elem_key (heap->data[left_ofs]);

		/* parent must be smaller */
		if (key < left_key)
			goto done;

		/* if not then swap the elements */
		heap_swap (heap, loc, left_ofs);
	}

done:
	*elem = ret;
	return 0;
}
Пример #3
0
/// @brief Rebalance the distributed mesh across processors using a global
///        oct-tree decomposition of element centroids.
///
/// Phases, in order (statement order is load-bearing):
///  1. communicate node coordinates to ghosted (aura) entities,
///  2. compute global bounds and per-processor oct-tree cut keys,
///  3. assign every element a destination processor from its centroid key,
///  4. collect rebalancing (entity, processor) pairs for all used entities,
///  5. drop the aura, exchange entities, then rebuild ownership/sharing,
///  6. regenerate the aura.
///
/// @param M                  mesh bulk data; modified in place.
/// @param node_coord_field   nodal coordinates; shared values assumed
///                           consistent on entry.
/// @param elem_weight_field  optional element weights for the cuts (may be
///                           null, per pointer-to-const signature).
/// @param cut_keys           output: p_size oct-tree cut keys, one per rank.
void comm_mesh_rebalance( BulkData & M ,
                          const CoordinateField & node_coord_field ,
                          const WeightField  * const elem_weight_field ,
                          std::vector<OctTreeKey> & cut_keys )
{
  const MetaData & mesh_meta_data  = M.mesh_meta_data();
  Part * const uses_part = & mesh_meta_data.locally_used_part();
  Part * const owns_part = & mesh_meta_data.locally_owned_part();

  const unsigned p_size = M.parallel_size();
  const unsigned p_rank = M.parallel_rank();

  //--------------------------------------------------------------------
  // The node_coord_field must be up to date on all processors
  // so that the element oct tree keys are parallel consistent.
  // It is assumed that the shared node_coord_field values are
  // already consistent.
  {
    const FieldBase * const ptr = & node_coord_field ;
    std::vector< const FieldBase *> tmp ;
    tmp.push_back( ptr );
    const std::vector<EntityProc> & aura_domain = M.ghost_source();
    const std::vector<EntityProc> & aura_range  = M.ghost_destination();
    communicate_field_data( M , aura_domain , aura_range , tmp , false );
  }
  //--------------------------------------------------------------------
  // Generate global oct-tree keys for local element centroids
  // and cuts for the global element centroids.

  double bounds[4] ;

  global_coordinate_bounds( M , node_coord_field , bounds );

  cut_keys.assign( p_size , OctTreeKey() );

  // cut_begin[r] is the lower-bound key for rank r; cut_first skips
  // rank 0 so the upper_bound search below maps keys < cut_keys[1] to 0.
  OctTreeKey * const cut_begin = & cut_keys[0] ;
  OctTreeKey * const cut_first = cut_begin + 1 ;
  OctTreeKey * const cut_end   = cut_begin + p_size ;

  global_element_cuts( M , bounds , node_coord_field ,
                                    elem_weight_field , cut_begin );

  //--------------------------------------------------------------------
  // Mapping of *all* elements to load balanced processor,
  // even the aura elements.
  // This requires that the node coordinates on the aura
  // elements be up to date.

  // NOTE(review): this block repeats the identical aura communication of
  // node_coord_field performed at the top of the function, with no
  // intervening coordinate modification visible here.  Looks redundant --
  // confirm whether one of the two can be removed.
  {
    std::vector< const FieldBase * > tmp ;
    const FieldBase * const tmp_coord = & node_coord_field ;
    tmp.push_back( tmp_coord );

    const std::vector<EntityProc> & d = M.ghost_source();
    const std::vector<EntityProc> & r = M.ghost_destination();

    communicate_field_data( M , d , r , tmp , false );
  }

  {
    const EntitySet & elem_set = M.entities( Element );
    const EntitySet::iterator i_end = elem_set.end();
          EntitySet::iterator i     = elem_set.begin();
    while ( i != i_end ) {
      Entity & elem = *i ; ++i ;

      const OctTreeKey k = elem_key( bounds , node_coord_field , elem );

      // Destination rank = number of cut keys (excluding rank 0's) that
      // are <= k; requires cut_keys to be sorted ascending.
      const unsigned p = std::upper_bound(cut_first, cut_end, k) - cut_first ;

      M.change_entity_owner( elem , p );
    }
  }
  //--------------------------------------------------------------------
  // Fill 'rebal' with all uses entities' rebalancing processors

  std::vector<EntityProc> rebal ;

  rebal_elem_entities( M , Node , rebal );
  rebal_elem_entities( M , Edge , rebal );
  rebal_elem_entities( M , Face , rebal );

  // Elements themselves: every element in a kernel of the 'uses' part is
  // paired with the owner rank just assigned above.
  {
    const Part & part_uses = * uses_part ;

    const KernelSet & elem_kernels = M.kernels( Element );
    const KernelSet::const_iterator i_end = elem_kernels.end();
          KernelSet::const_iterator i     = elem_kernels.begin();

    while ( i != i_end ) {
      const Kernel & kernel = *i ; ++i ;

      if ( kernel.has_superset( part_uses ) ) {

        const Kernel::iterator j_end = kernel.end();
              Kernel::iterator j     = kernel.begin();

        while ( j != j_end ) {
          Entity * const entity = *j ; ++j ;
          const unsigned p = entity->owner_rank();
          EntityProc tmp( entity , p );
          rebal.push_back( tmp );
        }
      }
    }
  }

  // The 'other' entities rebalance based upon the entities
  // that they use.  This may lead to more sharing entities.
  // Thus 'rebal' is input and then updated.

  rebal_other_entities( M , Particle , rebal );
  rebal_other_entities( M , Constraint , rebal );

  // 'rebal' now contains the rebalancing (entity,processor) pairs
  // for every non-aura entity.  Can now delete the aura entities.

  remove_aura( M );

  // Copy entities to new processors according to 'rebal'.
  // Only send the owned entities.
  // Include all processors associated with the entity in 'rebal'.
  // Unpack all nodes, then all edges, then all faces, then all elements, 
  // from each processor.
  // The owner of a shared entity is the max-rank processor.
  // Add received entities to shared if more than one processor.

  {
    const RebalanceComm manager ;
    std::vector<EntityProc> recv_rebal ;

    communicate_entities( manager , M , M , rebal , recv_rebal , false );

    // Destroy not-retained entities, they have been packed.
    // Remove the corresponding entries in 'rebal'

    destroy_not_retained( M , rebal );

    rebal.insert( rebal.end() , recv_rebal.begin() , recv_rebal.end() );

    sort_unique( rebal );
  }

  // The 'rebal' should contain a reference to every non-aura entity
  // on the local processor.  These references include every
  // processor on which the entity now resides, including the
  // local processor.

  { // Set parallel ownership and sharing parts.

    std::vector<EntityProc>::iterator ish ;

    // Walk runs of entries sharing the same entity (rebal is sorted by
    // entity after sort_unique); one ownership update per entity.
    for ( ish = rebal.begin() ; ish != rebal.end() ; ) {
      Entity & e = * ish->first ;

      // Skip past the remaining entries for this same entity.
      for ( ; ish != rebal.end() && ish->first == & e ; ++ish );

      const bool is_owned = p_rank == e.owner_rank() ;

      // Change ownership.

      std::vector<Part*> add_parts ;
      std::vector<Part*> remove_parts ;

      if ( is_owned ) { add_parts.push_back( owns_part ); }
      else            { remove_parts.push_back( owns_part ); }

      M.change_entity_parts( e , add_parts , remove_parts );
    }

    // Remove references to the local processor,
    // the remaining entries define the sharing.
    // (Iterating backwards so erase() does not skip elements.)

    for ( ish = rebal.end() ; ish != rebal.begin() ; ) {
      --ish ;
      if ( p_rank == ish->second ) { ish = rebal.erase( ish ); }
    }

    M.set_shares( rebal );
  }

  // Establish new aura

  comm_mesh_regenerate_aura( M );
}