//---------------------------------------------------------------------------//
// Map a reference point to the physical space of an entity.
void MoabEntityLocalMap::mapToPhysicalFrame( 
    const Entity& entity,
    const Teuchos::ArrayView<const double>& reference_point,
    const Teuchos::ArrayView<double>& physical_point ) const
{
    cacheEntity( entity );

    DTK_CHECK_ERROR_CODE(
        d_moab_evaluator->eval( reference_point.getRawPtr(),
                                physical_point.getRawPtr() )
        );
}
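A minimal usage sketch for mapToPhysicalFrame(); `local_map` and `entity` are hypothetical objects from an existing DTK setup, and a 3-D mesh is assumed.

// Hypothetical setup: local_map is a MoabEntityLocalMap, entity a DTK Entity.
Teuchos::Array<double> reference_point( 3, 0.0 ); // parametric coordinates
Teuchos::Array<double> physical_point( 3 );
local_map.mapToPhysicalFrame( entity, reference_point(), physical_point() );
// physical_point now holds the spatial coordinates of the reference point.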
void
MultiVecAdapter<Epetra_MultiVector>::put1dData(
  const Teuchos::ArrayView<const MultiVecAdapter<Epetra_MultiVector>::scalar_t>& new_data,
  size_t lda,
  Teuchos::Ptr<
    const Tpetra::Map<MultiVecAdapter<Epetra_MultiVector>::local_ordinal_t,
                      MultiVecAdapter<Epetra_MultiVector>::global_ordinal_t,
                      MultiVecAdapter<Epetra_MultiVector>::node_t> > source_map)
{
  using Teuchos::rcpFromPtr;
  using Teuchos::as;

  const size_t num_vecs  = getGlobalNumVectors();
  // TODO: check that the following const_cast is safe
  double* data_ptr = const_cast<double*>(new_data.getRawPtr());

  // Optimization for ROOTED
  // TODO: Element-wise copy rather than using importer
  if ( num_vecs == 1 && mv_->Comm().MyPID() == 0 && mv_->Comm().NumProc() == 1 ) {
    const multivec_t source_mv(Copy, *mv_map_, data_ptr, as<int>(lda), as<int>(num_vecs));
    const Epetra_Import importer(*mv_map_, *mv_map_); // trivial - map does not change
    mv_->Import(source_mv, importer, Insert);
  }
  else {
    const Epetra_BlockMap e_source_map
      = *Util::tpetra_map_to_epetra_map<local_ordinal_t,global_ordinal_t,global_size_t,node_t>(*source_map);
    const multivec_t source_mv(Copy, e_source_map, data_ptr, as<int>(lda), as<int>(num_vecs));
    const Epetra_Import importer(*mv_map_, e_source_map);

    mv_->Import(source_mv, importer, Insert);
  }
}
//---------------------------------------------------------------------------//
// Map a point to the reference space of an entity. Return the parameterized
// point.
bool MoabEntityLocalMap::mapToReferenceFrame( 
    const Entity& entity,
    const Teuchos::ArrayView<const double>& physical_point,
    const Teuchos::ArrayView<double>& reference_point ) const
{
    cacheEntity( entity );

    int is_inside = -1;

    // Ignore the error code on this one because of the ridiculous
    // tolerancing/convergence scheme used in the implementation.
    d_moab_evaluator->reverse_eval(
        physical_point.getRawPtr(), d_newton_tol,
        d_inclusion_tol, reference_point.getRawPtr(), &is_inside );
    return (is_inside > 0);
}
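A hedged round-trip sketch combining the two maps (again with hypothetical `local_map` and `entity`, 3-D assumed): invert a physical point, then push it back through the forward map.

Teuchos::Array<double> phys( 3 ); // fill with a point expected to be inside
Teuchos::Array<double> ref( 3 );
if ( local_map.mapToReferenceFrame( entity, phys(), ref() ) )
{
    Teuchos::Array<double> phys_check( 3 );
    local_map.mapToPhysicalFrame( entity, ref(), phys_check() );
    // phys_check should match phys to within the Newton tolerance.
}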
void MultiVecAdapter<Epetra_MultiVector>::get1dCopy(
  const Teuchos::ArrayView<MultiVecAdapter<Epetra_MultiVector>::scalar_t>& av,
  size_t lda,
  Teuchos::Ptr<
    const Tpetra::Map<MultiVecAdapter<Epetra_MultiVector>::local_ordinal_t,
                      MultiVecAdapter<Epetra_MultiVector>::global_ordinal_t,
                      MultiVecAdapter<Epetra_MultiVector>::node_t> > distribution_map ) const
{
  using Teuchos::rcpFromPtr;
  using Teuchos::as;

  const size_t num_vecs = getGlobalNumVectors();
  
#ifdef HAVE_AMESOS2_DEBUG
  const size_t requested_vector_length = distribution_map->getNodeNumElements();
  TEUCHOS_TEST_FOR_EXCEPTION( lda < requested_vector_length,
		      std::invalid_argument,
		      "Given stride is not large enough for local vector length" );
  TEUCHOS_TEST_FOR_EXCEPTION( as<size_t>(av.size()) < (num_vecs-1) * lda + requested_vector_length,
		      std::invalid_argument,
		      "MultiVector storage not large enough given leading dimension "
		      "and number of vectors" );
#endif

  // Optimization for ROOTED
  if ( num_vecs == 1 && mv_->Comm().MyPID() == 0 && mv_->Comm().NumProc() == 1 ) {
    mv_->ExtractCopy(av.getRawPtr(), lda);
  }
  else {
    Epetra_Map e_dist_map
      = *Util::tpetra_map_to_epetra_map<local_ordinal_t,
                                        global_ordinal_t,
                                        global_size_t,
                                        node_t>(*distribution_map);

    multivec_t redist_mv(e_dist_map, as<int>(num_vecs));
    const Epetra_Import importer(e_dist_map, *mv_map_); // Note, target/source order is reversed in Tpetra
    redist_mv.Import(*mv_, importer, Insert);

    // Finally, do copy
    redist_mv.ExtractCopy(av.getRawPtr(), lda);
  }

}
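A usage sketch for get1dCopy(); `adapter` is assumed to be an already-constructed MultiVecAdapter<Epetra_MultiVector> and `dist_map` an RCP to the desired distribution map.

const size_t num_vecs = adapter.getGlobalNumVectors();
const size_t lda = dist_map->getNodeNumElements(); // local stride
Teuchos::Array<double> buf( lda * num_vecs );
adapter.get1dCopy( buf(), lda, dist_map.ptr() );
// buf now holds the vectors column by column with leading dimension lda.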
Example #5
void RTOpC::extract_reduct_obj_state_impl(
  const ReductTarget &reduct_obj,
  const Teuchos::ArrayView<primitive_value_type> &value_data,
  const Teuchos::ArrayView<index_type> &index_data,
  const Teuchos::ArrayView<char_type> &char_data
  ) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    0!=RTOp_extract_reduct_obj_state(
      &op_, (*this)(reduct_obj),
      value_data.size(), value_data.getRawPtr(),
      index_data.size(), index_data.getRawPtr(),
      char_data.size(), char_data.getRawPtr()
      ),
    UnknownError,
    "RTOpC::extract_reduct_obj_state(...): Error, "
    "RTOp_extract_reduct_obj_state(...) returned != 0"
    );
}
//---------------------------------------------------------------------------//
// Determine if a reference point is in the parameterized space of an entity.
bool MoabEntityLocalMap::checkPointInclusion( 
    const Entity& entity,
    const Teuchos::ArrayView<const double>& reference_point ) const
{
    cacheEntity( entity );

    int is_inside = d_moab_evaluator->inside( reference_point.getRawPtr(),
                                              d_inclusion_tol );
    return (is_inside > 0);
}
Example #7
void RTOpC::load_reduct_obj_state_impl(
  const Teuchos::ArrayView<const primitive_value_type> &value_data,
  const Teuchos::ArrayView<const index_type> &index_data,
  const Teuchos::ArrayView<const char_type> &char_data,
  const Teuchos::Ptr<ReductTarget> &reduct_obj
  ) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    0!=RTOp_load_reduct_obj_state(
      &op_,
      value_data.size(), value_data.getRawPtr(),
      index_data.size(), index_data.getRawPtr(),
      char_data.size(), char_data.getRawPtr(),
      (*this)(*reduct_obj)
      ),
    UnknownError,
    "RTOpC::load_reduct_obj_state(...): Error, "
    "RTOp_load_reduct_obj_state(...) returned != 0"
    );
}
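Together, extract_reduct_obj_state() and load_reduct_obj_state() externalize a reduction object's state as flat arrays so it can cross a process boundary. A hedged round-trip sketch, assuming the companion get_reduct_type_num_entries() and reduct_obj_create() members of the same RTOpT interface, with `op` an RTOpC instance and `reduct_obj` a target produced by a prior apply_op():

int num_values = 0, num_indexes = 0, num_chars = 0;
op.get_reduct_type_num_entries( Teuchos::outArg(num_values),
                                Teuchos::outArg(num_indexes),
                                Teuchos::outArg(num_chars) );
Teuchos::Array<RTOpC::primitive_value_type> values( num_values );
Teuchos::Array<RTOpPack::index_type> indexes( num_indexes );
Teuchos::Array<RTOpPack::char_type> chars( num_chars );
op.extract_reduct_obj_state( reduct_obj, values(), indexes(), chars() );
// ... communicate the three flat arrays to another process ...
Teuchos::RCP<RTOpPack::ReductTarget> rebuilt = op.reduct_obj_create();
op.load_reduct_obj_state( values(), indexes(), chars(), rebuilt.ptr() );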
//---------------------------------------------------------------------------//
// Return the centroid of the entity.
void MoabEntityLocalMap::centroid( 
    const Entity& entity, const Teuchos::ArrayView<double>& centroid ) const
{
    // Node case.
    if ( 0 == entity.topologicalDimension() )
    {
        moab::EntityHandle handle = MoabHelpers::extractEntity(entity);
        d_moab_mesh->get_moab()->get_coords( &handle, 1, centroid.getRawPtr() );
    }
    // Element case.
    else
    {
        cacheEntity( entity );
        Teuchos::Array<double> param_center;
        parametricCenter( entity, param_center );

        DTK_CHECK_ERROR_CODE(
            d_moab_evaluator->eval( param_center.getRawPtr(),
                                    centroid.getRawPtr() )
            );
    }
}
    template <class D, class T> // assumed enclosing template header
    Kokkos::View<const T*,D>
    getKokkosViewDeepCopy(const Teuchos::ArrayView<const T>& a) {
#if defined(KOKKOS_HAVE_PTHREAD)
      typedef Kokkos::Threads HostDevice;
#elif defined(KOKKOS_HAVE_OPENMP)
      typedef Kokkos::OpenMP HostDevice;
#else
      typedef Kokkos::Serial HostDevice;
#endif
      typedef Kokkos::View<T*,D>  view_type;
      typedef Kokkos::View<const T*,typename view_type::array_layout,HostDevice,Kokkos::MemoryUnmanaged> unmanaged_host_view_type;
      if (a.size() == 0)
        return view_type();
      view_type v("", a.size());
      unmanaged_host_view_type hv(a.getRawPtr(), a.size());
      Kokkos::deep_copy(v,hv);
      return v;
    }
 template <class D, class T> // assumed enclosing template header
 Kokkos::View<const T*,D>
 getKokkosViewDeepCopy(const Teuchos::ArrayView<const T>& a)
 {
   typedef typename Kokkos::Impl::if_c<
     Impl::VerifyExecutionCanAccessMemorySpace< D, Kokkos::HostSpace>::value,
     typename D::execution_space, Kokkos::HostSpace>::type
     HostDevice;
   typedef Kokkos::View<T*, D>  view_type;
   typedef Kokkos::View<const T*, typename view_type::array_layout, HostDevice,
                        Kokkos::MemoryUnmanaged> unmanaged_host_view_type;
   if (a.size () == 0) {
     return view_type ();
   }
   view_type v ("", a.size ());
   unmanaged_host_view_type hv (a.getRawPtr (), a.size ());
   Kokkos::deep_copy (v, hv);
   return v;
 }
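A usage sketch for either overload (hedged: assumes the restored template header with the device parameter D first, and a CUDA-capable build):

std::vector<double> host_data( 100, 1.0 );
Teuchos::ArrayView<const double> av( host_data.data(), host_data.size() );
// Deep-copy the host data into a freshly allocated device View.
Kokkos::View<const double*, Kokkos::Cuda> dev =
  getKokkosViewDeepCopy<Kokkos::Cuda>( av );
// host_data may be destroyed afterwards; dev owns its own allocation.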
  template <class LocalOrdinal, class GlobalOrdinal, class DeviceType> // assumed enclosing template header
  Map<LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType> >::
  Map (const global_size_t globalNumIndices,
       const Teuchos::ArrayView<const GlobalOrdinal>& myGlobalIndices,
       const GlobalOrdinal indexBase,
       const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
       const Teuchos::RCP<node_type> &node) :
    comm_ (comm),
    node_ (node),
    directory_ (new Directory<LocalOrdinal, GlobalOrdinal, node_type> ())
  {
    typedef GlobalOrdinal GO;
    typedef Kokkos::View<const GlobalOrdinal*, device_type,
                         Kokkos::MemoryUnmanaged> host_view_type;
    typedef Kokkos::View<GlobalOrdinal*, device_type> device_view_type;

    // Copy the input GID list from host (we assume that
    // Teuchos::ArrayView should only view host memory) to device.
    //
    // FIXME (mfh 06 Feb, 24 Mar 2014) We could use the CUDA API
    // function here that can look at a pointer and tell whether it
    // lives on host or device, to tell whether the Teuchos::ArrayView
    // is viewing host or device memory.  Regardless, we don't own the
    // data and we will need a deep copy anyway, so we might as well
    // copy it.
    host_view_type gidsHost (myGlobalIndices.getRawPtr (), myGlobalIndices.size ());
    device_view_type gidsDevice ("GIDs", myGlobalIndices.size ());
    Kokkos::deep_copy (gidsDevice, gidsHost);

    const global_size_t GSTI = Teuchos::OrdinalTraits<global_size_t>::invalid ();
    const GO globalNumInds = (globalNumIndices == GSTI) ?
      getInvalidGlobalIndex () : Teuchos::as<GO> (globalNumIndices);

    // Start with a host Map implementation, since this will make this
    // class' public (host) methods work.  If users want device
    // methods, they will call getDeviceView(), which will initialize
    // the device Map implementation.
    //
    // NOTE (mfh 06 Feb 2014) If we're using UVM, we don't really need
    // the host and device Maps to be separate.
    mapHost_ = host_impl_type (globalNumInds, gidsDevice, indexBase, *comm);

    // Create the Directory on demand in getRemoteIndexList().
  }
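A construction sketch for the GID-list constructor above (hedged: `comm` and `node` are assumed to come from application setup code; LocalOrdinal = GlobalOrdinal = int, and the node type is a placeholder):

typedef Kokkos::Compat::KokkosDeviceWrapperNode<Kokkos::OpenMP> node_type;
const Tpetra::global_size_t GSTI =
  Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid ();
// Four contiguous GIDs per rank: rank r owns {4r, 4r+1, 4r+2, 4r+3}.
Teuchos::Array<int> myGids (4);
for (int i = 0; i < 4; ++i) {
  myGids[i] = 4 * comm->getRank () + i;
}
// Passing the "invalid" flag asks the Map to compute the global count.
Map<int, int, node_type> map (GSTI, myGids (), 0, comm, node);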
void
MultiVecAdapter<Epetra_MultiVector>::put1dData(
  const Teuchos::ArrayView<const MultiVecAdapter<Epetra_MultiVector>::scalar_t>& new_data,
  size_t lda,
  Teuchos::Ptr<
    const Tpetra::Map<MultiVecAdapter<Epetra_MultiVector>::local_ordinal_t,
                      MultiVecAdapter<Epetra_MultiVector>::global_ordinal_t,
                      MultiVecAdapter<Epetra_MultiVector>::node_t> > source_map)
{
  using Teuchos::rcpFromPtr;
  using Teuchos::as;

  const size_t num_vecs  = getGlobalNumVectors();
  // TODO: check that the following const_cast is safe
  double* data_ptr = const_cast<double*>(new_data.getRawPtr());
  const Epetra_BlockMap e_source_map
    = *Util::tpetra_map_to_epetra_map<local_ordinal_t,global_ordinal_t,global_size_t,node_t>(*source_map);
  const multivec_t source_mv(Copy, e_source_map, data_ptr, as<int>(lda), as<int>(num_vecs));
  const Epetra_Import importer(*mv_map_, e_source_map);
  
  mv_->Import(source_mv, importer, Insert);
}
Example #13
PyObject *
convertToDimData(const Teuchos::RCP< const Tpetra::Map< PYTRILINOS_LOCAL_ORD,
                                                        PYTRILINOS_GLOBAL_ORD > > & tm,
                 int   extraDim)
{
  // Initialization
  PyObject * dim_data    = NULL;
  PyObject * dim_dict    = NULL;
  PyObject * indices     = NULL;
  npy_intp   dims        = 1;
  int        currentDim  = 0;
  Teuchos::ArrayView< const PYTRILINOS_GLOBAL_ORD > nodeBlockIDs;

  // Get the Teuchos::Comm
  Teuchos::RCP< const Teuchos::Comm< int > > comm = tm->getComm();

  // Get the number of dimensions.  The vector data constitutes one
  // dimension.  If the extraDim is greater than one, then that
  // constitutes a second possible dimension.
  Py_ssize_t ndim = 1;
  if (extraDim > 1) ndim += 1;

  // Allocate the dim_data return object, which is a tuple of length
  // ndim
  dim_data = PyTuple_New(ndim);
  if (!dim_data) goto fail;

  // If we have an extra dimension argument greater than one, then
  // define a dimension associated with the multiple vectors
  if (extraDim > 1)
  {
    dim_dict = PyDict_New();
    if (!dim_dict) goto fail;
    if (PyDict_SetItemString(dim_dict,
                             "dist_type",
                             PyString_FromString("n")) == -1) goto fail;

    if (PyDict_SetItemString(dim_dict,
                             "size",
                             PyInt_FromLong(extraDim)) == -1) goto fail;
    // This function steals a reference from dim_dict
    PyTuple_SET_ITEM(dim_data, currentDim, dim_dict);
    currentDim += 1;
  }

  // Allocate the dimension data dictionary for the distributed points
  // and assign the common key values
  dim_dict = PyDict_New();
  if (!dim_dict) goto fail;
  if (PyDict_SetItemString(dim_dict,
                           "size",
                           PyInt_FromLong(tm->getGlobalNumElements())) == -1)
    goto fail;
  if (PyDict_SetItemString(dim_dict,
                           "proc_grid_size",
                           PyInt_FromLong(comm->getSize())) == -1) goto fail;
  if (PyDict_SetItemString(dim_dict,
                           "proc_grid_rank",
                           PyInt_FromLong(comm->getRank())) == -1) goto fail;

  // Determine the type of the dimension data, either block "b" or
  // unstructured "u", set the "dist_type" key and other keys required
  // according to dist_type.
  if (tm->isContiguous())
  {
    // isContiguous() == true corresponds to DistArray Protocol
    // dist_type == "b" (block)
    if (PyDict_SetItemString(dim_dict,
                             "dist_type",
                             PyString_FromString("b")) == -1) goto fail;
    if (PyDict_SetItemString(dim_dict,
                             "start",
                             PyInt_FromLong(tm->getMinGlobalIndex())) == -1)
      goto fail;
    if (PyDict_SetItemString(dim_dict,
                             "stop",
                             PyInt_FromLong(tm->getMaxGlobalIndex()+1)) == -1)
      goto fail;
  }
  else
  {
    // isContiguous() == false corresponds to DistArray Protocol
    // dist_type == "u" (unstructured)
    if (PyDict_SetItemString(dim_dict,
                             "dist_type",
                             PyString_FromString("u")) == -1) goto fail;
    dims    = tm->getNodeElementList().size();
    nodeBlockIDs = tm->getNodeElementList();
    indices = PyArray_SimpleNewFromData(1,
                                        &dims,
                                        NPY_LONG,
                                        (void*)nodeBlockIDs.getRawPtr());
    if (!indices) goto fail;
    if (PyDict_SetItemString(dim_dict,
                             "indices",
                             indices) == -1) goto fail;
    Py_DECREF(indices);
    indices = NULL;
  }

  // Put the dimension dictionary into the dim_data tuple
  PyTuple_SET_ITEM(dim_data, currentDim, dim_dict);

  // Return the dim_data tuple
  return dim_data;

  // Error handling
  fail:
  Py_XDECREF(dim_data);
  Py_XDECREF(dim_dict);
  Py_XDECREF(indices);
  return NULL;
}
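A hedged usage sketch; for a contiguous map the returned tuple follows the DistArray Protocol's block ("b") dist_type.

// tm is an RCP<const Tpetra::Map<...> >; extraDim = 1 means a single vector.
PyObject * dim_data = convertToDimData(tm, 1);
if (dim_data == NULL) {
  return NULL; // the failed call has already set a Python exception
}
// For a contiguous map the result is roughly:
//   ({'dist_type': 'b', 'size': N, 'proc_grid_size': P,
//     'proc_grid_rank': r, 'start': lo, 'stop': hi},)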
Example #14
 LookupStatus EpetraMap::getRemoteIndexList(const Teuchos::ArrayView< const int > &GIDList,
                                            const Teuchos::ArrayView< int > &nodeIDList) const
 {
   XPETRA_MONITOR("EpetraMap::getRemoteIndexList");
   return toXpetra(map_->RemoteIDList(GIDList.size(), GIDList.getRawPtr(),
                                      nodeIDList.getRawPtr(), 0));
 }
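A usage sketch (hypothetical `map`): resolve the owning process of a few global IDs.

Teuchos::Array<int> gids (2);
gids[0] = 0; gids[1] = 42;
Teuchos::Array<int> owners (gids.size ());
const Xpetra::LookupStatus stat = map.getRemoteIndexList (gids (), owners ());
// owners[i] is the rank owning gids[i]; stat flags any IDs that were not found.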
Example #15
 // TODO: UnitTest FAILED
 EpetraMap::EpetraMap(global_size_t numGlobalElements, const Teuchos::ArrayView<const int> &elementList, int indexBase,
                      const Teuchos::RCP<const Teuchos::Comm<int> > &comm, const Teuchos::RCP<Node> &node)
 {
   if (numGlobalElements == Teuchos::OrdinalTraits<global_size_t>::invalid()) {
     IF_EPETRA_EXCEPTION_THEN_THROW_GLOBAL_INVALID_ARG(
       (map_ = (rcp(new Epetra_BlockMap(-1, elementList.size(), elementList.getRawPtr(),
                                        1, indexBase, *toEpetra(comm))))));
   } else {
     IF_EPETRA_EXCEPTION_THEN_THROW_GLOBAL_INVALID_ARG(
       (map_ = (rcp(new Epetra_BlockMap(numGlobalElements, elementList.size(), elementList.getRawPtr(),
                                        1, indexBase, *toEpetra(comm))))));
   }
 }
Example #16
 void EpetraMultiVector::normInf(const Teuchos::ArrayView< Teuchos::ScalarTraits< Scalar >::magnitudeType > &norms) const
 {
   XPETRA_MONITOR("EpetraMultiVector::normInf");
   vec_->NormInf(norms.getRawPtr());
 }
Example #17
  void EpetraMultiVector::normWeighted(const MultiVector<double,int,int> &weights, const Teuchos::ArrayView<Teuchos::ScalarTraits<double>::magnitudeType> &norms) const {
    XPETRA_MONITOR("EpetraMultiVector::normWeighted");

    XPETRA_DYNAMIC_CAST(const EpetraMultiVector, weights, eWeights, "This Xpetra::EpetraMultiVector method only accepts Xpetra::EpetraMultiVector as input arguments.");
    vec_->NormWeighted(*eWeights.getEpetra_MultiVector(), norms.getRawPtr());
  }
Example #18
 void EpetraMultiVector::meanValue(const Teuchos::ArrayView<double> &means) const
 {
   XPETRA_MONITOR("EpetraMultiVector::meanValue");
   vec_->MeanValue(means.getRawPtr()); // TODO: modify ArrayView size ??
 }
Example #19
  void EpetraMultiVector::dot(const MultiVector<double,int,int> &A, const Teuchos::ArrayView<double> &dots) const {
    XPETRA_MONITOR("EpetraMultiVector::dot");

    XPETRA_DYNAMIC_CAST(const EpetraMultiVector, A, eA, "This Xpetra::EpetraMultiVector method only accepts Xpetra::EpetraMultiVector as input arguments.");
    vec_->Dot(*eA.getEpetra_MultiVector(), dots.getRawPtr());
  }
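A usage sketch (hypothetical multivectors X and Y sharing a map and vector count): one dot product per column, reduced over all processes.

Teuchos::Array<double> dots (X.getNumVectors ());
X.dot (Y, dots ());
// dots[j] holds the global dot product of column j of X with column j of Y.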