Example #1
/*@C
   PetscInitializeFortran - Routine that should be called soon AFTER
   the call to PetscInitialize() if one is using a C main program
   that calls Fortran routines that in turn call PETSc routines.

   Collective on PETSC_COMM_WORLD

   Level: beginner

   Notes:
   PetscInitializeFortran() initializes some of the default viewers,
   communicators, etc. for use in Fortran if a user's main program is
   written in C.  PetscInitializeFortran() is NOT needed if a user's main
   program is written in Fortran; in this case, just calling
   PetscInitialize() in the main (Fortran) program is sufficient.

.seealso:  PetscInitialize()

.keywords: Mixing C and Fortran, passing PETSc objects to Fortran
@*/
PetscErrorCode PetscInitializeFortran(void)
{
    MPI_Fint c1=0,c2=0;

    if (PETSC_COMM_WORLD) c1 =  MPI_Comm_c2f(PETSC_COMM_WORLD);
    c2 =  MPI_Comm_c2f(PETSC_COMM_SELF);
    petscsetcommonblock_(&c1,&c2);

#if defined(PETSC_USE_REAL___FLOAT128)
    {
        MPI_Fint freal,fscalar,fsum;
        freal   = MPI_Type_c2f(MPIU_REAL);
        fscalar = MPI_Type_c2f(MPIU_SCALAR);
        fsum    = MPI_Op_c2f(MPIU_SUM);
        petscsetcommonblockmpi_(&freal,&fscalar,&fsum);
    }
#endif

    {
        PetscReal pi = PETSC_PI;
        PetscReal maxreal = PETSC_MAX_REAL;
        PetscReal minreal = PETSC_MIN_REAL;
        PetscReal eps = PETSC_MACHINE_EPSILON;
        PetscReal seps = PETSC_SQRT_MACHINE_EPSILON;
        PetscReal small = PETSC_SMALL;
        PetscReal pinf = PETSC_INFINITY;
        PetscReal pninf = PETSC_NINFINITY;
        petscsetcommonblocknumeric_(&pi,&maxreal,&minreal,&eps,&seps,&small,&pinf,&pninf);
    }
    return 0;
}
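A minimal sketch of the calling pattern the notes above describe, assuming a hypothetical Fortran subroutine myfsub_ (Example #5 below shows the same pattern with a real Vec and a converted communicator):

/* Hedged sketch: a C main program that prepares the Fortran side before
   calling Fortran code that uses PETSc.  myfsub_ is hypothetical. */
#include <petscsys.h>

extern void myfsub_(void);  /* provided by the Fortran side (hypothetical) */

int main(int argc, char **argv)
{
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);if (ierr) return ierr;
  PetscInitializeFortran();  /* fills the Fortran common blocks set up above */
  myfsub_();                 /* Fortran code may now call PETSc routines */
  ierr = PetscFinalize();
  return ierr;
}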
Example #2
__FRET__ __FFNAME__(__FPARAMS__)
{
  double tstart, tstop;

#if HAVE_CREQ        /* HAVE_CREQ */
  MPI_Request creq;
#endif
#if HAVE_CSTAT       /* HAVE_CSTAT */
  MPI_Status  cstat;
#endif
#if HAVE_CCOMM_OUT   /* HAVE_CCOMM_OUT */
  MPI_Comm ccomm_out;
#endif
#if HAVE_CCOMM_INOUT /* HAVE_CCOMM_INOUT */
  MPI_Comm ccomm_inout;
#endif
#if HAVE_CGROUP_OUT  /* HAVE_CGROUP_OUT */
  MPI_Group cgroup_out;
#endif

#if HAVE_CCOMM_INOUT
  /* convert the Fortran INOUT communicator to its C handle before the call */
  ccomm_inout = MPI_Comm_f2c(*comm_inout);
#endif

  IPM_TIMESTAMP(tstart);
  p__FFNAME__(__FARGS__);
  IPM_TIMESTAMP(tstop);

  if( ipm_state!=STATE_ACTIVE ) {
    return;
  }
  
#if HAVE_CSTAT   /* HAVE_CSTAT */ 
  if (*info==MPI_SUCCESS) 
    MPI_Status_c2f(&cstat, status);
#endif

#if HAVE_CREQ    /* HAVE_CREQ */ 
  if( *info==MPI_SUCCESS )
    *req=MPI_Request_c2f(creq);
#endif

#if HAVE_CCOMM_OUT /* HAVE_CCOMM_OUT */
  if( *info==MPI_SUCCESS ) 
    *comm_out=MPI_Comm_c2f(ccomm_out);
#endif

#if HAVE_CCOMM_INOUT /* HAVE_CCOMM_INOUT */
  if( *info==MPI_SUCCESS ) 
    *comm_inout=MPI_Comm_c2f(ccomm_inout);
#endif

#if HAVE_CGROUP_OUT /* HAVE_CGROUP_OUT */
  if( *info==MPI_SUCCESS )
    *group_out=MPI_Group_c2f(cgroup_out);
#endif
  IPM___CFNAME__(__F2CARGS__, tstart, tstop);

}
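The __FRET__, __FFNAME__, __FPARAMS__, __FARGS__, __CFNAME__, and __F2CARGS__ placeholders above are filled in by IPM's wrapper generator for each profiled MPI routine. As a hedged illustration only (the symbol names and the exact generated shape below are assumptions, not IPM's verbatim output), a routine with no C-side handles, such as MPI_Barrier, would expand along these lines:

/* Hypothetical expansion for mpi_barrier_ with all HAVE_* macros
   undefined -- illustrative only, not generated IPM code. */
void mpi_barrier_(MPI_Fint *comm, MPI_Fint *info)
{
  double tstart, tstop;

  IPM_TIMESTAMP(tstart);
  pmpi_barrier_(comm, info);            /* p__FFNAME__(__FARGS__) */
  IPM_TIMESTAMP(tstop);

  if( ipm_state!=STATE_ACTIVE ) {
    return;
  }

  /* IPM___CFNAME__(__F2CARGS__, tstart, tstop): the Fortran handle is
     converted with MPI_Comm_f2c() on the way into the C-side logger */
  IPM_MPI_Barrier(MPI_Comm_f2c(*comm), tstart, tstop);
}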
Example #3
 void numfact(unsigned int nz, int* I, int* J, K* C) {
     _id = new typename MUMPS_STRUC_C<K>::trait();
     _id->job = -1;
     _id->par = 1;
     _id->comm_fortran = MPI_Comm_c2f(DMatrix::_communicator);
     const Option& opt = *Option::get();
     if(S == 'S')
         _id->sym = opt.val<char>("master_not_spd", 0) ? 2 : 1;
     else
         _id->sym = 0;
     MUMPS_STRUC_C<K>::mumps_c(_id);
     _id->n = _id->lrhs = DMatrix::_n;
     _id->nz_loc = nz;
     _id->irn_loc = I;
     _id->jcn_loc = J;
     _id->a_loc = reinterpret_cast<typename MUMPS_STRUC_C<K>::mumps_type*>(C);
     _id->nrhs = 1;
     _id->icntl[4] = 0;
     for(unsigned short i = 5; i < 40; ++i) {
         int val = opt.val<int>("master_mumps_icntl_" + std::to_string(i + 1));
         if(val != std::numeric_limits<int>::lowest())
             _id->icntl[i] = val;
     }
     _id->job = 4;
     if(opt.val<char>("verbosity", 0) < 3)
         _id->icntl[2] = 0;
     MUMPS_STRUC_C<K>::mumps_c(_id);
     if(DMatrix::_rank == 0 && _id->infog[0] != 0)
         std::cerr << "BUG MUMPS, INFOG(1) = " << _id->infog[0] << std::endl;
     _id->icntl[2] = 0;
     delete [] I;
 }
Example #4
void ompi_cart_create_f(MPI_Fint *old_comm, MPI_Fint *ndims, MPI_Fint *dims,
                       ompi_fortran_logical_t *periods, ompi_fortran_logical_t *reorder,
                       MPI_Fint *comm_cart, MPI_Fint *ierr)
{
    MPI_Comm c_comm1, c_comm2;
    int size, c_ierr;
    OMPI_ARRAY_NAME_DECL(dims);
    OMPI_LOGICAL_ARRAY_NAME_DECL(periods);

    c_comm1 = MPI_Comm_f2c(*old_comm);

    size = OMPI_FINT_2_INT(*ndims);
    OMPI_ARRAY_FINT_2_INT(dims, size);
    OMPI_ARRAY_LOGICAL_2_INT(periods, size);

    c_ierr = MPI_Cart_create(c_comm1, OMPI_FINT_2_INT(*ndims),
                             OMPI_ARRAY_NAME_CONVERT(dims),
                             OMPI_LOGICAL_ARRAY_NAME_CONVERT(periods),
                             OMPI_LOGICAL_2_INT(*reorder),
                             &c_comm2);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

    if (MPI_SUCCESS == c_ierr) {
        *comm_cart = MPI_Comm_c2f(c_comm2);
    }

    /*
     * Convert the arrays back into Fortran form so the caller's
     * input is not left modified
     */
    OMPI_ARRAY_FINT_2_INT_CLEANUP(dims);
    OMPI_ARRAY_INT_2_LOGICAL(periods, size);
}
Example #5
int main(int argc,char **args)
{
  PetscErrorCode ierr;
  PetscInt       m = 10;
  int            fcomm;
  Vec            vec;
  extern void    ex7f_(Vec*,int*); /* Fortran subroutine, defined elsewhere */

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  /* PetscInitializeFortran() must be called so that PETSc routines can
     be used from the Fortran subroutines needed by this program */

  PetscInitializeFortran();

  ierr = VecCreate(PETSC_COMM_WORLD,&vec);CHKERRQ(ierr);
  ierr = VecSetSizes(vec,PETSC_DECIDE,m);CHKERRQ(ierr);
  ierr = VecSetFromOptions(vec);CHKERRQ(ierr);

  /*
     Call the Fortran routine. MPI_Comm_c2f() translates the C MPI_Comm
     into the Fortran integer handle that the Fortran side expects.
  */
  fcomm = MPI_Comm_c2f(PETSC_COMM_WORLD);

  ex7f_(&vec,&fcomm);

  ierr = VecView(vec,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = VecDestroy(&vec);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
Example #6
void ompi_graph_create_f(MPI_Fint *comm_old, MPI_Fint *nnodes,
                        MPI_Fint *indx, MPI_Fint *edges,
                        ompi_fortran_logical_t *reorder, MPI_Fint *comm_graph,
                        MPI_Fint *ierr)
{
    int c_ierr;
    MPI_Comm c_comm_old, c_comm_graph;
    OMPI_ARRAY_NAME_DECL(indx);
    OMPI_ARRAY_NAME_DECL(edges);

    c_comm_old = MPI_Comm_f2c(*comm_old);

    OMPI_ARRAY_FINT_2_INT(indx, *nnodes);

    /* Number of edges is equal to the last entry in the index array */
    OMPI_ARRAY_FINT_2_INT(edges, indx[*nnodes - 1]);

    c_ierr = MPI_Graph_create(c_comm_old,
                              OMPI_FINT_2_INT(*nnodes),
                              OMPI_ARRAY_NAME_CONVERT(indx),
                              OMPI_ARRAY_NAME_CONVERT(edges),
                              OMPI_LOGICAL_2_INT(*reorder),
                              &c_comm_graph);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

    if (OMPI_SUCCESS == c_ierr) {
        *comm_graph = MPI_Comm_c2f(c_comm_graph);
    }

    OMPI_ARRAY_FINT_2_INT_CLEANUP(indx);
    OMPI_ARRAY_FINT_2_INT_CLEANUP(edges);
}
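The comment about the edge count is easiest to see on a concrete graph. A minimal sketch in plain MPI (without the OMPI conversion macros): a 3-node ring in which index_[] holds cumulative neighbour counts, so its last entry is exactly the total edge count that the wrapper above reads via indx[*nnodes - 1].

#include <mpi.h>

/* 3-node ring: every node lists its two neighbours.  index_[i] is the
   cumulative neighbour count, so index_[nnodes - 1] == 6, the total
   number of edge entries. */
static MPI_Comm make_ring(void)
{
    int nnodes   = 3;
    int index_[] = {2, 4, 6};           /* neighbours of node i end here */
    int edges[]  = {1, 2, 0, 2, 0, 1};  /* concatenated neighbour lists  */
    MPI_Comm ring;
    MPI_Graph_create(MPI_COMM_WORLD, nnodes, index_, edges, 0, &ring);
    return ring;
}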
Example #7
LIS_INT lis_psolvet_saamg(LIS_SOLVER solver, LIS_VECTOR b, LIS_VECTOR x)
{
#if defined(USE_SAAMG)
	LIS_INT		n;
	LIS_PRECON precon;
	LIS_MATRIX A;
	#ifdef USE_MPI
		LIS_MPI_Fint comm;
	#endif

	LIS_DEBUG_FUNC_IN;

	A      = solver->A;
	precon = solver->precon;

	#ifdef USE_MPI
		comm = MPI_Comm_c2f(A->comm);
		n = b->np;
		 (*(void (*)())f_v_cycle_ptr)(b->value,x->value,precon->temp->value,&precon->level_num,
			 &comm,A->commtable->ws,A->commtable->wr,&n,&precon->wsize);
	#else
		n = b->n;
		 (*(void (*)())f_v_cycle_ptr)(&n,b->value,x->value,&precon->level_num,precon->temp->value);
	#endif
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
#else
	LIS_DEBUG_FUNC_IN;

	lis_vector_copy(b,x);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
#endif
}
Example #8
 int geopm_comm_split_ppn1_f(int comm, int *ppn1_comm)
 {
     MPI_Comm ppn1_comm_c;
     int err = geopm_comm_split_ppn1(MPI_Comm_f2c(comm), &ppn1_comm_c);
     *ppn1_comm = MPI_Comm_c2f(ppn1_comm_c);
     return err;
 }
Example #9
   void cxios_init_client(const char* client_id , int len_client_id, MPI_Fint* f_local_comm, MPI_Fint* f_return_comm )
   {
      std::string str;
      ep_lib::MPI_Comm local_comm;
      ep_lib::MPI_Comm return_comm;
      
      if (!cstr2string(client_id, len_client_id, str)) return;

      int initialized;
      MPI_Initialized(&initialized);
      #ifdef _usingMPI
      if (initialized) local_comm = MPI_Comm_f2c(*f_local_comm);
      else local_comm = MPI_COMM_NULL;
      #elif _usingEP
      ep_lib::fc_comm_map.clear();
      if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast<int>(*f_local_comm));
      else local_comm = MPI_COMM_NULL;
      #endif

      CXios::initClientSide(str, local_comm, return_comm);
      #ifdef _usingMPI
      *f_return_comm=MPI_Comm_c2f(return_comm);
      #elif _usingEP
      *f_return_comm=ep_lib::EP_Comm_c2f(return_comm);
      #endif
      CTimer::get("XIOS init").suspend();
      CTimer::get("XIOS").suspend();
   }
Example #10
 int geopm_comm_split_shared_f(int comm, int *split_comm)
 {
     MPI_Comm split_comm_c;
     int err = geopm_comm_split_shared(MPI_Comm_f2c(comm), &split_comm_c);
     *split_comm = MPI_Comm_c2f(split_comm_c);
     return err;
 }
Example #11
 int geopm_comm_split_f(int comm, int *split_comm, int *is_ctl_comm)
 {
     MPI_Comm split_comm_c;
     int err = geopm_comm_split(MPI_Comm_f2c(comm), &split_comm_c, is_ctl_comm);
     *split_comm = MPI_Comm_c2f(split_comm_c);
     return err;
 }
Example #12
void mpi_cart_sub_f(MPI_Fint *comm, ompi_fortran_logical_t *remain_dims,
                    MPI_Fint *new_comm, MPI_Fint *ierr)
{
    MPI_Comm c_comm, c_new_comm;
    /*
     * Only when sizeof(logical) != sizeof(int) or the Fortran TRUE
     * value is not 1 do we have to convert; in that case we need the
     * number of dimensions to size the remain_dims conversion
     */
#if OMPI_FORTRAN_MUST_CONVERT_LOGICAL_2_INT == 1
    int ndims;
#endif
    OMPI_LOGICAL_ARRAY_NAME_DECL(remain_dims);

    c_comm = MPI_Comm_f2c(*comm);
    c_new_comm = MPI_Comm_f2c(*new_comm);

#if OMPI_FORTRAN_MUST_CONVERT_LOGICAL_2_INT == 1
    *ierr = OMPI_INT_2_FINT(MPI_Cartdim_get(c_comm, &ndims));
    if (MPI_SUCCESS != OMPI_FINT_2_INT(*ierr)) {
        return;
    }
#endif
    OMPI_ARRAY_LOGICAL_2_INT(remain_dims, ndims);

    *ierr = OMPI_INT_2_FINT(MPI_Cart_sub(c_comm,
                              OMPI_LOGICAL_ARRAY_NAME_CONVERT(remain_dims),
                              &c_new_comm));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        *new_comm = MPI_Comm_c2f(c_new_comm);
    }

    OMPI_ARRAY_INT_2_LOGICAL(remain_dims, ndims);
}
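What the logical-conversion macros amount to, in the case the comment above describes, is roughly the following. This is a hedged illustration of the idea, not Open MPI's literal macro expansion:

#include <stdlib.h>

/* Illustration only: when sizeof(ompi_fortran_logical_t) != sizeof(int)
   or the Fortran TRUE value is not 1, copy into a temporary int array,
   mapping any nonzero Fortran logical to C's 1. */
static int *logical_array_to_int(const ompi_fortran_logical_t *src, int n)
{
    int *dst = (int *)malloc((size_t)n * sizeof(int));
    for (int i = 0; i < n; ++i) {
        dst[i] = (src[i] != 0) ? 1 : 0;
    }
    return dst;  /* caller passes this to MPI_Cart_sub(), then frees it */
}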
Example #13
EXTERN_C_BEGIN
void PETSC_STDCALL petscobjectgetcomm_(PetscObject *obj,int *comm,PetscErrorCode *ierr)
{
  MPI_Comm c;
  *ierr = PetscObjectGetComm(*obj,&c);
  *(int*)comm =  MPI_Comm_c2f(c);
}
Example #14
EXPORT_MPI_API void FORTRAN_API mpi_comm_dup_ ( MPI_Fint *comm, MPI_Fint *comm_out, MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;

    *__ierr = MPI_Comm_dup( MPI_Comm_f2c(*comm), &l_comm_out );
    *comm_out = MPI_Comm_c2f(l_comm_out);
}
Example #15
/* initialization function for basic pepc parameters */
FCSResult fcs_pepc_init(FCS handle)
{
  handle->shift_positions = 1;

  handle->destroy = fcs_pepc_destroy;
  handle->set_parameter = fcs_pepc_set_parameter;
  handle->print_parameters = fcs_pepc_print_parameters;
  handle->tune = fcs_pepc_tune;
  handle->run = fcs_pepc_run;
  handle->set_compute_virial = fcs_pepc_require_virial;
  handle->get_virial = fcs_pepc_get_virial;

  handle->pepc_param = malloc(sizeof(*handle->pepc_param));
  handle->pepc_param->theta             = 0.6;
  handle->pepc_param->epsilon           = 0.0;
  handle->pepc_param->require_virial    = 0;
  handle->pepc_param->num_walk_threads  = 3;
  handle->pepc_param->load_balancing    = 0;
  handle->pepc_param->dipole_correction = 1;
  handle->pepc_param->npm               = -45.0;
  handle->pepc_param->debug_level       = 0;

  fcs_pepc_internal_t *pepc_internal;
  MPI_Comm comm  = fcs_get_communicator(handle);
  MPI_Fint fcomm = MPI_Comm_c2f(comm);

  pepc_scafacos_initialize(&fcomm);

  handle->method_context = malloc(sizeof(fcs_pepc_internal_t));
  pepc_internal = (fcs_pepc_internal_t*) handle->method_context;
  pepc_internal->work_length = -1;
  pepc_internal->work        = NULL;

  return FCS_RESULT_SUCCESS;
}
Example #16
void mpif_cart_sub_(MPI_Fint *old_comm, int *belongs, MPI_Fint *new_comm, int *error)
{
  MPI_Comm old_comm_c = MPI_Comm_f2c(*old_comm);
  MPI_Comm new_comm_c;

  *error = MPI_Cart_sub(old_comm_c, belongs, &new_comm_c);
  *new_comm = MPI_Comm_c2f(new_comm_c);
}
Example #17
EXPORT_MPI_API void FORTRAN_API mpi_intercomm_merge_ ( MPI_Fint *comm, MPI_Fint *high, MPI_Fint *comm_out, MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;

    *__ierr = MPI_Intercomm_merge( MPI_Comm_f2c(*comm), (int)*high, 
                                   &l_comm_out);
    *comm_out = MPI_Comm_c2f(l_comm_out);
}
Example #18
void mpif_intercomm_merge_(MPI_Fint *intercomm, int *high, MPI_Fint *newintracomm, int *error)
{
  MPI_Comm intercomm_c = MPI_Comm_f2c(*intercomm);
  MPI_Comm newintracomm_c;

  *error = MPI_Intercomm_merge(intercomm_c, *high, &newintracomm_c);
  *newintracomm = MPI_Comm_c2f(newintracomm_c);
}
Example #19
int PNX(create_procmesh_2d_f03)(MPI_Fint f_comm, int np0, int np1, MPI_Fint * f_comm_cart_2d)
{
    MPI_Comm comm, comm_cart_2d;

    comm = MPI_Comm_f2c(f_comm);
    int ret = PNX(create_procmesh_2d)(comm, np0, np1, &comm_cart_2d);
    *f_comm_cart_2d = MPI_Comm_c2f(comm_cart_2d);
    return ret;
}
Example #20
int PNX(create_procmesh_f03)(int rnk, MPI_Fint f_comm, const int * np, MPI_Fint * f_comm_cart)
{
    MPI_Comm comm, comm_cart;

    comm = MPI_Comm_f2c(f_comm);
    int ret = PNX(create_procmesh)(rnk, comm, np, &comm_cart);
    *f_comm_cart = MPI_Comm_c2f(comm_cart);
    return ret;
}
Example #21
void mpif_comm_dup_(MPI_Fint *comm, MPI_Fint *newcomm, int *error)
{
  MPI_Comm comm_c = MPI_Comm_f2c(*comm);
  MPI_Comm newcomm_c;

  *error = MPI_Comm_dup(comm_c, &newcomm_c);

  *newcomm = MPI_Comm_c2f(newcomm_c);
}
Example #22
void mpif_comm_split_(MPI_Fint *comm, int *color, int *key, MPI_Fint *newcomm, int *error)
{
  MPI_Comm comm_c = MPI_Comm_f2c(*comm);
  MPI_Comm newcomm_c;

  *error = MPI_Comm_split(comm_c, *color, *key, &newcomm_c);

  *newcomm = MPI_Comm_c2f(newcomm_c);
}
Example #23
void mpi_comm_get_parent_f(MPI_Fint *parent, MPI_Fint *ierr)
{
    MPI_Comm c_parent;

    *ierr = OMPI_INT_2_FINT(MPI_Comm_get_parent(&c_parent));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        *parent = MPI_Comm_c2f(c_parent);
    }
}
Example #24
void mpif_graph_create_(MPI_Fint *comm_old, int *nnodes, int *index, int *edges, int *reorder, MPI_Fint *comm_graph, int *error)
{
  MPI_Comm comm_old_c = MPI_Comm_f2c(*comm_old);
  MPI_Comm comm_graph_c;

  *error = MPI_Graph_create(comm_old_c, *nnodes, index, edges, *reorder, &comm_graph_c);

  *comm_graph = MPI_Comm_c2f(comm_graph_c);
}
Example #25
void mpif_intercomm_create_(MPI_Fint *local_comm, int *local_leader, MPI_Fint *peer_comm, int *remote_leader, int *tag, MPI_Fint *newintercomm, int *error)
{
  MPI_Comm local_comm_c = MPI_Comm_f2c(*local_comm);
  MPI_Comm peer_comm_c = MPI_Comm_f2c(*peer_comm);
  MPI_Comm newintercomm_c;

  *error = MPI_Intercomm_create(local_comm_c, *local_leader, peer_comm_c, *remote_leader, *tag, &newintercomm_c);
  *newintercomm = MPI_Comm_c2f(newintercomm_c);
}
Example #26
void mpif_cart_create_(MPI_Fint *comm_old, int *ndims, int *dims, int *periods, int *reorder, MPI_Fint *comm_cart, int *error)
{
  MPI_Comm comm_old_c = MPI_Comm_f2c(*comm_old);
  MPI_Comm comm_cart_c;

  *error = MPI_Cart_create(comm_old_c, *ndims, dims, periods, *reorder, &comm_cart_c);

  *comm_cart = MPI_Comm_c2f(comm_cart_c);
}
Example #27
void mpi_comm_join_f(MPI_Fint *fd, MPI_Fint *intercomm, MPI_Fint *ierr)
{
    MPI_Comm c_intercomm;

    *ierr = OMPI_INT_2_FINT(MPI_Comm_join(OMPI_FINT_2_INT(*fd),
                                          &c_intercomm));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        *intercomm = MPI_Comm_c2f(c_intercomm);
    }
}
Example #28
void mpif_comm_create_(MPI_Fint *comm, MPI_Fint *group, MPI_Fint *newcomm, int *error)
{
  MPI_Comm comm_c = MPI_Comm_f2c(*comm);
  MPI_Comm newcomm_c;

  MPI_Group group_c = MPI_Group_f2c(*group);

  *error = MPI_Comm_create(comm_c, group_c, &newcomm_c);
  *newcomm = MPI_Comm_c2f(newcomm_c);
}
Example #29
 void numfact(MatrixCSR<K>* const& A, bool detection = false, K* const& schur = nullptr) {
     static_assert(N == 'C' || N == 'F', "Unknown numbering");
     const Option& opt = *Option::get();
     if(!_id) {
         _id = new typename MUMPS_STRUC_C<K>::trait();
         _id->job = -1;
         _id->par = 1;
         _id->comm_fortran = MPI_Comm_c2f(MPI_COMM_SELF);
         _id->sym = A->_sym ? 1 + (opt.val<char>("local_operators_not_spd", 0) || detection) : 0;
         MUMPS_STRUC_C<K>::mumps_c(_id);
     }
     _id->icntl[23] = detection;
     _id->cntl[2] = -1.0e-6;
     if(N == 'C')
         std::for_each(A->_ja, A->_ja + A->_nnz, [](int& i) { ++i; });
     _id->jcn = A->_ja;
     _id->a = reinterpret_cast<typename MUMPS_STRUC_C<K>::mumps_type*>(A->_a);
     int* listvar = nullptr;
     if(_id->job == -1) {
         _id->nrhs = 1;
         std::fill_n(_id->icntl, 5, 0);
         _id->n = A->_n;
         for(unsigned short i = 5; i < 40; ++i) {
             int val = opt.val<int>("mumps_icntl_" + std::to_string(i + 1));
             if(val != std::numeric_limits<int>::lowest())
                 _id->icntl[i] = val;
         }
         _id->lrhs = A->_n;
         _I = new int[A->_nnz];
         _id->nz = A->_nnz;
         for(int i = 0; i < A->_n; ++i)
             std::fill(_I + A->_ia[i] - (N == 'F'), _I + A->_ia[i + 1] - (N == 'F'), i + 1);
         _id->irn = _I;
         if(schur) {
             listvar = new int[static_cast<int>(std::real(schur[0]))];
             std::iota(listvar, listvar + static_cast<int>(std::real(schur[0])), static_cast<int>(std::real(schur[1])));
             _id->size_schur = _id->schur_lld = static_cast<int>(std::real(schur[0]));
             _id->icntl[18] = 2;
             _id->icntl[25] = 0;
             _id->listvar_schur = listvar;
             _id->nprow = _id->npcol = 1;
             _id->mblock = _id->nblock = 100;
             _id->schur = reinterpret_cast<typename MUMPS_STRUC_C<K>::mumps_type*>(schur);
         }
         _id->job = 4;
     }
     else
         _id->job = 2;
     MUMPS_STRUC_C<K>::mumps_c(_id);
     delete [] listvar;
     if(_id->infog[0] != 0)
         std::cerr << "BUG MUMPS, INFOG(1) = " << _id->infog[0] << std::endl;
     if(N == 'C')
         std::for_each(A->_ja, A->_ja + A->_nnz, [](int& i) { --i; });
 }
Example #30
EXPORT_MPI_API void FORTRAN_API mpi_intercomm_create_ ( MPI_Fint *local_comm, MPI_Fint *local_leader, MPI_Fint *peer_comm, 
                           MPI_Fint *remote_leader, MPI_Fint *tag, MPI_Fint *comm_out, MPI_Fint *__ierr )
{
    MPI_Comm l_comm_out;
    *__ierr = MPI_Intercomm_create( MPI_Comm_f2c(*local_comm),
                                    (int)*local_leader,
                                    MPI_Comm_f2c(*peer_comm),
                                    (int)*remote_leader, (int)*tag,
                                    &l_comm_out);
    *comm_out = MPI_Comm_c2f(l_comm_out);
}