Example 1
hypre_ParVector
*hypre_ParVectorRead( MPI_Comm    comm,
                      const char *file_name )
{
   char             new_file_name[80];
   hypre_ParVector *par_vector;
   HYPRE_Int        my_id, num_procs;
   HYPRE_Int       *partitioning;
   HYPRE_Int        global_size, i;
   FILE            *fp;

   hypre_MPI_Comm_rank(comm,&my_id); 
   hypre_MPI_Comm_size(comm,&num_procs); 

   partitioning = hypre_CTAlloc(HYPRE_Int,num_procs+1);

   hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); 
   fp = fopen(new_file_name, "r");
   hypre_fscanf(fp, "%d\n", &global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i = 0; i < 2; i++)
      hypre_fscanf(fp, "%d\n", &partitioning[i]);
   fclose (fp);
#else
   for (i = 0; i < num_procs; i++)
      hypre_fscanf(fp, "%d\n", &partitioning[i]);
   fclose (fp);
   partitioning[num_procs] = global_size; 
#endif
   par_vector = hypre_CTAlloc(hypre_ParVector, 1);
	
   hypre_ParVectorComm(par_vector) = comm;
   hypre_ParVectorGlobalSize(par_vector) = global_size;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(par_vector) = partitioning[0];
   hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1;
#else
   hypre_ParVectorFirstIndex(par_vector) = partitioning[my_id];
   hypre_ParVectorLastIndex(par_vector) = partitioning[my_id+1]-1;
#endif

   hypre_ParVectorPartitioning(par_vector) = partitioning;

   hypre_ParVectorOwnsData(par_vector) = 1;
   hypre_ParVectorOwnsPartitioning(par_vector) = 1;

   hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); 
   hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name);

   /* multivector code not written yet >>> */
   hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 );

   return par_vector;
}
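
A minimal usage sketch for the reader above (assuming hypre's internal header _hypre_parcsr_mv.h and an already-initialized MPI environment; hypre_ParVectorDestroy is assumed to be the matching cleanup routine and is not shown in this listing). Every rank must pass the same base name, since each rank opens its own "name.INFO.<rank>" and "name.<rank>" files:

#include "_hypre_parcsr_mv.h"

void read_vector_example(void)
{
   /* Each rank reads "vec.INFO.<rank>" (partitioning info) and "vec.<rank>" (local values). */
   hypre_ParVector *v = hypre_ParVectorRead(MPI_COMM_WORLD, "vec");

   /* ... use v ... */

   hypre_ParVectorDestroy(v);   /* assumed counterpart of the reader */
}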
Example 2
hypre_ParVector *
hypre_ParVectorCreate( MPI_Comm   comm,
                       HYPRE_Int  global_size,
                       HYPRE_Int *partitioning )
{
   hypre_ParVector  *vector;
   HYPRE_Int num_procs, my_id;

   if (global_size < 0)
   {
      hypre_error_in_arg(2);
      return NULL;
   }
   vector = hypre_CTAlloc(hypre_ParVector, 1);
   hypre_MPI_Comm_rank(comm,&my_id);

   if (!partitioning)
   {
     hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
     hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning);
#else
     hypre_GeneratePartitioning(global_size, num_procs, &partitioning);
#endif
   }


   hypre_ParVectorAssumedPartition(vector) = NULL;
   

   hypre_ParVectorComm(vector) = comm;
   hypre_ParVectorGlobalSize(vector) = global_size;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(vector) = partitioning[0];
   hypre_ParVectorLastIndex(vector) = partitioning[1]-1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) =
      hypre_SeqVectorCreate(partitioning[1] - partitioning[0]);
#else
   hypre_ParVectorFirstIndex(vector) = partitioning[my_id];
   hypre_ParVectorLastIndex(vector) = partitioning[my_id+1] -1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) =
      hypre_SeqVectorCreate(partitioning[my_id+1] - partitioning[my_id]);
#endif

   /* set defaults */
   hypre_ParVectorOwnsData(vector) = 1;
   hypre_ParVectorOwnsPartitioning(vector) = 1;

   return vector;
}
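
A short sketch of the NULL-partitioning convenience path of hypre_ParVectorCreate: when partitioning is NULL, the constructor generates one and the vector owns it. hypre_ParVectorInitialize, hypre_ParVectorSetConstantValues and hypre_ParVectorDestroy are assumed companion routines that do not appear in this listing:

/* Sketch: create a vector of global size 100, letting the constructor
   generate the partitioning.  Initialize/SetConstantValues/Destroy are
   assumed companion routines. */
hypre_ParVector *make_unit_vector(MPI_Comm comm)
{
   hypre_ParVector *v = hypre_ParVectorCreate(comm, 100, NULL);

   hypre_ParVectorInitialize(v);              /* assumed: allocates the local data */
   hypre_ParVectorSetConstantValues(v, 1.0);  /* assumed: fills the vector with 1.0 */
   return v;  /* caller is expected to call hypre_ParVectorDestroy(v) later */
}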
Example 3
hypre_ParVector *
hypre_ParVectorCloneShallow( hypre_ParVector *x )
{
   hypre_ParVector * y = hypre_ParVectorCreate(
      hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x) );

   hypre_ParVectorOwnsData(y) = 1;
   /* ...This vector owns its local vector, although the local vector doesn't own _its_ data */
   hypre_ParVectorOwnsPartitioning(y) = 0;
   hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
   hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(
      hypre_ParVectorLocalVector(x) );
   hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x);

   return y;
}
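
A sketch of the ownership semantics established above: the shallow clone shares x's partitioning and local data, so destroying the clone should release only the wrapper structures and never x's values (hypre_ParVectorDestroy is assumed here and not shown in this listing):

void clone_example(hypre_ParVector *x)
{
   hypre_ParVector *y = hypre_ParVectorCloneShallow(x);

   /* y aliases x's local data and partitioning here ... */

   hypre_ParVectorDestroy(y);   /* assumed cleanup; x and its data remain valid */
}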
Example 4
/* ----------------------------------------------------------------------
 * N_VMake Test
 *
 * NOTE: This routine depends on N_VConst to check vector data.
 * --------------------------------------------------------------------*/
int Test_N_VMake(HYPRE_ParVector W, int myid)
{
  int failure;
  /* double   start_time, stop_time; */
  N_Vector X;
  int local_length = hypre_ParVectorLastIndex(W) 
                     - hypre_ParVectorFirstIndex(W) + 1;

  /* clone vector */
  /* start_time = get_time(); */  
  X = N_VMake_ParHyp(W);
  /* stop_time = get_time();  */

  /* check cloned vector */
  if (X == NULL) {
    printf(">>> FAILED test -- N_VMake, Proc %d \n", myid);
    printf("    After N_VMakeEmpty, X == NULL \n \n");
    return(1);
  } 

  /* check cloned vector data */
  if (!has_data(X)) {
    printf(">>> FAILED test -- N_VMake, Proc %d \n", myid);
    printf("    Vector data == NULL \n \n");
    N_VDestroy(X);
    return(1);
  }    

  N_VConst(ONE,X);
  failure = check_ans(ONE, X, local_length);
  if (failure) {
    printf(">>> FAILED test -- N_VMake, Proc %d \n", myid);
    printf("    Failed N_VConst check \n \n");
    N_VDestroy(X);
    return(1);
  }    

  N_VDestroy(X); 

  if (myid == 0) {
    printf("    PASSED test -- N_VMake \n");
    /* PRINT_TIME("    N_VMake Time: %22.15e \n \n", stop_time - start_time); */
  }

  return(0);
}
Example 5
HYPRE_Int hypre_BoomerAMGRelaxT( hypre_ParCSRMatrix *A,
                                 hypre_ParVector    *f,
                                 HYPRE_Int          *cf_marker,
                                 HYPRE_Int           relax_type,
                                 HYPRE_Int           relax_points,
                                 HYPRE_Real          relax_weight,
                                 hypre_ParVector    *u,
                                 hypre_ParVector    *Vtemp )
{
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);

   HYPRE_Int        n_global    = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int        n           = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        first_index = hypre_ParVectorFirstIndex(u);

   hypre_Vector    *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real      *u_data  = hypre_VectorData(u_local);

   hypre_Vector    *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real      *Vtemp_data  = hypre_VectorData(Vtemp_local);

   hypre_CSRMatrix *A_CSR;
   HYPRE_Int       *A_CSR_i;
   HYPRE_Int       *A_CSR_j;
   HYPRE_Real      *A_CSR_data;

   hypre_Vector    *f_vector;
   HYPRE_Real      *f_vector_data;

   HYPRE_Int             i;
   HYPRE_Int             jj;
   HYPRE_Int             column;
   HYPRE_Int             relax_error = 0;

   HYPRE_Real     *A_mat;
   HYPRE_Real     *b_vec;

   HYPRE_Real      zero = 0.0;
  
   /*-----------------------------------------------------------------------
    * Switch statement to direct control based on relax_type:
    *     relax_type = 7 -> Jacobi (uses ParMatvec)
    *     relax_type = 9 -> Direct Solve
    *-----------------------------------------------------------------------*/
   
   switch (relax_type)
   {            

      case 7: /* Jacobi (uses ParMatvec) */
      {
 
         /*-----------------------------------------------------------------
          * Copy f into temporary vector.
          *-----------------------------------------------------------------*/
        
         hypre_ParVectorCopy(f,Vtemp); 
 
         /*-----------------------------------------------------------------
          * Perform MatvecT Vtemp=f-A^Tu
          *-----------------------------------------------------------------*/
 
         hypre_ParCSRMatrixMatvecT(-1.0, A, u, 1.0, Vtemp);
         for (i = 0; i < n; i++)
         {
            /*-----------------------------------------------------------
             * If diagonal is nonzero, relax point i; otherwise, skip it.
             *-----------------------------------------------------------*/

            if (A_diag_data[A_diag_i[i]] != zero)
            {
               u_data[i] += relax_weight * Vtemp_data[i]
                            / A_diag_data[A_diag_i[i]];
            }
         }
      }
      break;
      
      
      case 9: /* Direct solve: use Gaussian elimination */
      {

         /*-----------------------------------------------------------------
          *  Generate CSR matrix from ParCSRMatrix A
          *-----------------------------------------------------------------*/

         if (n)
         {
            A_CSR = hypre_ParCSRMatrixToCSRMatrixAll(A);
            f_vector = hypre_ParVectorToVectorAll(f);
            A_CSR_i = hypre_CSRMatrixI(A_CSR);
            A_CSR_j = hypre_CSRMatrixJ(A_CSR);
            A_CSR_data = hypre_CSRMatrixData(A_CSR);
            f_vector_data = hypre_VectorData(f_vector);

            A_mat = hypre_CTAlloc(HYPRE_Real, n_global*n_global);
            b_vec = hypre_CTAlloc(HYPRE_Real, n_global);

            /*---------------------------------------------------------------
             *  Load transpose of CSR matrix into A_mat.
             *---------------------------------------------------------------*/

            for (i = 0; i < n_global; i++)
            {
               for (jj = A_CSR_i[i]; jj < A_CSR_i[i+1]; jj++)
               {
                  column = A_CSR_j[jj];
                  A_mat[column*n_global+i] = A_CSR_data[jj];
               }
               b_vec[i] = f_vector_data[i];
            }

            relax_error = gselim(A_mat,b_vec,n_global);

            for (i = 0; i < n; i++)
            {
               u_data[i] = b_vec[first_index+i];
            }

            hypre_TFree(A_mat);
            hypre_TFree(b_vec);
            hypre_CSRMatrixDestroy(A_CSR);
            A_CSR = NULL;
            hypre_SeqVectorDestroy(f_vector);
            f_vector = NULL;
         }
      }
      break;   
   }

   return(relax_error); 
}
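
The case-7 branch above is one weighted Jacobi sweep on the transposed system: the residual r = f - A^T u is formed in Vtemp, and each point with a nonzero diagonal is updated as u_i += w * r_i / a_ii. A standalone serial sketch of just that update, with plain arrays standing in for the CSR diagonal access (illustrative code, not a hypre routine):

/* One weighted Jacobi update: r[] already holds f - A^T u, diag[i] holds a_ii. */
void jacobi_update(int n, double w, const double *r, const double *diag, double *u)
{
   int i;
   for (i = 0; i < n; i++)
   {
      if (diag[i] != 0.0)   /* skip points with a zero diagonal, as above */
      {
         u[i] += w * r[i] / diag[i];
      }
   }
}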
Example 6
HYPRE_Int
hypre_seqAMGCycle( hypre_ParAMGData *amg_data,
                   HYPRE_Int p_level,
                   hypre_ParVector  **Par_F_array,
                   hypre_ParVector  **Par_U_array   )
{
   
   hypre_ParVector    *Aux_U;
   hypre_ParVector    *Aux_F;

   /* Local variables  */

   HYPRE_Int       Solve_err_flag = 0;

   HYPRE_Int n;
   HYPRE_Int i;
   
   hypre_Vector   *u_local;
   double         *u_data;
   
   HYPRE_Int       first_index;
   
   /* Acquire seq data */
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data);
   HYPRE_Solver coarse_solver = hypre_ParAMGDataCoarseSolver(amg_data);
   hypre_ParCSRMatrix *A_coarse = hypre_ParAMGDataACoarse(amg_data);
   hypre_ParVector *F_coarse = hypre_ParAMGDataFCoarse(amg_data);
   hypre_ParVector *U_coarse = hypre_ParAMGDataUCoarse(amg_data);

   Aux_U = Par_U_array[p_level];
   Aux_F = Par_F_array[p_level];

   first_index = hypre_ParVectorFirstIndex(Aux_U);
   u_local = hypre_ParVectorLocalVector(Aux_U);
   u_data  = hypre_VectorData(u_local);
   n =  hypre_VectorSize(u_local);


   if (A_coarse)
   {
      double         *f_data;
      hypre_Vector   *f_local;
      hypre_Vector   *tmp_vec;
      
      HYPRE_Int nf;
      HYPRE_Int local_info;
      double *recv_buf;
      HYPRE_Int *displs, *info;
      HYPRE_Int size;
      HYPRE_Int new_num_procs;
      
      hypre_MPI_Comm_size(new_comm, &new_num_procs);

      f_local = hypre_ParVectorLocalVector(Aux_F);
      f_data = hypre_VectorData(f_local);
      nf =  hypre_VectorSize(f_local);

      /* first f */
      info = hypre_CTAlloc(HYPRE_Int, new_num_procs);
      local_info = nf;
      hypre_MPI_Allgather(&local_info, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, new_comm);

      displs = hypre_CTAlloc(HYPRE_Int, new_num_procs+1);
      displs[0] = 0;
      for (i=1; i < new_num_procs+1; i++)
         displs[i] = displs[i-1]+info[i-1]; 
      size = displs[new_num_procs];
      
      tmp_vec =  hypre_ParVectorLocalVector(F_coarse);
      recv_buf = hypre_VectorData(tmp_vec);

      hypre_MPI_Allgatherv ( f_data, nf, hypre_MPI_DOUBLE, 
                          recv_buf, info, displs, 
                          hypre_MPI_DOUBLE, new_comm );

      tmp_vec =  hypre_ParVectorLocalVector(U_coarse);
      recv_buf = hypre_VectorData(tmp_vec);
      
      /*then u */
      hypre_MPI_Allgatherv ( u_data, n, hypre_MPI_DOUBLE, 
                       recv_buf, info, displs, 
                       hypre_MPI_DOUBLE, new_comm );
         
      /* clean up */
      hypre_TFree(displs);
      hypre_TFree(info);

      hypre_BoomerAMGSolve(coarse_solver, A_coarse, F_coarse, U_coarse);      

      /*copy my part of U to parallel vector */
      {
         double *local_data;

         local_data =  hypre_VectorData(hypre_ParVectorLocalVector(U_coarse));

         for (i = 0; i < n; i++)
         {
            u_data[i] = local_data[first_index+i];
         }
      }
   }

   return(Solve_err_flag);
}
Example 7
HYPRE_Int
hypre_seqAMGCycle( hypre_ParAMGData *amg_data,
                   HYPRE_Int p_level,
                   hypre_ParVector  **Par_F_array,
                   hypre_ParVector  **Par_U_array   )
{
   
   hypre_ParVector    *Aux_U;
   hypre_ParVector    *Aux_F;

   /* Local variables  */

   HYPRE_Int       Solve_err_flag = 0;

   HYPRE_Int n;
   HYPRE_Int i;
   
   hypre_Vector   *u_local;
   HYPRE_Real     *u_data;
   
   HYPRE_Int       first_index;
   
   /* Acquire seq data */
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data);
   HYPRE_Solver coarse_solver = hypre_ParAMGDataCoarseSolver(amg_data);
   hypre_ParCSRMatrix *A_coarse = hypre_ParAMGDataACoarse(amg_data);
   hypre_ParVector *F_coarse = hypre_ParAMGDataFCoarse(amg_data);
   hypre_ParVector *U_coarse = hypre_ParAMGDataUCoarse(amg_data);
   HYPRE_Int redundant = hypre_ParAMGDataRedundant(amg_data);

   Aux_U = Par_U_array[p_level];
   Aux_F = Par_F_array[p_level];

   first_index = hypre_ParVectorFirstIndex(Aux_U);
   u_local = hypre_ParVectorLocalVector(Aux_U);
   u_data  = hypre_VectorData(u_local);
   n =  hypre_VectorSize(u_local);


   /*if (A_coarse)*/
   if (hypre_ParAMGDataParticipate(amg_data))
   {
      HYPRE_Real     *f_data;
      hypre_Vector   *f_local;
      hypre_Vector   *tmp_vec;
      
      HYPRE_Int nf;
      HYPRE_Int local_info;
      HYPRE_Real *recv_buf = NULL;
      HYPRE_Int *displs = NULL;
      HYPRE_Int *info = NULL;
      HYPRE_Int new_num_procs, my_id;
      
      hypre_MPI_Comm_size(new_comm, &new_num_procs);
      hypre_MPI_Comm_rank(new_comm, &my_id);

      f_local = hypre_ParVectorLocalVector(Aux_F);
      f_data = hypre_VectorData(f_local);
      nf =  hypre_VectorSize(f_local);

      /* first f */
      info = hypre_CTAlloc(HYPRE_Int, new_num_procs);
      local_info = nf;
      if (redundant)
         hypre_MPI_Allgather(&local_info, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, new_comm);
      else
         hypre_MPI_Gather(&local_info, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, 0, new_comm);

      if (redundant || my_id ==0)
      {
         displs = hypre_CTAlloc(HYPRE_Int, new_num_procs+1);
         displs[0] = 0;
         for (i=1; i < new_num_procs+1; i++)
            displs[i] = displs[i-1]+info[i-1]; 
      
         if (F_coarse) 
         {
            tmp_vec =  hypre_ParVectorLocalVector(F_coarse);
            recv_buf = hypre_VectorData(tmp_vec);
         }
      }

      if (redundant)
         hypre_MPI_Allgatherv ( f_data, nf, HYPRE_MPI_REAL,
                          recv_buf, info, displs,
                          HYPRE_MPI_REAL, new_comm );
      else
         hypre_MPI_Gatherv ( f_data, nf, HYPRE_MPI_REAL,
                          recv_buf, info, displs,
                          HYPRE_MPI_REAL, 0, new_comm );

      if (redundant || my_id ==0)
      {
         tmp_vec =  hypre_ParVectorLocalVector(U_coarse);
         recv_buf = hypre_VectorData(tmp_vec);
      }
      
      /*then u */
      if (redundant)
      {
         hypre_MPI_Allgatherv ( u_data, n, HYPRE_MPI_REAL,
                       recv_buf, info, displs,
                       HYPRE_MPI_REAL, new_comm );
         hypre_TFree(displs);
         hypre_TFree(info);
      }
      else
         hypre_MPI_Gatherv ( u_data, n, HYPRE_MPI_REAL,
                       recv_buf, info, displs,
                       HYPRE_MPI_REAL, 0, new_comm );
         
      /* solve the coarse-grid system (on all ranks if redundant, else on rank 0) */
      if (redundant || my_id == 0)
      {
         hypre_BoomerAMGSolve(coarse_solver, A_coarse, F_coarse, U_coarse);
      }

      /*copy my part of U to parallel vector */
      if (redundant)
      {
         HYPRE_Real *local_data;

         local_data =  hypre_VectorData(hypre_ParVectorLocalVector(U_coarse));

         for (i = 0; i < n; i++)
         {
            u_data[i] = local_data[first_index+i];
         }
      }
      else
      {
         HYPRE_Real *local_data=NULL;

         if (my_id == 0)
            local_data =  hypre_VectorData(hypre_ParVectorLocalVector(U_coarse));

         hypre_MPI_Scatterv ( local_data, info, displs, HYPRE_MPI_REAL,
                       u_data, n, HYPRE_MPI_REAL, 0, new_comm );
         /*if (my_id == 0)
            local_data =  hypre_VectorData(hypre_ParVectorLocalVector(F_coarse));
            hypre_MPI_Scatterv ( local_data, info, displs, HYPRE_MPI_REAL,
                       f_data, n, HYPRE_MPI_REAL, 0, new_comm );*/
         if (my_id == 0) hypre_TFree(displs);
         hypre_TFree(info);
      }
   }

   return(Solve_err_flag);
}
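
In the non-redundant path above, the coarse right-hand side is gathered to rank 0 only, the coarse solve happens there, and the solution is scattered back, while the redundant path uses Allgatherv so every rank solves. A minimal MPI sketch of that gather/solve/scatter round trip (all names here are illustrative, not hypre routines):

#include <mpi.h>

void gather_solve_scatter(double *local, int n_local, double *global,
                          int *counts, int *displs, MPI_Comm comm)
{
   int my_id;
   MPI_Comm_rank(comm, &my_id);

   /* counts/displs describe every rank's piece; only rank 0 needs global. */
   MPI_Gatherv(local, n_local, MPI_DOUBLE,
               global, counts, displs, MPI_DOUBLE, 0, comm);

   if (my_id == 0)
   {
      /* ... rank 0 solves the assembled coarse problem in place ... */
   }

   MPI_Scatterv(global, counts, displs, MPI_DOUBLE,
                local, n_local, MPI_DOUBLE, 0, comm);
}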
Example 8
hypre_Vector *
hypre_ParVectorToVectorAll (hypre_ParVector *par_v)
{
   MPI_Comm            comm = hypre_ParVectorComm(par_v);
   HYPRE_Int           global_size = hypre_ParVectorGlobalSize(par_v);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int          *vec_starts = hypre_ParVectorPartitioning(par_v);
#endif
   hypre_Vector       *local_vector = hypre_ParVectorLocalVector(par_v);
   HYPRE_Int           num_procs, my_id;
   HYPRE_Int           num_vectors = hypre_ParVectorNumVectors(par_v);
   hypre_Vector       *vector;
   double             *vector_data;
   double             *local_data;
   HYPRE_Int           local_size;
   hypre_MPI_Request  *requests;
   hypre_MPI_Status   *status;
   HYPRE_Int           i, j;
   HYPRE_Int          *used_procs;
   HYPRE_Int           num_types, num_requests;
   HYPRE_Int           vec_len, proc_id;

#ifdef HYPRE_NO_GLOBAL_PARTITION

   HYPRE_Int *new_vec_starts;
   
   HYPRE_Int num_contacts;
   HYPRE_Int contact_proc_list[1];
   HYPRE_Int contact_send_buf[1];
   HYPRE_Int contact_send_buf_starts[2];
   HYPRE_Int max_response_size;
   HYPRE_Int *response_recv_buf=NULL;
   HYPRE_Int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;
   
   HYPRE_Int *send_info = NULL;
   hypre_MPI_Status  status1;
   HYPRE_Int count, tag1 = 112, tag2 = 223;
   HYPRE_Int start;
   
#endif


   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION

   local_size = hypre_ParVectorLastIndex(par_v) -
                hypre_ParVectorFirstIndex(par_v) + 1;

   /* Determine which procs hold data of par_v and store their ids in used_procs.
      This requires a data exchange: if I own rows, I contact processor 0 with
      the endpoint of my local range. */

   if (local_size > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] =  hypre_ParVectorLastIndex(par_v);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }

   /*build the response object*/
   /*send_proc_obj will  be for saving info from contacts */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length);
   send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1); 
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements = hypre_CTAlloc(HYPRE_Int, send_proc_obj.element_storage_length);

   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToVectorAll;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
  
   
   hypre_DataExchangeList(num_contacts, 
                          contact_proc_list, contact_send_buf, 
                          contact_send_buf_starts, sizeof(HYPRE_Int), 
                          sizeof(HYPRE_Int), &response_obj, 
                          max_response_size, 1,
                          comm, (void**) &response_recv_buf,	   
                          &response_recv_buf_starts);

   /* Now processor 0 should have a list of ranges for the processors that have
      rows - these are in send_proc_obj.  It needs to create the new list of
      processors and an array of vec starts, and send them to those who own rows. */
   if (my_id)
   {
      if (local_size)      
      {
         /* look for a message from processor 0 */         
         hypre_MPI_Probe(0, tag1, comm, &status1);
         hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
         
         send_info = hypre_CTAlloc(HYPRE_Int, count);
         hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);

         /* now unpack */  
         num_types = send_info[0];
         used_procs =  hypre_CTAlloc(HYPRE_Int, num_types);  
         new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1);

         for (i=1; i<= num_types; i++)
         {
            used_procs[i-1] = send_info[i];
         }
         for (i=num_types+1; i< count; i++)
         {
            new_vec_starts[i-num_types-1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts);
         hypre_TFree(send_proc_obj.id);
         hypre_TFree(send_proc_obj.elements);
         if(response_recv_buf)        hypre_TFree(response_recv_buf);
         if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);
         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      num_types = send_proc_obj.length;
      used_procs =  hypre_CTAlloc(HYPRE_Int, num_types);  
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1);
      
      new_vec_starts[0] = 0;
      for (i=0; i< num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
      }
      qsort0(used_procs, 0, num_types-1);
      qsort0(new_vec_starts, 0, num_types);
      /*now we need to put into an array to send */
      count =  2*num_types+2;
      send_info = hypre_CTAlloc(HYPRE_Int, count);
      send_info[0] = num_types;
      for (i=1; i<= num_types; i++)
      {
         send_info[i] = used_procs[i-1];
      }
      for (i=num_types+1; i< count; i++)
      {
         send_info[i] = new_vec_starts[i-num_types-1];
      }
      requests = hypre_CTAlloc(hypre_MPI_Request, num_types);
      status =  hypre_CTAlloc(hypre_MPI_Status, num_types);

      /* don't send to myself  - these are sorted so my id would be first*/
      start = 0;
      if (used_procs[0] == 0)
      {
         start = 1;
      }
   
      
      for (i=start; i < num_types; i++)
      {
         hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]);
      }
      hypre_MPI_Waitall(num_types-start, requests, status);

      hypre_TFree(status);
      hypre_TFree(requests);
   }

   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts);
   hypre_TFree(send_proc_obj.id);
   hypre_TFree(send_proc_obj.elements);
   hypre_TFree(send_info);
   if(response_recv_buf)        hypre_TFree(response_recv_buf);
   if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);

   /* now proc 0 can exit if it has no rows */
   if (!local_size) {
      hypre_TFree(used_procs);
      hypre_TFree(new_vec_starts);
      return NULL;
   }
   
   /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */

  /* this vector should be rather small */

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   num_requests = 2*num_types;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);

   /* Initialize the data exchange among used_procs and generate the vector;
      here we also send to ourselves. */

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = new_vec_starts[i+1] - new_vec_starts[i];
      hypre_MPI_Irecv(&vector_data[new_vec_starts[i]], num_vectors*vec_len,
                      hypre_MPI_DOUBLE, proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, hypre_MPI_DOUBLE,
                      used_procs[i], tag2, comm, &requests[j++]);
   }
 
   hypre_MPI_Waitall(num_requests, requests, status);


   if (num_requests)
   {
      hypre_TFree(requests);
      hypre_TFree(status);
      hypre_TFree(used_procs);
   }

   hypre_TFree(new_vec_starts);
   


#else
   local_size = vec_starts[my_id+1] - vec_starts[my_id];

   /* if my_id contains no data, return NULL */
   if (!local_size)
      return NULL;
 
   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   /* determine procs which hold data of par_v and store ids in used_procs */

   num_types = -1;
   for (i = 0; i < num_procs; i++)
      if (vec_starts[i+1] - vec_starts[i])
         num_types++;
   num_requests = 2*num_types;

   used_procs = hypre_CTAlloc(HYPRE_Int, num_types);
   j = 0;
   for (i = 0; i < num_procs; i++)
      if (vec_starts[i+1] - vec_starts[i] && i != my_id)
         used_procs[j++] = i;
 
   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);

   /* initialize data exchange among used_procs and generate vector */

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = vec_starts[proc_id+1] - vec_starts[proc_id];
      hypre_MPI_Irecv(&vector_data[vec_starts[proc_id]], num_vectors*vec_len,
                      hypre_MPI_DOUBLE, proc_id, 0, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, hypre_MPI_DOUBLE,
                      used_procs[i], 0, comm, &requests[j++]);
   }

   for (i = 0; i < num_vectors*local_size; i++)
      vector_data[vec_starts[my_id]+i] = local_data[i];
 
   hypre_MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(used_procs);
      hypre_TFree(requests);
      hypre_TFree(status);
   }


#endif

   return vector;
}
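
A usage sketch for hypre_ParVectorToVectorAll: a rank that holds data gets back a full serial copy of the distributed vector (this is how the direct coarse solve in hypre_BoomerAMGRelaxT above uses it), while ranks without data get NULL. hypre_SeqVectorDestroy is used for cleanup exactly as in the listings above:

void gather_example(hypre_ParVector *par_v)
{
   hypre_Vector *all = hypre_ParVectorToVectorAll(par_v);

   if (all)   /* NULL is returned on ranks that hold no local data */
   {
      double first = hypre_VectorData(all)[0];
      (void) first;
      hypre_SeqVectorDestroy(all);
   }
}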
Example 9
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
  realtype ui, ult, urt, hordc, horac, hdiff, hadv;
  realtype *udata, *udotdata, *z;
  int i;
  int npes, my_pe, my_length, my_pe_m1, my_pe_p1, last_pe;
  UserData data;
  MPI_Status status;
  MPI_Comm comm;
  HYPRE_ParVector uhyp;
  HYPRE_ParVector udothyp;

  /* Extract hypre vectors */  
  uhyp  = N_VGetVector_ParHyp(u);
  udothyp  = N_VGetVector_ParHyp(udot);
  
  /* Access hypre vectors local data */
  udata = hypre_VectorData(hypre_ParVectorLocalVector(uhyp));
  udotdata = hypre_VectorData(hypre_ParVectorLocalVector(udothyp));

  /* Extract needed problem constants from data */
  data = (UserData) user_data;
  hordc = data->hdcoef;
  horac = data->hacoef;

  /* Extract parameters for parhyp computation. */
  comm = data->comm;
  npes = data->npes;                           /* Number of processes    */ 
  my_pe = data->my_pe;                         /* Current process number */
  my_length =  hypre_ParVectorLastIndex(uhyp)  /* Local length of uhyp   */
             - hypre_ParVectorFirstIndex(uhyp) + 1;  
  z = data->z;

  /* Compute related parameters. */
  my_pe_m1 = my_pe - 1;
  my_pe_p1 = my_pe + 1;
  last_pe = npes - 1;

  /* Store local segment of u in the working array z. */
  for (i = 1; i <= my_length; i++)
    z[i] = udata[i - 1];

  /* Pass needed data to processes before and after current process. */
  if (my_pe != 0)
    MPI_Send(&z[1], 1, PVEC_REAL_MPI_TYPE, my_pe_m1, 0, comm);
  if (my_pe != last_pe)
    MPI_Send(&z[my_length], 1, PVEC_REAL_MPI_TYPE, my_pe_p1, 0, comm);   

  /* Receive needed data from processes before and after current process. */
  if (my_pe != 0)
    MPI_Recv(&z[0], 1, PVEC_REAL_MPI_TYPE, my_pe_m1, 0, comm, &status);
  else 
    z[0] = ZERO;
  if (my_pe != last_pe)
    MPI_Recv(&z[my_length+1], 1, PVEC_REAL_MPI_TYPE, my_pe_p1, 0, comm,
             &status);   
  else 
    z[my_length + 1] = ZERO;

  /* Loop over all grid points in current process. */
  for (i=1; i<=my_length; i++) {

    /* Extract u at x_i and two neighboring points */
    ui = z[i];
    ult = z[i-1];
    urt = z[i+1];

    /* Set diffusion and advection terms and load into udot */
    hdiff = hordc*(ult - RCONST(2.0)*ui + urt);
    hadv = horac*(urt - ult);
    udotdata[i-1] = hdiff + hadv;
  }

  return(0);
}
Example 10
hypre_ParVector *
hypre_ParVectorCreateFromBlock(  MPI_Comm comm,
                                 HYPRE_Int p_global_size, 
                                 HYPRE_Int *p_partitioning, HYPRE_Int block_size)
{
   hypre_ParVector  *vector;
   HYPRE_Int num_procs, my_id, i;
   HYPRE_Int global_size;
   HYPRE_Int *new_partitioning; /* need to create a new partitioning - don't want
                                   to overwrite what is passed in */
   


   global_size = p_global_size*block_size;

   vector = hypre_CTAlloc(hypre_ParVector, 1);
   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   if (!p_partitioning)
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &new_partitioning);
#else
      hypre_GeneratePartitioning(global_size, num_procs, &new_partitioning);
#endif
   }
   else /* adjust for block_size */
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      new_partitioning = hypre_CTAlloc(HYPRE_Int, 2);
      for(i = 0; i < 2; i++)
      {
         new_partitioning[i] = p_partitioning[i]*block_size;
      }
#else
      new_partitioning = hypre_CTAlloc(HYPRE_Int, num_procs + 1);
      for(i = 0; i < num_procs + 1; i++)
      {
         new_partitioning[i] = p_partitioning[i]*block_size;
      }
#endif
   }
   

   hypre_ParVectorComm(vector) = comm;
   hypre_ParVectorGlobalSize(vector) = global_size;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(vector) = new_partitioning[0];
   hypre_ParVectorLastIndex(vector) = new_partitioning[1]-1;
   hypre_ParVectorPartitioning(vector) = new_partitioning;
   hypre_ParVectorLocalVector(vector) = 
      hypre_SeqVectorCreate(new_partitioning[1]-new_partitioning[0]);
#else
   hypre_ParVectorFirstIndex(vector) = new_partitioning[my_id];
   hypre_ParVectorLastIndex(vector) = new_partitioning[my_id+1] -1;
   hypre_ParVectorPartitioning(vector) = new_partitioning;
   hypre_ParVectorLocalVector(vector) = 
      hypre_SeqVectorCreate(new_partitioning[my_id+1]-new_partitioning[my_id]);
#endif

   /* set defaults */
   hypre_ParVectorOwnsData(vector) = 1;
   hypre_ParVectorOwnsPartitioning(vector) = 1;

   return vector;
}
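
A short sketch of the block variant above: both the global size and every partition boundary are scaled by block_size, so a nodal layout of p_global_size nodes becomes a ParVector of p_global_size*block_size scalar unknowns (illustrative helper, assuming the routine as listed):

/* 50 nodes with 3 unknowns per node -> a 150-entry ParVector; passing NULL
   lets the routine generate the (already scaled) partitioning itself. */
hypre_ParVector *make_block_vector(MPI_Comm comm)
{
   HYPRE_Int num_nodes  = 50;
   HYPRE_Int block_size = 3;

   return hypre_ParVectorCreateFromBlock(comm, num_nodes, NULL, block_size);
}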