Example #1
hypre_ParVector
*hypre_ParVectorRead( MPI_Comm    comm,
                      const char *file_name )
{
   char             new_file_name[80];
   hypre_ParVector *par_vector;
   HYPRE_Int        my_id, num_procs;
   HYPRE_Int       *partitioning;
   HYPRE_Int        global_size, i;
   FILE            *fp;

   hypre_MPI_Comm_rank(comm,&my_id); 
   hypre_MPI_Comm_size(comm,&num_procs); 

   partitioning = hypre_CTAlloc(HYPRE_Int,num_procs+1);

   hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); 
   fp = fopen(new_file_name, "r");
   if (!fp) /* file missing or unreadable */
   {
      hypre_TFree(partitioning);
      return NULL;
   }
   hypre_fscanf(fp, "%d\n", &global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i = 0; i < 2; i++)
      hypre_fscanf(fp, "%d\n", &partitioning[i]);
   fclose (fp);
#else
   for (i = 0; i < num_procs; i++)
      hypre_fscanf(fp, "%d\n", &partitioning[i]);
   fclose (fp);
   partitioning[num_procs] = global_size; 
#endif
   par_vector = hypre_CTAlloc(hypre_ParVector, 1);

   hypre_ParVectorComm(par_vector) = comm;
   hypre_ParVectorGlobalSize(par_vector) = global_size;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(par_vector) = partitioning[0];
   hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1;
#else
   hypre_ParVectorFirstIndex(par_vector) = partitioning[my_id];
   hypre_ParVectorLastIndex(par_vector) = partitioning[my_id+1]-1;
#endif

   hypre_ParVectorPartitioning(par_vector) = partitioning;

   hypre_ParVectorOwnsData(par_vector) = 1;
   hypre_ParVectorOwnsPartitioning(par_vector) = 1;

   hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); 
   hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name);

   /* multivector code not written yet >>> */
   hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 );

   return par_vector;
}
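A minimal usage sketch for the reader above (an illustration, not library code): it assumes MPI is initialized, the usual hypre parcsr headers are included, and "vecfile" names a file set previously written by hypre_ParVectorPrint with the same number of ranks.

   hypre_ParVector *v = hypre_ParVectorRead(hypre_MPI_COMM_WORLD, "vecfile");
   /* ... use v; the reader sets OwnsData and OwnsPartitioning to 1 ... */
   hypre_ParVectorDestroy(v);   /* frees the local vector and the partitioning */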
Example #2
hypre_ParVector *
hypre_ParVectorCreate( MPI_Comm   comm,
                       HYPRE_Int  global_size,
                       HYPRE_Int *partitioning )
{
   hypre_ParVector  *vector;
   HYPRE_Int num_procs, my_id;

   if (global_size < 0)
   {
      hypre_error_in_arg(2);
      return NULL;
   }
   vector = hypre_CTAlloc(hypre_ParVector, 1);
   hypre_MPI_Comm_rank(comm,&my_id);

   if (!partitioning)
   {
     hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
     hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning);
#else
     hypre_GeneratePartitioning(global_size, num_procs, &partitioning);
#endif
   }

   hypre_ParVectorAssumedPartition(vector) = NULL;

   hypre_ParVectorComm(vector) = comm;
   hypre_ParVectorGlobalSize(vector) = global_size;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(vector) = partitioning[0];
   hypre_ParVectorLastIndex(vector) = partitioning[1]-1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) = 
		hypre_SeqVectorCreate(partitioning[1]-partitioning[0]);
#else
   hypre_ParVectorFirstIndex(vector) = partitioning[my_id];
   hypre_ParVectorLastIndex(vector) = partitioning[my_id+1] -1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) = 
		hypre_SeqVectorCreate(partitioning[my_id+1]-partitioning[my_id]);
#endif

   /* set defaults */
   hypre_ParVectorOwnsData(vector) = 1;
   hypre_ParVectorOwnsPartitioning(vector) = 1;

   return vector;
}
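A brief usage sketch (assumptions: hypre headers included, MPI initialized; hypre_ParVectorInitialize allocates the local storage, as in the examples below):

   hypre_ParVector *v = hypre_ParVectorCreate(hypre_MPI_COMM_WORLD, 100, NULL);
   /* NULL partitioning above: the routine generates one, and the vector
      owns it, so a single Destroy cleans everything up */
   hypre_ParVectorInitialize(v);
   /* ... use v ... */
   hypre_ParVectorDestroy(v);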
Example #3
void *
hypre_ParKrylovCreateVector( void *vvector )
{
   hypre_ParVector *vector = vvector;
   hypre_ParVector *new_vector;

   new_vector = hypre_ParVectorCreate( hypre_ParVectorComm(vector),
				       hypre_ParVectorGlobalSize(vector),	
                                       hypre_ParVectorPartitioning(vector) );
   hypre_ParVectorSetPartitioningOwner(new_vector,0);
   hypre_ParVectorInitialize(new_vector);

   return ( (void *) new_vector );
}
Example #4
HYPRE_Int
hypre_ParVectorSetRandomValues( hypre_ParVector *v,
                                HYPRE_Int        seed )
{
   HYPRE_Int my_id;
   hypre_Vector *v_local = hypre_ParVectorLocalVector(v);

   MPI_Comm comm = hypre_ParVectorComm(v);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* give each rank a distinct seed */
   seed *= (my_id + 1);

   return hypre_SeqVectorSetRandomValues(v_local,seed);
}
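Because the seed is rescaled by (my_id + 1), every rank draws from a different random stream even though the call site passes a single value; usage sketch (v is a hypothetical initialized hypre_ParVector):

   hypre_ParVectorSetRandomValues(v, 775);   /* same literal seed on all ranks */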
Example #5
double
hypre_ParVectorInnerProd( hypre_ParVector *x,
                          hypre_ParVector *y )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
           
   double result = 0.0;
   double local_result = hypre_SeqVectorInnerProd(x_local, y_local);
   
   hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_DOUBLE, hypre_MPI_SUM, comm);
   
   return result;
}
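Because the local partial products are combined with hypre_MPI_Allreduce, every rank ends up with the same value, so derived quantities need no further communication; a sketch for the parallel 2-norm (sqrt from <math.h>):

   double norm = sqrt(hypre_ParVectorInnerProd(x, x));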
Example #6
hypre_ParVector *
hypre_ParVectorCloneShallow( hypre_ParVector *x )
{
   hypre_ParVector * y = hypre_ParVectorCreate(
      hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x) );

   hypre_ParVectorOwnsData(y) = 1;
   /* ...This vector owns its local vector, although the local vector doesn't own _its_ data */
   hypre_ParVectorOwnsPartitioning(y) = 0;
   hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
   hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(
      hypre_ParVectorLocalVector(x) );
   hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x);

   return y;
}
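A cleanup sketch for the shallow clone (an assumption about intended usage: y's local vector aliases x's data, so destroy y first and keep x alive while y is in use):

   hypre_ParVector *y = hypre_ParVectorCloneShallow(x);
   /* ... use y; its data belongs to x ... */
   hypre_ParVectorDestroy(y);   /* frees the wrappers, not x's data */
   hypre_ParVectorDestroy(x);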
Example #7
0
HYPRE_Real
hypre_ParVectorInnerProd( hypre_ParVector *x,
                          hypre_ParVector *y )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
           
   HYPRE_Real result = 0.0;
   HYPRE_Real local_result = hypre_SeqVectorInnerProd(x_local, y_local);
   
   hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);
   
   return result;
}
Example #8
int HYPRE_ParCSRCotreeSetup(HYPRE_Solver solver, HYPRE_ParCSRMatrix A,
                            HYPRE_ParVector b, HYPRE_ParVector x)
{
   int           *partition, *new_partition, nprocs, *tindices, ii;
   void *vsolver = (void *) solver;
/*
   void *vA      = (void *) A;
   void *vb      = (void *) b;
   void *vx      = (void *) x;
*/
   hypre_CotreeData   *cotree_data = (hypre_CotreeData *) vsolver;
   hypre_ParCSRMatrix **submatrices;
   hypre_ParVector    *new_vector;
   MPI_Comm           comm;

   cotree_data->Aee = (hypre_ParCSRMatrix *) A;
   hypre_ParCSRMatrixGenSpanningTree(cotree_data->Gen, &tindices, 1);
   /* room for the four blocks Att, Atc, Act, Acc */
   submatrices = (hypre_ParCSRMatrix **) malloc(4 * sizeof(hypre_ParCSRMatrix *));
   hypre_ParCSRMatrixExtractSubmatrices(cotree_data->Aee, tindices,
                                        &submatrices);
   cotree_data->Att = submatrices[0];
   cotree_data->Atc = submatrices[1];
   cotree_data->Act = submatrices[2];
   cotree_data->Acc = submatrices[3];

   hypre_ParCSRMatrixExtractRowSubmatrices(cotree_data->Gen, tindices,
                                           &submatrices);
   cotree_data->Gt = submatrices[0];
   cotree_data->Gc = submatrices[1];
   free(submatrices);

   comm = hypre_ParCSRMatrixComm((hypre_ParCSRMatrix *) A);
   MPI_Comm_size(comm, &nprocs);
   partition = hypre_ParVectorPartitioning((hypre_ParVector *) b);
   new_partition = (int *) malloc((nprocs+1) * sizeof(int));
   for (ii = 0; ii <= nprocs; ii++) new_partition[ii] = partition[ii];
/*   partition = hypre_ParVectorPartitioning((hypre_ParVector *) b);  */
   new_vector = hypre_ParVectorCreate(hypre_ParVectorComm((hypre_ParVector *)b),
		   (int) hypre_ParVectorGlobalSize((hypre_ParVector *) b),	
                   new_partition);
   hypre_ParVectorInitialize(new_vector);
   cotree_data->w = new_vector;
   return 0;
}
Example #9
void *
hypre_ParKrylovCreateVectorArray(HYPRE_Int n, void *vvector )
{
   hypre_ParVector *vector = vvector;
   hypre_ParVector **new_vector;
   HYPRE_Int i;

   new_vector = hypre_CTAlloc(hypre_ParVector*,n);
   for (i=0; i < n; i++)
   {
      new_vector[i] = hypre_ParVectorCreate( hypre_ParVectorComm(vector),
                                             hypre_ParVectorGlobalSize(vector),	
                                             hypre_ParVectorPartitioning(vector) );
      hypre_ParVectorSetPartitioningOwner(new_vector[i],0);
      hypre_ParVectorInitialize(new_vector[i]);
   }

   return ( (void *) new_vector );
}
Example #10
/******************************************************************************
 *
 * hypre_IJVectorDistributePar
 *
 * takes an IJVector generated on one processor and distributes it
 * across many processors according to vec_starts;
 * if vec_starts is NULL, the vector is distributed evenly
 *
 *****************************************************************************/
HYPRE_Int
hypre_IJVectorDistributePar( hypre_IJVector  *vector,
                             const HYPRE_Int *vec_starts )
{

   hypre_ParVector *old_vector = hypre_IJVectorObject(vector);
   hypre_ParVector *par_vector;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
   
   if (!old_vector)
   {
      if (print_level)
      {
         hypre_printf("old_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   par_vector = hypre_VectorToParVector(hypre_ParVectorComm(old_vector),
                                        hypre_ParVectorLocalVector(old_vector),
                                        (HYPRE_Int *) vec_starts);
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is unallocated ****\n");
      }
      hypre_error_in_arg(1);
   }

   hypre_ParVectorDestroy(old_vector);

   hypre_IJVectorObject(vector) = par_vector;

   return hypre_error_flag;
}
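A call sketch (ij_v is a hypothetical, already-assembled hypre_IJVector; NULL vec_starts requests the even distribution described in the header comment):

   hypre_IJVectorDistributePar(ij_v, NULL);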
Example #11
HYPRE_Int
hypre_ParVectorPrint( hypre_ParVector  *vector, 
                      const char       *file_name )
{
   char          new_file_name[80];
   hypre_Vector *local_vector;
   MPI_Comm      comm;
   HYPRE_Int     my_id, num_procs, i;
   HYPRE_Int    *partitioning;
   HYPRE_Int     global_size;
   FILE         *fp;

   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(vector); 
   comm = hypre_ParVectorComm(vector);
   partitioning = hypre_ParVectorPartitioning(vector); 
   global_size = hypre_ParVectorGlobalSize(vector); 

   hypre_MPI_Comm_rank(comm,&my_id); 
   hypre_MPI_Comm_size(comm,&num_procs); 
   hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); 
   hypre_SeqVectorPrint(local_vector,new_file_name);
   hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); 
   fp = fopen(new_file_name, "w");
   hypre_fprintf(fp, "%d\n", global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i = 0; i < 2; i++)
      hypre_fprintf(fp, "%d\n", partitioning[i]);
#else
   for (i = 0; i < num_procs; i++)
      hypre_fprintf(fp, "%d\n", partitioning[i]);
#endif

   fclose (fp);
   return hypre_error_flag;
}
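Print and the reader of Example #1 are file-format inverses: for a base name "vecfile", each rank writes vecfile.<rank> (local data) and vecfile.INFO.<rank> (global size plus partitioning entries). A round-trip sketch, assuming the same communicator and rank count on both sides:

   hypre_ParVectorPrint(v, "vecfile");
   hypre_ParVector *w = hypre_ParVectorRead(comm, "vecfile");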
Example #12
HYPRE_Int
hypre_ParVectorPrintIJ( hypre_ParVector *vector,
                        HYPRE_Int        base_j,
                        const char      *filename )
{
   MPI_Comm          comm;
   HYPRE_Int               global_size;
   HYPRE_Int              *partitioning;
   double           *local_data;
   HYPRE_Int               myid, num_procs, i, j, part0;
   char              new_filename[255];
   FILE             *file;
   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm         = hypre_ParVectorComm(vector);
   global_size  = hypre_ParVectorGlobalSize(vector);
   partitioning = hypre_ParVectorPartitioning(vector);

   /* multivector code not written yet >>> */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error_in_arg(1);

   hypre_MPI_Comm_rank(comm, &myid);
   hypre_MPI_Comm_size(comm, &num_procs);
  
   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_printf("Error: can't open output file %s\n", new_filename);
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector));

   hypre_fprintf(file, "%d \n", global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=0; i < 2; i++)
#else
   for (i=0; i <= num_procs; i++)
#endif
   {
      hypre_fprintf(file, "%d \n", partitioning[i] + base_j);
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   part0 = partitioning[0];
   for (j = part0; j < partitioning[1]; j++)
#else
   part0 = partitioning[myid];
   for (j = part0; j < partitioning[myid+1]; j++)
#endif
   {
      hypre_fprintf(file, "%d %.14e\n", j + base_j, local_data[j-part0]);
   }

   fclose(file);

   return hypre_error_flag;
}
Example #13
hypre_Vector *
hypre_ParVectorToVectorAll (hypre_ParVector *par_v)
{
   MPI_Comm           comm = hypre_ParVectorComm(par_v);
   HYPRE_Int          global_size = hypre_ParVectorGlobalSize(par_v);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int         *vec_starts = hypre_ParVectorPartitioning(par_v);
#endif
   hypre_Vector      *local_vector = hypre_ParVectorLocalVector(par_v);
   HYPRE_Int          num_procs, my_id;
   HYPRE_Int          num_vectors = hypre_ParVectorNumVectors(par_v);
   hypre_Vector      *vector;
   double            *vector_data;
   double            *local_data;
   HYPRE_Int          local_size;
   hypre_MPI_Request *requests;
   hypre_MPI_Status  *status;
   HYPRE_Int          i, j;
   HYPRE_Int         *used_procs;
   HYPRE_Int          num_types, num_requests;
   HYPRE_Int          vec_len, proc_id;

#ifdef HYPRE_NO_GLOBAL_PARTITION

   HYPRE_Int *new_vec_starts;
   
   HYPRE_Int num_contacts;
   HYPRE_Int contact_proc_list[1];
   HYPRE_Int contact_send_buf[1];
   HYPRE_Int contact_send_buf_starts[2];
   HYPRE_Int max_response_size;
   HYPRE_Int *response_recv_buf=NULL;
   HYPRE_Int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;
   
   HYPRE_Int *send_info = NULL;
   hypre_MPI_Status  status1;
   HYPRE_Int count, tag1 = 112, tag2 = 223;
   HYPRE_Int start;
   
#endif


   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION

   local_size = hypre_ParVectorLastIndex(par_v) -
                hypre_ParVectorFirstIndex(par_v) + 1;

   /* determine procs which hold data of par_v and store ids in used_procs;
      we need a data exchange for this: if I own rows, I contact
      processor 0 with the endpoint of my local range */

   if (local_size > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] =  hypre_ParVectorLastIndex(par_v);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }

   /*build the response object*/
   /*send_proc_obj will  be for saving info from contacts */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length);
   send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1); 
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements = hypre_CTAlloc(HYPRE_Int, send_proc_obj.element_storage_length);

   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToVectorAll;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
  
   
   hypre_DataExchangeList(num_contacts, 
                          contact_proc_list, contact_send_buf, 
                          contact_send_buf_starts, sizeof(HYPRE_Int), 
                          sizeof(HYPRE_Int), &response_obj, 
                          max_response_size, 1,
                          comm, (void**) &response_recv_buf,	   
                          &response_recv_buf_starts);

   /* now processor 0 has a list of ranges for the processors that own rows;
      these are in send_proc_obj - it needs to create the new list of
      processors and an array of vec starts, and send both to the row owners */
   if (my_id)
   {
      if (local_size)      
      {
         /* look for a message from processor 0 */         
         hypre_MPI_Probe(0, tag1, comm, &status1);
         hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
         
         send_info = hypre_CTAlloc(HYPRE_Int, count);
         hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);

         /* now unpack */  
         num_types = send_info[0];
         used_procs =  hypre_CTAlloc(HYPRE_Int, num_types);  
         new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1);

         for (i=1; i<= num_types; i++)
         {
            used_procs[i-1] = send_info[i];
         }
         for (i=num_types+1; i< count; i++)
         {
            new_vec_starts[i-num_types-1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts);
         hypre_TFree(send_proc_obj.id);
         hypre_TFree(send_proc_obj.elements);
         if(response_recv_buf)        hypre_TFree(response_recv_buf);
         if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);
         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      num_types = send_proc_obj.length;
      used_procs =  hypre_CTAlloc(HYPRE_Int, num_types);  
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1);
      
      new_vec_starts[0] = 0;
      for (i=0; i< num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
      }
      qsort0(used_procs, 0, num_types-1);
      qsort0(new_vec_starts, 0, num_types);
      /*now we need to put into an array to send */
      count =  2*num_types+2;
      send_info = hypre_CTAlloc(HYPRE_Int, count);
      send_info[0] = num_types;
      for (i=1; i<= num_types; i++)
      {
         send_info[i] = used_procs[i-1];
      }
      for (i=num_types+1; i< count; i++)
      {
         send_info[i] = new_vec_starts[i-num_types-1];
      }
      requests = hypre_CTAlloc(hypre_MPI_Request, num_types);
      status =  hypre_CTAlloc(hypre_MPI_Status, num_types);

      /* don't send to myself  - these are sorted so my id would be first*/
      start = 0;
      if (used_procs[0] == 0)
      {
         start = 1;
      }
   
      
      for (i=start; i < num_types; i++)
      {
         hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]);
      }
      hypre_MPI_Waitall(num_types-start, requests, status);

      hypre_TFree(status);
      hypre_TFree(requests);
   }

   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts);
   hypre_TFree(send_proc_obj.id);
   hypre_TFree(send_proc_obj.elements);
   hypre_TFree(send_info);
   if(response_recv_buf)        hypre_TFree(response_recv_buf);
   if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);

   /* now proc 0 can exit if it has no rows */
   if (!local_size) {
      hypre_TFree(used_procs);
      hypre_TFree(new_vec_starts);
      return NULL;
   }
   
   /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */

   /* this vector should be rather small */

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   num_requests = 2*num_types;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);

   /* initialize data exchange among used_procs and generate the vector;
      here we also send to ourselves */
 
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = new_vec_starts[i+1] - new_vec_starts[i];
      hypre_MPI_Irecv(&vector_data[new_vec_starts[i]], num_vectors*vec_len,
                      hypre_MPI_DOUBLE, proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, hypre_MPI_DOUBLE,
                      used_procs[i], tag2, comm, &requests[j++]);
   }
 
   hypre_MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(requests);
      hypre_TFree(status);
      hypre_TFree(used_procs);
   }

   hypre_TFree(new_vec_starts);

#else
   local_size = vec_starts[my_id+1] - vec_starts[my_id];

   /* if my_id contains no data, return NULL */
   if (!local_size)
      return NULL;
 
   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   /* determine procs which hold data of par_v and store ids in used_procs;
      num_types starts at -1 so the calling proc (which owns data, or we
      would have returned above) is not counted */

   num_types = -1;
   for (i = 0; i < num_procs; i++)
      if (vec_starts[i+1] - vec_starts[i])
         num_types++;
   num_requests = 2*num_types;

   used_procs = hypre_CTAlloc(HYPRE_Int, num_types);
   j = 0;
   for (i = 0; i < num_procs; i++)
      if (vec_starts[i+1] - vec_starts[i] && i != my_id)
         used_procs[j++] = i;
 
   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);

   /* initialize data exchange among used_procs and generate vector */

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = vec_starts[proc_id+1] - vec_starts[proc_id];
      hypre_MPI_Irecv(&vector_data[vec_starts[proc_id]], num_vectors*vec_len,
                      hypre_MPI_DOUBLE, proc_id, 0, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, hypre_MPI_DOUBLE,
                      used_procs[i], 0, comm, &requests[j++]);
   }

   /* copy own data directly */
   for (i = 0; i < num_vectors*local_size; i++)
      vector_data[vec_starts[my_id]+i] = local_data[i];

   hypre_MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(used_procs);
      hypre_TFree(requests);
      hypre_TFree(status);
   }

#endif

   return vector;
}
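A usage sketch: the gather yields a full sequential copy on every rank that owns rows, and NULL elsewhere, so callers must handle both cases:

   hypre_Vector *all = hypre_ParVectorToVectorAll(par_v);
   if (all)
   {
      double *data = hypre_VectorData(all);   /* global_size entries */
      /* ... */
      hypre_SeqVectorDestroy(all);
   }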
Example #14
hypre_ParVector *
hypre_ParVectorCreateFromBlock(  MPI_Comm comm,
                                 HYPRE_Int p_global_size, 
                                 HYPRE_Int *p_partitioning, HYPRE_Int block_size)
{
   hypre_ParVector  *vector;
   HYPRE_Int num_procs, my_id, i;
   HYPRE_Int global_size;
   HYPRE_Int *new_partitioning; /* need to create a new partitioning -
                                   don't want to overwrite what is passed in */

   global_size = p_global_size*block_size;

   vector = hypre_CTAlloc(hypre_ParVector, 1);
   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   if (!p_partitioning)
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &new_partitioning);
#else
      hypre_GeneratePartitioning(global_size, num_procs, &new_partitioning);
#endif
   }
   else /* adjust for block_size */
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      new_partitioning = hypre_CTAlloc(HYPRE_Int, 2);
      for(i = 0; i < 2; i++)
      {
         new_partitioning[i] = p_partitioning[i]*block_size;
      }
#else
      new_partitioning = hypre_CTAlloc(HYPRE_Int, num_procs + 1);
      for(i = 0; i < num_procs + 1; i++)
      {
         new_partitioning[i] = p_partitioning[i]*block_size;
      }
#endif
   }

   hypre_ParVectorComm(vector) = comm;
   hypre_ParVectorGlobalSize(vector) = global_size;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(vector) = new_partitioning[0];
   hypre_ParVectorLastIndex(vector) = new_partitioning[1]-1;
   hypre_ParVectorPartitioning(vector) = new_partitioning;
   hypre_ParVectorLocalVector(vector) = 
      hypre_SeqVectorCreate(new_partitioning[1]-new_partitioning[0]);
#else
   hypre_ParVectorFirstIndex(vector) = new_partitioning[my_id];
   hypre_ParVectorLastIndex(vector) = new_partitioning[my_id+1] -1;
   hypre_ParVectorPartitioning(vector) = new_partitioning;
   hypre_ParVectorLocalVector(vector) = 
      hypre_SeqVectorCreate(new_partitioning[my_id+1]-new_partitioning[my_id]);
#endif

   /* set defaults */
   hypre_ParVectorOwnsData(vector) = 1;
   hypre_ParVectorOwnsPartitioning(vector) = 1;

   return vector;
}
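A blocked (nodal) usage sketch with hypothetical names: three unknowns per node over an existing nodal partitioning; the routine scales the global size and every partition entry by block_size:

   hypre_ParVector *bv =
      hypre_ParVectorCreateFromBlock(comm, num_nodes, node_part, 3);
   hypre_ParVectorInitialize(bv);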
Example #15
hypre_ParMultiVector *
hypre_ParMultiVectorTempRead(MPI_Comm comm, const char *fileName)
/* ***** temporary implementation ****** */
{
    HYPRE_Int i, n, id;
    double * dest;
    double * src;
    HYPRE_Int count;
    HYPRE_Int retcode;
    char temp_string[128];
    hypre_ParMultiVector * x;
    hypre_ParVector * temp_vec;

    /* calculate the number of files */
    hypre_MPI_Comm_rank( comm, &id );
    n = 0;
    do {
        hypre_sprintf( temp_string, "test -f %s.%d.%d", fileName, n, id );
        if (!(retcode=system(temp_string))) /* zero retcode means the file exists */
            n++;
    } while (!retcode);

    if ( n == 0 ) return NULL;

    /* now read the first vector using hypre_ParVectorRead into temp_vec */

    hypre_sprintf(temp_string,"%s.%d",fileName,0);
    temp_vec = hypre_ParVectorRead(comm, temp_string);

    /* this vector WON'T own partitioning */
    hypre_ParVectorSetPartitioningOwner(temp_vec,0);

    /* now create multivector using temp_vec as a sample */

    x = hypre_ParMultiVectorCreate(hypre_ParVectorComm(temp_vec),
                                   hypre_ParVectorGlobalSize(temp_vec),hypre_ParVectorPartitioning(temp_vec),n);

    /* this vector WILL own the partitioning */
    hypre_ParMultiVectorSetPartitioningOwner(x,1);

    hypre_ParMultiVectorInitialize(x);

    /* read data from first and all other vectors into "x" */

    i = 0;
    do {
        /* copy data from current vector */
        dest = x->local_vector->data + i*(x->local_vector->size);
        src = temp_vec->local_vector->data;
        count = temp_vec->local_vector->size;

        memcpy(dest,src, count*sizeof(double));

        /* destroy current vector */
        hypre_ParVectorDestroy(temp_vec);

        /* read the data to new current vector, if there are more vectors to read */
        if (i<n-1)
        {
            hypre_sprintf(temp_string,"%s.%d",fileName,i+1);
            temp_vec = hypre_ParVectorRead(comm, temp_string);

        }
    } while (++i<n);

    return x;
}