Example No. 1
int main( int argc, char *argv[] )
{
    int errs = 0;
    int wrank, wsize, mrank, msize, inter_rank;
    int np = 2;
    int errcodes[2];
    int rrank = -1;
    MPI_Comm      parentcomm, intercomm, intercomm2, even_odd_comm, merged_world;
    int can_spawn;

    MTest_Init( &argc, &argv );

    errs += MTestSpawnPossible(&can_spawn);

    if (can_spawn) {
        MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
        MPI_Comm_size( MPI_COMM_WORLD, &wsize );

        if (wsize != 2) {
            printf( "world size != 2, this test will not work correctly\n" );
            errs++;
        }

        MPI_Comm_get_parent( &parentcomm );

        if (parentcomm == MPI_COMM_NULL) {
            MPI_Comm_spawn( (char*)"./spaiccreate2", MPI_ARGV_NULL, np,
                    MPI_INFO_NULL, 0, MPI_COMM_WORLD,
                    &intercomm, errcodes );
        }
        else {
            intercomm = parentcomm;
        }

        MPI_Intercomm_merge( intercomm, (parentcomm == MPI_COMM_NULL ? 0 : 1), &merged_world );
        MPI_Comm_rank( merged_world, &mrank );
        MPI_Comm_size( merged_world, &msize );

        MPI_Comm_split( merged_world, mrank % 2, wrank, &even_odd_comm );

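        /* Build an intercommunicator between the even and odd groups:
           the local leader is rank 0 of even_odd_comm; the remote leader is
           rank (mrank+1)%2 of merged_world, i.e. the lowest rank of the
           opposite parity; 123 is the bridge tag used during setup. */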
        MPI_Intercomm_create( even_odd_comm, 0, merged_world, (mrank + 1) % 2, 123, &intercomm2 );
        MPI_Comm_rank( intercomm2, &inter_rank );

        /* each process exchanges ranks with the same-ranked peer in the remote group */
        MPI_Sendrecv( &inter_rank, 1, MPI_INT, inter_rank, 456,
                &rrank, 1, MPI_INT, inter_rank, 456, intercomm2, MPI_STATUS_IGNORE );
        if (rrank != inter_rank) {
            printf( "Received %d from %d; expected %d\n",
                    rrank, inter_rank, inter_rank );
            errs++;
        }

        MPI_Barrier( intercomm2 );

        MPI_Comm_free( &intercomm );
        MPI_Comm_free( &intercomm2 );
        MPI_Comm_free( &merged_world );
        MPI_Comm_free( &even_odd_comm );

        /* Note that the MTest_Finalize get errs only over COMM_WORLD */
        /* Note also that both the parent and child will generate "No Errors"
           if both call MTest_Finalize */
        if (parentcomm == MPI_COMM_NULL) {
            MTest_Finalize( errs );
        }
    } else {
        MTest_Finalize( errs );
    }

    MPI_Finalize();
    return 0;
}
Example No. 2
/*@C
   PetscHMPISpawn - Initialize additional processes to be used as "worker" processes. This is not generally
     called by users. One should use -hmpi_spawn_size <n> to indicate that you wish to have n-1 new MPI
     processes spawned for each current process.

   Not Collective (could make collective on MPI_COMM_WORLD, generate one huge comm and then split it up)

   Input Parameter:
.  nodesize - size of each compute node that will share processors

   Options Database:
.   -hmpi_spawn_size nodesize

   Notes: This is only supported on systems with an MPI-2 implementation that includes the MPI_Comm_spawn() routine.

$    Comparison of two approaches for HMPI usage (MPI started with N processes)
$
$    -hmpi_spawn_size <n> requires MPI 2, results in n*N total processes with N directly used by application code
$                                           and n-1 worker processes (used by PETSc) for each application node.
$                           You MUST launch MPI so that only ONE MPI process is created for each hardware node.
$
$    -hmpi_merge_size <n> results in N total processes, N/n used by the application code and the rest worker processes
$                            (used by PETSc)
$                           You MUST launch MPI so that n MPI processes are created for each hardware node.
$
$    petscmpiexec -n 2 ./ex1 -hmpi_spawn_size 3 gives 2 application nodes (and 4 PETSc worker nodes)
$    petscmpiexec -n 6 ./ex1 -hmpi_merge_size 3 gives the SAME 2 application nodes and 4 PETSc worker nodes
$       This is what you would use if each of the computer's hardware nodes had 3 CPUs.
$
$      These are intended to be used in conjunction with USER HMPI code. The user will have 1 process per
$   computer (hardware) node (where the computer node has p cpus), the user's code will use threads to fully
$   utilize all the CPUs on the node. The PETSc code will have p processes to fully use the compute node for
$   PETSc calculations. The user THREADS and PETSc PROCESSES will NEVER run at the same time so the p CPUs
$   are always working on p tasks, never more than p.
$
$    See PCHMPI for a PETSc preconditioner that can use this functionality
$

   For both PetscHMPISpawn() and PetscHMPIMerge() PETSC_COMM_WORLD consists of one process per "node", PETSC_COMM_LOCAL_WORLD
   consists of all the processes in a "node."

   In both cases the user's code is running ONLY on PETSC_COMM_WORLD (that was newly generated by running this command).

   Level: developer

   Concepts: HMPI

.seealso: PetscFinalize(), PetscInitializeFortran(), PetscGetArgs(), PetscHMPIFinalize(), PetscInitialize(), PetscHMPIMerge(), PetscHMPIRun()

@*/
PetscErrorCode  PetscHMPISpawn(PetscMPIInt nodesize)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  MPI_Comm       parent,children;

  PetscFunctionBegin;
  ierr = MPI_Comm_get_parent(&parent);CHKERRQ(ierr);
  if (parent == MPI_COMM_NULL) {  /* the original processes started by user */
    char programname[PETSC_MAX_PATH_LEN];
    char **argv;

    ierr = PetscGetProgramName(programname,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
    ierr = PetscGetArguments(&argv);CHKERRQ(ierr);
    ierr = MPI_Comm_spawn(programname,argv,nodesize-1,MPI_INFO_NULL,0,PETSC_COMM_SELF,&children,MPI_ERRCODES_IGNORE);CHKERRQ(ierr);
    ierr = PetscFreeArguments(argv);CHKERRQ(ierr);
    ierr = MPI_Intercomm_merge(children,0,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);

    ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
    ierr = PetscInfo2(0,"PETSc HMPI successfully spawned: number of nodes = %d node size = %d\n",size,nodesize);CHKERRQ(ierr);

    saved_PETSC_COMM_WORLD = PETSC_COMM_WORLD;
  } else { /* worker nodes that get spawned */
    ierr            = MPI_Intercomm_merge(parent,1,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    ierr            = PetscHMPIHandle(PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    PetscHMPIWorker = PETSC_TRUE; /* so that PetscHMPIFinalize() will not attempt a broadcast from this process */
    PetscEnd();  /* cannot continue into user code */
  }
  PetscFunctionReturn(0);
}
Example No. 3
int main(int argc, char *argv[])
{
	int rank;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

	MPI_Comm parentcomm, intercomm;

	/* If child, finalize */
	MPI_Comm_get_parent(&parentcomm);
	if (parentcomm != MPI_COMM_NULL)
	  	goto out;

	/* Set add-host info */
	MPI_Info info;
	MPI_Info_create(&info);
	MPI_Info_set(info,"add-host","grsacc18");

	/* Spawn the children */
	printf("all info set, ready to spawn\n");
	MPI_Comm_spawn("/home/grsprabh/resmgmt/branches/Malleability/workdir/addhosttest",MPI_ARGV_NULL,1,info,0,MPI_COMM_WORLD,&intercomm,MPI_ERRCODES_IGNORE);
	printf("spawn returned successfully\n");

out:
	printf("done\n");
	MPI_Finalize();

	return 0;
}
Example No. 4
int main(int argc, char *argv[])
{
  int myrank, nprocs;
  int n, i;
  double h, s, pi;
  MPI_Comm master;

  MPI_Init(&argc, &argv);

  MPI_Comm_get_parent(&master);
  MPI_Comm_size(master, &nprocs);
  MPI_Comm_rank(master, &myrank);

  MPI_Bcast(&n, 1, MPI_INT, 0, master);

  h = 1.0 / (double) n;
  s = 0.0;
  for (i = myrank+1; i < n+1; i += nprocs) {
    double x = h * (i - 0.5);
    s += 4.0 / (1.0 + x*x);
  }
  pi = s * h;

  MPI_Reduce(&pi, MPI_BOTTOM, 1, MPI_DOUBLE,
             MPI_SUM, 0, master);

  MPI_Comm_disconnect(&master);

  MPI_Finalize();
  return 0;
}
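This worker presumes a parent that spawned it. Below is a minimal sketch of that parent side (the file name "./pi_worker" and the process and interval counts are assumptions): on the intercommunicator the spawning root passes MPI_ROOT to MPI_Bcast and MPI_Reduce, mirroring the worker's calls above.

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
  int n = 10000;                     /* number of intervals (assumed) */
  double pi = 0.0;
  MPI_Comm workers;

  MPI_Init(&argc, &argv);
  MPI_Comm_spawn("./pi_worker", MPI_ARGV_NULL, 4, MPI_INFO_NULL,
                 0, MPI_COMM_SELF, &workers, MPI_ERRCODES_IGNORE);

  /* matches the worker's MPI_Bcast(&n, 1, MPI_INT, 0, master) */
  MPI_Bcast(&n, 1, MPI_INT, MPI_ROOT, workers);

  /* matches the worker's MPI_Reduce(&pi, MPI_BOTTOM, ..., 0, master);
     only the remote group contributes data, the root just receives */
  MPI_Reduce(MPI_BOTTOM, &pi, 1, MPI_DOUBLE, MPI_SUM, MPI_ROOT, workers);
  printf("pi is approximately %.16f\n", pi);

  MPI_Comm_disconnect(&workers);
  MPI_Finalize();
  return 0;
}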
Example No. 5
File: slave.c Project: artpol84/poc
int main(int argc, char *argv[]) 
{ 
    int rank, size;
    int lsize, rsize;
    int grank, gsize;
    MPI_Comm parent, global;
    MPI_Init(&argc, &argv);
    // Local info
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Global info
    MPI_Comm_get_parent(&parent);
    if (parent == MPI_COMM_NULL)
        error("No parent!");
    MPI_Comm_remote_size(parent, &size);

    MPI_Comm_size(parent, &lsize);
    MPI_Comm_remote_size(parent, &rsize);

    MPI_Intercomm_merge(parent, 1, &global);
    MPI_Comm_rank(global, &grank);
    MPI_Comm_size(global, &gsize);
    printf("child %d: lsize=%d, rsize=%d, grank=%d, gsize=%d\n",
           rank, lsize, rsize, grank, gsize);

    MPI_Barrier(global);

    printf("%d: after Barrier\n", grank);

    MPI_Comm_free(&global);

    MPI_Finalize();
    return 0;
} 
Example No. 6
int main(int argc, char *argv[])
{
    char str[10];
    MPI_Comm intercomm1, intracomm, intercomm2;
    int err, errcodes[256], rank;

    MPI_Init(&argc, &argv);

/*    printf("Child out of MPI_Init\n");
    fflush(stdout);
*/
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_get_parent(&intercomm1);

    MPI_Intercomm_merge(intercomm1, 1, &intracomm);

    err = MPI_Comm_spawn("spawn_merge_child2", MPI_ARGV_NULL, 2,
                         MPI_INFO_NULL, 2, intracomm, &intercomm2, errcodes);
    if (err)
        printf("Error in MPI_Comm_spawn\n");

    MPI_Comm_rank(intercomm2, &rank);

    if (rank == 3) {
        err = MPI_Recv(str, 3, MPI_CHAR, 1, 0, intercomm2, MPI_STATUS_IGNORE);
        printf("Parent (first child) received from child 2: %s\n", str);
        fflush(stdout);

        err = MPI_Send("bye", 4, MPI_CHAR, 1, 0, intercomm2);
    }

    MPI_Finalize();
    return 0;
}
Example No. 7
int
main(int argc, char *argv[])
{
    int rank, size;
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Check to see if we *were* spawned -- because this is a test, we
       can only assume the existence of this one executable.  Hence, we
       both mpirun it and spawn it. */

    parent = MPI_COMM_NULL;
    MPI_Comm_get_parent(&parent);
    if (parent != MPI_COMM_NULL) {
        whoami = argv[1];
        do_target(argv, parent);
    } else {
        do_parent(argv, rank, size);
    }

    /* All done */
    MPI_Finalize();
    return 0;
}
Example No. 8
int main(int argc, char* argv[])
{
    int msg;
    MPI_Comm parent, child;
    int rank, size;
    char hostname[512];
    pid_t pid;
    int i;
    char *cmds[2];
    char *argv0[] = { "foo", NULL };
    char *argv1[] = { "bar", NULL };
    char **spawn_argv[2];
    int maxprocs[] = { 1, 1 };
    MPI_Info info[] = { MPI_INFO_NULL, MPI_INFO_NULL };

    cmds[1] = cmds[0] = argv[0];
    spawn_argv[0] = argv0;
    spawn_argv[1] = argv1;

    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_get_parent(&parent);
    /* If we get COMM_NULL back, then we're the parent */
    if (MPI_COMM_NULL == parent) {
        pid = getpid();
        printf("Parent [pid %ld] about to spawn!\n", (long)pid);
        MPI_Comm_spawn_multiple(2, cmds, spawn_argv, maxprocs, 
                                info, 0, MPI_COMM_WORLD,
                                &child, MPI_ERRCODES_IGNORE);
        printf("Parent done with spawn\n");
        if (0 == rank) {
            msg = 38;
            printf("Parent sending message to children\n");
            MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
            MPI_Send(&msg, 1, MPI_INT, 1, 1, child);
        }
        MPI_Comm_disconnect(&child);
        printf("Parent disconnected\n");
    } 
    /* Otherwise, we're the child */
    else {
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        gethostname(hostname, 512);
        pid = getpid();
        printf("Hello from the child %d of %d on host %s pid %ld: argv[1] = %s\n", rank, size, hostname, (long)pid, argv[1]);
        if (0 == rank) {
            MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
            printf("Child %d received msg: %d\n", rank, msg);
        }
        MPI_Comm_disconnect(&parent);
        printf("Child %d disconnected\n", rank);
    }

    MPI_Finalize();
    return 0;
}
Example No. 9
void mpi_comm_get_parent_f(MPI_Fint *parent, MPI_Fint *ierr)
{
    MPI_Comm c_parent;

    *ierr = OMPI_INT_2_FINT(MPI_Comm_get_parent(&c_parent));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        *parent = MPI_Comm_c2f(c_parent);
    }
}
Example No. 10
void Cache::orient(int *argc_ptr, char ***argv_ptr) {
   MPI_ASSERT(argc_ptr != NULL);
   MPI_ASSERT(argv_ptr != NULL);

   int argc = *argc_ptr;
   char **argv = *argv_ptr;

   MPI_ASSERT(argc >= 3);

#ifdef DEBUG
   // Print the argv list for reference
   printf("job_node argc: %d\n", argc);
   for (int i = 0; i < argc; ++i) {
      printf("argv[%d]: %s\n", i, argv[i]);
   }
   //
#endif

   // Get the job_num for this job. 
   char *endptr;
   job_num = strtol(argv[1], &endptr, 10);

   // Grab the job to cache node pairings list. 
   std::string mapping(argv[2]);
   stringlist_to_vector(job_to_cache, mapping);

   // Update argc and argv so things are transparent to the caller.
   // TODO: Ensure this is working properly
   *argc_ptr -= 2;  // two arguments were consumed above
   std::string exec_name(argv[0]);
   // Copy the executable name (including its '\0') into the argv[2] slot,
   // then hand the caller an argv that starts there.
   memcpy(argv[2], exec_name.c_str(), exec_name.size() + 1);
   *argv_ptr = argv + 2;

   // Get details on the world this node lives in.
   MPI_Comm_size(MPI_COMM_WORLD, &local_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &local_rank);

   MPI_Comm_get_parent(&parent_comm);
   MPI_Comm_remote_size(parent_comm, &parent_size);
   MPI_Comm_rank(parent_comm, &parent_rank);

   // Get coordinator cache node's rank for this job node.
   coord_rank = job_to_cache[local_rank];

#ifdef DEBUG
   printf("Job node: local rank - %d/%d parent rank - %d/%d\n", local_rank,
         local_size, parent_rank, parent_size);

   printf("Job Num: %d Job node %d: team cache node: %d\n", job_num, local_rank,
         coord_rank);
#endif
}
Example No. 11
int main(int argc, char* argv[])
{
    int msg, rc;
    MPI_Comm parent, child;
    int rank, size;
    char hostname[512];
    pid_t pid;

    pid = getpid();
    printf("[pid %ld] starting up!\n", (long)pid);
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    printf("%d completed MPI_Init\n", rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_get_parent(&parent);
    /* If we get COMM_NULL back, then we're the parent */
    if (MPI_COMM_NULL == parent) {
        pid = getpid();
        printf("Parent [pid %ld] about to spawn!\n", (long)pid);
        if (MPI_SUCCESS != (rc = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 3, MPI_INFO_NULL, 
                                                0, MPI_COMM_WORLD, &child, MPI_ERRCODES_IGNORE))) {
            printf("Child failed to spawn\n");
            return rc;
        }
        printf("Parent done with spawn\n");
        if (0 == rank) {
            msg = 38;
            printf("Parent sending message to child\n");
            MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
        }
        MPI_Comm_disconnect(&child);
        printf("Parent disconnected\n");
    } 
    /* Otherwise, we're the child */
    else {
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        gethostname(hostname, 512);
        pid = getpid();
        printf("Hello from the child %d of %d on host %s pid %ld\n", rank, 3, hostname, (long)pid);
        if (0 == rank) {
            MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
            printf("Child %d received msg: %d\n", rank, msg);
        }
        MPI_Comm_disconnect(&parent);
        printf("Child %d disconnected\n", rank);
    }

    MPI_Finalize();
    fprintf(stderr, "%ld: exiting\n", (long)pid);
    return 0;
}
Example No. 12
/* Note: CR_SendVectorToPA is translated to "cr_sendvectortopa__"
 *  for Fortran compatibility */
void CR_SendVectorToPA (int *ib, int *ia, double *work, int *mb){
	MPI_Datatype GEMAT;
	MPI_Comm parent;

	MPI_Comm_get_parent (&parent);

	MPI_Type_vector(*ia, *ib, *ib, MPI_DOUBLE,&GEMAT);
	MPI_Type_commit (&GEMAT);

	MPI_Send ( work, 1, GEMAT, 0, 15000 , parent);

	MPI_Type_free (&GEMAT);
}
Example No. 13
/* Note:  CR_RecvVectorFromPA is translated to "cr_recvvectorfrompa__" 
 * for Fortran compatibility */
void CR_RecvVectorFromPA( int *ib, int *ia, double *A, int *mb ){
	MPI_Datatype GEMAT;
	MPI_Comm parent;

	MPI_Comm_get_parent (&parent);

	MPI_Type_vector(*ia, *ib, *ib, MPI_DOUBLE,&GEMAT);
	MPI_Type_commit (&GEMAT);

	MPI_Recv (A, 1, GEMAT, 0, 5000, parent, MPI_STATUS_IGNORE);

	MPI_Type_free (&GEMAT);
}
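In both helpers above, MPI_Type_vector(*ia, *ib, *ib, MPI_DOUBLE, &GEMAT) uses a stride equal to the block length, so GEMAT effectively describes *ia x *ib contiguous doubles; the derived type mainly records the matrix shape. A minimal sketch of the parent-side receive that would match CR_SendVectorToPA follows (the name PA_RecvVectorFromCR and the out-of-band agreement on dimensions are assumptions):

#include <mpi.h>

/* Hypothetical parent-side counterpart to CR_SendVectorToPA; ia, ib and
 * the sending child's rank are assumed to be agreed upon out of band. */
void PA_RecvVectorFromCR(int ib, int ia, double *work, int child_rank,
                         MPI_Comm children)
{
	MPI_Datatype GEMAT;

	MPI_Type_vector(ia, ib, ib, MPI_DOUBLE, &GEMAT);
	MPI_Type_commit(&GEMAT);

	/* tag 15000 matches the child's MPI_Send */
	MPI_Recv(work, 1, GEMAT, child_rank, 15000, children, MPI_STATUS_IGNORE);

	MPI_Type_free(&GEMAT);
}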
Example No. 14
int
main (int argc, char **argv)
{
  int rank;
  char buffer[1];
  MPI_Comm parent;

  MPI_Init(&argc, &argv);

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_get_parent(&parent);

  printf("[slave (%i)] starting up, sleeping...\n", rank);
  sleep(5);
  printf("[slave (%i)] done sleeping, signalling master\n", rank);
  MPI_Send(buffer, 1, MPI_CHAR, 0, 1, parent);

  MPI_Finalize();
  return 0;
}
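A minimal sketch of the master side this slave expects, assuming the executable name "./slave" and a slave count of 4: the master spawns the slaves and blocks until each one sends its 1-byte completion signal (tag 1, matching the MPI_Send above).

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
  int i, nslaves = 4;                /* assumed slave count */
  char buffer[1];
  MPI_Comm slaves;

  MPI_Init(&argc, &argv);
  MPI_Comm_spawn("./slave", MPI_ARGV_NULL, nslaves, MPI_INFO_NULL,
                 0, MPI_COMM_SELF, &slaves, MPI_ERRCODES_IGNORE);

  for (i = 0; i < nslaves; i++) {
    MPI_Recv(buffer, 1, MPI_CHAR, MPI_ANY_SOURCE, 1, slaves,
             MPI_STATUS_IGNORE);
    printf("[master] a slave signalled completion\n");
  }

  MPI_Comm_disconnect(&slaves);
  MPI_Finalize();
  return 0;
}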
Example No. 15
/**
 * Sends the average MSE achieved by the network instance at the specified
 * position in the vector of child communicators, for the MPI scheme
 *
 * Parameters:
 * 		int	pos_hijo			- Integer with the position in the vector of child communicators
 * 		double err				- Real with the value of the average MSE
 *
 * Output
 * 		int 					- Integer with value 1 if the send succeeded, 0 otherwise
 */
int enviar_error_mpi(int pos_hijo, double err){
	//Variables
	int result = 0;			//Indicates whether the data was sent correctly

	//Only done if prepared beforehand and using the MPI scheme
	if (preparado_ && esquema_ == MPI){
		#ifdef HAVE_MPI
			MPI_Comm padre;			//Communicator for message passing between the slave process and the parent master process

			//Get the communicator to the parent
			MPI_Comm_get_parent(&padre);

			//Successful send of the average MSE
			if (MPI_Send(&err, 1, MPI_DOUBLE, 0, 1, padre) == MPI_SUCCESS)
				result = 1;
		#endif
	}

	//Result
	return result;
}
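A minimal sketch of the matching receive on the master side, assuming the master keeps the child intercommunicators in a vector; the name recibir_error_mpi and the variable hijos_ are hypothetical, and tag 1 matches the child's MPI_Send:

//Hypothetical master-side counterpart to enviar_error_mpi(): receives the
//average MSE from the child behind the intercommunicator at pos_hijo
int recibir_error_mpi(MPI_Comm *hijos_, int pos_hijo, double *err){
	//Returns 1 on success, 0 otherwise, mirroring the child-side convention
	if (MPI_Recv(err, 1, MPI_DOUBLE, MPI_ANY_SOURCE, 1, hijos_[pos_hijo],
			MPI_STATUS_IGNORE) == MPI_SUCCESS)
		return 1;
	return 0;
}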
Example No. 16
/* Receive - I/P Data dimensions and Process Grid Specifications from the parent
* 1. No. of rows in matrix A
* 2. No. of cols in matrix A
* 3. No. of rows in matrix B
* 4. No. of cols in matrix B
* 5. MB - Row Block size for matrix A
* 6. NB - Col Block size for matrix A
* 7. NPROW - Number of Process rows in the Process Grid - Row Block Size
* 8. NPCOL - Number of Process cols in the Process Grid - Col Block Size
* 9. Function id
* 10. Release Flag
*/
int CR_GetInputParams(MPI_Comm mcParent, int *ipGridAndDims) {

	MPI_Comm parent;


	if (MPI_Comm_get_parent(&parent) != MPI_SUCCESS) {
		Rprintf("ERROR[2]: Getting Parent Comm ... FAILED .. EXITING !!\n");
		return AsInt(2);
	}

	if(MPI_Intercomm_merge(parent, 1, &intercomm)!= MPI_SUCCESS)
		return -1;

	if(MPI_Bcast(ipGridAndDims,10, MPI_INT, 0, intercomm) != MPI_SUCCESS) 
	{
		D_Rprintf(("Child: Broadcast error\n"));
		return -2;
	}
	else
	{
		return 0;
	}
}
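A minimal sketch of the parent-side counterpart (the name PA_SendInputParams is hypothetical, and a single spawning process is assumed): the parent merges the same intercommunicator with high = 0, so it becomes rank 0 of the merged communicator and can act as the broadcast root the children expect.

/* Hypothetical parent-side counterpart to CR_GetInputParams */
int PA_SendInputParams(MPI_Comm children, int *ipGridAndDims) {

	MPI_Comm merged;

	if (MPI_Intercomm_merge(children, 0, &merged) != MPI_SUCCESS)
		return -1;

	/* the same 10-element broadcast the children participate in */
	if (MPI_Bcast(ipGridAndDims, 10, MPI_INT, 0, merged) != MPI_SUCCESS)
		return -2;

	MPI_Comm_free(&merged);
	return 0;
}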
Example No. 17
int main( int argc, char *argv[] )
{
    MPI_Comm intercomm;
    char str[10];
    int err, rank;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_get_parent(&intercomm);

    if (rank == 3){
        err = MPI_Send("hi", 3, MPI_CHAR, 3, 0, intercomm);
        
        err = MPI_Recv(str, 4, MPI_CHAR, 3, 0, intercomm, MPI_STATUS_IGNORE);
        printf("Child received from parent: %s\n", str);
        fflush(stdout);
    }

    MPI_Finalize();
    return 0;
}
Example No. 18
/**
 * Entry point of the worker process.
 */
int main(int argc, char **argv) {
    int rank, size;
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);

    if (parent == MPI_COMM_NULL)
        error("No parent.");

    MPI_Comm_remote_size(parent, &size);
    if (size != 1)
        error("Something wrong with parent.");

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    printf("I am worker number %d of %d.\n", rank, size);

    MPI_Finalize();

    return 0;
}
Example No. 19
/**
 * Main loop of the worker processes.
 */
int main(int argc, char **argv) {
	int rank, size, darts, hits = 0, all_hits = 0;
	MPI_Comm parent;

	/* Init process and get some important info. */
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_get_parent(&parent);

	/* Seed the rand function, get the passed number of rounds. */
	srand(time(NULL) + rank*rank*rank);
	darts = atoi(*++argv);

	printf("%d of %d will now throw %d darts.\n", rank, size, darts);
	hits = throw_darts(darts);

	/* Reduce data back to master. */
	MPI_Reduce(&hits, &all_hits, 1, MPI_INT, MPI_SUM, 0, parent);

	MPI_Finalize();

	return 0;
}
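A minimal sketch of the master this worker expects (the file name "./worker", the worker count, and the dart count are assumptions): the master passes the dart count via argv and joins the intercommunicator reduce from the other side, where the designated root supplies MPI_ROOT.

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
	int all_hits = 0;
	char *args[] = { (char *)"1000", NULL };   /* darts per worker (assumed) */
	MPI_Comm workers;

	MPI_Init(&argc, &argv);
	MPI_Comm_spawn("./worker", args, 4, MPI_INFO_NULL,
	               0, MPI_COMM_SELF, &workers, MPI_ERRCODES_IGNORE);

	/* matches MPI_Reduce(&hits, &all_hits, ..., 0, parent) in the worker */
	MPI_Reduce(MPI_BOTTOM, &all_hits, 1, MPI_INT, MPI_SUM, MPI_ROOT, workers);
	printf("total hits: %d\n", all_hits);

	MPI_Comm_disconnect(&workers);
	MPI_Finalize();
	return 0;
}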
Example No. 20
int main(int argc, char *argv[])
{
    int error;
    int rank, size;
    char *argv1[2] = { (char*)"connector", NULL };
    char *argv2[2] = { (char*)"acceptor", NULL };
    MPI_Comm comm_connector, comm_acceptor, comm_parent, comm;
    char port[MPI_MAX_PORT_NAME];
    MPI_Status status;
    MPI_Info spawn_path = MPI_INFO_NULL;
    int verbose = 0;

    if (getenv("MPITEST_VERBOSE"))
    {
	verbose = 1;
    }

    IF_VERBOSE(("init.\n"));
    error = MPI_Init(&argc, &argv);
    check_error(error, "MPI_Init");

    /* To improve reporting of problems about operations, we
       change the error handler to errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    MPI_Comm_set_errhandler( MPI_COMM_SELF, MPI_ERRORS_RETURN );

    IF_VERBOSE(("size.\n"));
    error = MPI_Comm_size(MPI_COMM_WORLD, &size);
    check_error(error, "MPI_Comm_size");

    IF_VERBOSE(("rank.\n"));
    error = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    check_error(error, "MPI_Comm_rank");

    if (argc == 1)
    {
	/* Make sure that the current directory is in the path.
	   Not all implementations may honor or understand this, but
	   it is highly recommended as it gives users a clean way
	   to specify the location of the executable without
	   specifying a particular directory format (e.g., this 
	   should work with both Windows and Unix implementations) */
	error = MPI_Info_create( &spawn_path );
	check_error( error, "MPI_Info_create" );
	error = MPI_Info_set( spawn_path, (char*)"path", (char*)"." );
	check_error( error, "MPI_Info_set" );

	IF_VERBOSE(("spawn connector.\n"));
	error = MPI_Comm_spawn((char*)"spaconacc", argv1, 1, spawn_path, 0, 
			       MPI_COMM_SELF, &comm_connector, 
			       MPI_ERRCODES_IGNORE);
	check_error(error, "MPI_Comm_spawn");

	IF_VERBOSE(("spawn acceptor.\n"));
	error = MPI_Comm_spawn((char*)"spaconacc", argv2, 1, spawn_path, 0, 
			       MPI_COMM_SELF, &comm_acceptor, 
			       MPI_ERRCODES_IGNORE);
	check_error(error, "MPI_Comm_spawn");
	error = MPI_Info_free( &spawn_path );
	check_error( error, "MPI_Info_free" );

	MPI_Comm_set_errhandler( comm_connector, MPI_ERRORS_RETURN );
	MPI_Comm_set_errhandler( comm_acceptor, MPI_ERRORS_RETURN );

	IF_VERBOSE(("recv port.\n"));
	error = MPI_Recv(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, 
			 comm_acceptor, &status);
	check_error(error, "MPI_Recv");

	IF_VERBOSE(("send port.\n"));
	error = MPI_Send(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, 
			 comm_connector);
	check_error(error, "MPI_Send");

	IF_VERBOSE(("barrier acceptor.\n"));
	error = MPI_Barrier(comm_acceptor);
	check_error(error, "MPI_Barrier");

	IF_VERBOSE(("barrier connector.\n"));
	error = MPI_Barrier(comm_connector);
	check_error(error, "MPI_Barrier");

        error = MPI_Comm_free(&comm_acceptor);
	check_error(error, "MPI_Comm_free");
        error = MPI_Comm_free(&comm_connector);
	check_error(error, "MPI_Comm_free");

	printf(" No Errors\n");
    }
    else if ((argc == 2) && (strcmp(argv[1], "acceptor") == 0))
    {
	IF_VERBOSE(("get_parent.\n"));
	error = MPI_Comm_get_parent(&comm_parent);
	check_error(error, "MPI_Comm_get_parent");
	if (comm_parent == MPI_COMM_NULL)
	{
	    printf("acceptor's parent is NULL.\n");fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, -1);
	}
	IF_VERBOSE(("open_port.\n"));
	error = MPI_Open_port(MPI_INFO_NULL, port);
	check_error(error, "MPI_Open_port");

	MPI_Comm_set_errhandler( comm_parent, MPI_ERRORS_RETURN );

	IF_VERBOSE(("0: opened port: <%s>\n", port));
	IF_VERBOSE(("send.\n"));
	error = MPI_Send(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, comm_parent);
	check_error(error, "MPI_Send");

	IF_VERBOSE(("accept.\n"));
	error = MPI_Comm_accept(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, &comm);
	check_error(error, "MPI_Comm_accept");

	IF_VERBOSE(("close_port.\n"));
	error = MPI_Close_port(port);
	check_error(error, "MPI_Close_port");

	IF_VERBOSE(("disconnect.\n"));
	error = MPI_Comm_disconnect(&comm);
	check_error(error, "MPI_Comm_disconnect");

	IF_VERBOSE(("barrier.\n"));
	error = MPI_Barrier(comm_parent);
	check_error(error, "MPI_Barrier");

	MPI_Comm_free( &comm_parent );
    }
    else if ((argc == 2) && (strcmp(argv[1], "connector") == 0))
    {
	IF_VERBOSE(("get_parent.\n"));
	error = MPI_Comm_get_parent(&comm_parent);
	check_error(error, "MPI_Comm_get_parent");
	if (comm_parent == MPI_COMM_NULL)
	{
	    printf("connector's parent is NULL.\n");fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, -1);
	}

	MPI_Comm_set_errhandler( comm_parent, MPI_ERRORS_RETURN );
	IF_VERBOSE(("recv.\n"));
	error = MPI_Recv(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, 
			 comm_parent, &status);
	check_error(error, "MPI_Recv");

	IF_VERBOSE(("1: received port: <%s>\n", port));
	IF_VERBOSE(("connect.\n"));
	error = MPI_Comm_connect(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, &comm);
	check_error(error, "MPI_Comm_connect");

	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
	IF_VERBOSE(("disconnect.\n"));
	error = MPI_Comm_disconnect(&comm);
	check_error(error, "MPI_Comm_disconnect");

	IF_VERBOSE(("barrier.\n"));
	error = MPI_Barrier(comm_parent);
	check_error(error, "MPI_Barrier");

	MPI_Comm_free( &comm_parent );
    }
    else
    {
	printf("invalid command line.\n");fflush(stdout);
	{
	    int i;
	    for (i=0; i<argc; i++)
	    {
		printf("argv[%d] = <%s>\n", i, argv[i]);
	    }
	}
	fflush(stdout);
	MPI_Abort(MPI_COMM_WORLD, -2);
    }

    MPI_Finalize();
    return 0;
}
Example No. 21
int
main(int ac, char *av[])
{
	int  rank, size;
	char name[MPI_MAX_PROCESSOR_NAME];
	int  nameLen;
	int  n = 1, i;
	int  slave = 0;
	int  *errs;
	char *args[] = { "-W", NULL};
	MPI_Comm intercomm, icomm;
	int  err;
	char *line;
	char  *buff;
	int   buffSize;
	int   one_int;
	char  who[1024];

	memset(name, 0, sizeof(name));

	for(i=1; i<ac; i++){
		if (av[i] == NULL)
			continue;

		if (strcmp(av[i],"-W") == 0){
			slave = 1;
		}
		else if (strcmp(av[i],"-n") == 0){
			n = atoi(av[i+1]);
			av[i+1] = NULL;
		}
	}

	if (n <= 0){
		fprintf(stderr, "n=%d has an illegal value.\n", n);
		return -1;
	}

	sprintf(who, "%s[%d]", slave? " slave": "master", getpid());
	if (!slave)
		printf("Generating %d slave processes\n", n);


	errs = (int *)alloca(sizeof(int)*n);

	fprintf(stderr, "%s before MPI_Init()\n", who);
	MPI_Init(&ac, &av);
	fprintf(stderr, "%s after MPI_Init()\n", who);

	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Get_processor_name(name, &nameLen);
	sprintf(&who[strlen(who)], " (%s) %d/%d", name, rank, size);
	fprintf(stderr, "%s\n", who);


	if (!slave){
		err = MPI_Comm_spawn(av[0], args, n, MPI_INFO_NULL, 0, MPI_COMM_SELF, &intercomm, errs);
		if (err){
			fprintf(stderr, "MPI_Comm_spawn generated error %d.\n", err);
		}
	}
	else {
		fprintf(stderr, "%s before MPI_Comm_get_parent()\n", who);
		MPI_Comm_get_parent(&intercomm);
	}

	fprintf(stderr, "%s before MPI_Bcast()\n", who);
	MPI_Bcast(&one_int, 1, MPI_INT, 0, intercomm);
	fprintf(stderr, "%s after MPI_Bcast()\n", who);

	fprintf(stderr, "%s before MPI_Barrier()\n", who);
	MPI_Barrier(intercomm);
	fprintf(stderr, "%s after MPI_Barrier()\n", who);

	fprintf(stderr, "%s before MPI_Finalize()\n", who);
	MPI_Finalize();

	return 0;
}
Example No. 22
int main(int argc, char *argv[])
{
    complex_t coord_point, julia_constant;
    double x_max, x_min, y_max, y_min, x_resolution, y_resolution;
    double divergent_limit;
    char file_message[160];
    char filename[100];
    int icount, imax_iterations;
    int ipixels_across, ipixels_down;
    int i, j, k, julia, alternate_equation;
    int imin, imax, jmin, jmax;
    int work[5];
    header_t *result = NULL;
    /* make an integer array of size [N x M] to hold answers. */
    int *in_grid_array, *out_grid_array = NULL;
    int numprocs;
    int  namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int num_colors;
    color_t *colors = NULL;
    MPI_Status status;
    int listener;
    int save_image = 0;
    int optval;
    int num_children = DEFAULT_NUM_SLAVES;
    int master = 1;
    MPI_Comm parent, *child_comm = NULL;
    MPI_Request *child_request = NULL;
    int error_code, error;
    char error_str[MPI_MAX_ERROR_STRING];
    int length;
    int index;
    int pid;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_get_parent(&parent);
    MPI_Get_processor_name(processor_name, &namelen);

    pid = getpid();

    if (parent == MPI_COMM_NULL)
    {
	if (numprocs > 1)
	{
	    printf("Error: only one process allowed for the master.\n");
	    PrintUsage();
	    error_code = MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(error_code);
	}

	printf("Welcome to the Mandelbrot/Julia set explorer.\n");

	master = 1;

	/* Get inputs-- region to view (must be within x/ymin to x/ymax, make sure
	xmax>xmin and ymax>ymin) and resolution (number of pixels along an edge,
	N x M, i.e. 256x256)
	*/

	read_mand_args(argc, argv, &imax_iterations, &ipixels_across, &ipixels_down,
	    &x_min, &x_max, &y_min, &y_max, &julia, &julia_constant.real,
	    &julia_constant.imaginary, &divergent_limit,
	    &alternate_equation, filename, &num_colors, &use_stdin, &save_image,
	    &num_children);
	check_mand_params(&imax_iterations, &ipixels_across, &ipixels_down,
	    &x_min, &x_max, &y_min, &y_max, &divergent_limit, &num_children);

	if (julia == 1) /* we're doing a julia figure */
	    check_julia_params(&julia_constant.real, &julia_constant.imaginary);

	/* spawn slaves */
	child_comm = (MPI_Comm*)malloc(num_children * sizeof(MPI_Comm));
	child_request = (MPI_Request*)malloc(num_children * sizeof(MPI_Request));
	result = (header_t*)malloc(num_children * sizeof(header_t));
	if (child_comm == NULL || child_request == NULL || result == NULL)
	{
	    printf("Error: unable to allocate an array of %d communicators, requests and work objects for the slaves.\n", num_children);
	    error_code = MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(error_code);
	}
	printf("Spawning %d slaves.\n", num_children);
	for (i=0; i<num_children; i++)
	{
	    error = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 1,
		MPI_INFO_NULL, 0, MPI_COMM_WORLD, &child_comm[i], &error_code);
	    if (error != MPI_SUCCESS)
	    {
		error_str[0] = '\0';
		length = MPI_MAX_ERROR_STRING;
		MPI_Error_string(error, error_str, &length);
		printf("Error: MPI_Comm_spawn failed: %s\n", error_str);
		error_code = MPI_Abort(MPI_COMM_WORLD, -1);
		exit(error_code);
	    }
	}

	/* send out parameters */
	for (i=0; i<num_children; i++)
	{
	    MPI_Send(&num_colors, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&imax_iterations, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&ipixels_across, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&ipixels_down, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&divergent_limit, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
	    MPI_Send(&julia, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&julia_constant.real, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
	    MPI_Send(&julia_constant.imaginary, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
	    MPI_Send(&alternate_equation, 1, MPI_INT, 0, 0, child_comm[i]);
	}
    }
    else
    {
	master = 0;
	MPI_Recv(&num_colors, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&imax_iterations, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&ipixels_across, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&ipixels_down, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&divergent_limit, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&julia, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&julia_constant.real, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&julia_constant.imaginary, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&alternate_equation, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
    }

    if (master)
    {
	colors = malloc((num_colors+1)* sizeof(color_t));
	if (colors == NULL)
	{
	    MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(-1);
	}
	Make_color_array(num_colors, colors);
	colors[num_colors] = 0; /* add one on the top to avoid edge errors */
    }

    /* allocate memory */
    if ( (in_grid_array = (int *)calloc(ipixels_across * ipixels_down, sizeof(int))) == NULL)
    {
	printf("Memory allocation failed for data array, aborting.\n");
	MPI_Abort(MPI_COMM_WORLD, -1);
	exit(-1);
    }

    if (master)
    {
	int istep, jstep;
	int i1[400], i2[400], j1[400], j2[400];
	int ii, jj;
	struct sockaddr_in addr;
	int len;
	char line[1024], *token;

	if ( (out_grid_array = (int *)calloc(ipixels_across * ipixels_down, sizeof(int))) == NULL)
	{
	    printf("Memory allocation failed for data array, aborting.\n");
	    MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(-1);
	}

	srand(getpid());

	if (!use_stdin)
	{
	    addr.sin_family = AF_INET;
	    addr.sin_addr.s_addr = INADDR_ANY;
	    addr.sin_port = htons(DEFAULT_PORT);

	    listener = socket(AF_INET, SOCK_STREAM, 0);
	    if (listener == -1)
	    {
		printf("unable to create a listener socket.\n");
		MPI_Abort(MPI_COMM_WORLD, -1);
		exit(-1);
	    }
	    if (bind(listener, &addr, sizeof(addr)) == -1)
	    {
		addr.sin_port = 0;
		if (bind(listener, &addr, sizeof(addr)) == -1)
		{
		    printf("unable to create a listener socket.\n");
		    MPI_Abort(MPI_COMM_WORLD, -1);
		    exit(-1);
		}
	    }
	    if (listen(listener, 1) == -1)
	    {
		printf("unable to listen.\n");
		MPI_Abort(MPI_COMM_WORLD, -1);
		exit(-1);
	    }
	    len = sizeof(addr);
	    getsockname(listener, &addr, &len);
	    
	    printf("%s listening on port %d\n", processor_name, ntohs(addr.sin_port));
	    fflush(stdout);

	    sock = accept(listener, NULL, NULL);
	    if (sock == -1)
	    {
		printf("unable to accept a socket connection.\n");
		MPI_Abort(MPI_COMM_WORLD, -1);
		exit(-1);
	    }
	    printf("accepted connection from visualization program.\n");
	    fflush(stdout);

#ifdef HAVE_WINDOWS_H
	    optval = 1;
	    setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *)&optval, sizeof(optval));
#endif

	    printf("sending image size to visualizer.\n");
	    sock_write(sock, &ipixels_across, sizeof(int));
	    sock_write(sock, &ipixels_down, sizeof(int));
	    sock_write(sock, &num_colors, sizeof(int));
	    sock_write(sock, &imax_iterations, sizeof(int));
	}

	for (;;)
	{
	    /* get x_min, x_max, y_min, and y_max */
	    if (use_stdin)
	    {
		printf("input xmin ymin xmax ymax max_iter, (0 0 0 0 0 to quit):\n");fflush(stdout);
		fgets(line, 1024, stdin);
		printf("read <%s> from stdin\n", line);fflush(stdout);
		token = strtok(line, " \n");
		x_min = atof(token);
		token = strtok(NULL, " \n");
		y_min = atof(token);
		token = strtok(NULL, " \n");
		x_max = atof(token);
		token = strtok(NULL, " \n");
		y_max = atof(token);
		token = strtok(NULL, " \n");
		imax_iterations = atoi(token);
		/*sscanf(line, "%g %g %g %g", &x_min, &y_min, &x_max, &y_max);*/
		/*scanf("%g %g %g %g", &x_min, &y_min, &x_max, &y_max);*/
	    }
	    else
	    {
		printf("reading xmin,ymin,xmax,ymax.\n");fflush(stdout);
		sock_read(sock, &x_min, sizeof(double));
		sock_read(sock, &y_min, sizeof(double));
		sock_read(sock, &x_max, sizeof(double));
		sock_read(sock, &y_max, sizeof(double));
		sock_read(sock, &imax_iterations, sizeof(int));
	    }
	    printf("x0,y0 = (%f, %f) x1,y1 = (%f,%f) max_iter = %d\n", x_min, y_min, x_max, y_max, imax_iterations);fflush(stdout);

	    /*printf("sending the limits: (%f,%f)(%f,%f)\n", x_min, y_min, x_max, y_max);fflush(stdout);*/
	    /* let everyone know the limits */
	    for (i=0; i<num_children; i++)
	    {
		MPI_Send(&x_min, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&x_max, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&y_min, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&y_max, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&imax_iterations, 1, MPI_INT, 0, 0, child_comm[i]);
	    }

	    /* check for the end condition */
	    if (x_min == x_max && y_min == y_max)
	    {
		/*printf("root bailing.\n");fflush(stdout);*/
		break;
	    }

	    /* break the work up into 400 pieces */
	    istep = ipixels_across / 20;
	    jstep = ipixels_down / 20;
	    if (istep < 1)
		istep = 1;
	    if (jstep < 1)
		jstep = 1;
	    k = 0;
	    for (i=0; i<20; i++)
	    {
		for (j=0; j<20; j++)
		{
		    i1[k] = MIN(istep * i, ipixels_across - 1);
		    i2[k] = MIN((istep * (i+1)) - 1, ipixels_across - 1);
		    j1[k] = MIN(jstep * j, ipixels_down - 1);
		    j2[k] = MIN((jstep * (j+1)) - 1, ipixels_down - 1);
		    k++;
		}
	    }

	    /* shuffle the work */
	    for (i=0; i<500; i++)
	    {
		ii = rand() % 400;
		jj = rand() % 400;
		swap(&i1[ii], &i1[jj]);
		swap(&i2[ii], &i2[jj]);
		swap(&j1[ii], &j1[jj]);
		swap(&j2[ii], &j2[jj]);
	    }

	    /* send a piece of work to each worker (there must be more work than workers) */
	    k = 0;
	    for (i=0; i<num_children; i++)
	    {
		work[0] = k+1;
		work[1] = i1[k]; /* imin */
		work[2] = i2[k]; /* imax */
		work[3] = j1[k]; /* jmin */
		work[4] = j2[k]; /* jmax */

		/*printf("sending work(%d) to %d\n", k+1, i);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 100, child_comm[i]);
		MPI_Irecv(&result[i], 5, MPI_INT, 0, 200, child_comm[i], &child_request[i]);
		k++;
	    }
	    /* receive the results and hand out more work until the image is complete */
	    while (k<400)
	    {
		MPI_Waitany(num_children, child_request, &index, &status);
		memcpy(work, &result[index], 5 * sizeof(int));
		/*printf("master receiving data in k<400 loop.\n");fflush(stdout);*/
		MPI_Recv(in_grid_array, (work[2] + 1 - work[1]) * (work[4] + 1 - work[3]), MPI_INT, 0, 201, child_comm[index], &status);
		/* draw data */
		output_data(in_grid_array, &work[1], out_grid_array, ipixels_across, ipixels_down);
		work[0] = k+1;
		work[1] = i1[k];
		work[2] = i2[k];
		work[3] = j1[k];
		work[4] = j2[k];
		/*printf("sending work(%d) to %d\n", k+1, index);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 100, child_comm[index]);
		MPI_Irecv(&result[index], 5, MPI_INT, 0, 200, child_comm[index], &child_request[index]);
		k++;
	    }
	    /* receive the last pieces of work */
	    /* and tell everyone to stop */
	    for (i=0; i<num_children; i++)
	    {
		MPI_Wait(&child_request[i], &status);
		memcpy(work, &result[i], 5 * sizeof(int));
		/*printf("master receiving data in tail loop.\n");fflush(stdout);*/
		MPI_Recv(in_grid_array, (work[2] + 1 - work[1]) * (work[4] + 1 - work[3]), MPI_INT, 0, 201, child_comm[i], &status);
		/* draw data */
		output_data(in_grid_array, &work[1], out_grid_array, ipixels_across, ipixels_down);
		work[0] = 0;
		work[1] = 0;
		work[2] = 0;
		work[3] = 0;
		work[4] = 0;
		/*printf("sending %d to tell %d to stop\n", work[0], i);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 100, child_comm[i]);
	    }

	    /* tell the visualizer the image is done */
	    if (!use_stdin)
	    {
		work[0] = 0;
		work[1] = 0;
		work[2] = 0;
		work[3] = 0;
		sock_write(sock, work, 4 * sizeof(int));
	    }
	}
    }
    else
    {
	for (;;)
	{
	    /*printf("slave[%d] receiving bounds.\n", pid);fflush(stdout);*/
	    MPI_Recv(&x_min, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&x_max, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&y_min, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&y_max, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&imax_iterations, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	    /*printf("slave[%d] received bounding box: (%f,%f)(%f,%f)\n", pid, x_min, y_min, x_max, y_max);fflush(stdout);*/

	    /* check for the end condition */
	    if (x_min == x_max && y_min == y_max)
	    {
		/*printf("slave[%d] done.\n", pid);fflush(stdout);*/
		break;
	    }

	    x_resolution = (x_max-x_min)/ ((double)ipixels_across);
	    y_resolution = (y_max-y_min)/ ((double)ipixels_down);

	    MPI_Recv(work, 5, MPI_INT, 0, 100, parent, &status);
	    /*printf("slave[%d] received work: %d, (%d,%d)(%d,%d)\n", pid, work[0], work[1], work[2], work[3], work[4]);fflush(stdout);*/
	    while (work[0] != 0)
	    {
		imin = work[1];
		imax = work[2];
		jmin = work[3];
		jmax = work[4];

		k = 0;
		for (j=jmin; j<=jmax; ++j)
		{
		    coord_point.imaginary = y_max - j*y_resolution; /* go top to bottom */

		    for (i=imin; i<=imax; ++i)
		    {
			/* Call Mandelbrot routine for each code, fill array with number of iterations. */

			coord_point.real = x_min + i*x_resolution; /* go left to right */
			if (julia == 1)
			{
			    /* doing Julia set */
			    /* julia eq:  z = z^2 + c, z_0 = grid coordinate, c = constant */
			    icount = single_mandelbrot_point(coord_point, julia_constant, imax_iterations, divergent_limit);
			}
			else if (alternate_equation == 1)
			{
			    /* doing experimental form 1 */
			    icount = subtractive_mandelbrot_point(coord_point, julia_constant, imax_iterations, divergent_limit);
			}
			else if (alternate_equation == 2)
			{
			    /* doing experimental form 2 */
			    icount = additive_mandelbrot_point(coord_point, julia_constant, imax_iterations, divergent_limit);
			}
			else
			{
			    /* default to doing Mandelbrot set */
			    /* mandelbrot eq: z = z^2 + c, z_0 = c, c = grid coordinate */
			    icount = single_mandelbrot_point(coord_point, coord_point, imax_iterations, divergent_limit);
			}
			in_grid_array[k] = icount;
			++k;
		    }
		}
		/* send the result to the root */
		/*printf("slave[%d] sending work %d back.\n", pid, work[0]);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 200, parent);
		/*printf("slave[%d] sending work %d data.\n", pid, work[0]);fflush(stdout);*/
		MPI_Send(in_grid_array, (work[2] + 1 - work[1]) * (work[4] + 1 - work[3]), MPI_INT, 0, 201, parent);
		/* get the next piece of work */
		/*printf("slave[%d] receiving new work.\n", pid);fflush(stdout);*/
		MPI_Recv(work, 5, MPI_INT, 0, 100, parent, &status);
		/*printf("slave[%d] received work: %d, (%d,%d)(%d,%d)\n", pid, work[0], work[1], work[2], work[3], work[4]);fflush(stdout);*/
	    }
	}
    }

    if (master && save_image)
    {
	imax_iterations = 0;
	for (i=0; i<ipixels_across * ipixels_down; ++i)
	{
	    /* look for "brightest" pixel value, for image use */
	    if (out_grid_array[i] > imax_iterations)
		imax_iterations = out_grid_array[i];
	}

	if (julia == 0)
	    printf("Done calculating mandelbrot, now creating file\n");
	else
	    printf("Done calculating julia, now creating file\n");
	fflush(stdout);

	/* Print out the array in some appropriate form. */
	if (julia == 0)
	{
	    /* it's a mandelbrot */
	    sprintf(file_message, "Mandelbrot over (%lf-%lf,%lf-%lf), size %d x %d",
		x_min, x_max, y_min, y_max, ipixels_across, ipixels_down);
	}
	else
	{
	    /* it's a julia */
	    sprintf(file_message, "Julia over (%lf-%lf,%lf-%lf), size %d x %d, center (%lf, %lf)",
		x_min, x_max, y_min, y_max, ipixels_across, ipixels_down,
		julia_constant.real, julia_constant.imaginary);
	}

	dumpimage(filename, out_grid_array, ipixels_across, ipixels_down, imax_iterations, file_message, num_colors, colors);
    }

    if (master)
    {
	for (i=0; i<num_children; i++)
	{
	    MPI_Comm_disconnect(&child_comm[i]);
	}
	free(child_comm);
	free(child_request);
	free(colors);
    }

    MPI_Finalize();
    return 0;
}
Example No. 23
        int MpiCommunicator::init( int minId, long thecomm_ )
        {
            VT_FUNC_I( "MpiCommunicator::init" );

            assert( sizeof(thecomm_) >= sizeof(MPI_Comm) );
            MPI_Comm thecomm = (MPI_Comm)thecomm_;

            // turn wait mode on for intel mpi if possible
            // this should greatly improve performance for intel mpi
            PAL_SetEnvVar( "I_MPI_WAIT_MODE", "enable", 0);

            int flag;
            MPI_Initialized( &flag );
            if ( ! flag ) {
                int p;
                //!! FIXME passing NULL ptr breaks mvapich1 mpi implementation
                MPI_Init_thread( 0, NULL, MPI_THREAD_MULTIPLE, &p );
                if( p != MPI_THREAD_MULTIPLE ) {
                    // can't use Speaker yet, need Channels to be inited
                    std::cerr << "[CnC] Warning: not MPI_THREAD_MULTIPLE (" << MPI_THREAD_MULTIPLE << "), but " << p << std::endl;
                }
            } else if( thecomm == 0 ) {
                CNC_ABORT( "Process has already been initialized" );
            }


            MPI_Comm myComm = MPI_COMM_WORLD;
            int rank;
            MPI_Comm parentComm = MPI_COMM_NULL; // stays NULL when a custom comm is supplied
            if( thecomm == 0 ) {
                MPI_Comm_get_parent( &parentComm );
            } else {
                m_customComm = true;
                m_exit0CallOk = false;
                myComm = thecomm;
            }
            MPI_Comm_rank( myComm, &rank );
            
            // father of all checks if he's requested to spawn processes:
            if ( rank == 0 && parentComm == MPI_COMM_NULL ) {
                // Ok, let's spawn the clients.
                // I need some information for the startup.
                // 1. Name of the executable (default is the current exe)
                const char * _tmp = getenv( "CNC_MPI_SPAWN" );
                if ( _tmp ) {
                    int nClientsToSpawn = atol( _tmp );
                    _tmp = getenv( "CNC_MPI_EXECUTABLE" );
                    std::string clientExe( _tmp ? _tmp : "" );
                    if( clientExe.empty() ) clientExe = PAL_GetProgname();
                    CNC_ASSERT( ! clientExe.empty() );
                    // 3. Special setting for MPI_Info: hosts
                    const char * clientHost = getenv( "CNC_MPI_HOSTS" );
                    
                    // Prepare MPI_Info object:
                    MPI_Info clientInfo = MPI_INFO_NULL;
                    if ( clientHost ) {
                        MPI_Info_create( &clientInfo );
                        if ( clientHost ) {
                            MPI_Info_set( clientInfo, const_cast< char * >( "host" ), const_cast< char * >( clientHost ) );
                            // can't use Speaker yet, need Channels to be inited
                            std::cerr << "[CnC " << rank << "] Set MPI_Info_set( \"host\", \"" << clientHost << "\" )\n";
                        }
                    }
                    // Now spawn the client processes:
                    // can't use Speaker yet, need Channels to be inited
                    std::cerr << "[CnC " << rank << "] Spawning " << nClientsToSpawn << " MPI processes" << std::endl;
                    int* errCodes = new int[nClientsToSpawn];
                    MPI_Comm interComm;
                    int err = MPI_Comm_spawn( const_cast< char * >( clientExe.c_str() ),
                                              MPI_ARGV_NULL, nClientsToSpawn,
                                              clientInfo, 0, MPI_COMM_WORLD,
                                              &interComm, errCodes );
                    delete [] errCodes;
                    if ( err ) {
                        // can't use Speaker yet, need Channels to be inited
                        std::cerr << "[CnC " << rank << "] Error in MPI_Comm_spawn. Skipping process spawning";
                    } else {
                        MPI_Intercomm_merge( interComm, 0, &myComm );
                    }
                } // else {
                // No process spawning
                // MPI-1 situation: all clients to be started by mpiexec
                //                    myComm = MPI_COMM_WORLD;
                //}
            }
            if ( thecomm == 0 && parentComm != MPI_COMM_NULL ) {
                // I am a child. Build intra-comm to the parent.
                MPI_Intercomm_merge( parentComm, 1, &myComm );
            }
            MPI_Comm_rank( myComm, &rank );

            CNC_ASSERT( m_channel == NULL );
            MpiChannelInterface* myChannel = new MpiChannelInterface( use_crc(), myComm );
            m_channel = myChannel;

            int size;
            MPI_Comm_size( myComm, &size );
            // Are we on the host or on the remote side?
            if ( rank == 0 ) {
                if( size <= 1 ) {
                    Speaker oss( std::cerr ); oss << "Warning: no clients available. Forgot to set CNC_MPI_SPAWN?";
                }
                // ==> HOST startup: 
                // This initializes the mpi environment in myChannel.
                MpiHostInitializer hostInitializer( *myChannel );
                hostInitializer.init_mpi_comm( myComm );
            } else {
                // ==> CLIENT startup:
                // This initializes the mpi environment in myChannel.
                MpiClientInitializer clientInitializer( *myChannel );
                clientInitializer.init_mpi_comm( myComm );
            }

            { Speaker oss( std::cerr ); oss << "MPI initialization complete (rank " << rank << ")."; }

            //            MPI_Barrier( myComm );

            // Now the mpi specific setup is finished.
            // Do the generic initialization stuff.
            GenericCommunicator::init( minId );

            return 0;
        }
Example No. 24
/* Note:  CR_SendIntToPA is translated to "cr_sendinttopa__" 
 * for Fortran compatibility */
void CR_SendIntToPA(int *info, int *infoDim, int *tag) {
	MPI_Comm parent;
	MPI_Comm_get_parent (&parent);
	MPI_Send ( info, *infoDim, MPI_INT, 0, *tag , parent);
}
Example No. 25
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, rsize;
    int np = 3;
    MPI_Comm parentcomm, intercomm;
    int verbose = 0;
    char *env;
    int can_spawn;

    env = getenv("MPITEST_VERBOSE");
    if (env) {
        if (*env != '0')
            verbose = 1;
    }

    MTest_Init(&argc, &argv);

    errs += MTestSpawnPossible(&can_spawn);

    if (can_spawn) {
        MPI_Comm_get_parent(&parentcomm);

        if (parentcomm == MPI_COMM_NULL) {
            IF_VERBOSE(("spawning %d processes\n", np));
            /* Create 3 more processes */
            MPI_Comm_spawn((char *) "./disconnect", MPI_ARGV_NULL, np,
                           MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm, MPI_ERRCODES_IGNORE);
        } else {
            intercomm = parentcomm;
        }

        /* We now have a valid intercomm */

        MPI_Comm_remote_size(intercomm, &rsize);
        MPI_Comm_size(intercomm, &size);
        MPI_Comm_rank(intercomm, &rank);

        if (parentcomm == MPI_COMM_NULL) {
            IF_VERBOSE(("parent rank %d alive.\n", rank));
            /* Parent */
            if (rsize != np) {
                errs++;
                printf("Did not create %d processes (got %d)\n", np, rsize);
                fflush(stdout);
            }
            IF_VERBOSE(("disconnecting child communicator\n"));
            MPI_Comm_disconnect(&intercomm);

            /* Errors cannot be sent back to the parent because there is no
             * communicator connected to the children
             * for (i=0; i<rsize; i++)
             * {
             * MPI_Recv(&err, 1, MPI_INT, i, 1, intercomm, MPI_STATUS_IGNORE);
             * errs += err;
             * }
             */
        } else {
            IF_VERBOSE(("child rank %d alive.\n", rank));
            /* Child */
            if (size != np) {
                errs++;
                printf("(Child) Did not create %d processes (got %d)\n", np, size);
                fflush(stdout);
            }

            IF_VERBOSE(("disconnecting communicator\n"));
            MPI_Comm_disconnect(&intercomm);

            /* Send the errs back to the master process */
            /* Errors cannot be sent back to the parent because there is no
             * communicator connected to the parent */
            /*MPI_Ssend(&errs, 1, MPI_INT, 0, 1, intercomm); */
        }

        /* Note that the MTest_Finalize get errs only over COMM_WORLD */
        /* Note also that both the parent and child will generate "No Errors"
         * if both call MTest_Finalize */
        if (parentcomm == MPI_COMM_NULL) {
            MTest_Finalize(errs);
        } else {
            MPI_Finalize();
        }
    } else {
        MTest_Finalize(errs);
    }

    IF_VERBOSE(("calling finalize\n"));
    return MTestReturnValue(errs);
}
Example No. 26
/* -------------------------------------------------------------------------
   The first parameter passed to this agent is the number of the processor
   on which 'ns' is running; the second parameter is the name of the file
   containing the SCP instance.
   ------------------------------------------------------------------------- */
int main(int argc, char *argv[])
{
  
  int flagConnect = 1;		  
  char port[MPI_MAX_PORT_NAME];   
  MPI_Comm commServerMD;          
  	            				            
  MPI_Init(&argc, &argv);               
  MPI_Comm_get_parent(&interfaceComm);
                                       
  /* Attempt to connect to the server */
  flagConnect = MPI_Lookup_name("ServerMD", MPI_INFO_NULL, port);
  if(flagConnect) 
  {
     printf("\n\n* Dual Greedy agent terminating *");
     printf("\n* ERROR: the dual-solutions memory was not initialized. *");
     fflush(stdout);
  }	 
  else
   { 
/*
     TimeSleeping  = (int)  ReadAteamParam(1);
     MaxLenDualMem = (int)  ReadAteamParam(2);
     CutsofSol     = (int)  ReadAteamParam(6);
     MaxExeTime    = (int)  ReadAteamParam(11);
     ReducPerc     = (int)  ReadAteamParam(12);
     RandomDual    = (char) ReadAteamParam(17);
*/

     TimeSleeping  = atoi(argv[2]);
     MaxLenDualMem = atoi(argv[3]);
     CutsofSol     = atoi(argv[4]);
     MaxExeTime    = atoi(argv[5]);
     ReducPerc     = atoi(argv[6]);
     RandomDual    = (char) atoi(argv[7]);

     if (!(finput = fopen(argv[1],"r")))
     { printf("\n\n* Error opening file %s. *\n",argv[1]);
	   exit(1);
     }
     ReadSource();
     fclose(finput);
     Reduction(NULL,NULL);
     srand48(time(NULL));

     MPI_Comm_connect(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, &commServerMD);

     ExecAgDual(commServerMD);
     
     printf("\n\n* Dual Greedy agent finished *");
     printf("\n* Server finished processing. *\n");
   }

   /* End communication with the server (only if the connection succeeded) */
   if (!flagConnect)
   {
      int message = 1224;
      MPI_Send(&message, 1, MPI_INT, 0, 1, commServerMD);
      MPI_Comm_disconnect(&commServerMD);
   }

   /* Send a termination message to the interface */
   //message = 1;
   //MPI_Send(&message, 1, MPI_INT, 0, 1, interfaceComm);

   MPI_Finalize();
   printf("\n ==== ag_dual FINISHED\n");

   return 0;
}
Example No. 27
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, rsize, i;
    int np = 2;
    int errcodes[2];
    MPI_Comm      parentcomm, intercomm, intracomm, intracomm2, intracomm3;
    int           isChild = 0;
    MPI_Status    status;

    MTest_Init( &argc, &argv );

    MPI_Comm_get_parent( &parentcomm );

    if (parentcomm == MPI_COMM_NULL) {
	/* Create 2 more processes */
	MPI_Comm_spawn( (char*)"./spawnintra", MPI_ARGV_NULL, np,
			MPI_INFO_NULL, 0, MPI_COMM_WORLD,
			&intercomm, errcodes );
    }
    else 
	intercomm = parentcomm;

    /* We now have a valid intercomm */

    MPI_Comm_remote_size( intercomm, &rsize );
    MPI_Comm_size( intercomm, &size );
    MPI_Comm_rank( intercomm, &rank );

    if (parentcomm == MPI_COMM_NULL) {
	/* Master */
	if (rsize != np) {
	    errs++;
	    printf( "Did not create %d processes (got %d)\n", np, rsize );
	}
	if (rank == 0) {
	    for (i=0; i<rsize; i++) {
		MPI_Send( &i, 1, MPI_INT, i, 0, intercomm );
	    }
	}
    }
    else {
	/* Child */
	isChild = 1;
	if (size != np) {
	    errs++;
	    printf( "(Child) Did not create %d processes (got %d)\n", 
		    np, size );
	}
	MPI_Recv( &i, 1, MPI_INT, 0, 0, intercomm, &status );
	if (i != rank) {
	    errs++;
	    printf( "Unexpected rank on child %d (%d)\n", rank, i );
	}
    }

    /* At this point, try to form the intracommunicator */
    MPI_Intercomm_merge( intercomm, isChild, &intracomm );

    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm, &icsize );
	MPI_Comm_rank( intracomm, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Make sure that the processes are ordered correctly */
	if (isChild) {
	    int psize;
	    MPI_Comm_remote_size( parentcomm, &psize );
	    if (icrank != psize + wrank ) {
		errs++;
		printf( "Intracomm rank %d (from child) should have rank %d\n",
			icrank, psize + wrank );
	    }
	}
	else {
	    if (icrank != wrank) {
		errs++;
		printf( "Intracomm rank %d (from parent) should have rank %d\n",
			icrank, wrank );
	    }
	}
    }

    /* At this point, try to form the intracommunicator, with the other 
     processes first */
    MPI_Intercomm_merge( intercomm, !isChild, &intracomm2 );

    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm2, &icsize );
	MPI_Comm_rank( intracomm2, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "(2)Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Make sure that the processes are ordered correctly */
	if (isChild) {
	    if (icrank != wrank ) {
		errs++;
		printf( "(2)Intracomm rank %d (from child) should have rank %d\n",
			icrank, wrank );
	    }
	}
	else {
	    int csize;
	    MPI_Comm_remote_size( intercomm, &csize );
	    if (icrank != wrank + csize) {
		errs++;
		printf( "(2)Intracomm rank %d (from parent) should have rank %d\n",
			icrank, wrank + csize );
	    }
	}
    }

    /* At this point, try to form the intracommunicator, with an 
       arbitrary choice for the first group of processes */
    MPI_Intercomm_merge( intercomm, 0, &intracomm3 );
    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm3, &icsize );
	MPI_Comm_rank( intracomm3, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "(3)Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Eventually, we should test that the processes are ordered 
	   correctly, by groups (must be one of the two cases above) */
    }

    /* Update error count */
    if (isChild) {
	/* Send the errs back to the master process */
	MPI_Ssend( &errs, 1, MPI_INT, 0, 1, intercomm );
    }
    else {
	if (rank == 0) {
	    /* We could use intercomm reduce to get the errors from the 
	       children, but we'll use a simpler loop to make sure that
	       we get valid data */
	    for (i=0; i<rsize; i++) {
		MPI_Recv( &err, 1, MPI_INT, i, 1, intercomm, MPI_STATUS_IGNORE );
		errs += err;
	    }
	}
    }

    /* It isn't necessary to free the intracomms, but it should not hurt */
    MPI_Comm_free( &intracomm );
    MPI_Comm_free( &intracomm2 );
    MPI_Comm_free( &intracomm3 );

    /* It isn't necessary to free the intercomm, but it should not hurt */
    MPI_Comm_free( &intercomm );

    /* Note that MTest_Finalize counts errs only over COMM_WORLD */
    /* Note also that both the parent and the children will generate
       "No Errors" if both call MTest_Finalize */
    if (parentcomm == MPI_COMM_NULL) {
	MTest_Finalize( errs );
    }

    MPI_Finalize();
    return 0;
}
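The three merges above exercise the "high" argument of MPI_Intercomm_merge: the group that passes high = 0 occupies the low ranks of the merged intracommunicator, ranks within each group keep their relative order, and when both groups pass the same value the ordering is implementation-chosen. A compact restatement of the rule the test checks, as a sketch (ic and am_high are illustrative names for an intercommunicator and the side flag):

#include <mpi.h>

/* Returns this process's rank in the merged intracommunicator.
 * Expected: merged rank == local rank                 when am_high == 0,
 *           merged rank == remote size + local rank   when am_high == 1
 * (provided the two sides pass opposite values of am_high). */
static int merged_rank_of(MPI_Comm ic, int am_high)
{
    MPI_Comm merged;
    int mrank;

    MPI_Intercomm_merge(ic, am_high, &merged);
    MPI_Comm_rank(merged, &mrank);
    MPI_Comm_free(&merged);
    return mrank;
}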
Esempio n. 28
/* Note:  CR_SendDoubleToPA is translated to "cr_senddouble2pa__"
 *  for Fortran compatibility */
void CR_SendDoubleToPA(double *data, int *infoDim, int *tag) {
	MPI_Comm parent;
	/* Send *infoDim doubles, with the given tag, to rank 0 of the parent
	 * intercommunicator (the Parallel Agent that spawned this process). */
	MPI_Comm_get_parent(&parent);
	MPI_Send(data, *infoDim, MPI_DOUBLE, 0, *tag, parent);
}
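A minimal sketch of how the Fortran-callable name mentioned in the note is commonly provided; the wrapper below illustrates the usual trailing-underscore convention and is not code from the package:

/* Hypothetical alias: many Fortran compilers emit lowercase external names
 * with trailing underscores, so defining a C symbol with that exact name
 * lets Fortran code call the C routine directly. */
void cr_senddouble2pa__(double *data, int *infoDim, int *tag)
{
    CR_SendDoubleToPA(data, infoDim, tag);
}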
Esempio n. 29
/* **** CR_Exec ****
 * CR - stands for Child R-scalapack
 * CR_Exec is the main function that is invoked by all
 * the spawned processes 
 */
SEXP CR_Exec() {
	MPI_Comm mcParent;

	int iMyRank;
	int iNumProcs;
	int rel_flag = 0;

	int ipGridAndDims[10] = {0,0,0,0,0,0,0,0,0,0};	/* see CR_GetInputParams
							 * for the contents */

#ifdef OMPI
	dlopen("libmpi.so.0", RTLD_GLOBAL | RTLD_LAZY);
#endif


	/* Performing MPI_Init through BLACS */
	if (CR_InitializeEnv(&iMyRank, &iNumProcs) != 0)	{
		Rprintf("ERROR[1]: Initializing MPI thru BLACS ... FAILED .. EXITING !!\n");
		return AsInt(1);
	}



	/* Get the Parent Communicator */
	if (MPI_Comm_get_parent(&mcParent) != MPI_SUCCESS) {
		Rprintf("ERROR[2]: Getting Parent Comm ... FAILED .. EXITING !!\n");
		return AsInt(2);
	}



	if (mcParent == MPI_COMM_NULL) {
		char *cpNoParent = "No Parent found.  This program is not designed"
		" to be run by itself.\nThis program is used by the R-ScaLAPACK"
		" package as the computational engine.\n";
		fprintf(stderr, "%s", cpNoParent);
		return AsInt (3);
	}


	while (rel_flag != 1){

		/* Get the broadcasted input parameters from the Parallel Agent */
		if (CR_GetInputParams(mcParent,ipGridAndDims) != 0) {
			Rprintf ("ERROR[4]: FAILED while getting Input Parameters \n");
			return AsInt(4);
		}


		rel_flag = ipGridAndDims[9];


		/* Call the requested ScaLAPACK function */
		if (CR_CallScalapackFn(ipGridAndDims, iMyRank)!=0) {
			Rprintf ("ERROR[6]: FAILED while executing scalapack function - CR_CallScalapackFn() \n");
			return AsInt(6);
		}


		/* If the function ID is 0 and the release flag is 1, the request
		 * was sla.gridExit, so shut down.  (Note: "intercomm" is not
		 * declared in this function; it is evidently a file-scope global
		 * holding the communicator to the Parallel Agent.)
		 */ 
		if (ipGridAndDims[8] == 0 && rel_flag == 1){

			MPI_Comm_free( &intercomm );
			MPI_Finalize();
			return AsInt(1); 
		}

	}	/*  Endof  while (...) */


	//MPI_Barrier(intercomm);
	MPI_Comm_free( &intercomm );
	MPI_Finalize();


	return AsInt(0);
}
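For context, a rough sketch of the parent (Parallel Agent) side that CR_Exec expects; the helper name, executable name, and parameter layout below are assumptions for illustration, not code from R-ScaLAPACK:

#include <mpi.h>

/* Hypothetical parent-side helper: spawn the worker engine and broadcast one
 * ten-integer parameter block of the kind CR_GetInputParams appears to read.
 * On an intercommunicator, the sole parent passes MPI_ROOT to MPI_Bcast. */
static void PA_SpawnAndRelease(int nWorkers)
{
    MPI_Comm workers;
    int params[10] = {0};

    MPI_Comm_spawn((char *) "CRDriver", MPI_ARGV_NULL, nWorkers,
                   MPI_INFO_NULL, 0, MPI_COMM_SELF, &workers,
                   MPI_ERRCODES_IGNORE);

    params[9] = 1;    /* release flag: tells the workers to exit */
    MPI_Bcast(params, 10, MPI_INT, MPI_ROOT, workers);

    MPI_Comm_disconnect(&workers);
}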
Esempio n. 30
/* This test spawns two child jobs (numprocs processes each) and has them
 * open a port and connect to each other.
 * The two children repeatedly connect, accept, and disconnect from each other.
 */
int main(int argc, char *argv[])
{
    int error;
    int rank, size;
    int numprocs = 3;
    char *argv1[2] = { (char *) "connector", NULL };
    char *argv2[2] = { (char *) "acceptor", NULL };
    MPI_Comm comm_connector, comm_acceptor, comm_parent, comm;
    char port[MPI_MAX_PORT_NAME] = { 0 };
    MPI_Status status;
    MPI_Info spawn_path = MPI_INFO_NULL;
    int i, num_loops = 100;
    int data;
    int verbose = 0;
    int can_spawn, errs = 0;

    if (getenv("MPITEST_VERBOSE")) {
        verbose = 1;
    }

    IF_VERBOSE(("init.\n"));
    error = MPI_Init(&argc, &argv);
    check_error(error, "MPI_Init");

    errs += MTestSpawnPossible(&can_spawn);
    if (!can_spawn) {
        if (errs)
            printf(" Found %d errors\n", errs);
        else
            printf(" No Errors\n");
        fflush(stdout);
    } else {
        IF_VERBOSE(("size.\n"));
        error = MPI_Comm_size(MPI_COMM_WORLD, &size);
        check_error(error, "MPI_Comm_size");

        IF_VERBOSE(("rank.\n"));
        error = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        check_error(error, "MPI_Comm_rank");

        if (argc == 1) {
            /* Make sure that the current directory is in the path.
             * Not all implementations may honor or understand this, but
             * it is highly recommended as it gives users a clean way
             * to specify the location of the executable without
             * specifying a particular directory format (e.g., this
             * should work with both Windows and Unix implementations) */
            MPI_Info_create(&spawn_path);
            MPI_Info_set(spawn_path, (char *) "path", (char *) ".");

            IF_VERBOSE(("spawn connector.\n"));
            error = MPI_Comm_spawn((char *) "disconnect_reconnect2",
                                   argv1, numprocs, spawn_path, 0,
                                   MPI_COMM_WORLD, &comm_connector, MPI_ERRCODES_IGNORE);
            check_error(error, "MPI_Comm_spawn");

            IF_VERBOSE(("spawn acceptor.\n"));
            error = MPI_Comm_spawn((char *) "disconnect_reconnect2",
                                   argv2, numprocs, spawn_path, 0,
                                   MPI_COMM_WORLD, &comm_acceptor, MPI_ERRCODES_IGNORE);
            check_error(error, "MPI_Comm_spawn");
            MPI_Info_free(&spawn_path);

            if (rank == 0) {
                IF_VERBOSE(("recv port.\n"));
                error = MPI_Recv(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, comm_acceptor, &status);
                check_error(error, "MPI_Recv");

                IF_VERBOSE(("send port.\n"));
                error = MPI_Send(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, comm_connector);
                check_error(error, "MPI_Send");
            }

            IF_VERBOSE(("barrier acceptor.\n"));
            error = MPI_Barrier(comm_acceptor);
            check_error(error, "MPI_Barrier");

            IF_VERBOSE(("barrier connector.\n"));
            error = MPI_Barrier(comm_connector);
            check_error(error, "MPI_Barrier");

            error = MPI_Comm_free(&comm_acceptor);
            check_error(error, "MPI_Comm_free");
            error = MPI_Comm_free(&comm_connector);
            check_error(error, "MPI_Comm_free");

            if (rank == 0) {
                printf(" No Errors\n");
                fflush(stdout);
            }
        } else if ((argc == 2) && (strcmp(argv[1], "acceptor") == 0)) {
            IF_VERBOSE(("get_parent.\n"));
            error = MPI_Comm_get_parent(&comm_parent);
            check_error(error, "MPI_Comm_get_parent");
            if (comm_parent == MPI_COMM_NULL) {
                printf("acceptor's parent is NULL.\n");
                fflush(stdout);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
            if (rank == 0) {
                IF_VERBOSE(("open_port.\n"));
                error = MPI_Open_port(MPI_INFO_NULL, port);
                check_error(error, "MPI_Open_port");

                IF_VERBOSE(("0: opened port: <%s>\n", port));
                IF_VERBOSE(("send.\n"));
                error = MPI_Send(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, comm_parent);
                check_error(error, "MPI_Send");
            }

            for (i = 0; i < num_loops; i++) {
                IF_VERBOSE(("accept.\n"));
                error = MPI_Comm_accept(port, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &comm);
                check_error(error, "MPI_Comm_accept");

                if (rank == 0) {
                    data = i;
                    error = MPI_Send(&data, 1, MPI_INT, 0, 0, comm);
                    check_error(error, "MPI_Send");
                    error = MPI_Recv(&data, 1, MPI_INT, 0, 0, comm, &status);
                    check_error(error, "MPI_Recv");
                    if (data != i) {
                        printf("expected %d but received %d\n", i, data);
                        fflush(stdout);
                        MPI_Abort(MPI_COMM_WORLD, 1);
                    }
                }

                IF_VERBOSE(("disconnect.\n"));
                error = MPI_Comm_disconnect(&comm);
                check_error(error, "MPI_Comm_disconnect");
            }

            if (rank == 0) {
                IF_VERBOSE(("close_port.\n"));
                error = MPI_Close_port(port);
                check_error(error, "MPI_Close_port");
            }

            IF_VERBOSE(("barrier.\n"));
            error = MPI_Barrier(comm_parent);
            check_error(error, "MPI_Barrier");

            MPI_Comm_free(&comm_parent);
        } else if ((argc == 2) && (strcmp(argv[1], "connector") == 0)) {
            IF_VERBOSE(("get_parent.\n"));
            error = MPI_Comm_get_parent(&comm_parent);
            check_error(error, "MPI_Comm_get_parent");
            if (comm_parent == MPI_COMM_NULL) {
                printf("connector's parent is NULL.\n");
                fflush(stdout);
                MPI_Abort(MPI_COMM_WORLD, -1);
            }

            if (rank == 0) {
                IF_VERBOSE(("recv.\n"));
                error = MPI_Recv(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, comm_parent, &status);
                check_error(error, "MPI_Recv");
                IF_VERBOSE(("1: received port: <%s>\n", port));
            }

            for (i = 0; i < num_loops; i++) {
                IF_VERBOSE(("connect.\n"));
                error = MPI_Comm_connect(port, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &comm);
                check_error(error, "MPI_Comm_connect");

                if (rank == 0) {
                    data = -1;
                    error = MPI_Recv(&data, 1, MPI_INT, 0, 0, comm, &status);
                    check_error(error, "MPI_Recv");
                    if (data != i) {
                        printf("expected %d but received %d\n", i, data);
                        fflush(stdout);
                        MPI_Abort(MPI_COMM_WORLD, 1);
                    }
                    error = MPI_Send(&data, 1, MPI_INT, 0, 0, comm);
                    check_error(error, "MPI_Send");
                }

                IF_VERBOSE(("disconnect.\n"));
                error = MPI_Comm_disconnect(&comm);
                check_error(error, "MPI_Comm_disconnect");
            }

            IF_VERBOSE(("barrier.\n"));
            error = MPI_Barrier(comm_parent);
            check_error(error, "MPI_Barrier");

            MPI_Comm_free(&comm_parent);
        } else {
            printf("invalid command line.\n");
            fflush(stdout);
            {
                int ii;
                for (ii = 0; ii < argc; ii++) {
                    printf("argv[%d] = <%s>\n", ii, argv[ii]);
                }
            }
            fflush(stdout);
            MPI_Abort(MPI_COMM_WORLD, -2);
        }
    }

    MPI_Finalize();
    return MTestReturnValue(errs);
}
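As a design note: this test relays the port string through the parent with explicit sends and receives, which works without any extra infrastructure. Where an MPI name server is available, the relay could instead use the publish/lookup pattern seen in the Dual Greedy agent earlier in this document. A hedged sketch, with "reconnect_test" as an illustrative service name and the helpers called on rank 0 only (hence MPI_COMM_SELF):

#include <mpi.h>

/* Acceptor side: publish the port under a well-known name. */
static void acceptor_open(MPI_Comm *newcomm)
{
    char port[MPI_MAX_PORT_NAME];

    MPI_Open_port(MPI_INFO_NULL, port);
    MPI_Publish_name("reconnect_test", MPI_INFO_NULL, port);
    MPI_Comm_accept(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, newcomm);
    MPI_Unpublish_name("reconnect_test", MPI_INFO_NULL, port);
    MPI_Close_port(port);
}

/* Connector side: resolve the name instead of receiving the port. */
static void connector_open(MPI_Comm *newcomm)
{
    char port[MPI_MAX_PORT_NAME];

    MPI_Lookup_name("reconnect_test", MPI_INFO_NULL, port);
    MPI_Comm_connect(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, newcomm);
}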