Example #1
int main( int argc, char *argv[] )
{
    int errs = 0;
    int wrank, wsize, mrank, msize, inter_rank;
    int np = 2;
    int errcodes[2];
    int rrank = -1;
    MPI_Comm      parentcomm, intercomm, intercomm2, even_odd_comm, merged_world;
    int can_spawn;

    MTest_Init( &argc, &argv );

    errs += MTestSpawnPossible(&can_spawn);

    if (can_spawn) {
        MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
        MPI_Comm_size( MPI_COMM_WORLD, &wsize );

        if (wsize != 2) {
            printf( "world size != 2, this test will not work correctly\n" );
            errs++;
        }

        MPI_Comm_get_parent( &parentcomm );

        if (parentcomm == MPI_COMM_NULL) {
            MPI_Comm_spawn( (char*)"./spaiccreate2", MPI_ARGV_NULL, np,
                    MPI_INFO_NULL, 0, MPI_COMM_WORLD,
                    &intercomm, errcodes );
        }
        else {
            intercomm = parentcomm;
        }

        MPI_Intercomm_merge( intercomm, (parentcomm == MPI_COMM_NULL ? 0 : 1), &merged_world );
        MPI_Comm_rank( merged_world, &mrank );
        MPI_Comm_size( merged_world, &msize );

        MPI_Comm_split( merged_world, mrank % 2, wrank, &even_odd_comm );

        MPI_Intercomm_create( even_odd_comm, 0, merged_world, (mrank + 1) % 2, 123, &intercomm2 );
        MPI_Comm_rank( intercomm2, &inter_rank );

        /* odds receive from evens */
        MPI_Sendrecv( &inter_rank, 1, MPI_INT, inter_rank, 456,
                &rrank, 1, MPI_INT, inter_rank, 456, intercomm2, MPI_STATUS_IGNORE );
        if (rrank != inter_rank) {
            printf( "Received %d from %d; expected %d\n",
                    rrank, inter_rank, inter_rank );
            errs++;
        }

        MPI_Barrier( intercomm2 );

        MPI_Comm_free( &intercomm );
        MPI_Comm_free( &intercomm2 );
        MPI_Comm_free( &merged_world );
        MPI_Comm_free( &even_odd_comm );

        /* Note that MTest_Finalize gets errs only over COMM_WORLD */
        /* Note also that both the parent and child will generate "No Errors"
           if both call MTest_Finalize */
        if (parentcomm == MPI_COMM_NULL) {
            MTest_Finalize( errs );
        }
    } else {
        MTest_Finalize( errs );
    }

    MPI_Finalize();
    return 0;
}
Example #2
int main(int argc, char *argv[])
{
    char str[10];
    MPI_Comm intercomm1, intracomm, intercomm2;
    int err, errcodes[256], rank;

    MPI_Init(&argc, &argv);

/*    printf("Child out of MPI_Init\n");
    fflush(stdout);
*/
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_get_parent(&intercomm1);

    MPI_Intercomm_merge(intercomm1, 1, &intracomm);

    err = MPI_Comm_spawn("spawn_merge_child2", MPI_ARGV_NULL, 2,
                         MPI_INFO_NULL, 2, intracomm, &intercomm2, errcodes);
    if (err)
        printf("Error in MPI_Comm_spawn\n");

    MPI_Comm_rank(intercomm2, &rank);

    if (rank == 3) {
        err = MPI_Recv(str, 3, MPI_CHAR, 1, 0, intercomm2, MPI_STATUS_IGNORE);
        printf("Parent (first child) received from child 2: %s\n", str);
        fflush(stdout);

        err = MPI_Send("bye", 4, MPI_CHAR, 1, 0, intercomm2);
    }

    MPI_Finalize();
    return 0;
}
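The second-level program spawned above ("spawn_merge_child2") is not part of the listing. A minimal sketch of what it might look like, assuming its only job is to mirror the Recv/Send pair in the parent (rank 1 of the new children talks to rank 3 of the spawning group over the parent intercommunicator):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    char str[10];
    int rank;
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);          /* intercommunicator to the group that spawned us */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 1) {
        MPI_Send("hi", 3, MPI_CHAR, 3, 0, parent);                     /* matches the parent's MPI_Recv */
        MPI_Recv(str, 10, MPI_CHAR, 3, 0, parent, MPI_STATUS_IGNORE);  /* receives "bye" */
        printf("Child 2 received from parent: %s\n", str);
        fflush(stdout);
    }

    MPI_Finalize();
    return 0;
}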
Example #3
int main(int argc, char **argv)
{
    int iter, err, rank, size;
    MPI_Comm comm, merged;

    /* MPI environment */

    printf("parent*******************************\n");
    printf("parent: Launching MPI*\n");

    MPI_Init( &argc, &argv);

    for (iter = 0; iter < 1000; ++iter) {
        MPI_Comm_spawn(EXE_TEST, NULL, 1, MPI_INFO_NULL,
                       0, MPI_COMM_WORLD, &comm, &err);
        printf("parent: MPI_Comm_spawn #%d return : %d\n", iter, err);

        MPI_Intercomm_merge(comm, 0, &merged);
        MPI_Comm_rank(merged, &rank);
        MPI_Comm_size(merged, &size);
        printf("parent: MPI_Comm_spawn #%d rank %d, size %d\n", 
               iter, rank, size);
//        sleep(2);
        MPI_Comm_free(&merged);
    }

    MPI_Finalize();
    printf("parent: End .\n" );
    return 0;
}
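The program behind EXE_TEST is not shown. A minimal sketch of a matching child, assuming it only has to take part in the parent's merge each time it is spawned, could be:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, size;
    MPI_Comm parent, merged;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);
    if (parent != MPI_COMM_NULL) {
        MPI_Intercomm_merge(parent, 1, &merged);   /* matches the parent's merge (high = 0 there) */
        MPI_Comm_rank(merged, &rank);
        MPI_Comm_size(merged, &size);
        printf("child: rank %d of %d in merged comm\n", rank, size);
        MPI_Comm_free(&merged);                    /* matches MPI_Comm_free(&merged) in the parent */
    }
    /* Neither side disconnects the intercommunicator, so how long the child
       lingers in MPI_Finalize is implementation dependent. */
    MPI_Finalize();
    return 0;
}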
Example #4
static int
spawn_and_merge( char* argv[], char* arg, int count,
                 MPI_Comm* inter, MPI_Comm* intra )
{
    int *errcode, err, i;
    char *spawn_argv[2];

    errcode = malloc(sizeof(int) * count);
    if (errcode == NULL)
        ompitest_error(__FILE__, __LINE__, "Doh!  Rank %d was not able to allocate enough memory.  MPI test aborted!\n", 0);
    memset(errcode, -1, sizeof(int) * count);
    /*MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);*/

    spawn_argv[0] = arg;
    spawn_argv[1] = NULL;
    err = MPI_Comm_spawn(argv[0], spawn_argv, count, MPI_INFO_NULL, 0,
                         MPI_COMM_WORLD, inter, errcode);
    for (i = 0; i < count; i++)
        if (errcode[i] != MPI_SUCCESS)
            ompitest_error(__FILE__, __LINE__,
                           "ERROR: MPI_Comm_spawn returned errcode[%d] = %d\n",
                           i, errcode[i]);
    if (err != MPI_SUCCESS)
        ompitest_error(__FILE__, __LINE__,
                       "ERROR: MPI_Comm_spawn returned errcode = %d\n", err);
    err = MPI_Intercomm_merge( *inter, 0, intra );
    free(errcode);
    return err;
}
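A possible way to drive the helper above from the same source file (not part of the original test; the "--worker" argument and the child-side branch are assumptions):

int main(int argc, char *argv[])
{
    MPI_Comm parent, inter, intra;
    int rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);
    if (parent == MPI_COMM_NULL) {
        /* original processes: spawn two copies of this executable and merge with them */
        if (spawn_and_merge(argv, (char*)"--worker", 2, &inter, &intra) != MPI_SUCCESS)
            MPI_Abort(MPI_COMM_WORLD, 1);
    } else {
        /* spawned copy: only take part in the merge (high = 1 places it after the parents) */
        MPI_Intercomm_merge(parent, 1, &intra);
    }
    MPI_Comm_rank(intra, &rank);   /* rank in the merged intracommunicator */
    MPI_Barrier(intra);
    MPI_Comm_free(&intra);
    MPI_Finalize();
    return 0;
}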
Example #5
int main(int argc, char *argv[])
{
	int rank;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

	MPI_Comm parentcomm, intercomm;

	/* If child, finalize */
	MPI_Comm_get_parent(&parentcomm);
	if (parentcomm != MPI_COMM_NULL)
	  	goto out;

	/* Set add-host info */
	MPI_Info info;
	MPI_Info_create(&info);
	MPI_Info_set(info,"add-host","grsacc18");

	/* Spawn the children */
	printf("all info set, ready to spawn\n");
	MPI_Comm_spawn("/home/grsprabh/resmgmt/branches/Malleability/workdir/addhosttest",MPI_ARGV_NULL,1,info,0,MPI_COMM_WORLD,&intercomm,MPI_ERRCODES_IGNORE);
	printf("spawn returned successfully\n");

out:
	printf("spawn completed successfully\n");
	MPI_Finalize();

	return 0;
}
Example #6
/*@C
   PetscHMPISpawn - Initialize additional processes to be used as "worker" processes. This is not generally
     called by users. One should use -hmpi_spawn_size <n> to indicate that you wish to have n-1 new MPI
     processes spawned for each current process.

   Not Collective (could make collective on MPI_COMM_WORLD, generate one huge comm and then split it up)

   Input Parameter:
.  nodesize - size of each compute node that will share processors

   Options Database:
.   -hmpi_spawn_size nodesize

   Notes: This is only supported on systems with an MPI 2 implementation that includes the MPI_Comm_spawn() routine.

$    Comparison of two approaches for HMPI usage (MPI started with N processes)
$
$    -hmpi_spawn_size <n> requires MPI 2, results in n*N total processes with N directly used by application code
$                                           and n-1 worker processes (used by PETSc) for each application node.
$                           You MUST launch MPI so that only ONE MPI process is created for each hardware node.
$
$    -hmpi_merge_size <n> results in N total processes, N/n used by the application code and the rest worker processes
$                            (used by PETSc)
$                           You MUST launch MPI so that n MPI processes are created for each hardware node.
$
$    petscmpiexec -n 2 ./ex1 -hmpi_spawn_size 3 gives 2 application nodes (and 4 PETSc worker nodes)
$    petscmpiexec -n 6 ./ex1 -hmpi_merge_size 3 gives the SAME 2 application nodes and 4 PETSc worker nodes
$       This is what you would use if each of the computer's hardware nodes had 3 CPUs.
$
$      These are intended to be used in conjunction with USER HMPI code. The user will have 1 process per
$   computer (hardware) node (where the computer node has p cpus), the user's code will use threads to fully
$   utilize all the CPUs on the node. The PETSc code will have p processes to fully use the compute node for
$   PETSc calculations. The user THREADS and PETSc PROCESSES will NEVER run at the same time so the p CPUs
$   are always working on p tasks, never more than p.
$
$    See PCHMPI for a PETSc preconditioner that can use this functionality
$

   For both PetscHMPISpawn() and PetscHMPIMerge() PETSC_COMM_WORLD consists of one process per "node", PETSC_COMM_LOCAL_WORLD
   consists of all the processes in a "node."

   In both cases the user's code is running ONLY on PETSC_COMM_WORLD (that was newly generated by running this command).

   Level: developer

   Concepts: HMPI

.seealso: PetscFinalize(), PetscInitializeFortran(), PetscGetArgs(), PetscHMPIFinalize(), PetscInitialize(), PetscHMPIMerge(), PetscHMPIRun()

@*/
PetscErrorCode  PetscHMPISpawn(PetscMPIInt nodesize)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  MPI_Comm       parent,children;

  PetscFunctionBegin;
  ierr = MPI_Comm_get_parent(&parent);CHKERRQ(ierr);
  if (parent == MPI_COMM_NULL) {  /* the original processes started by user */
    char programname[PETSC_MAX_PATH_LEN];
    char **argv;

    ierr = PetscGetProgramName(programname,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);
    ierr = PetscGetArguments(&argv);CHKERRQ(ierr);
    ierr = MPI_Comm_spawn(programname,argv,nodesize-1,MPI_INFO_NULL,0,PETSC_COMM_SELF,&children,MPI_ERRCODES_IGNORE);CHKERRQ(ierr);
    ierr = PetscFreeArguments(argv);CHKERRQ(ierr);
    ierr = MPI_Intercomm_merge(children,0,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);

    ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
    ierr = PetscInfo2(0,"PETSc HMPI successfully spawned: number of nodes = %d node size = %d\n",size,nodesize);CHKERRQ(ierr);

    saved_PETSC_COMM_WORLD = PETSC_COMM_WORLD;
  } else { /* worker nodes that get spawned */
    ierr            = MPI_Intercomm_merge(parent,1,&PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    ierr            = PetscHMPIHandle(PETSC_COMM_LOCAL_WORLD);CHKERRQ(ierr);
    PetscHMPIWorker = PETSC_TRUE; /* so that PetscHMPIFinalize() will not attempt a broadcast from this process */
    PetscEnd();  /* cannot continue into user code */
  }
  PetscFunctionReturn(0);
}
Example #7
int main (int argc, const char *argv[])
{
	double		pi,
				avepi = 0.;

	int         taskid,
                size,
                rc;

    MPI_Comm    everyone;
    MPI_Status  status;

	MPI_Init(&argc, (char***)&argv);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_rank(MPI_COMM_WORLD, &taskid);

	MPI_Comm_spawn("COMP428-A1-Parallel-Spawn-Slave", MPI_ARGV_NULL, MAXPROCS,  MPI_INFO_NULL, 0, MPI_COMM_SELF, &everyone, MPI_ERRCODES_IGNORE);

    int i;
    for (i = 0; i < MAXPROCS; i++) {
        MPI_Recv(&pi, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, everyone, &status);
        printf("Process Master: received %.10f by %d\n", pi, status.MPI_SOURCE);
    }
    

	MPI_Finalize();

	return (EXIT_SUCCESS);
}
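The spawned "COMP428-A1-Parallel-Spawn-Slave" is not listed. A minimal sketch of the worker side, assuming each worker only has to produce one double and send it to rank 0 of the parent group (the pi computation below is a stand-in, not the assignment's actual code):

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    int taskid, i, darts = 100000, hits = 0;
    double x, y, pi;
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_get_parent(&parent);

    /* trivial Monte Carlo estimate of pi, seeded per worker */
    srand(taskid + 1);
    for (i = 0; i < darts; i++) {
        x = (double)rand() / RAND_MAX;
        y = (double)rand() / RAND_MAX;
        if (x * x + y * y <= 1.0)
            hits++;
    }
    pi = 4.0 * (double)hits / (double)darts;

    /* one message per worker, matching the master's MAXPROCS receives */
    MPI_Send(&pi, 1, MPI_DOUBLE, 0, 0, parent);

    MPI_Finalize();
    return 0;
}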
Example #8
int
main (int argc, char **argv)
{
  int rank;
  int size;
  int *error_codes;
  int spawn_counter = 0;
  char *slave_argv[] = { "arg1", "arg2", 0 };
  MPI_Comm spawn;

  MPI_Init(&argc, &argv);

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  if (rank == 0)
  {
    printf("[master] running on %i processors\n", size);

    while (1)
    {
      printf("[master] (%i) forking processes\n", spawn_counter++);
      error_codes = (int*) malloc(sizeof(int)*size);
      MPI_Comm_spawn("./slave", slave_argv, size, MPI_INFO_NULL, 0, MPI_COMM_SELF, &spawn, error_codes);
      printf("[master] waiting at barrier\n");
      MPI_Barrier(spawn);
      free(error_codes);
    }
  }

  MPI_Finalize();
}
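The "./slave" program is not shown; all the master needs from it is the matching barrier on the intercommunicator. A minimal sketch:

#include <mpi.h>

int main(int argc, char **argv)
{
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);

    /* An intercommunicator barrier is collective over both groups, so this
       matches MPI_Barrier(spawn) in the master. */
    MPI_Barrier(parent);

    /* The master never disconnects (and never finalizes, since it loops
       forever), so how long the slave lingers in MPI_Finalize is
       implementation dependent. */
    MPI_Finalize();
    return 0;
}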
Example #9
int main( int argc, char *argv[] )
{
    char str[10];
    int err=0, errcodes[256], rank, nprocs;
    MPI_Comm intercomm;

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD,&nprocs); 

    if (nprocs != 4) {
        printf("Run this program with 4 processes\n");
        MPI_Abort(MPI_COMM_WORLD,1);
    }

    err = MPI_Comm_spawn("child", MPI_ARGV_NULL, 4,
                         MPI_INFO_NULL, 1, MPI_COMM_WORLD,
                         &intercomm, errcodes);  
    if (err) printf("Error in MPI_Comm_spawn\n");

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 3) {
        err = MPI_Recv(str, 3, MPI_CHAR, 3, 0, intercomm, MPI_STATUS_IGNORE);
        printf("Parent received from child: %s\n", str);
        fflush(stdout);
        
        err = MPI_Send("bye", 4, MPI_CHAR, 3, 0, intercomm); 
    }

    MPI_Finalize();

    return 0;
}
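The matching "child" program is not listed. A minimal sketch, assuming rank 3 of the children simply mirrors the Recv/Send pair above:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    char str[10];
    int rank;
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);          /* intercommunicator back to the parents */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 3) {
        MPI_Send("hi", 3, MPI_CHAR, 3, 0, parent);                    /* matches the parent's MPI_Recv */
        MPI_Recv(str, 4, MPI_CHAR, 3, 0, parent, MPI_STATUS_IGNORE);  /* receives "bye" */
        printf("Child received from parent: %s\n", str);
        fflush(stdout);
    }

    MPI_Finalize();
    return 0;
}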
Example #10
void mpi_comm_spawn_f(char *command, char *argv, MPI_Fint *maxprocs, 
		      MPI_Fint *info, MPI_Fint *root, MPI_Fint *comm, 
		      MPI_Fint *intercomm, MPI_Fint *array_of_errcodes,
		      MPI_Fint *ierr, int cmd_len, int string_len)
{
    MPI_Comm c_comm, c_new_comm;
    MPI_Info c_info;
    int size;
    int *c_errs;
    char **c_argv;
    char *c_command;
    OMPI_ARRAY_NAME_DECL(array_of_errcodes);
    
    c_comm = MPI_Comm_f2c(*comm);
    c_info = MPI_Info_f2c(*info);
    MPI_Comm_size(c_comm, &size);
    ompi_fortran_string_f2c(command, cmd_len, &c_command);

    /* It's allowed to ignore the errcodes */

    if (OMPI_IS_FORTRAN_ERRCODES_IGNORE(array_of_errcodes)) {
        c_errs = MPI_ERRCODES_IGNORE;
    } else {
        OMPI_ARRAY_FINT_2_INT_ALLOC(array_of_errcodes, size);
        c_errs = OMPI_ARRAY_NAME_CONVERT(array_of_errcodes);
    }

    /* It's allowed to have no argv */

    if (OMPI_IS_FORTRAN_ARGV_NULL(argv)) {
        c_argv = MPI_ARGV_NULL;
    } else {
        ompi_fortran_argv_f2c(argv, string_len, string_len, &c_argv);
    }

    *ierr = OMPI_INT_2_FINT(MPI_Comm_spawn(c_command, c_argv, 
					   OMPI_FINT_2_INT(*maxprocs),
					   c_info,
					   OMPI_FINT_2_INT(*root),
					   c_comm, &c_new_comm, c_errs));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        *intercomm = MPI_Comm_c2f(c_new_comm);
    }
    free(c_command);
    if (MPI_ARGV_NULL != c_argv && NULL != c_argv) {
        opal_argv_free(c_argv);
    }
    if (!OMPI_IS_FORTRAN_ERRCODES_IGNORE(array_of_errcodes)) {
	OMPI_ARRAY_INT_2_FINT(array_of_errcodes, size);
    } else {
	OMPI_ARRAY_FINT_2_INT_CLEANUP(array_of_errcodes);
    }
}
Example #11
/**
 * Creates a new instance of an RP RNA (neural network) process with the given arguments using MPI
 *
 * Parameters:
 * 		const char *comandoRNA		- Command used to launch the RNA
 * 		int cant_neu1				- Number of neurons in hidden layer 1
 * 		int cant_neu2				- Number of neurons in hidden layer 2
 * 		float const_apr				- Learning-rate constant
 * 		float raz_mom				- Momentum ratio
 * 		int max_iter				- Maximum number of training iterations
 * 		int tipo_fun_o				- Activation function type for the neurons in the hidden layers
 * 		int tipo_fun_s				- Activation function type for the neurons in the output layer
 * 		int cant_rep				- Number of training and testing repetitions
 * 		const char *ruta			- Path to the directory containing the pattern file
 * 		const char *nombre			- Name of the pattern file (assumed to have a .dat suffix)
 * 		int pos_hijo				- Position assigned to this process in the communicator vector (assumed unique)
 *
 *	Output
 *		int							- 1 if the instance could be created, 0 otherwise
 */
int instancia_rna_mpi(char *comandoRNA, int cant_neu1, int cant_neu2, float const_apr, float raz_mom, int max_iter,
		int tipo_fun_o, int tipo_fun_s, int cant_rep, const char *ruta, const char *nombre, int pos_hijo){
	// Variables
	int result = 0;			// Indicates whether the instance was created successfully

	// Check that the data has been prepared beforehand
	if (preparado_ && esquema_ == MPI){
		#ifdef HAVE_MPI
			int ii;					// Counter
			char **params;			// Parameter strings
			int tam_int = 0;		// Maximum number of decimal digits in an int
			int num = INT_MAX;		// Helper value used to determine the maximum number of digits

			// Maximum number of decimal digits in an integer
			do{
				tam_int++;
				num /= 10;
			}while (num > 0);

			// Convert the parameter values to strings for launching the process
			params = calloc(15, sizeof(char *));
			params[0] = (char *) calloc(20, sizeof(char)); sprintf(params[0], "--debug=n");
			params[1] = (char *) calloc(20, sizeof(char)); sprintf(params[1], "--mensajes=n");
			params[2] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[2], "1");
			params[3] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[3], "%d", cant_neu1);
			params[4] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[4], "%d", cant_neu2);
			params[5] = (char *) calloc(tam_int + 8, sizeof(char)); sprintf(params[5], "%.5f", const_apr);
			params[6] = (char *) calloc(tam_int + 8, sizeof(char)); sprintf(params[6], "%.3f", raz_mom);
			params[7] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[7], "%d", max_iter);
			params[8] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[8], "%d", tipo_fun_o);
			params[9] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[9], "%d", tipo_fun_s);
			params[10] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[10], "%d", cant_rep);
			params[11] = (char *) calloc(strlen(ruta) + 5, sizeof(char)); sprintf(params[11], "%s", ruta);
			params[12] = (char *) calloc(strlen(nombre) + 5, sizeof(char)); sprintf(params[12], "%s", nombre);
			params[13] = (char *) calloc(tam_int, sizeof(char)); sprintf(params[13], "3");
			params[14] = NULL;

			// Dynamically create a new process that will run the RNA with the given parameters
			if (MPI_Comm_spawn(comandoRNA, params, 1, MPI_INFO_NULL, 0, MPI_COMM_SELF, &(datos_mpi.vec_hijos[pos_hijo]),
							  MPI_ERRCODES_IGNORE) == MPI_SUCCESS){
				result = 1;
			}

			// Free the memory of the parameter vector
			for (ii = 0; ii < 14; ii++)
				free(params[ii]);
			free(params);
		#endif
	}

	// Result
	return result;
}
Example #12
int main(int argc, char* argv[])
{
    int msg, rc;
    MPI_Comm parent, child;
    int rank, size;
    char hostname[512];
    pid_t pid;

    pid = getpid();
    printf("[pid %ld] starting up!\n", (long)pid);
    MPI_Init(NULL, NULL);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    printf("%d completed MPI_Init\n", rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_get_parent(&parent);
    /* If we get COMM_NULL back, then we're the parent */
    if (MPI_COMM_NULL == parent) {
        pid = getpid();
        printf("Parent [pid %ld] about to spawn!\n", (long)pid);
        if (MPI_SUCCESS != (rc = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 3, MPI_INFO_NULL, 
                                                0, MPI_COMM_WORLD, &child, MPI_ERRCODES_IGNORE))) {
            printf("Child failed to spawn\n");
            return rc;
        }
        printf("Parent done with spawn\n");
        if (0 == rank) {
            msg = 38;
            printf("Parent sending message to child\n");
            MPI_Send(&msg, 1, MPI_INT, 0, 1, child);
        }
        MPI_Comm_disconnect(&child);
        printf("Parent disconnected\n");
    } 
    /* Otherwise, we're the child */
    else {
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        gethostname(hostname, 512);
        pid = getpid();
        printf("Hello from the child %d of %d on host %s pid %ld\n", rank, 3, hostname, (long)pid);
        if (0 == rank) {
            MPI_Recv(&msg, 1, MPI_INT, 0, 1, parent, MPI_STATUS_IGNORE);
            printf("Child %d received msg: %d\n", rank, msg);
        }
        MPI_Comm_disconnect(&parent);
        printf("Child %d disconnected\n", rank);
    }

    MPI_Finalize();
    fprintf(stderr, "%d: exiting\n", pid);
    return 0;
}
Example #13
int main(int argc, char **argv) {
    int rank, size, version, subversion, namelen, universe_size;
    char processor_name[MPI_MAX_PROCESSOR_NAME], worker_program[100];
    MPI_Comm esclavos_comm;
    MPI_Init(&argc, &argv);    /* starts MPI */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    /* get current process id */
    MPI_Comm_size(MPI_COMM_WORLD, &size);    /* get number of processes */
    MPI_Get_processor_name(processor_name, &namelen);
    MPI_Get_version(&version, &subversion);
    printf("[maestro] Iniciado proceso maestro %d de %d en %s ejecutando MPI %d.%d\n", rank, size, processor_name,
           version,
           subversion);
    strcpy(worker_program, "./Debug/esclavo");
    MPI_Comm_spawn(worker_program, MPI_ARGV_NULL,ESCLAVOS, MPI_INFO_NULL, 0, MPI_COMM_SELF, &esclavos_comm,
                   MPI_ERRCODES_IGNORE);

    /* Number of interval points per slave */
    int n_esclavo = (N +1)  / ESCLAVOS;
    printf("PUNTOS POR ESCLAVO: %d\n", n_esclavo);

    /* Vector of function values */
    double dx = (double)(B - A) / (double)N ;
    double h = ((double)B - (double)A) / (2*(double)N);

    printf("DIFERENCIAL DE X: %f\n", dx);
    int i = 0;
    double y[N+1], y_esclavo[n_esclavo]; // number of points = number of intervals + 1
    double x = (double) A;

    for(i=0;i<N+1;i++){
        y[i]= x * x;
        x+=dx;
        printf("VALOR DE F(X) EN PUNTO i %d: %f\n", i, y[i]);
    }



    MPI_Bcast(&n_esclavo, 1, MPI_INT, MPI_ROOT, esclavos_comm);

    MPI_Scatter(y, n_esclavo, MPI_DOUBLE, y_esclavo, n_esclavo, MPI_DOUBLE, MPI_ROOT, esclavos_comm);

    double suma;
    MPI_Reduce(NULL, &suma, 1, MPI_DOUBLE, MPI_SUM, MPI_ROOT, esclavos_comm);
    printf("SUMA REDUCIDA ES: %f\n", suma);

    double integral = (double) dx/3 * ((double)A*(double)A + suma + (double) B * (double) B);
    printf("RESULTADO DE LA INTEGRAL: %f\n", integral);
    MPI_Comm_disconnect(&esclavos_comm);
    MPI_Finalize();
    return 0;
}
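The "./Debug/esclavo" side is not listed. A minimal sketch of a slave that mirrors the master's intercommunicator collectives; on the slaves' side the root is addressed as rank 0 of the remote (master) group, which is why a plain 0 is passed where the master passes MPI_ROOT (the weighting used by the real slave is not shown, so a plain partial sum stands in for it):

#include <mpi.h>

int main(int argc, char **argv)
{
    int n_esclavo, i;
    double suma_local = 0.0;
    MPI_Comm maestro_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&maestro_comm);      /* intercommunicator back to the master */

    /* matches MPI_Bcast(..., MPI_ROOT, esclavos_comm) in the master */
    MPI_Bcast(&n_esclavo, 1, MPI_INT, 0, maestro_comm);

    double y_esclavo[n_esclavo];
    /* matches the master's MPI_Scatter; the send arguments are ignored in the
       receiving group of an intercommunicator scatter */
    MPI_Scatter(NULL, 0, MPI_DOUBLE, y_esclavo, n_esclavo, MPI_DOUBLE, 0, maestro_comm);

    for (i = 0; i < n_esclavo; i++)
        suma_local += y_esclavo[i];

    /* matches MPI_Reduce(NULL, &suma, ..., MPI_ROOT, esclavos_comm) */
    MPI_Reduce(&suma_local, NULL, 1, MPI_DOUBLE, MPI_SUM, 0, maestro_comm);

    MPI_Comm_disconnect(&maestro_comm);      /* matches the master's disconnect */
    MPI_Finalize();
    return 0;
}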
Example #14
int main(int argc, char *argv[]) 
{ 
    int rank, size;
    int universe_size, *universe_sizep, flag;
    int lsize, rsize;
    int grank, gsize;
    MPI_Comm everyone, global;           /* intercommunicator */
    char worker_program[100];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE,  
                &universe_sizep, &flag);  
    if (!flag) { 
        universe_size = 8;
    } else 
        universe_size = *universe_sizep; 

    if( rank == 0 ) {    
        printf("univ size = %d\n", universe_size);
    }

    sprintf(worker_program, "./slave");
    MPI_Comm_spawn(worker_program, MPI_ARGV_NULL, 6,
                   MPI_INFO_NULL, 0, MPI_COMM_WORLD, &everyone,
                   MPI_ERRCODES_IGNORE);

    MPI_Comm_size(everyone, &lsize);
    MPI_Comm_remote_size(everyone, &rsize);

    MPI_Intercomm_merge(everyone, 1, &global);
    MPI_Comm_rank(global, &grank);
    MPI_Comm_size(global, &gsize);
    printf("parent %d: lsize=%d, rsize=%d, grank=%d, gsize=%d\n",
           rank, lsize, rsize, grank, gsize);

    MPI_Barrier(global);

    printf("%d: after Barrier\n", grank);

    MPI_Comm_free(&global);


    MPI_Finalize();
    return 0;
}
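The "./slave" side is not shown. A minimal sketch that mirrors the parent's merge and barrier (the parent merged with high = 1, so the workers use high = 0 and end up ordered before the parent group in the merged communicator):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank, grank, gsize;
    MPI_Comm parent, global;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_get_parent(&parent);

    MPI_Intercomm_merge(parent, 0, &global);   /* matches the parent's merge */
    MPI_Comm_rank(global, &grank);
    MPI_Comm_size(global, &gsize);
    printf("slave %d: grank=%d, gsize=%d\n", rank, grank, gsize);

    MPI_Barrier(global);                       /* matches MPI_Barrier(global) */
    MPI_Comm_free(&global);
    MPI_Finalize();
    return 0;
}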
Example #15
void LeaderNode::spawn_swing_nodes(MPI_Comm parent, MPI_Comm *child,
   uint16_t count) {
   MPI_ASSERT(parent != MPI_COMM_NULL);
   MPI_ASSERT(child != NULL);

   // Duplicate the leader comm so that the original doesn't get cluttered.
   MPI_Comm_dup(parent, child);

   // Spawn the swing node process group.
   MPI_Comm_spawn("./swing_layer", MPI_ARGV_NULL, count, MPI_INFO_NULL, 0,
         *child, child, MPI_ERRCODES_IGNORE);

   // Add the new swing node set to the listing of swing nodes for easy cleanup
   // later.
   swing_nodes.push_back(std::make_pair(*child, count));

   // Update the number of swing nodes.
   num_swing_nodes += count;
}
Example #16
int main( int argc, char *argv[] ) {
  int np = NUM_SPAWNS;
  int my_rank, size;
  int errcodes[NUM_SPAWNS];
  MPI_Comm allcomm;
  MPI_Comm intercomm;

  MPI_Init( &argc, &argv );
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);


  MPI_Comm_spawn( (char*)"./spawntest_child", MPI_ARGV_NULL, np,
                  MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm, errcodes );

  if ( intercomm == MPI_COMM_NULL ) {
      fprintf(stdout, "intercomm is null\n");
  }

  MPI_Intercomm_merge(intercomm, 0, &allcomm);

  MPI_Comm_rank(allcomm, &my_rank);
  MPI_Comm_size(allcomm, &size);

  /* Without the Free of allcomm, the children *must not exit* until the
     master calls MPI_Finalize. */
  MPI_Barrier( allcomm );
  /* According to 10.5.4, case 1b in MPI2.2, the children and master are
     still connected unless MPI_Comm_disconnect is used with allcomm. 
     MPI_Comm_free is not sufficient */
  MPI_Comm_free( &allcomm );
  MPI_Comm_disconnect( &intercomm );

  fprintf(stdout, "%s:%d: Sleep starting; children should exit\n",
          __FILE__, __LINE__ );fflush(stdout);
  sleep(30);
  fprintf(stdout, 
          "%s:%d: Sleep done; all children should have already exited\n", 
          __FILE__, __LINE__ );fflush(stdout);

  MPI_Finalize();
  return 0;
}
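The "./spawntest_child" side is not shown. For the comments above to hold, the child has to mirror the merge, the barrier, the free and, crucially, the MPI_Comm_disconnect; a minimal sketch:

#include <mpi.h>

int main(int argc, char *argv[])
{
    MPI_Comm parent, allcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_get_parent(&parent);

    MPI_Intercomm_merge(parent, 1, &allcomm);  /* matches the master's merge (high = 0 there) */
    MPI_Barrier(allcomm);                      /* matches MPI_Barrier(allcomm) */
    MPI_Comm_free(&allcomm);

    /* matches the master's MPI_Comm_disconnect(&intercomm); after this the
       child is no longer connected and can finalize and exit right away,
       which is exactly what the master's sleep is checking for */
    MPI_Comm_disconnect(&parent);

    MPI_Finalize();
    return 0;
}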
Example #17
int
main(int argc, char* argv[])
{
  int rank, size;

  MPI_Init(0, 0);

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  printf("MAIN: %i/%i\n", rank, size);

  int maxprocs = 1;
  int codes[maxprocs];

  MPI_Comm comm;
  MPI_Comm_spawn("./child.x", NULL, maxprocs, MPI_INFO_NULL, 0,
                 MPI_COMM_WORLD, &comm, codes);

  MPI_Finalize();
  return 0;
}
Example #18
void mpif_comm_spawn_(char *command, char **argv, int *maxprocs,
		      MPI_Fint *info, int *root, MPI_Fint *comm,
		      MPI_Fint *intercomm, int *array_of_errcodes, int*error)
{
  MPI_Comm c_comm = MPI_Comm_f2c(*comm);
  MPI_Info c_info = MPI_Info_f2c(*info);
  ALLOCATE_ITEMS(MPI_Comm, *maxprocs, c_intercomm, p_intercomm);

  int i;
  for(i=0; i<*maxprocs;i++)
    p_intercomm[i] = MPI_Comm_f2c(intercomm[i]);

  *error = MPI_Comm_spawn(command, argv, *maxprocs,
			  c_info, *root, c_comm,
			  p_intercomm, array_of_errcodes);
  for(i=0; i<*maxprocs;i++)
    intercomm[i] = MPI_Comm_c2f(p_intercomm[i]);

  FREE_ITEMS(*maxprocs, p_intercomm);
}
Example #19
Albany::MesoScaleLinkProblem::
MesoScaleLinkProblem(const Teuchos::RCP<Teuchos::ParameterList>& params_,
                     const Teuchos::RCP<ParamLib>& paramLib_,
                     const int numDim_,
                     Teuchos::RCP<const Teuchos::Comm<int> >& commT_):
  Albany::AbstractProblem(params_, paramLib_, numDim_),
  haveSource(false),
  numDim(numDim_),
  commT(commT_),
  mpi_comm(Albany::getMpiCommFromTeuchosComm(commT_)) {

  TEUCHOS_TEST_FOR_EXCEPTION(commT->getSize() != 1, std::logic_error,
                             "MesoScale bridge only supports 1 master processor currently:\n\tRun with \"mpirun -np 1 Albany\"");

  std::string& method = params->get("Name", "MesoScaleLink ");
  *out << "Problem Name = " << method << std::endl;

  haveSource =  params->isSublist("Source Functions");

  matModel = params->sublist("Material Model").get("Model Name", "LinearMesoScaleLink");

  TEUCHOS_TEST_FOR_EXCEPTION(matModel != "Bridge", std::logic_error,
                             "Must specify \"Bridge\" for the material model in the input file.");

  exeName = params->sublist("Material Model").get("Executable", "zzz");

  *out << "Establishing MPI bridging link to: " << exeName << std::endl;


  numMesoPEs = params->sublist("Material Model").get("Num Meso PEs", 1);

  interCommunicator = Teuchos::rcp(new MPI_Comm());

  // Fire off the remote processes

  MPI_Comm_spawn(&exeName[0], MPI_ARGV_NULL, numMesoPEs,
                 MPI_INFO_NULL, 0, mpi_comm, interCommunicator.get(), MPI_ERRCODES_IGNORE);


}
Example #20
/**
 * Main loop of the function.
 */
int main(int argc, char **argv) {
	int rank, size, num_workers;
	char worker_name[] = "./demo/worker";
	MPI_Comm everyone;

	MPI_Init(&argc, &argv);

	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	num_workers = atoi(*++argv);	

	MPI_Comm_spawn(worker_name, MPI_ARGV_NULL, num_workers,
			MPI_INFO_NULL, 0, MPI_COMM_SELF, &everyone,
			MPI_ERRCODES_IGNORE);

	sleep(3);
	printf("Master woke up.\n");

	MPI_Finalize();
	
	return 0;
}
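The "./demo/worker" program is not listed; since the master never communicates with it, a minimal sketch only needs to initialize and finalize (how long MPI_Finalize blocks in the worker depends on the implementation, because parent and children stay connected):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank;
    MPI_Comm parent;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_get_parent(&parent);

    if (parent != MPI_COMM_NULL)
        printf("Worker %d started by a parent.\n", rank);

    MPI_Finalize();
    return 0;
}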
Example #21
/* **** PA_Exec ****
 * The ParallelAgent's equivalent of "main".  This function spawns the
 * child processes, sends them the data, and gets back the results.
 */
SEXP PA_Exec(SEXP scriptLocn, SEXP sxInputVector) {
	int iFunction;
	int iSpawnFlag = 1;
	int iNumProcs;
	int ipDims[10]= { 0,0,0,0,0,0,0,0,0,0 };
	double *dpA = NULL;
	double *dpB = NULL;
	int iStatus;
	int returnValue;
	SEXP sRet;
#ifndef DONT_SPAWN_R
	char *cpProgram = "R";   /* Program to call; Usually "R" */

	char *child_args[] = {
		"BATCH",
		"--no-save",
		CHAR(STRING_ELT((scriptLocn),0)),
		"abc.out",
		NULL };

#else
	/* There are four ways to call the child processes (all go through
	 * MPI_COMM_SPAWN):
	 * 1.  Spawn a copy of R and let it call the child process function through
	 *        an R script.  This approach simplified the MPI-BLACS interaction,
	 *        at the cost of major overhead.
	 * 2.  Spawn a shell script (dfCRDriver) which runs the child process
	 *        driver.  Spawning the program directly is better, but there is a
	 *        problem with needing the shared library in the library path.
	 * 3.  Spawn a shell (sh) which first adds the path to the scalapack.so
	 *        library to LD_LIBRARY_PATH and then calls the driver program.
	 *        This eliminates the need for the extra script (dfCRDriver), while
	 *        also being several thousandths of a second faster.
	 * 4.  Spawn the driver program directly.  This requires building the
	 *        program differently so that it doesn't need the scalapack.so
	 *        shared library.  This is the preferred/current method.
	 */
	char *cpProgram;    /* =R_PACKAGE_DIR"/exec/dfCRDriver.sh"; */
	char *child_args[] = { NULL, NULL };
	int iLength;

	/* The scriptLocn (script location) variable contains the path to the
	 * executable directory (followed by the script name).  Extract the path,
	 * and use it for the executable's path.
	 */
	cpProgram = (char *) (CHAR(STRING_ELT((scriptLocn), 0)));
	iLength = strrchr(cpProgram, '/') - cpProgram;
	if (iLength < 0) {
		Rprintf("Path to script is not complete.  Unable to continue.\n");
		return R_NilValue;
	}
	cpProgram = (char *) malloc(sizeof(char) * (iLength + 12));
	if (cpProgram == NULL) {
		Rprintf("Memory allocation (%d bytes) failed!\n", sizeof(char) *
				(iLength + 12));
		return R_NilValue;
	}
	*(cpProgram) = '\0';
	strncat(cpProgram, CHAR(STRING_ELT((scriptLocn),0)), iLength);
	strncat(cpProgram, "/CRDriver", 10);

	D_Rprintf(("Child process: \"%s\" \"%s\" \"%s\"\n", cpProgram, child_args[0], child_args[1]));
#endif    /* Endof  If DONT_SPAWN_R is defined */

	/*  Begin by unpacking the input vector into all of the separate variables
	 *  that get their values from it */
	if (PA_UnpackInput(sxInputVector, ipDims, &dpA, &dpB, &iNumProcs,
				&iFunction, &iSpawnFlag) != 0) {
		free(cpProgram);
		return R_NilValue;
	}

#if 0
// Guru
	int ig;
	for ( ig = 0; ig < 10; ig++ )
	{
		fprintf(stdout, "ipDims[%d] = %d\n", ig, ipDims[ig] );fflush(stdout);
	}


#endif

	/*  Initialize MPI (if it is already initialized, it won't be
	 *  initialized again).	*/
	if (PA_Init() != 0){
		Rprintf(" ERROR[1]: Failed while initializing MPI \n");
		free(cpProgram);
		return R_NilValue;
	}

	if (iSpawnFlag != 0  && iGlobalNumChildren != 0) {
		Rprintf(" Error:  Attempt to spawn a new grid without releasing the previous grid.\n");
		return R_NilValue;
	}

	if(iSpawnFlag == 0 && iGlobalNumChildren == 0){
		Rprintf(" Error: Process Grid not present and Spawn option is set FALSE \n");
		return R_NilValue;
	}

	int *ipErrcodes = (int *)Calloc(iNumProcs, int);



	if ( iSpawnFlag != 0 ) {
		/* Begin:  Spawn the child processes */
		D_Rprintf(("PA: Preparing to spawn %d child processes.\n", iNumProcs)); fflush(stdout);
		iStatus = MPI_Comm_spawn(cpProgram, child_args, iNumProcs, MPI_INFO_NULL,
				0, MPI_COMM_WORLD, &childComm, ipErrcodes);
		free(cpProgram);
		if (iStatus != MPI_SUCCESS) {
			Rprintf(" ERROR:  Failed to spawn (%d) child processes.\n", iNumProcs);
			return R_NilValue;
		}

		D_Rprintf(("SPAWNING SUCCESSFUL\n")); fflush(stdout);
		/* End:  Spawn the child processes */
		iGlobalNumChildren = iNumProcs;
		iNprows = ipDims[6];
		iNpcols = ipDims[7];
	}


	/* SPECIAL for SVD */
	/* If the function is SVD, the child process needs to know the nu,nv
	 * parameters.
	 */

	if (iFunction == 2) {
		ipDims[2] = (int) dpB[0];
		ipDims[3] = (int) dpB[1];
	}

	
	/* DATA DISTRIBUTION */
	/* The data is distributed by the PA to all of the child processes. */
	if ((returnValue = PA_SendData(ipDims, dpA, dpB)) == 0)	{
		D_Rprintf(("PA: DATA SENT TO CHILD PROCESSES.\n")); fflush(stdout);
	} else {	/* The send data failed, */ 
		Rprintf("ERROR [1] : DATA COULD NOT BE SENT TO CHILD PROCESSES.\n");
		iGlobalNumChildren = 0;
		iNprows = 0;
		iNpcols = 0;
		return R_NilValue;
	}
	D_Rprintf(("PA: back to exec.\n")); fflush(stdout);

	/* If the release flag == 1, then the grid will be (or was) released. */
	if (ipDims[9] == 1) 
	{
		iGlobalNumChildren = 0;
		iNprows = 0;
		iNpcols = 0;
	}

	/* If the function is sla.gridInit or sla.gridExit, just return. */
	if (iFunction == 0) 
	{
		// MPI_Comm_free(&intercomm);
		
		return R_NilValue;
	}

	/* GET BACK THE RESULT */
	sRet = PA_RecvResult(ipDims);

	return sRet;
}
Example #22
int main(int argc, char *argv[])
{
    int error;
    int rank, size;
    char *argv1[2] = { (char*)"connector", NULL };
    char *argv2[2] = { (char*)"acceptor", NULL };
    MPI_Comm comm_connector, comm_acceptor, comm_parent, comm;
    char port[MPI_MAX_PORT_NAME];
    MPI_Status status;
    MPI_Info spawn_path = MPI_INFO_NULL;
    int verbose = 0;

    if (getenv("MPITEST_VERBOSE"))
    {
	verbose = 1;
    }

    IF_VERBOSE(("init.\n"));
    error = MPI_Init(&argc, &argv);
    check_error(error, "MPI_Init");

    /* To improve reporting of problems about operations, we
       change the error handler to errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    MPI_Comm_set_errhandler( MPI_COMM_SELF, MPI_ERRORS_RETURN );

    IF_VERBOSE(("size.\n"));
    error = MPI_Comm_size(MPI_COMM_WORLD, &size);
    check_error(error, "MPI_Comm_size");

    IF_VERBOSE(("rank.\n"));
    error = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    check_error(error, "MPI_Comm_rank");

    if (argc == 1)
    {
	/* Make sure that the current directory is in the path.
	   Not all implementations may honor or understand this, but
	   it is highly recommended as it gives users a clean way
	   to specify the location of the executable without
	   specifying a particular directory format (e.g., this 
	   should work with both Windows and Unix implementations) */
	error = MPI_Info_create( &spawn_path );
	check_error( error, "MPI_Info_create" );
	error = MPI_Info_set( spawn_path, (char*)"path", (char*)"." );
	check_error( error, "MPI_Info_set" );

	IF_VERBOSE(("spawn connector.\n"));
	error = MPI_Comm_spawn((char*)"spaconacc", argv1, 1, spawn_path, 0, 
			       MPI_COMM_SELF, &comm_connector, 
			       MPI_ERRCODES_IGNORE);
	check_error(error, "MPI_Comm_spawn");

	IF_VERBOSE(("spawn acceptor.\n"));
	error = MPI_Comm_spawn((char*)"spaconacc", argv2, 1, spawn_path, 0, 
			       MPI_COMM_SELF, &comm_acceptor, 
			       MPI_ERRCODES_IGNORE);
	check_error(error, "MPI_Comm_spawn");
	error = MPI_Info_free( &spawn_path );
	check_error( error, "MPI_Info_free" );

	MPI_Comm_set_errhandler( comm_connector, MPI_ERRORS_RETURN );
	MPI_Comm_set_errhandler( comm_acceptor, MPI_ERRORS_RETURN );

	IF_VERBOSE(("recv port.\n"));
	error = MPI_Recv(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, 
			 comm_acceptor, &status);
	check_error(error, "MPI_Recv");

	IF_VERBOSE(("send port.\n"));
	error = MPI_Send(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, 
			 comm_connector);
	check_error(error, "MPI_Send");

	IF_VERBOSE(("barrier acceptor.\n"));
	error = MPI_Barrier(comm_acceptor);
	check_error(error, "MPI_Barrier");

	IF_VERBOSE(("barrier connector.\n"));
	error = MPI_Barrier(comm_connector);
	check_error(error, "MPI_Barrier");

        error = MPI_Comm_free(&comm_acceptor);
	check_error(error, "MPI_Comm_free");
        error = MPI_Comm_free(&comm_connector);
	check_error(error, "MPI_Comm_free");

	printf(" No Errors\n");
    }
    else if ((argc == 2) && (strcmp(argv[1], "acceptor") == 0))
    {
	IF_VERBOSE(("get_parent.\n"));
	error = MPI_Comm_get_parent(&comm_parent);
	check_error(error, "MPI_Comm_get_parent");
	if (comm_parent == MPI_COMM_NULL)
	{
	    printf("acceptor's parent is NULL.\n");fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, -1);
	}
	IF_VERBOSE(("open_port.\n"));
	error = MPI_Open_port(MPI_INFO_NULL, port);
	check_error(error, "MPI_Open_port");

	MPI_Comm_set_errhandler( comm_parent, MPI_ERRORS_RETURN );

	IF_VERBOSE(("0: opened port: <%s>\n", port));
	IF_VERBOSE(("send.\n"));
	error = MPI_Send(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, comm_parent);
	check_error(error, "MPI_Send");

	IF_VERBOSE(("accept.\n"));
	error = MPI_Comm_accept(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, &comm);
	check_error(error, "MPI_Comm_accept");

	IF_VERBOSE(("close_port.\n"));
	error = MPI_Close_port(port);
	check_error(error, "MPI_Close_port");

	IF_VERBOSE(("disconnect.\n"));
	error = MPI_Comm_disconnect(&comm);
	check_error(error, "MPI_Comm_disconnect");

	IF_VERBOSE(("barrier.\n"));
	error = MPI_Barrier(comm_parent);
	check_error(error, "MPI_Barrier");

	MPI_Comm_free( &comm_parent );
    }
    else if ((argc == 2) && (strcmp(argv[1], "connector") == 0))
    {
	IF_VERBOSE(("get_parent.\n"));
	error = MPI_Comm_get_parent(&comm_parent);
	check_error(error, "MPI_Comm_get_parent");
	if (comm_parent == MPI_COMM_NULL)
	{
	    printf("connector's parent is NULL.\n");fflush(stdout);
	    MPI_Abort(MPI_COMM_WORLD, -1);
	}

	MPI_Comm_set_errhandler( comm_parent, MPI_ERRORS_RETURN );
	IF_VERBOSE(("recv.\n"));
	error = MPI_Recv(port, MPI_MAX_PORT_NAME, MPI_CHAR, 0, 0, 
			 comm_parent, &status);
	check_error(error, "MPI_Recv");

	IF_VERBOSE(("1: received port: <%s>\n", port));
	IF_VERBOSE(("connect.\n"));
	error = MPI_Comm_connect(port, MPI_INFO_NULL, 0, MPI_COMM_SELF, &comm);
	check_error(error, "MPI_Comm_connect");

	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
	IF_VERBOSE(("disconnect.\n"));
	error = MPI_Comm_disconnect(&comm);
	check_error(error, "MPI_Comm_disconnect");

	IF_VERBOSE(("barrier.\n"));
	error = MPI_Barrier(comm_parent);
	check_error(error, "MPI_Barrier");

	MPI_Comm_free( &comm_parent );
    }
    else
    {
	printf("invalid command line.\n");fflush(stdout);
	{
	    int i;
	    for (i=0; i<argc; i++)
	    {
		printf("argv[%d] = <%s>\n", i, argv[i]);
	    }
	}
	fflush(stdout);
	MPI_Abort(MPI_COMM_WORLD, -2);
    }

    MPI_Finalize();
    return 0;
}
Example #23
        int MpiCommunicator::init( int minId, long thecomm_ )
        {
            VT_FUNC_I( "MpiCommunicator::init" );

            assert( sizeof(thecomm_) >= sizeof(MPI_Comm) );
            MPI_Comm thecomm = (MPI_Comm)thecomm_;

            // turn wait mode on for intel mpi if possible
            // this should greatly improve performance for intel mpi
            PAL_SetEnvVar( "I_MPI_WAIT_MODE", "enable", 0);

            int flag;
            MPI_Initialized( &flag );
            if ( ! flag ) {
                int p;
                //!! FIXME passing NULL ptr breaks mvapich1 mpi implementation
                MPI_Init_thread( 0, NULL, MPI_THREAD_MULTIPLE, &p );
                if( p != MPI_THREAD_MULTIPLE ) {
                    // can't use Speaker yet, need Channels to be inited
                    std::cerr << "[CnC] Warning: not MPI_THREAD_MULTIPLE (" << MPI_THREAD_MULTIPLE << "), but " << p << std::endl;
                }
            } else if( thecomm == 0 ) {
                CNC_ABORT( "Process has already been initialized" );
            }


            MPI_Comm myComm = MPI_COMM_WORLD;
            int rank;
            MPI_Comm parentComm = MPI_COMM_NULL;  /* stays NULL when a custom communicator is supplied */
            if( thecomm == 0 ) {
                MPI_Comm_get_parent( &parentComm );
            } else {
                m_customComm = true;
                m_exit0CallOk = false;
                myComm = thecomm;
            }
            MPI_Comm_rank( myComm, &rank );
            
            // father of all checks if he's requested to spawn processes:
            if ( rank == 0 && parentComm == MPI_COMM_NULL ) {
                // Ok, let's spawn the clients.
                // I need some information for the startup.
                // 1. Name of the executable (default is the current exe)
                const char * _tmp = getenv( "CNC_MPI_SPAWN" );
                if ( _tmp ) {
                    int nClientsToSpawn = atol( _tmp );
                    _tmp = getenv( "CNC_MPI_EXECUTABLE" );
                    std::string clientExe( _tmp ? _tmp : "" );
                    if( clientExe.empty() ) clientExe = PAL_GetProgname();
                    CNC_ASSERT( ! clientExe.empty() );
                    // 3. Special setting for MPI_Info: hosts
                    const char * clientHost = getenv( "CNC_MPI_HOSTS" );
                    
                    // Prepare MPI_Info object:
                    MPI_Info clientInfo = MPI_INFO_NULL;
                    if ( clientHost ) {
                        MPI_Info_create( &clientInfo );
                        if ( clientHost ) {
                            MPI_Info_set( clientInfo, const_cast< char * >( "host" ), const_cast< char * >( clientHost ) );
                            // can't use Speaker yet, need Channels to be inited
                            std::cerr << "[CnC " << rank << "] Set MPI_Info_set( \"host\", \"" << clientHost << "\" )\n";
                        }
                    }
                    // Now spawn the client processes:
                    // can't use Speaker yet, need Channels to be inited
                    std::cerr << "[CnC " << rank << "] Spawning " << nClientsToSpawn << " MPI processes" << std::endl;
                    int* errCodes = new int[nClientsToSpawn];
                    MPI_Comm interComm;
                    int err = MPI_Comm_spawn( const_cast< char * >( clientExe.c_str() ),
                                              MPI_ARGV_NULL, nClientsToSpawn,
                                              clientInfo, 0, MPI_COMM_WORLD,
                                              &interComm, errCodes );
                    delete [] errCodes;
                    if ( err ) {
                        // can't use Speaker yet, need Channels to be inited
                        std::cerr << "[CnC " << rank << "] Error in MPI_Comm_spawn. Skipping process spawning";
                    } else {
                        MPI_Intercomm_merge( interComm, 0, &myComm );
                    }
                } // else {
                // No process spawning
                // MPI-1 situation: all clients to be started by mpiexec
                //                    myComm = MPI_COMM_WORLD;
                //}
            }
            if ( thecomm == 0 && parentComm != MPI_COMM_NULL ) {
                // I am a child. Build intra-comm to the parent.
                MPI_Intercomm_merge( parentComm, 1, &myComm );
            }
            MPI_Comm_rank( myComm, &rank );

            CNC_ASSERT( m_channel == NULL );
            MpiChannelInterface* myChannel = new MpiChannelInterface( use_crc(), myComm );
            m_channel = myChannel;

            int size;
            MPI_Comm_size( myComm, &size );
            // Are we on the host or on the remote side?
            if ( rank == 0 ) {
                if( size <= 1 ) {
                    Speaker oss( std::cerr ); oss << "Warning: no clients available. Forgot to set CNC_MPI_SPAWN?";
                }
                // ==> HOST startup: 
                // This initializes the mpi environment in myChannel.
                MpiHostInitializer hostInitializer( *myChannel );
                hostInitializer.init_mpi_comm( myComm );
            } else {
                // ==> CLIENT startup:
                // This initializes the mpi environment in myChannel.
                MpiClientInitializer clientInitializer( *myChannel );
                clientInitializer.init_mpi_comm( myComm );
            }

            { Speaker oss( std::cerr ); oss << "MPI initialization complete (rank " << rank << ")."; }

            //            MPI_Barrier( myComm );

            // Now the mpi specific setup is finished.
            // Do the generic initialization stuff.
            GenericCommunicator::init( minId );

            return 0;
        }
Example #24
int main(int argc, char *argv[])
{
    complex_t coord_point, julia_constant;
    double x_max, x_min, y_max, y_min, x_resolution, y_resolution;
    double divergent_limit;
    char file_message[160];
    char filename[100];
    int icount, imax_iterations;
    int ipixels_across, ipixels_down;
    int i, j, k, julia, alternate_equation;
    int imin, imax, jmin, jmax;
    int work[5];
    header_t *result = NULL;
    /* make an integer array of size [N x M] to hold answers. */
    int *in_grid_array, *out_grid_array = NULL;
    int numprocs;
    int  namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int num_colors;
    color_t *colors = NULL;
    MPI_Status status;
    int listener;
    int save_image = 0;
    int optval;
    int num_children = DEFAULT_NUM_SLAVES;
    int master = 1;
    MPI_Comm parent, *child_comm = NULL;
    MPI_Request *child_request = NULL;
    int error_code, error;
    char error_str[MPI_MAX_ERROR_STRING];
    int length;
    int index;
    int pid;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Comm_get_parent(&parent);
    MPI_Get_processor_name(processor_name, &namelen);

    pid = getpid();

    if (parent == MPI_COMM_NULL)
    {
	if (numprocs > 1)
	{
	    printf("Error: only one process allowed for the master.\n");
	    PrintUsage();
	    error_code = MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(error_code);
	}

	printf("Welcome to the Mandelbrot/Julia set explorer.\n");

	master = 1;

	/* Get inputs-- region to view (must be within x/ymin to x/ymax, make sure
	xmax>xmin and ymax>ymin) and resolution (number of pixels along an edge,
	N x M, i.e. 256x256)
	*/

	read_mand_args(argc, argv, &imax_iterations, &ipixels_across, &ipixels_down,
	    &x_min, &x_max, &y_min, &y_max, &julia, &julia_constant.real,
	    &julia_constant.imaginary, &divergent_limit,
	    &alternate_equation, filename, &num_colors, &use_stdin, &save_image,
	    &num_children);
	check_mand_params(&imax_iterations, &ipixels_across, &ipixels_down,
	    &x_min, &x_max, &y_min, &y_max, &divergent_limit, &num_children);

	if (julia == 1) /* we're doing a julia figure */
	    check_julia_params(&julia_constant.real, &julia_constant.imaginary);

	/* spawn slaves */
	child_comm = (MPI_Comm*)malloc(num_children * sizeof(MPI_Comm));
	child_request = (MPI_Request*)malloc(num_children * sizeof(MPI_Request));
	result = (header_t*)malloc(num_children * sizeof(header_t));
	if (child_comm == NULL || child_request == NULL || result == NULL)
	{
	    printf("Error: unable to allocate an array of %d communicators, requests and work objects for the slaves.\n", num_children);
	    error_code = MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(error_code);
	}
	printf("Spawning %d slaves.\n", num_children);
	for (i=0; i<num_children; i++)
	{
	    error = MPI_Comm_spawn(argv[0], MPI_ARGV_NULL, 1,
		MPI_INFO_NULL, 0, MPI_COMM_WORLD, &child_comm[i], &error_code);
	    if (error != MPI_SUCCESS)
	    {
		error_str[0] = '\0';
		length = MPI_MAX_ERROR_STRING;
		MPI_Error_string(error, error_str, &length);
		printf("Error: MPI_Comm_spawn failed: %s\n", error_str);
		error_code = MPI_Abort(MPI_COMM_WORLD, -1);
		exit(error_code);
	    }
	}

	/* send out parameters */
	for (i=0; i<num_children; i++)
	{
	    MPI_Send(&num_colors, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&imax_iterations, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&ipixels_across, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&ipixels_down, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&divergent_limit, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
	    MPI_Send(&julia, 1, MPI_INT, 0, 0, child_comm[i]);
	    MPI_Send(&julia_constant.real, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
	    MPI_Send(&julia_constant.imaginary, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
	    MPI_Send(&alternate_equation, 1, MPI_INT, 0, 0, child_comm[i]);
	}
    }
    else
    {
	master = 0;
	MPI_Recv(&num_colors, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&imax_iterations, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&ipixels_across, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&ipixels_down, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&divergent_limit, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&julia, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&julia_constant.real, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&julia_constant.imaginary, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	MPI_Recv(&alternate_equation, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
    }

    if (master)
    {
	colors = malloc((num_colors+1)* sizeof(color_t));
	if (colors == NULL)
	{
	    MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(-1);
	}
	Make_color_array(num_colors, colors);
	colors[num_colors] = 0; /* add one on the top to avoid edge errors */
    }

    /* allocate memory */
    if ( (in_grid_array = (int *)calloc(ipixels_across * ipixels_down, sizeof(int))) == NULL)
    {
	printf("Memory allocation failed for data array, aborting.\n");
	MPI_Abort(MPI_COMM_WORLD, -1);
	exit(-1);
    }

    if (master)
    {
	int istep, jstep;
	int i1[400], i2[400], j1[400], j2[400];
	int ii, jj;
	struct sockaddr_in addr;
	int len;
	char line[1024], *token;

	if ( (out_grid_array = (int *)calloc(ipixels_across * ipixels_down, sizeof(int))) == NULL)
	{
	    printf("Memory allocation failed for data array, aborting.\n");
	    MPI_Abort(MPI_COMM_WORLD, -1);
	    exit(-1);
	}

	srand(getpid());

	if (!use_stdin)
	{
	    addr.sin_family = AF_INET;
	    addr.sin_addr.s_addr = INADDR_ANY;
	    addr.sin_port = htons(DEFAULT_PORT);

	    listener = socket(AF_INET, SOCK_STREAM, 0);
	    if (listener == -1)
	    {
		printf("unable to create a listener socket.\n");
		MPI_Abort(MPI_COMM_WORLD, -1);
		exit(-1);
	    }
	    if (bind(listener, &addr, sizeof(addr)) == -1)
	    {
		addr.sin_port = 0;
		if (bind(listener, &addr, sizeof(addr)) == -1)
		{
		    printf("unable to create a listener socket.\n");
		    MPI_Abort(MPI_COMM_WORLD, -1);
		    exit(-1);
		}
	    }
	    if (listen(listener, 1) == -1)
	    {
		printf("unable to listen.\n");
		MPI_Abort(MPI_COMM_WORLD, -1);
		exit(-1);
	    }
	    len = sizeof(addr);
	    getsockname(listener, &addr, &len);
	    
	    printf("%s listening on port %d\n", processor_name, ntohs(addr.sin_port));
	    fflush(stdout);

	    sock = accept(listener, NULL, NULL);
	    if (sock == -1)
	    {
		printf("unable to accept a socket connection.\n");
		MPI_Abort(MPI_COMM_WORLD, -1);
		exit(-1);
	    }
	    printf("accepted connection from visualization program.\n");
	    fflush(stdout);

#ifdef HAVE_WINDOWS_H
	    optval = 1;
	    setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *)&optval, sizeof(optval));
#endif

	    printf("sending image size to visualizer.\n");
	    sock_write(sock, &ipixels_across, sizeof(int));
	    sock_write(sock, &ipixels_down, sizeof(int));
	    sock_write(sock, &num_colors, sizeof(int));
	    sock_write(sock, &imax_iterations, sizeof(int));
	}

	for (;;)
	{
	    /* get x_min, x_max, y_min, and y_max */
	    if (use_stdin)
	    {
		printf("input xmin ymin xmax ymax max_iter, (0 0 0 0 0 to quit):\n");fflush(stdout);
		fgets(line, 1024, stdin);
		printf("read <%s> from stdin\n", line);fflush(stdout);
		token = strtok(line, " \n");
		x_min = atof(token);
		token = strtok(NULL, " \n");
		y_min = atof(token);
		token = strtok(NULL, " \n");
		x_max = atof(token);
		token = strtok(NULL, " \n");
		y_max = atof(token);
		token = strtok(NULL, " \n");
		imax_iterations = atoi(token);
		/*sscanf(line, "%g %g %g %g", &x_min, &y_min, &x_max, &y_max);*/
		/*scanf("%g %g %g %g", &x_min, &y_min, &x_max, &y_max);*/
	    }
	    else
	    {
		printf("reading xmin,ymin,xmax,ymax.\n");fflush(stdout);
		sock_read(sock, &x_min, sizeof(double));
		sock_read(sock, &y_min, sizeof(double));
		sock_read(sock, &x_max, sizeof(double));
		sock_read(sock, &y_max, sizeof(double));
		sock_read(sock, &imax_iterations, sizeof(int));
	    }
	    printf("x0,y0 = (%f, %f) x1,y1 = (%f,%f) max_iter = %d\n", x_min, y_min, x_max, y_max, imax_iterations);fflush(stdout);

	    /*printf("sending the limits: (%f,%f)(%f,%f)\n", x_min, y_min, x_max, y_max);fflush(stdout);*/
	    /* let everyone know the limits */
	    for (i=0; i<num_children; i++)
	    {
		MPI_Send(&x_min, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&x_max, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&y_min, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&y_max, 1, MPI_DOUBLE, 0, 0, child_comm[i]);
		MPI_Send(&imax_iterations, 1, MPI_INT, 0, 0, child_comm[i]);
	    }

	    /* check for the end condition */
	    if (x_min == x_max && y_min == y_max)
	    {
		/*printf("root bailing.\n");fflush(stdout);*/
		break;
	    }

	    /* break the work up into 400 pieces */
	    istep = ipixels_across / 20;
	    jstep = ipixels_down / 20;
	    if (istep < 1)
		istep = 1;
	    if (jstep < 1)
		jstep = 1;
	    k = 0;
	    for (i=0; i<20; i++)
	    {
		for (j=0; j<20; j++)
		{
		    i1[k] = MIN(istep * i, ipixels_across - 1);
		    i2[k] = MIN((istep * (i+1)) - 1, ipixels_across - 1);
		    j1[k] = MIN(jstep * j, ipixels_down - 1);
		    j2[k] = MIN((jstep * (j+1)) - 1, ipixels_down - 1);
		    k++;
		}
	    }

	    /* shuffle the work */
	    for (i=0; i<500; i++)
	    {
		ii = rand() % 400;
		jj = rand() % 400;
		swap(&i1[ii], &i1[jj]);
		swap(&i2[ii], &i2[jj]);
		swap(&j1[ii], &j1[jj]);
		swap(&j2[ii], &j2[jj]);
	    }

	    /* send a piece of work to each worker (there must be more work than workers) */
	    k = 0;
	    for (i=0; i<num_children; i++)
	    {
		work[0] = k+1;
		work[1] = i1[k]; /* imin */
		work[2] = i2[k]; /* imax */
		work[3] = j1[k]; /* jmin */
		work[4] = j2[k]; /* jmax */

		/*printf("sending work(%d) to %d\n", k+1, i);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 100, child_comm[i]);
		MPI_Irecv(&result[i], 5, MPI_INT, 0, 200, child_comm[i], &child_request[i]);
		k++;
	    }
	    /* receive the results and hand out more work until the image is complete */
	    while (k<400)
	    {
		MPI_Waitany(num_children, child_request, &index, &status);
		memcpy(work, &result[index], 5 * sizeof(int));
		/*printf("master receiving data in k<400 loop.\n");fflush(stdout);*/
		MPI_Recv(in_grid_array, (work[2] + 1 - work[1]) * (work[4] + 1 - work[3]), MPI_INT, 0, 201, child_comm[index], &status);
		/* draw data */
		output_data(in_grid_array, &work[1], out_grid_array, ipixels_across, ipixels_down);
		work[0] = k+1;
		work[1] = i1[k];
		work[2] = i2[k];
		work[3] = j1[k];
		work[4] = j2[k];
		/*printf("sending work(%d) to %d\n", k+1, index);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 100, child_comm[index]);
		MPI_Irecv(&result[index], 5, MPI_INT, 0, 200, child_comm[index], &child_request[index]);
		k++;
	    }
	    /* receive the last pieces of work */
	    /* and tell everyone to stop */
	    for (i=0; i<num_children; i++)
	    {
		MPI_Wait(&child_request[i], &status);
		memcpy(work, &result[i], 5 * sizeof(int));
		/*printf("master receiving data in tail loop.\n");fflush(stdout);*/
		MPI_Recv(in_grid_array, (work[2] + 1 - work[1]) * (work[4] + 1 - work[3]), MPI_INT, 0, 201, child_comm[i], &status);
		/* draw data */
		output_data(in_grid_array, &work[1], out_grid_array, ipixels_across, ipixels_down);
		work[0] = 0;
		work[1] = 0;
		work[2] = 0;
		work[3] = 0;
		work[4] = 0;
		/*printf("sending %d to tell %d to stop\n", work[0], i);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 100, child_comm[i]);
	    }

	    /* tell the visualizer the image is done */
	    if (!use_stdin)
	    {
		work[0] = 0;
		work[1] = 0;
		work[2] = 0;
		work[3] = 0;
		sock_write(sock, work, 4 * sizeof(int));
	    }
	}
    }
    else
    {
	for (;;)
	{
	    /*printf("slave[%d] receiveing bounds.\n", pid);fflush(stdout);*/
	    MPI_Recv(&x_min, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&x_max, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&y_min, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&y_max, 1, MPI_DOUBLE, 0, 0, parent, MPI_STATUS_IGNORE);
	    MPI_Recv(&imax_iterations, 1, MPI_INT, 0, 0, parent, MPI_STATUS_IGNORE);
	    /*printf("slave[%d] received bounding box: (%f,%f)(%f,%f)\n", pid, x_min, y_min, x_max, y_max);fflush(stdout);*/

	    /* check for the end condition */
	    if (x_min == x_max && y_min == y_max)
	    {
		/*printf("slave[%d] done.\n", pid);fflush(stdout);*/
		break;
	    }

	    x_resolution = (x_max-x_min)/ ((double)ipixels_across);
	    y_resolution = (y_max-y_min)/ ((double)ipixels_down);

	    MPI_Recv(work, 5, MPI_INT, 0, 100, parent, &status);
	    /*printf("slave[%d] received work: %d, (%d,%d)(%d,%d)\n", pid, work[0], work[1], work[2], work[3], work[4]);fflush(stdout);*/
	    while (work[0] != 0)
	    {
		imin = work[1];
		imax = work[2];
		jmin = work[3];
		jmax = work[4];

		k = 0;
		for (j=jmin; j<=jmax; ++j)
		{
		    coord_point.imaginary = y_max - j*y_resolution; /* go top to bottom */

		    for (i=imin; i<=imax; ++i)
		    {
			/* Call Mandelbrot routine for each code, fill array with number of iterations. */

			coord_point.real = x_min + i*x_resolution; /* go left to right */
			if (julia == 1)
			{
			    /* doing Julia set */
			    /* julia eq:  z = z^2 + c, z_0 = grid coordinate, c = constant */
			    icount = single_mandelbrot_point(coord_point, julia_constant, imax_iterations, divergent_limit);
			}
			else if (alternate_equation == 1)
			{
			    /* doing experimental form 1 */
			    icount = subtractive_mandelbrot_point(coord_point, julia_constant, imax_iterations, divergent_limit);
			}
			else if (alternate_equation == 2)
			{
			    /* doing experimental form 2 */
			    icount = additive_mandelbrot_point(coord_point, julia_constant, imax_iterations, divergent_limit);
			}
			else
			{
			    /* default to doing Mandelbrot set */
			    /* mandelbrot eq: z = z^2 + c, z_0 = c, c = grid coordinate */
			    icount = single_mandelbrot_point(coord_point, coord_point, imax_iterations, divergent_limit);
			}
			in_grid_array[k] = icount;
			++k;
		    }
		}
		/* send the result to the root */
		/*printf("slave[%d] sending work %d back.\n", pid, work[0]);fflush(stdout);*/
		MPI_Send(work, 5, MPI_INT, 0, 200, parent);
		/*printf("slave[%d] sending work %d data.\n", pid, work[0]);fflush(stdout);*/
		MPI_Send(in_grid_array, (work[2] + 1 - work[1]) * (work[4] + 1 - work[3]), MPI_INT, 0, 201, parent);
		/* get the next piece of work */
		/*printf("slave[%d] receiving new work.\n", pid);fflush(stdout);*/
		MPI_Recv(work, 5, MPI_INT, 0, 100, parent, &status);
		/*printf("slave[%d] received work: %d, (%d,%d)(%d,%d)\n", pid, work[0], work[1], work[2], work[3], work[4]);fflush(stdout);*/
	    }
	}
    }

    if (master && save_image)
    {
	imax_iterations = 0;
	for (i=0; i<ipixels_across * ipixels_down; ++i)
	{
	    /* look for "brightest" pixel value, for image use */
	    if (out_grid_array[i] > imax_iterations)
		imax_iterations = out_grid_array[i];
	}

	if (julia == 0)
	    printf("Done calculating mandelbrot, now creating file\n");
	else
	    printf("Done calculating julia, now creating file\n");
	fflush(stdout);

	/* Print out the array in some appropriate form. */
	if (julia == 0)
	{
	    /* it's a mandelbrot */
	    sprintf(file_message, "Mandelbrot over (%lf-%lf,%lf-%lf), size %d x %d",
		x_min, x_max, y_min, y_max, ipixels_across, ipixels_down);
	}
	else
	{
	    /* it's a julia */
	    sprintf(file_message, "Julia over (%lf-%lf,%lf-%lf), size %d x %d, center (%lf, %lf)",
		x_min, x_max, y_min, y_max, ipixels_across, ipixels_down,
		julia_constant.real, julia_constant.imaginary);
	}

	dumpimage(filename, out_grid_array, ipixels_across, ipixels_down, imax_iterations, file_message, num_colors, colors);
    }

    if (master)
    {
	for (i=0; i<num_children; i++)
	{
	    MPI_Comm_disconnect(&child_comm[i]);
	}
	free(child_comm);
	free(child_request);
	free(colors);
    }

    MPI_Finalize();
    return 0;
}
Пример #25
0
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, rsize;
    int np = 3;
    MPI_Comm parentcomm, intercomm;
    int verbose = 0;
    char *env;
    int can_spawn;

    env = getenv("MPITEST_VERBOSE");
    if (env) {
        if (*env != '0')
            verbose = 1;
    }

    MTest_Init(&argc, &argv);

    errs += MTestSpawnPossible(&can_spawn);

    if (can_spawn) {
        MPI_Comm_get_parent(&parentcomm);

        if (parentcomm == MPI_COMM_NULL) {
            IF_VERBOSE(("spawning %d processes\n", np));
            /* Create 3 more processes */
            MPI_Comm_spawn((char *) "./disconnect", MPI_ARGV_NULL, np,
                           MPI_INFO_NULL, 0, MPI_COMM_WORLD, &intercomm, MPI_ERRCODES_IGNORE);
        } else {
            intercomm = parentcomm;
        }

        /* We now have a valid intercomm */

        MPI_Comm_remote_size(intercomm, &rsize);
        MPI_Comm_size(intercomm, &size);
        MPI_Comm_rank(intercomm, &rank);

        if (parentcomm == MPI_COMM_NULL) {
            IF_VERBOSE(("parent rank %d alive.\n", rank));
            /* Parent */
            if (rsize != np) {
                errs++;
                printf("Did not create %d processes (got %d)\n", np, rsize);
                fflush(stdout);
            }
            IF_VERBOSE(("disconnecting child communicator\n"));
            MPI_Comm_disconnect(&intercomm);

            /* Errors cannot be sent back to the parent because there is no
             * communicator connected to the children
             * for (i=0; i<rsize; i++)
             * {
             * MPI_Recv(&err, 1, MPI_INT, i, 1, intercomm, MPI_STATUS_IGNORE);
             * errs += err;
             * }
             */
        } else {
            IF_VERBOSE(("child rank %d alive.\n", rank));
            /* Child */
            if (size != np) {
                errs++;
                printf("(Child) Did not create %d processes (got %d)\n", np, size);
                fflush(stdout);
            }

            IF_VERBOSE(("disconnecting communicator\n"));
            MPI_Comm_disconnect(&intercomm);

            /* Send the errs back to the master process */
            /* Errors cannot be sent back to the parent because there is no
             * communicator connected to the parent */
            /*MPI_Ssend(&errs, 1, MPI_INT, 0, 1, intercomm); */
        }

        /* Note that MTest_Finalize gets errs only over COMM_WORLD */
        /* Note also that both the parent and child will generate "No Errors"
         * if both call MTest_Finalize */
        if (parentcomm == MPI_COMM_NULL) {
            MTest_Finalize(errs);
        } else {
            MPI_Finalize();
        }
    } else {
        MTest_Finalize(errs);
    }

    IF_VERBOSE(("calling finalize\n"));
    return MTestReturnValue(errs);
}
Пример #26
0
// Method that builds the thread elimination tree
ThreadTree* buildBNThreadTree(int nxq, int* xq, int nxe, int* xe, Potential** findings,BayesNet* bayesnet, int* elmorder, Graph* moral, Graph* elmtree,MPI_Comm *everyone) {
	
	int msg = -1;
	MPI_Comm t;
	ThreadTree* threadtree = NULL;		// Thread tree
	ThreadVertex* thread = NULL;		// Auxiliary variable holding a vertex of the thread tree

	VertexNode* child;				// Node of a singly linked list
	VertexNode* current = NULL;		// Auxiliary variable used to walk the moral graph

	Potential* result = NULL;		// Potential resulting from the sequence of operations
	Potential* potential = NULL;	// Auxiliary potential variable

	int varelmorder[bnsize(bayesnet)];	// Maps a variable id to its position in the elimination order
	int used[bnsize(bayesnet)];			// Flag vector indicating whether a potential has already been used

	Elimination** eliminations = NULL;

	int nvars, ct_used;
	int i,j,id;

	// Number of variables involved; may not be the whole network
	nvars = graphsize(elmtree);
	
	//MPI_Comm_spawn(char *command, char *argv[], int maxprocs, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *intercomm, int array_of_errcodes[])
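	// Spawn one "./mpitask" worker per variable in the elimination tree; the call
	// is collective over MPI_COMM_SELF, so only this process forms the parent side
	// of the *everyone intercommunicator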
	MPI_Comm_spawn("./mpitask",MPI_ARGV_NULL,nvars,MPI_INFO_NULL,0,MPI_COMM_SELF,everyone,MPI_ERRCODES_IGNORE);
	//MPI_Bcast(&msg,1,MPI_INT,MPI_ROOT,t);
	

	// Build the structure that will be used to pass data to the threads
	eliminations = malloc(nvars*sizeof(Elimination*));

	// Build the thread tree
	threadtree = buildThreadTree();

	// Create the vertex threads
	for (i=0;i<nvars;i++) {

		// Initialize the map
		varelmorder[elmorder[i]] = i;

		// Initialize the flag
		used[elmorder[i]] = 0;

		// Build the eliminations and populate varelmorder
		eliminations[i] = (Elimination*) buildElimination(i,bayesnet->variables[elmorder[i]],1);

		// Build one thread of the thread tree
		thread = buildThreadVertex(taskVariableElimination,(void*)(eliminations));

		// Attach the vertex to the thread tree
		addThreadVertex(&threadtree,&thread);
		
	}

	// Add the dependency arcs
	for (i=0;i<nvars;i++) {
		child = (getVertexChildren(getVertex(&elmtree,i)))->first;
		while (child!=NULL) {
			addTreeArcByIds(&threadtree,i,child->vertex->id);
			child = child->next;
		}
	}

	//---
	
	// Using Xq, if not NULL, mark the operations that must not be marginalized
	if (xq!=NULL) { // Just to be safe
		for (i=0;i<nxq;i++) eliminations[varelmorder[xq[i]]]->marginalize = 0;
	}
	
	for (i=0, ct_used=0;i<nvars && ct_used<nvars;i++) {
	
		// Assign the potentials of the elimination operation
		// 1. Check the node of the current variable
		if (!used[elmorder[i]]) {
			id = elmorder[i];
			if (findings && findings[id]) {
				// Marginalize the probability distribution if necessary
				potential =(bayesnet->potentials[id]->nvars>1)?marginalizeNotRequired(moral,bayesnet->potentials[id]):NULL;
				// Multiply the finding by the probability distribution
				sllappend(&eliminations[i]->potentials,multPotentials((potential?potential:bayesnet->potentials[id]),findings[id]));
				// Free the allocated space (if necessary)
				if (potential) destroyPotential(&potential);
			} else {
				sllappend(&eliminations[i]->potentials,bayesnet->potentials[id]);
			}
			used[id] = 1;
			ct_used++;
		}
	
		// 2. Check the children of the current variable
		current = getVertexChildren(getVertexById(&bayesnet->graph,elmorder[i]))->first;
		while (current!=NULL) {
			if (getVertexById(&moral,current->vertex->id) && !used[current->vertex->id]) {
				id = current->vertex->id;
				if (findings && findings[id]) {
					// Marginalize the probability distribution if necessary
					potential =(bayesnet->potentials[id]->nvars>1)?marginalizeNotRequired(moral,bayesnet->potentials[id]):NULL;
					// Multiply the finding by the probability distribution
					sllappend(&eliminations[i]->potentials,multPotentials((potential?potential:bayesnet->potentials[id]),findings[id]));
					// Free the allocated space (if necessary)
					if (potential) destroyPotential(&potential);
				} else {
					sllappend(&eliminations[i]->potentials,bayesnet->potentials[id]);
				}
				used[id] = 1;
				ct_used++;
			}
			current = current->next;
		}
	
		// Record the number of fixed potentials in the elimination
		eliminations[i]->nconst = slllength(eliminations[i]->potentials);
	}	

	//---

	// Has to be set to NULL to release the memory, since it was explicitly malloc'd
	eliminations = NULL;

	return threadtree;
}
Пример #27
int mca_sharedfp_addproc_file_open (struct ompi_communicator_t *comm,
                                    const char* filename,
                                    int amode,
                                    struct ompi_info_t *info,
                                    mca_io_ompio_file_t *fh)
{
    int ret = OMPI_SUCCESS, err;
    int rank;
    struct mca_sharedfp_base_data_t* sh;
    mca_io_ompio_file_t * shfileHandle, *ompio_fh;
    MPI_Comm newInterComm;
    struct mca_sharedfp_addproc_data * addproc_data = NULL;
    mca_io_ompio_data_t *data;


    /*-------------------------------------------------*/
    /*Open the same file again without shared file pointer*/
    /*-------------------------------------------------*/
    shfileHandle =  (mca_io_ompio_file_t *)malloc(sizeof(mca_io_ompio_file_t));
    ret = mca_common_ompio_file_open(comm,filename,amode,info,shfileHandle,false);
    if ( OMPI_SUCCESS != ret) {
        opal_output(0, "mca_sharedfp_addproc_file_open: Error during file open\n");
        return ret;
    }
    shfileHandle->f_fh = fh->f_fh;
    data = (mca_io_ompio_data_t *) fh->f_fh->f_io_selected_data;
    ompio_fh = &data->ompio_fh;

    err = mca_common_ompio_set_view (shfileHandle,
                                     ompio_fh->f_disp,
                                     ompio_fh->f_etype,
                                     ompio_fh->f_orig_filetype,
                                     ompio_fh->f_datarep,
                                     MPI_INFO_NULL);

    /*Memory is allocated here for the sh structure*/
    if ( mca_sharedfp_addproc_verbose ) {
	opal_output(ompi_sharedfp_base_framework.framework_output,
                    "mca_sharedfp_addproc_file_open: malloc f_sharedfp_ptr struct\n");
    }
    sh = (struct mca_sharedfp_base_data_t*)malloc(sizeof(struct mca_sharedfp_base_data_t));
    if ( NULL == sh ){
        opal_output(ompi_sharedfp_base_framework.framework_output,
                    "mca_sharedfp_addproc_file_open: Error, unable to malloc f_sharedfp_ptr struct\n");
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /*Populate the sh file structure based on the implementation*/
    sh->sharedfh      = shfileHandle;			/* Shared file pointer*/
    sh->global_offset = 0;				/* Global Offset*/
    sh->comm          = comm; 				/* Communicator*/
    sh->selected_module_data = NULL;

    rank = ompi_comm_rank ( sh->comm );

    if ( mca_sharedfp_addproc_verbose ) {
        opal_output(ompi_sharedfp_base_framework.framework_output,
                    "mca_sharedfp_addproc_file_open: START spawn by rank=%d\n",rank);
    }

    /*Spawn a new process which will maintain the offsets for this file open*/
    ret = MPI_Comm_spawn("mca_sharedfp_addproc_control", MPI_ARGV_NULL, 1, MPI_INFO_NULL,
		   0, sh->comm, &newInterComm, &err);
    if ( OMPI_SUCCESS != ret  ) {
        opal_output(0, "mca_sharedfp_addproc_file_open: error spawning control process ret=%d\n",
                    ret);
    }

    /*If spawning successful*/
    if (newInterComm)    {
        addproc_data = (struct mca_sharedfp_addproc_data*)malloc(sizeof(struct mca_sharedfp_addproc_data));
        if ( NULL == addproc_data ){
            opal_output (0,"mca_sharedfp_addproc_file_open: Error, unable to malloc addproc_data struct\n");
            return OMPI_ERR_OUT_OF_RESOURCE;
        }

        /*Store the new Intercommunicator*/
        addproc_data->intercom = newInterComm;

        /*save the addproc data*/
        sh->selected_module_data = addproc_data;
        /*remember the shared file handle*/
        fh->f_sharedfp_data = sh;
    }
    else{
        opal_output(ompi_sharedfp_base_framework.framework_output,
                    "mca_sharedfp_addproc_file_open: DONE spawn by rank=%d, errcode[success=%d, err=%d]=%d\n",
		    rank, MPI_SUCCESS, MPI_ERR_SPAWN, ret);
        ret = OMPI_ERROR;
    }

    return ret;
}
Пример #28
0
int
main (int argc, char **argv)
{
  int rank, size;
  int *error_codes;
  int i;
  char *buffer;
  int *flag;
  int all_done;
  MPI_Comm intercomm;
  MPI_Request *request;
  MPI_Status *status;
  struct timespec nanoseconds;

  MPI_Init(&argc, &argv);

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  if (rank == 0)
  {
    error_codes = (int*) malloc(sizeof(int)*size);
    buffer = (char*) malloc(sizeof(char)*size);
    request = (MPI_Request*) malloc(sizeof(MPI_Request)*size);
    status = (MPI_Status*) malloc(sizeof(MPI_Status)*size);
    flag = (int*) malloc(sizeof(int)*size);

    while (1)
    {
      printf("[master] spawning %i processes\n", size);
      MPI_Comm_spawn("./other", argv, size, MPI_INFO_NULL, 0, MPI_COMM_SELF, &intercomm, error_codes);

      /* Wait for children to finish. */
      for (i = 0; i < size; ++i)
      {
        MPI_Irecv(&buffer[i], 1, MPI_CHAR, MPI_ANY_SOURCE, 1, intercomm, &request[i]);
        flag[i] = 0;
      }

      all_done = 0;
      while (! all_done)
      {
        all_done = 1;
        for (i = 0; i < size; ++i)
        {
          if (! flag[i])
          {
            MPI_Test(&request[i], &flag[i], &status[i]);
            if (! flag[i])
            {
              all_done = 0;
            }
          }
        }

        /* Sleep a little. */
        nanoseconds.tv_sec = 0;
        nanoseconds.tv_nsec = 0.5e9;
        nanosleep(&nanoseconds, NULL);
      }
    }
  }

  printf("[master (%i)] waiting at barrier\n", rank);
  MPI_Barrier(MPI_COMM_WORLD);
  printf("[master (%i)] done\n", rank);

  MPI_Finalize();
  return 0;
}
Пример #29
0
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, rsize, i;
    int np = 2;
    int errcodes[2];
    MPI_Comm      parentcomm, intercomm, intracomm, intracomm2, intracomm3;
    int           isChild = 0;
    MPI_Status    status;

    MTest_Init( &argc, &argv );

    MPI_Comm_get_parent( &parentcomm );

    if (parentcomm == MPI_COMM_NULL) {
	/* Create 2 more processes */
	MPI_Comm_spawn( (char*)"./spawnintra", MPI_ARGV_NULL, np,
			MPI_INFO_NULL, 0, MPI_COMM_WORLD,
			&intercomm, errcodes );
    }
    else 
	intercomm = parentcomm;

    /* We now have a valid intercomm */

    MPI_Comm_remote_size( intercomm, &rsize );
    MPI_Comm_size( intercomm, &size );
    MPI_Comm_rank( intercomm, &rank );

    if (parentcomm == MPI_COMM_NULL) {
	/* Master */
	if (rsize != np) {
	    errs++;
	    printf( "Did not create %d processes (got %d)\n", np, rsize );
	}
	if (rank == 0) {
	    for (i=0; i<rsize; i++) {
		MPI_Send( &i, 1, MPI_INT, i, 0, intercomm );
	    }
	}
    }
    else {
	/* Child */
	isChild = 1;
	if (size != np) {
	    errs++;
	    printf( "(Child) Did not create %d processes (got %d)\n", 
		    np, size );
	}
	MPI_Recv( &i, 1, MPI_INT, 0, 0, intercomm, &status );
	if (i != rank) {
	    errs++;
	    printf( "Unexpected rank on child %d (%d)\n", rank, i );
	}
    }

    /* At this point, try to form the intracommunicator */
    MPI_Intercomm_merge( intercomm, isChild, &intracomm );
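    /* the parent passes high=0 and the children pass high=1, so the parent
       group's ranks come first in the merged intracommunicator */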

    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm, &icsize );
	MPI_Comm_rank( intracomm, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Make sure that the processes are ordered correctly */
	if (isChild) {
	    int psize;
	    MPI_Comm_remote_size( parentcomm, &psize );
	    if (icrank != psize + wrank ) {
		errs++;
		printf( "Intracomm rank %d (from child) should have rank %d\n",
			icrank, psize + wrank );
	    }
	}
	else {
	    if (icrank != wrank) {
		errs++;
		printf( "Intracomm rank %d (from parent) should have rank %d\n",
			icrank, wrank );
	    }
	}
    }

    /* At this point, try to form the intracommunicator, with the other 
     processes first */
    MPI_Intercomm_merge( intercomm, !isChild, &intracomm2 );

    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm2, &icsize );
	MPI_Comm_rank( intracomm2, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "(2)Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Make sure that the processes are ordered correctly */
	if (isChild) {
	    if (icrank != wrank ) {
		errs++;
		printf( "(2)Intracomm rank %d (from child) should have rank %d\n",
			icrank, wrank );
	    }
	}
	else {
	    int csize;
	    MPI_Comm_remote_size( intercomm, &csize );
	    if (icrank != wrank + csize) {
		errs++;
		printf( "(2)Intracomm rank %d (from parent) should have rank %d\n",
			icrank, wrank + csize );
	    }
	}
    }

    /* At this point, try to form the intracommunicator, with an 
       arbitrary choice for the first group of processes */
    MPI_Intercomm_merge( intercomm, 0, &intracomm3 );
    /* Check on the intra comm */
    {
	int icsize, icrank, wrank;

	MPI_Comm_size( intracomm3, &icsize );
	MPI_Comm_rank( intracomm3, &icrank );
	MPI_Comm_rank( MPI_COMM_WORLD, &wrank );

	if (icsize != rsize + size) {
	    errs++;
	    printf( "(3)Intracomm rank %d thinks size is %d, not %d\n",
		    icrank, icsize, rsize + size );
	}
	/* Eventually, we should test that the processes are ordered 
	   correctly, by groups (must be one of the two cases above) */
    }

    /* Update error count */
    if (isChild) {
	/* Send the errs back to the master process */
	MPI_Ssend( &errs, 1, MPI_INT, 0, 1, intercomm );
    }
    else {
	if (rank == 0) {
	    /* We could use intercomm reduce to get the errors from the 
	       children, but we'll use a simpler loop to make sure that
	       we get valid data */
	    for (i=0; i<rsize; i++) {
		MPI_Recv( &err, 1, MPI_INT, i, 1, intercomm, MPI_STATUS_IGNORE );
		errs += err;
	    }
	}
    }

    /* It isn't necessary to free the intracomms, but it should not hurt */
    MPI_Comm_free( &intracomm );
    MPI_Comm_free( &intracomm2 );
    MPI_Comm_free( &intracomm3 );

    /* It isn't necessary to free the intercomm, but it should not hurt */
    MPI_Comm_free( &intercomm );

    /* Note that MTest_Finalize gets errs only over COMM_WORLD */
    /* Note also that both the parent and child will generate "No Errors"
       if both call MTest_Finalize */
    if (parentcomm == MPI_COMM_NULL) {
	MTest_Finalize( errs );
    }

    MPI_Finalize();
    return 0;
}
Пример #30
0
int
main(int ac, char *av[])
{
	int  rank, size;
	char name[MPI_MAX_PROCESSOR_NAME];
	int  nameLen;
	int  n = 1, i;
	int  slave = 0;
	int  *errs;
	char *args[] = { "-W", NULL};
	MPI_Comm intercomm, icomm;
	int  err;
	char *line;
	char  *buff;
	int   buffSize;
	int   one_int;
	char  who[1024];

	memset(name, 0, sizeof(name));

	for(i=1; i<ac; i++){
		if (av[i] == NULL)
			continue;

		if (strcmp(av[i],"-W") == 0){
			slave = 1;
		}
		else if (strcmp(av[i],"-n") == 0){
			n = atoi(av[i+1]);
			av[i+1] = NULL;
		}
	}

	if (n <= 0){
		fprintf(stderr, "n=%d has an illegal value.\n", n);
		return -1;
	}

	sprintf(who, "%s[%d]", slave? " slave": "master", getpid());
	if (!slave)
		printf("Generating %d slave processes\n", n);


	errs = (int *)alloca(sizeof(int)*n);

	fprintf(stderr, "%s before MPI_Init()\n", who);
	MPI_Init(&ac, &av);
	fprintf(stderr, "%s after MPI_Init()\n", who);

	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Get_processor_name(name, &nameLen);
	sprintf(&who[strlen(who)], " (%s) %d/%d", name, rank, size);
	fprintf(stderr, "%s\n", who);


	if (!slave){
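		/* re-launch this same executable (av[0]) with "-W" so the spawned
		   copies take the slave branch */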
		err = MPI_Comm_spawn(av[0], args, n, MPI_INFO_NULL, 0, MPI_COMM_SELF, &intercomm, errs);
		if (err){
			fprintf(stderr, "MPI_Comm_spawn generated error %d.\n", err);
		}
	}
	else {
		fprintf(stderr, "%s before MPI_Comm_get_parent()\n", who);
		MPI_Comm_get_parent(&intercomm);
	}

	fprintf(stderr, "%s before MPI_Bcast()\n", who);
	MPI_Bcast(&one_int, 1, MPI_INT, 0, intercomm);
	fprintf(stderr, "%s after MPI_Bcast()\n", who);

	fprintf(stderr, "%s before MPI_Barrier()\n", who);
	MPI_Barrier(intercomm);
	fprintf(stderr, "%s after MPI_Barrier()\n", who);

	fprintf(stderr, "%s before MPI_Finalize()\n", who);
	MPI_Finalize();

	return 0;
}