Exemple #1
0
/* Boots a Python/MPI4PY driver script under KMR spawning.  The rank
   is read from Open-MPI specific environment variables BEFORE calling
   MPI_Init(), because only the master (the largest rank) runs the
   Python interpreter; every other rank detaches as a worker that
   merely serves spawning requests. */

/* Initializes MPI only so the whole job can be aborted cleanly, then
   exits.  Used for usage errors detected before normal MPI startup. */

static void
kmrrun_abort_job(int *argcp, char ***argvp)
{
    int nprocs, rank, thlv;
    MPI_Init_thread(argcp, argvp, MPI_THREAD_SERIALIZED, &thlv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Abort(MPI_COMM_WORLD, 1);
    exit(1);
}

int
main(int argc, char *argv[])
{
    /* Check the rank, not using MPI.  It wants to use MPI4PY on the
       master only.  Workers are simply to serve spawning.  Use
       Open-MPI specific environment variables. */

    int vpid;
    int nprocs;
    {
	char gomi[4];

	/* Read and validate this process's rank (vpid). */

	char *e0 = "OMPI_MCA_orte_ess_vpid";
	char *s0 = getenv(e0);
	if (s0 == 0) {
	    fprintf(stderr, ("Environment variable %s needs to be set;"
			     " Seems not Open-MPI.\n"), e0);
	    fflush(0);
	    abort();
	}

	int v0;
	int cc0 = sscanf(s0, "%d%c", &v0, gomi);
	/* (Short-circuit keeps v0 unread when the scan failed.) */
	if (cc0 != 1 || v0 < 0) {
	    fprintf(stderr, "Bad environment variable %s (%s).\n", e0, s0);
	    fflush(0);
	    abort();
	}

	/* Read and validate the total process count. */

	char *e1 = "OMPI_MCA_orte_ess_num_procs";
	char *s1 = getenv(e1);
	if (s1 == 0) {
	    fprintf(stderr, ("Environment variable %s needs to be set;"
			     " Seems not Open-MPI.\n"), e1);
	    fflush(0);
	    abort();
	}

	int v1;
	int cc1 = sscanf(s1, "%d%c", &v1, gomi);
	if (cc1 != 1 || v1 < 0) {
	    fprintf(stderr, "Bad environment variable %s (%s).\n", e1, s1);
	    fflush(0);
	    abort();
	}

	if (v0 >= v1) {
	    fprintf(stderr, ("Environment variables %s=%d and %s=%d"
			     " have bad values.\n"), e0, v0, e1, v1);
	    fflush(0);
	    abort();
	}

	vpid = v0;
	nprocs = v1;
    }

    /* Parse an optional leading "-v"/"-vN" verbosity option; the
       remaining arguments are passed along as XARGC/XARGV. */

    int verbosity;
    int xargc;
    char **xargv;
    if (argc >= 3 && strncasecmp(argv[1], "-v", 2) == 0) {
	switch (argv[1][2]) {
	case '0': verbosity = 0; break;
	case '1': verbosity = 1; break;
	case '2': verbosity = 2; break;
	case '3': verbosity = 3; break;
	default:  verbosity = 2; break;
	}
	xargc = (argc - 2);
	xargv = &argv[2];
    } else {
	verbosity = -1;
	xargc = (argc - 1);
	xargv = &argv[1];
    }

    int master = (nprocs - 1);
    if (vpid == master) {
	/* MASTER: run the Python script. */

	if (xargc < 1) {
	    fprintf(stderr, "USAGE: %s python-script-file\n", argv[0]);
	    fflush(0);
	    kmrrun_abort_job(&argc, &argv);
	}

	char *cmd = xargv[0];
	FILE *f = fopen(cmd, "r");
	if (f == 0) {
	    char ee[80];
	    snprintf(ee, sizeof(ee), "fopen(%s): %s\n",
		     cmd, strerror(errno));
	    /* (Print via "%s"; the file name may contain '%'.) */
	    fprintf(stderr, "%s", ee);
	    fprintf(stderr, "USAGE: %s python-script-file\n", argv[0]);
	    fflush(0);
	    kmrrun_abort_job(&argc, &argv);
	}

	Py_SetProgramName(argv[0]);
	Py_Initialize();
	PyRun_SimpleFile(f, cmd);
	Py_Finalize();
	fclose(f);
	return 0;
    } else {
	/* WORKERS: detach and serve spawning requests. */

	/* The actions of workers should exactly match with the master
	   in Python.  It assumes MPI4PY itself only does local MPI
	   operations. */

	int cc;

	int nprocs, rank, thlv;
	MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &thlv);
	MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	kmr_init();

	/* Presumably warms up the OpenMP thread pool before
	   detaching; NOTE(review): confirm intent. */

#pragma omp parallel
	{
	    sleep(0);
	}

	KMR *mr = kmr_create_context(MPI_COMM_WORLD, MPI_INFO_NULL, 0);
	assert(mr != 0);

	mr->trace_map_spawn = (verbosity > 0);

	MPI_Comm splitcomms[4];
	cc = kmr_split_swf_lanes(mr, splitcomms, master, 0, 1);
	assert(cc == MPI_SUCCESS);

	cc = kmr_init_swf(mr, splitcomms, master);
	assert(cc == MPI_SUCCESS);

	if (verbosity != -1) {
	    kmr_set_swf_verbosity(mr, verbosity);
	}
	cc = kmr_detach_swf_workers(mr);
	assert(cc == MPI_SUCCESS);

	/* NEVER COMES HERE. */

	return 0;
    }
}
Exemple #2
0
/* Adds NUM_COMMANDS spawn-command key-value pairs to KVO, cycling
   through three sleep commands of different durations.  It is a
   map-function for kmr_map_once().  The value length is now computed
   per command, instead of assuming every command string happens to
   have the same length as the second one. */

static int
gen_cmdkvs(const struct kmr_kv_box kv,
	   const KMR_KVS *kvi, KMR_KVS *kvo, void *p, long i_)
{
    char *cmds[3] = { "maxprocs=1 /bin/sleep  1",
		      "maxprocs=1 /bin/sleep  5",
		      "maxprocs=1 /bin/sleep 10" };
    for (int i = 0; i < NUM_COMMANDS; i++) {
	char *cmd = cmds[i % 3];
	struct kmr_kv_box nkv = { .klen = sizeof(long),
				  .vlen = (int)(strlen(cmd) + 1),
				  .k.i  = i,
				  .v.p  = (void *)cmd };
	kmr_add_kv(kvo, nkv);
    }
    return MPI_SUCCESS;
}

/* Emits, for each input pair, an integer pair whose key and value are
   both the input pair's key.  It is a map-function. */

static int
output_result(const struct kmr_kv_box kv,
	      const KMR_KVS *kvi, KMR_KVS *kvo, void *p, long i_)
{
    long key = kv.k.i;
    struct kmr_kv_box pair = { .klen = sizeof(long),
			       .vlen = sizeof(long),
			       .k.i  = key,
			       .v.i  = key };
    kmr_add_kv(kvo, pair);
    return MPI_SUCCESS;
}

/* Generates spawn commands, shuffles them, and runs them as serial
   processes, printing progress markers between phases.  Fixes: the
   return of kmr_map_serial_processes() is now checked like every
   other phase, and main returns 0 explicitly. */

int
main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    kmr_init();
    KMR *mr = kmr_create_context(MPI_COMM_WORLD, MPI_INFO_NULL, 0);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) {
	fprintf(stderr, "Start\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);

    /* Generate the command pairs. */

    KMR_KVS *kvs_commands = kmr_create_kvs(mr, KMR_KV_INTEGER, KMR_KV_OPAQUE);
    int ret = kmr_map_once(kvs_commands, 0, kmr_noopt, 1, gen_cmdkvs);
    if (ret != MPI_SUCCESS) {
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
    kmr_dump_kvs(kvs_commands, 1);

    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) {
	fprintf(stderr, "MAP_ONCE DONE\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);

    /* Distribute the commands over the ranks. */

    KMR_KVS *kvs_runcmds = kmr_create_kvs(mr, KMR_KV_INTEGER, KMR_KV_OPAQUE);
    ret = kmr_shuffle(kvs_commands, kvs_runcmds, kmr_noopt);
    if (ret != MPI_SUCCESS) {
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
    kmr_dump_kvs(kvs_runcmds, 1);

    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) {
	fprintf(stderr, "SHUFFLE DONE\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);

    /* Run the commands as serial processes. */

    KMR_KVS *kvs_results = kmr_create_kvs(mr, KMR_KV_INTEGER, KMR_KV_INTEGER);
    struct kmr_spawn_option sopt_sepsp = { .separator_space = 1 };
    ret = kmr_map_serial_processes(kvs_runcmds, kvs_results, 0, MPI_INFO_NULL,
				   sopt_sepsp, output_result);
    if (ret != MPI_SUCCESS) {
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
    kmr_dump_kvs(kvs_results, 1);
    kmr_free_kvs(kvs_results);

    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) {
	fprintf(stderr, "MAP_SPAWN DONE\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0) {
	fprintf(stderr, "Finish\n");
    }

    kmr_free_context(mr);
    kmr_fin();
    MPI_Finalize();
    return 0;
}
Exemple #3
0
/* Returns the elapsed wall-clock time in seconds since the first call
   to this function.  The first call records the base time, so it
   returns exactly zero. */

static double
wtime()
{
    static struct timeval base = {.tv_sec = 0};
    struct timeval now;
    int rc = gettimeofday(&now, 0);
    assert(rc == 0);
    if (base.tv_sec == 0) {
	base = now;
	assert(base.tv_sec != 0);
    }
    return ((double)(now.tv_sec - base.tv_sec)
	    + 1e-6 * (double)(now.tv_usec - base.tv_usec));
}

/* Puts 200 key-value pairs to output KVO.  It is a map-function.  It
   runs only on rank0.  Inputs (KV0 and KVS0) are dummy. */

/* Puts 200 pairs "keyN" -> "valueN" into KVO.  It is a map-function
   intended for rank 0 only; the dummy inputs are asserted empty. */

static int
addkeysfn(const struct kmr_kv_box kv0,
	  const KMR_KVS *kvs0, KMR_KVS *kvo, void *p, const long ind)
{
    assert(kvs0 == 0 && kv0.klen == 0 && kv0.vlen == 0 && kvo != 0);
    for (int i = 0; i < 200; i++) {
	char key[80];
	char val[80];
	snprintf(key, sizeof(key), "key%d", i);
	snprintf(val, sizeof(val), "value%d", i);
	struct kmr_kv_box pair = {
	    .klen = (int)(strlen(key) + 1),
	    .vlen = (int)(strlen(val) + 1),
	    .k.p = key,
	    .v.p = val
	};
	int rc = kmr_add_kv(kvo, pair);
	assert(rc == MPI_SUCCESS);
    }
    return MPI_SUCCESS;
}

/* Replaces the value of each "keyN" pair with "newvalueN".  It is a
   map-function.  Fix: the snprintf bound was hard-coded to 10, which
   truncated the value for N >= 10 ("newvalue" alone is 8 chars); keys
   here run up to 199, so most values were silently corrupted.  Use
   the full buffer size instead. */

static int
replacevaluefn(const struct kmr_kv_box kv0,
	       const KMR_KVS *kvs0, KMR_KVS *kvo, void *p,
	       const long i)
{
    assert(kvs0 != 0 && kvo != 0);
    int cc, x;
    char gomi;
    /* Parse the numeric suffix of the key "keyN". */
    cc = sscanf((&((char *)kv0.k.p)[3]), "%d%c", &x, &gomi);
    assert(cc == 1);
    char v[80];
    snprintf(v, sizeof(v), "newvalue%d", x);
    struct kmr_kv_box kv = {.klen = kv0.klen,
			    .vlen = (int)(strlen(v) + 1),
			    .k.p = kv0.k.p,
			    .v.p = v
    };
    cc = kmr_add_kv(kvo, kv);
    assert(cc == MPI_SUCCESS);
    return MPI_SUCCESS;
}

/* A reduce-function that discards all grouped input pairs and adds
   nothing to KVO; it exists only to exercise the reduce machinery. */

static int
emptyreducefn(const struct kmr_kv_box kv[], const long n,
	      const KMR_KVS *kvs, KMR_KVS *kvo, void *p)
{
    return MPI_SUCCESS;
}

/* Do KMR operations many times. */

/* Runs a put/replicate/map/shuffle/reduce cycle repeatedly for about
   20 seconds so heap growth (leakage) can be observed from outside.
   NOTE(review): the chain frees only the last KVS explicitly, which
   assumes each kmr_* transform consumes (frees) its input KVS — the
   KMR default; confirm against the kmr_noopt semantics. */

static void
simple0(int nprocs, int rank)
{
    int cc;

    KMR *mr = kmr_create_context(MPI_COMM_WORLD, MPI_INFO_NULL, 0);

    double t0, t1;
    t0 = wtime();

    for (int i = 0; i < 10000; i++) {

	/* Check timeout. */

	/* Rank 0 decides whether 20 seconds have elapsed and the
	   decision is replicated, so every rank leaves the loop in
	   the same iteration. */

	t1 = wtime();
	KMR_KVS *to0 = kmr_create_kvs(mr, KMR_KV_INTEGER, KMR_KV_INTEGER);
	if (rank == 0) {
	    struct kmr_kv_box kv = {
		.klen = (int)sizeof(long),
		.vlen = (int)sizeof(long),
		.k.i = 0,
		.v.i = ((t1 - t0) > 20.0)
	    };
	    cc = kmr_add_kv(to0, kv);
	    assert(cc == MPI_SUCCESS);
	}
	cc = kmr_add_kv_done(to0);
	assert(cc == MPI_SUCCESS);
	KMR_KVS *to1 = kmr_create_kvs(mr, KMR_KV_INTEGER, KMR_KV_INTEGER);
	cc = kmr_replicate(to0, to1, kmr_noopt);
	assert(cc == MPI_SUCCESS);
	/* Look up the single key 0 (the union member .k.p = 0 also
	   zeroes the integer key). */
	struct kmr_kv_box tok = {.klen = (int)sizeof(long), .k.p = 0,
				 .vlen = 0, .v.p = 0};
	struct kmr_kv_box tov;
	cc = kmr_find_key(to1, tok, &tov);
	assert(cc == MPI_SUCCESS);
	cc = kmr_free_kvs(to1);
	assert(cc == MPI_SUCCESS);
	if (tov.v.i) {
	    if (rank == 0) {
		printf("loops %d\n", i);
	    }
	    break;
	}

	/* Put some pairs. */

	KMR_KVS *kvs0 = kmr_create_kvs(mr, KMR_KV_OPAQUE, KMR_KV_OPAQUE);
	cc = kmr_map_on_rank_zero(kvs0, 0, kmr_noopt, addkeysfn);
	assert(cc == MPI_SUCCESS);

	/* Replicate pairs to all ranks. */

	KMR_KVS *kvs1 = kmr_create_kvs(mr, KMR_KV_OPAQUE, KMR_KV_OPAQUE);
	cc = kmr_replicate(kvs0, kvs1, kmr_noopt);
	assert(cc == MPI_SUCCESS);

	/* Map pairs. */

	KMR_KVS *kvs2 = kmr_create_kvs(mr, KMR_KV_OPAQUE, KMR_KV_OPAQUE);
	cc = kmr_map(kvs1, kvs2, 0, kmr_noopt, replacevaluefn);
	assert(cc == MPI_SUCCESS);

	/* Collect pairs by theirs keys. */

	KMR_KVS *kvs3 = kmr_create_kvs(mr, KMR_KV_OPAQUE, KMR_KV_OPAQUE);
	cc = kmr_shuffle(kvs2, kvs3, kmr_noopt);
	assert(cc == MPI_SUCCESS);

	/* Reduce collected pairs. */

	KMR_KVS *kvs4 = kmr_create_kvs(mr, KMR_KV_OPAQUE, KMR_KV_OPAQUE);
	cc = kmr_reduce(kvs3, kvs4, 0, kmr_noopt, emptyreducefn);
	assert(cc == MPI_SUCCESS);

	cc = kmr_free_kvs(kvs4);
	assert(cc == MPI_SUCCESS);
    }

    cc = kmr_free_context(mr);
    assert(cc == MPI_SUCCESS);
}

/* Runs simple0() several times, printing the process status ("ps l")
   after each round so VSZ growth can be watched for leakage. */

int
main(int argc, char *argv[])
{
    char pscmd[256];
    int mypid = getpid();
    int rounds = 8;

    int nprocs, rank, thlv;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &thlv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    kmr_init();

    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) {
	printf("Check leakage by observing heap size.\n");
	printf("Watch VSZ changes (loops %d times)...\n", rounds);
	printf("(Each loop will take approx. 20 sec).\n");
    }
    fflush(0);
    usleep(50 * 1000);
    MPI_Barrier(MPI_COMM_WORLD);

    for (int round = 0; round < rounds; round++) {
	simple0(nprocs, rank);

	MPI_Barrier(MPI_COMM_WORLD);
	if (rank == 0) {
	    snprintf(pscmd, sizeof(pscmd), "ps l %d", mypid);
	    system(pscmd);
	}
	fflush(0);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == 0) {
	printf("OK\n");
    }
    fflush(0);

    kmr_fin();

    MPI_Finalize();

    return 0;
}
Exemple #4
0
/* Creates this rank's initial data file and emits one pair mapping
   COMMON->key to the created file name; also records the per-file
   value count in COMMON.  It is a map-function for kmr_map_once().
   NOTE(review): pointing the value at the stack buffer assumes
   kmr_add_kv copies the pair — confirm against the KMR API. */

static int
add_initial_data(const struct kmr_kv_box kv,
		 const KMR_KVS *kvi, KMR_KVS *kvo, void *p, long i_)
{
    common_t *common = (common_t *)p;
    char fname[FILENAME_LEN];
    create_file(common->rank, common->iteration, common->file_size,
		fname, FILENAME_LEN);
    common->val_count = IO_COUNT * common->file_size;
    struct kmr_kv_box pair = { .k.p = common->key,
			       .klen = sizeof(char) * (strlen(common->key) + 1),
			       .v.p = (void *)fname,
			       .vlen = sizeof(char) * (strlen(fname) + 1) };
    kmr_add_kv(kvo, pair);
    return MPI_SUCCESS;
}

/* Reads the data file named by KV's value, increments every long in
   it by one, writes the result to the next iteration's file, emits a
   pair mapping COMMON->key to the new file name, and deletes the old
   file.  It is a map-function.  Fixes: the I/O buffer allocation is
   now checked (was an unchecked malloc), and the DEBUG-path fread()
   result is checked. */

static int
increment_in_file_value(const struct kmr_kv_box kv,
			const KMR_KVS *kvi, KMR_KVS *kvo, void *p, long i_)
{
    common_t *common = (common_t *)p;
    char *infile = (char *)kv.v.p;
    char outfile[FILENAME_LEN];
    snprintf(outfile, FILENAME_LEN, "./%06d-%02d.dat", common->rank,
	     common->iteration + 1);

    FILE *ifp = fopen(infile, "r");
    FILE *ofp = fopen(outfile, "w+");
    assert(ifp != 0 && ofp != 0);
    /* read/write 1MB at once */
    long *buf = (long *)malloc(sizeof(long) * IO_COUNT);
    assert(buf != 0);
    for (int i = 0; i < common->file_size; i++) {
	size_t cc = fread(buf, sizeof(long), IO_COUNT, ifp);
	assert(cc == IO_COUNT);
	for (int j = 0; j < IO_COUNT; j++) {
	    buf[j] += 1;
	}
	cc = fwrite(buf, sizeof(long), IO_COUNT, ofp);
	assert(cc == IO_COUNT);
    }
    free(buf);
    fclose(ofp);

    struct kmr_kv_box nkv = { .klen = sizeof(char) * (strlen(common->key) + 1),
			      .k.p = common->key,
			      .vlen = sizeof(char) * (strlen(outfile) + 1),
			      .v.p = (void *)outfile };
    kmr_add_kv(kvo, nkv);
#ifdef DEBUG
    /* Re-read the first value of the input file for tracing. */
    fseek(ifp, 0, SEEK_SET);
    long val;
    size_t rr = fread(&val, sizeof(long), 1, ifp);
    assert(rr == 1);
    fprintf(stderr, "Rank[%d]: process key[%s]-val[%ld]\n",
	    common->rank, (char *)kv.k.p, val);
#endif
    fclose(ifp);
    delete_file(common->rank, common->iteration);

    return MPI_SUCCESS;
}


/* Builds an initial per-rank data file, then repeatedly applies
   increment_in_file_value over a chain of KVSes, timing each
   iteration and reporting the results. */

int
main(int argc, char **argv)
{
    int thlv;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &thlv);
    int nprocs, rank, task_nprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    check_nprocs(nprocs, rank, &task_nprocs);
    kmr_init();
    KMR *mr = kmr_create_context(MPI_COMM_WORLD, MPI_INFO_NULL, 0);
    mr->verbosity = 5;
    mr->trace_map_mp = 1;

    /* Alternate between an even-group and an odd-group key. */

    char even_key[KEY_LEN];
    char odd_key[KEY_LEN];
    snprintf(even_key, KEY_LEN, "even%06d", (rank / task_nprocs + 1));
    snprintf(odd_key,  KEY_LEN, "odd%06d",  (rank % task_nprocs + 1));

    common_t common0;
    common0.key = even_key;
    parse_param_file(argc, argv, &(common0.file_size));
    common0.rank = rank;
    common0.iteration = 0;
    KMR_KVS *chain = kmr_create_kvs(mr, KMR_KV_OPAQUE, KMR_KV_OPAQUE);
    kmr_map_once(chain, &common0, kmr_noopt, 0, add_initial_data);

    double itr_times[ITERATIONS];
    for (int itr = 0; itr < ITERATIONS; itr++) {
	common0.key = ((itr % 2 == 0) ? odd_key : even_key);
	common0.iteration = itr;
	KMR_KVS *next = kmr_create_kvs(mr, KMR_KV_OPAQUE, KMR_KV_OPAQUE);

	struct timeval t_start;
	measure_time(&t_start);
	kmr_map_multiprocess_by_key(chain, next, &common0, kmr_noopt, rank,
				    increment_in_file_value);
	struct timeval t_end;
	measure_time(&t_end);
	itr_times[itr] = calc_time_diff(&t_start, &t_end);

	chain = next;
    }
    kmr_free_kvs(chain);
    delete_file(common0.rank, common0.iteration + 1);

    print_time(itr_times, ITERATIONS, rank);

    kmr_free_context(mr);
    kmr_fin();
    MPI_Finalize();
    return 0;
}