Example #1
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv){
    MPI_Init(&argc, &argv);
    int rank, nproc;
    int a = 4;            /* value exposed by rank 0 through the dynamic window */

    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Win win;
    MPI_Aint remote;
    MPI_Aint local;
    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    if(rank == 0){
        /* Attach local memory and publish its address to rank 1. */
        MPI_Win_attach(win, &a, sizeof(int));
        MPI_Get_address(&a, &local);
        MPI_Send(&local, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD);
    }
    else if(rank == 1){
        int val;
        MPI_Status reqstat;
        MPI_Recv(&remote, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD, &reqstat);
        /* The received address is used directly as the displacement;
           an access epoch is required around MPI_Get. */
        MPI_Win_lock(MPI_LOCK_SHARED, 0, 0, win);
        MPI_Get(&val, 1, MPI_INT, 0, remote, 1, MPI_INT, win);
        MPI_Win_unlock(0, win);
        printf("rank 1 read %d\n", val);
    }

    MPI_Barrier(MPI_COMM_WORLD);   /* make sure the Get has completed before detaching */
    if(rank == 0)
        MPI_Win_detach(win, &a);
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}
Example #2
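/* Benchmark excerpt: MPI_CHECK, the WINDOW enum, and the globals sdisp_local /
   sdisp_remote are defined elsewhere in the surrounding source.  Note that
   MPI_Win_allocate expects a pointer to the buffer pointer as its fifth
   argument, so the default branch assumes rbuf is used that way in the
   original code. */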
void allocate_memory(int rank, char *rbuf, int size, WINDOW type, MPI_Win *win)
{
    MPI_Status  reqstat;

    switch (type){
        case WIN_DYNAMIC:
            MPI_CHECK(MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, win));
            MPI_CHECK(MPI_Win_attach(*win, (void *)rbuf, size));
            MPI_CHECK(MPI_Get_address(rbuf, &sdisp_local));
            if(rank == 0){
                MPI_CHECK(MPI_Send(&sdisp_local, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD));
                MPI_CHECK(MPI_Recv(&sdisp_remote, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD, &reqstat));
            }
            else{
                MPI_CHECK(MPI_Recv(&sdisp_remote, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD, &reqstat));
                MPI_CHECK(MPI_Send(&sdisp_local, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD));
            }
            break;
        case WIN_CREATE:
            MPI_CHECK(MPI_Win_create(rbuf, size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win));
            break;
        default:
            MPI_CHECK(MPI_Win_allocate(size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, rbuf, win));
            break;
    }
}
Example #3
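/* Open MPI Java bindings: JNI glue that takes the native address of a direct
   ByteBuffer and forwards it to MPI_Win_attach, converting any MPI error code
   into a Java exception. */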
JNIEXPORT void JNICALL Java_mpi_Win_attach(
        JNIEnv *env, jobject jthis, jlong win, jobject jBase,
        jint size)
{
    void *base = (*env)->GetDirectBufferAddress(env, jBase);

    int rc = MPI_Win_attach((MPI_Win)win, base, (MPI_Aint)size);

    ompi_java_exceptionCheck(env, rc);
}
Example #4
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

/* Iteration count per target rank; the concrete value is arbitrary for this
   test and is assumed here because the original macro definition is not shown. */
#define ITER_PER_RANK 100

int main(int argc, char **argv)
{
    int i, rank, nproc;
    int errors = 0, all_errors = 0;
    int val = 0, one = 1;
    int iter;
    MPI_Aint *val_ptrs;
    MPI_Win dyn_win;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    iter = ITER_PER_RANK * nproc;

    val_ptrs = malloc(nproc * sizeof(MPI_Aint));
    MPI_Get_address(&val, &val_ptrs[rank]);

    MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, val_ptrs, 1, MPI_AINT, MPI_COMM_WORLD);

    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &dyn_win);
    MPI_Win_attach(dyn_win, &val, sizeof(int));

    for (i = 0; i < iter; i++) {
        MPI_Win_fence(MPI_MODE_NOPRECEDE, dyn_win);
        MPI_Accumulate(&one, 1, MPI_INT, i % nproc, val_ptrs[i % nproc], 1, MPI_INT, MPI_SUM,
                       dyn_win);
        MPI_Win_fence(MPI_MODE_NOSUCCEED, dyn_win);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    /* Read and verify my data */
    if (val != iter) {
        errors++;
        printf("%d -- Got %d, expected %d\n", rank, val, iter);
    }

    MPI_Win_detach(dyn_win, &val);
    MPI_Win_free(&dyn_win);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");

    free(val_ptrs);
    MPI_Finalize();

    return 0;
}
Example #5
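/* XcalableMP runtime excerpt: the _XMP_* / XACC_DEBUG helpers, the
   _xmp_mpi_distarray_win globals and _XMP_flag_multi_win are defined
   elsewhere in the runtime. */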
void _XMP_mpi_coarray_attach(_XMP_coarray_t *coarray_desc, void *addr, const size_t coarray_size, const bool is_acc)
{
  MPI_Win win = MPI_WIN_NULL;
  char **each_addr = NULL;  // head address of a local array on each node

  _XMP_nodes_t *nodes = _XMP_get_execution_nodes();
  int comm_size = nodes->comm_size;
  MPI_Comm comm = *(MPI_Comm *)nodes->comm;

  XACC_DEBUG("attach addr=%p, size=%zd, is_acc=%d", addr, coarray_size, is_acc);

  if(_XMP_flag_multi_win){
    _XMP_mpi_onesided_create_win(&win, addr, coarray_size, comm);
    MPI_Win_lock_all(MPI_MODE_NOCHECK, win);
  }else{
    win = _xmp_mpi_distarray_win;
#ifdef _XMP_XACC
    if(is_acc){
      win = _xmp_mpi_distarray_win_acc;
    }
#endif
    MPI_Win_attach(win, addr, coarray_size);

    each_addr = (char**)_XMP_alloc(sizeof(char *) * comm_size);

    MPI_Allgather(&addr, sizeof(char *), MPI_BYTE,
		  each_addr, sizeof(char *), MPI_BYTE,
		  comm); // exchange displacement
  }

  if(is_acc){
#ifdef _XMP_XACC
    coarray_desc->addr_dev = each_addr;
    coarray_desc->real_addr_dev = addr;
    coarray_desc->win_acc = win;
    coarray_desc->nodes = nodes;
#endif
  }else{
    coarray_desc->addr = each_addr;
    coarray_desc->real_addr = addr;
    coarray_desc->win = win;
    coarray_desc->win_acc = MPI_WIN_NULL;
    coarray_desc->nodes = nodes;
  }
}
Example #6
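/* Excerpt from a shared linked-list RMA example: llist_elem_t, nil, my_elems,
   my_elems_size and my_elems_count are defined elsewhere in the original
   program. */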
/* Allocate a new shared linked list element */
MPI_Aint alloc_elem(int value, MPI_Win win) {
    MPI_Aint disp;
    llist_elem_t *elem_ptr;

    /* Allocate the new element and register it with the window */
    MPI_Alloc_mem(sizeof(llist_elem_t), MPI_INFO_NULL, &elem_ptr);
    elem_ptr->value = value;
    elem_ptr->next  = nil;
    MPI_Win_attach(win, elem_ptr, sizeof(llist_elem_t));

    /* Add the element to the list of local elements so we can free it later. */
    if (my_elems_size == my_elems_count) {
        my_elems_size += 100;
        my_elems = realloc(my_elems, my_elems_size*sizeof(void*));
    }
    my_elems[my_elems_count] = elem_ptr;
    my_elems_count++;

    MPI_Get_address(elem_ptr, &disp);
    return disp;
}
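Example #7

/* Standalone excerpt: the COUNT macro (number of ints in the attached buffer)
   is assumed to be defined elsewhere in the original source. */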
void run_rma_test(int nprocs_per_node)
{
  int myrank, nprocs;
  int mem_rank;
  MPI_Win win;
  int *baseptr;
  MPI_Aint local_size;

  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  if (nprocs < nprocs_per_node * 2)
  {
    if (!myrank) printf("should start program with at least %d processes\n", nprocs_per_node * 2);
    MPI_Finalize();
    exit(EXIT_FAILURE);
  }


  mem_rank = nprocs_per_node + nprocs_per_node / 2;

  local_size = (myrank == mem_rank) ? COUNT : 0;

  MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &win);

  MPI_Win_lock_all(0, win);



  int type_size;
  MPI_Type_size(MPI_INT, &type_size);

  size_t nbytes = COUNT * type_size;

  assert(MPI_Alloc_mem(nbytes, MPI_INFO_NULL, &baseptr) == MPI_SUCCESS);
  assert(MPI_Win_attach(win, baseptr, nbytes) == MPI_SUCCESS);

  MPI_Aint ldisp;
  MPI_Aint *disps = malloc(nprocs * sizeof(MPI_Aint));

  assert(MPI_Get_address(baseptr, &ldisp) == MPI_SUCCESS);

  assert(MPI_Allgather(&ldisp, 1, MPI_AINT, disps, 1, MPI_AINT, MPI_COMM_WORLD) == MPI_SUCCESS);

  if (myrank == 0)
  {
    for (size_t idx = 0; idx < COUNT; ++idx) {
      baseptr[idx] = idx * COUNT + 1;
    }
  }

  MPI_Barrier(MPI_COMM_WORLD);

  if (myrank == mem_rank) {
    /* Fetch the first 10 ints from rank 0 and check them against the values
       rank 0 stored above. */
    assert(MPI_Get(baseptr, 10, MPI_INT, 0, disps[0], 10, MPI_INT, win) == MPI_SUCCESS);
    assert(MPI_Win_flush(0, win) == MPI_SUCCESS);

    for (size_t idx = 0; idx < 10; ++idx) {
      assert(baseptr[idx] == idx * COUNT + 1);
    }
  }

  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Win_unlock_all(win);

  MPI_Barrier(MPI_COMM_WORLD);

  MPI_Win_free(&win);

  MPI_Free_mem(baseptr);

  printf("Test finished\n");
}
Example #8
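/* MPICH test-suite excerpt: MTest_Init, MTest_Finalize and MTestError come
   from the suite's test framework; MPIX_Aint_add and MPIX_Aint_diff are the
   pre-standard MPICH names of MPI_Aint_add / MPI_Aint_diff. */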
int main(int argc, char **argv)
{
    int rank, nproc;
    int errs = 0;
    int array[1024];
    int val = 0;
    int target_rank;
    MPI_Aint bases[2];
    MPI_Aint disp, offset;
    MPI_Win  win;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    if (rank == 0 && nproc != 2) {
        MTestError("Must run with 2 ranks\n");
    }

    /* Get the base address in the middle of the array */
    if (rank == 0) {
        target_rank = 1;
        array[0] = 1234;
        MPI_Get_address(&array[512], &bases[0]);
    } else if (rank == 1) {
        target_rank = 0;
        array[1023] = 1234;
        MPI_Get_address(&array[512], &bases[1]);
    }

    /* Exchange bases */
    MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, bases, 1, MPI_AINT, MPI_COMM_WORLD);

    MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    MPI_Win_attach(win, array, sizeof(int)*1024);

    /* Do MPI_Aint addressing arithmetic */
    if (rank == 0) {
        disp = sizeof(int)*511;
        offset = MPIX_Aint_add(bases[1], disp); /* offset points to array[1023]*/
    } else if (rank == 1) {
        disp = sizeof(int)*512;
        offset = MPIX_Aint_diff(bases[0], disp); /* offset points to array[0] */
    }

    /* Get val and verify it */
    MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
    MPI_Get(&val, 1, MPI_INT, target_rank, offset, 1, MPI_INT, win);
    MPI_Win_fence(MPI_MODE_NOSUCCEED, win);

    if (val != 1234) {
        errs++;
        printf("%d -- Got %d, expected 1234\n", rank, val);
    }

    MPI_Win_detach(win, array);
    MPI_Win_free(&win);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}