Example #1
Arr* CombineStreams(DGraph *dg,DGNode *nd){
  Arr *resfeat=newArr(NUM_SAMPLES*fielddim);
  int i=0,len=0,tag=0;
  DGArc *ar=NULL;
  DGNode *tail=NULL;
  MPI_Status status;
  Arr *feat=NULL,*featp=NULL;

  if(nd->inDegree==0) return NULL;
  for(i=0;i<nd->inDegree;i++){
    ar=nd->inArc[i];
    if(ar->head!=nd) continue;
    tail=ar->tail;
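    /* Remote producer: receive the feature length, then the feature data */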
    if(tail->address!=nd->address){
      len=0;
      tag=ar->id;
      MPI_Recv(&len,1,MPI_INT,tail->address,tag,MPI_COMM_WORLD,&status);
      feat=newArr(len);
      MPI_Recv(feat->val,feat->len,MPI_DOUBLE,tail->address,tag,MPI_COMM_WORLD,&status);
      resfeat=WindowFilter(resfeat,feat,nd->id);
      SMPI_SHARED_FREE(feat);
    }else{
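      /* Local producer: copy its feature array directly from memory */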
      featp=(Arr *)tail->feat;
      feat=newArr(featp->len);
      memcpy(feat->val,featp->val,featp->len*sizeof(double));
      resfeat=WindowFilter(resfeat,feat,nd->id);  
      SMPI_SHARED_FREE(feat);
    }
  }
  for(i=0;i<resfeat->len;i++) resfeat->val[i]=((int)resfeat->val[i])/nd->inDegree;
  nd->feat=resfeat;
  return nd->feat;
}
Example #2
double ReduceStreams(DGraph *dg,DGNode *nd){
  double csum=0.0;
  int i=0,len=0,tag=0;
  DGArc *ar=NULL;
  DGNode *tail=NULL;
  Arr *feat=NULL;
  double retv=0.0;

  TRACE_smpi_set_category("ReduceStreams"); /* tag the following MPI calls with a tracing category */

  for(i=0;i<nd->inDegree;i++){
    ar=nd->inArc[i];
    if(ar->head!=nd) continue;
    tail=ar->tail;
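    /* Remote producer: receive the stream, then reduce it locally */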
    if(tail->address!=nd->address){
      MPI_Status status;
      len=0;
      tag=ar->id;
      MPI_Recv(&len,1,MPI_INT,tail->address,tag,MPI_COMM_WORLD,&status);
      feat=newArr(len);
      MPI_Recv(feat->val,feat->len,MPI_DOUBLE,tail->address,tag,MPI_COMM_WORLD,&status);
      csum+=Reduce(feat,(nd->id+1));  
      SMPI_SHARED_FREE(feat);
    }else{
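      /* Local producer: reduce its feature array in place */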
      csum+=Reduce(tail->feat,(nd->id+1));  
    }
  }
  if(nd->inDegree>0)csum=(((long long int)csum)/nd->inDegree);
  retv=(nd->id+1)*csum;
  return retv;
}
Example #3
int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int rank;
  int size;
  size_t mem_size = 0x1000000;
  size_t shared_blocks[] = {
    0,        0x123456,
    0x130000, 0x130001,
    0x345678, 0x345789,
    0x444444, 0x555555,
    0x555556, 0x560000,
    0x800000, 0x1000000
  };
  int nb_blocks = (sizeof(shared_blocks)/sizeof(size_t))/2;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  // Allocate a buffer in which only the (start, stop) ranges listed in shared_blocks are shared between ranks; the rest stays private to each process
  uint8_t *buf;
  buf = SMPI_PARTIAL_SHARED_MALLOC(mem_size, shared_blocks, nb_blocks);
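  // set(), check_enough() and check_all() are helper routines defined in the original test file, not shown in this snippet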
  set(buf, 0, mem_size, 0);
  MPI_Barrier(MPI_COMM_WORLD);

  // Process 0 writes into the shared blocks
  if(rank == 0) {
    for(int i = 0; i < nb_blocks; i++) {
      size_t start = shared_blocks[2*i];
      size_t stop = shared_blocks[2*i+1];
      set(buf, start, stop, 42);
    }
  }
  MPI_Barrier(MPI_COMM_WORLD);
  // All processes check that their shared blocks have been written (at least partially)
  for(int i = 0; i < nb_blocks; i++) {
    size_t start = shared_blocks[2*i];
    size_t stop = shared_blocks[2*i+1];
    int is_shared = check_enough(buf, start, stop, 42);
    printf("[%d] The result of the shared check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, is_shared);
  }


  // Check the private blocks
  MPI_Barrier(MPI_COMM_WORLD);
  for(int i = 0; i < nb_blocks-1; i++) {
    size_t start = shared_blocks[2*i+1];
    size_t stop = shared_blocks[2*i+2];
    int is_private = check_all(buf, start, stop, 0);
    printf("[%d] The result of the private check for block (0x%zx, 0x%zx) is: %d\n", rank, start, stop, is_private);
  }

  SMPI_SHARED_FREE(buf);

  MPI_Finalize();
  return 0;
}
Example #4
int main(int argc, char *argv[])
{
  int rank, proc_count;
  int i;
  char *sb;
  char *rb;
  int status;
  int datasize;
   
  /* Make sure that previous output is written out to the NFS even if G5K decides to kill the current machine */
  fdatasync(1);
  fdatasync(2);
   
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &proc_count);

  if (argc<2 || atoi(argv[1]) == 0) {
     printf("Usage: alltoall datasize\n");
     exit(1);
  }
  datasize=atoi(argv[1]);
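  /* Under SMPI_SHARED_MALLOC the buffer content is not meaningful (all ranks share the same memory), so one allocation is reused for both the send and the receive buffer */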
   
  rb = sb = (char *) SMPI_SHARED_MALLOC(proc_count * datasize);

  if (sb == NULL) {
     printf("Malloc error");
     exit(1);
  }   
   
  memset(sb, 1, proc_count * datasize);

  status = MPI_Alltoall(sb, datasize, MPI_CHAR, rb, datasize, MPI_CHAR, MPI_COMM_WORLD);

  if (rank == 0) {
    if (status != MPI_SUCCESS) {
      printf("[%f] all_to_all returned %d", MPI_Wtime(), status);
      fflush(stdout);
      return status;
    } else  {
      printf("simTime:%f Success numproc=%d msgsize=%d", MPI_Wtime(),proc_count,datasize);
    }
  }
  SMPI_SHARED_FREE(sb);
  MPI_Finalize();
  return EXIT_SUCCESS;
}
Example #5
void Resample(Arr *a,int blen){
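    /* Resample the feature array a onto blen points, replacing a->val in place */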
    long long int i=0,j=0,jlo=0,jhi=0;
    double avval=0.0;
    double *nval=(double *)SMPI_SHARED_MALLOC(blen*sizeof(double));
    for(i=0;i<blen;i++) nval[i]=0.0;
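    /* Spread each interior input sample over its window [jlo, jhi] in the resampled array */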
    for(i=1;i<a->len-1;i++){
      jlo=(int)(0.5*(2*i-1)*((double)blen/a->len)); /* real-valued ratio, so the window bounds are not truncated by integer division */
      jhi=(int)(0.5*(2*i+1)*((double)blen/a->len));

      avval=a->val[i]/(jhi-jlo+1);    
      for(j=jlo;j<=jhi;j++){
        nval[j]+=avval;
      }
    }
    nval[0]=a->val[0];
    nval[blen-1]=a->val[a->len-1];
    SMPI_SHARED_FREE(a->val);
    a->val=nval;
    a->len=blen;
}
Example #6
int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  // Allocate a shared memory buffer; every rank gets the same underlying memory
  uint64_t* buf = SMPI_SHARED_MALLOC(sizeof(uint64_t));
  // Rank 0 writes data into it
  if(rank==0){
    *buf=size;  
  }
  
  MPI_Barrier(MPI_COMM_WORLD);
  // Everyone reads from it
  printf("[%d] The value in the shared buffer is: %" PRIu64"\n", rank, *buf);
  
  
  MPI_Barrier(MPI_COMM_WORLD);
  // Try SMPI_SHARED_CALL, which should call hash only once and for all ranks
  char *str = strdup("onceandforall");
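  // hash is a helper defined elsewhere in the original example, not shown in this snippet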
  if(rank==size-1){
    SMPI_SHARED_CALL(hash,str,str,buf);  
  }
  
  MPI_Barrier(MPI_COMM_WORLD);
  
  printf("[%d] After change, the value in the shared buffer is: %" PRIu64"\n", rank, *buf);
  
  SMPI_SHARED_FREE(buf);  
  buf=NULL;  
  free(str);
    
  MPI_Finalize();
  return 0;
}