Example #1
/* Combine the feature streams arriving on all incoming arcs of node nd:
   remote tails are received over MPI (length first, then values), local
   tails are copied, each stream is folded into resfeat via WindowFilter,
   and the result is averaged over the node's in-degree. */
Arr* CombineStreams(DGraph *dg,DGNode *nd){
  Arr *resfeat=newArr(NUM_SAMPLES*fielddim);
  int i=0,len=0,tag=0;
  DGArc *ar=NULL;
  DGNode *tail=NULL;
  MPI_Status status;
  Arr *feat=NULL,*featp=NULL;

  if(nd->inDegree==0) return NULL;
  for(i=0;i<nd->inDegree;i++){
    ar=nd->inArc[i];
    if(ar->head!=nd) continue;
    tail=ar->tail;
    if(tail->address!=nd->address){
      /* Remote source: receive the stream length, then the values. */
      len=0;
      tag=ar->id;
      MPI_Recv(&len,1,MPI_INT,tail->address,tag,MPI_COMM_WORLD,&status);
      feat=newArr(len);
      MPI_Recv(feat->val,feat->len,MPI_DOUBLE,tail->address,tag,MPI_COMM_WORLD,&status);
      resfeat=WindowFilter(resfeat,feat,nd->id);
      SMPI_SHARED_FREE(feat);
    }else{
      /* Local source: copy the tail's features before filtering. */
      featp=(Arr *)tail->feat;
      feat=newArr(featp->len);
      memcpy(feat->val,featp->val,featp->len*sizeof(double));
      resfeat=WindowFilter(resfeat,feat,nd->id);
      SMPI_SHARED_FREE(feat);
    }
  }
  /* Average over the in-degree; each value is truncated to int first. */
  for(i=0;i<resfeat->len;i++) resfeat->val[i]=((int)resfeat->val[i])/nd->inDegree;
  nd->feat=resfeat;
  return nd->feat;
}
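Examples #1 through #4 all rely on the small Arr container and its newArr allocator, which are not shown on this page. The following is a minimal sketch of what those helpers could look like, assuming an Arr that stores a length plus a separately allocated value buffer and SimGrid's SMPI shared-memory allocator; the exact names and layout in the benchmark sources may differ.

#include <smpi/smpi.h>   /* SimGrid SMPI: SMPI_SHARED_MALLOC / SMPI_SHARED_FREE */

/* Assumed container: a length plus a separately allocated value buffer. */
typedef struct {
  int len;       /* number of doubles in val */
  double *val;   /* feature values */
} Arr;

/* Assumed allocator, matching the SMPI_SHARED_FREE calls in the examples. */
Arr *newArr(int len){
  Arr *arr=(Arr *)SMPI_SHARED_MALLOC(sizeof(Arr));
  arr->len=len;
  arr->val=(double *)SMPI_SHARED_MALLOC(len*sizeof(double));
  return arr;
}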
Example #2
/* Reduce the streams arriving on all incoming arcs of node nd: remote
   streams are received over MPI (length first, then values), local streams
   are reduced directly from the tail node. The partial sums are averaged
   over the in-degree (with truncation) and scaled by (nd->id+1). */
double ReduceStreams(DGraph *dg,DGNode *nd){
  double csum=0.0;
  int i=0,len=0,tag=0;
  DGArc *ar=NULL;
  DGNode *tail=NULL;
  Arr *feat=NULL;
  double retv=0.0;

  TRACE_smpi_set_category ("ReduceStreams");

  for(i=0;i<nd->inDegree;i++){
    ar=nd->inArc[i];
    if(ar->head!=nd) continue;
    tail=ar->tail;
    if(tail->address!=nd->address){
      /* Remote source: receive the stream length, then the values. */
      MPI_Status status;
      len=0;
      tag=ar->id;
      MPI_Recv(&len,1,MPI_INT,tail->address,tag,MPI_COMM_WORLD,&status);
      feat=newArr(len);
      MPI_Recv(feat->val,feat->len,MPI_DOUBLE,tail->address,tag,MPI_COMM_WORLD,&status);
      csum+=Reduce(feat,(nd->id+1));
      SMPI_SHARED_FREE(feat);
    }else{
      /* Local source: reduce the tail's features directly. */
      csum+=Reduce(tail->feat,(nd->id+1));
    }
  }
  if(nd->inDegree>0) csum=(((long long int)csum)/nd->inDegree);
  retv=(nd->id+1)*csum;
  return retv;
}
Example #3
/* Fill a feature array of length GetFeatureNum(bmname,id)*fdim with
   pseudo-random values from four multiplicative congruential generators
   (one each for the x, y, z and f components). The loop body writes four
   components per step, so fdim is expected to be 4. */
Arr* RandomFeatures(char *bmname,int fdim,int id){
  int len=GetFeatureNum(bmname,id)*fdim;
  Arr* feat=newArr(len);
  int nxg=2,nyg=2,nzg=2,nfg=5;
  int nx=421,ny=419,nz=1427,nf=3527;
  long long int expon=(len*(id+1))%3141592;
  int seedx=ipowMod(nxg,expon,nx),
      seedy=ipowMod(nyg,expon,ny),
      seedz=ipowMod(nzg,expon,nz),
      seedf=ipowMod(nfg,expon,nf);
  int i=0;
  if(timer_on){
    timer_clear(id+1);
    timer_start(id+1);
  }
  for(i=0;i<len;i+=fdim){
    /* Advance each generator and store the four feature components. */
    seedx=(seedx*nxg)%nx;
    seedy=(seedy*nyg)%ny;
    seedz=(seedz*nzg)%nz;
    seedf=(seedf*nfg)%nf;
    feat->val[i]=seedx;
    feat->val[i+1]=seedy;
    feat->val[i+2]=seedz;
    feat->val[i+3]=seedf;
  }
  if(timer_on){
    timer_stop(id+1);
    fprintf(stderr,"** RandomFeatures time in node %d = %f\n",id,timer_read(id+1));
  }
  return feat;
}
Example #4
/* Resample array a to blen entries: each interior source value is spread
   evenly over its target window [jlo,jhi], the end points are copied
   directly, and a takes ownership of the new buffer. */
void Resample(Arr *a,int blen){
    long long int i=0,j=0,jlo=0,jhi=0;
    double avval=0.0;
    double *nval=(double *)SMPI_SHARED_MALLOC(blen*sizeof(double));
    for(i=0;i<blen;i++) nval[i]=0.0;
    for(i=1;i<a->len-1;i++){
      /* Target window for source sample i; blen/a->len is integer division. */
      jlo=(int)(0.5*(2*i-1)*(blen/a->len));
      jhi=(int)(0.5*(2*i+1)*(blen/a->len));

      avval=a->val[i]/(jhi-jlo+1);
      for(j=jlo;j<=jhi;j++){
        nval[j]+=avval;
      }
    }
    nval[0]=a->val[0];
    nval[blen-1]=a->val[a->len-1];
    SMPI_SHARED_FREE(a->val);
    a->val=nval;
    a->len=blen;
}
Example #5
	// Grow or shrink *arr to newSize: build a temporary Array<T> of the new
	// size, copy the old contents into it, then assign it back to *arr.
	template <typename T>
	void Array<T>::resize(Array<T>* arr, UInt newSize)
	{
		Array<T> newArr(newSize);
		arr->copyTo(newArr);
		*arr = newArr;
	}
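Example #5 only shows the out-of-class definition of resize. A minimal, hypothetical Array<T> interface that such a definition could compile against might look like the sketch below; the member names, the UInt typedef, and the copy semantics are assumptions, not taken from the original class.

#include <algorithm>
#include <utility>

typedef unsigned int UInt;   // assumed typedef

template <typename T>
class Array {
public:
	explicit Array(UInt size) : m_size(size), m_data(new T[size]()) {}
	Array(const Array& other) : Array(other.m_size) {
		std::copy(other.m_data, other.m_data + other.m_size, m_data);
	}
	Array& operator=(const Array& other) {        // copy-and-swap style
		Array tmp(other);
		std::swap(m_size, tmp.m_size);
		std::swap(m_data, tmp.m_data);
		return *this;
	}
	~Array() { delete[] m_data; }

	// Copy as many elements as fit into the destination array.
	void copyTo(Array& dst) const {
		std::copy(m_data, m_data + std::min(m_size, dst.m_size), dst.m_data);
	}

	static void resize(Array<T>* arr, UInt newSize);  // defined in Example #5

	UInt size() const { return m_size; }
	T& operator[](UInt i) { return m_data[i]; }

private:
	UInt m_size;
	T* m_data;
};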
Example #6
// Convert a plain C float array into a JSON array, widening each element to double.
json cArrayToJson(float c_arr[], int numElements) {
    json newArr(json::an_array);
    for (int i = 0; i < numElements; i++)
        newArr.add((double)c_arr[i]);
    return newArr;
}