int StochasticReleaseTestProbe::computePValues(long int step, int f) {
   int status = PV_SUCCESS;
   assert(step >= 0 && step < INT_MAX);
   int nf = getTargetLayer()->getLayerLoc()->nf;
   assert(f >= 0 && f < nf);
   int idx = (step-1)*nf + f;
   pvwdata_t wgt = conn->get_wDataStart(0)[f*(nf+1)]; // weights should be one-to-one weights

   // All nonzero presynaptic activities for this feature should be equal; find that value.
   HyPerLayer * pre = conn->preSynapticLayer();
   const pvdata_t * preactPtr = pre->getLayerData();
   const PVLayerLoc * preLoc = pre->getLayerLoc();
   const int numPreNeurons = pre->getNumNeurons();
   bool found = false;
   pvdata_t preact = 0.0f;
   for (int n = f; n < numPreNeurons; n += nf) {
      int nExt = kIndexExtended(n, preLoc->nx, preLoc->ny, preLoc->nf, preLoc->halo.lt, preLoc->halo.rt, preLoc->halo.dn, preLoc->halo.up);
      pvdata_t a = preactPtr[nExt];
      if (a != 0.0f) {
         if (found) {
            assert(preact == a);
         }
         else {
            found = true;
            preact = a;
         }
      }
   }

   // Presynaptic activity times dt gives the release probability; clamp it to [0,1].
   preact *= getParent()->getDeltaTime();
   if (preact < 0.0f) preact = 0.0f;
   if (preact > 1.0f) preact = 1.0f;

   // Count the nonzero activities for feature f in the target layer;
   // each activity must be either zero or the one-to-one connection weight.
   const PVLayerLoc * loc = getTargetLayer()->getLayerLoc();
   const pvdata_t * activity = getTargetLayer()->getLayerData();
   int nnzf = 0;
   const int numNeurons = getTargetLayer()->getNumNeurons();
   for (int n = f; n < numNeurons; n += nf) {
      int nExt = kIndexExtended(n, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
      assert(activity[nExt] == 0 || activity[nExt] == wgt);
      if (activity[nExt] != 0) nnzf++;
   }

   // Sum the nonzero count over all MPI processes.
   HyPerLayer * l = getTargetLayer();
   HyPerCol * hc = l->getParent();
   MPI_Allreduce(MPI_IN_PLACE, &nnzf, 1, MPI_INT, MPI_SUM, hc->icCommunicator()->communicator());

   // On the root process, compute the two-sided p-value of the observed count
   // under the normal approximation to the binomial distribution.
   if (hc->columnId() == 0) {
      const int neuronsPerFeature = l->getNumGlobalNeurons()/nf;
      double mean = preact * neuronsPerFeature;
      double stddev = sqrt(neuronsPerFeature*preact*(1-preact));
      double numdevs = (nnzf-mean)/stddev;
      pvalues[idx] = erfc(fabs(numdevs)/sqrt(2));
      fprintf(outputstream->fp, " Feature %d, nnz=%5d, expectation=%7.1f, std.dev.=%5.1f, discrepancy of %f deviations, p-value %f\n",
            f, nnzf, mean, stddev, numdevs, pvalues[idx]);
   }
   assert(status == PV_SUCCESS);
   return status;
}
int customexit(HyPerCol * hc, int argc, char ** argv) {
   pvadata_t correctvalue = 0.5f;
   pvadata_t tolerance = 1.0e-7f;

   if (hc->columnId()==0) {
      pvInfo().printf("Checking whether input layer has all values equal to %f ...\n", correctvalue);
   }
   HyPerLayer * inputlayer = hc->getLayerFromName("input");
   assert(inputlayer);
   PVLayerLoc const * loc = inputlayer->getLayerLoc();
   assert(loc->nf==1);
   const int numNeurons = inputlayer->getNumNeurons();
   assert(numNeurons>0);
   int status = PV_SUCCESS;

   int numExtended = inputlayer->getNumExtended();
   InterColComm * icComm = hc->icCommunicator();
   pvadata_t * layerData = (pvadata_t *) icComm->publisherStore(inputlayer->getLayerId())->buffer(LOCAL);
   int rootproc = 0;
   if (icComm->commRank()==rootproc) {
      pvadata_t * databuffer = (pvadata_t *) malloc(numExtended*sizeof(pvadata_t));
      assert(databuffer);
      for (int proc=0; proc<icComm->commSize(); proc++) {
         if (proc==rootproc) {
            memcpy(databuffer, layerData, numExtended*sizeof(pvadata_t));
         }
         else {
            MPI_Recv(databuffer, numExtended*sizeof(pvadata_t), MPI_BYTE, proc, 15, icComm->communicator(), MPI_STATUS_IGNORE);
         }
         // At this point, databuffer on rank 0 should contain the extended input layer on rank proc
         for (int k=0; k<numNeurons; k++) {
            int kExt = kIndexExtended(k, loc->nx, loc->ny, loc->nf, loc->halo.lt, loc->halo.rt, loc->halo.dn, loc->halo.up);
            pvadata_t value = databuffer[kExt];
            if (fabs(value-correctvalue) >= tolerance) {
               pvErrorNoExit().printf("Rank %d, restricted index %d, extended index %d, value is %f instead of %f\n",
                     proc, k, kExt, value, correctvalue);
               status = PV_FAILURE;
            }
         }
      }
      free(databuffer);
      if (status == PV_SUCCESS) {
         pvInfo().printf("%s succeeded.\n", argv[0]);
      }
      else {
         pvError().printf("%s failed.\n", argv[0]);
      }
   }
   else {
      MPI_Send(layerData, numExtended*sizeof(pvadata_t), MPI_BYTE, rootproc, 15, icComm->communicator());
   }
   MPI_Barrier(icComm->communicator());
   return status;
}
int checkComparisonNonzero(HyPerCol * hc, int argc, char * argv[]) {
   int status = PV_FAILURE;
   int numLayers = hc->numberOfLayers();
   int layerIndex;
   HyPerLayer * layer = NULL;

   // Search the column for the layer named "Comparison".
   for (layerIndex = 0; layerIndex < numLayers; layerIndex++) {
      layer = hc->getLayer(layerIndex);
      if (!strcmp(layer->getName(), "Comparison")) break;
   }
   if (layerIndex >= numLayers) {
      pvErrorNoExit().printf("%s: couldn't find layer \"Comparison\".\n", argv[0]);
      return PV_FAILURE;
   }

   // The test passes if at least one membrane potential in the Comparison layer is nonzero.
   pvdata_t * V = layer->getV();
   for (int k = 0; k < layer->getNumNeurons(); k++) {
      if (V[k]) {
         status = PV_SUCCESS;
         break;
      }
   }
   return status;
}