void SkeletonBlendedGeometry::addJointBlending(UInt32 VertexIndex, Joint* const TheJoint, Real32 BlendAmount)
{
    if(getWeightIndexes() == NULL)
    {
        GeoUInt32PropertyUnrecPtr Indexes = GeoUInt32Property::create();
        setWeightIndexes(Indexes);
    }
    if(getWeights() == NULL)
    {
        GeoVec1fPropertyUnrecPtr Weights = GeoVec1fProperty::create();
        setWeights(Weights);
    }
    
    Int32 JointIndex(getMFInternalJoints()->findIndex(TheJoint));
    if(JointIndex < 0)
    {
        SFATAL << "Cannot add weight for joint, because that joint is not connected." << std::endl;
        return;
    }

    //Vertex Index
    getWeightIndexes()->push_back(VertexIndex);

    //Joint Index
    getWeightIndexes()->push_back(JointIndex);

    //Weight Index
    getWeightIndexes()->push_back(getWeights()->getSize());

    getWeights()->push_back(Pnt1f(BlendAmount));
}
void SkeletonBlendedGeometry::addJointBlending(UInt32 VertexIndex,
                                               UInt32 JointIndex,
                                               Real32 BlendAmount)
{
    if(getWeightIndexes() == NULL)
    {
        GeoUInt32PropertyUnrecPtr Indexes = GeoUInt32Property::create();
        setWeightIndexes(Indexes);
    }
    if(getWeights() == NULL)
    {
        GeoVec1fPropertyUnrecPtr Weights = GeoVec1fProperty::create();
        setWeights(Weights);
    }

    //Vertex Index
    getWeightIndexes()->push_back(VertexIndex);

    //Joint Index
    getWeightIndexes()->push_back(JointIndex);

    //Weight Index
    getWeightIndexes()->push_back(getWeights()->getSize());

    getWeights()->push_back(Pnt1f(BlendAmount));
}
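Both overloads above append one influence as a flat triple of indexes: (vertex index, joint index, slot into the weights array). A minimal standalone sketch of that storage scheme (plain C++, not the OpenSG API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Packed layout used above: three entries per influence.
struct BlendTable {
    std::vector<uint32_t> indexes;
    std::vector<float>    weights;

    void add(uint32_t vertex, uint32_t joint, float amount) {
        indexes.push_back(vertex);
        indexes.push_back(joint);
        indexes.push_back(static_cast<uint32_t>(weights.size()));
        weights.push_back(amount);
    }

    std::size_t influenceCount() const { return indexes.size() / 3; }
};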
Example #3
ElmanNetwork::ElmanNetwork(int n, int k, int m, double *vec, char *filename)
{
	mXS = n;
	mHS = k;
	mYS = m;
	srand ( time(NULL) );
	mXL = new Neuron *[mXS];
	int i;
	for (i = 0; i < mXS; i++) {
		mXL [i] = new Neuron (mHS, vec [i]);
	}
	mCL = new Neuron *[mHS];
	for (i = 0; i < mHS; i++) {
		mCL [i] = new Neuron (mHS, 0.5);
	}
	mHL = new Neuron *[mHS];
	for (i = 0; i < mHS; i++) {
		mHL [i] = new Neuron (mYS);
	}
	mYL = new Neuron *[mYS];
	for (i = 0; i < mYS; i++) {
		mYL [i] = new Neuron (0);
	}
	getWeights (filename);
}
Example #4
void GlobalGrid::integrate( double y[] ) const{
        double *weights = 0;
        getWeights(weights);

        tzero(num_outputs, y);

        if ( points->getNumIndexes() > points->getNumValues() ){
                for( int j=0; j<num_outputs; j++ ){
                        double sum = 0.0;
                        #pragma omp parallel for reduction( + : sum )
                        for( int i=0; i<points->getNumIndexes(); i++ ){
                                const double *val = points->getValueList(i);
                                sum += weights[i] * val[j];
                        }
                        y[j] = sum;
                }
        }else{
                for( int i=0; i<points->getNumIndexes(); i++ ){
                        const double *val = points->getValueList(i);
                        #pragma omp parallel for
                        for( int j=0; j<num_outputs; j++ ){
                                y[j] += weights[i] * val[j];
                        }
                }
        }
        delete[] weights;
}
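The two branches above compute the same weighted sum, choosing which loop to parallelize depending on whether there are more points or more outputs. A self-contained sketch of the quadrature itself, y[j] = sum_i weights[i] * values[i][j], parallelized over points as in the first branch:

#include <cstddef>
#include <vector>

void integrateSketch(const std::vector<double>& weights,
                     const std::vector<std::vector<double>>& values,
                     std::vector<double>& y) {
    // One reduction per output component.
    for (std::size_t j = 0; j < y.size(); ++j) {
        double sum = 0.0;
        #pragma omp parallel for reduction(+ : sum)
        for (int i = 0; i < static_cast<int>(values.size()); ++i) {
            sum += weights[i] * values[i][j];
        }
        y[j] = sum;
    }
}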
void SkeletonBlendedGeometry::calculatePositions(void)
{
    if(getBaseGeometry() == NULL)
    {
        //Error
        SWARNING << "Base Geometry is NULL." << std::endl;
        return;
    }
    if(getPositions() == NULL)
    {
        //Error
        SWARNING << "Positions is NULL." << std::endl;
        return;
    }
    if(getBaseGeometry()->getPositions() == NULL)
    {
        //Error
        SWARNING << "Base Geometry Positions is NULL." << std::endl;
        return;
    }

    Pnt3f CalculatedPoint;
    Pnt3f BasePoint;
    //Vec3f CalculatedNormal;

    //Zero the Position Property
    zeroGeoProperty(getPositions());

    //Update the Positions and Normals
    UInt32 WeightIndex, JointIndex, VertexIndex;
    UInt32 NumWeightIndexTuples(getWeightIndexes()->size()/3);
    for(UInt32 i(0) ; i < NumWeightIndexTuples ; ++i)
    {
        VertexIndex = getWeightIndexes()->getValue<UInt32>( 3 * i     );
        JointIndex  = getWeightIndexes()->getValue<UInt32>((3 * i) + 1);
        WeightIndex = getWeightIndexes()->getValue<UInt32>((3 * i) + 2);

        //v*BSM*IBM*JM*JW
        getBaseGeometry()->getPositions()->getValue<Pnt3f>(BasePoint, VertexIndex);

        _JointPoseTransforms[JointIndex].mult(BasePoint, BasePoint);

        //Add the displacement to the value at this position
        getPositions()->getValue<Pnt3f>(CalculatedPoint, VertexIndex);
        CalculatedPoint += Vec3f(BasePoint) * getWeights()->getValue<Pnt1f>(WeightIndex)[0];
        getPositions()->setValue<Pnt3f>(CalculatedPoint, VertexIndex);
    }

    for(UInt32 i = 0; i < _mfParents.size(); i++)
    {
        _mfParents[i]->invalidateVolume();
    }

    _volumeCache.setValid();
    _volumeCache.setEmpty();

    _NeedRecalc = false;
}
Example #6
Datum
ts_rankcd_tt(PG_FUNCTION_ARGS)
{
	TSVector	txt = PG_GETARG_TSVECTOR(0);
	TSQuery		query = PG_GETARG_TSQUERY(1);
	float		res;

	res = calc_rank_cd(getWeights(NULL), txt, query, DEF_NORM_METHOD);

	PG_FREE_IF_COPY(txt, 0);
	PG_FREE_IF_COPY(query, 1);
	PG_RETURN_FLOAT4(res);
}
Example #7
Datum
ts_rankcd_ttf(PG_FUNCTION_ARGS)
{
	TSVector	txt = PG_GETARG_TSVECTOR(0);
	TSQuery		query = PG_GETARG_TSQUERY(1);
	int			method = PG_GETARG_INT32(2);
	float		res;

	res = calc_rank_cd(getWeights(NULL), txt, query, method);

	PG_FREE_IF_COPY(txt, 0);
	PG_FREE_IF_COPY(query, 1);
	PG_RETURN_FLOAT4(res);
}
Example #8
void CGradientUpdateFunction::copy(CLearnDataObject *l_gradientUpdateFunction)
{
	CGradientUpdateFunction *gradientUpdateFunction = dynamic_cast<CGradientUpdateFunction *>(l_gradientUpdateFunction);

	assert(gradientUpdateFunction->getNumWeights() == getNumWeights());	

	double *weights = new double[getNumWeights()];

	getWeights(weights);

	gradientUpdateFunction->setWeights( weights);	

	delete [] weights;
}
Example #9
Datum
ts_rankcd_wtt(PG_FUNCTION_ARGS)
{
	ArrayType  *win = (ArrayType *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
	TSVector	txt = PG_GETARG_TSVECTOR(1);
	TSQuery		query = PG_GETARG_TSQUERY(2);
	float		res;

	res = calc_rank_cd(getWeights(win), txt, query, DEF_NORM_METHOD);

	PG_FREE_IF_COPY(win, 0);
	PG_FREE_IF_COPY(txt, 1);
	PG_FREE_IF_COPY(query, 2);
	PG_RETURN_FLOAT4(res);
}
Example #10
void CGradientUpdateFunction::saveData(FILE *stream)
{
	double *parameters = new double[getNumWeights()];
	getWeights(parameters);

	fprintf(stream, "Gradient Function\n");
	fprintf(stream, "Parameters: %d\n", getNumWeights());

	for (int i = 0; i < getNumWeights(); i ++)
	{
		fprintf(stream, "%f ", parameters[i]);
	}
	fprintf(stream, "\n");
	delete [] parameters;
}
Example #11
int VaryingHyPerConn::updateWeights(int axonId) {
   int syPatch = yPatchStride();
   for( int kPatch = 0; kPatch < getNumDataPatches(); kPatch++) {
      PVPatch * W = getWeights(kPatch, axonId);
      int nkPatch = fPatchSize() * W->nx;
      pvwdata_t * Wdata = get_wData(axonId, kPatch); // W->data;
      pvdata_t * dWdata = get_dwData(axonId, kPatch);
      for(int kyPatch = 0; kyPatch < W->ny; kyPatch++) {
         for(int k = 0; k < nkPatch; k++) {   // renamed from kPatch to avoid shadowing the outer loop
            Wdata[k] += dWdata[k];
         }
         dWdata += syPatch;
      }
   }
   return PV_SUCCESS;
}
Example #12
    void updateWeights(Array tValues, float eta, float a){
        errors = computeErrors(tValues);
        oGrads = computeOutputGradients(tValues);
        hGrads = computeHiddenGradients();

        Array hiddenDeltas = computeHiddenDeltas(eta, a);
        Array outputDeltas = computeOutputDeltas(eta, a);

        Array deltas = combine(hiddenDeltas, outputDeltas);
        Array weights = getWeights();

        for(int i = 0; i < weights.size(); i++){
            weights[i] += deltas[i];
        }
        setWeights(weights);
    }
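computeHiddenDeltas/computeOutputDeltas are not shown; under the usual backpropagation-with-momentum convention that eta is the learning rate and a the momentum factor, each delta would be computed roughly as below (an assumption about those helpers, not the original code):

#include <cstddef>
#include <vector>

// Hypothetical delta step: delta_i = -eta * grad_i + a * prevDelta_i.
void computeDeltas(const std::vector<float>& grads,
                   std::vector<float>& prevDeltas,
                   float eta, float a) {
    for (std::size_t i = 0; i < grads.size(); ++i) {
        prevDeltas[i] = -eta * grads[i] + a * prevDeltas[i];
    }
}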
Example #13
Datum
ts_rank_wttf(PG_FUNCTION_ARGS)
{
	ArrayType  *win = (ArrayType *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
	TSVector	txt = PG_GETARG_TSVECTOR(1);
	TSQuery		query = PG_GETARG_TSQUERY(2);
	int			method = PG_GETARG_INT32(3);
	float		res;

	res = calc_rank(getWeights(win), txt, query, method);

	PG_FREE_IF_COPY(win, 0);
	PG_FREE_IF_COPY(txt, 1);
	PG_FREE_IF_COPY(query, 2);
	PG_RETURN_FLOAT4(res);
}
const TargetPhraseCollection *PhraseDictionaryMultiModel::GetTargetPhraseCollectionLEGACY(const Phrase& src) const
{

  std::vector<std::vector<float> > multimodelweights = getWeights(m_numScoreComponents, true);
  TargetPhraseCollection *ret = NULL;

  std::map<std::string,multiModelStatistics*>* allStats = new(std::map<std::string,multiModelStatistics*>);
  CollectSufficientStatistics(src, allStats);
  ret = CreateTargetPhraseCollectionLinearInterpolation(src, allStats, multimodelweights);
  RemoveAllInMap(*allStats);
  delete allStats;

  ret->NthElement(m_tableLimit); // sort the phrases for pruning later
  const_cast<PhraseDictionaryMultiModel*>(this)->CacheForCleanup(ret);

  return ret;
}
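CreateTargetPhraseCollectionLinearInterpolation combines the per-model statistics; the interpolation itself is a weighted sum of model scores. A minimal sketch of that combination (not the Moses API):

#include <cassert>
#include <cstddef>
#include <vector>

// Linearly interpolate one feature score across models:
// s = sum_m lambdas[m] * modelScores[m].
float interpolate(const std::vector<float>& modelScores,
                  const std::vector<float>& lambdas) {
    assert(modelScores.size() == lambdas.size());
    float s = 0.0f;
    for (std::size_t m = 0; m < modelScores.size(); ++m) {
        s += lambdas[m] * modelScores[m];
    }
    return s;
}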
Example #15
  int LCAConn::update_dW(int axonId)
  { // compute dW but don't add them to the weights yet.
    // That takes place in reduceKernels, so that the output is
    // independent of the number of processors.
    int nExt = preSynapticLayer()->getNumExtended();
    int numKernelIndices = getNumDataPatches();
    const pvdata_t * preactbuf = preSynapticLayer()->getLayerData(getDelay(axonId));
    const pvdata_t * postactbuf = postSynapticLayer()->getLayerData(getDelay(axonId));

    int sya = (post->getLayerLoc()->nf * (post->getLayerLoc()->nx + 2*post->getLayerLoc()->nb));
    
    for(int kExt=0; kExt<nExt;kExt++) {
      PVPatch * weights = getWeights(kExt,axonId);
      size_t offset = getAPostOffset(kExt, axonId);
      pvdata_t preact = preactbuf[kExt];
      int ny = weights->ny;
      int nk = weights->nx * nfp;
      const pvdata_t * postactRef = &postactbuf[offset];
      pvdata_t * dwdata = get_dwData(axonId, kExt);
      int lineoffsetw = 0;
      int lineoffseta = 0;
      for( int y=0; y<ny; y++ ) {
	for( int k=0; k<nk; k++ ) {
	  dwdata[lineoffsetw + k] += updateRule_dW(preact, postactRef[lineoffseta+k],lineoffseta+k);
	}
	lineoffsetw += syp;
	lineoffseta += sya;
      }
    }
    
    // Divide by (numNeurons/numKernels)
    int divisor = pre->getNumNeurons()/numKernelIndices;
    assert( divisor*numKernelIndices == pre->getNumNeurons() );
    for( int kernelindex=0; kernelindex<numKernelIndices; kernelindex++ ) {
      int numpatchitems = nxp*nyp*nfp;
      pvdata_t * dwpatchdata = get_dwDataHead(axonId,kernelindex);
      for( int n=0; n<numpatchitems; n++ ) {
	dwpatchdata[n] /= divisor;
      }
    }

    lastUpdateTime = parent->simulationTime();

    return PV_SUCCESS;    
  }
Example #16
void ParsSprProposer::determineBackProb( TreeAln &traln, LikelihoodEvaluator &eval, const std::vector<AbstractParameter*> &params )
{
  // assumes the move has already been applied!!! 

  auto prunedTree = _move.getEvalBranch(traln); 
  auto prunedFromBranch = _move.getInverseMove().getInsertBranch();

  ParsimonyEvaluator::disorientNode( traln.findNodePtr(prunedTree)); 
  _pEval.evaluateSubtree(traln, traln.findNodePtr(prunedTree));

  auto backScores = determineScoresOfInsertions(traln, prunedTree); 
  auto weightedInsertionsBack = getWeights(traln, backScores); 
  
  assert(weightedInsertionsBack.find(prunedFromBranch) != end(weightedInsertionsBack)); 

  // RESULT 
  _backProb = log_double::fromAbs(weightedInsertionsBack[prunedFromBranch]); 
} 
Example #17
int VaryingHyPerConn::allocateDataStructures() {
   HyPerConn::allocateDataStructures();
   // initialize all dW's to one.
   int syPatch = yPatchStride();
   for(int kAxon = 0; kAxon < numberOfAxonalArborLists(); kAxon++){
      for(int kPatch = 0; kPatch < getNumDataPatches(); kPatch++){
         PVPatch * W = getWeights(kPatch, kAxon);
         int nkPatch = fPatchSize() * W->nx;
         float * dWdata = get_dwData(kAxon, kPatch);
         for(int kyPatch = 0; kyPatch < W->ny; kyPatch++){
            for(int k = 0; k < nkPatch; k++){   // renamed from kPatch to avoid shadowing the outer loop
               dWdata[k] = 1.0f;
            }
            dWdata += syPatch;
         }
      }
   }

   return PV_SUCCESS;
}
Example #18
const TargetPhraseCollection *PhraseDictionaryMultiModelCounts::GetTargetPhraseCollection(const Phrase& src) const
{
  vector<vector<float> > multimodelweights;
  bool normalize;
  normalize = (m_mode == "interpolate") ? true : false;
  multimodelweights = getWeights(4,normalize);

  //source phrase frequency is shared among all phrase pairs
  vector<float> fs(m_numModels);

  map<string,multiModelCountsStatistics*>* allStats = new(map<string,multiModelCountsStatistics*>);

  CollectSufficientStatistics(src, fs, allStats);

  TargetPhraseCollection *ret = CreateTargetPhraseCollectionCounts(src, fs, allStats, multimodelweights);

  ret->NthElement(m_tableLimit); // sort the phrases for pruning later
  const_cast<PhraseDictionaryMultiModelCounts*>(this)->CacheForCleanup(ret);
  return ret;
}
Example #19
void ParsSprProposer::determineMove(TreeAln &traln, LikelihoodEvaluator &eval, Randomness& rand, BranchPlain primeBranch, const std::vector<AbstractParameter*> &params)  
{
  // auto result = new SprMove{}; 
  auto blParams = params; 
  auto prunedTree = primeBranch; 

  _pEval = ParsimonyEvaluator{} ; 

  _pEval.evaluate(traln, traln.findNodePtr(prunedTree), true );

  // decide upon an spr move 
  _computedInsertions = branch2parsScore {}; 
  auto forwInsertions  = determineScoresOfInsertions(traln, prunedTree);
  auto weightedInsertions = getWeights(traln, forwInsertions); 

  auto r = rand.drawRandDouble01(); 
  auto chosen = std::pair<BranchPlain,double>{}; 
  for(auto &v : weightedInsertions)
    {
      if(r < v.second)
	{
	  chosen = v; 
	  break; 
	}
      else 
	r -= v.second; 
    }

  // determine the branch, we pruned from 
  auto desc = traln.getDescendents(prunedTree);
  auto prunedFromBranch = BranchPlain{std::get<0>(desc).getSecNode() , std::get<1>(desc).getSecNode()}; 

  // important: save the move 
  _move = SprMove(traln, prunedTree,chosen.first );
  _forwProb = log_double::fromAbs(std::get<1>(chosen));
} 
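The selection loop above is a roulette-wheel draw: r starts uniform in [0,1) and each candidate's weight is subtracted from it until one candidate covers the remainder. The same pattern in isolation, assuming the weights are normalized to sum to 1:

#include <cstddef>
#include <random>
#include <vector>

std::size_t rouletteDraw(const std::vector<double>& weights, std::mt19937& gen) {
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    double r = dist(gen);
    for (std::size_t i = 0; i < weights.size(); ++i) {
        if (r < weights[i]) return i;
        r -= weights[i];
    }
    return weights.size() - 1; // guard against floating-point drift
}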
void SkeletonBlendedGeometry::calculatePositions(void)
{
	if(getBaseGeometry() == NULL)
	{
		//Error
		SWARNING << "SkeletonBlendedGeometry::calculatePositions(): Base Geometry is NULL." << std::endl;
        return;
    }
	if(getPositions() == NULL)
	{
		//Error
		SWARNING << "SkeletonBlendedGeometry::calculatePositions(): Positions is NULL." << std::endl;
        return;
	}
	if(getBaseGeometry()->getPositions() == NULL)
	{
		//Error
		SWARNING << "SkeletonBlendedGeometry::calculatePositions(): Base Geometry Postions is NULL." << std::endl;
        return;
	}

    Pnt3f CalculatedPoint;
    Pnt3f BasePointInfluenced;
    Pnt3f BasePoint;
    Vec3f CalculatedNormal;


    //Reset all points
    GeoVectorPropertyUnrecPtr ResetPositions(dynamic_pointer_cast<GeoVectorProperty>(getBaseGeometry()->getPositions()->clone()));
    setPositions(ResetPositions);

    UInt32 WeightIndex, JointIndex, VertexIndex;
    //Update the Positions and Normals
    for(UInt32 i(0) ; i < getWeights()->size() ; ++i)
    {
        VertexIndex = getWeightIndexes()->getValue<UInt32>( 3 * i     );
        JointIndex  = getWeightIndexes()->getValue<UInt32>((3 * i) + 1);
        WeightIndex = getWeightIndexes()->getValue<UInt32>((3 * i) + 2);

        //Get the position of this index from the base geometry
        getBaseGeometry()->getPositions()->getValue<Pnt3f>(BasePoint, VertexIndex);

        //Get the Influence matrix of this joint
        //Apply the influence of this joint to the position
        getBindTransformationDiff(JointIndex).mult(BasePoint, BasePointInfluenced);

        //Add the displacement to the value at this position
        getPositions()->getValue<Pnt3f>(CalculatedPoint, VertexIndex);
        //Scale the influence by the blend amount
        CalculatedPoint += getWeights()->getValue<Pnt1f>(WeightIndex)[0] * (BasePointInfluenced - BasePoint);
        getPositions()->setValue<Pnt3f>(CalculatedPoint, VertexIndex);
    }

    for(UInt32 i = 0; i < _mfParents.size(); i++)
    {
        _mfParents[i]->invalidateVolume();
    }

    _volumeCache.setValid();
    _volumeCache.setEmpty();
}
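Per influence, the loop adds weight * (influenced - base) to the accumulated point, i.e. linear blend skinning: p' = p + sum_j w_j * (M_j * p - p). A generic sketch with plain types (not OpenSG's Pnt3f/Matrix):

#include <array>
#include <utility>
#include <vector>

struct Vec3 { float x, y, z; };

// Apply a row-major 4x4 transform to a point (w assumed 1).
Vec3 transformPoint(const std::array<float, 16>& m, const Vec3& p) {
    return { m[0]*p.x + m[1]*p.y + m[2]*p.z  + m[3],
             m[4]*p.x + m[5]*p.y + m[6]*p.z  + m[7],
             m[8]*p.x + m[9]*p.y + m[10]*p.z + m[11] };
}

Vec3 skinPoint(const Vec3& base,
               const std::vector<std::array<float, 16>>& jointDiffs,
               const std::vector<std::pair<int, float>>& influences) {
    Vec3 out = base; // positions start as a clone of the base geometry
    for (const auto& inf : influences) {
        Vec3 moved = transformPoint(jointDiffs[inf.first], base);
        out.x += inf.second * (moved.x - base.x);
        out.y += inf.second * (moved.y - base.y);
        out.z += inf.second * (moved.z - base.z);
    }
    return out;
}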
Example #21
int TransposePoolingConn::deliverPresynapticPerspective(PVLayerCube const * activity, int arborID) {
   //Check if we need to update based on connection's channel
   if(getChannel() == CHANNEL_NOUPDATE){
      return PV_SUCCESS;
   }
   assert(post->getChannel(getChannel()));

   const PVLayerLoc * preLoc = preSynapticLayer()->getLayerLoc();
   const PVLayerLoc * postLoc = postSynapticLayer()->getLayerLoc();

   assert(arborID >= 0);
   const int numExtended = activity->numItems;

   //Grab postIdxLayer's data
   int* postIdxData = NULL;
   if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
      PoolingIndexLayer* postIndexLayer = originalConn->getPostIndexLayer();
      assert(postIndexLayer);
      //Make sure this layer is an integer layer
      assert(postIndexLayer->getDataType() == PV_INT);
      DataStore * store = parent->icCommunicator()->publisherStore(postIndexLayer->getLayerId());
      int delay = getDelay(arborID);

      //TODO this is currently a hack, need to properly implement data types.
      postIdxData = (int*) store->buffer(LOCAL, delay);
   }

   for(int b = 0; b < parent->getNBatch(); b++){
      pvdata_t * activityBatch = activity->data + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
      pvdata_t * gSynPatchHeadBatch = post->getChannel(getChannel()) + b * postLoc->nx * postLoc->ny * postLoc->nf;
      int * postIdxDataBatch = NULL;
      if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
         postIdxDataBatch = postIdxData + b * originalConn->getPostIndexLayer()->getNumExtended();
      }

      unsigned int * activeIndicesBatch = NULL;
      if(activity->isSparse){
         activeIndicesBatch = activity->activeIndices + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
      }

      int numLoop;
      if(activity->isSparse){
         numLoop = activity->numActive[b];
      }
      else{
         numLoop = numExtended;
      }

#ifdef PV_USE_OPENMP_THREADS
      //Clear all thread gsyn buffer
      if(thread_gSyn){
         int numNeurons = post->getNumNeurons();
#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for
#endif
         for(int i = 0; i < parent->getNumThreads() * numNeurons; i++){
            int ti = i/numNeurons;
            int ni = i % numNeurons;
            thread_gSyn[ti][ni] = 0;
         }
      }
#endif // PV_USE_OPENMP_THREADS


#ifdef PV_USE_OPENMP_THREADS
#pragma omp parallel for schedule(static)
#endif
      for (int loopIndex = 0; loopIndex < numLoop; loopIndex++) {
         int kPreExt;
         if(activity->isSparse){
            kPreExt = activeIndicesBatch[loopIndex];
         }
         else{
            kPreExt = loopIndex;
         }

         float a = activityBatch[kPreExt];
         if (a == 0.0f) continue;

         //If we're using thread_gSyn, set this here
         pvdata_t * gSynPatchHead;
#ifdef PV_USE_OPENMP_THREADS
         if(thread_gSyn){
            int ti = omp_get_thread_num();
            gSynPatchHead = thread_gSyn[ti];
         }
         else{
            gSynPatchHead = gSynPatchHeadBatch;
         }
#else // PV_USE_OPENMP_THREADS
         gSynPatchHead = gSynPatchHeadBatch;
#endif // PV_USE_OPENMP_THREADS

         const int kxPreExt = kxPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
         const int kyPreExt = kyPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
         const int kfPre = featureIndex(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);

         if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
            const int kxPreGlobalExt = kxPreExt + preLoc->kx0;
            const int kyPreGlobalExt = kyPreExt + preLoc->ky0;
            if(kxPreGlobalExt < preLoc->halo.lt || kxPreGlobalExt >= preLoc->nxGlobal + preLoc->halo.lt ||
               kyPreGlobalExt < preLoc->halo.up || kyPreGlobalExt >= preLoc->nyGlobal + preLoc->halo.up){
               continue;
            }

            //Convert stored global extended index into local extended index
            int postGlobalExtIdx = postIdxDataBatch[kPreExt];

            // If all inputs are zero and input layer is sparse, postGlobalExtIdx will still be -1.
            if(postGlobalExtIdx == -1) { continue; }

            //Make sure the index is in bounds
            assert(postGlobalExtIdx >= 0 && postGlobalExtIdx <
                  (postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt) * 
                  (postLoc->nyGlobal + postLoc->halo.up + postLoc->halo.dn) * 
                  postLoc->nf);

            const int kxPostGlobalExt = kxPos(postGlobalExtIdx, postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt, postLoc->nyGlobal + postLoc->halo.dn + postLoc->halo.up, postLoc->nf);
            const int kyPostGlobalExt = kyPos(postGlobalExtIdx, postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt, postLoc->nyGlobal + postLoc->halo.dn + postLoc->halo.up, postLoc->nf);
            const int kfPost = featureIndex(postGlobalExtIdx, postLoc->nxGlobal + postLoc->halo.lt + postLoc->halo.rt, postLoc->nyGlobal + postLoc->halo.dn + postLoc->halo.up, postLoc->nf);

            const int kxPostLocalRes = kxPostGlobalExt - postLoc->kx0 - postLoc->halo.lt;
            const int kyPostLocalRes = kyPostGlobalExt - postLoc->ky0 - postLoc->halo.up;
            if(kxPostLocalRes < 0 || kxPostLocalRes >= postLoc->nx|| 
               kyPostLocalRes < 0 || kyPostLocalRes >= postLoc->ny){
               continue;
            }

            const int kPostLocalRes = kIndex(kxPostLocalRes, kyPostLocalRes, kfPost, postLoc->nx, postLoc->ny, postLoc->nf);
            gSynPatchHeadBatch[kPostLocalRes] = a;
         }
         else{
            PVPatch * weights = getWeights(kPreExt, arborID);
            const int nk = weights->nx * fPatchSize();
            const int ny = weights->ny;
            pvgsyndata_t * postPatchStart = gSynPatchHead + getGSynPatchStart(kPreExt, arborID);
            const int sy  = getPostNonextStrides()->sy;       // stride in layer

            int offset = kfPre;
            int sf = fPatchSize();

            pvwdata_t w = 1.0;
            if(getPvpatchAccumulateType() == ACCUMULATE_SUMPOOLING){
              float relative_XScale = pow(2, (post->getXScale() - pre->getXScale()));
              float relative_YScale = pow(2, (post->getYScale() - pre->getYScale()));
              w = 1.0/(nxp*nyp*relative_XScale*relative_YScale);
            }
            void* auxPtr = NULL;
            for (int y = 0; y < ny; y++) {
               (accumulateFunctionPointer)(0, nk, postPatchStart + y*sy + offset, a, &w, auxPtr, sf);
            }
         }
      }

#ifdef PV_USE_OPENMP_THREADS
      //Set back into gSyn
      if(thread_gSyn){
         pvdata_t * gSynPatchHead = gSynPatchHeadBatch;
         int numNeurons = post->getNumNeurons();
         //Looping over neurons first to be thread safe
#pragma omp parallel for
         for(int ni = 0; ni < numNeurons; ni++){
            for(int ti = 0; ti < parent->getNumThreads(); ti++){
               if(pvpatchAccumulateType == ACCUMULATE_MAXPOOLING){
                  if(gSynPatchHead[ni] < fabs(thread_gSyn[ti][ni])){
                     gSynPatchHead[ni] = thread_gSyn[ti][ni];
                  }
               }
               else{
                  gSynPatchHead[ni] += thread_gSyn[ti][ni];
               }
            }
         }
      }
#endif
   }
   return PV_SUCCESS;
}
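The PV_USE_OPENMP_THREADS block at the end is the standard two-phase pattern: each thread accumulates into its own buffer, then the buffers are merged with neurons in the outer loop so no two threads touch the same element. A simplified sketch of the merge (plain max for maxpooling, sum otherwise):

#include <cstddef>
#include <vector>

void mergeThreadBuffers(const std::vector<std::vector<float>>& threadBuf,
                        std::vector<float>& gSyn, bool maxPooling) {
    const int numNeurons = static_cast<int>(gSyn.size());
    #pragma omp parallel for
    for (int ni = 0; ni < numNeurons; ++ni) {
        for (std::size_t ti = 0; ti < threadBuf.size(); ++ti) {
            if (maxPooling) {
                if (gSyn[ni] < threadBuf[ti][ni]) gSyn[ni] = threadBuf[ti][ni];
            } else {
                gSyn[ni] += threadBuf[ti][ni];
            }
        }
    }
}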
Example #22
void hgExpDistance(char *database, char *posTable, char *expTable, char *outTable)
/* hgExpDistance - Create table that measures expression distance between pairs. */
{
struct sqlConnection *conn = sqlConnect(database);
struct sqlResult *sr;
char query[256];
char **row;
struct hash *expHash = hashNew(16);
int realExpCount = -1;
struct microData *gene;
int rc, t;
pthread_t *threads = NULL;
pthread_attr_t attr;
int *threadID = NULL;
void *status;
char *tempDir = ".";
long time1, time2;

time1 = clock1000();

/* Get list/hash of all items with expression values. */
sqlSafef(query, sizeof(query), "select name,expCount,expScores from %s", posTable);
sr = sqlGetResult(conn, query);
while ((row = sqlNextRow(sr)) != NULL)
    {
    char *name = row[0];
    if (!hashLookup(expHash, name))
	{
	int expCount = sqlUnsigned(row[1]);
	int commaCount;
	float *expScores = NULL;

	sqlFloatDynamicArray(row[2], &expScores, &commaCount);
	if (expCount != commaCount)
	    errAbort("expCount and expScores don't match on %s in %s", name, posTable);
	if (realExpCount == -1)
	    realExpCount = expCount;
	if (expCount != realExpCount)
	    errAbort("In %s some rows have %d experiments others %d", 
	    	name, expCount, realExpCount);
	AllocVar(gene);
	gene->expCount = expCount;
	gene->expScores = expScores;
	hashAddSaveName(expHash, name, gene, &gene->name);
	slAddHead(&geneList, gene);
	}
    }
sqlFreeResult(&sr);
conn = sqlConnect(database);
slReverse(&geneList);
geneCount = slCount(geneList);
printf("Have %d elements in %s\n", geneCount, posTable);

weights = getWeights(realExpCount);

if (optionExists("lookup"))
    geneList = lookupGenes(conn, optionVal("lookup", NULL), geneList);
geneCount = slCount(geneList);
printf("Got %d unique elements in %s\n", geneCount, posTable);

sqlDisconnect(&conn);	/* Disconnect because next step is slow. */


if (geneCount < 1)
    errAbort("ERROR: unique gene count less than one ?");

time2 = clock1000();
verbose(2, "records read time: %.2f seconds\n", (time2 - time1) / 1000.0);

f = hgCreateTabFile(tempDir, outTable);

/* instantiate threads */
AllocArray( threadID, numThreads );
AllocArray( threads, numThreads );
pthread_attr_init( &attr );
pthread_mutex_init( &mutexfilehandle, NULL );
pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

for (t = 0; t < numThreads; t++) {
	threadID[t] = t;
	rc = pthread_create( &threads[t], &attr, computeDistance, 
						(void *) &threadID[t]);
	if (rc)
		errAbort("ERROR: in pthread_create() %d\n", rc );
} 

/* synchronize all threads */
for (t = 0; t < numThreads; t++) {
	rc = pthread_join( threads[t], &status);
	if (rc)
		errAbort("ERROR: in pthread_join() %d\n", rc );
} 

printf("Made %s.tab\n", outTable);

slFreeList( &geneList );

pthread_mutex_destroy( &mutexfilehandle );
pthread_attr_destroy( &attr );

time1 = time2;
time2 = clock1000();
verbose(2, "distance computation time: %.2f seconds\n", (time2 - time1) / 1000.0);

/* Create and load table. */
conn = sqlConnect(database);
distanceTableCreate(conn, outTable);
hgLoadTabFile(conn, tempDir, outTable, &f);
printf("Loaded %s\n", outTable);

/* Add indices. */
sqlSafef(query, sizeof(query), "alter table %s add index(query(12))", outTable);
sqlUpdate(conn, query);
printf("Made query index\n");
if (optionExists("targetIndex"))
    {
    sqlSafef(query, sizeof(query), "alter table %s add index(target(12))", outTable);
    sqlUpdate(conn, query);
    printf("Made target index\n");
    }

hgRemoveTabFile(tempDir, outTable);

time1 = time2;
time2 = clock1000();
verbose(2, "table create/load/index time: %.2f seconds\n", (time2 - time1) / 1000.0);

}
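The thread management here is plain POSIX fan-out/join: create numThreads joinable workers, then join them all before using the shared output. A minimal standalone sketch with a hypothetical worker function:

#include <pthread.h>
#include <cstdio>
#include <vector>

static void* worker(void* arg) {
    int id = *static_cast<int*>(arg);
    std::printf("thread %d running\n", id);
    return nullptr;
}

int runJoinableThreads(int numThreads) {
    std::vector<pthread_t> threads(numThreads);
    std::vector<int> ids(numThreads);
    for (int t = 0; t < numThreads; ++t) {
        ids[t] = t;
        if (pthread_create(&threads[t], nullptr, worker, &ids[t]) != 0)
            return -1; // creation failed
    }
    for (int t = 0; t < numThreads; ++t)
        pthread_join(threads[t], nullptr);
    return 0;
}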
Example #23
void hgExpDistance(char *database, char *posTable, char *expTable, char *outTable)
/* hgExpDistance - Create table that measures expression distance between pairs. */
{
struct sqlConnection *conn = sqlConnect(database);
struct sqlResult *sr;
char query[256];
char **row;
struct hash *expHash = hashNew(16);
int realExpCount = -1;
struct microData *geneList = NULL, *curGene, *gene;
int geneIx, geneCount = 0;
struct microData **geneArray = NULL;
float *weights = NULL;
char *tempDir = ".";
FILE *f = hgCreateTabFile(tempDir, outTable);

/* Get list/hash of all items with expression values. */
sqlSafef(query, sizeof(query), "select name,expCount,expScores from %s", posTable);
sr = sqlGetResult(conn, query);
while ((row = sqlNextRow(sr)) != NULL)
    {
    char *name = row[0];
    if (!hashLookup(expHash, name))
	{
	int expCount = sqlUnsigned(row[1]);
	int commaCount;
	float *expScores = NULL;

	sqlFloatDynamicArray(row[2], &expScores, &commaCount);
	if (expCount != commaCount)
	    errAbort("expCount and expScores don't match on %s in %s", name, posTable);
	if (realExpCount == -1)
	    realExpCount = expCount;
	if (expCount != realExpCount)
	    errAbort("In %s some rows have %d experiments others %d", 
	    	name, expCount, realExpCount);
	AllocVar(gene);
	gene->expCount = expCount;
	gene->expScores = expScores;
	hashAddSaveName(expHash, name, gene, &gene->name);
	slAddHead(&geneList, gene);
	}
    }
sqlFreeResult(&sr);
conn = sqlConnect(database);
slReverse(&geneList);
geneCount = slCount(geneList);
printf("Have %d elements in %s\n", geneCount, posTable);

weights = getWeights(realExpCount);

if (optionExists("lookup"))
    geneList = lookupGenes(conn, optionVal("lookup", NULL), geneList);
geneCount = slCount(geneList);
printf("Got %d unique elements in %s\n", geneCount, posTable);

sqlDisconnect(&conn);	/* Disconnect because next step is slow. */


if (geneCount < 1)
    errAbort("ERROR: unique gene count less than one ?");
/* Get an array for sorting. */
AllocArray(geneArray, geneCount);
for (gene = geneList,geneIx=0; gene != NULL; gene = gene->next, ++geneIx)
    geneArray[geneIx] = gene;

/* Print out closest 1000 in tab file. */
for (curGene = geneList; curGene != NULL; curGene = curGene->next)
    {
    calcDistances(curGene, geneList, weights);
    qsort(geneArray, geneCount, sizeof(geneArray[0]), cmpMicroDataDistance);
    for (geneIx=0; geneIx < 1000 && geneIx < geneCount; ++geneIx)
        {
	gene = geneArray[geneIx];
	fprintf(f, "%s\t%s\t%f\n", curGene->name, gene->name, gene->distance);
	}
    dotOut();
    }
printf("Made %s.tab\n", outTable);

/* Create and load table. */
conn = sqlConnect(database);
distanceTableCreate(conn, outTable);
hgLoadTabFile(conn, tempDir, outTable, &f);
printf("Loaded %s\n", outTable);

/* Add indices. */
sqlSafef(query, sizeof(query), "alter table %s add index(query)", outTable);
sqlUpdate(conn, query);
printf("Made query index\n");
if (optionExists("targetIndex"))
    {
    sqlSafef(query, sizeof(query), "alter table %s add index(target)", outTable);
    sqlUpdate(conn, query);
    printf("Made target index\n");
    }

hgRemoveTabFile(tempDir, outTable);
}
Example #24
int PoolingConn::deliverPresynapticPerspective(PVLayerCube const * activity, int arborID) {

    //Check if we need to update based on connection's channel
    if(getChannel() == CHANNEL_NOUPDATE) {
        return PV_SUCCESS;
    }
    assert(post->getChannel(getChannel()));

    float dt_factor;
    if (getPvpatchAccumulateType()==ACCUMULATE_STOCHASTIC) {
        dt_factor = getParent()->getDeltaTime();
    }
    else {
        dt_factor = getConvertToRateDeltaTimeFactor();
    }

    const PVLayerLoc * preLoc = preSynapticLayer()->getLayerLoc();
    const PVLayerLoc * postLoc = postSynapticLayer()->getLayerLoc();

    assert(arborID >= 0);
    const int numExtended = activity->numItems;

    float resetVal = 0;
    if(getPvpatchAccumulateType() == ACCUMULATE_MAXPOOLING) {
        resetVal = -INFINITY;
        float* gSyn = post->getChannel(getChannel());
        //gSyn is res
#ifdef PV_USE_OPENMP_THREADS
        #pragma omp parallel for
#endif
        for(int i = 0; i < post->getNumNeuronsAllBatches(); i++) {
            gSyn[i] = resetVal;
        }

    }


    clearGateIdxBuffer();

    for(int b = 0; b < parent->getNBatch(); b++) {
        pvdata_t * activityBatch = activity->data + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
        pvdata_t * gSynPatchHeadBatch = post->getChannel(getChannel()) + b * postLoc->nx * postLoc->ny * postLoc->nf;
        int* gatePatchHeadBatch = NULL;
        if(needPostIndexLayer) {
            gatePatchHeadBatch = postIndexLayer->getChannel(CHANNEL_EXC) + b * postIndexLayer->getNumNeurons();
        }

        unsigned int * activeIndicesBatch = NULL;
        if(activity->isSparse) {
            activeIndicesBatch = activity->activeIndices + b * (preLoc->nx + preLoc->halo.rt + preLoc->halo.lt) * (preLoc->ny + preLoc->halo.up + preLoc->halo.dn) * preLoc->nf;
        }
        int numLoop;
        if(activity->isSparse) {
            numLoop = activity->numActive[b];
        }
        else {
            numLoop = numExtended;
        }

        if(thread_gateIdxBuffer) {
#ifdef PV_USE_OPENMP_THREADS
            #pragma omp parallel for
#endif
            for(int i = 0; i < parent->getNumThreads() * post->getNumNeurons(); i++) {
                int ti = i/post->getNumNeurons();
                int ni = i % post->getNumNeurons();
                thread_gateIdxBuffer[ti][ni] = -1;
            }
        }

#ifdef PV_USE_OPENMP_THREADS
        //Clear all gsyn buffers
        if(thread_gSyn) {
            int numNeurons = post->getNumNeurons();
#ifdef PV_USE_OPENMP_THREADS
            #pragma omp parallel for
#endif
            for(int i = 0; i < parent->getNumThreads() * numNeurons; i++) {
                int ti = i/numNeurons;
                int ni = i % numNeurons;
                thread_gSyn[ti][ni] = resetVal;
            }
        }
#endif // PV_USE_OPENMP_THREADS

#ifdef PV_USE_OPENMP_THREADS
        #pragma omp parallel for schedule(static)
#endif
        for (int loopIndex = 0; loopIndex < numLoop; loopIndex++) {
            int kPreExt;
            if(activity->isSparse) {
                kPreExt = activeIndicesBatch[loopIndex];
            }
            else {
                kPreExt = loopIndex;
            }

            float a = activityBatch[kPreExt] * dt_factor;
            //if (a == 0.0f) continue;

            //If we're using thread_gSyn, set this here
            pvdata_t * gSynPatchHead;
            //float * gatePatchHead = NULL;
            int * gatePatchHead = NULL;
#ifdef PV_USE_OPENMP_THREADS
            if(thread_gSyn) {
                int ti = omp_get_thread_num();
                gSynPatchHead = thread_gSyn[ti];
            }
            else {
                gSynPatchHead = gSynPatchHeadBatch;
            }

            if(needPostIndexLayer) {
                if(thread_gateIdxBuffer) {
                    int ti = omp_get_thread_num();
                    gatePatchHead = thread_gateIdxBuffer[ti];
                }
                else {
                    gatePatchHead = gatePatchHeadBatch;
                }
            }
#else // PV_USE_OPENMP_THREADS
            gSynPatchHead = gSynPatchHeadBatch;
            if(needPostIndexLayer) {
                gatePatchHead = gatePatchHeadBatch;
            }
#endif // PV_USE_OPENMP_THREADS
            //deliverOnePreNeuronActivity(kPreExt, arborID, a, gSynPatchHead, gatePatchHead);

            PVPatch * weights = getWeights(kPreExt, arborID);
            const int nk = weights->nx * fPatchSize();
            const int ny = weights->ny;
            const int sy  = getPostNonextStrides()->sy;       // stride in layer
            pvwdata_t * weightDataStart = NULL;
            pvgsyndata_t * postPatchStart = gSynPatchHead + getGSynPatchStart(kPreExt, arborID);
            int* postGatePatchStart = gatePatchHead + getGSynPatchStart(kPreExt, arborID);
            //float* postGatePatchStart = gatePatchHead + getGSynPatchStart(kPreExt, arborID);

            const int kxPreExt = kxPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
            const int kyPreExt = kyPos(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);
            const int kfPre = featureIndex(kPreExt, preLoc->nx + preLoc->halo.lt + preLoc->halo.rt, preLoc->ny + preLoc->halo.dn + preLoc->halo.up, preLoc->nf);

            const int kxPreGlobalExt = kxPreExt + preLoc->kx0;
            const int kyPreGlobalExt = kyPreExt + preLoc->ky0;

            const int kPreGlobalExt = kIndex(kxPreGlobalExt, kyPreGlobalExt, kfPre, preLoc->nxGlobal + preLoc->halo.lt + preLoc->halo.rt, preLoc->nyGlobal + preLoc->halo.up + preLoc->halo.dn, preLoc->nf);

            int offset = kfPre;
            int sf = fPatchSize();
            pvwdata_t w = 1.0;
            if(getPvpatchAccumulateType() == ACCUMULATE_SUMPOOLING) {
                float relative_XScale = pow(2, (post->getXScale() - pre->getXScale()));
                float relative_YScale = pow(2, (post->getYScale() - pre->getYScale()));
                w = 1.0/(nxp*nyp*relative_XScale*relative_YScale);
            }
            void* auxPtr = NULL;
            for (int y = 0; y < ny; y++) {
                if(needPostIndexLayer) {
                    auxPtr = (postGatePatchStart+ y*sy + offset);
                }
                (accumulateFunctionPointer)(kPreGlobalExt, nk, postPatchStart + y*sy + offset, a, &w, auxPtr, sf);
            }
        }
#ifdef PV_USE_OPENMP_THREADS
        //Accumulate back into gSyn // Should this be done in HyPerLayer where it can be done once, as opposed to once per connection?
        if(thread_gSyn) {
            pvdata_t * gSynPatchHead = gSynPatchHeadBatch;
            //float* gateIdxBuffer = postIndexLayer->getChannel(CHANNEL_EXC);
            int * gateIdxBuffer = NULL;
            if(needPostIndexLayer && thread_gateIdxBuffer) {
                gateIdxBuffer = gatePatchHeadBatch;
            }
            int numNeurons = post->getNumNeurons();
            //Looping over neurons first to be thread safe
            #pragma omp parallel for
            for(int ni = 0; ni < numNeurons; ni++) {
                //Different for maxpooling
                if(getPvpatchAccumulateType() == ACCUMULATE_MAXPOOLING) {
                    for(int ti = 0; ti < parent->getNumThreads(); ti++) {
                        if(gSynPatchHead[ni] < thread_gSyn[ti][ni]) {
                            gSynPatchHead[ni] = thread_gSyn[ti][ni];
                            if(needPostIndexLayer && thread_gateIdxBuffer) {
                                gateIdxBuffer[ni] = thread_gateIdxBuffer[ti][ni];
                                assert(gateIdxBuffer[ni] >= 0);
                            }
                        }
                    }
                }
                else {
                    for(int ti = 0; ti < parent->getNumThreads(); ti++) {
                        gSynPatchHead[ni] += thread_gSyn[ti][ni];
                    }
                }
            }
        }
#endif
    }
    if(activity->isSparse) {
        pvdata_t * gSyn = post->getChannel(getChannel());
        for (int k=0; k<post->getNumNeuronsAllBatches(); k++) {
            if (gSyn[k]==-INFINITY) {
                gSyn[k] = 0.0f;
            }
        }
    }
    return PV_SUCCESS;
}
Example #25
/** Print useful information about this object. In this case,
output all the weights for each layer. */
void BPN::printInfo(bool printWeights /* = true */) const
{
	LogWriter log;
	char buffer[50];
	sprintf(buffer, "Filename: %s", filename.c_str());
	log.println(buffer);
	sprintf(buffer, "Save version: %d", saveVersion);
	log.println(buffer);
	sprintf(buffer, "Type: %s", typeToString(id));
	log.println(buffer);
	sprintf(buffer, "Learning rate: %*g", 7, learningRate);
	log.println(buffer);
	sprintf(buffer, "Momentum: %*g", 7, momentum);
	log.println(buffer);
	sprintf(buffer, "Epochs completed: %d", epochsCompleted);
	log.println(buffer);
	sprintf(buffer, "Patterns completed: %g", patternsCompleted);
	log.println(buffer);
	sprintf(buffer, "Last pattern test: %g", lastPatternTest);
	log.println(buffer);
	if(dynamicLearningRate)
		sprintf(buffer, "Dynamic learning rate: True");
	else
		sprintf(buffer, "Dynamic learning rate: False");
	log.println(buffer);
	if(dynamicMomentum)
		sprintf(buffer, "Dynamic momentum: True");
	else
		sprintf(buffer, "Dynamic momentum: False");
	log.println(buffer);

	if(inputFieldShape==IFS_SQUARE)
		log.println("Input field shape: IFS_SQUARE");
	else if(inputFieldShape==IFS_DIAMOND)
		log.println("Input field shape: IFS_DIAMOND");
	else
		log.println("Input field shape: UNKNOWN");

	// output neurons info
	sprintf(buffer, "Input neurons: %d", weights[0].getHeight());
	log.println(buffer);
	for(int i=1;i<weights.size();i++)
	{
		sprintf(buffer, "Hidden neurons(%d): %d", i-1, weights[i].getHeight());
		log.println(buffer);
	}
	sprintf(buffer, "Output neurons: %d", weights[weights.size()-1].getWidth());
	log.println(buffer);

	if(printWeights)
	{
		for(int i=0;i<weights.size();i++)
		{
			sprintf(buffer, "Weights for layer %d:", i);
			log.println(buffer);
			getWeights()[i].printInfo();
		}
		for(int i=0;i<biasWeights.size();i++)
		{
			sprintf(buffer, "Bias weights for layer %d:", i);
			log.println(buffer);
			biasWeights[i].printInfo();
		}
	}

	// print rank test values
//	if(printTestResults)
//	{
//		log.print("Rank test values: ");
//		for(i=0;i<rankTestValue.size();i++)
//		{
//			sprintf(buffer, "%d", rankTestEpoch[i]);
//			message+=buffer;
//			message+="[";
//			sprintf(buffer, "%*g", 9, rankTestValue[i]);
//			message+=buffer;
//			message+="] ";
//			log.print(message);
//		}
//	}
	log.print("\n");
}
CFloatImage 
SupportVectorMachine::predictSlidingWindow(const Feature& feat) const
{
	CFloatImage score(CShape(feat.Shape().width,feat.Shape().height,1));
	score.ClearPixels();

	/******** BEGIN TODO ********/
	// Sliding window prediction.
	//
	// In this project we are using a linear SVM. This means that 
	// its classification function is very simple, consisting of a
	// dot product of the feature vector with a set of weights learned
	// during training, followed by a subtraction of a bias term
	//
	//          pred <- dot(feat, weights) - bias term
	//
	// Now this is very simple to compute when we are dealing with
	// cropped images, our computed features have the same dimensions
	// as the SVM weights. Things get a little more tricky when you
	// want to evaluate this function over all possible subwindows of
	// a larger feature, one that we would get by running our feature
	// extraction on an entire image. 
	//
	// Here you will evaluate the above expression by breaking
	// the dot product into a series of convolutions (remember that
	// a convolution can be thought of as a pointwise dot product with
	// the convolution kernel), each one with a different band.
	//
	// Convolve each band of the SVM weights with the corresponding
	// band in feat, and add the resulting score image. The final
	// step is to subtract the SVM bias term given by this->getBiasTerm().
	//
	// Hint: you might need to set the origin for the convolution kernel
	// in order to get the result from convolution to be correctly centered
	// 
	// Useful functions:
	// Convolve, BandSelect, this->getWeights(), this->getBiasTerm()

	//printf("TODO: SupportVectorMachine.cpp:273\n"); exit(EXIT_FAILURE); 
	Feature weights = getWeights();
	for (int b=0; b<feat.Shape().nBands; b++){
		CFloatImage currentBandWeights = CFloatImage(weights.Shape().width, weights.Shape().height, 1);
		CFloatImage currentBandFeatures = CFloatImage(feat.Shape().width, feat.Shape().height, 1);
		CFloatImage convolved = CFloatImage(CShape(feat.Shape().width, feat.Shape().height, 1));
		CFloatImage final(CShape(feat.Shape().width, feat.Shape().height, 1));
		BandSelect(weights, currentBandWeights, b, 0);
		BandSelect(feat, currentBandFeatures, b, 0);
		currentBandWeights.origin[0] = weights.origin[0];
		currentBandWeights.origin[1] = weights.origin[1];
		Convolve(feat, convolved, currentBandWeights);
		BandSelect(convolved, final, b, 0);
		try{
		score += final;
		} catch (CError err) {
			printf("OH NOES: the final chapter!");
		}
	}
	score-=getBiasTerm();
	/******** END TODO ********/

	return score;
}
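A naive reference for what the convolution-based version computes at each location: slide the weight window over the feature, take the dot product, subtract the bias. Single band, plain arrays, ignoring borders:

#include <cstddef>
#include <vector>

float windowScore(const std::vector<std::vector<float>>& feat,
                  const std::vector<std::vector<float>>& w,
                  std::size_t x0, std::size_t y0, float bias) {
    float s = -bias;
    for (std::size_t y = 0; y < w.size(); ++y)
        for (std::size_t x = 0; x < w[y].size(); ++x)
            s += w[y][x] * feat[y0 + y][x0 + x];
    return s;
}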
void hgExpDistance(char *database, char *posTable, char *expTable, char *outTable)
/* hgExpDistance - Create table that measures expression distance between pairs. */
{
struct sqlConnection *conn = sqlConnect(database);
struct sqlResult *sr;
char query[256];
char **row;
struct hash *expHash = hashNew(16);
int realExpCount = -1;
struct microData *gene;
int rc, t;
pthread_t *threads = NULL;
pthread_attr_t attr;
int *threadID = NULL;
void *status;
char *tempDir = ".";
int arrayNum; 
struct microDataDistance *geneDistPtr = NULL;	
struct microDataDistance *geneDistArray = NULL;	
int geneIx;
FILE *f = NULL;

/* Get list/hash of all items with expression values. */
safef(query, sizeof(query), "select name,expCount,expScores from %s", posTable);
sr = sqlGetResult(conn, query);
while ((row = sqlNextRow(sr)) != NULL)
    {
    char *name = row[0];
    if (!hashLookup(expHash, name))
	{
	int expCount = sqlUnsigned(row[1]);
	int commaCount;
	float *expScores = NULL;

	sqlFloatDynamicArray(row[2], &expScores, &commaCount);
	if (expCount != commaCount)
	    errAbort("expCount and expScores don't match on %s in %s", name, posTable);
	if (realExpCount == -1)
	    realExpCount = expCount;
	if (expCount != realExpCount)
	    errAbort("In %s some rows have %d experiments others %d", 
	    	name, expCount, realExpCount);
	AllocVar(gene);
	gene->expCount = expCount;
	gene->expScores = expScores;
	hashAddSaveName(expHash, name, gene, &gene->name);
	slAddHead(&geneList, gene);
	}
    }
sqlFreeResult(&sr);
conn = sqlConnect(database);
slReverse(&geneList);
geneCount = slCount(geneList);
printf("Have %d elements in %s\n", geneCount, posTable);

weights = getWeights(realExpCount);

if (optionExists("lookup"))
    geneList = lookupGenes(conn, optionVal("lookup", NULL), geneList);
geneCount = slCount(geneList);
printf("Got %d unique elements in %s\n", geneCount, posTable);

sqlDisconnect(&conn);	/* Disconnect because next step is slow. */


if (geneCount < 1)
    errAbort("ERROR: unique gene count less than one ?");

f = hgCreateTabFile(tempDir, outTable);
synQ = synQueueNew();

/* instantiate threads */
AllocArray( threadID, numThreads );
AllocArray( threads, numThreads );
pthread_attr_init( &attr );
pthread_mutex_init( &mutexDotOut, NULL );
pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );

for (t = 0; t < numThreads; t++) {
	threadID[t] = t;
	rc = pthread_create( &threads[t], &attr, computeDistance, 
						(void *) &threadID[t]);
	if (rc)
		errAbort("ERROR: in pthread_create() %d\n", rc );
} 

/* this thread will write to the file from the queue */
for (arrayNum = 0; arrayNum < geneCount; arrayNum++) {
	geneDistArray = (struct microDataDistance *)synQueueGet( synQ );
	geneDistPtr = geneDistArray;
    	/* Print out closest GENEDISTS distances in tab file. */
    	for (geneIx=0; geneIx < GENEDISTS && geneIx < geneCount; 
						++geneIx, geneDistPtr++)
		if (geneDistPtr != NULL)
			fprintf(f, "%s\t%s\t%f\n", geneDistPtr->name1, 
				geneDistPtr->name2, geneDistPtr->distance);
		else
			errAbort("ERROR: writing distance %d to file\n", 
							geneIx);
	freeMem( geneDistArray );
}

/* synchronize all threads */
for (t = 0; t < numThreads; t++) {
	rc = pthread_join( threads[t], &status);
	if (rc)
		errAbort("ERROR: in pthread_join() %d\n", rc );
} 

printf("Made %s.tab\n", outTable);

slFreeList( &geneList );

pthread_mutex_destroy( &mutexDotOut );
pthread_attr_destroy( &attr );

/* Create and load table. */
conn = sqlConnect(database);
distanceTableCreate(conn, outTable);
hgLoadTabFile(conn, tempDir, outTable, &f);
printf("Loaded %s\n", outTable);

/* Add indices. */
safef(query, sizeof(query), "alter table %s add index(query(12))", outTable);
sqlUpdate(conn, query);
printf("Made query index\n");
if (optionExists("targetIndex"))
    {
    safef(query, sizeof(query), "alter table %s add index(target(12))", outTable);
    sqlUpdate(conn, query);
    printf("Made target index\n");
    }

hgRemoveTabFile(tempDir, outTable);
}
Example #28
McrouterRouteHandlePtr McRouteHandleProvider::makePoolRoute(
  McRouteHandleFactory& factory, const folly::dynamic& json) {

  checkLogic(json.isObject() || json.isString(),
             "PoolRoute should be object or string");
  const folly::dynamic* jpool;
  if (json.isObject()) {
    jpool = json.get_ptr("pool");
    checkLogic(jpool, "PoolRoute: pool not found");
  } else {
    jpool = &json;
  }
  auto p = makePool(*jpool);
  auto pool = std::move(p.first);
  auto destinations = std::move(p.second);

  if (json.isObject()) {
    if (auto maxOutstandingPtr = json.get_ptr("max_outstanding")) {
      checkLogic(maxOutstandingPtr->isInt(),
                 "PoolRoute {}: max_outstanding is not int", pool->getName());
      auto maxOutstanding = maxOutstandingPtr->asInt();
      if (maxOutstanding) {
        for (auto& destination: destinations) {
          destination = makeOutstandingLimitRoute(std::move(destination),
                                                  maxOutstanding);
        }
      }
    }
  }

  if (json.isObject() && json.count("shadows")) {
    folly::StringPiece shadowPolicy = "default";
    if (auto jshadow_policy = json.get_ptr("shadow_policy")) {
      checkLogic(jshadow_policy->isString(),
                 "PoolRoute: shadow_policy is not a string");
      shadowPolicy = jshadow_policy->stringPiece();
    }

    McrouterShadowData data;
    for (auto& shadow : json["shadows"]) {
      checkLogic(shadow.count("target"),
                 "PoolRoute {} shadows: no target for shadow", pool->getName());
      auto s = ShadowSettings::create(shadow, proxy_->router());
      if (s) {
        data.emplace_back(factory.create(shadow["target"]), std::move(s));
      }
    }

    for (size_t i = 0; i < destinations.size(); ++i) {
      McrouterShadowData destinationShadows;
      for (const auto& shadowData : data) {
        if (shadowData.second->startIndex() <= i &&
            i < shadowData.second->endIndex()) {
          destinationShadows.push_back(shadowData);
        }
      }
      if (!destinationShadows.empty()) {
        destinationShadows.shrink_to_fit();
        destinations[i] = extraProvider_->makeShadow(
          proxy_, std::move(destinations[i]),
          std::move(destinationShadows), shadowPolicy);
      }
    }
  }

  // add weights and override whatever we have in PoolRoute::hash
  folly::dynamic jhashWithWeights = folly::dynamic::object();
  if (pool->getWeights()) {
    jhashWithWeights = folly::dynamic::object
      ("hash_func", WeightedCh3HashFunc::type())
      ("weights", *pool->getWeights());
  }

  if (json.isObject()) {
    if (auto jhash = json.get_ptr("hash")) {
      checkLogic(jhash->isObject() || jhash->isString(),
                 "PoolRoute {}: hash is not object/string", pool->getName());
      if (jhash->isString()) {
        jhashWithWeights["hash_func"] = *jhash;
      } else { // object
        for (const auto& it : jhash->items()) {
          jhashWithWeights[it.first] = it.second;
        }
      }
    }
  }
  auto route = makeHashRoute(jhashWithWeights, std::move(destinations));

  if (json.isObject()) {
    if (proxy_->router().opts().destination_rate_limiting) {
      if (auto jrates = json.get_ptr("rates")) {
        route = makeRateLimitRoute(std::move(route), RateLimiter(*jrates));
      }
    }

    if (auto jsplits = json.get_ptr("shard_splits")) {
      route = makeShardSplitRoute(std::move(route), ShardSplitter(*jsplits));
    }
  }

  auto asynclogName = pool->getName();
  bool needAsynclog = true;
  if (json.isObject()) {
    if (auto jasynclog = json.get_ptr("asynclog")) {
      checkLogic(jasynclog->isBool(), "PoolRoute: asynclog is not bool");
      needAsynclog = jasynclog->getBool();
    }
    if (auto jname = json.get_ptr("name")) {
      checkLogic(jname->isString(), "PoolRoute: name is not a string");
      asynclogName = jname->stringPiece().str();
    }
  }
  if (needAsynclog) {
    route = createAsynclogRoute(std::move(route), asynclogName);
  }

  return route;
}
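For reference, a hedged illustration of the config shape this parser accepts, built as a folly::dynamic the way the code above reads it (field names taken from the get_ptr calls; the pool and route names are hypothetical):

#include <folly/dynamic.h>

// A PoolRoute object with the optional fields handled above.
folly::dynamic examplePoolRoute = folly::dynamic::object
    ("pool", "my_pool")                                          // required
    ("max_outstanding", 64)
    ("hash", folly::dynamic::object("hash_func", "WeightedCh3"))
    ("asynclog", true)
    ("name", "my_pool_route");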
int LCALIFLateralKernelConn::allocateDataStructures() {
   int status = HyPerConn::allocateDataStructures();

   // Neurons don't inhibit themselves, only their neighbors; set self-interaction weights to zero.
   assert(nxp % 2 == 1 && nyp % 2 == 1 && getNumDataPatches()==nfp);
   for (int k=0; k<getNumDataPatches(); k++) {
      int n = kIndex((nxp-1)/2, (nyp-1)/2, k, nxp, nyp, nfp);
      get_wDataHead(0, k)[n] = 0.0f;
   }

   integratedSpikeCountCube = pvcube_new(pre->getLayerLoc(), pre->getNumExtended());
   integratedSpikeCount = integratedSpikeCountCube->data;
   for (int k=0; k<pre->getNumExtended(); k++) {
      integratedSpikeCount[k] = integrationTimeConstant*getTargetRateKHz(); // Spike counts initialized to equilibrium value
   }
   mpi_datatype = Communicator::newDatatypes(pre->getLayerLoc());
   if (mpi_datatype==NULL) {
      fprintf(stderr, "LCALIFLateralKernelConn \"%s\" error creating mpi_datatype\n", name);
      abort();
   }

   // Compute the number of times each patch contributes to dw, for proper averaging.
   int num_arbors = numberOfAxonalArborLists();
   interiorCounts = (float **) calloc(num_arbors, sizeof(float *));
   if (interiorCounts==NULL) {
      fprintf(stderr, "LCALIFLateralKernelConn::initialize \"%s\" error: unable to allocate memory for interiorCounts pointer\n", name);
   }
   interiorCounts[0] = (float *) calloc(getNumDataPatches()*nxp*nyp*nfp, sizeof(float));
   if (interiorCounts[0]==NULL) {
      fprintf(stderr, "LCALIFLateralKernelConn::initialize \"%s\" error: unable to allocate memory for interiorCounts\n", name);
   }
   for (int arbor=1; arbor<num_arbors; arbor++) {
      interiorCounts[arbor] = interiorCounts[0]+arbor*getNumDataPatches()*nxp*nyp*nfp;
   }

   const PVLayerLoc * preloc = pre->getLayerLoc();
   int nxpre = preloc->nx;
   int nypre = preloc->ny;
   int nfpre = preloc->nf;

   int nExt = pre->getNumExtended();
   int sya = getPostExtStrides()->sy;
   int nxglob = preloc->nxGlobal;
   int nyglob = preloc->nyGlobal;
   int kx0 = preloc->kx0;
   int ky0 = preloc->ky0;
   for (int arbor=0; arbor<numberOfAxonalArborLists(); arbor++) {
      for(int kExt=0; kExt<nExt;kExt++) {
         int xglob = kxPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + kx0 - preloc->halo.lt;
         int yglob = kyPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + ky0 - preloc->halo.up;
         if (xglob < 0 || xglob >= nxglob || yglob < 0 || yglob >= nyglob) {
            continue;
         }
         PVPatch * weights = getWeights(kExt,arbor);
         int offset = (int) getAPostOffset(kExt, arbor);
         int ny = weights->ny;
         int nk = weights->nx * nfp;
         int interiorCountOffset = get_wData(arbor, kExt)-get_wDataStart(arbor);
         int lineoffsetw = 0;
         int lineoffseta = 0;
         for( int y=0; y<ny; y++ ) {
            for( int k=0; k<nk; k++ ) {
               int postactindex = offset+lineoffseta+k;
               if (postactindex != kExt) { // Neurons don't inhibit themselves
                  interiorCounts[arbor][interiorCountOffset + lineoffsetw + k]++;
               }
            }
            lineoffsetw += syp;
            lineoffseta += sya;
         }
      }
   }
   int bufsize = numberOfAxonalArborLists() * getNumDataPatches() * nxp * nyp * nfp;
// TODO-CER-2014.3.26 - Ensure that reduction is done when not using MPI
#ifdef PV_USE_MPI
   MPI_Allreduce(MPI_IN_PLACE, interiorCounts[0], bufsize, MPI_FLOAT, MPI_SUM, parent->icCommunicator()->communicator());
#endif

   return status;
}
int LCALIFLateralKernelConn::update_dW(int axonId) {
   if (parent->simulationTime() < dWUpdateTime) {
      return PV_SUCCESS;
   }
   dWUpdateTime += dWUpdatePeriod;
   int nExt = preSynapticLayer()->getNumExtended();
   int numKernelIndices = getNumDataPatches();
   updateIntegratedSpikeCount();
   float target_rate_sq = getTargetRateKHz()*getTargetRateKHz();
   const pvdata_t * preactbuf = integratedSpikeCount;
   const pvdata_t * postactbuf = integratedSpikeCount;

   int sya = (post->getLayerLoc()->nf * (post->getLayerLoc()->nx + post->getLayerLoc()->halo.lt + post->getLayerLoc()->halo.rt));

   const PVLayerLoc * preloc = pre->getLayerLoc();
   int nxpre = preloc->nx;
   int nypre = preloc->ny;
   int nfpre = preloc->nf;
   int nxglob = preloc->nxGlobal;
   int nyglob = preloc->nyGlobal;
   int kx0 = preloc->kx0;
   int ky0 = preloc->ky0;
   for(int kExt=0; kExt<nExt;kExt++) {
      int xglob = kxPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + kx0 - preloc->halo.lt;
      int yglob = kyPos(kExt, nxpre + preloc->halo.lt + preloc->halo.rt, nypre + preloc->halo.dn + preloc->halo.up, nfpre) + ky0 - preloc->halo.up;
      if (xglob < 0 || xglob >= nxglob || yglob < 0 || yglob >= nyglob) {
         continue;
      }
      PVPatch * weights = getWeights(kExt,axonId);
      size_t offset = getAPostOffset(kExt, axonId);
      pvdata_t preactrate = preactbuf[kExt]/integrationTimeConstant;
      int ny = weights->ny;
      int nk = weights->nx * nfp;
      pvwdata_t * dwdata = get_dwData(axonId, kExt);
      int lineoffsetw = 0;
      int lineoffseta = 0;
      for( int y=0; y<ny; y++ ) {
         for( int k=0; k<nk; k++ ) {
            int postactindex = offset+lineoffseta+k;
            if (postactindex != kExt) { // Neurons don't inhibit themselves
               pvdata_t postactrate = postactbuf[postactindex]/integrationTimeConstant;
               pvdata_t dw = preactrate*postactrate-target_rate_sq;
               dwdata[lineoffsetw + k] += dw;
            }
         }
         lineoffsetw += syp;
         lineoffseta += sya;
      }
   }
   // Divide each dw by the number of correlations that contributed to that dw (divisorptr was summed over all MPI processes in initialization).
   // Also divide by target_rate_sq to normalize to a dimensionless quantity.
   // The nonlinear filter and the multiplication by dt/tauINH takes place in updateWeights, because the filter has to be applied after reduceKernels
   // and the multiplication by dt/tauINH needs to take place after the filter.
   int patch_size = nxp*nyp*nfp;
   for( int kernelindex=0; kernelindex<numKernelIndices; kernelindex++ ) {
      pvwdata_t * dwpatchdata = get_dwDataHead(axonId,kernelindex);
      float * divisorptr = &interiorCounts[axonId][kernelindex*patch_size];
      for( int n=0; n<patch_size; n++ ) {
         assert(divisorptr[n]>0 || dwpatchdata[n]==0);
         if (divisorptr[n]>0) dwpatchdata[n] /= target_rate_sq * divisorptr[n];
      }
   }

   lastUpdateTime = parent->simulationTime();

   return PV_SUCCESS;
}
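The inner update is a correlation-based plasticity rule: each dw accumulates preRate * postRate - targetRate^2, so weights between cells that fire together above the target rate grow, and the rest decay. The rule in isolation:

#include <cstddef>
#include <vector>

// dw[k] += preRate * postRates[k] - targetRate^2 (self-interaction is
// excluded by the caller, as in the loop above).
void accumulateDw(std::vector<float>& dw, float preRate,
                  const std::vector<float>& postRates, float targetRate) {
    const float targetSq = targetRate * targetRate;
    for (std::size_t k = 0; k < dw.size(); ++k) {
        dw[k] += preRate * postRates[k] - targetSq;
    }
}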