示例#1
0
/* Krichevski-Trofimov (KT) estimated log probability accessor. */
weight_t CTNode::logProbEstimated() const {

    // Plain KT estimator: return the stored estimate unmodified.
    if (!UseZeroRedundancy) {
        return m_log_prob_est;
    }

    // Zero-redundancy variant. A node that has seen no symbols
    // contributes probability 1, i.e. log-probability 0.
    if (m_count[0] + m_count[1] == 0) {
        return 0.0;
    }

    // Mix the KT estimate (weight 1/2) with a deterministic component
    // (weight 1/4) for each symbol value that has never been observed.
    double result = log_point_five + m_log_prob_est;
    if (m_count[0] == 0) {
        result = logAdd(log_quarter, result);
    }
    if (m_count[1] == 0) {
        result = logAdd(log_quarter, result);
    }
    return result;
}
/*
 * CRF_StdSegStateNode::computeAlpha
 *
 * Read alpha vectors of previous nodes directly from prevNode and store the result of the alpha vector in alphaArray.
 *
 * Compute the alpha vector for the forward backward computation for this node.
 *
 * Returns: alphaScale (always 0.0 here; kept for interface compatibility).
 *
 * Fix: removed the unreachable "return(-1);" statements that followed the
 * throw inside the catch block (dead code; a throw never falls through).
 */
double CRF_StdSegStateNode::computeAlpha()
{
	//QNUInt32 nLabs = this->crf_ptr->getNLabs();
	this->alphaScale=0.0;

	checkNumPrevNodes();

	// clab indexes alphaArray/stateArray laid out as
	// (duration-1) * nActualLabs + label, advanced monotonically below.
	QNUInt32 clab = 0;
	for (QNUInt32 dur = 1; dur <= this->numPrevNodes; dur++)
	{
		// Segments of duration `dur` start right after the node `dur` steps back.
		CRF_StateNode* prevAdjacentSeg = this->prevNodes[this->numPrevNodes - dur];
		double* prev_adj_seg_alpha = prevAdjacentSeg->getAlpha();
		for (QNUInt32 lab = 0; lab < this->nActualLabs; lab++)
		{
			// Accumulate log( sum_plab exp(prev_alpha[plab] + trans[plab][clab]) )
			// using the standard max-shifted logAdd for numerical stability.
			this->logAddAcc[0]=prev_adj_seg_alpha[0]+this->transMatrix[0+clab];

			double maxv=this->logAddAcc[0];
			//TODO: for (QNUInt32 plab = 1; plab < prevAdjacentSeg->getNLabs(); plab++) {  //full implementation. But not working now because logAdd() cannot do calculation on LOG0 yet.
			for (QNUInt32 plab = 1; plab < prevAdjacentSeg->getNumAvailLabs(); plab++) {   //faster implementation, not guaranteed to work for all classes of previous nodes.
				//this->logAddAcc[plab]=prev_adj_seg_alpha[plab]+this->transMatrix[plab * prevAdjacentSeg->getNLabs() + clab];
				this->logAddAcc[plab]=prev_adj_seg_alpha[plab]+this->transMatrix[plab * this->nLabs + clab];

				if (this->logAddAcc[plab]>maxv) {
					maxv=logAddAcc[plab];
				}
			}
			try {
				//TODO: this->alphaArray[clab]=logAdd(this->logAddAcc,maxv,prevAdjacentSeg->getNLabs());        //full implementation. But not working now because logAdd() cannot do calculation on LOG0 yet.
				this->alphaArray[clab]=logAdd(this->logAddAcc,maxv,prevAdjacentSeg->getNumAvailLabs());   //faster implementation, not guaranteed to work for all classes of previous nodes.
			}
			catch (exception &e) {
				string errstr="CRF_StdSegStateNode::computeAlpha() caught exception: "+string(e.what())+" while computing alpha";
				throw runtime_error(errstr);
			}
			// Fold in the state (emission) score for this label/duration.
			this->alphaArray[clab]+=this->stateArray[clab];
			clab++;
		}
	}
	// These are the cases when the current node serves as the beginning segment of the sequence, so there is no previous node.
	// For those durations the alpha is just the state score.
	for (QNUInt32 dur = this->numPrevNodes + 1; dur <= this->nodeLabMaxDur; dur++)
	{
		for (QNUInt32 lab = 0; lab < this->nActualLabs; lab++)
		{
			this->alphaArray[clab]=this->stateArray[clab];
			clab++;
		}
	}

	return this->alphaScale;

}
示例#3
0
void LogSoftMax::frameForward(int t, real *f_inputs, real *f_outputs)
{
  real sum = LOG_ZERO;
  for(int i = 0; i < n_inputs; i++)
  {
    real z = f_inputs[i];
    f_outputs[i] = z;
    sum = logAdd(sum, z);
  }

  for(int i = 0; i < n_outputs; i++)
    f_outputs[i] -= sum;
}
/*
 * CRF_StdSegStateNode::computeAlphaAlignedSum
 *
 * Returns: Sum (in log space) of the values in the alphaAligned vector of this node
 *
 * Used to compute the "soft" normalization constant.
 */
double CRF_StdSegStateNode::computeAlphaAlignedSum()
{
	double result;
	try {
		// Full implementation would sum over all nLabs entries, but logAdd()
		// cannot handle LOG0 yet, so only the available labels are summed.
		//result=logAdd(&(this->alphaArrayAligned),this->nLabs);
		result=logAdd(&(this->alphaArrayAligned),this->numAvailLabs);
	}
	catch (exception& e) {
		//changed by Ryan
		//string errstr="CRF_StdStateNodeLog::computeExpF() threw exception: "+string(e.what());
		string errstr="CRF_StdStateNode::computeAlphaAlignedSum() threw exception: "+string(e.what());
		throw runtime_error(errstr);
	}
	return result;
}
示例#5
0
// Predict the (log) height of each peak implied by the current fragment
// marginals: for every peak, log-sum over all fragments of
// marginal(fragment) + log_spec_factor(peak, fragment).
void IPFP::computePredictedPeakHeights( Message &out, spec_factor_t &log_spec_factor, Message &marginals){

	// One output entry per peak in the spectrum factor.
	out.reset( log_spec_factor.size() );
	for( unsigned int peak = 0; peak < log_spec_factor.size(); peak++ ){

		// Accumulate the log-sum starting from the log-zero sentinel.
		double accum = NULL_PROB;
		for( Message::const_iterator frag = marginals.begin(); frag != marginals.end(); ++frag )
			accum = logAdd( accum, *frag + log_spec_factor[peak][frag.index()] );

		out.addToIdx(peak, accum);
	}
}
示例#6
0
// Compute, for each spectrum/energy level, the desired per-fragment marginals
// that the IPFP projection step should move the current marginals towards.
// All probabilities are kept in log space (NULL_PROB is the log-zero sentinel).
void IPFP::setDesiredMarginals( std::vector<Message> &desired_margs, std::vector<Message> &marginals ){

	unsigned int num_spectra = cfg->dv_spectrum_depths.size();

	//Initialise factor probabilities between peaks and fragments
	//and peak targets
	std::vector<Message> peak_targets( num_spectra );
	std::vector<spec_factor_t> log_spec_factors( num_spectra );
	for( unsigned int energy = 0; energy < num_spectra; energy++ ){
		int spec_idx = cfg->dv_spectrum_indexes[energy];
		initLogSpecFactor( log_spec_factors[energy], moldata->getSpectrum(spec_idx) );
		initPeakTargets( peak_targets[energy], moldata->getSpectrum(spec_idx) );
	}

	//Set desired marginals
	Message pk_heights;
	desired_margs.resize(num_spectra);
	for( unsigned int energy = 0; energy < num_spectra; energy++ ){

		//Adjust the fragment marginals so that they sum to the correct target for each
		//peak yet maintain the correct ratio between fragments of similar mass
		//given prior likelihoods
		computePredictedPeakHeights( pk_heights, log_spec_factors[energy], marginals[energy] );

		//Use these to find the desired marginals for each fragment
		desired_margs[energy].reset(marginals[energy].size());
		Message::const_iterator it = marginals[energy].begin();
		for( ; it != marginals[energy].end(); ++it ){
			// For each fragment: log-sum over peaks of
			// target(peak) - predicted(peak) + factor(peak, fragment),
			// i.e. a ratio-weighted redistribution of peak mass, in log space.
			double log_sum = NULL_PROB;
			for( unsigned int i = 0; i < pk_heights.size(); i++ ){
				if( pk_heights.getIdx(i) > -10000.0 ){	//Ignore peaks that can't be realistically achieved 	
					double tmp = peak_targets[energy].getIdx(i);
					tmp -= pk_heights.getIdx(i);	// log( target / predicted )
					log_sum = logAdd( log_sum, tmp + log_spec_factors[energy][i][it.index()]);
				}
			}
			// Scale by the fragment's current marginal (log-space multiply).
			log_sum += *it;
			desired_margs[energy].addToIdx(it.index(), log_sum);
		}
	}
	
}
/*
 * CRF_StdSegStateNode::computeBeta
 *
 * Inputs: scale - scaling constant for result_beta array
 *
 * Returns: alphaScale (always the stored scale; kept for interface compatibility)
 *
 * Read the beta vectors of next nodes directly from nextNode and store the result of the beta vector in betaArray.
 *
 * Compute the beta vector for the node before this one and store it in result_beta
 */
double CRF_StdSegStateNode::computeBeta(double scale)
{
	// Logic desired:
	//	* Compute beta_i[size of alpha[]+1] to be all 1s
	//	* Multiply M_i[current] by beta_i[current+1] to get beta_i[current]

//	QNUInt32 nLabs = this->crf_ptr->getNLabs();
//
//	for (QNUInt32 clab=0; clab<nLabs; clab++) {
//		this->tempBeta[clab]=this->betaArray[clab]+this->stateArray[clab];
//	}
//
//	for (QNUInt32 plab=0; plab<nLabs; plab++) {
//		this->logAddAcc[0]=this->transMatrix[plab*nLabs+0]+this->tempBeta[0];
//		double maxv=this->logAddAcc[0];
//		for (QNUInt32 clab=1; clab<nLabs; clab++) {
//			this->logAddAcc[clab]=this->transMatrix[plab*nLabs+clab]+this->tempBeta[clab];
//			if (this->logAddAcc[clab]>maxv) {
//				maxv=this->logAddAcc[clab];
//			}
//		}
//		try {
//			result_beta[plab]=logAdd(this->logAddAcc,maxv,nLabs);
//		}
//		catch (exception &e) {
//			string errstr="CRF_StdSegStateNode::computeBeta() caught exception: "+string(e.what())+" while computing beta";
//			throw runtime_error(errstr);
//			return(-1);
//		}
//	}

	checkNumNextNodes();

	// if numNextNodes == 0, this is the last node of the sequence.
	// Sets the beta value in this node to the special case for the end of the sequence.
	if (this->numNextNodes == 0)
	{
		setTailBeta();
		return this->alphaScale;
	}

	// Pre-compute, for every reachable next label slot, the sum (in log space)
	// of the next node's beta and its state (emission) score.
	// NOTE(review): this fill loop steps nActualLabs entries per next node,
	// while the accumulation below steps nextNodeNActualLabs per next node
	// (see the TODOs there) — confirm these two counts agree for all node
	// configurations, otherwise tempBeta indexing diverges.
	QNUInt32 nextlab = 0;
	for (QNUInt32 dur = 1; dur <= this->numNextNodes; dur++)
	{
		CRF_StateNode* nextAdjacentSeg = this->nextNodes[dur - 1];
		double* next_adj_seg_beta = nextAdjacentSeg->getBeta();
		for (QNUInt32 lab = 0; lab < this->nActualLabs; lab++) {
			this->tempBeta[nextlab] = next_adj_seg_beta[nextlab] + nextAdjacentSeg->getStateValue(nextlab);
			nextlab++;
		}
	}

	// For each current label, log-sum over all next labels of
	// trans(clab -> nextlab) + tempBeta[nextlab], max-shifted via logAdd.
	for (QNUInt32 clab = 0; clab < this->numAvailLabs; clab++)
	{
		CRF_StateNode* nextAdjacentSeg = this->nextNodes[0];
		//this->transMatrix[plab*nLabs+0]+this->tempBeta[0]
		this->logAddAcc[0] = nextAdjacentSeg->getTransValue(clab, 0) + this->tempBeta[0];
		double maxv=this->logAddAcc[0];
		QNUInt32 nextlab = 0;

		for (QNUInt32 dur = 1; dur <= this->numNextNodes; dur++)
		{
			nextAdjacentSeg = this->nextNodes[dur - 1];
			//TODO: It should be nActualLabs_of_nextNode instead of nActualLabs_of_thisNode as the number of iterations.
			//for (QNUInt32 lab = 0; lab < this->nActualLabs; lab++) {
			for (QNUInt32 lab = 0; lab < this->nextNodeNActualLabs; lab++) {

				//this->logAddAcc[clab]=this->transMatrix[plab*nLabs+clab]+this->tempBeta[clab];
				this->logAddAcc[nextlab] = nextAdjacentSeg->getTransValue(clab, nextlab) + this->tempBeta[nextlab];

				if (this->logAddAcc[nextlab]>maxv) {
					maxv=logAddAcc[nextlab];
				}
				nextlab++;
			}
		}
		try {
			//TODO:It should be nActualLabs_of_nextNode instead of nActualLabs_of_thisNode.
			//this->betaArray[clab]=logAdd(this->logAddAcc,maxv,this->nActualLabs * this->numNextNodes);
			this->betaArray[clab]=logAdd(this->logAddAcc,maxv,this->nextNodeNActualLabs * this->numNextNodes);
		}
		catch (exception &e) {
			string errstr="CRF_StdSegStateNode::computeBeta() caught exception: "+string(e.what())+" while computing beta";
			throw runtime_error(errstr);
			return(-1);
		}
	}

	return this->alphaScale;
}
示例#8
0
// Rebuild the video-texture model from the configured movie file:
// opens the movie, loads all frames, computes pairwise frame distances,
// smooths them over time, refines them with Q-learning, and converts them
// into per-frame cumulative log transition probabilities (_logCumProbs).
// Runs under _mutex; returns early (with the mutex released) if the movie
// cannot be opened.
void Gear_VideoTexture::onUpdateSettings()
{
//   Timer timer;
//   timer.reset();

  char tempstr[1024];

  // XXX todo : parameters
  double power = 1.0;   // exponent applied to smoothed distances
  int nEpochs = 3;      // Q-learning iterations
  double alpha = 0.999; // Q-learning discount factor

  pthread_mutex_lock(_mutex);

  // Initialize (open) the movie.
  strcpy(tempstr,_settings.get(SETTING_FILENAME)->valueStr().c_str());
  
  std::cout << "opening movie : " << tempstr << std::endl;

  if (_file!=NULL)
    mpeg3_close(_file);
  
  _file = mpeg3_open(tempstr);

  if (_file==NULL)
  {
    std::cout << "error opening movie : " << tempstr << std::endl;
    pthread_mutex_unlock(_mutex);
    return;
  }
  _sizeX = mpeg3_video_width(_file, 0);
  _sizeY = mpeg3_video_height(_file, 0);

  // NOTE(review): _nFrames is no longer clamped to mpeg3_video_frames()
  // (clamping line commented out below) — a setting larger than the movie
  // length may read past the end; confirm this is intended.
  //_nFrames = (int)CLAMP((long int)_settings.get(SETTING_NFRAMES)->valueInt(), 1l, mpeg3_video_frames(_file, 0));
  _nFrames = (int)_settings.get(SETTING_NFRAMES)->valueInt();
  ASSERT_ERROR(_nFrames >= 1);
  
  std::cout << "movie size X : " << _sizeX << std::endl;
  std::cout << "movie size Y : " << _sizeY << std::endl;

  std::cout << "numframes : " << _nFrames << " / " << mpeg3_video_frames(_file, 0) << std::endl;
  std::cout << "movie samplerate : " << mpeg3_sample_rate(_file,0) << std::endl;

  // (Re)allocate one row buffer per scanline.
  for (int i=0;i<_sizeY-1;i++)
    _frame[i] = (RGBA*) realloc(_frame[i], _sizeX * sizeof(RGBA));

  //from the doc :
  //You must allocate 4 extra bytes in the last output_row. This is scratch area for the MMX routines.
  _frame[_sizeY-1] = (RGBA*) realloc(_frame[_sizeY-1], (_sizeX * sizeof(RGBA)) + 4);

//   std::cout << "Time to initialize things: " << timer.getTime()  << std::endl;
//   timer.reset();

  // Fill sequences and distance matrix.
  _distances.resize(_nFrames, _nFrames);
  _sequences.resize(_nFrames);
  _size = _sizeX * _sizeY;
  NOTICE("Filling distance and sequence matrix.");
  for (int i=0; i<_nFrames; ++i)
  {
    Array2D<RGBA>& currImage = _sequences[i];
    currImage.resize(_sizeX, _sizeY);

    // Read current image.
    mpeg3_read_frame(_file, (unsigned char**)_frame, 0, 0, _sizeX, _sizeY, _sizeX, _sizeY, MPEG3_RGBA8888, 0);

    // Add image to sequences.
    for(int y=0;y<_sizeY;y++)
      memcpy(currImage.row(y), _frame[y], sizeof(RGBA) * _sizeX);
    
    // Update distance matrix (this is the bottleneck of the whole algorithm).
    // L2 distance over raw pixel bytes; matrix is kept symmetric.
    _distances(i,i) = 0.0;
    for (int j=0; j<i; ++j)
      _distances(i,j) = _distances(j,i) = L2((unsigned char*)currImage.data(), (unsigned char*)_sequences[j].data(), (size_t)_size*SIZE_RGBA);
  }

//   std::cout << "Time to fill sequence and distance: " << timer.getTime() << std::endl;
//   timer.reset();

#if DEBUG_NOTICE
  std::cout << "Distances: " << std::endl;
  for (int i=0; i<_nFrames; ++i)
  {
    for (int j=0; j<_nFrames; ++j)
      std::cout << _distances(j,i) << " ";
    std::cout << std::endl;
  }
  std::cout << std::endl;
#endif

  // Add temporal coherence to distance matrix by smoothing the distance with linear interpolation.
  // Window 0 = copy, 1 = 2-tap [0.5 0.5], 2 = 4-tap [0.125 0.375 0.375 0.125].
  NOTICE("Computing smoothed distances.");
  int timeWindowLength = CLAMP(_settings.get(SETTING_TIMEWINDOWLENGTH)->valueInt(), 0, 2);
  _smoothedDistances.resize(_nFrames, _nFrames);
  switch (timeWindowLength)
  {
  case 0:
    // NOTE(review): memcpy length is _distances.size() — if size() returns an
    // element count rather than a byte count, this copies too few bytes;
    // verify the matrix type's size() semantics (cf. sizeof-scaled memcpy above).
    memcpy(_smoothedDistances.data(), _distances.data(), _distances.size());
    break;
  case 1:
    for (int i=0; i<_nFrames; ++i)
    {
      for (int j=0; j<_nFrames; ++j)
      {
        int iPrev = MAX(i-1,0);
        int jPrev = MAX(j-1,0);
        _smoothedDistances(j,i) =
          0.5 * _distances(jPrev,iPrev) +
          0.5 * _distances(j,i);
      }
    }
    break;
  case 2:
    for (int i=0; i<_nFrames; ++i)
    {
      for (int j=0; j<_nFrames; ++j)
      {
        int iPrev = MAX(i-1,0);
        int jPrev = MAX(j-1,0);
        int iNext = MIN(i+1,_nFrames-1);
        int jNext = MIN(j+1,_nFrames-1);
        int iPrev2 = MAX(i-2,0);
        int jPrev2 = MAX(j-2,0);
        _smoothedDistances(j,i) =
          0.125 * _distances(jPrev2,iPrev2) +
          0.375  * _distances(jPrev,iPrev) +
          0.375  * _distances(j,i) +
          0.125 * _distances(jNext,iNext);
      }
    }
    break;
  default:;
    error("Wrong time window length specified, please check");
  }

//   std::cout <<  "Time to compute smoothed distances: " << timer.getTime() << std::endl;
//   timer.reset();

#if DEBUG_NOTICE
  std::cout << "Smoothed distances: " << std::endl;
  for (int i=0; i<_nFrames; ++i)
  {
    for (int j=0; j<_nFrames; ++j)
      std::cout << _smoothedDistances(i,j) << " ";
    std::cout << std::endl;
  }
  std::cout << std::endl;
#endif
  
  // Using Q-learning, recompute the matrix of distances.
  NOTICE("Computing final set of distances using Q-learning.");
  _minDistances.resize(_nFrames);

  // Initialize distances.
  // Both matrices are overwritten with pow(smoothed, power), kept symmetric.
  for (int i=0; i<_nFrames; ++i)
    for (int j=0; j<=i; ++j)
      _distances(i,j) = _distances(j,i) = _smoothedDistances(i,j) = _smoothedDistances(j,i) = pow(_smoothedDistances(i,j), power);

  // Q-learning: D(j,i) <- D'(j,i) + alpha * min_k D(j,k), iterated nEpochs
  // times, sweeping i backwards so anticipated future costs propagate.
  for (int t=0; t<nEpochs; ++t)
  {
#if DEBUG_NOTICE
    std::cout << "Q-learn distances step " << t << " : " << std::endl;
    for (int i=0; i<_nFrames; ++i)
    {
      for (int j=0; j<_nFrames; ++j)
        std::cout << _distances(j,i) << " ";
      std::cout << std::endl;
    }
    std::cout << std::endl;
#endif

    // Init min distances.
    for (int j=0; j<_nFrames; ++j)
      _minDistances[j] = min(_distances.row(j), (size_t)_nFrames);
    
    for (int i=_nFrames-1; i>=0; --i)
      for (int j=0; j<_nFrames; ++j)
      {
        // Update distances.
        _distances(j,i) = _smoothedDistances(j,i) + alpha * _minDistances[j];
        // Update min distances.
        _minDistances[j] = min(_distances.row(j), (size_t)_nFrames);
      }
  }

  // std::cout <<  "Time to compute Q-learned distances: " <<  timer.getTime() << std::endl;
//   timer.reset();

  // Calculate the mean (smoothed) distance.
  double meanDistance = sum(_distances.data(), _distances.size()) / (double)_distances.size();

  // Compute cumulative probabilities.
  // Column i holds the cumulative log probabilities of jumping from frame i
  // to frame j, with p(j|i) proportional to exp(-D(j,i)/meanDistance).
  _logCumProbs.resize(_nFrames, _nFrames);
  for (int i=0; i<_nFrames; ++i)
  {
    // Compute logCumProbs.
    _logCumProbs(0,i) = LOG_ZERO;
    for (int j=1; j<_nFrames; ++j)
      _logCumProbs(j,i) = logAdd(_logCumProbs(j-1,i), -_distances(j,i) / meanDistance);
    
    // Normalize to make a true probability.
    double norm = _logCumProbs(_nFrames-1,i);
    for (int j=0; j<_nFrames; ++j)
      _logCumProbs(j,i) -= norm;
  }

//   std::cout << "Time to compute probabilities: "<< timer.getTime() << std::endl;

#if DEBUG_NOTICE
  std::cout << "Distances: " << std::endl;
  for (int i=0; i<_nFrames; ++i)
  {
    for (int j=0; j<_nFrames; ++j)
      std::cout << _distances(j,i) << " ";
    std::cout << std::endl;
  }
  std::cout << std::endl;

  std::cout << "Probabilities: " << std::endl;
  for (int i=0; i<_nFrames; ++i)
  {
    for (int j=0; j<_nFrames; ++j)
      std::cout << exp(_logCumProbs(j,i)) << " ";
    std::cout << std::endl;
  }
  std::cout << std::endl;
#endif
  _currentFrame = 0;
  
  pthread_mutex_unlock(_mutex);
}
示例#9
0
// Recompute all persistence and transition beliefs (in log space) from the
// current factor probabilities and up/down messages, normalise them per
// depth, and replace the stored beliefs.
// Returns: total difference between the old and new beliefs.
double IPFP::computeBeliefs( ){

	double diff = 0.0;
	beliefs_t tmp_beliefs;

	// norms[d] accumulates the log normalisation constant for depth d.
	// It is seeded by fragment i == 0 in the persistence pass below and
	// extended by every other fragment/transition contribution.
	std::vector<double> norms;
	norms.resize(cfg->model_depth);

	const FragmentGraph *fg = moldata->getFragmentGraph();

	//Compute Persistence Beliefs (and track norms)
	tmp_beliefs.ps.resize(fg->getNumFragments());
	for( unsigned int i = 0; i < fg->getNumFragments(); i++ ){
		tmp_beliefs.ps[i].resize(cfg->model_depth);

		for( unsigned int d = 0; d < cfg->model_depth; d++ ){
			
			double tmp;
			// A fragment contributes only if reachable: the root (i==0) at
			// depth 0, or any fragment whose incoming down-message is finite.
			if( (d == 0 && i == 0) || (d > 0 && down_msgs[d-1].getIdx(i) > -DBL_MAXIMUM) ){
				tmp = fprobs.ps[i][d];
				if( d < cfg->model_depth-1 ) 
					tmp += up_msgs[d].getIdx(i);
				if( d > 0 ){
					double msg_val = down_msgs[d-1].getIdx(i);
					if( msg_val > -DBL_MAXIMUM )
						tmp += msg_val;
					else tmp = NULL_PROB;
				}
				if ( i == 0 ) norms[d] = tmp;
				else norms[d] = logAdd(norms[d], tmp);
			}else tmp = NULL_PROB;
			tmp_beliefs.ps[i][d] = tmp;

		}	
	}

	//Compute Transition Beliefs (and track norms)
	tmp_beliefs.tn.resize(fg->getNumTransitions());
	for( unsigned int i = 0; i < fg->getNumTransitions(); i++ ){
		const Transition *t = fg->getTransitionAtIdx(i);
		tmp_beliefs.tn[i].resize(cfg->model_depth);

		for( unsigned int d = 0; d < cfg->model_depth; d++ ){
			
			double tmp;
			// Same reachability test as above, but on the transition's source.
			if( (d == 0 && t->getFromId() == 0) || (d > 0 && down_msgs[d-1].getIdx(t->getFromId()) > -DBL_MAXIMUM) ){
				tmp= fprobs.tn[i][d];
				if( d < cfg->model_depth-1 ) 
					tmp += up_msgs[d].getIdx(t->getToId());
				if( d > 0 ){
					double msg_val = down_msgs[d-1].getIdx(t->getFromId());
					if( msg_val > -DBL_MAXIMUM )
						tmp += msg_val;
					else tmp = NULL_PROB;
				}
				// norms[d] was already seeded in the persistence pass.
				norms[d] = logAdd(norms[d], tmp);
			}else tmp = NULL_PROB;
			tmp_beliefs.tn[i][d] = tmp;
		}

	}

	//Normalise
	for( unsigned int i = 0; i < tmp_beliefs.tn.size(); i++ ){
		for( unsigned int d = 0; d < cfg->model_depth; d++ ){
			tmp_beliefs.tn[i][d] -= norms[d];
		}
	}
	for( unsigned int i = 0; i < tmp_beliefs.ps.size(); i++ ){
		for( unsigned int d = 0; d < cfg->model_depth; d++ ){
			tmp_beliefs.ps[i][d] -= norms[d];
		}
	}

	//Replace the old beliefs with the new ones, tracking the total difference
	diff = copyBeliefsAndComputeDiff( &tmp_beliefs );
	return diff;

}