unsigned int QLearning::select_keepold (unsigned int state){
  assert(initialised);
  assert(state < Q.getM());

  // exploration
  double r = randGen->rand();
  if(r<exploration){
    std::cout << "explore\n";
    return int(randGen->rand()*(double)Q.getN());
  }
  matrix::Matrix vals = Q.row(state);
  vals += vals.mapP(randGen, random_minusone_to_one)*0.001; // this is like random
                                                  // walk if we know nothing
  double m = vals.elementSum() / vals.size();
  r = randGen->rand();
  // with probability 0.8, keep the previous action if it scores above average
  if(vals.val(0,  actions[(t-1)%ringbuffersize])>m && r<0.8){
    std::cout << "keepold\n";
    return actions[(t-1)%ringbuffersize];
  }else {
    int a = argmax(vals);
    // with probability 0.3, select the second best action
    r = randGen->rand();
    if(r<0.3){
      std::cout << "second best\n";
      vals.val(0,a)-=1000;
      return argmax(vals);
    }else {
      std::cout << "best\n";
      return a;
    }
  }
}
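Most examples in this collection lean on small project-specific argmax helpers. As a point of reference, a minimal index-returning argmax over a std::vector might look like the sketch below; this is an illustrative stand-in, not the matrix::Matrix overload the QLearning snippets actually call.

#include <vector>
#include <cstddef>

// Index of the largest element; ties resolve to the earliest index.
// The caller must pass a non-empty vector.
template <typename T>
std::size_t argmax(const std::vector<T>& v) {
  std::size_t best = 0;
  for (std::size_t i = 1; i < v.size(); ++i) {
    if (v[i] > v[best]) best = i;
  }
  return best;
}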
Example no. 2
  /*********************************************
   * Sample particles for a given document
   * 
   * doc: the document whose latent sequence is decoded
   *********************************************/
  LatentSeq DecodeGraph(const Doc doc){
    // ----------------------------------------
    // init
    int nsent = doc.size();
    LatentSeq latseq;
    // ----------------------------------------
    // for each sentence in doc, each latval, compute
    // the posterior prob p(R|cvec, sent)
    vector<float> U;
    for (int sidx = 0; sidx < nsent; sidx++){
      final_hlist.clear();
      for (int val = 0; val < nlatvar; val++){
	ComputationGraph cg;
	BuildSentGraph(doc[sidx], sidx, cg, val);
	float prob = as_scalar(cg.forward());
	U.push_back(prob);
	cg.clear();
      }
      // normalize and get the argmax
      log_normalize(U);
      // greedy decoding
      int max_idx = argmax(U);
      // get the corresponding context vector
      final_h = final_hlist[max_idx];
      U.clear();
      // cerr << "max_latval = " << max_idx << endl;
      latseq.push_back(max_idx);
    }
    // cerr << "====" << endl;
    return latseq;
  }
void AfterMaxToBeforeMaxEnergyRatio::compute() {

  vector<Real> pitch = _pitch.get();
  Real& afterMaxToBeforeMaxEnergyRatio = _afterMaxToBeforeMaxEnergyRatio.get();

  // Remove all 0Hz elements
  vector<Real>::iterator i = pitch.begin();
  while (i != pitch.end()) {
    if (*i <= 0.0) {
      i = pitch.erase(i);
    }
    else {
      i++;
    }
  }

  if (pitch.empty()) {
    throw EssentiaException("AfterMaxToBeforeMaxEnergyRatio: pitch array doesn't contain any non-zero values or is empty");
  }

  int nMax = argmax(pitch);
  Real energyBeforeMax = 0.0;
  Real energyAfterMax = 0.0;
  for (int i=0; i<=nMax; ++i) {
    energyBeforeMax += pitch[i] * pitch[i];
  }
  for (int i=nMax; i<int(pitch.size()); ++i) {
    energyAfterMax += pitch[i] * pitch[i];
  }

  // division by zero will never occur: we have already removed all 0 Hz elements,
  // and the max pitch is included in both the energy before and the energy after.
  afterMaxToBeforeMaxEnergyRatio = energyAfterMax / energyBeforeMax;
}
// the results can be either obtained by soft voting or hard voting
// soft voting: sum of the classification scores from different trees
// hard voting: counts of the hits to each class from different trees
Result RandomForest::eval(cv::Point p)
{
	Result result, treeResult;
	for (int i = 0; i < m_nClassNum; i++) 
		result.Confidence.push_back(0.0);

	for (int i = 0; i < m_nTreeNum; i++)
	{
		treeResult = m_vecTrees[i]->eval(p);
		if (m_bUseSoftVoting) 
			add(treeResult.Confidence, result.Confidence);
		else 
			result.Confidence[treeResult.Prediction]++;
	}

	scale(result.Confidence, 1.0 / m_nTreeNum);
	result.Prediction = argmax(result.Confidence);
	return result;
}
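The voting scheme described in the comment above can be isolated as follows. This is a hedged sketch with plain vectors standing in for the project's Result type; the add/scale helpers the snippet calls are assumed to do exactly this kind of element-wise work.

#include <vector>

// Soft voting: accumulate per-class scores from each tree.
void softVote(const std::vector<double>& treeScores, std::vector<double>& total) {
	for (size_t c = 0; c < total.size(); ++c) total[c] += treeScores[c];
}

// Hard voting: each tree contributes a single count to its predicted class.
void hardVote(int treePrediction, std::vector<double>& total) {
	total[treePrediction] += 1.0;
}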
Example no. 5
void test(
    Net& model,
    torch::Device device,
    DataLoader& data_loader,
    size_t dataset_size) {
  torch::NoGradGuard no_grad;
  model.eval();
  double test_loss = 0;
  int32_t correct = 0;
  for (const auto& batch : data_loader) {
    auto data = batch.data.to(device), targets = batch.target.to(device);
    auto output = model.forward(data);
    test_loss += torch::nll_loss(
                     output,
                     targets,
                     /*weight=*/{},
                     Reduction::Sum)
                     .template item<float>();
    auto pred = output.argmax(1);
    correct += pred.eq(targets).sum().template item<int64_t>();
  }

  test_loss /= dataset_size;
  std::printf(
      "\nTest set: Average loss: %.4f | Accuracy: %.3f\n",
      test_loss,
      static_cast<double>(correct) / dataset_size);
}
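For reference, output.argmax(1) in the snippet reduces over the class dimension, yielding one predicted label per sample. A minimal standalone illustration (assuming a working libtorch setup):

#include <torch/torch.h>
#include <iostream>

int main() {
  // 4 samples x 10 class scores; argmax over dim 1 gives one index per sample.
  torch::Tensor logits = torch::rand({4, 10});
  torch::Tensor pred = logits.argmax(1);  // shape [4], dtype int64
  std::cout << pred << std::endl;
  return 0;
}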
Example no. 6
/*
* writes the word assignments line for a Document to a file
*
*/
void MedSTC::write_word_assignment(FILE* f, Document* doc, double** phi)
{
	fprintf(f, "%03d", doc->length);
	for (int n = 0; n < doc->length; n++) {
		fprintf(f, " %04d:%02d", doc->words[n], argmax(phi[n], m_nK));
	}
	fprintf(f, "\n");
	fflush(f);
}
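This writer (and the nearly identical one in Example no. 8 below) assumes the classic LDA-C style helper that scans a raw double array. A minimal version consistent with those calls (a sketch, not copied from either project):

// Index of the largest of the first n entries of x; ties go to the first.
int argmax(double* x, int n)
{
	int best = 0;
	for (int i = 1; i < n; i++) {
		if (x[i] > x[best]) best = i;
	}
	return best;
}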
int Viterbi_C(int* Observations, int Nobs, int* EstimatedStates, hmm_desc* hmm) {

/*  viterbi: matrix of path probabilities; rows for observations 1..Nobs-1
 *           are initialized to 0 below
 *  sum:     running total of the probabilities, used for normalization
 */

    for (int i = 1; i < Nobs; i++) {
        for (int j = 0; j < 2 * hmm->S; j++) {
            viterbi[i][j] = 0;
        }
    }
        
    float sum = 0;

    //printf("Observation 0 is %d\n", Observations[0]);
    for (int j = 0; j < hmm->S; j++) {
        viterbi[0][j] = hmm->prior[j] * get_index(&((hmm->emission)[0][0]), hmm->V, j, Observations[0]);    //multiplying prior array values with values of columns of emission matrix
        sum += viterbi[0][j];
    }

    if (fabs(sum) > ZERO_THRESHOLD) {
        array_scale_divide(viterbi[0], hmm->S, sum);        //dividing all the normalized maximum probabilities in the viterbi matrix with the sum of all its values
    }

/* Repeat the update for each remaining observation:
 * with VITERBI_ASM defined, the assembly routine ViterbiUpdate_asm is used;
 * otherwise the C implementation ViterbiUpdate_C is called.
 */

    for (int i = 1; i < Nobs; i++) {
        //printf("-----> At iteration number %d with Observation %d\n", i, Observations[i]);
        //print_array(viterbi[i - 1], hmm->S);

        //printf("Array output address before is %x\n", viterbi[i]);
#ifdef VITERBI_ASM
        int return_value = ViterbiUpdate_asm(viterbi[i - 1], viterbi[i], Observations[i], hmm);   
#else
        int return_value = ViterbiUpdate_C(viterbi[i - 1], viterbi[i], hmm, Observations[i]);
#endif
        //printf("Array output address after is %x\n", viterbi[i]);
        // //printf("vitpsi is --> ");
        //print_array(viterbi[i], hmm->S);

        if (return_value != 0) {
            return return_value;
        }
    }

    float max_value;
    EstimatedStates[Nobs - 1] = argmax(viterbi[Nobs - 1], hmm->S, &max_value);
    for (int i = Nobs - 1; i > 0; i--) {
        EstimatedStates[i - 1] = viterbi[i][hmm->S + EstimatedStates[i]];       //storing the next state values from the viterbi matrix which has stored previous states
    }

    return 0;
}
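The final call in Viterbi_C assumes an argmax over a raw float array that also reports the maximum through an out-pointer. A plausible shape for that helper, under that assumption (not the project's exact definition):

// Index of the largest of the first n elements of x; the maximum itself
// is written through max_out when it is non-NULL.
int argmax(const float* x, int n, float* max_out) {
    int best = 0;
    for (int i = 1; i < n; i++) {
        if (x[i] > x[best]) best = i;
    }
    if (max_out) *max_out = x[best];
    return best;
}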
Example no. 8
void write_word_assignment(FILE* f, document* doc, double** phi, lda_model* model) {
	int n;

	fprintf(f, "%03d", doc->length);
	for (n = 0; n < doc->length; n++) {
		fprintf(f, " %04d:%02d", doc->words[n], argmax(phi[n], model->num_topics));
	}
	fprintf(f, "\n");
	fflush(f);
}
Example no. 9
void MaxToTotal::compute() {

  const vector<Real>& envelope = _envelope.get();
  Real& maxToTotal = _maxToTotal.get();

  if (envelope.empty()) {
    throw EssentiaException("MaxToTotal: envelope is empty, maxToTotal is not defined for an empty envelope");
  }

  maxToTotal = Real(argmax(envelope)) / envelope.size();
}
Example no. 10
void MaxToTotal::consume() {
  const vector<Real>& envelope = *((const vector<Real>*)_envelope.getTokens());

  int maxIdx = argmax(envelope);
  if (envelope[maxIdx] > _max) {
    _max = envelope[maxIdx];
    _maxIdx = maxIdx + _size;
  }

  _size += envelope.size();
}
template <typename Dist>
typename Dist::Group argmax( const MixtureOf<Dist>& N ) {
	typename Dist::Scalar w(0);
	typename Dist::Group g;
	
	for(size_t i = 0; i < N.numComponents(); i++) {
		if( i == 0 || w < N.weight(i) ) {
			w = N.weight(i);
			g = argmax( N.component(i) );
		}
	}
	
	return g;
}
static PyObject* w_argmax(PyObject *module, PyObject *args) {
    PyObject *arrayin;
    PyObject *result;
    double *carrayin;
    uint32_t *cresult;
    npy_intp *shape, shapeout[1];
    size_t i, ndim;
    int err, nparallel;
    struct module_state *st = GETSTATE(module);

    if (!PyArg_ParseTuple(args, "Oi", &arrayin, &nparallel)) {
        PyErr_SetString(st->error, "usage argmax(array)");
        return NULL;
    }

    if (!good_array(arrayin, NPY_DOUBLE)) return NULL;

    shape = PyArray_DIMS((PyArrayObject*)arrayin);
    ndim = PyArray_NDIM((PyArrayObject*)arrayin);

    if (ndim != 2){
        PyErr_SetString(st->error, "array shape is not 2D");
        return NULL;
    }

    carrayin = PyArray_DATA((PyArrayObject*)arrayin);

    if ((size_t)shape[0] >= (size_t)UINT32_MAX) {
        PyErr_SetString(st->error, "shape[0] must be smaller than 2^32");
        return NULL;
    }

    shapeout[0] = shape[1];

    result = PyArray_SimpleNew(1, shapeout, NPY_UINT32);
    cresult = PyArray_DATA((PyArrayObject*)result);

    for (i=0; i<(size_t)shapeout[0]; i++){
        cresult[i] = 0;
    }

    err = argmax(carrayin, cresult, (size_t)shape[1], (size_t)shape[0], nparallel);

    if(err != 0){
        Py_DECREF(result);
        return NULL;
    }

    return Py_BuildValue("N", (PyObject *)result);
}
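The binding above forwards to a C-level argmax(carrayin, cresult, shape[1], shape[0], nparallel) that fills one row index per column of a row-major shape[0] x shape[1] array. A serial sketch of that contract (the real kernel presumably threads over columns; nparallel is ignored here):

#include <stdint.h>
#include <stddef.h>

/* For each of ncols columns of the row-major nrows x ncols array `data`,
   store the row index of the column's maximum into `result`. */
int argmax(const double *data, uint32_t *result,
           size_t ncols, size_t nrows, int nparallel) {
    (void)nparallel;  /* serial sketch; a real kernel would parallelize here */
    for (size_t c = 0; c < ncols; c++) {
        size_t best = 0;
        for (size_t r = 1; r < nrows; r++) {
            if (data[r*ncols + c] > data[best*ncols + c]) best = r;
        }
        result[c] = (uint32_t)best;
    }
    return 0;
}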
Example no. 13
unsigned int QLearning::select (unsigned int state){
  assert(initialised);
  assert(state < Q.getM());

  matrix::Matrix as = Q.row(state);
  as += as.mapP(randGen, random_minusone_to_one)*0.001; // this is like random
                                              // walk if we know nothing
  int a= argmax(as);
  // exploration
  double r = randGen->rand();
  if(r<exploration){
    a = (int)(randGen->rand()*(double)Q.getN());
  }
  return a;
}
Example no. 14
File: key.cpp Project: MTG/essentia
void Key::shiftPcp(vector<Real>& pcp) {
  int tuningResolution = pcp.size() / 12;

  normalize(pcp);

  int maxValIndex = argmax(pcp);
  maxValIndex %= tuningResolution;

  vector<Real>::iterator newBegin;
  if (maxValIndex > (tuningResolution / 2)) {
    newBegin = pcp.end() + maxValIndex - tuningResolution;
  }
  else {
    newBegin = pcp.begin() + maxValIndex;
  }
  
  rotate(pcp.begin(), newBegin, pcp.end());
}
Example no. 15
void gldraw(const std::vector<float3> &verts, const std::vector<int3> &tris)
{
	glBegin(GL_TRIANGLES);
	glColor4f(1, 1, 1, 0.25f);
	for (auto t : tris)
	{
		auto n = TriNormal(verts[t[0]], verts[t[1]], verts[t[2]]);
		glNormal3fv(n); auto vn = vabs(n);
		int k = argmax(&vn.x, 3);
		for (int j = 0; j < 3; j++)
		{
			const auto &v = verts[t[j]];
			glTexCoord2f(v[(k + 1) % 3], v[(k + 2) % 3]);
			glVertex3fv(v);
		}
	}
	glEnd();
}
Example no. 16
// Returns in gens a generating set for Zm* /<p>, and in ords the
// order of these generators. Return value is the order of p in Zm*.
long findGenerators(vector<long>& gens, vector<long>& ords, long m, long p)
{
  gens.clear();
  ords.clear();
  // Compute the generators for (Z/mZ)^*
  vector<long> classes(m);
  vector<long> orders(m);

  for (long i=0; i<m; i++) { // initially each element in its own class
    if (GCD(i,m)!=1) classes[i] = 0; // i is not in (Z/mZ)^*
    else             classes[i] = i;
  }

  // Start building a representation of (Z/mZ)^*, first use the generator p
  conjClasses(classes,p % m,m);  // merge classes that have a factor of p

  // The order of p is the size of the equivalence class of 1
#if 0
  long ordP = std::count(classes.begin(), classes.end(), 1);
       // count(from,to,val) returns # of elements in (from,to) with value=val
#else
  long ordP = 0;
  for (long i = 0; i < lsize(classes); i++)
    if (classes[i] == 1) ordP++;
#endif

  // Compute orders in (Z/mZ)^*/<p> while comparing to (Z/mZ)^*
  while (true) {
    compOrder(orders,classes,true,m);
    // if the orders of i in Zm* /<p> and Zm* are not the same, then
    // order[i] contains the order in Zm* /<p> with negative sign

    long idx = argmax(orders, &gtAbsVal); // find the element with largest order
    long largest = orders[idx];

    if (abs(largest) == 1) break;   // Trivial group, we are done

    // store generator with same order as in (Z/mZ)^*
    gens.push_back(idx);
    ords.push_back(largest);
    conjClasses(classes,idx,m); // merge classes that have a factor of idx
  }
  return ordP;
}
Result RandomForest::eval(const Sample &sample)
{
	Result result, treeResult;
	for (int i = 0; i < m_nClassNum; i++) 
		result.Confidence.push_back(0.0);

	for (int i = 0; i < m_nTreeNum; i++)
	{
		treeResult = m_vecTrees[i]->eval(sample);
		if (m_bUseSoftVoting) 
			add(treeResult.Confidence, result.Confidence);
		else 
			result.Confidence[treeResult.Prediction]++;
	}

	scale(result.Confidence, 1.0 / m_nTreeNum);
	result.Prediction = argmax(result.Confidence);
	return result;
}
Example no. 18
void CMT::estimate(const std::vector<std::pair<cv::KeyPoint, int> >& keypointsIN, cv::Point2f& center, float& scaleEstimate, float& medRot, std::vector<std::pair<cv::KeyPoint, int> >& keypoints)
{
    center = cv::Point2f(NAN,NAN);
    scaleEstimate = NAN;
    medRot = NAN;

    //At least 2 keypoints are needed for scale
    if(keypointsIN.size() > 1)
    {
        //sort
        std::vector<PairInt> list;
        for(int i = 0; i < keypointsIN.size(); i++)
            list.push_back(std::make_pair(keypointsIN[i].second, i));
        std::sort(&list[0], &list[0]+list.size(), comparatorPair<int>);
        for(int i = 0; i < list.size(); i++)
            keypoints.push_back(keypointsIN[list[i].second]);

        std::vector<int> ind1;
        std::vector<int> ind2;
        for(int i = 0; i < list.size(); i++)
            for(int j = 0; j < list.size(); j++)
            {
                if(i != j && keypoints[i].second != keypoints[j].second)
                {
                    ind1.push_back(i);
                    ind2.push_back(j);
                }
            }
        if(ind1.size() > 0)
        {
            std::vector<int> class_ind1;
            std::vector<int> class_ind2;
            std::vector<cv::KeyPoint> pts_ind1;
            std::vector<cv::KeyPoint> pts_ind2;
            for(int i = 0; i < ind1.size(); i++)
            {
                class_ind1.push_back(keypoints[ind1[i]].second-1);
                class_ind2.push_back(keypoints[ind2[i]].second-1);
                pts_ind1.push_back(keypoints[ind1[i]].first);
                pts_ind2.push_back(keypoints[ind2[i]].first);
            }

            std::vector<float> scaleChange;
            std::vector<float> angleDiffs;
            for(int i = 0; i < pts_ind1.size(); i++)
            {
                cv::Point2f p = pts_ind2[i].pt - pts_ind1[i].pt;
                //This distance might be 0 for some combinations,
                //as it can happen that there is more than one keypoint at a single location
                float dist = sqrt(p.dot(p));
                float origDist = squareForm[class_ind1[i]][class_ind2[i]];
                scaleChange.push_back(dist/origDist);
                //Compute angle
                float angle = atan2(p.y, p.x);
                float origAngle = angles[class_ind1[i]][class_ind2[i]];
                float angleDiff = angle - origAngle;
                //Fix long way angles
                if(fabs(angleDiff) > CV_PI)
                    angleDiff -= sign(angleDiff) * 2 * CV_PI;
                angleDiffs.push_back(angleDiff);
            }
            scaleEstimate = median(scaleChange);
            if(!estimateScale)
                scaleEstimate = 1;
            medRot = median(angleDiffs);
            if(!estimateRotation)
                medRot = 0;
            votes = std::vector<cv::Point2f>();
            for(int i = 0; i < keypoints.size(); i++)
                votes.push_back(keypoints[i].first.pt - scaleEstimate * rotate(springs[keypoints[i].second-1], medRot));
            //Compute linkage between pairwise distances
            std::vector<Cluster> linkageData = linkage(votes);

            //Perform hierarchical distance-based clustering
            std::vector<int> T = fcluster(linkageData, thrOutlier);
            //Count votes for each cluster
            std::vector<int> cnt = binCount(T);
            //Get largest class
            int Cmax = argmax(cnt);

            //Remember outliers
            outliers = std::vector<std::pair<cv::KeyPoint, int> >();
            std::vector<std::pair<cv::KeyPoint, int> > newKeypoints;
            std::vector<cv::Point2f> newVotes;
            for(int i = 0; i < keypoints.size(); i++)
            {
                if(T[i] != Cmax)
                    outliers.push_back(keypoints[i]);
                else
                {
                    newKeypoints.push_back(keypoints[i]);
                    newVotes.push_back(votes[i]);
                }
            }
            keypoints = newKeypoints;

            center = cv::Point2f(0,0);
            for(int i = 0; i < newVotes.size(); i++)
                center += newVotes[i];
            center *= (1.0/newVotes.size());
        }
    }
}
template <typename IteratorT, typename HeuristicFunctorT>
inline IteratorT argmax(const IteratorT & begin, const IteratorT & end, const HeuristicFunctorT & functor)
{
    return argmax(IteratorT(begin), end, functor);
}
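The inline overload above only copies the begin iterator before delegating; the underlying overload it forwards to presumably scans the range and returns the iterator whose functor score is largest. A sketch under that assumption (the exact parameter qualifiers in the original project may differ):

template <typename IteratorT, typename HeuristicFunctorT>
IteratorT argmax(IteratorT begin, const IteratorT & end, const HeuristicFunctorT & functor)
{
    IteratorT best = begin;
    if (begin == end) return best;  // empty range: begin == end
    auto bestScore = functor(*begin);
    for (++begin; begin != end; ++begin) {
        auto score = functor(*begin);
        if (bestScore < score) {
            best = begin;
            bestScore = score;
        }
    }
    return best;
}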
Example no. 20
void OnsetDetectionGlobal::computeBeatEmphasis() {
  vector<Real>& onsetDetections = _onsetDetections.get();
  onsetDetections.clear();

  vector<complex<Real> > frameFFT;
  _fft->input("frame").set(_frameWindowed);
  _fft->output("fft").set(frameFFT);

  vector<Real> spectrum;
  vector<Real> phase;
  _cartesian2polar->input("complex").set(frameFFT);
  _cartesian2polar->output("magnitude").set(spectrum);
  _cartesian2polar->output("phase").set(phase);


  fill(_phase_1.begin(), _phase_1.end(), Real(0.0));
  fill(_phase_2.begin(), _phase_2.end(), Real(0.0));
  fill(_spectrum_1.begin(), _spectrum_1.end(), Real(0.0));

  vector<vector<Real> > onsetERB(_numberERBBands);
  vector<Real> tempFFT (_numberFFTBins, 0.);  // detection function in FFT bins
  vector<Real> tempERB (_numberERBBands, 0.); // detection function in ERB bands

  // NB: a hack to make use of ERBBands algorithm and not reimplement the
  // computation of gammatone filterbank weights again. As long as ERBBands
  // computes weighted magnitudes in each ERB band instead of energy, we can
  // feed it onset detection values instead of spectrum.
  _erbbands->input("spectrum").set(tempFFT);
  _erbbands->output("bands").set(tempERB);

  size_t numberFrames=0;

  while (true) {
    // get a frame
    _frameCutter->compute();

    if (!_frame.size()) {
      break;
    }

    _windowing->compute();
    _fft->compute();
    _cartesian2polar->compute();

    // Compute complex spectral difference. Optimized, see details in the
    // OnsetDetection algo
    for (int i=0; i<_numberFFTBins; ++i) {
      Real targetPhase = 2*_phase_1[i] + _phase_2[i];
      targetPhase = fmod(targetPhase + M_PI, -2 * M_PI) + M_PI;
      tempFFT[i] = norm(_spectrum_1[i] - polar(spectrum[i], phase[i]-targetPhase));
    }

    // Group detection functions for spectral bins into larger ERB sub-bands using
    // a Gammatone filterbank to improve the likelihood of finding meaningful
    // periodicity in spectral bands.
    _erbbands->compute();
    for (int b=0; b<_numberERBBands; ++b) {
      onsetERB[b].push_back(tempERB[b]);
    }

    _phase_2 = _phase_1;
    _phase_1 = phase;
    _spectrum_1 = spectrum;
    numberFrames += 1;
  }

  // Post-processing found in M.Davies' matlab code, but not mentioned in the
  // paper, and skipped in this implementation:
  // - interpolate detection functions by factor of 2 (by zero-stuffing)
  // - half-rectify
  // - apply a Butterworth low-pass filter with zero-phase (running in forward
  // and backward directions); Matlab: [b,a]=butter(2,0.4);
  // - half-rectify again


  if (!numberFrames) {
    return;
  }

  for (int b=0; b<_numberERBBands; ++b) {
    // TODO tmp = interp(newspec2(pp,:),2);
    // interpolate to the doubled sampling rate interp performs lowpass
    // interpolation by inserting zeros into the original sequence and then
    // applying a special lowpass filter.

    // TODO half-rectify is not in the paper; furthermore, all onsetERB values
    // are supposed to be non-negative, as they are weighted sums of norms.
    // Half-rectification would have been necessary in the case of
    // interpolation, which can produce negative values.
    //for (size_t i=0; i<onsetERB[b].size(); ++i) {
    //  if (onsetERB[b][i] < 0) {
    //    onsetERB[b][i] = 0.;
    //  }
    //}

    // TODO newspecout(pp,:) = max(0,(filtfilt(b,a,(tmp))));
    // --> apply lowpass Butterworth filter, half-rectify again

    // normalize to have a unit variance
    Real bandMean = mean(onsetERB[b]);
    Real bandStddev = stddev(onsetERB[b], bandMean);
    if (bandStddev > 0) {
      for (size_t i=0; i<onsetERB[b].size(); ++i) {
        onsetERB[b][i] /= bandStddev;
      }
    }
  }

  // TODO Matlab: sbdb = max(0,newspecout); // half-rectify again? but onsetERB is
  // already non-negative

  // Compute weights for ODFs for ERB bands

  vector<Real> smoothed;
  vector<Real> tempACF;
  vector<vector <Real> > bandsACF;
  bandsACF.resize(_numberERBBands);
  vector <Real> weightsERB;
  weightsERB.resize(_numberERBBands);

  for (int b=0; b<_numberERBBands; ++b) {
    // Apply adaptive moving average threshold to emphasise the strongest and
    // discard the least significant peaks. Subtract the adaptive mean, and
    // half-wave rectify the output, setting any negative valued elements to zero.

    // Pad the input so the filter output stays aligned for symmetrical
    // averaging; at the edges we want the averager output computed at those
    // positions rather than values smoothed towards zero.

    onsetERB[b].insert(onsetERB[b].end(), _smoothingWindowHalfSize, onsetERB[b].back());
    onsetERB[b].insert(onsetERB[b].end(), _smoothingWindowHalfSize, onsetERB[b].back());

    _movingAverage->input("signal").set(onsetERB[b]);
    _movingAverage->output("signal").set(smoothed);
    _movingAverage->compute();

    smoothed.erase(smoothed.begin(), smoothed.begin() + 2*_smoothingWindowHalfSize);
    for (size_t i=0; i<numberFrames; ++i) {
      onsetERB[b][i] -= smoothed[i];
      if (onsetERB[b][i] < 0) {
        onsetERB[b][i] = 0;
      }
    }

    // Compute band-wise unbiased autocorrelation
    _autocorrelation->input("array").set(onsetERB[b]);
    _autocorrelation->output("autoCorrelation").set(tempACF);
    _autocorrelation->compute();

    // Consider only periods up to _maxPeriodODF ODF samples
    tempACF.resize(_maxPeriodODF);

    // Weight by the tempo preference curve
    vector<Real> tempACFWeighted;
    tempACFWeighted.resize(_maxPeriodODF);

    // Apply comb-filtering to reflect periodicities on different metric levels
    // (integer multiples) and apply tempo preference curve.
    int numberCombs = 4;

    // To account for poor resolution of ACF at short lags, each comb element has
    // width proportional to its relationship to the underlying periodicity, and
    // its height is normalized by its width.

    // 0-th element in autocorrelation vector corresponds to the period of 1.
    // Min value for the 'region' variable is -3 => compute starting from the
    // 3-rd index, which corresponds to the period of 4, until period of 120
    // ODF samples (as in matlab code) or 110 (as in the paper). Generalization:
    // not clear why max period is 120 or 110, should be (512 - 3) / 4 = 127
    int periodMin = 4 - 1;
    int periodMax = (_maxPeriodODF-(numberCombs-1)) / numberCombs - 1;

    for (int comb=1; comb<=numberCombs; ++comb) {
      int width = 2*comb - 1;
      for (int region=1-comb; region<=comb-1; ++region) {
        for (int period=periodMin; period<periodMax; ++period) {
          tempACFWeighted[period] +=
              _weights[period] * tempACF[period*comb + region] / width;
        }
      }
    }

    // We are not interested in the period estimation, but in general salience of
    // the existing periodicity
    weightsERB[b] = tempACFWeighted[argmax(tempACFWeighted)];
  }
  normalize(weightsERB);

  // Matlab M.Davies: take top 40% of weights, zero the rest (not in the paper!)
  vector<Real> sorted(weightsERB);  // copy, so the original weights stay intact
  sort(sorted.begin(), sorted.end());
  Real threshold = sorted[int(floor(_numberERBBands * 0.6))];

  // Compute weighted sum of ODFs for ERB bands for each audio frame
  onsetDetections.resize(numberFrames);
  for (size_t i=0; i<numberFrames; ++i) {
    for (int b=0; b<_numberERBBands; ++b) {
      if (weightsERB[b] >= threshold) {
        onsetDetections[i] += onsetERB[b][i] * weightsERB[b];
      }
    }
  }
}
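The per-band post-processing above (pad, smooth with a moving average, subtract the local mean, half-rectify) can be read in isolation. A simplified sketch with a plain centered window standing in for Essentia's MovingAverage algorithm:

#include <vector>

// Subtract a centered moving average from x and half-wave rectify,
// keeping only peaks that rise above the local mean (the window shrinks
// at the edges).
void adaptiveThresholdSketch(std::vector<float>& x, int halfSize) {
  std::vector<float> out(x.size());
  for (int i = 0; i < (int)x.size(); ++i) {
    int lo = (i - halfSize < 0) ? 0 : i - halfSize;
    int hi = (i + halfSize >= (int)x.size()) ? (int)x.size() - 1 : i + halfSize;
    float mean = 0;
    for (int j = lo; j <= hi; ++j) mean += x[j];
    mean /= (hi - lo + 1);
    float v = x[i] - mean;
    out[i] = (v > 0) ? v : 0;  // half-wave rectification
  }
  x.swap(out);
}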
Example no. 21
void TempoTapDegara::computeBeatsDegara(vector <Real>& detections,
                        const vector<Real>& beatPeriods,
                        const vector<Real>& beatEndPositions,
                        vector<Real>& ticks) {

  // Implementation of Degara's beat tracking using a probabilistic framework
  // (Hidden Markov Model). Tempo estimations throughout the track are assumed
  // to be computed from the algorithm by M. Davies.

  // avoid zeros to prevent a log(0) error later on
  for(size_t i=0; i<detections.size(); ++i) {
    if (detections[i]==0) {
      detections[i] = numeric_limits<Real>::epsilon();
    }
  }

  // Minimum tempo (i.e., maximum period) to be considered
  Real periodMax = beatPeriods[argmax(beatPeriods)];
  // The number of states of the HMM is determined by the largest time between
  // beats allowed (periodMax + 3 standard deviations). Compute a list of
  // inter-beat time intervals corresponding to each state (ignore zero period):
  vector<Real> ibi;
  Real ibiMax = periodMax + 3 *_sigma_ibi;

  ibi.reserve(ceil(ibiMax / _resolutionODF));
  for (Real t=_resolutionODF; t<=ibiMax; t+=_resolutionODF) {
    ibi.push_back(t);
  }
  _numberStates = (int) ibi.size();

  // Compute transition matrix from the inter-beat-interval distribution
  // according to the tempo estimates. Transition matrix is unique for each beat
  // period.
  map<Real, vector<vector<Real> > > transitionMatrix;
  vector<Real> gaussian;
  vector<Real> ibiPDF(_numberStates);

  gaussianPDF(gaussian, _sigma_ibi, _resolutionODF, 0.01 / _resample);
  // Scale down to avoid computational errors;
  // multiplying by _resolutionODF, as in the matlab code, works worse.

  for (size_t i=0; i<beatPeriods.size(); ++i) {
    // no need to recompute if we have seen this beat period before
    if (transitionMatrix.count(beatPeriods[i])==0) {
      // Shift gaussian vector to be centered at beatPeriods[i] secs which is
      // equivalent to round(beatPeriods[i] / _resolutionODF) samples.
      int shift = (int) gaussian.size()/2 - round(beatPeriods[i]/_resolutionODF - 1);
      for (int j=0; j<_numberStates; ++j) {
        int j_new = j + shift;
        ibiPDF[j] = j_new < 0 || j_new >= (int) gaussian.size() ? 0 : gaussian[j_new];
      }
      computeHMMTransitionMatrix(ibiPDF, transitionMatrix[beatPeriods[i]]);
    }
  }

  // Compute observation likelihoods for each HMM state
  vector<vector<Real> > biy;   // _numberStates x _numberFramesODF
  biy.reserve(_numberStates);

  // treat ODF as probability, normalize to 0.99 to avoid numerical problems
  _numberFrames = detections.size();
  vector<Real> beatProbability(_numberFrames);
  vector<Real> noBeatProbability(_numberFrames);
  for (size_t i=0; i<_numberFrames; ++i) {
    beatProbability[i] = 0.99 * detections[i];
    noBeatProbability[i] = 1. - beatProbability[i];
    // NB: work in log space to avoid numerical issues
    beatProbability[i] = (1-_alpha) * log(beatProbability[i]);
    noBeatProbability[i] = (1-_alpha) * log(noBeatProbability[i]);
  }

  biy.push_back(beatProbability);
  biy.insert(biy.end(), _numberStates-1, noBeatProbability);

  // Decoding
  vector<int> stateSequence;
  decodeBeats(transitionMatrix, beatPeriods, beatEndPositions, biy, stateSequence);
  for (size_t i=0; i<stateSequence.size(); ++i) {
    if (stateSequence[i] == 0) { // beat detected
      ticks.push_back(i * _resolutionODF);
    }
  }
}
Example no. 22
void TempoTapDegara::computeBeatPeriodsDavies(vector<Real> detections,
                              vector<Real>& beatPeriods,
                              vector<Real>& beatEndPositions) {
  // Implementation of the beat period detection algorithm by M. Davies.

  adaptiveThreshold(detections, _smoothingWindowHalfSize);

  // Tempo estimation:
  // - Split detection function into overlapping frames.
  // - Compute autocorrelation (ACF) for each frame with bias correction.
  // - Weight it by the tempo preference curve (Rayleigh distribution).

  vector<vector<Real> > observations;
  Real observationsMax = 0;
  vector<Real> frame;
  vector<Real> frameACF;
  vector<Real> frameACFNormalized(_hopSizeODF);

  _frameCutter->input("signal").set(detections);
  _frameCutter->output("frame").set(frame);
  _autocorrelation->input("array").set(frame);
  _autocorrelation->output("autoCorrelation").set(frameACF);

  while (true) {
    // get a frame
    _frameCutter->compute();
    if (!frame.size()) {
      break;
    }
    _autocorrelation->compute();
    // To account for poor resolution of ACF at short lags, each comb element has
    // width proportional to its relationship to the underlying periodicity, and
    // its height is normalized by its width.
    fill(frameACFNormalized.begin(), frameACFNormalized.end(), (Real)0.0);
    for (int comb=1; comb<=_numberCombs; ++comb) {
      int width = 2*comb - 1;
      for (int region=1-comb; region<=comb-1; ++region) {
        for (int period=_periodMinIndex; period<=_periodMaxIndex; ++period) {
          frameACFNormalized[period] +=
              _tempoWeights[period] * frameACF[(period+1)*comb-1 + region] / width;
        }
      }
    }
    // Apply adaptive threshold. It is not mentioned in the paper, but is taken
    // from matlab code by M.Davies (including the smoothing size). The
    // implemented smoothing does not exactly match the one in matlab code;
    // however, the evaluation results were found to be very close.
    adaptiveThreshold(frameACFNormalized, 8);

    // zero weights for periods out of the user-specified range
    fill(frameACFNormalized.begin(), frameACFNormalized.begin() + _periodMinUserIndex+1, (Real) 0.);
    fill(frameACFNormalized.begin() + _periodMaxUserIndex+1, frameACFNormalized.end(), (Real) 0.);

    normalizeSum(frameACFNormalized);
    observations.push_back(frameACFNormalized);

    // Search for the maximum value in observations in the same loop.
    Real tMax = observations.back()[argmax(observations.back())];
    if (tMax > observationsMax) {
      observationsMax = tMax;
    }
  }
  _frameCutter->reset();  // TODO reset here for consequent signal inputs, or should the user do it always?

  _numberFramesODF = observations.size();
  // Add noise
  for (size_t t=0; t<_numberFramesODF; ++t) {
    for (int i=0; i<_hopSizeODF; ++i) {
      observations[t][i] += 0.0001 * observationsMax * (float) rand() / RAND_MAX;
    }
  }

  // find Viterbi path (ODF-frame-wise list of indices of the estimated periods;
  // zero index corresponds to beat period of 1 ODF frame hopsize)
  vector <Real> path;
  findViterbiPath(_tempoWeights, _transitionsViterbi, observations, path);

  beatPeriods.reserve(_numberFramesODF);
  beatEndPositions.reserve(_numberFramesODF);

  for (size_t t=0; t<_numberFramesODF; ++t) {
    beatPeriods.push_back((path[t]+1) / _sampleRateODF);
    beatEndPositions.push_back((t + 1) * _hopDurationODF);
  }
}
Example no. 23
void TempoTapDegara::findViterbiPath(const vector<Real>& prior,
                     const vector<vector<Real> > transitionMatrix,
                     const vector<vector <Real> >& observations,
                     vector<Real>& path) {
  // Find the most-probable (Viterbi) path through the HMM state trellis.

  // Inputs:
  //   prior(i) = Pr(Q(1) = i)
  //   transmat(i,j) = Pr(Q(t+1)=j | Q(t)=i)
  //   observations(i,t) = Pr(y(t) | Q(t)=i)
  //
  // Outputs:
  //   path(t) = q(t), where q1 ... qT is the argmax of the above expression.

  // delta(j,t) = prob. of the best sequence of length t-1 and then going to state j, and O(1:t)
  // psi(j,t) = the best predecessor state, given that we ended up in state j at t

  int numberPeriods = prior.size();

  vector<vector<Real> > delta; // = zeros(numberFramesODF,numberPeriods);
  vector<vector<Real> > psi;   // = zeros(numberFramesODF,numberPeriods);

  vector<Real> deltaNew;
  deltaNew.resize(numberPeriods);

  // weight likelihoods of periods in the first frame by the prior
  for (int i=0; i<numberPeriods; ++i) {
    deltaNew[i] = prior[i] * observations[0][i];
  }
  normalizeSum(deltaNew);
  delta.push_back(deltaNew);

  vector<Real> psiNew;
  // a vector of zeros (arbitrary, since there is no predecessor to the first frame)
  psiNew.resize(numberPeriods);
  psi.push_back(psiNew);

  vector<Real> tmp;
  tmp.resize(numberPeriods);

  for (size_t t=1; t<_numberFramesODF; ++t) {
    for (int j=0; j<numberPeriods; ++j) {
      for (int i=0; i<numberPeriods; ++i) {
        // weight the previous frame's delta by the corresponding row of the transitionMatrix
        tmp[i] = delta.back()[i] * transitionMatrix[j][i];
      }
      int iMax = argmax(tmp);
      deltaNew[j] = tmp[iMax] * observations[t][j];
      psiNew[j] = iMax;
    }
    normalizeSum(deltaNew);
    delta.push_back(deltaNew);
    psi.push_back(psiNew);
  }

  // track the path backwards in time
  path.resize(_numberFramesODF);
  path.back() = argmax(delta.back());
  if (_numberFramesODF >= 2) {
    for (size_t t=_numberFramesODF-2;; --t) {
      path[t] = psi[t+1][path[t+1]];
      if (t==0) { // size_t can't be negative, break on zero
        break;
      }
    }
  }
}
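The delta/psi recursion described in the comments, reduced to a self-contained toy (an illustrative sketch, not the Essentia class; per-frame normalization is omitted for brevity, and trans[i][j] is the probability of moving from state i to state j):

#include <vector>
#include <cstddef>

std::vector<int> viterbiToy(const std::vector<double>& prior,
                            const std::vector<std::vector<double> >& trans,
                            const std::vector<std::vector<double> >& obs) {
  if (obs.empty()) return std::vector<int>();
  std::size_t T = obs.size(), N = prior.size();
  // delta[t][j]: score of the best path ending in state j at time t
  // psi[t][j]:   predecessor state that achieved delta[t][j]
  std::vector<std::vector<double> > delta(T, std::vector<double>(N));
  std::vector<std::vector<int> > psi(T, std::vector<int>(N, 0));
  for (std::size_t j = 0; j < N; ++j) delta[0][j] = prior[j] * obs[0][j];
  for (std::size_t t = 1; t < T; ++t) {
    for (std::size_t j = 0; j < N; ++j) {
      std::size_t best = 0;
      for (std::size_t i = 1; i < N; ++i)
        if (delta[t-1][i] * trans[i][j] > delta[t-1][best] * trans[best][j]) best = i;
      delta[t][j] = delta[t-1][best] * trans[best][j] * obs[t][j];
      psi[t][j] = (int)best;
    }
  }
  // backtrack from the best final state
  std::vector<int> path(T);
  std::size_t last = 0;
  for (std::size_t j = 1; j < N; ++j) if (delta[T-1][j] > delta[T-1][last]) last = j;
  path[T-1] = (int)last;
  for (std::size_t t = T - 1; t > 0; --t) path[t-1] = psi[t][path[t]];
  return path;
}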
Example no. 24
// Generate the representation of Z_m^* for a given odd integer m
// and plaintext base p
PAlgebra::PAlgebra(unsigned long mm, unsigned long pp)
{
    m = mm;
    p = pp;

    assert( (m&1) == 1 );
    assert( ProbPrime(p) );
    // replaced by Mahdi after a conversation with Shai
    // assert( m > p && (m % p) != 0 );	// original line
    assert( (m % p) != 0 );
    // end of replace by Mahdi
    assert( m < NTL_SP_BOUND );

    // Compute the generators for (Z/mZ)^*
    vector<unsigned long> classes(m);
    vector<long> orders(m);

    unsigned long i;
    for (i=0; i<m; i++) { // initially each element in its own class
        if (GCD(i,m)!=1)
            classes[i] = 0; // i is not in (Z/mZ)^*
        else
            classes[i] = i;
    }

    // Start building a representation of (Z/mZ)^*, first use the generator p
    conjClasses(classes,p,m);  // merge classes that have a factor of p

    // The order of p is the size of the equivalence class of 1
    ordP = (unsigned long) count (classes.begin(), classes.end(), 1);

    // Compute orders in (Z/mZ)^*/<p> while comparing to (Z/mZ)^*
    long idx, largest;
    while (true) {
        compOrder(orders,classes,true,m);
        idx = argmax(orders);      // find the element with largest order
        largest = orders[idx];

        if (largest <= 0) break;   // stop comparing to order in (Z/mZ)^*

        // store generator with same order as in (Z/mZ)^*
        gens.push_back(idx);
        ords.push_back(largest);
        conjClasses(classes,idx,m); // merge classes that have a factor of idx
    }
    // Compute orders in (Z/mZ)^*/<p> without comparing to (Z/mZ)^*
    while (true) {
        compOrder(orders,classes,false,m);
        idx = argmax(orders);      // find the element with largest order
        largest = orders[idx];

        if (largest <= 0) break;   // we have the trivial group, we are done

        // store generator with different order than (Z/mZ)^*
        gens.push_back(idx);
        ords.push_back(-largest);  // store with negative sign
        conjClasses(classes,idx,m);  // merge classes that have a factor of idx
    }

    nSlots = qGrpOrd();
    phiM = ordP * nSlots;

    // Allocate space for the various arrays
    T.resize(nSlots);
    dLogT.resize(nSlots*gens.size());
    Tidx.assign(m,-1);    // allocate m slots, initialize them to -1
    zmsIdx.assign(m,-1);  // allocate m slots, initialize them to -1
    for (i=idx=0; i<m; i++) if (GCD(i,m)==1) zmsIdx[i] = idx++;

    // Now fill the Tidx and dLogT translation tables. We identify an element
    // t\in T with its representation t = \prod_{i=0}^n gi^{ei} mod m (where
    // the gi's are the generators in gens[]) , represent t by the vector of
    // exponents *in reverse order* (en,...,e1,e0), and order these vectors
    // in lexicographic order.

    // buffer is initialized to all-zero, which represents 1=\prod_i gi^0
    vector<unsigned long> buffer(gens.size()); // temporarily holds exponents
    i = idx = 0;
    do {
        unsigned long t = exponentiate(buffer);
        for (unsigned long j=0; j<buffer.size(); j++) dLogT[idx++] = buffer[j];
        T[i] = t;       // The i'th element in T is t
        Tidx[t] = i++;  // the index of t in T is i

        // increment buffer by one (in lexicographic order)
    } while (nextExpVector(buffer)); // until we cover all the group

    PhimX = Cyclotomic(m); // compute and store Phi_m(X)

    // initialize prods array
    long ndims = gens.size();
    prods.resize(ndims+1);
    prods[ndims] = 1;
    for (long j = ndims-1; j >= 0; j--) {
        prods[j] = OrderOf(j) * prods[j+1];
    }
}
Example no. 25
vector<int> CaffeMobile::PredictTopK(const cv::Mat &img, int k) {
  const vector<float> probs = Forward(img);
  k = std::min<int>(std::max(k, 1), probs.size());
  return argmax(probs, k);
}
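This wrapper (and the two in Examples no. 28 and no. 29 below) assumes a k-best argmax returning the indices of the k largest probabilities in descending order. A minimal sketch of such a helper (illustrative, not Caffe's own code; k is assumed already clamped to [1, v.size()], as the callers do):

#include <algorithm>
#include <vector>

std::vector<int> argmax(const std::vector<float>& v, int k) {
  std::vector<int> idx(v.size());
  for (int i = 0; i < (int)v.size(); ++i) idx[i] = i;
  // move the k highest-scoring indices to the front, best first
  std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                    [&v](int a, int b) { return v[a] > v[b]; });
  idx.resize(k);
  return idx;
}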
Example no. 26
void MultiPitchKlapuri::compute() {
  const vector<Real>& signal = _signal.get();
  vector<vector<Real> >& pitch = _pitch.get();
  if (signal.empty()) {
    pitch.clear();
    return;
  }

  // Pre-processing
  vector<Real> frame;
  _frameCutter->input("signal").set(signal);
  _frameCutter->output("frame").set(frame);

  vector<Real> frameWindowed;
  _windowing->input("frame").set(frame);
  _windowing->output("frame").set(frameWindowed);

  // Spectral peaks
  vector<Real> frameSpectrum;
  _spectrum->input("frame").set(frameWindowed);
  _spectrum->output("spectrum").set(frameSpectrum);

  vector<Real> frameFrequencies;
  vector<Real> frameMagnitudes;
  _spectralPeaks->input("spectrum").set(frameSpectrum);
  _spectralPeaks->output("frequencies").set(frameFrequencies);
  _spectralPeaks->output("magnitudes").set(frameMagnitudes);

  // Spectral whitening
  vector<Real> frameWhiteMagnitudes;
  _spectralWhitening->input("spectrum").set(frameSpectrum);
  _spectralWhitening->input("frequencies").set(frameFrequencies);
  _spectralWhitening->input("magnitudes").set(frameMagnitudes);
  _spectralWhitening->output("magnitudes").set(frameWhiteMagnitudes);
  
  // Pitch salience contours
  vector<Real> frameSalience;
  _pitchSalienceFunction->input("frequencies").set(frameFrequencies);
  _pitchSalienceFunction->input("magnitudes").set(frameMagnitudes);
  _pitchSalienceFunction->output("salienceFunction").set(frameSalience);

  vector<Real> frameSalienceBins;
  vector<Real> frameSalienceValues;
  _pitchSalienceFunctionPeaks->input("salienceFunction").set(frameSalience);
  _pitchSalienceFunctionPeaks->output("salienceBins").set(frameSalienceBins);
  _pitchSalienceFunctionPeaks->output("salienceValues").set(frameSalienceValues);

  vector<Real> nearestBinWeights;
  nearestBinWeights.resize(_binsInSemitone + 1);
  for (int b=0; b <= _binsInSemitone; b++) {
    nearestBinWeights[b] = pow(cos((Real(b)/_binsInSemitone)* M_PI/2), 2);
  }
  
  while (true) {
    // get a frame
    _frameCutter->compute();

    if (!frame.size()) {
      break;
    }

    _windowing->compute();

    // calculate spectrum
    _spectrum->compute();

    // calculate spectral peaks
    _spectralPeaks->compute();
    
    // whiten the spectrum
    _spectralWhitening->compute();

    // calculate salience function
    _pitchSalienceFunction->compute();

    // calculate peaks of salience function
    _pitchSalienceFunctionPeaks->compute();
      
    // no peaks in this frame
    if (!frameSalienceBins.size()) {
      continue;
    }

    // Joint F0 estimation (pitch salience function peaks as candidates) 

    // compute the cent-scaled spectrum
    fill(_centSpectrum.begin(), _centSpectrum.end(), (Real) 0.0);
    for (int i=0; i<(int)frameSpectrum.size(); i++) {
      Real f = (Real(i) / Real(frameSpectrum.size())) * (_sampleRate/2);
      int k = frequencyToCentBin(f);
      if (k>0 && k<_numberBins) {
        _centSpectrum[k] += frameSpectrum[i];
      }
    }
  
    // get indices corresponding to harmonics of each found peak
    vector<vector<int> > kPeaks;
    for (int i=0; i<(int)frameSalienceBins.size(); i++) {
      vector<int> k;
      Real f = _referenceFrequency * pow(_centToHertzBase, frameSalienceBins[i]);
      for (int m=0; m<_numberHarmonicsMax; m++) {
        // find the exact peak for each harmonic
        int kBin = frequencyToCentBin(f*(m+1));
        int kBinMin = max(0, int(kBin-_binsInSemitone));
        int kBinMax = min(_numberBins-1, int(kBin+_binsInSemitone));
        vector<Real> specSegment;
        for (int ii=kBinMin; ii<=kBinMax; ii++) {
          specSegment.push_back(_centSpectrum[ii]);
        }
        kBin = kBinMin + argmax(specSegment);
        k.push_back(kBin);
      }
      kPeaks.push_back(k);
    }
    
    // candidate Spectra
    vector<vector<Real> > Z;
    for (int i=0; i<(int)frameSalienceBins.size(); i++) {
      vector<Real> z(_numberBins, 0.);
      for (int h=0; h<_numberHarmonicsMax; h++) {
        int hBin = kPeaks[i][h];
        for(int b = max(0, hBin-_binsInSemitone); b <= min(_numberBins-1, hBin+_binsInSemitone); b++) {
          z[b] += nearestBinWeights[abs(b-hBin)] * getWeight(hBin, h) * 0.25; // 0.25 is cancellation parameter
        }
      }
      Z.push_back(z);
    }

    // TODO: segfault somewhere here
    // inhibition function
    int numCandidates = frameSalienceBins.size();
    vector<vector<Real> > inhibition;

    for (int i=0; i<numCandidates; i++) {
      vector<Real> inh(numCandidates, 0.); 
      for (int j=0; j<numCandidates; j++) {
        for (int m=0; m<_numberHarmonicsMax; m++) {
          inh[j] += getWeight(kPeaks[i][m], m) * _centSpectrum[kPeaks[i][m]] * Z[j][kPeaks[i][m]];
        }
      }
      inhibition.push_back(inh);
    }

    // polyphony estimation initialization
    vector<int> finalSelection;
    int p = 1;
    Real gamma = 0.73;
    Real S = frameSalienceValues[argmax(frameSalienceValues)] / pow(p,gamma);
    finalSelection.push_back(argmax(frameSalienceValues));
    
    // goodness function
    vector<vector<Real> > G;
    for (int i=0; i<numCandidates; i++) {
      vector<Real> g;
      for (int j=0; j<numCandidates; j++) {
        if(i==j) {
          g.push_back(0.0);
        } else {
          Real g_val = frameSalienceValues[i] + frameSalienceValues[j] - (inhibition[i][j] + inhibition[j][i]);
          g.push_back(g_val);
        }
      }
      G.push_back(g);
    }
  
    vector<vector<int> > selCandInd;
    vector<Real> selCandVal;
    vector<Real> localF0;
    
    while (true) {
      // find numCandidates largest values
      Real maxVal=-1;
      int maxInd_i=0;
      int maxInd_j=0;
  
      for (int I=0; I < numCandidates; I++) {
        vector<int> localInd;
        for (int i=0; i < numCandidates; i++) {
          for (int j=0; j < numCandidates; j++) {
            if (G[i][j] > maxVal) {
              maxVal = G[i][j];
              maxInd_i = i;
              maxInd_j = j;
            }
          }
        }

        localInd.push_back(maxInd_i);
        localInd.push_back(maxInd_j);
        selCandInd.push_back(localInd);
        selCandVal.push_back(G[maxInd_i][maxInd_j]);
        G[maxInd_i][maxInd_j] = -1;
        maxVal = -1;
        maxInd_i = 0;
        maxInd_j = 0;
      }
  
      // re-estimate polyphony
      p++;
      Real Snew = selCandVal[argmax(selCandVal)] / pow(p,gamma);
      if (Snew > S) {
        finalSelection.clear();
        for (int i=0; i<(int)selCandInd[0].size(); i++) {
          finalSelection.push_back(selCandInd[0][i]);
        }
        // re-calculate goodness function
        for (int i=0; i<numCandidates; i++) {
          for (int j=0; j<numCandidates; j++) {
            G[i][j] += frameSalienceValues[j];
            for (int ii=0; ii<(int)selCandInd[i].size(); ii++) {
              G[i][j] -= (inhibition[selCandInd[i][ii]][j] + inhibition[j][selCandInd[i][ii]]);
            }
          }
        }
        S = Snew;
      } 
      else {
        // add estimated f0 to frame
        for (int i=0; i<(int)finalSelection.size(); i++) {
          Real freq = _referenceFrequency * pow(_centToHertzBase, frameSalienceBins[finalSelection[i]]);
          localF0.push_back(freq);
        }
        break;
      }
    }
    pitch.push_back(localF0);
  }
}
Example no. 27
void *tracking_thread(tracker_t *trac)
{
  // Lock the mutex.
  errno = pthread_mutex_lock(&(trac->tl->tracker_busy));
  if (errno)
    err(EXIT_FAILURE, "pthread_mutex_lock() failed");

  while (1) {
    // Wait until there is work to do.
    while (trac->tl->reason == NONE) {
      errno = pthread_cond_wait(&(trac->tl->wake_up), &(trac->tl->tracker_busy));
      if (errno)
        err(EXIT_FAILURE, "pthread_cond_wait() failed");
    }

    // Received quit from UI.
    if (trac->tl->reason & QUIT) {
      printf("Tracker thread shutting down!\n");
      errno = pthread_mutex_unlock(&(trac->tl->tracker_busy));
      if (errno)
        err(EXIT_FAILURE, "pthread_mutex_unlock() failed");
      pthread_exit(NULL);
    }

    // Received tap from the UI.
    if (trac->tl->reason & TAP) {
      jack_nframes_t tap_delta = trac->ud->tap_time - trac->last_tap;
      if (trac->last_tap != 0 && 
          tap_delta < MAX_TAP_DELTA &&
          tap_delta > MIN_TAP_DELTA) {
        trac->pd->beat_period = tap_delta;
        trac->pd->cur_beat = trac->ud->tap_time;
        trac->pd->next_beat = trac->ud->tap_time + tap_delta;
        trac->pd->calc_called = false;
        printf("next_beat: %u, tap_delta: %u, bpm: %f\n", trac->pd->next_beat, tap_delta, 60.*48000./tap_delta);
      }
      trac->last_tap = trac->ud->tap_time;
    }

    // Received beat from process.
    if (trac->tl->reason & BEAT) {
      printf("Beat (bpm = %f)\n", (60.*48000.)/trac->pd->beat_period);
    }

    // Received calc from process.
    if (trac->tl->reason & CALC) {

      /*
       * Tempo (i.e. beat period) tracking.
       */

      // Beat period in odf samples.
      jack_nframes_t beat_period = trac->pd->beat_period/BUFSIZE;

      // Initial beat position guess, in odf samples (half a period).
      jack_nframes_t beat_pos = beat_period/2;

      // Calculate autocorrelation.
      acorr(trac->acf, trac->pd->odf, 8*beat_period);

      // Comb filter and weight the autocorrelation.
      comb_acf(trac->cacf, trac->acf, beat_period, trac->sigma_t);

#if 0 // Print odf, acf & cacf.
      uint_t ps = 8*beat_period;
      for (jack_nframes_t n = 0; n < ps; n++) {
        printf("o[%u] = %f\ta[%u] = %f\t", n, trac->pd->odf->data[(trac->pd->odf->ridx + ODFSIZE - ps+1 + n) % ODFSIZE], n, trac->acf[n]);
        if (n < 2*beat_period)
          printf("c[%u] = %f\n", n, trac->cacf[n]);
        else
          printf("\n");
      }
#endif

      // Beat period is index of maximum element of comb filtered and weighted autocorrelation function.
      beat_period = argmax(trac->cacf, beat_period);

#if 0 // Print new beat period in odf samples.
      printf("beat_period = %u\n", beat_period);
#endif

      /*
       * Beat tracking.
       */

      // Comb filter onset detection function and apply weighting.
      comb_odf(trac->codf, trac->pd->odf, beat_pos, beat_period, trac->sigma_b);

      // Beat position is index of maximum element of comb filtered and weighted onset detection function.
      beat_pos = argmax(trac->codf, beat_pos);

#if 0 // Print beat position in odf samples from ridx.
      printf("beat_pos = %u\n", beat_pos);
#endif

      // Update beat pos.
      // TODO: Thread safety!
      trac->pd->beat_period = beat_period * BUFSIZE;
      trac->pd->cur_beat = trac->pd->next_beat;
      trac->pd->next_beat = trac->pd->cur_beat + trac->pd->beat_period;
      trac->pd->beat_called = false;
      trac->pd->calc_called = false;
      //printf("cur_beat = %d\n", trac->pd->cur_beat);
      //printf("next_beat = %d\n", trac->pd->next_beat);
    }

    // Reset reason.
    trac->tl->reason = NONE;
  }
}
Example no. 28
vector<caffe_result> CaffeMobile::PredictTopK(const string &img_path, int k) {
    const vector<float> probs = Forward(img_path);
    k = std::min<int>(std::max(k, 1), probs.size());
    vector<int> topK = argmax(probs, k);
    return create_results(topK,probs,k);
}
Example no. 29
vector<caffe_result> CaffeMobile::predict_top_k(cv::Mat& cv_img, int k) {
    const vector<float> probs = Forward(cv_img);
    k = std::min<int>(std::max(k, 1), probs.size());
    vector<int> topK = argmax(probs,k);
    return create_results(topK,probs,k);
}
Example no. 30
/**
 * Predict the single best label sequence given the features for an
 * observation sequence by Viterbi algorithm.
 *
 * @param Fs a 2D {@code Matrix} array, where F[i][j] is the sparse
 * 			 feature matrix for the j-th feature of the observation sequence
 *	 	 	 at position i, i.e., f_{j}^{{\bf x}, i}
 *
 * @return the single best label sequence for an observation sequence
 *
 */
int* CRF::predict(Matrix*** Fs, int length) {

	Matrix** Ms = computeTransitionMatrix(Fs, length);

	/*
	 * Alternative backward recursion with scaling for the Viterbi
	 * algorithm
	 */
	int n_x = length;
	double* b = allocateVector(n_x);
	// Matrix Beta_tilta = new BlockRealMatrix(numStates, n_x);
	Vector** Beta_tilta = new Vector*[n_x];

	for (int i = n_x - 1; i >= 0; i--) {
		if ( i == n_x - 1) {
			// Beta_tilta.setColumnMatrix(i, ones(numStates, 1));
			Beta_tilta[i] = new DenseVector(numStates, 1);
		} else {
			// Beta_tilta.setColumnMatrix(i, mtimes(Ms[i + 1], Beta_tilta.getColumnMatrix(i + 1)));
			Beta_tilta[i] = &Ms[i + 1]->operate(*Beta_tilta[i + 1]);
		}
		b[i] = 1.0 / sum(*Beta_tilta[i]);
		// Beta_tilta.setColumnMatrix(i, times(b[i], Beta_tilta.getColumnMatrix(i)));
		timesAssign(*Beta_tilta[i], b[i]);
	}

	/*fprintf("Beta:\n");
				display(Beta_tilta);*/

	/*
	 * Gammas[i](y_{i-1}, y_[i]) is P(y_i|y_{i-1}, Lambda), thus each row of
	 * Gammas[i] should be sum to one.
	 */

	double** Gamma_i = allocate2DArray(numStates, numStates, 0);
	double** Phi =  allocate2DArray(n_x, numStates, 0);
	double** Psi =  allocate2DArray(n_x, numStates, 0);
	double** M_i = null;
	double* M_i_Row = null;
	double* Gamma_i_Row = null;
	double* Beta_tilta_i = null;
	double* Phi_i = null;
	double* Phi_im1 = null;
	double** maxResult = null;
	for (int i = 0; i < n_x; i++) {
		M_i = ((DenseMatrix*) Ms[i])->getData();
		Beta_tilta_i = ((DenseVector*) Beta_tilta[i])->getPr();
		for (int y_im1 = 0; y_im1 < numStates; y_im1++) {
			M_i_Row = M_i[y_im1];
			Gamma_i_Row = Gamma_i[y_im1];
			assign(Gamma_i_Row, M_i_Row, numStates);
			timesAssign(Gamma_i_Row, Beta_tilta_i, numStates);
			sum2one(Gamma_i_Row, numStates);
		}
		Phi_i = Phi[i];
		if (i == 0) { // Initialization
			log(Phi_i, Gamma_i[startIdx], numStates);
		} else {
			Phi_im1 = Phi[i - 1];
			for (int y_im1 = 0; y_im1 < numStates; y_im1++) {
				Gamma_i_Row = Gamma_i[y_im1];
				logAssign(Gamma_i_Row, numStates);
				plusAssign(Gamma_i_Row, Phi_im1[y_im1], numStates);
			}
			maxResult = max(Gamma_i, numStates, numStates, 1);
			Phi[i] = maxResult[0];
			Psi[i] = maxResult[1];
		}

	}

	/*
	 *  Predict the single best label sequence.
	 */
	// double[] phi_n_x = Phi.getRow(n_x - 1);
	double* phi_n_x = Phi[n_x - 1];
	int* YPred = allocateIntegerVector(n_x);
	for (int i = n_x - 1; i >= 0; i--) {
		if (i == n_x - 1) {
			YPred[i] = argmax(phi_n_x, numStates);
		} else {
			// YPred[i] = (int)Psi.getEntry(i + 1, YPred[i + 1]);
			YPred[i] = (int) Psi[i + 1][YPred[i + 1]];
		}
	}

	/*display(Phi);
				display(Psi);*/

	/*
	 *  Predict the optimal conditional probability: P*(y|x)
	 */
	double p = exp(phi_n_x[YPred[n_x - 1]]);
	fprintf("P*(YPred|x) = %g\n", p);

	return YPred;

}