Example no. 1
bool UManArc::getSMRCLcmd(char * buf, int bufCnt, double maxDist)
{
  int i, n, m;
  double d, dd, da;
  char * p1;
  bool result = true;
  double v;
  double dist;
  //
  // a turn is not allowed at (near) zero speed, so enforce a minimum speed
  if (fabs(vel) < 0.2)
    v = 0.25;
  else
    v = vel;
  //
  if (radius < 3.0)
  {
    dist = mind(maxDist, getDistance());
    snprintf(buf, bufCnt, "turnr %.3f %.3f @v%.2f @a%.3f :($drivendist > %.2f)",
             maxd(radius, 0.15), // use at least 15cm turn radius
             angle * 180.0 / M_PI, v,
             maxd(getMinAcc(), fabs(acc)), dist);
    n = strlen(buf);
  }
  else
  { // with a large radius it is too unsafe to finish on the implicit angle criterion alone,
    // so divide the arc into small steps
    m = maxi(1, roundi(fabs(angle) / 0.05)); // about 3 deg steps
    d = radius * angle;
    dd = d / double(m);
    da = angle / double(m);
    n = 0;
    p1 = buf;
    dist = mind(maxDist, getDistance());
    for (i = 0; i < m; i++)
    { // one command takes a little less than 60 characters,
      // so at least 60 characters must be left in the buffer
      result = ((bufCnt - n) > 60);
      if (not result)
      { // no more space for next command
        printf("*** UManArc::getSMRCLcmd: no space left (%d left) in buffer (size %d) for next drive command\n", bufCnt, n);
        break;
      }
      // space for next command line
      snprintf(p1, bufCnt - n, "turnr %.3f %.3f \"rad\" @v%.2f @a%.3f :($drivendist > %.2f)\n",
               radius, da, v, maxd(getMinAcc(), fabs(acc)), dd);
      n += strlen(p1);
      p1 = &buf[n];
      dist -= dd;
      if (dist < 0.0)
        break;
    }
    // remove last '\n'
    if (n > 0)
      buf[n-1] = '\0';
  }
  return (n < bufCnt);
}
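These listings call small helper functions (mind, maxd, minf, maxf, mini, maxi, roundi, absd, sqr) that are defined elsewhere in their respective projects. Below is a minimal sketch of plausible definitions, assuming the usual type-suffixed min/max/round convention; the real projects may implement them as macros or with different rounding. Note that in Examples 6, 8 and 10 the name mind refers to a local variable, not to this helper.

// hypothetical helper definitions, not taken from any of the projects above
static inline double mind(double a, double b) { return a < b ? a : b; }
static inline double maxd(double a, double b) { return a > b ? a : b; }
static inline float  minf(float a, float b)   { return a < b ? a : b; }
static inline float  maxf(float a, float b)   { return a > b ? a : b; }
static inline int    mini(int a, int b)       { return a < b ? a : b; }
static inline int    maxi(int a, int b)       { return a > b ? a : b; }
static inline double absd(double a)           { return a < 0.0 ? -a : a; }
static inline double sqr(double a)            { return a * a; }
// round to nearest integer, halves away from zero
static inline int    roundi(double a)         { return (int)(a < 0.0 ? a - 0.5 : a + 0.5); }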
Example no. 2
File: frustum.cpp Project: hglm/sre
void sreScissors::UpdateWithProjectedPoint(float x, float y, double z) {
    if (z >= - 1.001) {
        // Beyond the near plane.
        z = maxd(- 1.0, z);
        double depth = 0.5 * z + 0.5;
        near = mind(depth, near);
        far = maxd(depth, far);
        left = minf(x, left);
        right = maxf(x, right);
        bottom = minf(y, bottom);
        top = maxf(y, top);
        return;
    }
    sreMessage(SRE_MESSAGE_WARNING,
        "Unexpected vertex in front of the near plane in UpdateWorldSpaceBoundingHull "
        "z = %lf", z);
    // In front of the near plane.
    near = 0;
    far = 1.0;
    // We know that the light volume intersects the frustum, so it must extend to
    // both sides of the near plane.
    // Assume it fills the whole viewport (not optimal).
    left = - 1.0f;
    right = 1.0f;
    bottom = - 1.0f;
    top = 1.0f;
}
Example no. 3
 void Blend(double *s, double *d, double *c, int modes, int moded) {
     double fs[4];
     double fd[4];
     BlendScaleFactors(s, d, c, modes,fs);
     BlendScaleFactors(s, d, c, moded,fd);
     for(int i=0; i<4; i++) { s[i]=mind(1, s[i]*fs[i]+d[i]*fd[i]); }
 }
Example no. 4
inline double range( double a, double b ) {
    long r = random();
    double rate = (double)r / (double)(0x7fffffff);
    double _a = mind(a,b);
    double _b = maxd(a,b);
    return _a + (_b-_a)*rate;
}
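A possible use of range(), assuming the POSIX random()/srandom() pair from <cstdlib>; random() returns values in [0, 2^31 - 1], so rate lies in [0, 1] and the result falls between a and b regardless of their order:

#include <cstdio>
#include <cstdlib>   // random(), srandom() (POSIX)
#include <ctime>

// assumes mind(), maxd() and range() from above are visible in this translation unit
int main() {
    srandom((unsigned) time(NULL));           // seed the generator once
    for (int i = 0; i < 5; ++i)
        printf("%f\n", range(2.0, -1.0));     // uniform draw in [-1, 2]
    return 0;
}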
Example no. 5
void gline(float x1, float y1, float x2, float y2, float width, uint8_t r, uint8_t g, uint8_t b, uint8_t a) {
	#ifdef	OPENGL
		glColor4f(((float) r) / 255.0, ((float) g) / 255.0, ((float) b) / 255.0, ((float) a) / 255.0);
		//   c1x,c1y
		//  0,0......
		//   c4x,c4y  ........       c2x,c2y
		//                    ........ px,py
		//                       c3x,c3y
		float c1x, c1y, c1l, c2x, c2y, c2l, c3x, c3y, c3l, c4x, c4y, c4l;

		float px = x2 - x1;
		float py = y2 - y1;

		c1x = -py;
		c1y = px;
		c1l = hypotf(c1x, c1y);
		c1x = (c1x * width / c1l / 2.0) + x1;
		c1y = (c1y * width / c1l / 2.0) + y1;

		c2x = -py;
		c2y = px;
		c2l = hypotf(c2x, c2y);
		c2x = (c2x * width / c2l / 2.0) + px + x1;
		c2y = (c2y * width / c2l / 2.0) + py + y1;

		c3x = py;
		c3y = -px;
		c3l = hypotf(c3x, c3y);
		c3x = (c3x * width / c3l / 2.0) + px + x1;
		c3y = (c3y * width / c3l / 2.0) + py + y1;

		c4x = py;
		c4y = -px;
		c4l = hypotf(c4x, c4y);
		c4x = (c4x * width / c4l / 2.0) + x1;
		c4y = (c4y * width / c4l / 2.0) + y1;

		if (width == 4.0)
			printf("LINE: [%3.0f,%3.0f]->[%3.0f,%3.0f]->[%3.0f,%3.0f]->[%3.0f,%3.0f]\n", c1x, c1y, c2x, c2y, c3x, c3y, c4x, c4y);

		glVertex2f(c1x, c1y);
		glVertex2f(c2x, c2y);
		glVertex2f(c3x, c3y);
		glVertex2f(c4x, c4y);
	#else
	thickLineRGBA(Surf_Display,
		(x1 - viewPortL) * zoomFactor,
		(viewPortB - y1) * zoomFactor,
		(x2 - viewPortL) * zoomFactor,
		(viewPortB - y2) * zoomFactor,
		mind(maxd(width * zoomFactor, 1), 2),
		r, g, b, a
		);
	#endif
}
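The OpenGL branch above builds a width-thick quad by offsetting both endpoints of the segment along its unit normal; below is a standalone sketch of that corner computation (line_quad is a hypothetical helper, not part of the original file):

#include <cmath>
#include <cstdio>

// Compute the four corners of a quad of the given width around the segment
// (x1,y1)-(x2,y2), mirroring the c1..c4 math in gline() above.
static void line_quad(float x1, float y1, float x2, float y2, float width,
                      float cx[4], float cy[4]) {
    float px = x2 - x1, py = y2 - y1;
    float len = hypotf(px, py);          // length of the segment
    float nx = -py / len * width / 2.0f; // half-width offset along the left normal
    float ny =  px / len * width / 2.0f;
    cx[0] = x1 + nx; cy[0] = y1 + ny;    // c1: start, left side
    cx[1] = x2 + nx; cy[1] = y2 + ny;    // c2: end,   left side
    cx[2] = x2 - nx; cy[2] = y2 - ny;    // c3: end,   right side
    cx[3] = x1 - nx; cy[3] = y1 - ny;    // c4: start, right side
}

int main() {
    float cx[4], cy[4];
    line_quad(0, 0, 10, 0, 2, cx, cy);
    for (int i = 0; i < 4; ++i)
        printf("corner %d: (%.1f, %.1f)\n", i + 1, cx[i], cy[i]);
    // prints (0,1) (10,1) (10,-1) (0,-1)
    return 0;
}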
Example no. 6
void do_experiment(swp_t&swp, sind_t&sind, rng_t&rng)
{
  constant_indicator mind(
    moran_probability(swp.mutantFitness()/swp.residentFitness(),
                      swp.n_individuals()));
  relative_indicator_t<sind_t, constant_indicator> rind(sind,mind);

  if (parameters.experiment() == "SWEEP")
  { IndicatorsDisplayController<swp_t,ParamsClass>
      ind_display;
    ind_display.inheritParametersFrom(swp);
    ind_display.setrecordEvery(0);
    ind_display.setdisplayEvery(0);
    ind_display.installIndicator(rind,"fixation/Moran prob");

//     //   cout << "fixation / Moran probability is " << rind(swp) << endl;
//     for (double rsr = 0.001; rsr <= 1000; rsr *= 1.1/*1.01*/)
//       //   for (swp.room_switch_rate = 1000; swp.room_switch_rate >= 0.001;
//       //        swp.room_switch_rate /= 1.01)
//     { double p_switch = rsr / (1 + rsr);
//       swp.setroom_switch_rate(rsr);
//       ind_display.update(p_switch,swp);
//     }

    for (double mp = 0.01; mp < 1; mp += 0.01)
    { swp.setlattice_move_probability(mp);
      ind_display.update(mp,swp);
    }
  }
  else if (parameters.experiment() == "SAMPLE")
  { cout << "swarming pattern:\n" << canonical(swp);
    double pfix = sind(swp);
    cout << "fixation probability: " << pfix << '\n';
    cout << "Moran probability: " << mind(swp) << '\n';
  }
  else if (parameters.experiment() == "OPTIMIZE")
  { cout << "Moran probability: " << mind(swp) << '\n';
    do_optimize(swp,sind,swp,rng);
  }
}
Example no. 7
/* compute HOG features given gradient histograms */
void hog( double *H, double *HG, int h, int w, int d, int sBin, int oBin ) {
  double *N, *N1, *H1, *HG1, n; int o, x, y, x1, y1, hb, wb, nb, hb1, wb1, nb1;
  double eps = 1e-4/4.0/sBin/sBin/sBin/sBin; /* precise backward equality */
  hb=h/sBin; wb=w/sBin; nb=wb*hb; hb1=hb-2; wb1=wb-2; nb1=hb1*wb1;
  if(hb1<=0 || wb1<=0) return; N = (double*) mxCalloc(nb,sizeof(double));
  for(o=0; o<oBin; o++) for(x=0; x<nb; x++) N[x]+=H[x+o*nb]*H[x+o*nb];
  for( x=0; x<wb1; x++ ) for( y=0; y<hb1; y++ ) {
    HG1 = HG + x*hb1 + y; /* perform 4 normalizations per spatial block */
    for(x1=1; x1>=0; x1--) for(y1=1; y1>=0; y1--) {
      N1 = N + (x+x1)*hb + (y+y1);  H1 = H + (x+1)*hb + (y+1);
      n = 1.0/sqrt(*N1 + *(N1+1) + *(N1+hb) + *(N1+hb+1) + eps);
      for(o=0; o<oBin; o++) { *HG1=mind(*H1*n, 0.2); HG1+=nb1; H1+=nb; }
    }
  } mxFree(N);
}
Example no. 8
 int minDistance(string word1, string word2) {
     if (word1.size() == 0)
         return word2.size();
     if (word2.size() == 0)
         return word1.size();
     vector<vector<int>> mind(word1.size() + 1, vector<int>(word2.size() + 1));
     for (int i = 0;i <= word1.size();i++)
         mind[i][0] = i;
     for (int i = 0;i <= word2.size();i++)
         mind[0][i] = i;
     for (int i = 1;i <= word1.size();i++)
         for (int j = 1;j <= word2.size();j++) {
             mind[i][j] = min(mind[i - 1][j] + 1, mind[i][j - 1] + 1);
             if (word1[i - 1] == word2[j - 1])
                 mind[i][j] = min(mind[i - 1][j - 1], mind[i][j]);
             else mind[i][j] = min(mind[i - 1][j - 1] + 1, mind[i][j]);
         }
     return mind[word1.size()][word2.size()];
 }
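A quick check of the edit-distance routine above: transforming "horse" into "ros" takes three operations (replace 'h' with 'r', delete one 'r', delete 'e'), and the classic "intention"/"execution" pair needs five.

#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
using namespace std;

// assumes minDistance() from above is visible in this translation unit
int main() {
    cout << minDistance("horse", "ros") << endl;            // 3
    cout << minDistance("intention", "execution") << endl;  // 5
    return 0;
}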
Example no. 9
 void BlendScaleFactors(double *s, double *d, double *c, int mode, double *f) {
     double i;
     switch(mode){
         case 0: f[0]=0; f[1]=0; f[2]=0; f[3]=0; break; // 0:ZERO
         case 1: f[0]=1; f[1]=1; f[2]=1; f[3]=1; break; //1:ONE
         case 2: f[0]=s[0]; f[1]=s[1]; f[2]=s[2]; f[3]=s[3]; break; // 2:SRC_COLOR
         case 3: f[0]=1-s[0]; f[1]=1-s[1]; f[2]=1-s[2]; f[3]=1-s[3]; break;// 3:ONE_MINUS_SRC_COLOR
         case 4: f[0]=d[0]; f[1]=d[1]; f[2]=d[2]; f[3]=d[3];  break; // 4:DST_COLOR
         case 5: f[0]=1-d[0]; f[1]=1-d[1]; f[2]=1-d[2]; f[3]=1-d[3]; break; // 5:ONE_MINUS_DST_COLOR
         case 6: f[0]=s[3]; f[1]=s[3]; f[2]=s[3]; f[3]=s[3]; break; // 6:SRC_ALPHA
         case 7: f[0]=1-s[3]; f[1]=1-s[3]; f[2]=1-s[3]; f[3]=1-s[3];  break; // 7:ONE_MINUS_SRC_ALPHA
         case 8: f[0]=d[3]; f[1]=d[3]; f[2]=d[3]; f[3]=d[3]; break; // 8:DST_ALPHA
         case 9: f[0]=1-d[3]; f[1]=1-d[3]; f[2]=1-d[3]; f[3]=1-d[3]; break; // 9:ONE_MINUS_DST_ALPHA
         case 10: i=mind(s[3], 1-d[3]); f[0]=i; f[1]=i; f[2]=i; f[3]=1; break; // 10:SRC_ALPHA_SATURATE
         case 11: f[0]=c[0]; f[1]=c[1]; f[2]=c[2]; f[3]=c[3]; break; // 11:CONSTANT_COLOR
         case 12: f[0]=1-c[0]; f[1]=1-c[1]; f[2]=1-c[2]; f[3]=1-c[3]; break; // 12:ONE_MINUS_CONSTANT_COLOR
         case 13: f[0]=c[3]; f[1]=c[3]; f[2]=c[3]; f[3]=c[3]; break; // 13:CONSTANT_ALPHA
         case 14: f[0]=1-c[3]; f[1]=1-c[3]; f[2]=1-c[3]; f[3]=1-c[3]; break; // 14:ONE_MINUS_CONSTANT_ALPHA
     };
 }
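Blend() (shown earlier in this listing) together with BlendScaleFactors() reproduces fixed-function blending; per the mode table above, 6 is SRC_ALPHA and 7 is ONE_MINUS_SRC_ALPHA, so classic alpha blending looks like the following sketch (the array values are illustrative):

#include <cstdio>

// assumes Blend() and BlendScaleFactors() from above are visible here
int main() {
    double src[4] = { 1.0, 0.0, 0.0, 0.5 };  // translucent red source
    double dst[4] = { 0.0, 0.0, 1.0, 1.0 };  // opaque blue destination
    double con[4] = { 0.0, 0.0, 0.0, 0.0 };  // constant color (unused by modes 6/7)

    // source factor SRC_ALPHA (6), destination factor ONE_MINUS_SRC_ALPHA (7);
    // the blended result is written back into src[]
    Blend(src, dst, con, 6, 7);
    printf("blended: %.2f %.2f %.2f %.2f\n", src[0], src[1], src[2], src[3]);
    // prints: blended: 0.50 0.00 0.50 0.75
    return 0;
}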
Example no. 10
void tree::train(const vector<vector<float>>& x, const vector<float>& y, const size_t mtry, const function<double()>& u01, vector<float>& incPurity, vector<float>& incMSE, vector<float>& impSD, vector<float>& oobPreds, vector<size_t>& oobTimes)
{
	const size_t num_samples = x.size();
	const size_t num_variables = x.front().size();

/*	float sum = 0;
	for (size_t i = 0; i < num_samples; ++i)
	{
		sum += y[i];
	}
	const float sum_inv = 1 / sum;
	vector<float> p(num_samples);
	for (size_t i = 0; i < num_samples; ++i)
	{
		p[i] = y[i] * sum_inv;
	}
	for (size_t i = 1; i < num_samples; ++i)
	{
		p[i] += p[i - 1];
	}*/

	// Create bootstrap samples with replacement
	reserve((num_samples << 1) - 1);
	emplace_back();
	node& root = front();
	root.samples.resize(num_samples);
	for (size_t& s : root.samples)
	{
		const auto u = u01();
		s = static_cast<size_t>(u * num_samples);
//		for (s = 0; s < num_samples - 1 && !(u < p[s]); ++s);
	}

	// Populate nodes
	for (size_t k = 0; k < size(); ++k)
	{
		node& n = (*this)[k];

		// Evaluate node y and purity
		float sum = 0;
		for (const size_t s : n.samples) sum += y[s];
		n.y = sum / n.samples.size();
		n.p = sum * n.y; // = n.y * n.y * n.samples.size() = sum * sum / n.samples.size()

		// Do not split the node if it contains too few samples
		if (n.samples.size() <= 5) continue;

		// Find the best split that has the highest increase in node purity
		float bestChildNodePurity = n.p;
		vector<size_t> mind(num_variables);
		iota(mind.begin(), mind.end(), 0);
		for (size_t i = 0; i < mtry; ++i)
		{
			// Randomly select a variable without replacement
			const size_t j = static_cast<size_t>(u01() * (num_variables - i));
			const size_t v = mind[j];
			mind[j] = mind[num_variables - i - 1];

			// Sort the samples in ascending order of the selected variable
			vector<size_t> ncase(n.samples.size());
			iota(ncase.begin(), ncase.end(), 0);
			sort(ncase.begin(), ncase.end(), [&x, &n, v](const size_t val1, const size_t val2)
			{
				return x[n.samples[val1]][v] < x[n.samples[val2]][v];
			});

			// Search through the gaps in the selected variable
			float suml = 0;
			float sumr = sum;
			size_t popl = 0;
			size_t popr = n.samples.size();
			for (size_t j = 0; j < n.samples.size() - 1; ++j)
			{
				const float d = y[n.samples[ncase[j]]];
				suml += d;
				sumr -= d;
				++popl;
				--popr;
				if (x[n.samples[ncase[j]]][v] == x[n.samples[ncase[j+1]]][v]) continue;
				const float curChildNodePurity = (suml * suml / popl) + (sumr * sumr / popr);
				if (curChildNodePurity > bestChildNodePurity)
				{
					bestChildNodePurity = curChildNodePurity;
					n.var = v;
					n.val = (x[n.samples[ncase[j]]][v] + x[n.samples[ncase[j+1]]][v]) * .5f;
				}
			}
		}

		// Do not split the node if purity does not increase
		if (bestChildNodePurity == n.p) continue;

		// Create two child nodes and distribute samples
		n.children[0] = size();
		emplace_back();
		n.children[1] = size();
		emplace_back();
		for (const size_t s : n.samples)
		{
			(*this)[n.children[x[s][n.var] > n.val]].samples.push_back(s);
		}
	}

	// Aggregate NodeIncPurity
	for (const auto& n : *this)
	{
		if (!n.children[0]) continue;
		incPurity[n.var] += (*this)[n.children[0]].p + (*this)[n.children[1]].p - n.p;
	}

	// Find the samples used in bootstrap
	vector<size_t> in(num_samples, 0);
	for (const auto s : front().samples)
	{
		++in[s];
	}

	// Find the out-of-bag samples and calculate the OOB error without permutation
	vector<size_t> oob;
	oob.reserve(num_samples);
	float ooberr = 0;
	for (size_t s = 0; s < num_samples; ++s)
	{
		if (in[s]) continue;
		oob.push_back(s);
		const auto p = (*this)(x[s]);
		ooberr += (p - y[s]) * (p - y[s]);
		oobPreds[s] += p;
		++oobTimes[s];
	}

	// Generate a permutation of OOB samples for use in all variables
	vector<size_t> perm(oob);
	for (size_t i = 1; i < perm.size(); ++i)
	{
		const auto j = static_cast<size_t>(u01() * (perm.size() - i + 1));
		const auto tmp = perm[perm.size() - i];
		perm[perm.size() - i] = perm[j];
		perm[j] = tmp;
	}

	// Aggregate incMSE and impSD
	for (size_t v = 0; v < num_variables; ++v)
	{
		float ooberrperm = 0;
		for (size_t i = 0; i < oob.size(); ++i)
		{
			auto permx = x[oob[i]];
			permx[v] = x[perm[i]][v];
			const auto p = (*this)(permx);
			ooberrperm += (p - y[oob[i]]) * (p - y[oob[i]]);
		}
		const auto delta = (ooberrperm - ooberr) / oob.size();
		incMSE[v] += delta;
		impSD[v] += delta * delta;
	}
}
Example no. 11
/*
 * if baseline is true then we just leave out the subset totally
 */
static void cluster (char *outfn, int baseline)
{
  // ranking[n] is the index of the nth testcase we want a user to
  // look at, or else -1
  int *ranking = (int *) malloc (numvecs * sizeof (int));
  assert (ranking);

  // ranked[0] is true iff point 0 has been placed in the list
  int *ranked = (int *) malloc (numvecs * sizeof (int));
  assert (ranked);

  // number of entries in ranking[] that are filled in now
  int cur_ranking = 0;

  int x;
  for (x=0; x<numvecs; x++) {
    ranking[x] = -1;
    ranked[x] = 0;
  }

  printf ("output file: '%s'\n", outfn);
  FILE *outf = fopen (outfn, "w+");
  assert (outf);

  if (baseline == 0) {
    // give a place to each member (if any) of the already-found subset
    for (x=0; x<numvecs; x++) {
      if (in_subset[x]) {
	ranked[x] = 1;
	ranking[cur_ranking] = x;
	cur_ranking++;
      }
    }
  }

  // need to start with at least one ranked point, so start
  // with the one farthest from everyone else
  if (cur_ranking==0) {

    int dpos;
    int i = -1;
    for (dpos=0; dpos<numvecs*numvecs; dpos++) {
      i = dlist[dpos].a;
      int j = dlist[dpos].b;
      if (baseline==2) {
	if (!in_all_subset[i] &&
	    !in_all_subset[j]) break;
      }
      if (!in_subset[i] &&
	  !in_subset[j]) break;
    }
    assert (!in_subset[i]);
    ranking[cur_ranking] = i;
    ranked[i] = 1;
    cur_ranking++;
    fprintf (outf, "%d\n", i);
  }

  while (1) {

    // find the point that maximizes the minimum distance from
    // any ranked point
    int y;
    double max_dist = -HUGE_VALF;
    int i = -1;
    for (y=0; y<numvecs; y++) {
      if (ranked[y]) continue;
      if ((baseline==1) && in_subset[y]) continue;
      if ((baseline==2) && in_all_subset[y]) continue;
      int z;
      double min_dist = HUGE_VALF;
      for (z=0; z<cur_ranking; z++) {
        int new_z = ranking[z];
	if ((baseline==1) && in_subset[new_z]) continue;
	if ((baseline==2) && in_all_subset[new_z]) continue;
	min_dist = mind (min_dist, distances[y][new_z]);
      }
      
      // alternate, simpler implementation
      int zz;
      double min_dist2 = HUGE_VALF;
      for (zz=0; zz<numvecs; zz++) {
	if ((baseline==1) && in_subset[zz]) continue;
	if ((baseline==2) && in_all_subset[zz]) continue;
	if (ranked[zz])
	  min_dist2 = mind (min_dist2, distances[y][zz]);
      }
      assert ((min_dist == min_dist2) && "not the same min_dist!");

      if (min_dist > max_dist) {
	max_dist = min_dist;
	i = y;
      }
    }

    if (i == -1) {
      printf ("cur_ranking = %d, subset_size = %d, all_subset_size = %d, numvecs = %d\n", 
	      cur_ranking, subset_size, all_subset_size, numvecs);
      switch (baseline) {
      case 0:
	assert (cur_ranking == numvecs);
	break;
      case 1:
	assert ((subset_size + cur_ranking) == numvecs);
	break;
      case 2:
	// assert ((all_subset_size + cur_ranking) == numvecs);
	break;
      default:
	assert (0);
      }
      break;
    }

    ranking[cur_ranking] = i;
    ranked[i] = 1;
    cur_ranking++;
    fprintf (outf, "%d\n", i);
  }

  fclose (outf);
  free (ranking);
  free (ranked);
}
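The selection loop in cluster() is a farthest-point ordering: each iteration picks the unranked point whose minimum distance to the already-ranked set is largest. Below is a self-contained sketch on a small hard-coded distance matrix (the matrix and point count are made up for illustration):

#include <cmath>
#include <cstdio>

int main() {
    const int N = 4;
    // symmetric pairwise distances between 4 points
    double dist[N][N] = {
        { 0, 1, 4, 9 },
        { 1, 0, 3, 8 },
        { 4, 3, 0, 5 },
        { 9, 8, 5, 0 },
    };
    int ranked[N] = { 0 };
    int ranking[N];
    int cur = 0;

    ranking[cur++] = 0;                  // seed with point 0
    ranked[0] = 1;

    while (cur < N) {
        int best = -1;
        double max_dist = -HUGE_VAL;
        for (int y = 0; y < N; y++) {
            if (ranked[y]) continue;
            double min_dist = HUGE_VAL;  // distance to the nearest ranked point
            for (int z = 0; z < cur; z++)
                min_dist = fmin(min_dist, dist[y][ranking[z]]);
            if (min_dist > max_dist) { max_dist = min_dist; best = y; }
        }
        ranking[cur++] = best;
        ranked[best] = 1;
    }

    for (int i = 0; i < N; i++)
        printf("%d ", ranking[i]);       // prints: 0 3 2 1
    printf("\n");
    return 0;
}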
Example no. 12
bool UResPassable::combineNearIntervals(double obstSize, double lowVarLimit)
{
  ULaserPi *pp1, *pp2;
  int i, k;
  const int PI_NEAR_LIMIT = MIN_MEASUREMENTS_PER_PI;
  bool result = false;
  bool combined;
  double avgX, distLimit;
  double xl, xr, xl2, xr2;
  double v1, v2;
  double vm1, vm2;
  double a1, a2, ad;
  double sg1l, sg2l;
//  double sgLen;
//  ULaserPi * ppis[MAX_PASSABLE_INTERVALS_PR_SCAN];
//  const double LOW_VAR_LIMIT = 0.00005;
  //  /** max difference in variance for two intervals */
//  const double varVarianceFactor = 1.0;
//  const double varVarLengthFactor = 0.5;
//  double vf;
  // sort intervals from right to left
/*  for (i = 0; i < pisCnt; i++)
  ppis[i] = &pis[i];*/
  qsort(pis, pisCnt, sizeof(ULaserPi), sortPis);
  // combine near intervals
  pp1 = pis;
  v1 = pp1->getFitVariance();
  vm1 = pp1->getVarMin();
  a1 = atan2(pp1->getSegment()->vec.y, pp1->getSegment()->vec.x);
  sg1l = pp1->getSegment()->length;
  pp2 = pp1 + 1;
  i = 0;
  while (pp2 < &pis[pisCnt])
  { // test right of pp1 and left of pp2
    combined = false;
    v2 = pp2->getFitVariance();
    vm2 = pp2->getVarMin();
    a2 = atan2(pp2->getSegment()->vec.y, pp2->getSegment()->vec.x);
    sg2l = pp2->getSegment()->length;
    if (abs(pp2->getRight() - pp1->getLeft()) < PI_NEAR_LIMIT)
    { // combine possible to the left of pp1
      // get average x-value
      xl = scan->getPos(pp1->getLeft()).x;
      xr = scan->getPos(pp2->getRight()).x;
      avgX = (xl + xr)/2.0;
      distLimit = obstSize * avgX / sensorPose->getZ(); //scannerHeight;
      // dist limit is for small obstacles
      // level steps must be smaller
      combined = fabs(xl - xr) < distLimit;
      if (combined)
      { // test also the ends of the fitted line
        xl2 = pp1->getSegment()->getOtherEnd().x;
        xr2 = pp2->getSegment()->pos.x;
        combined = fabs(xl2 - xr2) < distLimit;
      }
      // test min half-robot-width variance difference
      if (combined)
      { // test if step in variance is less than a factor 2
        // if (fmax(vm1, vm2) > fmax(fmin(vm1, vm2), lowVarLimit/9.0) * 2.0)
        if (fmax(vm1, vm2) > fmax(fmin(vm1, vm2), lowVarLimit) * 1.8)
          combined = false;
      }
      if (combined)
      { // test angle and angle difference of segments
        ad = limitToPi(a1 - a2);
        if ((ad > (60 * M_PI / 180.0)) or
             (ad < (-20 * M_PI / 180.0)) or
             (fabs(a1) < 30 * M_PI / 180.0) or
             (fabs(a1) > 150 * M_PI / 180.0) or
             (fabs(a2) < 30 * M_PI / 180.0) or
             (fabs(a2) > 150 * M_PI / 180.0))
          combined = false;
      }
      if (combined)
      { // test measurements in between
        for (k = pp1->getLeft() + 1; k < pp2->getRight(); k++)
        {
          combined = (avgX - scan->getPos(k).x) < distLimit;
          if (not combined)
            break;
        }
      }
      if (combined)
        pp1->setInterval(pp1->getRight(), pp2->getLeft(),
                         scan,
                         mind(pp1->getVarMin(), pp2->getVarMin()),
                         mind(pp1->getVarMin2(), pp2->getVarMin2()));
    }
    // use data from left-most interval as new base (combined or not)
    v1 = v2;
    vm1 = vm2;
    a1 = a2;
    sg1l = sg2l;
    if (combined)
    { // combined, so move the remaining intervals down
      for (k = i + 2; k < pisCnt; k++)
        pis[k - 1] = pis[k];
      pisCnt--;
    }
    else
    { // not combined - so move on to next
      pp1++;
      pp2++;
      i++;
    }
  }
  return result;
}
Example no. 13
int UResPassable::makePassableIntervals2(const int rightLim, const int leftLim)
{
  int result = 0;
  int i, j;
  ULaserPoint *pr;
  ULaserPoint *pl; // pointer to left test point
  ULaserPoint *pll; // pointer to last left test point
  ULaserPoint *par;
  int piLeft, piRight; // left and right index
  int paRight, paLeft; // actual interval limits
  bool passable;
  double w, d;
  double varMin = 1.0;
  double varMin2 = 1.0;
  double varTest = 0.0;
  const double LASER_MEARUREMENT_VARIANCE = sqr(0.01); // in meter^2
  double varMinLim = LASER_MEARUREMENT_VARIANCE;
  double varStop = 1.0;
  UPosition p;
  // max allowed separation of measurements
  const double MAX_MEASUREMENT_INTERVAL_DIST = 0.8;
//  const double VAR_MIN2_LIMIT = 2; // minimum measurements for valid variance type 2 (no min limit)
  double d2p; // distance to previous measurement
  bool limHeight, limMaxVar, limEndDist, limInside, limMeasureDist;
  bool doContinue;
  //
  //
  // set default as not passable
  scan->setQ(rightLim, leftLim - 1, PQ_NOT);
  //
  pr = scan->getData(rightLim);
  if (lineSmoothSettings)
    varMinLim = LASER_MEARUREMENT_VARIANCE;
  else
    varMinLim = LASER_MEARUREMENT_VARIANCE * 2.0;
  pl = pr;
  i = rightLim;
  passable = false;
  piRight = 0;
  piLeft = -1; // just to avoid warnings
  while (i < leftLim)
  { // test for terrain structure
    if (not passable)
    { // look for start of passable using max limit
      passable = (pr->varL < lineFitVarLimit) and     // within variance limit ...
          (absd(pr->tilt) < lineFitXTiltLimit) and // and tilt OK
          (pr->pos.z < MAX_ALLOWED_HEIGHT) and // and not too high ...
          (pr->pos.z > MIN_ALLOWED_HEIGHT);    // or too low
      if (passable)
      { // right edge of a potential area is found
        piRight = i;
        varMinLim = LASER_MEARUREMENT_VARIANCE;
        varMin = maxd(pr->varL, varMinLim);
        varMin2 = varMin; // 1.0;
        // initialize looking for left side of interval
        piLeft = i;
        pl = scan->getData(i);
      }
    }
    //
    if (passable)
    {
      pll = pl;
      pl = scan->getData(i);
      // look for left side of interval using adaptive limit
      varTest = mind(varMin, lineFitVarLimit)  * lineFitEndpointDev;
      // save index for (adaptive) left limit
      paLeft = scan->getData(piLeft)->varToL;
      // get compensated distance for last measurement to the left of interval
      d = pr->distLeft - lineFitConvexOffset;
      // get distance to previous measurement
      if (pll->isValid() and pl->isValid())
        d2p = hypot(pl->pos.x - pll->pos.x, pl->pos.y - pll->pos.y);
      else
        d2p = 0.0;
      // debug
      //if (pr->pos.z >= MAX_ALLOWED_HEIGHT)
      //  pr->pos.z = pr->pos.z + 0.01;
      // debug end
      limHeight = (pr->pos.z < MAX_ALLOWED_HEIGHT) and (pr->pos.z > MIN_ALLOWED_HEIGHT);
      limMaxVar = pr->varL < lineFitVarLimit;
      limEndDist = sqr(d) < varTest;
      limInside  = paLeft <= leftLim;
      limMeasureDist = d2p < MAX_MEASUREMENT_INTERVAL_DIST;
      // debug
      doContinue = (limMeasureDist and limInside and limEndDist and limMaxVar and limHeight);
      if (not doContinue)
      {
        if (not limMeasureDist)
          iMeasureDist++;
        if (not limInside)
          iInside++;
        if (limEndDist)
          iEndDist++;
        if (limMaxVar)
          iMaxVar++;
        if (limHeight)
          iHeight++;
      }
      // debug end
      // test for end of passable interval
      if ((pr->varL < lineFitVarLimit) and  // max line-fit variance
           (sqr(d) < varTest) and      // fit of left-most point adapted
           (paLeft <= leftLim) and     // inside test interval
           (absd(pr->tilt) < lineFitXTiltLimit) and // line tilt OK
           (pr->pos.z < MAX_ALLOWED_HEIGHT) and // item height not too high ...
           (pr->pos.z > MIN_ALLOWED_HEIGHT) and     // ... or too low
           (d2p < MAX_MEASUREMENT_INTERVAL_DIST))   // and gap to previous measurement OK
      { // still passable
        if (pr->isValid())
          piLeft = i; // left-most place to search for a right edge
        // adapt limit of line-fit
        if (pr->varL < varMin)
          varMin = maxd(pr->varL, varMinLim);
        if ((pr->varL < varMin2)) // and ((pr->varToL - i) > VAR_MIN2_LIMIT))
          varMin2 = pr->varL;
      }
      else
      { // (adaptive) left end of interval is found
        // look from here to the right for adaptive right limit
        paRight = piLeft;
        // get position of left side of interval
        p = scan->getData(paLeft)->pos;
        par = scan->getData(paRight); // first guess of adapted right edge
        w = 0.0; // reset width
        for (j = paRight; j >= piRight; j--)
        { // look for right edge with this variance.
          // calculate passable width
          w = p.dist(par->pos);
          // get distance relative to line compensated
          // to favor convex shaped road
          d = par->distRight - lineFitConvexOffset;
          varStop = par->varL;
          //
          limMaxVar = pr->varL < lineFitVarLimit;
          limEndDist = sqr(d) < varTest;
          doContinue = (limMeasureDist and limInside and limEndDist and limMaxVar and limHeight);
          if (not doContinue)
          {
            if (limEndDist)
              iEndDist++;
            if (limMaxVar)
              iMaxVar++;
          }
          //
          if ((par->varL <= lineFitVarLimit) and  // max line-fit variance limit
               (sqr(d) < varTest))// and           // last right-most point (adaptive)
            paRight = j;
          else
            // right edge is found
            break;
          // go further right
          par--;
        }
        // left is left side of last good interval
        // missing test for big (in-line) jumps in untested interval
        // from liLeft to paLeft
        pl = scan->getData(piLeft);
        for (j = piLeft + 1; j <= paLeft; j++)
        {
          pll = pl;
          pl = scan->getData(j);
          if (pll->isValid() and pl->isValid())
          { // test only if measurements are valid
            d2p = hypot(pl->pos.x - pll->pos.x, pl->pos.y - pll->pos.y);
            if (d2p >= MAX_MEASUREMENT_INTERVAL_DIST)
            { // stop before the jump
              paLeft = j - 1;
              break;
            }
          }
        }
        addPassableInterval(paRight, mini(paLeft, leftLim), varMin, varMin2);
        //
        result++;
        // look for new start
        i = paLeft;
        pr = scan->getData(i);
        passable = false;
        // an interval is found (or failed), but there may be another
        // to the right of the tested interval
        if ((paRight - piRight) > MIN_MEASUREMENTS_PER_PI)
        { // test this interval
          result += makePassableIntervals2(piRight, paRight - 1);
        }
        // continue with next interval
      }
    }
    i++;
    //    prOld = pr;
    pr++;
  }
  return result;
}
Example no. 14
// calculation of the threshold in quiet in FFT-resolution
static void
Ruhehoerschwelle ( PsyModel* m,
				   unsigned int  EarModelFlag,
                   int           Ltq_offset,
                   int           Ltq_max )
{
    int     n;
    int     k;
    float   f;
    float   erg;
    double  tmp;
    float   absLtq [512];

    for ( n = 0; n < 512; n++ ) {
		f = (float) ( (n+1) * (float)(m->SampleFreq / 2000.) / 512 );   // Frequency in kHz

        switch ( EarModelFlag / 100 ) {
        case 0:         // ISO-threshold in quiet
            tmp  = 3.64*pow (f,-0.8) -  6.5*exp (-0.6*(f-3.3)*(f-3.3)) + 0.001*pow (f, 4.0);
            break;
        default:
        case 1:         // measured threshold in quiet (Nick Berglmeir, Andree Buschmann, Kopfhörer)
            tmp  = 3.00*pow (f,-0.8) -  5.0*exp (-0.1*(f-3.0)*(f-3.0)) + 0.0000015022693846297*pow (f, 6.0) + 10.*exp (-(f-0.1)*(f-0.1));
            break;
        case 2:         // measured threshold in quiet (Filburt, Kopfhörer)
            tmp  = 9.00*pow (f,-0.5) - 15.0*exp (-0.1*(f-4.0)*(f-4.0)) + 0.0341796875*pow (f, 2.5)          + 15.*exp (-(f-0.1)*(f-0.1)) - 18;
            tmp  = mind ( tmp, Ltq_max - 18 );
            break;
        case 3:
            tmp  = ATHformula_Frank ( 1.e3 * f );
            break;
        case 4:
            tmp  = ATHformula_Frank ( 1.e3 * f );
            if ( f > 4.8 ) {
                tmp += 3.00*pow (f,-0.8) -  5.0*exp (-0.1*(f-3.0)*(f-3.0)) + 0.0000015022693846297*pow (f, 6.0) + 10.*exp (-(f-0.1)*(f-0.1));
                tmp *= 0.5 ;
            }
            break;
        case 5:
            tmp  = ATHformula_Frank ( 1.e3 * f );
            if ( f > 4.8 ) {
                tmp = 3.00*pow (f,-0.8) -  5.0*exp (-0.1*(f-3.0)*(f-3.0)) + 0.0000015022693846297*pow (f, 6.0) + 10.*exp (-(f-0.1)*(f-0.1));
            }
            break;
        }

        tmp -= f * f * (int)(EarModelFlag % 100 - 50) * 0.0015;  // 00: +30 dB, 100: -30 dB  @20 kHz

        tmp       = mind ( tmp, Ltq_max );              // Limit ATH
        tmp      += Ltq_offset - 23;                    // Add chosen Offset
        m->tables.fftLtq[n] = absLtq[n] = POW10 ( 0.1 * tmp);     // conversion into power
    }

    // threshold in quiet in partitions (long)
    for ( n = 0; n < PART_LONG; n++ ) {
        erg = 1.e20f;
        for ( k = MPC_WL[n]; k <= MPC_WH[n]; k++ )
            erg = minf (erg, absLtq[k]);

		m->tables.partLtq[n] = erg;               // threshold in quiet
		m->tables.invLtq [n] = 1.f / m->tables.partLtq[n];  // Inverse
    }
}
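Case 0 above (labelled "ISO-threshold in quiet") is the widely used Terhardt-style approximation of the absolute threshold of hearing; restated on its own, with f in kHz and the result in dB SPL, it gives roughly 3.4 dB near 1 kHz:

#include <cmath>
#include <cstdio>

// same expression as "case 0" in Ruhehoerschwelle(); f in kHz, result in dB SPL
static double ath_iso(double f) {
    return 3.64 * pow(f, -0.8)
         - 6.5  * exp(-0.6 * (f - 3.3) * (f - 3.3))
         + 1e-3 * pow(f, 4.0);
}

int main() {
    const double freqs_khz[] = { 0.1, 1.0, 4.0, 16.0 };
    for (double f : freqs_khz)
        printf("%5.1f kHz -> %7.2f dB\n", f, ath_iso(f));
    return 0;
}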