// ######################################################################
void TaskRelevanceMapKillN::integrate(SimEventQueue& q)
{
  // did the ShapeEstimator post anything this cycle?
  if (SeC<SimEventShapeEstimatorOutput> e =
      q.check<SimEventShapeEstimatorOutput>(this))
    {
      // grab the smoothed object mask and bring it down to our map's size:
      Image<float> objMask =
        downSize(e->smoothMask(), itsMap.getWidth(), itsMap.getHeight(), 5);

      // A uniform mask means nothing was segmented: cache it as-is.
      // Otherwise, convert the segmented object into a soft mask via a
      // thresholded distance map before caching it:
      float lo, hi; getMinMax(objMask, lo, hi);
      if (fabs(lo - hi) >= 1.0e-10)
        {
          // distance map from the object, clamped then normalized so that
          // values are 0 inside the object and 1 far away from it:
          Image<float> dmap = chamfer34(objMask, 255.0F);
          inplaceClamp(dmap, 0.0F, 15.0F);
          dmap /= 15.0F;  // 0 inside obj, 1 outside
          objMask = dmap;
        }

      // remember this input mask:
      itsCache.push_back(objMask);

      // the relevance map is the pointwise minimum over the cache:
      itsMap = itsCache.getMin();
    }
}
// ######################################################################
void TaskRelevanceMapKillStatic::inputFrame(const InputFrame& f)
{
  // NOTE: we are guaranteed that itsMap is initialized and of correct
  // size, as this is done by the TaskRelevanceMapAdapter before
  // calling us.

  // NOTE: this is duplicating some of the computation done in the
  // IntensityChannel, so there is a tiny performance hit (~0.25%) in
  // doing this operation twice; however the benefit is that we avoid
  // having to somehow pick information out of an IntensityChannel and
  // pass it to a TaskRelevanceMap -- that was making for a mess in
  // Brain. In the future, we could avoid the ineffiency by caching
  // the intensity pyramid in the InputFrame object, and then letting
  // IntensityChannel and TaskRelevanceMap both share access to that
  // pyramid.
  const Image<float> img =
    downSize(f.grayFloat(), itsMap.getWidth(), itsMap.getHeight(), 5);

  // is this our first time here?
  if (itsStaticBuff.initialized() == false)
    { itsStaticBuff = img; return; }

  // otherwise derive TRM from difference between current frame and
  // staticBuf:
  Image<float> diff = absDiff(itsStaticBuff, img);

  // useful range of diff is 0..255; anything giving an output less
  // that itsKillStaticThresh will be considered static; here let's
  // clamp to isolate that:
  inplaceClamp(diff, 0.0F, itsKillStaticThresh.getVal());

  // the more static, the greater the killing: so, first let's change
  // the range so that the min is at -itsKillStaticThresh and zero is
  // neutral:
  diff -= itsKillStaticThresh.getVal();

  // itsKillStaticCoeff determines how strong the killing should be:
  diff *= (1.0F / itsKillStaticThresh.getVal()) * // normalize to min at -1.0
    itsKillStaticCoeff.getVal();                  // apply coeff

  // mix our new TRM to the old one; we want to have a fairly high
  // mixing coeff for the new one so that we don't penalize recent
  // onset objects too much (i.e., were previously static and killed,
  // but are not anymore):
  itsMap = itsMap * 0.25F + (diff + 1.0F) * 0.75F;
  float mi, ma; getMinMax(itsMap, mi, ma);
  LINFO("TRM range = [%.2f .. %.2f] -- 1.0 is baseline", mi, ma);

  // update our cumulative buffer:
  itsStaticBuff = itsStaticBuff * 0.75F + img * 0.25F;

}
// ######################################################################
// Compute the biased submap for submap index i of the given channel.
// Currently aborts via LFATAL: the biasing step is being redesigned.
Image<float> SubmapAlgorithmBiased::compute(const SingleChannel& chan,
                                            const uint i)
{
  // fetch the raw center-surround map for this submap index:
  Image<float> submap = chan.getRawCSmap(i);

  // normalize the submap to the channel's canonical map dimensions,
  // shrinking or enlarging as needed:
  const int targetW = chan.getMapDims().w();
  if (submap.getWidth() > targetW)
    submap = downSize(submap, chan.getMapDims());
  else if (submap.getWidth() < targetW)
    submap = rescale(submap, chan.getMapDims());

  // the biasing logic below is disabled until the rework lands:
  LFATAL("This is being reworked... stay tuned");

  /*
  // bias the submap if we have a mean and sigma
  LINFO("Mean %f var %f", chan.getMean(i), chan.getSigmaSq(i));
  if (chan.getMean(i) > 0 && chan.getSigmaSq(i) > 0)
  {
    double mean = chan.getMean(i);
    double var = chan.getSigmaSq(i);

    for(int y=0; y<submap.getHeight(); y++)
      for(int x=0; x<submap.getWidth(); x++)
      {
        double val = submap.getVal(x, y);
        double delta = -(val - mean) * (val - mean);
        //Calc the normal dist
        double newVal = exp(delta/(2*var))/(sqrt(2*M_PI*var));

        // submap.setVal(x, y, newVal*10000);
        submap.setVal(x, y, log(newVal)+10000);
      }
  }

  Image<float> biasMask = chan.getBiasMask();
  if (biasMask.initialized()) //bias based on a mask
  {
    //rescale the mask to the submap scale TODO: can be done more effiently
    biasMask = rescale(biasMask, submap.getDims());
    submap *= biasMask;
  }
  // now do the standard processing
  submap = chan.postProcessMap(submap, i);

  */
  return submap;
}
// Remove the rows in [from_row, to_row] (inclusive) from this variable's
// storage: drop their invalid-field markers, shift any trailing cells
// down over the removed range, mark the freed trailing cells as Unknown,
// then shrink the dimension and release excess allocation.
void RKVariable::removeRows (int from_row, int to_row) {
	RK_TRACE (OBJECTS);
	// discard invalid-field markers for each removed row:
	for (int row = from_row; row <= to_row; ++row) {
		myData ()->invalid_fields.remove (row);
	}

	if (to_row < (myData ()->allocated_length - 1)) {	// not the last rows: shift the tail down
		if (myData ()->cell_strings) {
			qmemmove (&(myData ()->cell_strings[from_row]), &(myData ()->cell_strings[to_row+1]), (myData ()->allocated_length - to_row - 1) * sizeof (QString));
		} else {
			qmemmove (&(myData ()->cell_doubles[from_row]), &(myData ()->cell_doubles[to_row+1]), (myData ()->allocated_length - to_row - 1) * sizeof (double));
		}
		qmemmove (&(myData ()->cell_states[from_row]), &(myData ()->cell_states[to_row+1]), (myData ()->allocated_length - to_row - 1) * sizeof (int));
	}

	// Mark the now-unused trailing cells as Unknown.
	// BUG FIX: the original indexed cell_states[allocated_length - 1] on
	// every iteration, so only the very last cell was reset and the other
	// trailing cells kept stale state; index by 'row' instead.
	for (int row = (myData ()->allocated_length - 1 - (to_row - from_row)); row < myData ()->allocated_length; ++row) {
		myData ()->cell_states[row] = RKVarEditData::Unknown;
	}

	// shrink the row count and trim the allocation:
	dimensions[0] -= (to_row - from_row) + 1;
	downSize ();
}
// Example #5 (score: 0)
//! Downsize an image to the given dimensions (convenience overload).
/*! Forwards to the (width, height) variant of downSize().
    @param src the input image
    @param dims target dimensions for the result
    @param filterWidth width of the low-pass filter applied while decimating

    FIX: restored the missing 'template <class T>' header — without it the
    use of T in the signature does not compile. */
template <class T>
Image<T> downSize(const Image<T>& src, const Dims& dims,
                  const int filterWidth)
{
  return downSize(src, dims.w(), dims.h(), filterWidth);
}
// Example #6 (score: 0)
// ######################################################################
// Build the raw descriptor vector: for every submap of the attached
// complex channel, pick a sample location (the max within the fovea
// window of each submap) and append that submap's value there to itsFV.
void DescriptorVec::buildRawDV()
{

  // when true, the sample point is the max inside each submap's fovea
  // window; the alternate branch (global saliency-map max) is currently
  // dead code since this flag is hard-coded:
  bool salientLocationWithinSubmaps = true;
  Point2D<int> objSalientLoc(-1,-1);  //the feature location

  // pyramid level at which the saliency map / submaps live:
  const LevelSpec lspec = itsComplexChannel->getModelParamVal<LevelSpec>("LevelSpec");
  const int smlevel = lspec.mapLevel();

  // fovea center in map-level coordinates (+0.49 rounds to nearest):
  int x=int(itsFoveaLoc.i / double(1 << smlevel) + 0.49);
  int y=int(itsFoveaLoc.j / double(1 << smlevel) + 0.49);

  // fovea extent in map-level coordinates:
  int foveaW = int(itsFoveaSize.getVal().w() / double(1 << smlevel) + 0.49);
  int foveaH = int(itsFoveaSize.getVal().h() / double(1 << smlevel) + 0.49);

  // top-left corner of the fovea window:
  int tl_x = x - (foveaW/2);
  int tl_y = y - (foveaH/2);

  Dims mapDims = itsComplexChannel->getSubmap(0).getDims();

  //Shift the fovea location so we dont go outside the image
  //Sift the fovea position if nessesary
  if (tl_x < 0) tl_x = 0; if (tl_y < 0) tl_y = 0;
  if (tl_x+foveaW > mapDims.w()) tl_x = mapDims.w() - foveaW;
  if (tl_y+foveaH > mapDims.h()) tl_y = mapDims.h() - foveaH;

  if (!salientLocationWithinSubmaps)
  {
    //Find the most salient location within the fovea
    Image<float> SMap = itsComplexChannel->getOutput();

    Image<float> tmp = SMap; //TODO need to resize to fovea
    //Find the max location within the fovea

    float maxVal; Point2D<int> maxLoc;
    findMax(tmp, maxLoc, maxVal);
    //convert back to original SMap cordinates
   // objSalientLoc.i=tl_x+maxLoc.i;
   // objSalientLoc.j=tl_y+maxLoc.j;
    // NOTE(review): the max location found above is discarded and the
    // fovea center is used instead — looks intentional, but confirm.
    objSalientLoc.i=x;
    objSalientLoc.j=y;
    itsAttentionLoc = objSalientLoc;
  }

  //Go through all the submaps building the DV
  itsFV.clear(); //clear the FV
  uint numSubmaps = itsComplexChannel->numSubmaps();
  for (uint i = 0; i < numSubmaps; i++)
  {
    //Image<float> submap = itsComplexChannel->getSubmap(i);
    Image<float> submap = itsComplexChannel->getRawCSmap(i);

    // resize submap to fixed scale if necessary:
    if (submap.getWidth() > mapDims.w())
      submap = downSize(submap, mapDims);
    else if (submap.getWidth() < mapDims.w())
      submap = rescale(submap, mapDims); //TODO convert to  quickInterpolate


    if (salientLocationWithinSubmaps) //get the location from the salient location within each submap
    {
      Image<float> tmp = submap;
      //get only the fovea region

      if (foveaW < tmp.getWidth()) //crop if our fovea is smaller
        tmp = crop(tmp, Point2D<int>(tl_x, tl_y), Dims(foveaW, foveaH));
     // tmp = maxNormalize(tmp, 0.0F, 10.0F, VCXNORM_MAXNORM);  //find salient locations

      //Find the max location within the fovea
      float maxVal; Point2D<int> maxLoc; findMax(tmp, maxLoc, maxVal);
      //LINFO("%i: Max val %f, loc(%i,%i)", i, maxVal, maxLoc.i, maxLoc.j);

      // maxLoc is in cropped-window coordinates; shift back into
      // full-submap coordinates:
      objSalientLoc.i=tl_x+maxLoc.i;
      objSalientLoc.j=tl_y+maxLoc.j;

    }

    // clamp the sample location to the submap bounds before reading:
    if (objSalientLoc.i < 0) objSalientLoc.i = 0;
    if (objSalientLoc.j < 0) objSalientLoc.j = 0;

    if (objSalientLoc.i > submap.getWidth()-1) objSalientLoc.i = submap.getWidth()-1;
    if (objSalientLoc.j > submap.getHeight()-1) objSalientLoc.j = submap.getHeight()-1;



   // LINFO("Location from %i,%i: (%i,%i)", objSalientLoc.i, objSalientLoc.j,
    //    submap.getWidth(), submap.getHeight());
    // sample the submap at the chosen location and append to the DV:
    float featureVal = submap.getVal(objSalientLoc.i,objSalientLoc.j);
    itsFV.push_back(featureVal);
 //   SHOWIMG(rescale(submap, 255, 255));

  }
}