Example #1
FloatImage *VectorField::get_vorticity(int xs, int ys)
{
  int i,j;

  FloatImage *image = new FloatImage(xsize-2, ysize-2);

  /* grid spacing used to scale the finite differences */
  float d = 0.1 / xsize;

  /* central differences over the interior of the grid:
     vorticity = d(vy)/dx - d(vx)/dy */
  for (i = 1; i < xsize-1; i++)
    for (j = 1; j < ysize-1; j++) {
      float dx = yval(i+1, j) - yval(i-1, j);
      float dy = xval(i, j+1) - xval(i, j-1);
      float vort = (dx/d) - (dy/d);
      image->pixel(i-1, j-1) = vort;
    }

  FloatImage *image2 = new FloatImage(xs, ys);

  for (i = 0; i < xs; i++)
    for (j = 0; j < ys; j++) {
      float x = (i + 0.5) / xs;
      float y = (j + 0.5) / ys;
      image2->pixel(i,j) = image->get_value(x,y);
    }

  delete image;
  return (image2);
}
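
Both get_vorticity above and get_divergence (Example #4) compute their quantity on the interior of the grid and then resample it to the requested xs x ys size through FloatImage::get_value(x, y), which takes normalized coordinates in [0,1]. The get_value implementation is not part of these examples; the sketch below shows the kind of bilinear lookup it is assumed to perform, reusing the accessor names (getwidth(), getheight(), pixel(i, j)) that appear in the other snippets.

/* Minimal sketch (not the original implementation) of a bilinear lookup
 * at normalized coordinates (x, y) in [0,1]. */
float bilinear_get_value(FloatImage &img, float x, float y)
{
  float fx = x * (img.getwidth() - 1);
  float fy = y * (img.getheight() - 1);

  int i0 = (int) fx;
  int j0 = (int) fy;
  int i1 = (i0 + 1 < img.getwidth())  ? i0 + 1 : i0;
  int j1 = (j0 + 1 < img.getheight()) ? j0 + 1 : j0;

  float s = fx - i0;   /* horizontal blend weight */
  float t = fy - j0;   /* vertical blend weight */

  /* blend the four surrounding pixels */
  float top    = (1 - s) * img.pixel(i0, j0) + s * img.pixel(i1, j0);
  float bottom = (1 - s) * img.pixel(i0, j1) + s * img.pixel(i1, j1);
  return (1 - t) * top + t * bottom;
}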
Example #2
// @@ Not tested!
CubeSurface CubeSurface::fastResample(int size, EdgeFixup fixupMethod) const
{
    // Allocate output cube.
    CubeSurface resampledCube;
    resampledCube.m->allocate(size);

    // For each texel of the output cube.
    for (uint f = 0; f < 6; f++) {
        nvtt::Surface resampledFace = resampledCube.m->face[f];
        FloatImage * resampledImage = resampledFace.m->image;

        for (uint y = 0; y < uint(size); y++) {
            for (uint x = 0; x < uint(size); x++) {

                const Vector3 filterDir = texelDirection(f, x, y, size, fixupMethod);

                Vector3 color = m->sample(filterDir);

                resampledImage->pixel(0, x, y, 0) = color.x;
                resampledImage->pixel(1, x, y, 0) = color.y;
                resampledImage->pixel(2, x, y, 0) = color.z;
            }
        }
    }

    // @@ Implement edge averaging. Share this code with cosinePowerFilter
    if (fixupMethod == EdgeFixup_Average) {
    }

    return resampledCube;
}
Example #3
// RGB (8u) to float conversion
FloatImage* rgbToFloat(const ColorImage& src, FloatImage* dst)
{
    FloatImage* result = createResultBuffer(src.width()*3, src.height(), dst);

    #ifdef NICE_USELIB_IPP
        IppStatus ret = ippiConvert_8u32f_C3R(src.getPixelPointer(), src.getStepsize(),
                                              result->getPixelPointer(), result->getStepsize(),
                                              makeROIFullImage(src));

        if(ret!=ippStsNoErr)
            fthrow(ImageException, ippGetStatusString(ret));

    #else
        const ColorImage::Pixel* pSrc;
              FloatImage::Pixel* pDst;
        for(int y=0; y<src.height(); ++y) {
            pSrc = src.getPixelPointerY(y);
            pDst = result->getPixelPointerY(y);
            for(int x=0; x<3*src.width(); ++x,++pSrc,++pDst)
                *pDst = static_cast<Ipp32f>(*pSrc);
        }
    #endif

    return result;
}
Example #4
FloatImage *VectorField::get_divergence(int xs, int ys)
{
  int i,j;

  FloatImage *image = new FloatImage(xsize-2, ysize-2);

  float d = 0.1 / xsize;

  for (i = 1; i < xsize-1; i++)
    for (j = 1; j < ysize-1; j++) {
      float dx = xval(i+1, j) - xval(i-1, j);
      float dy = yval(i, j+1) - yval(i, j-1);
      float div = (dx + dy) / (2 * d);
      image->pixel(i-1, j-1) = div;
    }

  FloatImage *image2 = new FloatImage(xs, ys);

  for (i = 0; i < xs; i++)
    for (j = 0; j < ys; j++) {
      float x = (i + 0.5) / xs;
      float y = (j + 0.5) / ys;
      image2->pixel(i,j) = image->get_value(x,y);
    }

  delete image;
  return (image2);
}
Example #5
bool SemiglobalLabMatcher::update()
{
    WriteGuard<ReadWritePipe<FloatImage, FloatImage> > wguard(m_wpipe);
    FloatImage leftImg, rightImg;   
    if ( m_lrpipe->read(&leftImg) && m_rrpipe->read(&rightImg) )
    {
        Dim dsiDim(leftImg.dim().width(), leftImg.dim().height(), 
                   m_maxDisparity);
        
        float *leftImg_d = leftImg.devMem();
        float *rightImg_d = rightImg.devMem();   
        FloatImage dispImage = FloatImage::CreateDev(
            Dim(dsiDim.width(), dsiDim.height()));
     
        cudaPitchedPtr aggregDSI = m_aggregDSI.mem(dsiDim);
        SGPath *paths = m_sgPaths.getDescDev(dispImage.dim());
                   
        
        SemiGlobalLabDevRun(dsiDim, paths, m_sgPaths.pathCount(),
                            leftImg_d, rightImg_d,
                            aggregDSI, dispImage.devMem(), m_zeroAggregDSI);
        m_zeroAggregDSI = false;
        
        dispImage.cpuMem();
        wguard.write(dispImage);        
    }
    
    return wguard.wasWrite();
}
Example #6
// Sample cubemap in the given direction.
Vector3 CubeSurface::Private::sample(const Vector3 & dir)
{
    int f = -1;
    if (fabs(dir.x) > fabs(dir.y) && fabs(dir.x) > fabs(dir.z)) {
        if (dir.x > 0) f = 0;
        else f = 1;
    }
    else if (fabs(dir.y) > fabs(dir.z)) {
        if (dir.y > 0) f = 2;
        else f = 3;
    }
    else {
        if (dir.z > 0) f = 4;
        else f = 5;
    }
    nvDebugCheck(f != -1);

    // uv coordinates corresponding to filterDir.
    float u = dot(dir, faceU[f]);
    float v = dot(dir, faceV[f]);

    FloatImage * img = face[f].m->image;

    Vector3 color;
    color.x = img->sampleLinearClamp(0, u, v);
    color.y = img->sampleLinearClamp(1, u, v);
    color.z = img->sampleLinearClamp(2, u, v);

    return color;
}
Example #7
 JNIEXPORT void JNICALL
 Java_org_gearvrf_NativeFloatImage_update(JNIEnv * env,
                                          jobject obj, jlong jtexture, jint width, jint height, jfloatArray jdata) {
     FloatImage* texture = reinterpret_cast<FloatImage*>(jtexture);
     jfloat* data = env->GetFloatArrayElements(jdata, 0);
     texture->update(env, width, height, jdata);
     env->ReleaseFloatArrayElements(jdata, data, 0);
 }
Example #8
FloatImage *FloatImage::copy()
{
  FloatImage *image = new FloatImage (xsize, ysize);

  for (int i = 0; i < xsize * ysize; i++)
    image->pixel(i) = pixels[i];

  return (image);
}
Example #9
void EpochModel::depthFilter(FloatImage &depthImgf, FloatImage &countImgf, float depthJumpThr,
                             bool dilation, int dilationNumPasses, int dilationWinsize,
                             bool erosion, int erosionNumPasses, int erosionWinsize)
{
  FloatImage depth;
  FloatImage depth2;
  int w = depthImgf.w;
  int h = depthImgf.h;

  depth=depthImgf;

  if (dilation)
  {
    for (int k = 0; k < dilationNumPasses; k++)
    {
      depth.Dilate(depth2, dilationWinsize / 2);
      depth=depth2;
    }
  }

  if (erosion)
  {
    for (int k = 0; k < erosionNumPasses; k++)
    {
      depth.Erode(depth2, erosionWinsize / 2);
      depth=depth2;
    }
  }

  Histogramf HH;
  HH.Clear();
  HH.SetRange(0,depthImgf.MaxVal()-depthImgf.MinVal(),10000);
  for(int i=1; i < static_cast<int>(depthImgf.v.size()); ++i)
    HH.Add(fabs(depthImgf.v[i]-depth.v[i-1]));

  if(logFP) fprintf(logFP,"**** Depth histogram 2 Min %f Max %f Avg %f Percentiles ((10)%f (25)%f (50)%f (75)%f (90)%f)\n",HH.MinV(),HH.MaxV(),HH.Avg(),
        HH.Percentile(.1),HH.Percentile(.25),HH.Percentile(.5),HH.Percentile(.75),HH.Percentile(.9));

  int deletedCnt=0;

  depthJumpThr = static_cast<float>(HH.Percentile(0.8));
  for (int y = 0; y < h; y++)
    for (int x = 0; x < w; x++)
    {
      if ((depthImgf.Val(x, y) - depth.Val(x, y)) / depthImgf.Val(x, y) > 0.6)
      {
        countImgf.Val(x, y) = 0.0f;
        ++deletedCnt;
      }
    }

  countImgf.convertToQImage().save("tmp_filteredcount.jpg","jpg");

  if(logFP) fprintf(logFP,"**** depthFilter: deleted %i on %i\n",deletedCnt,w*h);

}
Example #10
float Arc3DModel::ComputeDepthJumpThr(FloatImage &depthImgf, float percentile)
{
    Histogramf HH;
    HH.Clear();
    HH.SetRange(0,depthImgf.MaxVal()-depthImgf.MinVal(),10000);
    for(unsigned int i=1; i < static_cast<unsigned int>(depthImgf.v.size()); ++i)
        HH.Add(fabs(depthImgf.v[i]-depthImgf.v[i-1]));

    return HH.Percentile(percentile);
}
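
Example #10 reduces the depth map to a single threshold: it histograms the absolute difference between consecutive samples of depthImgf.v and returns the requested percentile of that distribution. The stand-alone sketch below reproduces the same idea with std::nth_element instead of the project's Histogramf class; the helper name and signature are illustrative, not part of the original API.

#include <vector>
#include <cmath>
#include <algorithm>

/* Sketch: percentile of the absolute jumps between consecutive depth samples. */
float depthJumpPercentile(const std::vector<float> &depth, float percentile)
{
    if (depth.size() < 2)
        return 0.0f;

    std::vector<float> jumps;
    jumps.reserve(depth.size() - 1);
    for (size_t i = 1; i < depth.size(); ++i)
        jumps.push_back(std::fabs(depth[i] - depth[i - 1]));

    size_t k = (size_t)(percentile * (jumps.size() - 1));
    std::nth_element(jumps.begin(), jumps.begin() + k, jumps.end());
    return jumps[k];
}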
Example #11
FloatImage *VectorField::get_magnitude()
{
  FloatImage *image = new FloatImage(xsize, ysize);

  for (int i = 0; i < xsize * ysize * 2; i += 2) {
    float x = values[i];
    float y = values[i+1];
    image->pixel(i/2) = sqrt (x*x + y*y);
  }

  return (image);
}
Example #12
template <typename PointInT, typename IntensityT> void
pcl::tracking::PyramidalKLTTracker<PointInT, IntensityT>::derivatives (const FloatImage& src, FloatImage& grad_x, FloatImage& grad_y) const
{
  // std::cout << ">>> derivatives" << std::endl;
  ////////////////////////////////////////////////////////
  // Use Scharr operator to compute derivatives.        //
  // Vertical kernel +3 +10 +3 = [1 0 -1]T * [3 10 3]   //
  //                  0   0  0                          //
  //                 -3 -10 -3                          //
  // Horizontal kernel  +3 0  -3 = [3 10 3]T * [1 0 -1] //
  //                   +10 0 -10                        //
  //                    +3 0  -3                        //
  ////////////////////////////////////////////////////////
  if (grad_x.size () != src.size () || grad_x.width != src.width || grad_x.height != src.height)
    grad_x = FloatImage (src.width, src.height);
  if (grad_y.size () != src.size () || grad_y.width != src.width || grad_y.height != src.height)
    grad_y = FloatImage (src.width, src.height);

  int height = src.height, width = src.width;
  float *row0 = new float [src.width + 2];
  float *row1 = new float [src.width + 2];
  float *trow0 = row0; ++trow0;
  float *trow1 = row1; ++trow1;
  const float* src_ptr = &(src.points[0]);

  for (int y = 0; y < height; y++)
  {
    const float* srow0 = src_ptr + (y > 0 ? y-1 : height > 1 ? 1 : 0) * width;
    const float* srow1 = src_ptr + y * width;
    const float* srow2 = src_ptr + (y < height-1 ? y+1 : height > 1 ? height-2 : 0) * width;
    float* grad_x_row = &(grad_x.points[y * width]);
    float* grad_y_row = &(grad_y.points[y * width]);

    // do vertical convolution
    for (int x = 0; x < width; x++)
    {
      trow0[x] = (srow0[x] + srow2[x])*3 + srow1[x]*10;
      trow1[x] = srow2[x] - srow0[x];
    }

    // make border
    int x0 = width > 1 ? 1 : 0, x1 = width > 1 ? width-2 : 0;
    trow0[-1] = trow0[x0]; trow0[width] = trow0[x1];
    trow1[-1] = trow1[x0]; trow1[width] = trow1[x1];

    // do horizontal convolution and store results
    for (int x = 0; x < width; x++)
    {
      grad_x_row[x] = trow0[x+1] - trow0[x-1];
      grad_y_row[x] = (trow1[x+1] + trow1[x-1])*3 + trow1[x]*10;
    }
  }

  delete [] row0;
  delete [] row1;
}
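
The loop above is a separable implementation: each row is first smoothed/differenced vertically into trow0/trow1 and then differenced/smoothed horizontally, with borders mirrored (index -1 maps to 1, index n maps to n-2). As a cross-check, a direct per-pixel version of the same 3x3 Scharr kernels might look like the sketch below; it operates on a plain row-major float buffer and is only meant to make the kernel structure explicit.

/* Mirror an index at the borders the same way the optimized loop does. */
static inline int mirror(int i, int n)
{
    if (i < 0)  return (n > 1) ? 1 : 0;
    if (i >= n) return (n > 1) ? n - 2 : 0;
    return i;
}

/* Direct (non-separable) reference for the Scharr derivatives above. Sketch only. */
void scharrReference(const float *src, int width, int height,
                     float *grad_x, float *grad_y)
{
    static const float smooth[3] = { 3.f, 10.f, 3.f };

    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x)
        {
            float gx = 0.f, gy = 0.f;
            for (int k = -1; k <= 1; ++k)
            {
                int ym = mirror(y + k, height);
                int xm = mirror(x + k, width);
                /* grad_x: horizontal difference, vertically smoothed */
                gx += smooth[k + 1] * (src[ym * width + mirror(x + 1, width)] -
                                       src[ym * width + mirror(x - 1, width)]);
                /* grad_y: vertical difference, horizontally smoothed */
                gy += smooth[k + 1] * (src[mirror(y + 1, height) * width + xm] -
                                       src[mirror(y - 1, height) * width + xm]);
            }
            grad_x[y * width + x] = gx;
            grad_y[y * width + x] = gy;
        }
}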
Example #13
float EpochModel::ComputeDepthJumpThr(FloatImage &depthImgf, float percentile)
{
  Histogramf HH;
  HH.Clear();
  HH.SetRange(0,depthImgf.MaxVal()-depthImgf.MinVal(),10000);
  for(unsigned int i=1; i < static_cast<unsigned int>(depthImgf.v.size()); ++i)
    HH.Add(fabs(depthImgf.v[i]-depthImgf.v[i-1]));

  if(logFP) fprintf(logFP,"**** Depth histogram Min %f Max %f Avg %f Percentiles ((10)%f (25)%f (50)%f (75)%f (90)%f)\n",HH.MinV(),HH.MaxV(),HH.Avg(),
        HH.Percentile(.1),HH.Percentile(.25),HH.Percentile(.5),HH.Percentile(.75),HH.Percentile(.9));
  
  return HH.Percentile(percentile);
}
Example #14
void Arc3DModel::depthFilter(FloatImage &depthImgf, FloatImage &countImgf, float depthJumpThr,
    bool dilation, int dilationNumPasses, int dilationWinsize,
    bool erosion, int erosionNumPasses, int erosionWinsize)
{
    FloatImage depth;
    FloatImage depth2;
    int w = depthImgf.w;
    int h = depthImgf.h;

    depth=depthImgf;

    if (dilation)
    {
        for (int k = 0; k < dilationNumPasses; k++)
        {
            depth.Dilate(depth2, dilationWinsize / 2);
            depth=depth2;
        }
    }

    if (erosion)
    {
        for (int k = 0; k < erosionNumPasses; k++)
        {
            depth.Erode(depth2, erosionWinsize / 2);
            depth=depth2;
        }
    }

    Histogramf HH;
    HH.Clear();
    HH.SetRange(0,depthImgf.MaxVal()-depthImgf.MinVal(),10000);
    for(int i=1; i < static_cast<int>(depthImgf.v.size()); ++i)
        HH.Add(fabs(depthImgf.v[i]-depth.v[i-1]));

    int deletedCnt=0;

    depthJumpThr = HH.Percentile(0.8f);
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
        {
            if ((depthImgf.Val(x, y) - depth.Val(x, y)) / depthImgf.Val(x, y) > 0.6)
            {
                countImgf.Val(x, y) = 0.0f;
                ++deletedCnt;
            }
        }

    countImgf.convertToQImage().save("tmp_filteredcount.jpg","jpg");

}
Example #15
TDV_NAMESPACE_BEGIN

FloatImage MedianFilterCPU::updateImpl(FloatImage input)
{
    const Dim dim = input.dim();

    CvMat *image = input.cpuMem();

    FloatImage output = FloatImage::CreateCPU(dim);
    CvMat *img_output = output.cpuMem();

    // NOTE: CV_GAUSSIAN applies a Gaussian blur; a true median filter
    // would pass CV_MEDIAN here.
    cvSmooth(image, img_output, CV_GAUSSIAN, 5);

    return output;
}
Example #16
void FloatImage::blur(int steps)
{
  int i,j;
  int i0,i1,j0,j1;
  float val;
  FloatImage *image = new FloatImage (xsize, ysize);

  /* blur several times */

  for (int k = 0; k < steps; k++) {

    /* one step of blurring */

    for (i = 0; i < xsize; i++) {

      i0 = i-1;
      i1 = i+1;
      if (i == 0)
        i0 = 0;
      if (i == xsize-1)
        i1 = xsize-1;

      for (j = 0; j < ysize; j++) {

        j0 = j-1;
        j1 = j+1;
        if (j == 0)
          j0 = 0;
        if (j == ysize-1)
          j1 = ysize-1;

        val = pixel(i0,j) + pixel(i1,j) + pixel(i,j0) + pixel(i,j1);
        val += 4 * pixel(i,j);
        val *= 0.125;
        image->pixel(i, j) = val;
      }
    }
    
    /* copy result into original array */
    for (i = 0; i < xsize; i++)
      for (j = 0; j < ysize; j++)
        pixel(i,j) = image->pixel(i,j);
  }

  delete image;
}
Example #17
void Bundle::write_pgm(char *filename, int xsize, int ysize)
{
  Lowpass *low = new Lowpass (xsize, ysize, 2.0, 0.6);

  for (int i = 0; i < num_lines; i++) {
    Streamline *st = lines[i];
    for (int j = 0; j < st->samples - 1; j++) {
      float taper_scale = 0.5 * (st->pts[j].intensity + st->pts[j+1].intensity);
      low->filter_segment (st->xs(j), st->ys(j), st->xs(j+1), st->ys(j+1),
                           taper_scale);
    }
  }

  FloatImage *image = low->get_image_ptr();
  image->write_pgm (filename, 0.0, 1.1);

  delete low;
}
Example #18
bool FastWTAMatcher::update()
{
    WriteGuard<ReadWritePipe<FloatImage, FloatImage> > wguard(m_wpipe);
    FloatImage leftImg, rightImg;   
    if ( m_lrpipe->read(&leftImg) && m_rrpipe->read(&rightImg) )
    {
        float *leftImg_d = leftImg.devMem();
        float *rightImg_d = rightImg.devMem();   
        
        Dim dsiDim(leftImg.dim().width(), leftImg.dim().height(), 
                   m_maxDisparity);
        FloatImage image = FloatImage::CreateDev(
            Dim(dsiDim.width(), dsiDim.height()));
        FastWTADevRun(dsiDim, leftImg_d, rightImg_d, image.devMem());
        
        image.cpuMem();
        wguard.write(image);        
    }
    
    return wguard.wasWrite();
}
Example #19
FloatImage *FloatImage::normalize()
{
  int count = getwidth() * getheight();

  FloatImage *newimage = new FloatImage(getwidth(), getheight());

  /* find minimum and maximum values */

  float min,max;
  get_extrema (min, max);
  if (max == min)
    min = max - 1;

  /* map pixel values linearly into [0,1] */
  for (int i = 0; i < count; i++)
    newimage->pixel(i) = (pixel(i) - min) / (max - min);

  return (newimage);
}
Example #20
bool StereoCorrespondenceCV::update()
{
    WriteGuard<ReadWritePipe<FloatImage> > wguard(m_wpipe);
    FloatImage limg, rimg;

    if ( m_lrpipe->read(&limg) && m_rrpipe->read(&rimg) )
    {
        CvMat *limg_c = limg.cpuMem();
        CvMat *rimg_c = rimg.cpuMem();
        
        CvMat *limg8u_c = m_limg8U.getImage(cvGetSize(limg_c));
        CvMat *rimg8u_c = m_rimg8U.getImage(cvGetSize(rimg_c));

        cvConvertScale(limg_c, limg8u_c, 255.0);
        cvConvertScale(rimg_c, rimg8u_c, 255.0);

        FloatImage output = FloatImage::CreateCPU(
            Dim::minDim(limg.dim(), rimg.dim()));
        CvMat *out_c = output.cpuMem();
        
        CudaBenchmarker bMarker;
        bMarker.begin();
        
        if ( m_mode == LocalMatching )
            cvFindStereoCorrespondenceBM(limg8u_c, rimg8u_c, out_c, m_bmState);
        else
            cvFindStereoCorrespondenceGC(limg8u_c, rimg8u_c, out_c, NULL,
                                         m_gcState);
        
        bMarker.end();
        m_mark.addProbe(bMarker.elapsedTime());
        wguard.write(output);

        FloatImageSinkPol::sink(limg);
        FloatImageSinkPol::sink(rimg);
    }

    return wguard.wasWrite();
}
Example #21
void ApplyAngularFilterTask(void * context, int id)
{
    ApplyAngularFilterContext * ctx = (ApplyAngularFilterContext *)context;

    int size = ctx->filteredCube->edgeLength;

    int f = id / (size * size);
    int idx = id % (size * size);
    int y = idx / size;
    int x = idx % size;

    nvtt::Surface & filteredFace = ctx->filteredCube->face[f];
    FloatImage * filteredImage = filteredFace.m->image;

    const Vector3 filterDir = texelDirection(f, x, y, size, ctx->fixupMethod);

    // Convolve filter against cube.
    Vector3 color = ctx->inputCube->applyAngularFilter(filterDir, ctx->coneAngle, ctx->filterTable, ctx->tableSize);

    filteredImage->pixel(0, idx) = color.x;
    filteredImage->pixel(1, idx) = color.y;
    filteredImage->pixel(2, idx) = color.z;
}
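
ApplyAngularFilterTask decodes the linear task id as id = f*size*size + y*size + x, so a single parallel-for over 6*size*size items covers every texel of every face. For example, with size = 16 and id = 1000: f = 1000 / 256 = 3, idx = 1000 % 256 = 232, y = 232 / 16 = 14, x = 232 % 16 = 8.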
Example #22
  void MultiProjector::project(FloatImage& zbuffer, 
			       IndexImage& indices,
			       const Eigen::Isometry3f& T,
			       const Cloud& model) const {
    zbuffer.create(_image_rows, _image_cols);
    indices.create(_image_rows, _image_cols);

    for (size_t i = 0; i<_projectors.size(); i++) {
      if (!_projectors[i])
	continue;

      float* zb= zbuffer.ptr<float>(_mono_rows*i);
      int* ib= indices.ptr<int>(_mono_rows*i);
      FloatImage mono_zbuffer(_mono_rows, _image_cols,zb); 
      IntImage mono_indices(_mono_rows, _image_cols,ib); 
      _projectors[i]->project(mono_zbuffer, mono_indices, _inverse_offset*T,model);
    }    
  }
Example #23
void Arc3DModel::SmartSubSample(int factor, FloatImage &fli, CharImage &chi, FloatImage &subD, FloatImage &subQ, int minCount)
{
    assert(fli.w==chi.w && fli.h==chi.h);
    int w=fli.w/factor;
    int h=fli.h/factor;
    subQ.resize(w,h);
    subD.resize(w,h);

    for(int i=0;i<w;++i)
        for(int j=0;j<h;++j)
        {
            float maxcount=0;
            int cnt=0;
            float bestVal=0;
            for(int ki=0;ki<factor;++ki)
                for(int kj=0;kj<factor;++kj)
                {
                    float q= chi.Val(i*factor+ki,j*factor+kj) - minCount+1 ;
                    if(q>0)
                    {
                        maxcount+= q;
                        bestVal +=q*fli.Val(i*factor+ki,j*factor+kj);
                        cnt++;
                    }
                }
            if(cnt>0)
            {
                subD.Val(i,j)=float(bestVal)/maxcount;
                subQ.Val(i,j)=minCount-1 + float(maxcount)/cnt;
            }
            else
            {
                subD.Val(i,j)=0;
                subQ.Val(i,j)=0;
            }
        }
}
Example #24
void Arc3DModel::Laplacian2(FloatImage &depthImg, FloatImage &countImg, int minCount, CharImage &featureMask, float depthThr)
{
    FloatImage Sum;
    int w=depthImg.w,h=depthImg.h;
    Sum.resize(w,h);

    for(int y=1;y<h-1;++y)
        for(int x=1;x<w-1;++x)
        {
            float curDepth=depthImg.Val(x,y);
            int cnt=0;
            for(int j=-1;j<=1;++j)
                for(int i=-1;i<=1;++i)
                {
                    int q=countImg.Val(x+i,y+j)-minCount+1;
                    if(q>0 && fabs(depthImg.Val(x+i,y+j)-curDepth) < depthThr) {
                        Sum.Val(x,y)+=q*depthImg.Val(x+i,y+j);
                        cnt+=q;
                    }
                }
            if(cnt>0) {
                Sum.Val(x,y)/=cnt;
            }
            else Sum.Val(x,y)=depthImg.Val(x,y);
        }

    for(int y=1;y<h-1;++y)
        for(int x=1;x<w-1;++x)
        {
            float q=(featureMask.Val(x,y)/255.0);
            depthImg.Val(x,y) = depthImg.Val(x,y)*q + Sum.Val(x,y)*(1-q);
        }
}
Example #25
FloatImage * nv::createNormalMipmapMap(const FloatImage * img)
{
	nvDebugCheck(img != NULL);
	
	uint w = img->width();
	uint h = img->height();
	
	uint hw = w / 2;
	uint hh = h / 2;
	
	FloatImage dotImg;
	dotImg.allocate(1, w, h);
	
	FloatImage shImg;
	shImg.allocate(9, hw, hh);
	
	SampleDistribution distribution(256);
	const uint sampleCount = distribution.sampleCount();
	
	for (uint d = 0; d < sampleCount; d++)
	{
		const float * xChannel = img->channel(0);
		const float * yChannel = img->channel(1);
		const float * zChannel = img->channel(2);
		
		Vector3 dir = distribution.sampleDir(d);
		
		Sh2 basis;
		basis.eval(dir);
		
		for(uint i = 0; i < w*h; i++)
		{
			Vector3 normal(xChannel[i], yChannel[i], zChannel[i]);
			normal = normalizeSafe(normal, Vector3(zero), 0.0f);
			
			dotImg.setPixel(dot(dir, normal), i);	// store the dot product for pixel i, not sample index d
		}
		
		// @@ It would be nice to have a fastDownSample that took an existing image as an argument, to avoid allocations.
		AutoPtr<FloatImage> dotMip(dotImg.fastDownSample());
		
		for(uint p = 0; p < hw*hh; p++)
		{
			float f = dotMip->pixel(p);
			
			// Project irradiance to sh basis and accumulate.
			for (uint i = 0; i < 9; i++)
			{
				float & sum = shImg.channel(i)[p];
				sum += f * basis.elemAt(i);
			}
		}
	}
	
	
	
	FloatImage * normalMipmap = new FloatImage;
	normalMipmap->allocate(4, hw, hh);
	
	// Precompute the clamped cosine radiance transfer.
	Sh2 prt;
	prt.cosineTransfer();
	
	// Allocate outside the loop.
	Sh2 sh;
	
	for(uint p = 0; p < hw*hh; p++)
	{
		for (uint i = 0; i < 9; i++)
		{
			sh.elemAt(i) = shImg.channel(i)[p];
		}
		
		// Convolve sh irradiance by radiance transfer.
		sh *= prt;
		
		// Now sh(0) is the ambient occlusion.
		// and sh(1) is the normal direction.
		
		// Should we use SVD to fit only the normals to the SH?
		// @@ TODO: the fitted occlusion/normal is not yet written back into normalMipmap.
		
	}
	
	return normalMipmap;
}
Example #26
int main(int argc, char *argv[]) {

	argc--;
	argv++;
	
	if(argc < 1) {
		cout << "Usage: houghTransform <input image> [resolution (optional)]" << endl;
		return 1;
	}
	
	// Load image
	Image src;
	int result = src.LoadPPM(argv[0]);
	
	if(result != 0) {
		cerr << "Cannot open file! " << endl;
		return 1;
	}

	int resolution = 1;
	if(argc > 1) {
		resolution = atoi(argv[1]);
	}
	
	// convert image if needed
	Image greySrc = src;
	if(src.GetColorModel() != ImageBase::cm_Grey) {
		ColorConversion::ToGrey(src, greySrc);
	} 

	// times
	timeval startStandard;
	timeval endStandard;
	timeval start1Fast;
	timeval start2Fast;
	timeval endFast;
	
	// ---------- standard hough transformation ------------
	vector<PrimitiveLine> lines;
	
	HoughTransform ht;
	gettimeofday(&startStandard, NULL);
	ht.StandardHoughTransform(greySrc, resolution, lines);
	gettimeofday(&endStandard, NULL);

	// save hough space
	FloatImage houghSpace = ht.GetHoughSpace();
	Image tmp;
	houghSpace.GetAsGreyImage(tmp);

	Save(tmp, string(argv[0]) + "_standardht_houghspace.ppm");

	// paint lines
	Image linesImg;

	ColorConversion::ToRGB(greySrc, linesImg);

	for(unsigned int i = 0; i < lines.size(); i++) {
		lines[i].SetColor(Color::red());
		lines[i].Draw(&linesImg);
	}

	Save(linesImg, string(argv[0]) + "_standardht_lines.ppm");

	// ---------- fast hough transformation ------------
	cout << endl;

	lines.clear();

	StructureTensor J;
	gettimeofday(&start1Fast, NULL);
	J.SetFromImage(greySrc);

	HoughTransform ht2;
	gettimeofday(&start2Fast, NULL);
	ht2.FastHoughTransform(J, resolution, lines);
	gettimeofday(&endFast, NULL);

	// save hough space
	houghSpace = ht2.GetHoughSpace();
	houghSpace.GetAsGreyImage(tmp);

	Save(tmp, string(argv[0]) + "_fastht_houghspace.ppm");

	// paint lines
	ColorConversion::ToRGB(greySrc, linesImg);

	for(unsigned int i = 0; i < lines.size(); i++) {
		lines[i].SetColor(Color::blue());
		lines[i].Draw(&linesImg);
	}

	Save(linesImg, string(argv[0]) + "_fastht_lines.ppm");

	// ---------- output elapsed times -------------

	timeval standard;
	timeval fast1, fast2;

	timeval_subtract(&standard, &endStandard, &startStandard);
	timeval_subtract(&fast1, &endFast, &start1Fast);
	timeval_subtract(&fast2, &endFast, &start2Fast);

	long msecStandard = standard.tv_sec *1000 + standard.tv_usec/1000;
	long msecFast1 = fast1.tv_sec *1000 + fast1.tv_usec/1000;
	long msecFast2 = fast2.tv_sec *1000 + fast2.tv_usec/1000;

	cout << endl << endl;
	cout << "Standard hough transformation: " << msecStandard << " msec" << endl;
	cout << "Fast hough transformation (with Structure Tensor calculation): " << msecFast1 << " msec" << endl;
	cout << "Fast hough transformation (without Structure Tensor calculation): " << msecFast2 << " msec" << endl;

	cout << "Fertig! " << endl;
	
	return 0;
}
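
timeval_subtract is called above but not shown in the example. A minimal sketch, following the common result = end - start idiom implied by the calls (the helper in the original program may differ):

#include <sys/time.h>

int timeval_subtract(timeval *result, timeval *end, timeval *start) {
	long sec  = end->tv_sec  - start->tv_sec;
	long usec = end->tv_usec - start->tv_usec;
	if (usec < 0) {      // borrow one second
		usec += 1000000;
		sec  -= 1;
	}
	result->tv_sec  = sec;
	result->tv_usec = usec;
	return sec < 0;      // non-zero if the difference is negative
}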
Example #27
NiblackBinaryImage::NiblackBinaryImage(const GreyLevelImage& anImg,
				       const int aLowThres,
				       const int aHighThres,
				       const int aMaskSize,
				       const float aK,
				       const float aPostThres)

  : BinaryImage(anImg.width(), anImg.height())

{
  // Pointer to mean image
  FloatImage* pMeanImg = 0;

  // Compute standard deviation and mean
  StandardDeviationImage stdImg(FloatImage(anImg), pMeanImg, aMaskSize);

  // Rows to exchange data
  GreyLevelImage::pointer bRow = new GreyLevelImage::value_type [_width];
  GreyLevelImage::pointer gRow = new GreyLevelImage::value_type [_width];

  float* mRow = new float [_width];
  float* sRow = new float [_width];
  
  // Binarize
  for (int i = 0 ; i < _height ; ++i)
    {
      // Read means
      pMeanImg->row(i, mRow);
      // Read deviations
      stdImg.row(i, sRow);
      // Read original
      anImg.row(i, gRow);

      // Pointers
      float* m = mRow;
      float* s = sRow;
      GreyLevelImage::pointer g = gRow;
      GreyLevelImage::pointer b = bRow;

      // Dynamic thresholding
      for (int j = 0 ; j < _width ; ++j, ++b, ++g)
	{
	  if (*g < aLowThres)
	    {
	      *b = 1;
	    }
	  else if (*g > aHighThres)
	    {
	      *b = 0;
	    }
	  else if (*g < (GreyLevelImage::value_type) (*m++ + aK * *s++))
	    {
	      *b = 1;
	    }
	  else
	    {
	      *b = 0;
	    }
	}

      // Save line
      setRow(i, bRow);
    }

  // Post-processing
  if (aPostThres > 0)
    {
      // Copy reference image
      BinaryImage* contours = new BinaryImage(*this);

      // Extract connected components
      ConnectedComponents* compConnexes = new ConnectedComponents(*contours);

      // Prepare tables
      int labCnt = compConnexes->componentCnt();
      // Tables for the sums of the means of the gradient
      float* gsum = new float [labCnt];
      qgFill(gsum, labCnt, 0.f);
      // Tables for the numbers of points -- for the mean
      int* psum = new int [labCnt];
      qgFill(psum, labCnt, 0);

      // Table of labels
      Component::label_type* labRow = new Component::label_type[_width];
      // By construction, first and last pixels are always WHITE
      labRow[1] = 0;
      labRow[_width - 1] = 0;

      // Compute the module of Canny gradient
      CannyGradientImage* gradImg = new CannyGradientImage(anImg);
      GradientModuleImage gradModImg(*gradImg);
      delete gradImg;

      // Construct the image of the contours of the black components
      // which thus includes the interesting pixels
      ErodedBinaryImage* eroImg = new ErodedBinaryImage(*contours);
      (*contours) -= (*eroImg);
      delete eroImg;

      // Create a line of floats
      float* fRow = new float [_width];

      // Pointer to the pixel map of the component image
      Component::label_type* pMapCCImg =
	(compConnexes->accessComponentImage()).pPixMap() + _width;

      for (int i = 1 ; i < (_height - 1) ; ++i)
	{
	  // Get a line of labels from the component image
	  // and set pixels from white components to 0
	  pMapCCImg += 2;
	  PRIVATEgetBlackLabels(pMapCCImg, labRow);

	  // Read the corresponding line in the contours
	  contours->row(i, bRow);

	  // Read the corresponding line in the module of the gradient
	  gradModImg.row(i, fRow);
	  Component::label_type* p = labRow;
	  GreyLevelImage::pointer q = bRow;
	  float* r = fRow;
	  for (int j = 0 ; j < _width ; ++j, ++p, ++q, ++r)
	    {
	      if (*q != 0)  // We are on a contour
		{
		  gsum[(int)(*p)] += *r;
		  psum[(int)(*p)] += 1;
		}
	    } // END for j
	} // END for i

      delete contours;
 
     // Compute the means
      for (int i = 0 ; i < labCnt ; ++i)
	{
	  if (psum[i] != 0)
	    {
	      gsum[i] /= psum[i];
	    }
	} // END for
  
      // Pointer to the pixel map of the component image
      pMapCCImg = (compConnexes->accessComponentImage()).pPixMap() + _width;
      // Delete fake black components
      for (int i = 1 ; i < _height - 1 ; ++i)
	{
	  // Read the current line of components
	  pMapCCImg += 2;
	  PRIVATEgetBlackLabels(pMapCCImg, labRow);

	  // Read the corresponding line in the binary image
	  row(i, bRow);

	  // Examine components and delete
	  Component::label_type* p = labRow;
	  GreyLevelImage::pointer pb = bRow;
	  for (int j = 0 ; j < _width ; ++j, ++p, ++pb)
	    {
	      if (((*pb) != 0) && (gsum[(int)(*p)] < aPostThres))
		{
		  *pb = 0;
		}
	    }

	  // Save this line
	  setRow(i, bRow);
	} // END for

      // Clean up
      delete [] fRow;
      delete [] psum;
      delete [] gsum;
      delete [] labRow;
      delete compConnexes;
    }

  // And clean up
  delete [] bRow;
  delete [] gRow;
  delete [] mRow;
  delete [] sRow;
}
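
The dynamic threshold at the heart of Example #27 is Niblack's rule: a pixel becomes foreground when its grey level falls below the local mean plus k times the local standard deviation, with aLowThres/aHighThres acting as shortcuts that bypass the local statistics. A stand-alone sketch of that per-pixel decision, independent of the QGar image classes, could look like this:

/* Niblack decision for one pixel: 'mean' and 'stddev' are the local
 * statistics over the aMaskSize window; k is typically negative for
 * dark text on a light background. Sketch only. */
inline bool niblackIsForeground(float grey, float mean, float stddev,
                                float k, float lowThres, float highThres)
{
    if (grey < lowThres)  return true;   /* definitely ink   */
    if (grey > highThres) return false;  /* definitely paper */
    return grey < mean + k * stddev;     /* Niblack threshold */
}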
Example #28
CubeSurface CubeSurface::cosinePowerFilter(int size, float cosinePower, EdgeFixup fixupMethod) const
{
    // Allocate output cube.
    CubeSurface filteredCube;
    filteredCube.m->allocate(size);

    // Texel table is stored along with the surface so that it's compute only once.
    m->allocateTexelTable();

    const float threshold = 0.001f;
    const float coneAngle = acosf(powf(threshold, 1.0f/cosinePower));


    // For each texel of the output cube.
    /*for (uint f = 0; f < 6; f++) {
        nvtt::Surface filteredFace = filteredCube.m->face[f];
        FloatImage * filteredImage = filteredFace.m->image;

        for (uint y = 0; y < uint(size); y++) {
            for (uint x = 0; x < uint(size); x++) {

                const Vector3 filterDir = texelDirection(f, x, y, size, fixupMethod);

                // Convolve filter against cube.
                Vector3 color = m->applyCosinePowerFilter(filterDir, coneAngle, cosinePower);

                filteredImage->pixel(0, x, y, 0) = color.x;
                filteredImage->pixel(1, x, y, 0) = color.y;
                filteredImage->pixel(2, x, y, 0) = color.z;
            }
        }
    }*/

    ApplyAngularFilterContext context;
    context.inputCube = m;
    context.filteredCube = filteredCube.m;
    context.coneAngle = coneAngle;
    context.fixupMethod = fixupMethod;

    context.tableSize = 512;
    context.filterTable = new float[context.tableSize];

    // @@ Instead of looking up table between [0 - 1] we should probably use [cos(coneAngle), 1]

    for (int i = 0; i < context.tableSize; i++) {
        float f = float(i) / (context.tableSize - 1);
        context.filterTable[i] = powf(f, cosinePower);
    }
    

    nv::ParallelFor parallelFor(ApplyAngularFilterTask, &context);
    parallelFor.run(6 * size * size);

    // @@ Implement edge averaging.
    if (fixupMethod == EdgeFixup_Average) {
        for (uint f = 0; f < 6; f++) {
            nvtt::Surface filteredFace = filteredCube.m->face[f];
            FloatImage * filteredImage = filteredFace.m->image;

            // For each component.
            for (uint c = 0; c < 3; c++) {
                // @@ For each corner, sample the two adjacent faces.
                filteredImage->pixel(c, 0, 0, 0);
                filteredImage->pixel(c, size-1, 0, 0);
                filteredImage->pixel(c, 0, size-1, 0);
                filteredImage->pixel(c, size-1, size-1, 0);

                // @@ For each edge, sample the adjacent face.

            }
        }
    }

    return filteredCube;
}
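
The cone angle in cosinePowerFilter cuts the filter support where the weight pow(cos(angle), cosinePower) would fall below threshold. For example, with cosinePower = 64 and threshold = 0.001, coneAngle = acosf(powf(0.001f, 1.0f/64.0f)) is roughly acosf(0.898), about 0.46 radians (26 degrees); the coneAngle stored in ApplyAngularFilterContext presumably lets applyAngularFilter (Example #21) restrict the convolution to texels within that cone around filterDir.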
Example #29
Point3m Arc3DModel::TraCorrection(CMeshO &m, int subsampleFactor, int minCount, int smoothSteps)
{
    FloatImage depthImgf;
    CharImage countImgc;
    depthImgf.Open(depthName.toUtf8().data());
    countImgc.Open(countName.toUtf8().data());

    QImage TextureImg;
    TextureImg.load(textureName);

    CombineHandMadeMaskAndCount(countImgc,maskName);  // set count to zero for all masked points

    FloatImage depthSubf;  // the subsampled depth image
    FloatImage countSubf;  // the subsampled quality image (quality == count)

    SmartSubSample(subsampleFactor,depthImgf,countImgc,depthSubf,countSubf,minCount);

    CharImage FeatureMask; // the subsampled image with (quality == features)
    GenerateGradientSmoothingMask(subsampleFactor, TextureImg, FeatureMask);

    depthSubf.convertToQImage().save("tmp_depth.jpg", "jpg");

    float depthThr = ComputeDepthJumpThr(depthSubf,0.8f);
    for(int ii=0;ii<smoothSteps;++ii)
        Laplacian2(depthSubf,countSubf,minCount,FeatureMask,depthThr);

    vcg::tri::Grid<CMeshO>(m,depthSubf.w,depthSubf.h,depthImgf.w,depthImgf.h,&*depthSubf.v.begin());

    // The depth is filtered and the minimum count mask is update accordingly.
    // To be more specific the border of the depth map are identified by erosion
    // and the relative vertex removed (by setting mincount equal to 0).
    ComputeDepthJumpThr(depthSubf,0.95f);

    int vn = m.vn;
    for(int i=0;i<vn;++i)
        if(countSubf.v[i]<minCount)
        {
            m.vert[i].SetD();
            m.vn--;
        }

    cam.Open(cameraName.toUtf8().data());

    CMeshO::VertexIterator vi;
    Matrix33d Rinv= Inverse(cam.R);
    Point3m correction(0.0,0.0,0.0);
    int numSamp=0;

    for(vi=m.vert.begin();vi!=m.vert.end();++vi)if(!(*vi).IsD())
    {
        Point3m in=(*vi).P();
        Point3d out;
        correction+=cam.DepthTo3DPoint(in[0], in[1], in[2], out);
        numSamp++;
    }
    if (numSamp!=0)
        correction/=(double)numSamp;

    return correction;

}
Example #30
bool Arc3DModel::BuildMesh(CMeshO &m, int subsampleFactor, int minCount, float minAngleCos, int smoothSteps,
    bool dilation, int dilationPasses, int dilationSize,
    bool erosion, int erosionPasses, int erosionSize,float scalingFactor)
{
    FloatImage depthImgf;
    CharImage countImgc;
    clock();
    depthImgf.Open(depthName.toUtf8().data());
    countImgc.Open(countName.toUtf8().data());

    QImage TextureImg;
    TextureImg.load(textureName);
    clock();

    CombineHandMadeMaskAndCount(countImgc,maskName);  // set count to zero for all masked points

    FloatImage depthSubf;  // the subsampled depth image
    FloatImage countSubf;  // the subsampled quality image (quality == count)

    SmartSubSample(subsampleFactor,depthImgf,countImgc,depthSubf,countSubf,minCount);

    CharImage FeatureMask; // the subsampled image with (quality == features)
    GenerateGradientSmoothingMask(subsampleFactor, TextureImg, FeatureMask);

    depthSubf.convertToQImage().save("tmp_depth.jpg", "jpg");

    clock();

    float depthThr = ComputeDepthJumpThr(depthSubf,0.8f);
    for(int ii=0;ii<smoothSteps;++ii)
        Laplacian2(depthSubf,countSubf,minCount,FeatureMask,depthThr);

    clock();

    vcg::tri::Grid<CMeshO>(m,depthSubf.w,depthSubf.h,depthImgf.w,depthImgf.h,&*depthSubf.v.begin());

    clock();


    // The depth is filtered and the minimum count mask is update accordingly.
    // To be more specific the border of the depth map are identified by erosion
    // and the relative vertex removed (by setting mincount equal to 0).
    float depthThr2 = ComputeDepthJumpThr(depthSubf,0.95f);
    depthFilter(depthSubf, countSubf, depthThr2,
        dilation, dilationPasses, dilationSize,
        erosion, erosionPasses, erosionSize);

    int vn = m.vn;
    for(int i=0;i<vn;++i)
        if(countSubf.v[i]<minCount)
        {
            m.vert[i].SetD();
            m.vn--;
        }

        cam.Open(cameraName.toUtf8().data());

        CMeshO::VertexIterator vi;
        Matrix33d Rinv= Inverse(cam.R);

        for(vi=m.vert.begin();vi!=m.vert.end();++vi)if(!(*vi).IsD())
        {
            Point3m in=(*vi).P();
            Point3d out;
            cam.DepthTo3DPoint(in[0], in[1], in[2], out);

            (*vi).P().Import(out);
            QRgb c = TextureImg.pixel(int(in[0]), int(in[1]));
            vcg::Color4b tmpcol(qRed(c),qGreen(c),qBlue(c),0);
            (*vi).C().Import(tmpcol);
            if(FeatureMask.Val(int(in[0]/subsampleFactor), int(in[1]/subsampleFactor))<200) (*vi).Q()=0;
            else (*vi).Q()=1;
            (*vi).Q()=float(FeatureMask.Val(in[0]/subsampleFactor, in[1]/subsampleFactor))/255.0;
        }

        clock();

        CMeshO::FaceIterator fi;
        Point3m CameraPos = Point3m::Construct(cam.t);
        for(fi=m.face.begin();fi!=m.face.end();++fi)
        {

            if((*fi).V(0)->IsD() ||(*fi).V(1)->IsD() ||(*fi).V(2)->IsD() )
            {
                (*fi).SetD();
                --m.fn;
            }
            else
            {
                Point3m n=vcg::TriangleNormal(*fi);
                n.Normalize();
                Point3m dir=CameraPos-vcg::Barycenter(*fi);
                dir.Normalize();
                if(dir.dot(n) < minAngleCos)
                {
                    (*fi).SetD();
                    --m.fn;
                }
            }
        }

        tri::Clean<CMeshO>::RemoveUnreferencedVertex(m);
        clock();

        Matrix44m scaleMat;
        scaleMat.SetScale(scalingFactor,scalingFactor,scalingFactor);
        vcg::tri::UpdatePosition<CMeshO>::Matrix(m, scaleMat);

        return true;
}