Example #1
0
void ImagesBufferReader::ReadRawDepth(std::stringstream& data_path, std::string filename,
    int width, int height, DepthImageType& resImage)
{
    // Reads a raw binary depth image (width*height CoordinateType values,
    // stored column-major on disk) from data_path/filename, transposes it
    // and copies the result into resImage.
    //
    // Fix vs. original: the stream state is now checked, so a missing or
    // truncated file no longer produces an image built from uninitialized
    // memory, and the buffer is freed on every exit path.
    std::stringstream imagePath;
    imagePath << data_path.str() << filename;

    // Widen before multiplying to avoid int overflow for large images.
    const std::size_t numPixels =
        static_cast<std::size_t>(width) * static_cast<std::size_t>(height);
    CoordinateType* ref_buffer = new CoordinateType[numPixels];

    std::ifstream inputFileStream(imagePath.str().c_str(), std::ios::binary);
    bool ok = inputFileStream.is_open();
    if (ok)
    {
        inputFileStream.read((char*)ref_buffer,
                             sizeof(CoordinateType) * numPixels);
        // read() failure (eof/short read) leaves the stream in a fail state.
        ok = static_cast<bool>(inputFileStream);
        inputFileStream.close();
    }

    if (ok)
    {
        DepthImageType imgTemp(width, height, ref_buffer);
        // Disk layout vs. in-memory layout mismatch — transpose. TODO confirm
        // against the writer's storage order.
        imgTemp = imgTemp.t();
        imgTemp.copyTo(resImage);
    }
    delete[] ref_buffer;
}
Example #2
0
RType TGV2_3D::ComputeGStar(CVector &x, std::vector<CVector> &y1,
                         std::vector<CVector> &y2, CVector &z,
                         CVector &data_gpu, CVector &b1_gpu)
{
  // Evaluates the dual objective value G* used when computing the
  // primal-dual gap of the 3D TGV2 reconstruction. The four summands are:
  // the data fidelity F(Kx), its convex conjugate F*(z), and the L1 norms
  // of the (symmetric) divergence coupling terms.
  const unsigned numVoxels = width * height * depth;

  // F(Kx): residual between the operator-applied image and measured data.
  CVector kx(numVoxels * coils);
  CVector residual(numVoxels * coils);
  mrOp->BackwardOperation(x, kx, b1_gpu);
  agile::subVector(kx, data_gpu, residual);

  const RType dataTerm = 0.5 * params.lambda *
                         std::real(agile::getScalarProduct(residual, residual));

  // F*(z): <data, z> + ||z||^2 / (2 lambda)
  RType dualDataTerm = std::real(agile::getScalarProduct(data_gpu, z));
  dualDataTerm +=
      1.0 / (2.0 * params.lambda) * std::real(agile::getScalarProduct(z, z));

  // G*(-Kx): scratch vectors for the divergence terms.
  CVector img(numVoxels);
  CVector div1(numVoxels);
  std::vector<CVector> div2;
  for (unsigned i = 0; i < 3; ++i)
    div2.push_back(CVector(numVoxels));

  mrOp->ForwardOperation(z, img, b1_gpu);
  utils::Divergence(y1, div1, width, height, depth, params.dx, params.dy,
                    params.dz);
  agile::subVector(img, div1, div1);
  const RType divTerm = agile::norm1(div1);

  utils::SymmetricDivergence(y2, div2, width, height, depth, params.dx,
                             params.dy, params.dz);
  RType symDivTerm = 0;
  for (unsigned i = 0; i < 3; ++i)
  {
    // -y(:,:,:,1:3) - div3_6
    agile::addVector(div2[i], y1[i], div2[i]);
    symDivTerm += agile::norm1(div2[i]);
  }

  return dataTerm + dualDataTerm + divTerm + symDivTerm;
}
Example #3
0
void TGV2_3D::IterativeReconstruction(CVector &data_gpu, CVector &x1,
                                   CVector &b1_gpu)
{
  // Primal-dual first-order iteration (Chambolle-Pock style) for the
  // TGV2-regularized 3D MR reconstruction problem.
  //
  //   data_gpu : measured k-space data (coil domain, size N*coils)
  //   x1       : primal image variable (in/out; its initial content is used
  //              as a warm start via the copy into ext1 below)
  //   b1_gpu   : coil sensitivity data passed through to the MR operator
  //
  // Internal variables:
  //   x2 (3)   : auxiliary primal vector field of the TGV2 model
  //   y1 (3)   : dual variable of the gradient term
  //   y2 (6)   : dual variable of the symmetric-gradient term
  //   z        : dual variable of the data-fidelity term
  //
  // Stops after params.maxIt iterations, or early when the normalized
  // primal-dual gap drops below params.stopPDGap (checked every 20
  // iterations when that threshold is positive).
  if (verbose)
    TestAdjointness(b1_gpu);

  // Number of voxels per volume.
  unsigned N = width * height * depth;

  //TODO: compute for dx,dy,dz
  //ComputeTimeSpaceWeights(params.timeSpaceWeight, params.ds, params.dt);
  //Log("Setting ds: %.3e, dt: %.3e\n", params.ds, params.dt);
   Log("Setting Primal-Dual Gap of %.3e  as stopping criterion \n", params.stopPDGap);


  // Auxiliary primal variable x2 (3 components), zero-initialized.
  std::vector<CVector> x2;
  for (unsigned cnt = 0; cnt < 3; cnt++)
  {
    x2.push_back(CVector(N));
    x2[cnt].assign(N, 0.0);
  }

  // primal
  // ext1/ext2 are the over-relaxed ("extra gradient") primal points; the
  // *_old copies hold x_{n+1} while x_n is still needed.
  CVector ext1(N), x1_old(N);
  std::vector<CVector> ext2, x2_old;
  for (unsigned cnt = 0; cnt < 3; cnt++)
  {
    ext2.push_back(CVector(N));
    x2_old.push_back(CVector(N));
  }
  agile::copy(x1, ext1);

  // dual
  // y1: gradient dual (3 components), y1Temp: per-iteration scratch.
  std::vector<CVector> y1;
  std::vector<CVector> y1Temp;
  for (int cnt = 0; cnt < 3; cnt++)
  {
    y1.push_back(CVector(N));
    y1[cnt].assign(N, 0.0);
    y1Temp.push_back(CVector(N));
  }
  // y2: symmetric-gradient dual (6 independent tensor components).
  std::vector<CVector> y2;
  std::vector<CVector> y2Temp;
  for (int cnt = 0; cnt < 6; cnt++)
  {
    y2.push_back(CVector(N));
    y2[cnt].assign(N, 0);
    y2Temp.push_back(CVector(N));
  }
  // z: data-fidelity dual in the coil domain.
  CVector z(N * coils);
  CVector zTemp(N * coils);
  z.assign(N * coils, 0.0);
  zTemp.assign(N * coils, 0.0);

  CVector imgTemp(N);

  // Scratch for the divergence terms of the primal descent step.
  CVector div1Temp(N);
  std::vector<CVector> div2Temp;
  for (unsigned cnt = 0; cnt < 3; cnt++)
    div2Temp.push_back(CVector(N));

  unsigned loopCnt = 0;
  // loop
  Log("Starting iteration\n");
  while (loopCnt < params.maxIt)
  {
    // dual ascent step
    // p
    // NOTE(review): unlike the Divergence/SymmetricDivergence calls below,
    // Gradient and SymmetricGradient are not passed `depth` here — confirm
    // these overloads derive the slice count internally.
    utils::Gradient(ext1, y1Temp, width, height, params.dx, params.dy,
                    params.dz);
    for (unsigned cnt = 0; cnt < 3; cnt++)
    {
      // y1 += sigma * (grad(ext1) - ext2)   (TGV coupling term)
      agile::subVector(y1Temp[cnt], ext2[cnt], y1Temp[cnt]);
      agile::addScaledVector(y1[cnt], params.sigma, y1Temp[cnt], y1[cnt]);
    }
    // q
    // y2 += sigma * symgrad(ext2)
    utils::SymmetricGradient(ext2, y2Temp, width, height, params.dx, params.dy,
                             params.dz);
    for (unsigned cnt = 0; cnt < 6; cnt++)
    {
      agile::addScaledVector(y2[cnt], params.sigma, y2Temp[cnt], y2[cnt]);
    }

    // z += sigma * K(ext1)
    mrOp->BackwardOperation(ext1, zTemp, b1_gpu);
    agile::addScaledVector(z, params.sigma, zTemp, z);

    // Proximal mapping
    // Pointwise projections of y1/y2 onto the dual-norm balls of radius
    // alpha1 / alpha0 respectively.
    utils::ProximalMap3(y1, (DType)1.0 / params.alpha1);
    utils::ProximalMap6(y2, (DType)1.0 / params.alpha0);

    // Resolvent of the data-fidelity dual:
    // z = (z - sigma*data) / (1 + sigma/lambda)
    agile::subScaledVector(z, params.sigma, data_gpu, z);
    agile::scale((float)(1.0 / (1.0 + params.sigma / params.lambda)), z, z);

    // primal descent
    // ext1
    // ext1 = x1 - tau * (K^H z - div(y1))
    mrOp->ForwardOperation(z, imgTemp, b1_gpu);
    utils::Divergence(y1, div1Temp, width, height, depth, params.dx, params.dy,
                      params.dz);
    agile::subVector(imgTemp, div1Temp, div1Temp);
    agile::subScaledVector(x1, params.tau, div1Temp, ext1);

    // ext2
    // ext2 = x2 + tau * (y1 + symdiv(y2))
    utils::SymmetricDivergence(y2, div2Temp, width, height, depth, params.dx,
                               params.dy, params.dz);
    for (unsigned cnt = 0; cnt < 3; cnt++)
    {
      agile::addVector(y1[cnt], div2Temp[cnt], div2Temp[cnt]);
      agile::addScaledVector(x2[cnt], params.tau, div2Temp[cnt], ext2[cnt]);
    }

    // save x_n+1
    agile::copy(ext1, x1_old);
    for (unsigned cnt = 0; cnt < 3; cnt++)
      agile::copy(ext2[cnt], x2_old[cnt]);

    // extra gradient
    // Over-relaxation: ext = 2*x_{n+1} - x_n
    agile::scale(2.0f, ext1, ext1);
    agile::subVector(ext1, x1, ext1);
    // x_n = x_n+1
    agile::copy(x1_old, x1);

    for (unsigned cnt = 0; cnt < 3; cnt++)
    {
      agile::scale((DType)2.0, ext2[cnt], ext2[cnt]);
      agile::subVector(ext2[cnt], x2[cnt], ext2[cnt]);
      agile::copy(x2_old[cnt], x2[cnt]);
    }

    // adapt step size
    // Runs every iteration for the first 10, then every 50th. div1Temp /
    // div2Temp are reused as scratch for the primal increments.
    if (loopCnt < 10 || (loopCnt % 50 == 0))
    {
      agile::subVector(ext1, x1, div1Temp);
      for (unsigned cnt = 0; cnt < 3; cnt++)
      {
        agile::subVector(ext2[cnt], x2[cnt], div2Temp[cnt]);
      }
      AdaptStepSize(div1Temp, div2Temp, b1_gpu);
    }

    // compute PD Gap (export,verbose,stopping)
    if ( (verbose && (loopCnt < 10 || (loopCnt % 50 == 0)) ) ||
         ((debug) && (loopCnt % debugstep == 0)) ||
         ((params.stopPDGap > 0) && (loopCnt % 20 == 0)) )
    {
      RType pdGap =
            ComputePDGap(x1, x2, y1, y2, z, data_gpu, b1_gpu);
      // Normalize by problem size so the stopping threshold is
      // resolution-independent.
      pdGap=pdGap/N;

      // Early exit once the gap-based stopping criterion is met.
      if ( pdGap < params.stopPDGap )
        return;

      pdGapExport.push_back( pdGap );
      Log("Normalized Primal-Dual Gap after %d iterations: %.4e\n", loopCnt, pdGap);
    }

/*
    // adapt step size
    if (loopCnt < 10 || (loopCnt % 50 == 0))
    {
      CVector temp1(N);
   
      agile::subVector(ext1, x1, temp1);
      std::vector<CVector> temp2;
      for (unsigned cnt = 0; cnt < 3; cnt++)
      {
        temp2.push_back(CVector(N));
        agile::subVector(ext2[cnt], x2[cnt], temp2[cnt]);
      }
      AdaptStepSize(temp1, temp2, b1_gpu);

      if (verbose)
      {
        RType pdGap = 1.0;
        RType pdGap = ComputePDGap(x1, x2, y1, y2, z, data_gpu, b1_gpu);
        Log("Normalized Primal-Dual Gap after %d iterations: %.4e\n", loopCnt, pdGap/N);
      }  
    }
    
    // compute PD Gap for export
    if ((debug) && (loopCnt % debugstep == 0))
    {
        RType pdGap = ComputePDGap(x1, x2, y1, y2, z, data_gpu, b1_gpu);
        pdGapExport.push_back( pdGap/N );
    }
*/

    loopCnt++;
    // Console progress indicator: one dot per 10 iterations.
    if (loopCnt % 10 == 0)
      std::cout << "." << std::flush;
  }
  std::cout << std::endl;
}