void DAI4REID_ParsedInstance::nextFrame(QHashDataFrames &output)
{
    // Read Color File
    QString instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Color);
    cv::Mat color_mat = cv::imread(instancePath.toStdString());
    cv::cvtColor(color_mat, color_mat, CV_BGR2RGB);
    ColorFrame srcColor(color_mat.cols, color_mat.rows, (RGBColor*) color_mat.data);
    shared_ptr<ColorFrame> dstColor = make_shared<ColorFrame>();
    *dstColor = srcColor; // Copy
    output.insert(DataFrame::Color, dstColor);

    // Read Depth File
    instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Depth);
    QFile depthFile(instancePath);
    depthFile.open(QIODevice::ReadOnly);
    QByteArray buffer = depthFile.readAll();
    depthFile.close();
    shared_ptr<DepthFrame> depthFrame = make_shared<DepthFrame>();
    depthFrame->loadData(buffer);
    depthFrame->setDistanceUnits(dai::DISTANCE_MILIMETERS);
    depthFrame->setCameraIntrinsics(594.21434211923247, 320.0, -591.04053696870778, 240.0);
    output.insert(DataFrame::Depth, depthFrame);

    // The depth binary may carry a frame offset; give the color frame the same one.
    dstColor->setOffset(depthFrame->offset());

    /*instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Depth);
    cv::Mat depth_mat = cv::imread(instancePath.toStdString()); //, CV_LOAD_IMAGE_GRAYSCALE);
    qDebug() << depth_mat.type() << depth_mat.depth() << depth_mat.channels();
    DepthFrame srcDepth(depth_mat.cols, depth_mat.rows, (uint16_t*) depth_mat.data, depth_mat.step);
    shared_ptr<DepthFrame> depthFrame = make_shared<DepthFrame>();
    *depthFrame = srcDepth; // Copy
    depthFrame->setDistanceUnits(dai::MILIMETERS);
    output.insert(DataFrame::Depth, depthFrame);*/

    // Read Mask File
    instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Mask);
    QFile maskFile(instancePath);
    maskFile.open(QIODevice::ReadOnly);
    buffer = maskFile.readAll();
    maskFile.close();
    shared_ptr<MaskFrame> maskFrame = make_shared<MaskFrame>();
    maskFrame->loadData(buffer);
    output.insert(DataFrame::Mask, maskFrame);

    // Read Skeleton File
    instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Skeleton);
    QFile skeletonFile(instancePath);
    skeletonFile.open(QIODevice::ReadOnly);
    buffer = skeletonFile.readAll();
    skeletonFile.close();
    shared_ptr<Skeleton> skeleton = Skeleton::fromBinary(buffer);
    skeleton->setCameraIntrinsics(594.21434211923247, 320.0, -591.04053696870778, 240.0);
    shared_ptr<SkeletonFrame> skeletonFrame = make_shared<SkeletonFrame>();
    skeletonFrame->setSkeleton(1, skeleton);
    output.insert(DataFrame::Skeleton, skeletonFrame);
}
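The intrinsics passed to setCameraIntrinsics() above appear to follow the (fx, cx, fy, cy) argument order used by the IASLAB loader further down, i.e. fx ≈ 594.21, cx = 320, fy ≈ -591.04, cy = 240. As a minimal sketch of how such values are typically used, the snippet below back-projects a depth pixel to a 3D point under a standard pinhole model; the function and the Point3 type are illustrative only and are not part of the dai framework.

#include <cstdint>

// Illustrative pinhole back-projection; fx, fy, cx, cy are assumed to be
// the same values passed to setCameraIntrinsics() above.
struct Point3 { float x, y, z; };

Point3 backProject(int u, int v, uint16_t depthMm,
                   float fx, float fy, float cx, float cy)
{
    float z = depthMm / 1000.0f;     // depth stored in millimetres -> metres
    float x = (u - cx) * z / fx;     // horizontal offset from the principal point
    float y = (v - cy) * z / fy;     // vertical offset (fy is negative here,
                                     // which flips the image y axis)
    return Point3{x, y, z};
}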
Example #2
void parseCommandLine( int argc, char **argv ){
	int ch;
	string inputFile( "" ), outputFile( "" ), depthFile( "" );
	int resolution = 0;
	static struct option longopts[] = {
		{ "input", required_argument, NULL, 'i' },
		{ "output", required_argument, NULL, 'o' },
		{ "depth", required_argument, NULL, 'd' },
		{ "resolution", required_argument, NULL, 'r' },
		{ "verbose", no_argument, NULL, 'v' },
		{ "help", no_argument, NULL, 'h' },
		{ NULL, 0, NULL, 0 }
	};

	while( (ch = getopt_long(argc, argv, "i:o:d:r:vh", longopts, NULL)) != -1 ){
		switch( ch ){
			case 'i':
				// input file
				inputFile = string(optarg);
				break;
			case 'o':
				// image output file
				outputFile = string(optarg);
				break;
			case 'd':
				// depth output file
				depthFile = string( optarg );
				break;
			case 'r':
				resolution = atoi(optarg);
				break;
			case 'v':
				// set your flag here.
				break;
			case 'h':
				usage();
				break;
			default:
				// do nothing
				break;
		}
	}
	gTheScene = new Scene( inputFile, outputFile, depthFile );
}
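With --verbose and --help declared as no_argument (matching the "vh" part of the short-option string), a typical invocation of a program using this parser could look like the line below; the binary and file names are purely illustrative.

	./render --input scene.txt --output image.ppm --depth depth.pgm --resolution 512 -v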
Example #3
void MDAL::DriverFlo2D::parseDEPTHFile( const std::string &datFileName, const std::vector<double> &elevations )
{
  // this file is optional, so if not present, reading is skipped
  std::string depthFile( fileNameFromDir( datFileName, "DEPTH.OUT" ) );
  if ( !MDAL::fileExists( depthFile ) )
  {
    return; //optional file
  }

  std::ifstream depthStream( depthFile, std::ifstream::in );
  std::string line;

  size_t nVertices = mMesh->verticesCount();
  std::vector<double> maxDepth( nVertices );
  std::vector<double> maxWaterLevel( nVertices );

  size_t vertex_idx = 0;

  // DEPTH.OUT - COORDINATES (ELEM NUM, X, Y, MAX DEPTH)
  while ( std::getline( depthStream, line ) )
  {
    line = MDAL::rtrim( line );
    if ( vertex_idx == nVertices ) throw MDAL_Status::Err_IncompatibleMesh;

    std::vector<std::string> lineParts = MDAL::split( line, ' ' );
    if ( lineParts.size() != 4 )
    {
      throw MDAL_Status::Err_UnknownFormat;
    }

    double val = getDouble( lineParts[3] );
    maxDepth[vertex_idx] = val;

    //water level
    if ( !is_nodata( val ) ) val += elevations[vertex_idx];
    maxWaterLevel[vertex_idx] = val;


    vertex_idx++;
  }

  addStaticDataset( true, maxDepth, "Depth/Maximums", datFileName );
  addStaticDataset( true, maxWaterLevel, "Water Level/Maximums", datFileName );
}
void IASLAB_RGBD_ID_Instance::nextFrame(QHashDataFrames &output)
{
    // Read Color File
    QString instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Color);
    cv::Mat color_mat = cv::imread(instancePath.toStdString());
    cv::cvtColor(color_mat, color_mat, CV_BGR2RGB);
    ColorFrame srcColor(color_mat.cols, color_mat.rows, (RGBColor*) color_mat.data);
    shared_ptr<ColorFrame> dstColor = make_shared<ColorFrame>();
    *dstColor = srcColor; // Copy
    output.insert(DataFrame::Color, dstColor);

    // Read Depth File
    instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Depth);
    QFile depthFile(instancePath);
    depthFile.open(QIODevice::ReadOnly);
    QByteArray buffer = depthFile.readAll();
    depthFile.close();
    shared_ptr<DepthFrame> depthFrame_tmp = make_shared<DepthFrame>(640, 480, (uint16_t*) (buffer.data() + 16));
    shared_ptr<DepthFrame> depthFrame = shared_ptr<DepthFrame>(new DepthFrame(*depthFrame_tmp)); // Clone!
    depthFrame->setDistanceUnits(dai::DISTANCE_MILIMETERS);
    // Set Depth intrinsics of the camera that generated this frame
    depthFrame->setCameraIntrinsics(fx_d, cx_d, fy_d, cy_d);
    output.insert(DataFrame::Depth, depthFrame);

    // Read Mask File
    instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Mask);
    QFile maskFile(instancePath);
    maskFile.open(QIODevice::ReadOnly);
    buffer = maskFile.readAll();
    maskFile.close();
    shared_ptr<MaskFrame> maskFrame_tmp = make_shared<MaskFrame>(640, 480, (uint8_t*) (buffer.data() + 14));
    shared_ptr<MaskFrame> maskFrame = shared_ptr<MaskFrame>(new MaskFrame(*maskFrame_tmp)); // Clone!
    output.insert(DataFrame::Mask, maskFrame);

    // Read Skeleton txt file (line by line)
    /*For every frame, a skeleton file is available. For every joint, a row with the following information is written to the skeleton file:
    [id]: person ID,
    [x3D], [y3D], [z3D]: joint 3D position,
    [x2D], [y2D]: joint 2D position,
    [TrackingState]: 0 (not tracked), 1 (inferred) or 2 (tracked),
    [QualityFlag]: not implemented by NiTE, thus it is always 0,
    [OrientationStartJoint], [OrientationEndJoint]: indices of the extreme joints of a link,
    [Qx], [Qy], [Qz], [Qw]: quaternion expressing the orientation of the link identified by [OrientationStartJoint] and [OrientationEndJoint].
    NiTE provides 15 joints, but these are remapped to the 20 joints that Microsoft Kinect SDK estimates, in order to have the same format (the joint positions missing in NiTE (wrists, ankles) are copied from other joints (hands, feet)).
    For more information, please visit http://msdn.microsoft.com/en-us/library/hh973074.aspx.
    You can also visualize the data with the "visualize_IASLab_RGBDID_dataset.m" Matlab script we provided.*/
    instancePath = m_info.parent().getPath() + "/" + m_info.getFileName(DataFrame::Skeleton);
    QFile skeletonFile(instancePath);
    skeletonFile.open(QIODevice::ReadOnly);
    QTextStream in(&skeletonFile);
    shared_ptr<Skeleton> skeleton = make_shared<Skeleton>();

    QList<int> ignore_lines = {1, 7, 11, 15, 19};
    int lineCount = 0;

    while (!in.atEnd())
    {
        QStringList columns = in.readLine().split(",");

        if (ignore_lines.contains(lineCount)) {
            lineCount++;
            continue;
        }

        SkeletonJoint joint(Point3f(columns[1].toFloat() * 1000.0f,
                                    -columns[2].toFloat() * 1000.0f,
                                    columns[3].toFloat() * 1000.0f), _staticMap[lineCount]);
        int trackingState = columns[6].toInt();

        if (trackingState == 0)
            joint.setPositionConfidence(0.0f);

        joint.setOrientation(Quaternion(columns[13].toFloat(),
                             columns[10].toFloat(), columns[11].toFloat(), columns[12].toFloat()));

        //joint.setOrientationConfidence(niteJoint.getOrientationConfidence());
        skeleton->setJoint(_staticMap[lineCount], joint);
        lineCount++;
    }

    skeletonFile.close();

    // Set intrinsics of the camera that generated this frame (depth camera in this case)
    skeleton->setCameraIntrinsics(fx_d, cx_d, fy_d, cy_d);
    shared_ptr<SkeletonFrame> skeletonFrame = make_shared<SkeletonFrame>();
    skeletonFrame->setSkeleton(1, skeleton);
    output.insert(DataFrame::Skeleton, skeletonFrame);

    // Register depth to color
    depth2color(depthFrame, maskFrame, skeleton);
}
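depth2color() itself is not part of this listing. The sketch below shows what a depth-to-color registration step of this kind typically does, assuming known pinhole intrinsics for both cameras and a rigid transform (R, t) from the depth camera frame to the color camera frame; all names are illustrative and this is not the loader's actual implementation.

#include <cstdint>
#include <vector>

// Illustrative depth-to-color registration; NOT the loader's depth2color().
// R is a row-major 3x3 rotation, t a translation (metres) from the depth
// camera frame to the color camera frame; both sets of intrinsics are assumed.
void registerDepthToColor(const std::vector<uint16_t>& depth, int dw, int dh,
                          float fx_d, float fy_d, float cx_d, float cy_d,
                          const std::vector<uint32_t>& color, int cw, int ch,
                          float fx_c, float fy_c, float cx_c, float cy_c,
                          const float R[9], const float t[3],
                          std::vector<uint32_t>& registered)
{
    registered.assign(depth.size(), 0u);
    for (int v = 0; v < dh; ++v) {
        for (int u = 0; u < dw; ++u) {
            uint16_t d = depth[v * dw + u];
            if (d == 0) continue;                  // no depth measurement
            float z = d / 1000.0f;                 // millimetres -> metres
            // Back-project the pixel into the depth camera frame
            float x = (u - cx_d) * z / fx_d;
            float y = (v - cy_d) * z / fy_d;
            // Rigid transform into the color camera frame
            float xc = R[0]*x + R[1]*y + R[2]*z + t[0];
            float yc = R[3]*x + R[4]*y + R[5]*z + t[1];
            float zc = R[6]*x + R[7]*y + R[8]*z + t[2];
            if (zc <= 0.0f) continue;
            // Project into the color image and fetch the matching pixel
            int uc = int(fx_c * xc / zc + cx_c);
            int vc = int(fy_c * yc / zc + cy_c);
            if (uc >= 0 && uc < cw && vc >= 0 && vc < ch)
                registered[v * dw + u] = color[vc * cw + uc];
        }
    }
}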
Example #5
int main(int argc,char* argv[])
	{
	/* Set up the volumetric grid: */
	Box gridBox=Box(Point(-32.0,-64.0,16.0),Point(32.0,0.0,80.0));
	Index gridSize(256,256,256);
	Grid grid(gridSize);
	
	/* Initialize the grid: */
	for(Grid::iterator gIt=grid.begin();gIt!=grid.end();++gIt)
		*gIt=Voxel(255);
	
	/* Carve away the n-th facade from each depth stream file listed on the command line: */
	unsigned int facadeIndex=atoi(argv[1]);
	for(int depthFileIndex=2;depthFileIndex<argc;++depthFileIndex)
		{
		try
			{
			/* Open the depth file: */
			IO::AutoFile depthFile(IO::openFile(argv[depthFileIndex]));
			depthFile->setEndianness(IO::File::LittleEndian);
			
			/* Read the facade projection matrix and the projector transformation: */
			Projection depthTransform;
			depthFile->read<double>(depthTransform.getMatrix().getEntries(),4*4);
			OGTransform projectorTransform=Misc::Marshaller<OGTransform>::read(*depthFile);
			
			/* Calculate the joint projective transformation from 3D world space into depth image space: */
			Projection proj=Geometry::invert(Projection(projectorTransform)*depthTransform);
			
			/* Create a depth frame reader: */
			DepthFrameReader depthFrameReader(*depthFile);
			
			/* Read the n-th facade: */
			FrameBuffer frame;
			for(unsigned int i=0;i<facadeIndex;++i)
				frame=depthFrameReader.readNextFrame();
			
			/* Run a sequence of morphological open and close operators on the frame to fill holes: */
			#if 1
			for(int i=0;i<8;++i)
				frame=open(frame);
			#endif
			#if 1
			for(int i=0;i<8;++i)
				frame=close(frame);
			#endif
			
			/* Carve the facade out of the grid: */
			std::cout<<"Processing depth file "<<argv[depthFileIndex]<<std::flush;
			double fmax[2];
			for(int i=0;i<2;++i)
				fmax[i]=double(frame.getSize(i));
			unsigned short* frameBuffer=static_cast<unsigned short*>(frame.getBuffer());
			Size cellSize;
			for(int i=0;i<3;++i)
				cellSize[i]=(gridBox.max[i]-gridBox.min[i])/double(gridSize[i]);
			Index index;
			Point gridp;
			gridp[0]=gridBox.min[0]+0.5*cellSize[0];
			for(index[0]=0;index[0]<gridSize[0];++index[0],gridp[0]+=cellSize[0])
				{
				gridp[1]=gridBox.min[1]+0.5*cellSize[1];
				for(index[1]=0;index[1]<gridSize[1];++index[1],gridp[1]+=cellSize[1])
					{
					gridp[2]=gridBox.min[2]+0.5*cellSize[2];
					for(index[2]=0;index[2]<gridSize[2];++index[2],gridp[2]+=cellSize[2])
						{
						/* Project the grid point into the depth frame: */
						Point fp=proj.transform(gridp);
						
						/* Check if the projected grid point is inside the depth frame: */
						if(fp[0]>=0.0&&fp[0]<fmax[0]&&fp[1]>=0.0&&fp[1]<fmax[1])
							{
							/* Check if the grid point is outside the facade: */
							int x=int(fp[0]);
							int y=int(fp[1]);
							unsigned short depth=frameBuffer[y*frame.getSize(0)+x];
							if(fp[2]<double(depth))
								grid(index)=Voxel(0);
							}
						else
							grid(index)=Voxel(0);
						}
					}
				std::cout<<'.'<<std::flush;
				}
			std::cout<<"done"<<std::endl;
			}
		catch(const std::runtime_error& err)
			{
			std::cerr<<"Ignoring depth file "<<argv[depthFileIndex]<<" due to exception "<<err.what()<<std::endl;
			}
		catch(...)
			{
			std::cerr<<"Ignoring depth file "<<argv[depthFileIndex]<<" due to spurious exception"<<std::endl;
			}
		}
	
	/* Save the result grid to a volume file: */
	IO::AutoFile volFile(IO::openFile("SpaceCarverOut.vol",IO::File::WriteOnly));
	volFile->setEndianness(IO::File::BigEndian);
	for(int i=0;i<3;++i)
		volFile->write<int>(int(gridSize[i]));
	volFile->write<int>(0);
	for(int i=0;i<3;++i)
		volFile->write<float>((gridBox.max[i]-gridBox.min[i])*double(gridSize[i]-1)/double(gridSize[i]));
	volFile->write<Voxel>(grid.getArray(),grid.getNumElements());
	
	return 0;
	}
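The open() and close() helpers used above to fill holes in the depth frame are not included in this listing. As a rough sketch of the same idea, the snippet below applies a greyscale morphological opening and closing to a 16-bit depth image with OpenCV; using OpenCV here is an assumption, and this is not the Kinect package's own frame-based implementation.

#include <opencv2/imgproc.hpp>

// Illustrative hole filling on a 16-bit depth image: a morphological opening
// (suppresses isolated spikes) followed by a closing (fills small holes).
cv::Mat fillDepthHoles(const cv::Mat& depth16u)
{
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
    cv::Mat result = depth16u.clone();
    cv::morphologyEx(result, result, cv::MORPH_OPEN, kernel);
    cv::morphologyEx(result, result, cv::MORPH_CLOSE, kernel);
    return result;
}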