Example #1
bool videoOptiTrack::open(gem::Properties& props) {
	if(!CameraManager::X().AreCamerasInitialized()) {
		return false;
	}
	CameraList list;
	if(!m_devname.empty()) {
		for(unsigned int i=0; i<list.Count(); i++) {
			if(list[i].Name()==m_devname)
				m_camera = CameraManager::X().GetCamera(list[i].UID());
			if(m_camera) break;
		}
		if(!m_camera)
			return false;
	} else if(m_devnum>=0) {
		if(m_devnum<list.Count())
			m_camera = CameraManager::X().GetCamera(list[m_devnum].UID());
		else
			return false;
	}
	/* auto mode */
	if(!m_camera)
		m_camera = CameraManager::X().GetCamera();

	if(!m_camera) {
		return false;
	}
	m_pixBlock.image.xsize = m_camera->Width();
	m_pixBlock.image.ysize = m_camera->Height();

	m_camera->SetVideoType(MJPEGMode);
	m_quality=m_camera->MJPEGQuality();

	setProperties(props);
	return true;
}
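Examples #1 and #5 share the same device-selection pattern: construct a CameraList (the OptiTrack SDK populates it with the attached cameras), match by name, and fall back to the first available device. A minimal standalone sketch of that pattern, assuming the Camera SDK header name and namespace (the helper openByName is hypothetical):

#include "cameralibrary.h"   // assumed OptiTrack Camera SDK header
#include <string>
using namespace CameraLibrary;

Camera* openByName(const std::string& wanted) {
    CameraManager::X().WaitForInitialization();
    CameraList list;                            // filled with attached cameras
    for (int i = 0; i < list.Count(); i++) {
        if (wanted == list[i].Name())           // match by display name
            return CameraManager::X().GetCamera(list[i].UID());
    }
    return CameraManager::X().GetCamera();      // auto mode: first available
}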
Example #2
const CameraApi::CameraList FakeCameraApi::EnumerateCameras() const
{
    CameraList list;
    list.push_back( CameraDescription(*this).WithName(L"The Fake Camera")
                                            .WithDescription(L"For Testing Purposes")
                                            .WithUniqueId(L"fakeCamera:://00000"));
    return list;
}
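The chained WithName/WithDescription/WithUniqueId calls are a fluent builder: each setter returns the object so the calls compose in a single expression. A self-contained sketch of the idiom (the real CameraDescription API belongs to the host project):

#include <string>

class Description {
    std::wstring mName, mDesc, mId;
public:
    Description& WithName(const std::wstring& n)        { mName = n; return *this; }
    Description& WithDescription(const std::wstring& d) { mDesc = d; return *this; }
    Description& WithUniqueId(const std::wstring& i)    { mId = i;   return *this; }
};

// Usage mirrors the example: Description().WithName(L"Fake").WithUniqueId(L"0");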
Example #3
void ViewPoint::CameraCall(void (Camera::*handler)())
{
    if (!_cameras.empty())
    {
        CameraList tempCameras = _cameras;
        for(CameraList::iterator itr = tempCameras.begin(); itr != tempCameras.end(); ++itr)
        {
            Player* owner = sObjectAccessor.GetPlayer(*itr);
            if (owner)
                (owner->GetCamera().*handler)();
        }
    }
}
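The expression (owner->GetCamera().*handler)() invokes handler as a pointer to member function, so one CameraCall routine can dispatch any parameterless Camera method. A standalone illustration of the syntax:

#include <vector>

struct Camera {
    void Reset()  {}
    void Update() {}
};

// Call the member function that `handler` points to on every camera.
void callOnAll(std::vector<Camera>& cams, void (Camera::*handler)()) {
    for (Camera& c : cams)
        (c.*handler)();
}

// Usage: callOnAll(cams, &Camera::Reset);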
Example #4
void SpecificWorker::getImages(const CameraList &cameras, ImageMap &images)
{
	if( not cameras.empty() and cameras.size() <= MAX_CAMERAS and ( cameraParamsMap.find(cameras.front()) != cameraParamsMap.end()))
	{
		//uint size = cameraParamsMap[cameras.front()].colorHeight * cameraParamsMap[cameras.front()].colorWidth * 3;
		RoboCompRGBDBus::Image img;
		img.camera = cameraParamsMap[cameras.front()];
		QMutexLocker ml(mutex);
		img.colorImage.resize( readBuffer.size());
		img.colorImage.swap( readBuffer );
		images.insert( std::pair<std::string, RoboCompRGBDBus::Image>("default", img));
		std::cout << "camera name " <<cameras.front() << " " << img.colorImage.size() << " " << images.empty() << std::endl;
	}
}
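The resize-then-swap pair above moves the read buffer's bytes into the image without copying, while leaving the producer a same-sized buffer to refill. A minimal sketch of the idiom with generic buffers:

#include <cstdint>
#include <vector>

void handOff(std::vector<uint8_t>& producer, std::vector<uint8_t>& consumer) {
    consumer.resize(producer.size()); // pre-size what the producer gets back
    consumer.swap(producer);          // O(1): consumer now owns the bytes
}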
Example #5
std::vector<std::string> videoOptiTrack::enumerate(void) {
  std::vector<std::string> result;
  if(!CameraManager::X().AreCamerasInitialized()) {
    CameraManager::X().WaitForInitialization();
  }

  CameraList list;
  unsigned int i;
  for(i=0; i<list.Count(); i++) {
    result.push_back(list[i].Name());
  }

  return result;
}
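A hypothetical caller would simply iterate over the returned vector, e.g. to list device names before choosing which camera to open:

#include <iostream>
#include <string>
#include <vector>

// `names` would come from videoOptiTrack::enumerate() above.
void printCameraNames(const std::vector<std::string>& names) {
    for (const std::string& name : names)
        std::cout << "found camera: " << name << "\n";
}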
Example #6
int main(int argc, char* argv[])
{
    printf("==============================================================================\n");
    printf("== Camera Calibration                       NaturalPoint 2010 ==\n");
    printf("==============================================================================\n\n");
	

    //== This example attaches a frame synchronizer for all connected cameras.  Once
    //== attached, it receives their synchronized frame data as a FrameGroup vs. the
    //== GetFrame approach used for a single camera.

    //== For OptiTrack Ethernet cameras, it's important to enable development mode if you
    //== want to stop execution for an extended time while debugging without disconnecting
    //== the Ethernet devices.  Let's do that now:

    CameraLibrary_EnableDevelopment();

    //== Next, let's wait until all connected cameras are initialized ==

    printf("Waiting for cameras to spin up...");
    
    //== Initialize Camera Library ==----

    CameraManager::X().WaitForInitialization();
    
    //== Verify all cameras are initialized ==--

    if(CameraManager::X().AreCamerasInitialized())
        printf("complete\n\n");
    else
    {
        printf("failed (return key to exit)");
        getchar();
        return 1;
    }

    //== Connect to all connected cameras ==----

    Camera *camera[kMaxCameras];
    int     cameraCount=0;

    CameraList list;

    printf("Cameras:\n");

    for(int i=0; i<list.Count() && cameraCount<kMaxCameras; i++)
    {
        printf("Camera %d >> %s\n", i, list[i].Name());

        Camera *cam = CameraManager::X().GetCamera(list[i].UID());
        if(cam==0)
            printf("unable to connect to camera...\n");
        else {
            camera[cameraCount++] = cam;  // pack connected cameras contiguously
            //cam->SetVideoType(PrecisionMode); <-what's this? -NR
            // values derived from visualtest.exe in camera SDK/bin -NR
            cam->SetExposure(50);
            cam->SetThreshold(200);
            cam->SetIntensity(15);
        }
    }
        
    if(cameraCount==0)
    {
        printf("no cameras (return key to exit)");
        getchar();
        return 1;
    }

    printf("\n");

	// after reading the documentation, found the following info: --nr
	for(int i = 0; i < cameraCount; i++) {
		cout << "camera " << i << endl;
		cout << "physical pixel width: " << camera[i]->PhysicalPixelWidth() << endl;
		cout << "physical pixel height: " << camera[i]->PhysicalPixelHeight() << endl;
		cout << "focal length: " << camera[i]->FocalLength() << endl;
		Core::DistortionModel dm;
		camera[i]->GetDistortionModel(dm);
		cout << "f horizontal: " << dm.HorizontalFocalLength << endl;
		cout << "f vertical: " << dm.VerticalFocalLength << endl;
		cout << "kc1: " << dm.KC1 << endl;
		cout << "kc2: " << dm.KC2 << endl;
		cout << "kc3: " << dm.KC3 << endl;
		cout << "t0: " << dm.Tangential0 << endl;
		cout << "t1: " << dm.Tangential1 << endl;
		cout << "cx: " << dm.LensCenterX << endl;
		cout << "cy: " << dm.LensCenterY << endl;
		cout << "distort?: " << dm.Distort << endl;
	}


    //== Create and attach frame synchronizer ==--

    cModuleSync * sync = new cModuleSync();
    for(int i=0; i<cameraCount; i++)
        sync->AddCamera(camera[i]);

    //== Start cameras ==--

    printf("Starting cameras... (any key to exit)\n\n");
    for(int i=0; i<cameraCount; i++)
        camera[i]->Start();


	//== Define Image Size ==-- DBE
	Size imageSize = Size(1280, 1024);

	//== Get Image Points and Object Points==--
	vector<vector<Point2f>> imagePoints = GetImagePoints(sync);
	vector<vector<Point3f>> objectPoints = MakeObjectPoints();   //Only Works for our board (Alternating rows of 3 and 2 dots placed two inches apart horizontally and 1 inch vertically on a plane [z = 0])
	
	//== Test built-in OptiTrack code
	cout << endl;
	Core::DistortionModel dm;
	camera[1]->GetDistortionModel(dm);   // NB: indexes the second camera, so this assumes at least two are connected

	for (int i = 0; i < RECORDED_FRAMES; i++)
	{
		for (int j = 0; j < NUMBER_OF_POINTS ; j++)
		{
			Core::Undistort2DPoint(dm, imagePoints[i][j].x, imagePoints[i][j].y);
			cout << "["<< imagePoints[i][j].x << ", " << imagePoints[i][j].y <<"], ";
		}
		cout << endl << "----------------------------------" << endl;
	}
			

	//Initialize Camera Matrix and distortion coefficient matrix
	Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
	Mat distCoeffs = Mat::zeros(8, 1, CV_64F);

	//Create rvecs and tvecs output vectors
	vector<Mat> rvecs, tvecs;

	//== Perform Calibration ==-- 
	runCalibration(imageSize, cameraMatrix, distCoeffs, objectPoints, imagePoints, rvecs, tvecs);	

	//== Print Calibration ==-- DBE
	cout << "\nCalibration Results:\n" << endl << "distCoeffs = " << endl;
	printMat(distCoeffs);
	cout << "Camera Matrix" << endl;
	printMat(cameraMatrix);

    //== Destroy synchronizer ==--
    sync->RemoveAllCameras();
	delete sync;

    //== Release cameras ==--
    for(int i=0; i<cameraCount; i++)
        camera[i]->Release();

    //== Disconnect devices and shutdown Camera Library ==--
    CameraManager::X().Shutdown();

	return 0;
}
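runCalibration is not shown in this example. Given the arguments it receives, a plausible body (an assumption, not the author's code) would wrap OpenCV's cv::calibrateCamera, which fills the camera matrix and distortion coefficients and returns the RMS reprojection error:

#include <opencv2/calib3d.hpp>
#include <vector>
using namespace cv;
using namespace std;

double runCalibration(Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
                      const vector<vector<Point3f>>& objectPoints,
                      const vector<vector<Point2f>>& imagePoints,
                      vector<Mat>& rvecs, vector<Mat>& tvecs)
{
    // rvecs/tvecs receive one board pose per calibration frame.
    return calibrateCamera(objectPoints, imagePoints, imageSize,
                           cameraMatrix, distCoeffs, rvecs, tvecs);
}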
void SynCaptureThread::run()
{
	Camera **cameras = new Camera*[_numCameras];
	CameraList list;
	
	for(int i=0; i<list.Count() && i<_numCameras; i++)   // stay within the bounds of the cameras array
	{
		cameras[i] = CameraManager::X().GetCamera(list[i].UID());
	}
	//== Create and attach frame synchronizer ==--

	cModuleSync * sync = cModuleSync::Create();
	for(int i=0; i<_numCameras; i++)
	{
		sync->AddCamera(cameras[i]);
	}

	for(int i=0; i<_numCameras; i++)
	{
		cameras[i]->Start();
		cameras[i]->SetVideoType(Core::SegmentMode);
	}

	while(1)
	{
		if(_videoStatus == Para::PLAY_LIVE)
		{
			FrameGroup *frameGroup = sync->GetFrameGroup();
			if(frameGroup)
			{
				for(int i = 0; i < _POIsList.size(); i++)
					_POIsList[i].clear();
				for(int i = 0; i<frameGroup->Count(); i++)
				{
					Frame * frame = frameGroup->GetFrame(i);
					vector<POI> newPOIs;
					for(int j = 0; j < frame->ObjectCount(); j++)
					{				
						float area = frame->Object(j)->Area();
						if(area > Para::markerFilterThreshold)
						{
							float x = frame->Object(j)->X();
							float y = frame->Object(j)->Y();
							POI temp(x, y);
							if(!ContainNoise(_noisyList[i], temp))         // if temp is not in the noisyList, it is not noise
								newPOIs.push_back(temp);
						}										
					}

					frame->Release();
					_POIsList[i] = newPOIs;
				}
				frameGroup->Release();
				emit signalPOIsListReady(_POIsList);
			}
		}
		else if( _videoStatus == Para::CALIBRATE)
		{
			FrameGroup *frameGroup = sync->GetFrameGroup();
			if(frameGroup)
			{
				for(int i = 0; i < _POIsList.size(); i++)
					_POIsList[i].clear();
				_frameCount++;
				if(_frameCount > Para::maxFrame * Para::frameInterval)             // change to PLAY_LIVE mode when we have 500 frames
				{
					WriteCalibratedPoints();
					_videoStatus = Para::PLAY_LIVE;
					emit signalCalibrationReady();
				}
				for(int i = 0; i<frameGroup->Count(); i++)
				{
					Frame * frame = frameGroup->GetFrame(i);
					vector<POI> newPOIs;
					for(int j = 0; j < frame->ObjectCount(); j++)
					{				
						float area = frame->Object(j)->Area();
						if(area > Para::markerFilterThreshold)
						{
							float x = frame->Object(j)->X();
							float y = frame->Object(j)->Y();
							POI temp(x, y);
							if(!ContainNoise(_noisyList[i], temp))         // if temp is not in the noisyList, it is not noise
								newPOIs.push_back(temp);
						}										
					}

					frame->Release();
					_POIsList[i] = newPOIs;
				}

				if(_frameCount % Para::frameInterval == 0)             // sample points every Para::frameInterval frames
				{
					for(int i = 0; i < _POIsList.size(); i++)  
					{
						if(_POIsList[i].size() != 1)   // expect exactly one calibration point per camera
						{
							_calibratedPoints[_frameCount / Para::frameInterval].push_back(Point2f(-1.0,-1.0));
							continue;
						}
						for(int j = 0; j < _POIsList[i].size(); j++)     // now we have only one calibration point
						{
							_calibratedPoints[_frameCount / Para::frameInterval].push_back(Point2f(_POIsList[i][j]._coordinates2d.x, _POIsList[i][j]._coordinates2d.y));
						}			
					}			
				}

				frameGroup->Release();
				emit signalPOIsListReady(_POIsList);
			}
		}
		else if(_videoStatus == Para::DENOISE)
		{
			FrameGroup *frameGroup = sync->GetFrameGroup();
			if(frameGroup)
			{
				for(int i = 0; i < _POIsList.size(); i++)
					_POIsList[i].clear();
				for(int i = 0; i < frameGroup->Count(); i++)
				{
					Frame * frame = frameGroup->GetFrame(i);
					for(int j = 0; j < frame->ObjectCount(); j++)
					{				
						float area = frame->Object(j)->Area();
						if(area > Para::markerFilterThreshold)
						{
							float x = frame->Object(j)->X();
							float y = frame->Object(j)->Y();
							POI temp(x, y);
							if(!ContainNoise(_noisyList[i], temp))         // if temp is not in the noisyList, it is not noise
								_noisyList[i].push_back(temp);
						}										
					}
					frame->Release();
				}

				frameGroup->Release();
			}
		}
		else if(_videoStatus == Para::STOP)
		{
			// idle: keep polling until the status changes
		}
		else if(_videoStatus == Para::BREAK)
			break;
	}
	delete[] cameras;
	exec();
}
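The marker-extraction loop in run() is duplicated across the PLAY_LIVE, CALIBRATE, and DENOISE branches. A hypothetical helper, built only from calls already used above, would factor it out:

vector<POI> ExtractPOIs(Frame* frame, const vector<POI>& noise)
{
	vector<POI> pois;
	for(int j = 0; j < frame->ObjectCount(); j++)
	{
		// keep blobs above the area threshold that are not known noise
		if(frame->Object(j)->Area() > Para::markerFilterThreshold)
		{
			POI p(frame->Object(j)->X(), frame->Object(j)->Y());
			if(!ContainNoise(noise, p))
				pois.push_back(p);
		}
	}
	return pois;
}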
void OpenGlBox::paintGL()
{
	//Prepare
	glClear(GL_COLOR_BUFFER_BIT);
	mImageShader->set();
	glEnableVertexAttribArray(0);
	glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex2D), NULL);

	//Live stream

	//If state is true, grab the image from the camera and launch a thread to process it.
	//If it is false, retrieve the processed image and upload it.
	//An intermediate variable, thisState, captures the pre-toggle value in case of an early return.
	static bool state = true;
	bool thisState = state;
	state = !state;

	if (thisState)
	{
		//Check camera
		CameraList* cl = CameraList::instance();
		if (cl == nullptr)
			return;

		Camera* camera = cl->activeCamera();
		if (camera == nullptr)
			return;

		//Get jpg
		mVideoImageData.swap(camera->getLiveImage());

		//Launch processing thread
		mWorkerReturnFuture = std::async(std::launch::async, [this]() ->WorkerReturn*
		{
			//Convert image
			if (this->mVideoImageData.size() == 0)
				return nullptr;

			QImage img;
			if (!img.loadFromData(&this->mVideoImageData[0], this->mVideoImageData.size(), "JPG"))
				return nullptr;

			WorkerReturn* out = new WorkerReturn();

			//Generate histogram
			//Create local copy to avoid pointer dereferencing
			unsigned int localHist[256];
			memset(localHist, 0, 256 * sizeof(unsigned));
			
			unsigned w = img.width();
			unsigned h = img.height();

			for (unsigned i = 0; i < w; ++i)
				for (unsigned j = 0; j < h; ++j)
				{
					QRgb px = img.pixel(i, j);
					unsigned r = (px & 0x00ff0000) >> 16;
					unsigned g = (px & 0x0000ff00) >> 8;
					unsigned b = (px & 0x000000ff);
					++localHist[(r + g + b) / 3];
				}

			out->image = std::move(img);
			memcpy(out->hist, localHist, 256*sizeof(unsigned));

			return out;
		});
	}