Esempio n. 1
0
double GazeTracker::covariancefunction(SharedImage const& im1, 
				       SharedImage const& im2)
{
    // Squared-exponential (RBF-style) covariance between two images, with
    // imagedistance() supplying the distance term.
    const double signalStdDev = 100.0;
    const double lengthScale  = 4000.0;
    const double distance     = imagedistance(im1.get(), im2.get());
    const double denominator  = 2 * lengthScale * lengthScale;
    return signalStdDev * signalStdDev * exp(-distance / denominator);
}
void
ShadowImageLayerOGL::Swap(const SharedImage& aNewFront,
                          SharedImage* aNewBack)
{
  // Accepts a new front buffer from the child side, uploads its contents to
  // this layer's texture(s), and returns the previous buffer via aNewBack.
  if (!mDestroyed) {
    if (aNewFront.type() == SharedImage::TSharedImageID) {
      // We are using ImageBridge protocol. The image data will be queried at render
      // time in the parent side.
      PRUint64 newID = aNewFront.get_SharedImageID().id();
      if (newID != mImageContainerID) {
        // Switching containers invalidates the cached image version.
        mImageContainerID = newID;
        mImageVersion = 0;
      }
    } else if (aNewFront.type() == SharedImage::TSurfaceDescriptor) {
      AutoOpenSurface surf(OPEN_READ_ONLY, aNewFront.get_SurfaceDescriptor());
      gfxIntSize size = surf.Size();
      // (Re)initialize GL resources if size or content type changed, or if
      // the texture image does not exist yet.
      if (mSize != size || !mTexImage ||
          mTexImage->GetContentType() != surf.ContentType()) {
        Init(aNewFront);
      }
      // XXX this is always just ridiculously slow
      nsIntRegion updateRegion(nsIntRect(0, 0, size.width, size.height));
      mTexImage->DirectUpdate(surf.Get(), updateRegion);
    } else {
      // Remaining case: YUV planes in shared memory.
      const YUVImage& yuv = aNewFront.get_YUVImage();
      UploadSharedYUVToTexture(yuv);
    }
  }

  // Hand the (now old) front buffer back for reuse by the producer.
  *aNewBack = aNewFront;
}
Esempio n. 3
0
        bool LaneDetector::readSharedImage(Container &c) {
            // Copies the camera frame announced by 'c' from shared memory into
            // m_image (IplImage) and mirrors it. Returns true on success.
            bool retVal = false;

            if (c.getDataType() == Container::SHARED_IMAGE) {
                SharedImage si = c.getData<SharedImage> ();

                // Attach to the shared memory containing the image from the
                // virtual camera only once. BUGFIX: the flag was never set to
                // true, so we silently re-attached on every single frame.
                if (!m_hasAttachedToSharedImageMemory) {
                    m_sharedImageMemory = core::wrapper::SharedMemoryFactory::attachToSharedMemory(si.getName());
                    m_hasAttachedToSharedImageMemory = true;
                }

                // Check if we could successfully attach to the shared memory.
                if (m_sharedImageMemory->isValid()) {
                    // Lock the memory region to gain exclusive access using a scoped lock.
                    Lock l(m_sharedImageMemory);

                    // Allocate the target image lazily on the first frame.
                    if (m_image == NULL) {
                        m_image = cvCreateImage(cvSize(si.getWidth(), si.getHeight()), IPL_DEPTH_8U, si.getBytesPerPixel());
                    }

                    // Example: Simply copy the image into our process space.
                    if (m_image != NULL) {
                        memcpy(m_image->imageData, m_sharedImageMemory->getSharedMemory(), si.getWidth() * si.getHeight() * si.getBytesPerPixel());

                        // Mirror the image. BUGFIX: guarded by the NULL check
                        // above — cvFlip on a NULL image would crash.
                        cvFlip(m_image, 0, -1);

                        retVal = true;
                    }
                }
            }
            return retVal;
        }
    void CamGen::drawScene() {
        // Grabs the next camera frame, announces it to the conference as a
        // SharedImage, and prints an approximate frame rate every 20 frames.
        static uint32_t frameCounter = 0;
        static clock_t start = clock();

        Container container = getKeyValueDataStore().get(Container::EGOSTATE);
        m_egoState = container.getData<hesperia::data::environment::EgoState>();
        m_image = m_grabber->getNextImage();

        frameCounter++;

        // Share information about this image.
        if (m_image.isValid()) {
            SharedImage si;
            si.setWidth(m_image->getWidth());
            si.setHeight(m_image->getHeight());
            // TODO: Refactor me! (bytes per pixel is hard-coded to RGB)
            si.setBytesPerPixel(3);
            si.setName("CamGen");

            Container c(Container::SHARED_IMAGE, si);
            getConference().send(c);
        }

        if ((frameCounter % 20) == 0) {
            clock_t end = clock();
            clock_t duration = end - start;
            // Convert clock ticks to seconds. NOTE(review): clock() measures
            // CPU time, not wall time, so the reported FPS is approximate.
            double seconds = static_cast<double>(duration) / CLOCKS_PER_SEC;
            // BUGFIX: guard against division by zero when the clock has not
            // advanced between two measurements (coarse tick resolution).
            if (seconds > 0) {
                cerr << "FPS: " << (frameCounter / seconds) << endl;
            }
            frameCounter = 0;
            start = clock();
        }

        m_grabber->delay();
    }
        bool LaneFollower::readSharedImage(Container &c) {
            // Copies the camera frame referenced by 'c' from shared memory into
            // m_image (cv::Mat) and mirrors it. Returns true on success.
            bool retVal = false;

            if (c.getDataType() == odcore::data::image::SharedImage::ID()) {
                SharedImage si = c.getData<SharedImage> ();

                // Attach to the shared memory only once. BUGFIX: the flag was
                // never set to true, so we re-attached on every single frame.
                if (!m_hasAttachedToSharedImageMemory) {
                    m_sharedImageMemory = odcore::wrapper::SharedMemoryFactory::attachToSharedMemory(si.getName());
                    m_hasAttachedToSharedImageMemory = true;
                }

                // Check if we could successfully attach to the shared memory.
                if (m_sharedImageMemory->isValid()) {

                    // Lock the memory region to gain exclusive access using a scoped lock.
                    Lock l(m_sharedImageMemory);
                    const uint32_t numberOfChannels = 3;

                    // Allocate the target matrix lazily on the first frame.
                    if (m_image.empty()) {
                        m_image.create(cv::Size(si.getWidth(), si.getHeight()), CV_8UC3);
                    }

                    // Copying the image data is very expensive...
                    if (!m_image.empty()) {
                        memcpy(m_image.data, m_sharedImageMemory->getSharedMemory(), si.getWidth() * si.getHeight() * numberOfChannels);
                    }

                    // Mirror the image (only needed when fed by the simulator).
                    cv::flip(m_image, m_image, -1);
                    retVal = true;
                }
            }
            return retVal;
        }
Esempio n. 6
0
SharedImage* ImageContainerChild::CreateSharedImageFromData(Image* image)
{
  // Copies |image| into freshly allocated shared-memory buffers and wraps
  // them in a SharedImage suitable for sending across the ImageBridge.
  // Returns a heap-allocated SharedImage owned by the caller, or nsnull
  // for unsupported formats (after aborting).
  NS_ABORT_IF_FALSE(InImageBridgeChildThread(),
                  "Should be in ImageBridgeChild thread.");

  ++mActiveImageCount;

  // TODO: I don't test for BasicManager()->IsCompositingCheap() here,
  // is this a problem? (the equvivalent code in PCompositor does that)
  if (image->GetFormat() == Image::PLANAR_YCBCR ) {
    PlanarYCbCrImage *YCbCrImage = static_cast<PlanarYCbCrImage*>(image);
    const PlanarYCbCrImage::Data *data = YCbCrImage->GetData();
    NS_ASSERTION(data, "Must be able to retrieve yuv data from image!");

    nsRefPtr<gfxSharedImageSurface> tempBufferY;
    nsRefPtr<gfxSharedImageSurface> tempBufferU;
    nsRefPtr<gfxSharedImageSurface> tempBufferV;

    // One shared surface per plane; Cb and Cr share their dimensions.
    if (!this->AllocBuffer(data->mYSize, gfxASurface::CONTENT_ALPHA,
                           getter_AddRefs(tempBufferY)) ||
        !this->AllocBuffer(data->mCbCrSize, gfxASurface::CONTENT_ALPHA,
                           getter_AddRefs(tempBufferU)) ||
        !this->AllocBuffer(data->mCbCrSize, gfxASurface::CONTENT_ALPHA,
                           getter_AddRefs(tempBufferV))) {
      NS_RUNTIMEABORT("creating SharedImage failed!");
    }

    // Copy row by row because source and destination strides may differ.
    for (int i = 0; i < data->mYSize.height; i++) {
      memcpy(tempBufferY->Data() + i * tempBufferY->Stride(),
             data->mYChannel + i * data->mYStride,
             data->mYSize.width);
    }
    for (int i = 0; i < data->mCbCrSize.height; i++) {
      memcpy(tempBufferU->Data() + i * tempBufferU->Stride(),
             data->mCbChannel + i * data->mCbCrStride,
             data->mCbCrSize.width);
      memcpy(tempBufferV->Data() + i * tempBufferV->Stride(),
             data->mCrChannel + i * data->mCbCrStride,
             data->mCbCrSize.width);
    }

    // BUGFIX: the YUVImage was previously heap-allocated with `new` and then
    // copied into the SharedImage constructor, leaking the temporary on every
    // call. Pass a stack temporary instead.
    SharedImage* result = new SharedImage(
        YUVImage(tempBufferY->GetShmem(),
                 tempBufferU->GetShmem(),
                 tempBufferV->GetShmem(),
                 data->GetPictureRect()));
    NS_ABORT_IF_FALSE(result->type() == SharedImage::TYUVImage,
                      "SharedImage type not set correctly");
    return result;
  } else if (image->GetFormat() == Image::GONK_IO_SURFACE) {
    // Gonk surfaces are passed through by descriptor; no pixel copy needed.
    GonkIOSurfaceImage* gonkImage = static_cast<GonkIOSurfaceImage*>(image);
    SharedImage* result = new SharedImage(gonkImage->GetSurfaceDescriptor());
    return result;
  } else {
    NS_RUNTIMEABORT("TODO: Only YUVImage is supported here right now.");
  }
  return nsnull;
}
void SharedImageSequence::GetSharedImageSequencePowerCube(libCameraSensors::AbstractRangeImagingSensor* RangeCam, libCameraSensors::AbstractColorCamera* ColorCam, const CvSize& SharedImageSize, int DegreeOffset)
{
	// Captures a 360-degree sequence of shared images by rotating a PowerCube
	// pan unit in DegreeOffset increments and grabbing one image per step.
	// NOTE(review): the SharedImageSize parameter appears unused — the capture
	// below passes SharedImageDefaultSize instead; confirm this is intended.

#ifndef __USE_POWERCUBE__
	std::cout << "Error: you have to enable the preprocessor symbol __USE_POWERCUBE__" << std::endl;
#endif

#ifdef __USE_POWERCUBE__
	cvNamedWindow(m_CoordWinName.c_str());
	cvNamedWindow(m_ColorWinName.c_str());
	char c=-1;
	ipa_utils::PowerCube powercube;
	powercube.Init();
	powercube.Open();
	powercube.DoHoming();
			
	// Rotate and capture

	unsigned int rotationIncrement = DegreeOffset;
	for(unsigned int degree = 0; degree < 360; degree += rotationIncrement)
	{
		// Abort if the user closed either display window.
		if(!cvGetWindowHandle(m_CoordWinName.c_str()) || !cvGetWindowHandle(m_ColorWinName.c_str()))
		{
			break;
		}
		
		powercube.Rotate(rotationIncrement);
		
		SharedImage SImg;//(m_SharedImageSize);//, m_CameraSensorsIniDirectory);
		//SImg.Init(SharedImageSize);
	#ifndef __USE_SHAREDIMAGE_JBK__
		SImg.GetImagesFromSensors(RangeCam, ColorCam, SharedImageDefaultSize);
	#endif
	#ifdef __USE_SHAREDIMAGE_JBK__
		SImg.GetImagesFromSensors(RangeCam, ColorCam);
	#endif
		SImg.DisplayCoord(m_CoordWinName);
		SImg.DisplayShared(m_ColorWinName);
		push_back(SImg);

		// Allow the user to abort with 'q' between steps.
		c = cvWaitKey(100);
		if(c=='q' || c=='Q')
		{
			break;
		}

		std::cout << "SharedImageSequence::GetSharedImageSequence: ... one image sucessfully acquired." << std::endl;
	}
	cvDestroyAllWindows();
	powercube.Close();
#endif // __USE_POWERCUBE__
}
bool
ShadowImageLayerOGL::Init(const SharedImage& aFront)
{
  // (Re)initializes GL resources for the given front buffer.
  // NOTE(review): the TSurfaceDescriptor branch falls through to the final
  // `return false` even on success, while the YUV branch returns true —
  // confirm callers do not depend on the return value for the surface path.
  if (aFront.type() == SharedImage::TSurfaceDescriptor) {
    SurfaceDescriptor surface = aFront.get_SurfaceDescriptor();
    if (surface.type() == SurfaceDescriptor::TSharedTextureDescriptor) {
      // Shared GL texture: cache the handle and its properties.
      SharedTextureDescriptor texture = surface.get_SharedTextureDescriptor();
      mSize = texture.size();
      mSharedHandle = texture.handle();
      mShareType = texture.shareType();
      mInverted = texture.inverted();
    } else {
      // Generic surface: create a texture image matching size/content type.
      AutoOpenSurface autoSurf(OPEN_READ_ONLY, surface);
      mSize = autoSurf.Size();
      mTexImage = gl()->CreateTextureImage(nsIntSize(mSize.width, mSize.height),
                                           autoSurf.ContentType(),
                                           LOCAL_GL_CLAMP_TO_EDGE,
                                           mForceSingleTile
                                            ? TextureImage::ForceSingleTile
                                            : TextureImage::NoFlags);
    }
  } else {
    // YUV planes: Y and Cb/Cr sizes may differ (chroma subsampling).
    YUVImage yuv = aFront.get_YUVImage();

    AutoOpenSurface surfY(OPEN_READ_ONLY, yuv.Ydata());
    AutoOpenSurface surfU(OPEN_READ_ONLY, yuv.Udata());

    mSize = surfY.Size();
    mCbCrSize = surfU.Size();

    // Allocate the three plane textures once.
    if (!mYUVTexture[0].IsAllocated()) {
      mYUVTexture[0].Allocate(gl());
      mYUVTexture[1].Allocate(gl());
      mYUVTexture[2].Allocate(gl());
    }

    NS_ASSERTION(mYUVTexture[0].IsAllocated() &&
                 mYUVTexture[1].IsAllocated() &&
                 mYUVTexture[2].IsAllocated(),
                 "Texture allocation failed!");

    gl()->MakeCurrent();
    SetClamping(gl(), mYUVTexture[0].GetTextureID());
    SetClamping(gl(), mYUVTexture[1].GetTextureID());
    SetClamping(gl(), mYUVTexture[2].GetTextureID());
    return true;
  }
  return false;
}
Esempio n. 9
0
void
ShadowImageLayerOGL::Swap(const SharedImage& aNewFront,
                          SharedImage* aNewBack)
{
  // Accepts a new front buffer, uploads its pixels to this layer's
  // texture(s), and returns the previous buffer via aNewBack.
  if (!mDestroyed) {
    if (aNewFront.type() == SharedImage::TSurfaceDescriptor) {
      nsRefPtr<gfxASurface> surf =
        ShadowLayerForwarder::OpenDescriptor(aNewFront.get_SurfaceDescriptor());
      gfxIntSize size = surf->GetSize();
      // (Re)initialize GL resources when size or content type changed, or
      // when the texture image does not exist yet.
      if (mSize != size || !mTexImage ||
          mTexImage->GetContentType() != surf->GetContentType()) {
        Init(aNewFront);
      }
      // XXX this is always just ridiculously slow
      nsIntRegion updateRegion(nsIntRect(0, 0, size.width, size.height));
      mTexImage->DirectUpdate(surf, updateRegion);
    } else {
      // YUV path: open the three shared plane surfaces.
      const YUVImage& yuv = aNewFront.get_YUVImage();

      nsRefPtr<gfxSharedImageSurface> surfY =
        gfxSharedImageSurface::Open(yuv.Ydata());
      nsRefPtr<gfxSharedImageSurface> surfU =
        gfxSharedImageSurface::Open(yuv.Udata());
      nsRefPtr<gfxSharedImageSurface> surfV =
        gfxSharedImageSurface::Open(yuv.Vdata());
      mPictureRect = yuv.picture();

      gfxIntSize size = surfY->GetSize();
      gfxIntSize CbCrSize = surfU->GetSize();
      // Re-init if plane sizes changed or textures were never allocated.
      if (size != mSize || mCbCrSize != CbCrSize || !mYUVTexture[0].IsAllocated()) {
        Init(aNewFront);
      }

      // Assemble a plane descriptor; Cb and Cr share stride and size.
      PlanarYCbCrImage::Data data;
      data.mYChannel = surfY->Data();
      data.mYStride = surfY->Stride();
      data.mYSize = surfY->GetSize();
      data.mCbChannel = surfU->Data();
      data.mCrChannel = surfV->Data();
      data.mCbCrStride = surfU->Stride();
      data.mCbCrSize = surfU->GetSize();

      UploadYUVToTexture(gl(), data, &mYUVTexture[0], &mYUVTexture[1], &mYUVTexture[2]);
    }
  }

  // Hand the (now old) front buffer back for reuse by the producer.
  *aNewBack = aNewFront;
}
int SharedImageSequence::LoadSharedImageSequence(const std::string& filename, unsigned int Limit, int k)
{
	// Loads up to 'Limit' shared images from disk, taking every k-th image.
	// Returns RET_OK on success, RET_FAILED otherwise.
	clear();

	// BUGFIX: a non-positive step made the loop below run forever.
	if (k <= 0) {
		std::cout << "SharedImageSequence::LoadSharedImageSequence: Error: step k must be positive.\n";
		return RET_FAILED;
	}

	std::cout << "Loading image sequence " << filename << "\n";
	std::stringstream FileNameStream;
	FileNameStream << filename << m_InfFileAttachment;
	std::ifstream f((FileNameStream.str()).c_str());
	if(!f.is_open())
	{
		std::cout << "SharedImageSequence::LoadSharedImageSequence: Error while opening file " 
			      << FileNameStream.str().c_str() << ".\n";
		return RET_FAILED;
	}

	/// Read number of images that are stored on disk, clipped against Limit.
	int s=0; f >> s; s=intmin(Limit, s);

	std::cout << "SharedImageSequence::LoadSharedImageSequence: Loading ";
	for(int i=0; i<s; i+=k)
	{
		std::stringstream FileNameStream2;
		FileNameStream2 << filename << m_Spacing << i;

		SharedImage Tmp;

		/// Load the single images (coordinate and shared image) from disk
		if(Tmp.LoadSharedImage(FileNameStream2.str())==RET_FAILED) return RET_FAILED;
		push_back(Tmp);
		std::cout << i << " ";
	}

	std::cout << "\n";

	return RET_OK;
}
Esempio n. 11
0
bool
ShadowImageLayerOGL::Init(const SharedImage& aFront)
{
  // (Re)initializes GL resources for the given front buffer.
  // Returns true from both branches; the trailing `return false` below is
  // unreachable as written.
  if (aFront.type() == SharedImage::TSurfaceDescriptor) {
    // Generic surface: create a texture image matching its size/content type.
    SurfaceDescriptor desc = aFront.get_SurfaceDescriptor();
    nsRefPtr<gfxASurface> surf =
      ShadowLayerForwarder::OpenDescriptor(desc);
    mSize = surf->GetSize();
    mTexImage = gl()->CreateTextureImage(nsIntSize(mSize.width, mSize.height),
                                         surf->GetContentType(),
                                         LOCAL_GL_CLAMP_TO_EDGE,
                                         mForceSingleTile
                                          ? TextureImage::ForceSingleTile
                                          : TextureImage::NoFlags);
    return true;
  } else {
    // YUV path: open the three shared plane surfaces; Y and Cb/Cr sizes may
    // differ due to chroma subsampling.
    YUVImage yuv = aFront.get_YUVImage();

    nsRefPtr<gfxSharedImageSurface> surfY =
      gfxSharedImageSurface::Open(yuv.Ydata());
    nsRefPtr<gfxSharedImageSurface> surfU =
      gfxSharedImageSurface::Open(yuv.Udata());
    nsRefPtr<gfxSharedImageSurface> surfV =
      gfxSharedImageSurface::Open(yuv.Vdata());

    mSize = surfY->GetSize();
    mCbCrSize = surfU->GetSize();

    // Allocate the three plane textures once.
    if (!mYUVTexture[0].IsAllocated()) {
      mYUVTexture[0].Allocate(gl());
      mYUVTexture[1].Allocate(gl());
      mYUVTexture[2].Allocate(gl());
    }

    NS_ASSERTION(mYUVTexture[0].IsAllocated() &&
                 mYUVTexture[1].IsAllocated() &&
                 mYUVTexture[2].IsAllocated(),
                 "Texture allocation failed!");

    gl()->MakeCurrent();
    SetClamping(gl(), mYUVTexture[0].GetTextureID());
    SetClamping(gl(), mYUVTexture[1].GetTextureID());
    SetClamping(gl(), mYUVTexture[2].GetTextureID());
    return true;
  }
  return false;
}
void SharedImageSequence::GetRawImageSequence(libCameraSensors::AbstractRangeImagingSensor* RangeCam, libCameraSensors::AbstractColorCamera* ColorCam)
{
	// Interactive acquisition loop: displays live raw images and lets the
	// user grab ('n'), store ('s'), or quit ('q') via keyboard in the
	// OpenCV windows. Acquired images are appended to this sequence.
	cvNamedWindow(m_CoordWinName.c_str());
	cvNamedWindow(m_ColorWinName.c_str());

	char c=-1;
	SharedImage SImg;
	// Initial grab so the windows show something before the first keypress.
	SImg.GetRawImagesFromSensors(RangeCam, ColorCam);
	SImg.DisplayInten(m_CoordWinName);
	SImg.DisplayShared(m_ColorWinName);

	int cnt=0;
	// Loop until the user closes one of the display windows.
	while(cvGetWindowHandle(m_ColorWinName.c_str()) && cvGetWindowHandle(m_CoordWinName.c_str()))
	{

		std::cout << "SharedImageSequence::GetSharedImageSequence: Press 'n' to take a training image, 's' to save the image, or 'q' to quit.\n";

		c = cvWaitKey();

		// Refresh the preview after every keypress.
		SImg.GetRawImagesFromSensors(RangeCam, ColorCam);

		SImg.DisplayInten(m_CoordWinName);
		SImg.DisplayShared(m_ColorWinName);

		std::cout << "SharedImageSequence::GetSharedImageSequence: " << c << ".\n";
		if(c=='q' || c=='Q')
		{
			break;
		}
		else if (c=='n' || c=='N')
		{
			// Take a fresh training image and show it.
			SImg.GetRawImagesFromSensors(RangeCam, ColorCam);
			SImg.DisplayInten(m_CoordWinName);
			SImg.DisplayShared(m_ColorWinName);
		}
		else if (c=='s' || c=='S')
		{
			// Keep the currently displayed image in the sequence.
			push_back(SImg);
			std::cout << "SharedImageSequence::GetSharedImageSequence: ... one image sucessfully acquired." << std::endl;
			cnt++;
		}
	}
	cvDestroyAllWindows();
}
            void SharedImageViewerWidget::selectedSharedImage(QListWidgetItem *item) {
            	// Slot invoked when the user picks a shared image from the
            	// selection list; attaches the viewer to that image's shared
            	// memory segment and hides the list.
            	if (item != NULL) {
            		// Retrieve stored shared image.
            		SharedImage si = m_mapOfAvailableSharedImages[item->text().toStdString()];

            		// Only proceed for images with a non-empty pixel area.
            		if ( (si.getWidth() * si.getHeight()) > 0 ) {
            			Lock l(m_sharedImageMemoryMutex);

            			cerr << "Using shared image: " << si.toString() << endl;
                        setWindowTitle(QString::fromStdString(si.toString()));

            			m_sharedImageMemory = core::wrapper::SharedMemoryFactory::attachToSharedMemory(si.getName());
            			m_sharedImage = si;

            			// Remove the selection box.
            			m_list->hide();
            		}
            	}
            }
Esempio n. 14
0
LayerRenderState
ShadowImageLayerOGL::GetRenderState()
{
  // Returns the render state for hardware composition (Composer2D), or an
  // empty state when this layer is not backed by an image container or the
  // shared image is not a surface descriptor.
  if (!mImageContainerID) {
    return LayerRenderState();
  }

  // Update the associated compositor ID in case Composer2D succeeds,
  // because we won't enter RenderLayer() if so ...
  ImageContainerParent::SetCompositorIDForImage(
    mImageContainerID, mOGLManager->GetCompositorID());
  // ... but do *not* try to update the local image version.  We need
  // to retain that information in case we fall back on GL, so that we
  // can upload / attach buffers properly.

  SharedImage* img = ImageContainerParent::GetSharedImage(mImageContainerID);
  if (img && img->type() == SharedImage::TSurfaceDescriptor) {
    return LayerRenderState(&img->get_SurfaceDescriptor());
  }
  return LayerRenderState();
}
void SharedImageSequence::DeleteSharedImageSequence(const std::string& Name)
{
	// Deletes all images of a stored sequence plus its info header file.

	// Load the old info header to learn how many images were stored.
	std::stringstream FileNameStream;
	FileNameStream << Name << m_InfFileAttachment;
	int s=0;
	std::ifstream f(FileNameStream.str().c_str());
	if(!f.is_open()) return;

	f >> s;
	SharedImage Dummy;
	// BUGFIX: the loop body contained a stray extra 'i++' in addition to the
	// for-increment, so only every second image was deleted and half the
	// files were left behind on disk.
	for(int i=0; i<s; i++)
	{
		std::stringstream FileNameStream2;
		FileNameStream2 << Name << m_Spacing << i;
		Dummy.DeleteSharedImage(FileNameStream2.str());
	}
	f.close();

	// Finally remove the info header itself.
	std::string name = FileNameStream.str();
	removeFile(name);
}
void SharedImageSequence::GetSharedImageSequenceManually(libCameraSensors::AbstractRangeImagingSensor* RangeCam, libCameraSensors::AbstractColorCamera* ColorCam, const CvSize& SharedImageSize)
{
	// Interactive acquisition loop: displays live images and lets the user
	// grab ('n'), store ('s'), or quit ('q') via keyboard in the OpenCV
	// windows. Acquired images are appended to this sequence.
	// NOTE(review): the SharedImageSize parameter appears unused — captures
	// below pass SharedImageDefaultSize instead; confirm this is intended.

	cvNamedWindow(m_CoordWinName.c_str());
	cvNamedWindow(m_ColorWinName.c_str());

	char c=-1;
	SharedImage SImg;
	// Initial grab so the windows show something before the first keypress.
	SImg.GetImagesFromSensors(RangeCam, ColorCam, SharedImageDefaultSize);

	SImg.DisplayCoord(m_CoordWinName);
	SImg.DisplayShared(m_ColorWinName);

	int cnt=0;
	// Loop until the user closes one of the display windows.
	while(cvGetWindowHandle(m_ColorWinName.c_str()) && cvGetWindowHandle(m_CoordWinName.c_str()))
	{

		std::cout << "SharedImageSequence::GetSharedImageSequence: Press 'n' to take a training image, 's' to save the image, or 'q' to quit.\n";

		c = cvWaitKey();

		std::cout << "SharedImageSequence::GetSharedImageSequence: " << c << ".\n";
		if(c=='q' || c=='Q')
		{
			break;
		}
		else if (c=='n' || c=='N')
		{
			// Take a fresh training image and show it.
		#ifndef __USE_SHAREDIMAGE_JBK__
			SImg.GetImagesFromSensors(RangeCam, ColorCam, SharedImageDefaultSize);
		#endif
		#ifdef __USE_SHAREDIMAGE_JBK__
			SImg.GetImagesFromSensors(RangeCam, ColorCam);
		#endif
			SImg.DisplayCoord(m_CoordWinName);
			SImg.DisplayShared(m_ColorWinName);
                }
		else if (c=='s' || c=='S')
		{
			// Keep the currently displayed image in the sequence.
			push_back(SImg);
			std::cout << "SharedImageSequence::GetSharedImageSequence: ... one image sucessfully acquired." << std::endl;
			cnt++;
		}
	}
	cvDestroyAllWindows();
}
Esempio n. 17
0
        bool VCR::readSharedImage(Container &c) {
            // Copies the camera frame referenced by 'c' from shared memory
            // into m_image (IplImage) and mirrors it. Returns true on success.
            bool retVal = false;

            if (c.getDataType() == Container::SHARED_IMAGE) {
                SharedImage si = c.getData<SharedImage> ();

                // Attach to the shared memory only once. BUGFIX: the flag was
                // never set to true, so we re-attached on every single frame.
                if (!m_hasAttachedToSharedImageMemory) {
                    m_sharedImageMemory
                            = core::wrapper::SharedMemoryFactory::attachToSharedMemory(
                                    si.getName());
                    m_hasAttachedToSharedImageMemory = true;
                }

                // Check if we could successfully attach to the shared memory.
                if (m_sharedImageMemory->isValid()) {
                    // Lock the memory region to gain exclusive access.
                    // REMEMBER!!! DO NOT FAIL WITHIN lock() / unlock(),
                    // otherwise the image producing process would fail.
                    m_sharedImageMemory->lock();
                    {
                        const uint32_t numberOfChannels = 3;
                        // Allocate the target image lazily on the first frame.
                        if (m_image == NULL) {
                            m_image = cvCreateImage(cvSize(si.getWidth(),
                                    si.getHeight()), IPL_DEPTH_8U, numberOfChannels);
                        }

                        // Copying the image data is very expensive...
                        if (m_image != NULL) {
                            memcpy(m_image->imageData,
                                    m_sharedImageMemory->getSharedMemory(),
                                    si.getWidth() * si.getHeight() * numberOfChannels);
                        }
                    }

                    // Release the memory region so that the image producer
                    // (i.e. the camera) can provide the next raw image data.
                    m_sharedImageMemory->unlock();

                    // Mirror the image. BUGFIX: guarded — cvFlip on a NULL
                    // image (allocation failure) would crash.
                    if (m_image != NULL) {
                        cvFlip(m_image, 0, -1);
                        retVal = true;
                    }
                }
            }
            return retVal;
        }
            void SharedImageViewerWidget::nextContainer(Container &c) {
                // Registers any newly announced shared image in the selection
                // list, skipping duplicates and images without pixels or name.
                if (c.getDataType() != Container::SHARED_IMAGE) {
                    return;
                }

                SharedImage announced = c.getData<SharedImage>();
                const bool hasPixels = (announced.getWidth() * announced.getHeight()) > 0;
                const bool hasName = announced.getName().size() > 0;
                if (!hasPixels || !hasName) {
                    return;
                }

                // Skip shared images that are already known.
                vector<string>::iterator known = std::find(m_listOfAvailableSharedImages.begin(),
                                                           m_listOfAvailableSharedImages.end(),
                                                           announced.getName());
                if (known != m_listOfAvailableSharedImages.end()) {
                    return;
                }

                m_listOfAvailableSharedImages.push_back(announced.getName());
                m_list->addItem(QString::fromStdString(announced.getName()));

                // Store the full descriptor for later selection.
                m_mapOfAvailableSharedImages[announced.getName()] = announced;
            }
void
ShadowImageLayerOGL::RenderLayer(int aPreviousFrameBuffer,
                                 const nsIntPoint& aOffset)
{
  // Draws this image layer: refreshes ImageBridge-provided YUV data if a
  // newer version exists, then renders either the RGB texture image path
  // or the three-plane YUV path.
  mOGLManager->MakeCurrent();
  if (mImageContainerID) {
    // ImageBridge path: pull the latest shared image if its version changed.
    ImageContainerParent::SetCompositorIDForImage(mImageContainerID,
                                                  mOGLManager->GetCompositorID());
    PRUint32 imgVersion = ImageContainerParent::GetSharedImageVersion(mImageContainerID);
    if (imgVersion != mImageVersion) {
      SharedImage* img = ImageContainerParent::GetSharedImage(mImageContainerID);
      if (img && (img->type() == SharedImage::TYUVImage)) {
        UploadSharedYUVToTexture(img->get_YUVImage());
  
        mImageVersion = imgVersion;
      }
    }
  }


  if (mTexImage) {
    // RGB(A) path: draw the texture image tile by tile.
    NS_ASSERTION(mTexImage->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");

    ShaderProgramOGL *colorProgram =
      mOGLManager->GetProgram(mTexImage->GetShaderProgramType(), GetMaskLayer());

    colorProgram->Activate();
    colorProgram->SetTextureUnit(0);
    colorProgram->SetLayerTransform(GetEffectiveTransform());
    colorProgram->SetLayerOpacity(GetEffectiveOpacity());
    colorProgram->SetRenderOffset(aOffset);
    colorProgram->LoadMask(GetMaskLayer());

    mTexImage->SetFilter(mFilter);
    mTexImage->BeginTileIteration();

    if (gl()->CanUploadNonPowerOfTwo()) {
      do {
        TextureImage::ScopedBindTextureAndApplyFilter texBind(mTexImage, LOCAL_GL_TEXTURE0);
        colorProgram->SetLayerQuadRect(mTexImage->GetTileRect());
        mOGLManager->BindAndDrawQuad(colorProgram);
      } while (mTexImage->NextTile());
    } else {
      do {
        TextureImage::ScopedBindTextureAndApplyFilter texBind(mTexImage, LOCAL_GL_TEXTURE0);
        colorProgram->SetLayerQuadRect(mTexImage->GetTileRect());
        // We can't use BindAndDrawQuad because that always uploads the whole texture from 0.0f -> 1.0f
        // in x and y. We use BindAndDrawQuadWithTextureRect to actually draw a subrect of the texture
        mOGLManager->BindAndDrawQuadWithTextureRect(colorProgram,
                                                    nsIntRect(0, 0, mTexImage->GetTileRect().width,
                                                                    mTexImage->GetTileRect().height),
                                                    mTexImage->GetTileRect().Size());
      } while (mTexImage->NextTile());
    }

  } else {
    // YUV path: bind the three plane textures to texture units 0..2 and
    // draw with the YCbCr shader.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mYUVTexture[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mYUVTexture[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mYUVTexture[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);

    ShaderProgramOGL *yuvProgram = mOGLManager->GetProgram(YCbCrLayerProgramType, GetMaskLayer());

    yuvProgram->Activate();
    yuvProgram->SetLayerQuadRect(nsIntRect(0, 0,
                                           mPictureRect.width,
                                           mPictureRect.height));
    yuvProgram->SetYCbCrTextureUnits(0, 1, 2);
    yuvProgram->SetLayerTransform(GetEffectiveTransform());
    yuvProgram->SetLayerOpacity(GetEffectiveOpacity());
    yuvProgram->SetRenderOffset(aOffset);
    yuvProgram->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(yuvProgram,
                                                mPictureRect,
                                                nsIntSize(mSize.width, mSize.height));
 }
}
int32_t main(int32_t argc, char **argv)
{
    uint32_t retVal = 0;
    int recIndex=1;
    bool log=false;
    
    if((argc != 2 && argc != 3 && argc != 4) || (argc==4 && string(argv[1]).compare("-l")!=0))
    {
        errorMessage(string(argv[0]));
        retVal = 1;
    }
    else if(argc==2 && string(argv[1]).compare("-h")==0)
    {
        helpMessage(string(argv[0]));
        retVal = 0;
    }
    else
    {
        // if -l option is set
        if(argc==4 || (argc==3 && string(argv[1]).compare("-l")==0))
        {
            ++recIndex;
            log=true;
        }
        
        // Use command line parameter as file for playback;
        string recordingFile(argv[recIndex]);
        stringstream recordingFileUrl;
        recordingFileUrl << "file://" << recordingFile;

        // Location of the recording file.
        URL url(recordingFileUrl.str());

        // Do we want to rewind the stream on EOF?
        const bool AUTO_REWIND = false;

        // Size of the memory buffer that should fit at least the size of one frame.
        const uint32_t MEMORY_SEGMENT_SIZE = 1024 * 768;

        // Number of memory segments (one is enough as we are running sychronously).
        const uint32_t NUMBER_OF_SEGMENTS = 1;

        // Run player in synchronous mode without data caching in background.
        const bool THREADING = false;

        // Construct the player.
        Player player(url, AUTO_REWIND, MEMORY_SEGMENT_SIZE, NUMBER_OF_SEGMENTS, THREADING);

        // The next container from the recording.
        Container nextContainer;

        // Using OpenCV's IplImage data structure to simply playback the data.
        IplImage *image = NULL;

        // Create the OpenCV playback window.
        cvNamedWindow("CaroloCup-CameraPlayback", CV_WINDOW_AUTOSIZE);

        // This flag indicates whether we have attached already to the shared
        // memory containing the sequence of captured images.
        bool hasAttachedToSharedImageMemory = false;

        // Using this variable, we will access the captured images while
        // also having convenient automated system resource management.
        SharedPointer<SharedMemory> sharedImageMemory;

        ifstream file(argv[recIndex+1]);
        CSVRow row;
        // read out the header row
        row.readNextRow(file);
        uint32_t frameNumber=1, csvFN;
        int32_t VPx,VPy,BLx,BLy,BRx,BRy,TLx,TLy,TRx,TRy;
        stringstream frameMessage;
        stringstream VPMessage;
        frameMessage.str(string());
        VPMessage.str(string());
        // Frame-by-frame mode flag: when true, playback pauses after every
        // frame until the user presses 'n' (next frame), SPACE (resume
        // continuous playback) or ESC (quit).
        bool fbf=false;
        
        // Main data processing loop.
        while (player.hasMoreData()) {
            // Read next entry from recording.
            nextContainer = player.getNextContainerToBeSent();

            // Data type SHARED_IMAGE contains a SharedImage data structure that
            // provides meta-information about the captured image.
            if (nextContainer.getDataType() == Container::SHARED_IMAGE) {
                // Read the data structure to retrieve information about the image.
                SharedImage si = nextContainer.getData<SharedImage>();

                // Check if we have already attached to the shared memory.
                if (!hasAttachedToSharedImageMemory) {
                    sharedImageMemory = SharedMemoryFactory::attachToSharedMemory(si.getName());

                    // Toggle the flag as we have now attached to the shared memory.
                    hasAttachedToSharedImageMemory = true;
                }

                // Check if we could successfully attach to the shared memory.
                if (sharedImageMemory->isValid()) {
                    // Using a scoped lock to get exclusive access.
                    {
                        Lock l(sharedImageMemory);
                        if (image == NULL) {
                            // Create the IplImage header data and access the shared memory for the actual image data. 
                            image = cvCreateImageHeader(cvSize(si.getWidth(), si.getHeight()), IPL_DEPTH_8U, si.getBytesPerPixel());

                            // Let the IplImage point to the shared memory containing the captured image.
                            // NOTE: the IplImage does NOT own this buffer — the shared
                            // memory segment does.
                            image->imageData = static_cast<char*>(sharedImageMemory->getSharedMemory());
                        }
                    }

                    // Show the image using OpenCV.
                    
                    // CSV overlay mode: active when a CSV file argument was
                    // supplied (argc==4, or argc==3 where argv[1] is not the
                    // "-l" log switch).
                    if(argc==4 || (argc==3 && string(argv[1]).compare("-l")!=0))
                    {
                        if(! row.readNextRow(file)) break;
                        // Skip blank CSV rows (empty first column).
                        // NOTE(review): this inner 'break' only leaves the
                        // skip-loop, not the outer playback loop — confirm that
                        // hitting EOF while skipping blanks should really
                        // continue processing the stale 'row'.
                        while(row[0].compare("")==0)
                            if(! row.readNextRow(file)) break;
                        
                        // Column 0: the frame number this CSV row refers to.
                        sscanf(row[0].c_str(), "%d", &csvFN);
                        
                        // Only draw the overlay when the CSV row matches the
                        // frame currently being played back.
                        if(frameNumber==csvFN)
                        {
                            Mat img = cvarrToMat(image);
                            
                        
                            // Reset and rebuild the on-screen label strings.
                            frameMessage.str(string());
                            VPMessage.str(string());
                            // Columns 9/10: vanishing point coordinates.
                            sscanf(row[9].c_str(), "%d", &VPx);
                            sscanf(row[10].c_str(), "%d", &VPy);
                            
                            frameMessage<<"Frame "<<frameNumber;
                            VPMessage<<"Vanishing Point ("<<VPx<<","<<VPy<<")";
                            
                            setLabel(img, frameMessage.str(), cvPoint(30,45));
                            setLabel(img, VPMessage.str(), cvPoint(30,60));
                            
                            if(log)
                                cout << frameNumber << ", " << VPx << ", " << VPy <<endl;
                            
                            // print support points and lines
                            // Columns 1-8: bottom-left (BL), top-left (TL),
                            // top-right (TR) and bottom-right (BR) lane support
                            // points. The "+=60" shifts each y-coordinate into
                            // full-image space — presumably undoing a 60-pixel
                            // crop used during detection; TODO confirm offset.
                            sscanf(row[1].c_str(), "%d", &BLx);
                            sscanf(row[2].c_str(), "%d", &BLy);BLy+=60;
                            sscanf(row[3].c_str(), "%d", &TLx);
                            sscanf(row[4].c_str(), "%d", &TLy);TLy+=60;
                            sscanf(row[5].c_str(), "%d", &TRx);
                            sscanf(row[6].c_str(), "%d", &TRy);TRy+=60;
                            sscanf(row[7].c_str(), "%d", &BRx);
                            sscanf(row[8].c_str(), "%d", &BRy);BRy+=60;
                            
                            // Mark the four support points.
                            circle(img, Point(BLx,BLy), 5, CV_RGB(255, 255, 255), CV_FILLED);
                            circle(img, Point(TLx,TLy), 5, CV_RGB(255, 255, 255), CV_FILLED);
                            circle(img, Point(TRx,TRy), 5, CV_RGB(255, 255, 255), CV_FILLED);
                            circle(img, Point(BRx,BRy), 5, CV_RGB(255, 255, 255), CV_FILLED);
                            
                            // Extend each lane edge (left: BL-TL, right: BR-TR)
                            // across the whole image by extrapolating its slope.
                            // NOTE(review): divides by (TLx-BLx)/(TRx-BRx) — a
                            // perfectly vertical edge yields inf/NaN; confirm
                            // the CSV can never contain equal x-coordinates.
                            double slope1 = static_cast<double>(TLy-BLy)/static_cast<double>(TLx-BLx);
                            double slope2 = static_cast<double>(TRy-BRy)/static_cast<double>(TRx-BRx);
                            Point p1(0,0), q1(img.cols,img.rows);
                            Point p2(0,0), q2(img.cols,img.rows);
                            p1.y = -(BLx-p1.x) * slope1 + BLy;
                            q1.y = -(TLx-q1.x) * slope1 + TLy;
                            p2.y = -(BRx-p2.x) * slope2 + BRy;
                            q2.y = -(TRx-q2.x) * slope2 + TRy;
                            
                            line(img,p1,q1,CV_RGB(255, 255, 255),1,CV_AA);
                            line(img,p2,q2,CV_RGB(255, 255, 255),1,CV_AA);
                            
                            imshow("CaroloCup-CameraPlayback", img);
                        }
                    }
                    else
                        cvShowImage("CaroloCup-CameraPlayback", image);

                    // Let the image render before proceeding to the next image.
                    char c = cvWaitKey(10);
                    // Check if the user wants to stop the replay by pressing ESC or pause it by pressing SPACE (needed also to go frame-by-frame).
                    if (static_cast<uint8_t>(c) == 27) break;
                    else if (static_cast<uint8_t>(c) == 32 || fbf) {
                        // Paused: block until the user picks an action.
                        do
                        {
                            c = cvWaitKey();
                        }while(c!='n' && static_cast<uint8_t>(c) != 32 && static_cast<uint8_t>(c) != 27);
                        
                        if (static_cast<uint8_t>(c) == 27) break; // ESC
                        else if (static_cast<uint8_t>(c) == 32) fbf=false; // SPACE -> continue
                        else if (c=='n') fbf=true; // pressed 'n' -> next frame
                    }
                    
                    ++frameNumber;
                }
            }
        }

        // maybe print EOF message && wait for user input?

        // Release IplImage data structure.
        // NOTE(review): 'image' was created with cvCreateImageHeader and its
        // imageData points into shared memory we do not own; cvReleaseImage
        // also frees the pixel buffer. cvReleaseImageHeader(&image) looks like
        // the correct call here — confirm against the shared-memory lifetime.
        cvReleaseImage(&image);

        // Close playback window.
        cvDestroyWindow("CaroloCup-CameraPlayback");

        // The shared memory will be automatically released.
    }

    // Return error code.
    return retVal;
}
Esempio n. 21
0
/**
 * Draws this shadow image layer using whichever texture source is currently
 * populated. Exactly one drawing path runs per call:
 *   1. ImageBridge (mImageContainerID set): fetch the latest SharedImage from
 *      ImageContainerParent and (re)upload it only when its version changed,
 *      then fall through to one of the texture branches below.
 *   2. mTexImage            — tiled TextureImage (RGB surface) path.
 *   3. mExternalBufferTexture (Gonk only) — gralloc external-buffer path.
 *   4. mSharedHandle        — cross-process shared texture handle path.
 *   5. mRGBTexture          — plain RGB texture path.
 *   6. otherwise            — planar YCbCr path using mYUVTexture[0..2].
 *
 * @param aPreviousFrameBuffer  part of the RenderLayer contract; not read in
 *                              this implementation.
 * @param aOffset               render-target offset forwarded to the shaders.
 */
void
ShadowImageLayerOGL::RenderLayer(int aPreviousFrameBuffer,
                                 const nsIntPoint& aOffset)
{
  if (mOGLManager->CompositingDisabled()) {
    return;
  }
  mOGLManager->MakeCurrent();
  // ImageBridge path: the image lives in a compositor-side container keyed by
  // mImageContainerID; upload its pixels into our textures if it changed.
  if (mImageContainerID) {
    ImageContainerParent::SetCompositorIDForImage(mImageContainerID,
                                                  mOGLManager->GetCompositorID());
    uint32_t imgVersion = ImageContainerParent::GetSharedImageVersion(mImageContainerID);
    SharedImage* img = ImageContainerParent::GetSharedImage(mImageContainerID);
    // Only re-upload when the shared image's version has advanced; otherwise
    // the textures already hold the current frame.
    if (imgVersion != mImageVersion) {
      if (img && (img->type() == SharedImage::TYUVImage)) {
        UploadSharedYUVToTexture(img->get_YUVImage());
  
        mImageVersion = imgVersion;
      } else if (img && (img->type() == SharedImage::TYCbCrImage)) {
        // Wrap the shmem-backed YCbCr data without copying before upload.
        ShmemYCbCrImage shmemImage(img->get_YCbCrImage().data(),
                                   img->get_YCbCrImage().offset());
        UploadSharedYCbCrToTexture(shmemImage, img->get_YCbCrImage().picture());

        mImageVersion = imgVersion;
      } else if (img && (img->type() == SharedImage::TRGBImage)) {
        UploadSharedRGBToTexture(&img->get_RGBImage().data(),
                                 img->get_RGBImage().picture(),
                                 img->get_RGBImage().rgbFormat());
        mImageVersion = imgVersion;
      }
    }
#ifdef MOZ_WIDGET_GONK
    // Gonk: a gralloc-backed surface descriptor is bound directly as an
    // external texture (no pixel copy). Note this runs even when the version
    // did not change — presumably the external buffer must be re-bound each
    // frame; confirm against the gralloc lifetime rules.
    if (img
        && (img->type() == SharedImage::TSurfaceDescriptor)
        && (img->get_SurfaceDescriptor().type() == SurfaceDescriptor::TSurfaceDescriptorGralloc)) {
      const SurfaceDescriptorGralloc& desc = img->get_SurfaceDescriptor().get_SurfaceDescriptorGralloc();
      sp<GraphicBuffer> graphicBuffer = GrallocBufferActor::GetFrom(desc);
      mSize = gfxIntSize(graphicBuffer->getWidth(), graphicBuffer->getHeight());
      if (!mExternalBufferTexture.IsAllocated()) {
        mExternalBufferTexture.Allocate(gl());
      }
      gl()->MakeCurrent();
      gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
      gl()->BindExternalBuffer(mExternalBufferTexture.GetTextureID(), graphicBuffer->getNativeBuffer());
      mImageVersion = imgVersion;
    }
#endif
  }


  // Path 2: tiled TextureImage (RGB surface uploaded in Swap/Init).
  if (mTexImage) {
    NS_ASSERTION(mTexImage->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");

    ShaderProgramOGL *colorProgram =
      mOGLManager->GetProgram(mTexImage->GetShaderProgramType(), GetMaskLayer());

    colorProgram->Activate();
    colorProgram->SetTextureUnit(0);
    colorProgram->SetLayerTransform(GetEffectiveTransform());
    colorProgram->SetLayerOpacity(GetEffectiveOpacity());
    colorProgram->SetRenderOffset(aOffset);
    colorProgram->LoadMask(GetMaskLayer());

    mTexImage->SetFilter(mFilter);
    mTexImage->BeginTileIteration();

    // Draw every non-empty tile of the (possibly tiled) texture image.
    if (gl()->CanUploadNonPowerOfTwo()) {
      do {
        nsIntRect rect = mTexImage->GetTileRect();
        if (!rect.IsEmpty()) {
          TextureImage::ScopedBindTextureAndApplyFilter texBind(mTexImage, LOCAL_GL_TEXTURE0);
          colorProgram->SetLayerQuadRect(rect);
          mOGLManager->BindAndDrawQuad(colorProgram);
        }
      } while (mTexImage->NextTile());
    } else {
      do {
        nsIntRect rect = mTexImage->GetTileRect();
        if (!rect.IsEmpty()) {
          TextureImage::ScopedBindTextureAndApplyFilter texBind(mTexImage, LOCAL_GL_TEXTURE0);
          colorProgram->SetLayerQuadRect(rect);
          // We can't use BindAndDrawQuad because that always uploads the whole texture from 0.0f -> 1.0f
          // in x and y. We use BindAndDrawQuadWithTextureRect to actually draw a subrect of the texture
          mOGLManager->BindAndDrawQuadWithTextureRect(colorProgram,
                                                      nsIntRect(0, 0, mTexImage->GetTileRect().width,
                                                                mTexImage->GetTileRect().height),
                                                      mTexImage->GetTileRect().Size());
        }
      } while (mTexImage->NextTile());
    }
#ifdef MOZ_WIDGET_GONK
  // Path 3 (Gonk): draw the gralloc buffer bound above as an external texture.
  } else if (mExternalBufferTexture.IsAllocated()) {
    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_EXTERNAL, mExternalBufferTexture.GetTextureID());

    ShaderProgramOGL *program = mOGLManager->GetProgram(RGBAExternalLayerProgramType, GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(LOCAL_GL_TEXTURE_EXTERNAL, mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        mSize.width, mSize.height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuad(program);

    // Make sure that we release the underlying external image
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_EXTERNAL, 0);
    mExternalBufferTexture.Release();
#endif
  // Path 4: texture shared across processes via a platform share handle.
  } else if (mSharedHandle) {
    GLContext::SharedHandleDetails handleDetails;
    if (!gl()->GetSharedHandleDetails(mShareType, mSharedHandle, handleDetails)) {
      NS_ERROR("Failed to get shared handle details");
      return;
    }

    ShaderProgramOGL* program = mOGLManager->GetProgram(handleDetails.mProgramType, GetMaskLayer());
   
    program->Activate();
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->SetTextureTransform(handleDetails.mTextureTransform);
    program->LoadMask(GetMaskLayer());

    MakeTextureIfNeeded(gl(), mTexture);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(handleDetails.mTarget, mTexture);
    
    // Attach while drawing, detach afterwards — the shared handle must not
    // stay attached beyond this draw.
    if (!gl()->AttachSharedHandle(mShareType, mSharedHandle)) {
      NS_ERROR("Failed to bind shared texture handle");
      return;
    }

    // Blend for premultiplied-alpha content.
    gl()->fBlendFuncSeparate(LOCAL_GL_ONE, LOCAL_GL_ONE_MINUS_SRC_ALPHA,
                             LOCAL_GL_ONE, LOCAL_GL_ONE);
    gl()->ApplyFilterToBoundTexture(mFilter);
    program->SetLayerQuadRect(nsIntRect(nsIntPoint(0, 0), mSize));
    mOGLManager->BindAndDrawQuad(program, mInverted);
    gl()->fBindTexture(handleDetails.mTarget, 0);
    gl()->DetachSharedHandle(mShareType, mSharedHandle);
  // Path 5: plain RGB texture uploaded via UploadSharedRGBToTexture.
  } else if (mRGBTexture.IsAllocated()) {
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mRGBTexture.GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);

    ShaderProgramOGL *shader = mOGLManager->GetProgram(RGBALayerProgramType, GetMaskLayer());
    shader->Activate();

    shader->SetLayerQuadRect(nsIntRect(0, 0,
                                           mPictureRect.width,
                                           mPictureRect.height));
    shader->SetTextureUnit(0);
    shader->SetLayerTransform(GetEffectiveTransform());
    shader->SetLayerOpacity(GetEffectiveOpacity());
    shader->SetRenderOffset(aOffset);
    shader->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(shader,
                                                mPictureRect,
                                                nsIntSize(mSize.width, mSize.height));
  // Path 6 (fallback): planar YCbCr — bind the Y, Cb and Cr planes to texture
  // units 0-2 and composite them with the YCbCr shader.
  } else {
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mYUVTexture[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mYUVTexture[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mYUVTexture[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);

    ShaderProgramOGL *yuvProgram = mOGLManager->GetProgram(YCbCrLayerProgramType, GetMaskLayer());

    yuvProgram->Activate();
    yuvProgram->SetLayerQuadRect(nsIntRect(0, 0,
                                           mPictureRect.width,
                                           mPictureRect.height));
    yuvProgram->SetYCbCrTextureUnits(0, 1, 2);
    yuvProgram->SetLayerTransform(GetEffectiveTransform());
    yuvProgram->SetLayerOpacity(GetEffectiveOpacity());
    yuvProgram->SetRenderOffset(aOffset);
    yuvProgram->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(yuvProgram,
                                                mPictureRect,
                                                nsIntSize(mSize.width, mSize.height));
 }
}