Viewer::Viewer( const orca::MultiCameraDataPtr& multiCameraData,
                const orcaice::Context&         context ) :
    context_(context)
{
    isPadded_ = false;

    // Assume each camera is identical and the image formats are identical
    std::string format = multiCameraData->cameraDataVector.at(0)->description->format;
    int width = multiCameraData->cameraDataVector.at(0)->description->width;
    int height = multiCameraData->cameraDataVector.at(0)->description->height;

    // class to search for image format properties
    ImageFormat imageFormat = ImageFormat::find( format );
    int numChannels = imageFormat.getNumberOfChannels();
    int depth = imageFormat.getBitsPerPixel()/numChannels;

    // set up opencv storage for the source image
    cvSrcImage_ = cvCreateImage( cvSize( width, height ), depth, numChannels );

    // check if opencv has padded the byte array so that the width is a multiple of 4 or 8 bytes
    orcaByteWidth_ = width*numChannels;
    if ( orcaByteWidth_ != cvSrcImage_->widthStep )
    {
        isPadded_ = true;
    }

    // set up opencv storage for the display image
    std::string displayFormat = "BGR8";
    ImageFormat imageDisplayFormat = ImageFormat::find( displayFormat );
    numChannels = imageDisplayFormat.getNumberOfChannels();
    depth = imageDisplayFormat.getBitsPerPixel()/numChannels;

    // The width of all the camera frames side-by-side
    int totalWidth = cvSrcImage_->width * multiCameraData->cameraDataVector.size();
    cvMultiDisplayImage_ = cvCreateImage( cvSize( totalWidth, height ), depth, numChannels );

    // dodgy opencv needs this so it has time to resize
    cvWaitKey(100);

    name_ = "MultiCameraViewer";
    cvNamedWindow( name_ );
    // context_.tracer()->debug("opencv window created",5);

    // start the timer for calculating the number of frames per second
    // the images are being displayed at
    orcaice::setToNow( oldFrameTime_ );

    // initialise font for displaying fps
    cvInitFont(&font_, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0.0, 1, CV_AA);
}
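// The sketch below is not part of the original source. It illustrates how the
// side-by-side cvMultiDisplayImage_ allocated above could be filled, one camera
// frame per horizontal slot, using the standard OpenCV C calls
// cvSetImageROI/cvCopy/cvResetImageROI. It assumes the source frame has already
// been converted to BGR8 (same depth and channel count as the display image);
// the helper name copyFrameToDisplaySlot is hypothetical.
static void copyFrameToDisplaySlot( const IplImage* srcBgr,
                                    IplImage*       multiDisplay,
                                    int             cameraIndex )
{
    // select the horizontal band belonging to this camera
    CvRect slot = cvRect( cameraIndex * srcBgr->width, 0,
                          srcBgr->width, srcBgr->height );
    cvSetImageROI( multiDisplay, slot );
    cvCopy( srcBgr, multiDisplay );
    cvResetImageROI( multiDisplay );
}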
void
MultiCameraWriter::initMultiImageLogWriter(const orca::MultiCameraDescriptionPtr &descr)
{
#ifdef OPENCV_FOUND
    for( unsigned int i=0; i<descr->descriptions.size(); ++i )
    {
        isPadded_[i] = false;

        // Get the properties of each camera
        std::string format = descr->descriptions[i]->format;
        int width = descr->descriptions[i]->width;
        int height = descr->descriptions[i]->height;

        // class to search for image format properties
        ImageFormat imageFormat = ImageFormat::find(format);
        int numChannels = imageFormat.getNumberOfChannels();
        int depth = imageFormat.getBitsPerPixel() / numChannels;

        // check if opencv has padded the byte array so that the width is a multiple of 4 or 8 bytes
        orcaByteWidth_[i] = width*numChannels;

        // Don't allocate image space yet
        cvImage_[i] = cvCreateImageHeader(cvSize(width, height), depth, numChannels);
        if (orcaByteWidth_[i] != cvImage_[i]->widthStep)
        {
            isPadded_[i] = true;
        }

        if (isPadded_[i])
            // Allocate space, we will need to copy image data properly
            cvCreateData(cvImage_[i]);

        // Allocate memory for bayer image conversion
        if (format == "BayerBG8" || format == "BayerGB8" || format == "BayerRG8" || format == "BayerGR8")
            cvBayer_[i] = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    }
#endif
}
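// Not part of the original source: a minimal sketch of the "copy image data
// properly" step that the padded case above prepares for. When widthStep is
// larger than width*numChannels, the tightly packed orca pixel buffer has to be
// copied one row at a time so that each row starts on OpenCV's aligned boundary.
// The helper name copyPaddedRows is hypothetical.
#include <cstring>   // for memcpy

static void copyPaddedRows( IplImage*            cvImage,
                            const unsigned char* orcaPixels,
                            int                  orcaByteWidth )
{
    for ( int row = 0; row < cvImage->height; ++row )
    {
        // destination rows are widthStep bytes apart, source rows orcaByteWidth apart
        memcpy( cvImage->imageData + row * cvImage->widthStep,
                orcaPixels + row * orcaByteWidth,
                orcaByteWidth );
    }
}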
void
MultiCameraReader::initDataStorage()
{
    // Create new MultiCameraData object
    data_ = new orca::MultiCameraData();

#ifdef OPENCV_FOUND
    // Resize variables appropriately
    cvImage_.resize(descr_->descriptions.size(), NULL);

    for( unsigned int i = 0; i < descr_->descriptions.size(); ++i )
    {
        data_->cameraDataVector.push_back( new orca::ImageData() );
        data_->cameraDataVector[i]->description = descr_->descriptions[i];

        // class to search for image format properties
        ImageFormat imageFormat = ImageFormat::find(descr_->descriptions[i]->format);
        int nChannels = imageFormat.getNumberOfChannels();

        // resize object buffer to fit image
        int imageSize = (int)ceil( nChannels
                                   * data_->cameraDataVector[i]->description->height
                                   * data_->cameraDataVector[i]->description->width );
        data_->cameraDataVector[i]->pixelData.resize( imageSize );
    }
#endif
}
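// Not part of the original source: a hedged sketch of how the reader could attach
// a decoded frame to one of the IplImage headers stored in cvImage_ without an
// extra copy, using cvSetData with a tightly packed row step (width * nChannels).
// It assumes the header has already been created with cvCreateImageHeader, that
// pixelData maps to std::vector<Ice::Byte>, and the helper name attachPixelData
// is hypothetical.
static void attachPixelData( IplImage*               cvImage,
                             std::vector<Ice::Byte>& pixelData,
                             int                     nChannels )
{
    // pixelData is tightly packed, so the row step is width*nChannels bytes
    cvSetData( cvImage, &pixelData[0], cvImage->width * nChannels );
}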