bool convertFrom(Pylon::GrabResult Result)
{
    const struct Pylon::SImageFormat format = getInFormat(Result);

    // (re)create the pixel-format converter if it is missing, not initialized,
    // or the incoming format has changed since the last frame
    bool init = false;
#ifdef HAVE_LIBPYLONUTILITY
    if(!converter) init = true;
    if(converter && !converter->IsInitialized()) init = true;
#endif
    if(format != inFormat) init = true;
    if(init)
    {
        makeConverter(format);
        inFormat = format;
    }

    // resize the target image to the dimensions of the current frame
    image.xsize = inFormat.Width;
    image.ysize = inFormat.Height;
    image.reallocate();

    if(0)
    {
        ;
#ifdef HAVE_LIBPYLONUTILITY
    }
    else if(converter)
    {
        // let the Pylon utility converter unpack the raw buffer into the image
        const struct Pylon::SOutputImageFormat oformat = getOutFormat(image);
        converter->Convert(image.data,
                           image.xsize * image.ysize * image.csize,
                           Result.Buffer(),
                           Result.GetPayloadSize(),
                           format,
                           oformat);
#endif
    }
    else
    {
        // no converter available: copy the raw buffer directly,
        // assuming it is already RGBA or gray
        if(image.format == GL_RGBA)
            image.fromRGBA(reinterpret_cast<unsigned char*>(Result.Buffer()));
        else
            image.fromGray(reinterpret_cast<unsigned char*>(Result.Buffer()));
    }
    return true;
}
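// A minimal usage sketch for convertFrom() above (hypothetical caller code; the
// 'grabber' stream grabber and its buffer registration are assumed to exist
// elsewhere in this class): retrieve a result, convert it into 'image', and
// hand the buffer back to the grabber.
//
//   Pylon::GrabResult result;
//   if( grabber.RetrieveResult( result ) && result.Succeeded() )
//   {
//       convertFrom( result );                        // fills 'image' from the raw frame
//       grabber.QueueBuffer( result.Handle(), NULL ); // requeue for the next grab
//   }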
static void set_( ChunkData& d, boost::posix_time::ptime& t, const Pylon::GrabResult& result, Pylon::CBaslerGigECamera& camera )
{
    parser->AttachBuffer( ( unsigned char* ) result.Buffer(), result.GetPayloadSize() );
    d.timestamp = t;
    d.frames = camera.ChunkFramecounter.GetValue();
    d.ticks = camera.ChunkTimestamp.GetValue();
    d.line_trigger_ignored = camera.ChunkLineTriggerIgnoredCounter.GetValue();
    d.frame_trigger_ignored = camera.ChunkFrameTriggerIgnoredCounter.GetValue();
    d.line_trigger_end_to_end = camera.ChunkLineTriggerEndToEndCounter.GetValue();
    d.frame_trigger = camera.ChunkFrameTriggerCounter.GetValue();
    d.frames_per_trigger = camera.ChunkFramesPerTriggerCounter.GetValue();
    parser->DetachBuffer();
}
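// set_() above assumes that 'parser' has already been created and that chunk mode is
// active on the camera. A minimal setup sketch (hypothetical initialization code;
// node and type names follow the Pylon low-level API and Basler GigE chunk feature
// naming, adjust to the actual camera model):
//
//   Pylon::IChunkParser* parser = camera.CreateChunkParser();
//   camera.ChunkModeActive.SetValue( true );
//   // enable each chunk read in set_() via ChunkSelector / ChunkEnable,
//   // e.g. frame counter, timestamp and the trigger counters
//   ...
//   camera.DestroyChunkParser( parser ); // on shutdown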
static P capture_( Pylon::CBaslerGigECamera& camera, Pylon::CBaslerGigECamera::StreamGrabber_t& grabber )
//static Pair capture( Ark::Robotics::Camera::Gige& camera )
{
    /// @todo if color spatial correction implemented, mind the following:
    ///
    /// Runner_Users_manual.pdf, 8.2.2.3:
    ///
    /// If you are using a color camera, you have spatial correction enabled
    /// and you have the frame start trigger mode set to off, you must discard
    /// the first n x 2 lines from the first frame transmitted by the camera
    /// after an acquisition start command is issued (where n is the absolute
    /// value of the current spatial correction parameter setting).
    ///
    /// If you have spatial correction enabled and you have the frame start
    /// trigger mode set to on, you must discard the first n x 2 lines from
    /// each frame transmitted by the camera.
    static const unsigned int retries = 10; // quick and dirty: arbitrary
    for( unsigned int i = 0; !done && i < retries; ++i )
    {
        Pylon::GrabResult result;
        //camera.AcquisitionStart.Execute(); // acquire single image (since acquisition mode set so)
        if( !grabber.GetWaitObject().Wait( timeout ) ) // quick and dirty: arbitrary timeout
        {
            std::cerr << "basler-cat: timeout" << std::endl;
            grabber.CancelGrab();
            while( grabber.RetrieveResult( result ) ); // get all buffers back
            if( is_shutdown ) { done = true; }
            return P();
        }
        boost::posix_time::ptime t = boost::get_system_time();
        grabber.RetrieveResult( result );
        if( !result.Succeeded() )
        {
            std::cerr << "basler-cat: acquisition failed: " << result.GetErrorDescription() << " (" << result.GetErrorCode() << ")" << std::endl;
            std::cerr << " status: " << ( result.Status() == Pylon::Idle ? "idle"
                                        : result.Status() == Pylon::Queued ? "queued"
                                        : result.Status() == Pylon::Grabbed ? "grabbed"
                                        : result.Status() == Pylon::Canceled ? "canceled"
                                        : result.Status() == Pylon::Failed ? "failed" : "unknown" ) << std::endl;
            std::cerr << " run basler-cat --verbose and check your --packet-size settings" << std::endl;
            continue;
        }
        P pair;
        static const snark::cv_mat::serialization::header header = options.get_header();
        switch( header.type )
        {
            case CV_8UC1:
                pair.second = cv::Mat( result.GetSizeY(), result.GetSizeX(), header.type );
                ::memcpy( pair.second.data, reinterpret_cast< const char* >( result.Buffer() ), pair.second.dataend - pair.second.datastart );
                break;
            case CV_8UC3: // quick and dirty for now: rgb are not contiguous in basler camera frame
                pair.second = cv::Mat( result.GetSizeY(), result.GetSizeX(), header.type );
                ::memcpy( pair.second.data, reinterpret_cast< const char* >( result.Buffer() ), pair.second.dataend - pair.second.datastart );
                cv::cvtColor( pair.second, pair.second, CV_RGB2BGR );
                break;
            default: // quick and dirty for now
                std::cerr << "basler-cat: cv::mat type " << options.type << " not supported" << std::endl;
        }
        set_( pair.first, t, result, camera );
        grabber.QueueBuffer( result.Handle(), NULL ); // requeue buffer
        if( is_shutdown ) { done = true; }
        return pair;
    }
    if( is_shutdown ) { done = true; }
    return P();
}
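// Regarding the @todo in capture_() above: if color spatial correction were enabled,
// the first n x 2 lines of the affected frames would have to be dropped after the
// copy into pair.second. A possible sketch (hypothetical, not part of the current
// code; 'n' is assumed to hold the absolute value of the spatial correction setting):
//
//   pair.second = pair.second.rowRange( n * 2, pair.second.rows ).clone();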