Image *BarrelCorrection::apply(Image *image)
{
    LOG_TRACE("BarrelCorrection::apply()");

    if (m_outputWidth == 0 || m_outputHeight == 0) {
        setOutputSize(image->getWidth(), image->getHeight());
    }

    if (m_correctedImage == NULL || m_pixelMapping == NULL) {
        LOG_ERROR("BarrelCorrection::apply(): Output image or pixel mapping array is not set. The filter is not ready to be applied");
        throw CameraException("Output image or pixel mapping array is not set. The filter is not ready to be applied");
    }

    if (image->getFormat() != Image::FORMAT_RGB32) {
        LOG_ERROR("BarrelCorrection::apply(): Input image is not in RGB32 format");
        throw CameraException("Input image is not in RGB32 format");
    }

    int numPixels = m_correctedImage->getWidth() * m_correctedImage->getHeight();
    int *mapping = m_pixelMapping;
    ImageBuffer *cBufferAddr = m_correctedImage->getBufferAddress();
    ImageBuffer *dBufferBaseAddr = image->getBufferAddress();
    ImageBuffer *dBufferAddr = NULL;

    // The highest mapped pixel index must still fit in the input buffer:
    // pixel m_pixelMappingMax occupies bytes [max * 4, max * 4 + 3].
    if (m_pixelMappingMin < 0 || (m_pixelMappingMax + 1) * 4 > image->getBufferSize()) {
        LOG_ERROR("BarrelCorrection::apply(): Input image is too small to contain the specified distorted rectangle");
        throw CameraException("Input image is too small to contain the specified distorted rectangle");
    }

    for (int i = 0; i < numPixels; i++) {
        // Calculate which pixel to use from the distorted image based on the pixel mapping
        dBufferAddr = dBufferBaseAddr + (*mapping) * 4; // 4 bytes per pixel (BGRX)

        // Copy the pixel color from the distorted to the corrected image.
        *(cBufferAddr++) = *(dBufferAddr++); // B (Blue)
        *(cBufferAddr++) = *(dBufferAddr++); // G (Green)
        *(cBufferAddr++) = *(dBufferAddr++); // R (Red)
        *(cBufferAddr++) = *(dBufferAddr++); // X (Not used)
        mapping++;
    }

    return m_correctedImage;
}
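// For context: a minimal sketch of how a mapping like m_pixelMapping could be built
// with the common single-coefficient radial distortion model. buildPixelMapping()
// and the coefficient k are illustrative assumptions (not part of the original
// class), and the sketch assumes the distorted and corrected images share the same
// dimensions and that m_pixelMapping is already allocated to width * height entries.
void BarrelCorrection::buildPixelMapping(int width, int height, double k)
{
    double cx = width / 2.0, cy = height / 2.0;
    int *mapping = m_pixelMapping;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            // Normalized offset from the optical center.
            double dx = (x - cx) / cx;
            double dy = (y - cy) / cy;
            double r2 = dx * dx + dy * dy;

            // Radial model: sample the distorted image at r * (1 + k * r^2).
            double factor = 1.0 + k * r2;
            int srcX = (int)(cx + dx * factor * cx);
            int srcY = (int)(cy + dy * factor * cy);

            // Clamp so every entry stays a valid pixel index.
            srcX = srcX < 0 ? 0 : (srcX >= width ? width - 1 : srcX);
            srcY = srcY < 0 ? 0 : (srcY >= height ? height - 1 : srcY);

            *(mapping++) = srcY * width + srcX;
        }
    }
}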
Shader SceneCamera::getScreenEffect() const
{
    if (thereIsScreenEffect())
        return contentManager->getShader(screenEffectPath);
    else
        throw CameraException("No screen effect shader is set");
}
double QuantixCamera::getFrameRefreshTime(EventMetadatum &eventMetadatum)
{
    double parallelShiftTimeNS = 80000; // 80 microseconds per row shift
    double serialDiscardRateNS = 100;   // 0.1 microseconds per discarded pixel
    double serialRecordRateNS;

    // PARAM_PIX_TIME reports the serial pixel readout time in ns.
    uns16 pixelWriteTimeNS;
    if (!pl_get_param(cameraHandle, PARAM_PIX_TIME, ATTR_CURRENT, (void *) &pixelWriteTimeNS))
    {
        char msg[ERROR_MSG_LEN]; // for error handling
        pl_error_message(pl_error_code(), msg);
        std::cerr << "Pixel readout time error: " << msg << std::endl;
        throw CameraException("Error getting pixel readout time.");
    }
    serialRecordRateNS = (double) pixelWriteTimeNS;

    int totalPixels = (cameraState->imageHeight.getSize()) * (cameraState->imageWidth.getSize());
    int recordedRows = eventMetadatum.cropVector.at(2) + 1;
    int recordedPixels = recordedRows * (eventMetadatum.cropVector.at(3) + 1);

    int binSize;
    if (!STI::Utils::stringToValue(cameraState->binSize.get(), binSize))
        throw CameraException("Error transforming string to value");

    // Based on the 6303 data sheet
    double refreshTime = 0;
    refreshTime += recordedRows * parallelShiftTimeNS;                      // Time to shift the relevant rows
    refreshTime += recordedPixels * serialRecordRateNS / binSize / binSize; // Time to record the relevant (binned) pixels
    refreshTime += (totalPixels - recordedPixels) * serialDiscardRateNS;    // Time to discard the non-relevant pixels

    std::cerr << "Frame refresh time (s): " << refreshTime / 1000000000 << std::endl;
    return refreshTime;
}
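// A quick sanity check of the formula above with illustrative numbers (assumed for
// the example, not taken from the code): a 1536 x 1024 sensor, a 512 x 512 ROI,
// 1x1 binning, and a 100 ns pixel readout time:
//
//   shift:   512 rows                  * 80000 ns =  40.96 ms
//   record:  512 * 512 pixels          *   100 ns =  26.21 ms
//   discard: (1572864 - 262144) pixels *   100 ns = 131.07 ms
//
// i.e. roughly 0.2 s per frame, dominated by discarding the unused pixels.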
virtual std::shared_ptr<RemoteReleaseControl> EnterReleaseControl() override
{
    if (!GetDeviceCapability(capRemoteReleaseControl))
    {
        throw CameraException(_T("PSREC::SourceDevice::EnterReleaseControl"),
            _T("Not supported"),
            prERROR_PRSDK_COMPONENTID | prNOT_SUPPORTED, __FILE__, __LINE__);
    }

    std::shared_ptr<SourceDeviceImpl> spSourceDevice = shared_from_this();

    return std::shared_ptr<RemoteReleaseControl>(new RemoteReleaseControlImpl(m_hCamera, spSourceDevice));
}
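// A minimal usage sketch of the capability-gated call above, assuming a
// std::shared_ptr<SourceDevice> named sourceDevice obtained elsewhere (not shown
// in the original):
try
{
    std::shared_ptr<RemoteReleaseControl> spControl = sourceDevice->EnterReleaseControl();
    // ... drive the camera through spControl ...
}
catch (const CameraException&)
{
    // thrown when the device doesn't report capRemoteReleaseControl
}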
void QuantixCamera::setupCameraAcquisition(std::vector<EventMetadatum> *eM)
{
    uns16 s1, p1, s2, p2, sBin, pBin;

    // Check camera temperature for metadata purposes
    cameraState->get(cameraState->temperature);

    if (eM == NULL)
        throw CameraException("No events provided");

    if (eM->at(0).cropVector.size() == 4)
    {
        p1 = (uns16) eM->at(0).cropVector.at(0);
        s1 = (uns16) eM->at(0).cropVector.at(1);
        p2 = p1 + (uns16) eM->at(0).cropVector.at(2); // width is parallel
        s2 = s1 + (uns16) eM->at(0).cropVector.at(3); // height is serial
    }
    else
        throw CameraException("Could not format crop vector (ROI)");

    if (!STI::Utils::stringToValue(cameraState->binSize.get(), sBin))
        throw CameraException("Error transforming string to value");
    pBin = sBin;

    rgn_type region = {s1, s2, sBin, p1, p2, pBin};

    uns16 currentTriggerMode;
    if (!STI::Utils::stringToValue(cameraState->triggerMode.get(), currentTriggerMode))
        throw CameraException("Error transforming string to value");

    uns32 exposureTime = (uns32) (eM->at(0).exposureTime / 1000000); // This gets ignored

    // Only one region per image. What do multiple regions do? Multiple images, same exposure?
    uns32 bufferSize, frameSize = 1;
    uns16 numberOfExposures = (uns16) eM->size();
    std::cerr << "Number of exposures: " << numberOfExposures << std::endl;

    if (!pl_exp_setup_seq(cameraHandle, numberOfExposures, 1, &region, currentTriggerMode, exposureTime, &frameSize))
        throw CameraException("Could not setup Quantix for acquisition");

    bufferSize = (frameSize / 2) * numberOfExposures; // frameSize comes in bytes and an uns16 is 2 bytes
    std::cerr << "Buffer Size: " << bufferSize << std::endl;
    std::cerr << "Crop vector numbers: " << eM->at(0).cropVector.at(2) << " and " << eM->at(0).cropVector.at(3) << std::endl;

    try {
        delete [] imageBuffer;
        imageBuffer = new uns16[bufferSize];
    }
    catch(...) {
        throw CameraException("Error allocating memory to image buffer");
    }
}
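// An illustrative call sequence for the setup above. The EventMetadatum fields
// follow their use in setupCameraAcquisition() (cropVector = {p1, s1, width - 1,
// height - 1}, exposureTime presumably in ns); the literal values and the camera
// pointer are assumptions for the example.
std::vector<EventMetadatum> events(1);
events.at(0).cropVector.push_back(0);   // first parallel row (p1)
events.at(0).cropVector.push_back(0);   // first serial column (s1)
events.at(0).cropVector.push_back(511); // parallel extent (width - 1)
events.at(0).cropVector.push_back(511); // serial extent (height - 1)
events.at(0).exposureTime = 10000000;   // 10 ms if the units are ns

try {
    camera->setupCameraAcquisition(&events);
}
catch (CameraException &e) {
    std::cerr << "Acquisition setup failed: " << e.what() << std::endl;
}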
virtual std::shared_ptr<CameraFileSystem> GetFileSystem() override
{
    if (!GetDeviceCapability(capCameraFileSystem))
    {
        throw CameraException(_T("PSREC::SourceDevice::GetFileSystem"),
            _T("Not supported"),
            prERROR_PRSDK_COMPONENTID | prNOT_SUPPORTED, __FILE__, __LINE__);
    }

    // support file system using WIA
    WIA::RefSp wiaRef = std::make_shared<WIA::Ref>();

    std::shared_ptr<SourceInfo> sourceInfo = std::make_shared<WIA::SourceInfoImpl>(wiaRef, m_deviceId, m_spDeviceInfo->m_cszModel);

    std::shared_ptr<SourceDevice> sourceDevice = sourceInfo->Open();

    return sourceDevice->GetFileSystem();
}
QuantixCamera::QuantixCamera(int16 handle) : cameraHandle(handle)
{
    initialized = false;
    notDestructed = false;
    extension = ".tif";
    rotationAngle = 270; // makes the height the right direction
    isAcquiring = false;
    imageBuffer = NULL;

    pauseCameraMutex = new omni_mutex();
    pauseCameraCondition = new omni_condition(pauseCameraMutex);
    acquisitionMutex = new omni_mutex();
    acquisitionCondition = new omni_condition(acquisitionMutex);

    filePath = createFilePath();

    try {
        cameraState = new QuantixState(cameraHandle);

        // Initialize the data collection libraries
        if (!pl_exp_init_seq())
            throw CameraException("Could not init Quantix for acquisition");

        initialized = true;
    }
    catch (CameraException &e) {
        std::cerr << "Camera Initialization Error: " << e.what() << std::endl;
        initialized = false;
    }

    if (initialized) {
        notDestructed = true;
        //omni_thread::create(playCameraWrapper, (void*) this, omni_thread::PRIORITY_HIGH);
    }
}
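// The commented-out omni_thread::create() call above implies a static thread entry
// point. A typical wrapper under that assumption is sketched below; playCamera()
// is hypothetical here (an acquisition loop is implied, but not shown in the
// original).
void QuantixCamera::playCameraWrapper(void *object)
{
    QuantixCamera *thisObject = static_cast<QuantixCamera *>(object);
    thisObject->playCamera(); // run the camera's acquisition loop on this thread
}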
virtual void GetHistogram(T_enHistogramType, std::vector<unsigned int>&) override
{
    // histogram not supported by PSREC
    throw CameraException(_T("PSREC::Viewfinder::GetHistogram"),
        _T("Not supported"),
        prERROR_PRSDK_COMPONENTID | prNOT_SUPPORTED, __FILE__, __LINE__);
}