///////////////////////////////////////////////////////////////////////////////
//
// METHOD CIdsSimpleLiveDlg::InitDisplayMode()
//
// DESCRIPTION: - initializes the display mode
//
///////////////////////////////////////////////////////////////////////////////
int InitDisplayMode(HIDS* m_hCam)
{
    INT result = IS_SUCCESS;
    INT nColorMode;    // Y8/RGB16/RGB24/RGB32

    if (m_hCam == NULL)
    {
        return IS_NO_SUCCESS;
    }

    // Get sensor info
    SENSORINFO m_sInfo;    // sensor information struct
    is_GetSensorInfo(*m_hCam, &m_sInfo);

    // Free any previously allocated image memory
    if (m_pcImageMemory != NULL)
    {
        is_FreeImageMem(*m_hCam, m_pcImageMemory, m_lMemoryId);
    }
    m_pcImageMemory = NULL;

    // Set display mode to DIB
    result = is_SetDisplayMode(*m_hCam, IS_SET_DM_DIB);

    if (m_sInfo.nColorMode == IS_COLORMODE_BAYER)
    {
        // set up the color depth to the current Windows setting
        is_GetColorDepth(*m_hCam, &m_nBitsPerPixel, &nColorMode);
    }
    else if (m_sInfo.nColorMode == IS_COLORMODE_CBYCRY)
    {
        // for color camera models use a 32-bit packed RGB mode
        // (matches the 32 bpp buffer allocated below)
        nColorMode = IS_CM_BGRA8_PACKED;
        m_nBitsPerPixel = 32;
    }
    else
    {
        // for monochrome camera models use Y8 mode
        nColorMode = IS_CM_MONO8;
        m_nBitsPerPixel = 8;
    }

    // allocate an image memory
    if (is_AllocImageMem(*m_hCam, m_nSizeX, m_nSizeY, m_nBitsPerPixel,
                         &m_pcImageMemory, &m_lMemoryId) != IS_SUCCESS)
    {
        AfxMessageBox(TEXT("Memory allocation failed!"), MB_ICONWARNING);
    }
    else
    {
        is_SetImageMem(*m_hCam, m_pcImageMemory, m_lMemoryId);
    }

    if (result == IS_SUCCESS)
    {
        // set the desired color mode
        result = is_SetColorMode(*m_hCam, nColorMode);

        // Set the position and size of the image by using an object of the IS_RECT type
        IS_RECT rectAOI;
        rectAOI.s32X      = 0;
        rectAOI.s32Y      = 0;
        rectAOI.s32Width  = m_nSizeX;
        rectAOI.s32Height = m_nSizeY;

        result |= is_AOI(*m_hCam, IS_AOI_IMAGE_SET_AOI, (void*)&rectAOI, sizeof(rectAOI));
    }

    return result;
}
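// A minimal teardown counterpart to InitDisplayMode() above - a sketch, not part of the
// original dialog class. It assumes the same member-style variables (m_pcImageMemory,
// m_lMemoryId) and that the handle was obtained with is_InitCamera().
int ExitDisplayMode(HIDS* m_hCam)
{
    if (m_hCam == NULL)
    {
        return IS_NO_SUCCESS;
    }

    // Release the image memory allocated by InitDisplayMode(), if any
    if (m_pcImageMemory != NULL)
    {
        is_FreeImageMem(*m_hCam, m_pcImageMemory, m_lMemoryId);
        m_pcImageMemory = NULL;
    }

    // Close the camera handle; after this call the handle is no longer valid
    return is_ExitCamera(*m_hCam);
}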
bool FlosIDSAdaptor::openDevice()
{
    if (isOpen())
    {
        return true;
    }

    INT nRet = is_InitCamera(&m_deviceID, NULL);
    if (nRet != IS_SUCCESS)
    {
        // IS_STARTER_FW_UPLOAD_NEEDED would require a firmware upload first;
        // in either case the device is not usable yet, so fail here.
        return false;
    }

    // Query the current image size and allocate a ring buffer of 25 frames
    IS_SIZE_2D imageSize;
    INT lMemoryId;
    char* pcImageMemory;
    is_AOI(m_deviceID, IS_AOI_IMAGE_GET_SIZE, (void*)&imageSize, sizeof(imageSize));

    for (unsigned int i = 0; i < 25; i++)
    {
        is_AllocImageMem(m_deviceID, imageSize.s32Width, imageSize.s32Height, 24,
                         &pcImageMemory, &lMemoryId);
        is_AddToSequence(m_deviceID, pcImageMemory, lMemoryId);
        is_SetImageMem(m_deviceID, pcImageMemory, lMemoryId);
    }

    if (is_InitImageQueue(m_deviceID, 0) != IS_SUCCESS)
    {
        imaqkit::adaptorWarn("FlosIDSAdaptor:openDevice",
                             "Something went wrong while allocating memory.");
        return false;
    }

    m_acquireThread = CreateThread(NULL, 0, acquireThread, this, 0, &m_acquireThreadID);
    if (m_acquireThread == NULL)
    {
        closeDevice();
        return false;
    }

    // Retry until the acquisition thread has a message queue to receive the start message
    while (PostThreadMessage(m_acquireThreadID, WM_USER + 1, 0, 0) == 0)
    {
        Sleep(10);
    }

    return true;
}
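// Sketch of the consumer loop the acquireThread started in openDevice() could run, built on
// the image queue set up with is_InitImageQueue() above. The 500 ms timeout and the
// isAcquisitionActive()/processFrame() calls are placeholders, not part of the original adaptor.
void FlosIDSAdaptor::acquireLoop()
{
    char* pBuffer = NULL;
    INT bufferId = 0;

    // Start freerun capture into the ring buffer registered with is_AddToSequence()
    is_CaptureVideo(m_deviceID, IS_DONT_WAIT);

    while (isAcquisitionActive())
    {
        // Block until the driver has queued a finished frame (or the timeout expires)
        if (is_WaitForNextImage(m_deviceID, 500, &pBuffer, &bufferId) == IS_SUCCESS)
        {
            processFrame(pBuffer);                           // hand the frame to the adaptor
            is_UnlockSeqBuf(m_deviceID, bufferId, pBuffer);  // return the buffer to the queue
        }
    }

    is_StopLiveVideo(m_deviceID, IS_WAIT);
    is_ExitImageQueue(m_deviceID);
}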
int CameraApi::openCamera()
{
    int is_init = IS_NO_SUCCESS;
    int is_mode = IS_NO_SUCCESS;
    int isLoad  = IS_NO_SUCCESS;

    is_init = is_InitCamera(&mhCam, 0);
    if (is_init == IS_SUCCESS)
    {
        is_mode = is_SetDisplayMode(mhCam, IS_SET_DM_DIB);
    }

    if (is_mode == IS_SUCCESS)
    {
        // Allocate a ring buffer sized for the maximum AOI
        ringbuffer   = new char*[ringbufferSize];
        ringbufferId = new int[ringbufferSize];

        IS_SIZE_2D maxAoi;
        is_AOI(mhCam, IS_AOI_IMAGE_GET_SIZE_MAX, &maxAoi, sizeof(maxAoi));

        for (int i = 0; i < ringbufferSize; i++)
        {
            is_AllocImageMem(mhCam, maxAoi.s32Width, maxAoi.s32Height, bitspixel(),
                             &ringbuffer[i], &ringbufferId[i]);
            is_AddToSequence(mhCam, ringbuffer[i], ringbufferId[i]);
        }

        isLoad = is_LoadParameters(mhCam, "cameraParameter.ini");
        setAoi(0, 0, width(), height());
        is_SetExternalTrigger(mhCam, IS_SET_TRIGGER_OFF);
        is_CaptureVideo(mhCam, IS_WAIT);
    }

    if (isLoad == IS_SUCCESS)
        return IS_SUCCESS;
    else
        return IS_NO_SUCCESS;
}
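// A possible closeCamera() counterpart to openCamera() above, assuming the same
// ringbuffer/ringbufferId/mhCam members; sketched here, not taken from the original class.
int CameraApi::closeCamera()
{
    if (mhCam == 0)
        return IS_NO_SUCCESS;

    // Stop the live capture started with is_CaptureVideo()
    is_StopLiveVideo(mhCam, IS_WAIT);

    // Remove all buffers from the sequence, then free each one
    is_ClearSequence(mhCam);
    for (int i = 0; i < ringbufferSize; i++)
    {
        is_FreeImageMem(mhCam, ringbuffer[i], ringbufferId[i]);
    }
    delete[] ringbuffer;
    delete[] ringbufferId;

    // Release the camera handle
    int ret = is_ExitCamera(mhCam);
    mhCam = 0;
    return ret;
}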
INT UEyeCamDriver::reallocateCamBuffer()
{
    INT is_err = IS_SUCCESS;

    // Stop capture to prevent access to memory buffer
    setStandbyMode();

    if (cam_buffer_ != NULL)
    {
        is_err = is_FreeImageMem(cam_handle_, cam_buffer_, cam_buffer_id_);
        cam_buffer_ = NULL;
    }

    INT width  = cam_aoi_.s32Width  / (cam_sensor_scaling_rate_ * cam_subsampling_rate_ * cam_binning_rate_);
    INT height = cam_aoi_.s32Height / (cam_sensor_scaling_rate_ * cam_subsampling_rate_ * cam_binning_rate_);

    if ((is_err = is_AllocImageMem(cam_handle_, width, height, bits_per_pixel_,
                                   &cam_buffer_, &cam_buffer_id_)) != IS_SUCCESS)
    {
        std::cerr << "Failed to allocate " << width << " x " << height << " image buffer" << std::endl;
        return is_err;
    }

    if ((is_err = is_SetImageMem(cam_handle_, cam_buffer_, cam_buffer_id_)) != IS_SUCCESS)
    {
        std::cerr << "Failed to associate an image buffer to the UEye camera driver" << std::endl;
        return is_err;
    }

    if ((is_err = is_GetImageMemPitch(cam_handle_, &cam_buffer_pitch_)) != IS_SUCCESS)
    {
        std::cerr << "Failed to query UEye camera buffer's pitch (a.k.a. stride)" << std::endl;
        return is_err;
    }

    cam_buffer_size_ = cam_buffer_pitch_ * height;

    std::cout << "Allocate internal memory - width: " << width
              << "; height: " << height
              << "; fetched pitch: " << cam_buffer_pitch_
              << "; expected bpp: " << bits_per_pixel_
              << "; total size: " << cam_buffer_size_ << std::endl;

    return is_err;
}
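// Sketch of how the pitch queried above can be used when copying a frame out of cam_buffer_:
// the driver may pad each row to cam_buffer_pitch_ bytes, so a tightly packed destination has
// to be filled row by row. Member names follow the driver above; copyFrameTo() itself and the
// dst pointer are assumptions, and <cstring> is assumed to be included for std::memcpy.
void UEyeCamDriver::copyFrameTo(unsigned char* dst) const
{
    const INT height    = cam_buffer_size_ / cam_buffer_pitch_;
    const INT row_bytes = (cam_aoi_.s32Width /
                           (cam_sensor_scaling_rate_ * cam_subsampling_rate_ * cam_binning_rate_))
                          * bits_per_pixel_ / 8;

    for (INT row = 0; row < height; ++row)
    {
        // Source rows are cam_buffer_pitch_ apart; destination rows are packed
        std::memcpy(dst + row * row_bytes,
                    cam_buffer_ + row * cam_buffer_pitch_,
                    row_bytes);
    }
}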
bool IdsSourceSink::Init()
{
    PUEYE_CAMERA_LIST m_pCamList;
    UEYE_CAMERA_INFO m_CameraInfo;

    // init the internal camera info structure
    ZeroMemory(&m_CameraInfo, sizeof(UEYE_CAMERA_INFO));

    // get the camera list from the SDK
    m_pCamList = new UEYE_CAMERA_LIST;
    m_pCamList->dwCount = 0;

    if (is_GetCameraList(m_pCamList) == IS_SUCCESS)
    {
        DWORD dw = m_pCamList->dwCount;
        delete m_pCamList;

        // Reallocate the camera list with the required size
        m_pCamList = (PUEYE_CAMERA_LIST) new char[sizeof(DWORD) + dw * sizeof(UEYE_CAMERA_INFO)];
        m_pCamList->dwCount = dw;

        // Get the camera list and store it ...
        if (is_GetCameraList(m_pCamList) != IS_SUCCESS)
            return false;
    }
    else
        return false;

    if (m_pCamList->dwCount == 0)
    {
        qDebug() << "No camera found";
        return false;
    }
    else if (m_pCamList->dwCount > 1)
    {
        qDebug() << "More than 1 camera: " << m_pCamList->dwCount;
    }

    // will use camera 0
    memcpy(&m_CameraInfo, &m_pCamList->uci[0], sizeof(UEYE_CAMERA_INFO));
    hCam = (HIDS)(m_CameraInfo.dwDeviceID | IS_USE_DEVICE_ID);

    if (is_InitCamera(&hCam, NULL) != IS_SUCCESS)
    {
        qDebug() << "init not successful";
        return false;
    }

    // double minFPS, maxFPS, FPSinterval;
    // is_GetFrameTimeRange (hCam, &minFPS, &maxFPS, &FPSinterval);
    //cout<< fixed << setprecision(4) << minFPS << " MINFPS " << maxFPS << " MAXFPS "<< FPSinterval << " FPSinterval " << endl;
    //myfile<< fixed << setprecision(4) << minFPS << " MINFPS " << maxFPS << " MAXFPS "<< FPSinterval << " FPSinterval " << endl;

    is_SetGainBoost(hCam, IS_SET_GAINBOOST_OFF);
    is_SetWhiteBalance(hCam, IS_SET_WB_DISABLE);
    // is_SetBrightness (hCam,0);
    // is_SetContrast (hCam,0);
    // is_SetGamma (hCam, 100); // Value multiplied by 100 (for the camera it goes from 0.01 to 10)
    is_SetHWGainFactor(hCam, IS_SET_MASTER_GAIN_FACTOR, 100);

    uint pixelC = 304;
    is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET, (void*)&pixelC, sizeof(pixelC));

    flagIDS = is_SetSubSampling(hCam, IS_SUBSAMPLING_2X_VERTICAL | IS_SUBSAMPLING_2X_HORIZONTAL);    // Both are needed

    // Configuration section: it is very important to match img_bpp=8 with the characteristics of
    // the cv::Mat image in use; weird results like cropping or black lines can appear otherwise.
    int img_width = 2048, img_height = 2048, img_bpp = 8, factorSMP = 2;    // factorSMP states the line-skipping (subsampling) factor
    // int img_step, img_data_size;

    imgMem = NULL;
    is_AllocImageMem(hCam, img_width / factorSMP, img_height / factorSMP, img_bpp, &imgMem, &memId);
    is_SetImageMem(hCam, imgMem, memId);
    // is_SetImageSize (hCam, img_width/factorSMP, img_height/factorSMP);
    is_SetColorMode(hCam, IS_CM_MONO8);
    is_SetDisplayMode(hCam, IS_SET_DM_DIB);    // DIB (direct buffer) mode writes to RAM, which is the only option on Linux

    // OpenCV variables: REMEMBER THE SUBSAMPLING
    buffer = cv::Mat::zeros(img_width / factorSMP, img_height / factorSMP, CV_8UC1);

    // free the reallocated camera list
    delete[] (char*)m_pCamList;

    return true;
}
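// Sketch of the grab step that would follow Init() above: freeze a single frame into imgMem
// and copy it into the preallocated cv::Mat. The GrabFrame() name is an assumption. With the
// MONO8 setup from Init() and a width that is a multiple of 4, the uEye line pitch matches the
// Mat's step, so a single memcpy is enough.
bool IdsSourceSink::GrabFrame()
{
    // Acquire one frame into the active image memory (imgMem)
    if (is_FreezeVideo(hCam, IS_WAIT) != IS_SUCCESS)
        return false;

    // Copy the driver buffer into the OpenCV image allocated in Init()
    memcpy(buffer.data, imgMem, buffer.total() * buffer.elemSize());
    return true;
}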
// Open our camera
bool IDSCamera::OpenCamera()
{
    if (m_hCam != 0)
    {
        // free old image mem.
        is_FreeImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);
        is_ExitCamera(m_hCam);
    }

    // init camera
    m_hCam = (HIDS)0;    // open next camera
    m_Ret = is_InitCamera(&m_hCam, NULL);    // init camera

    if (m_Ret == IS_SUCCESS)
    {
        // retrieve original image size
        SENSORINFO sInfo;
        is_GetSensorInfo(m_hCam, &sInfo);
        m_nSizeX = sInfo.nMaxWidth;
        m_nSizeY = sInfo.nMaxHeight;

        // setup the color depth to the current windows setting
        //is_GetColorDepth(m_hCam, &m_nBitsPerPixel, &m_nColorMode);
        is_SetColorMode(m_hCam, IS_SET_CM_Y8);
        //printf("m_nBitsPerPixel=%i m_nColorMode=%i\n", m_nBitsPerPixel, IS_SET_CM_Y8);

        // memory initialization
        is_AllocImageMem(m_hCam, m_nSizeX, m_nSizeY, m_nBitsPerPixel,
                         &m_pcImageMemory, &m_lMemoryId);

        // set memory active
        is_SetImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);

        // display initialization
        is_SetImageSize(m_hCam, m_nSizeX, m_nSizeY);
        is_SetDisplayMode(m_hCam, IS_SET_DM_DIB);

        // Reinit with a slower frame rate for testing on VMware with USB 1.1
        if (is_LoadParameters(m_hCam, config_file) == IS_SUCCESS)
        {
            // realloc image mem with actual sizes and depth.
            is_FreeImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);
            m_nSizeX = is_SetImageSize(m_hCam, IS_GET_IMAGE_SIZE_X, 0);
            m_nSizeY = is_SetImageSize(m_hCam, IS_GET_IMAGE_SIZE_Y, 0);

            switch (is_SetColorMode(m_hCam, IS_GET_COLOR_MODE))
            {
            case IS_SET_CM_RGB32:
                m_nBitsPerPixel = 32;
                break;
            case IS_SET_CM_RGB24:
                m_nBitsPerPixel = 24;
                break;
            case IS_SET_CM_RGB16:
            case IS_SET_CM_UYVY:
                m_nBitsPerPixel = 16;
                break;
            case IS_SET_CM_RGB15:
                m_nBitsPerPixel = 15;
                break;
            case IS_SET_CM_Y8:
            case IS_SET_CM_RGB8:
            case IS_SET_CM_BAYER:
            default:
                m_nBitsPerPixel = 8;
                break;
            }

            // memory initialization
            is_AllocImageMem(m_hCam, m_nSizeX, m_nSizeY, m_nBitsPerPixel,
                             &m_pcImageMemory, &m_lMemoryId);
            is_SetImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);    // set memory active

            // display initialization
            is_SetImageSize(m_hCam, m_nSizeX, m_nSizeY);
        }
    }

    return true;
}
void get_en_image(pcl::PointCloud<pcl::PointXYZ> &cloud)
{
    char flag = 'g';
    int i = 0;

    while (flag != 'q')
    {
        ostringstream conv;
        conv << i;

        cout << "Capturing new calibration image from the Ensenso stereo vision camera." << endl;

        /// Read the Ensenso stereo cameras:
        try
        {
            // Initialize NxLib and enumerate cameras
            nxLibInitialize(true);

            // Reference to the first camera in the node BySerialNo
            NxLibItem root;
            NxLibItem camera = root[itmCameras][itmBySerialNo][0];

            // Open the Ensenso
            NxLibCommand open(cmdOpen);
            open.parameters()[itmCameras] = camera[itmSerialNumber].asString();
            open.execute();

            // Capture an image
            NxLibCommand(cmdCapture).execute();

            // Stereo matching task
            NxLibCommand(cmdComputeDisparityMap).execute();

            // Convert disparity map into XYZ data for each pixel
            NxLibCommand(cmdComputePointMap).execute();

            // Get info about the computed point map and copy it into a std::vector
            double timestamp;
            std::vector<float> pointMap;
            int width, height;
            camera[itmImages][itmRaw][itmLeft].getBinaryDataInfo(0, 0, 0, 0, 0, &timestamp);    // Get raw image timestamp
            camera[itmImages][itmPointMap].getBinaryDataInfo(&width, &height, 0, 0, 0, 0);
            camera[itmImages][itmPointMap].getBinaryData(pointMap, 0);

            // Copy point cloud and convert to meters
            //cloud.header.stamp = getPCLStamp (timestamp);
            cloud.resize(height * width);
            cloud.width = width;
            cloud.height = height;
            cloud.is_dense = false;

            // Copy data into the point cloud (and convert millimeters to meters)
            for (size_t i = 0; i < pointMap.size(); i += 3)
            {
                cloud.points[i / 3].x = pointMap[i] / 1000.0;
                cloud.points[i / 3].y = pointMap[i + 1] / 1000.0;
                cloud.points[i / 3].z = pointMap[i + 2] / 1000.0;
            }

            NxLibCommand(cmdRectifyImages).execute();

            // Save images
            NxLibCommand saveImage(cmdSaveImage);

            // raw left
            saveImage.parameters()[itmNode] = camera[itmImages][itmRaw][itmLeft].path;
            saveImage.parameters()[itmFilename] = "calib_en/raw_left" + conv.str() + ".png";
            saveImage.execute();

            // raw right
            /*saveImage.parameters()[itmNode] = camera[itmImages][itmRaw][itmRight].path;
            saveImage.parameters()[itmFilename] = "calib_en/raw_right.png";
            saveImage.execute();

            // rectified left
            saveImage.parameters()[itmNode] = camera[itmImages][itmRectified][itmLeft].path;
            saveImage.parameters()[itmFilename] = "calib_en/rectified_left.png";
            saveImage.execute();

            // rectified right
            saveImage.parameters()[itmNode] = camera[itmImages][itmRectified][itmRight].path;
            saveImage.parameters()[itmFilename] = "calib_en/rectified_right.png";
            saveImage.execute();*/
        }
        catch (NxLibException& e)
        {
            // Display NxLib API exceptions, if any
            printf("An NxLib API error with code %d (%s) occurred while accessing item %s.\n",
                   e.getErrorCode(), e.getErrorText().c_str(), e.getItemPath().c_str());
            if (e.getErrorCode() == NxLibExecutionFailed)
                printf("/Execute:\n%s\n", NxLibItem(itmExecute).asJson(true).c_str());
        }
        /*catch (NxLibException &ex) { ensensoExceptionHandling (ex, "grabSingleCloud"); }*/
        catch (...)
        {
            // Display other exceptions
            printf("Something, somewhere went terribly wrong!\n");
        }

        /*cout << "Plug in the RGB camera and press any key to continue." << endl;
        cin.ignore();
        cin.get();*/

        cout << "Capturing new calibration image from the Ensenso RGB camera." << endl;

        /// Read the IDS RGB camera attached to the Ensenso stereo camera
        HIDS hCam = 0;
        printf("Success-Code: %d\n", IS_SUCCESS);

        // Open the camera
        INT nRet = is_InitCamera(&hCam, NULL);
        printf("Status Init %d\n", nRet);

        // Set the pixel clock
        UINT nPixelClockDefault = 9;
        nRet = is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET,
                             (void*)&nPixelClockDefault, sizeof(nPixelClockDefault));
        printf("Status is_PixelClock %d\n", nRet);

        // Set the camera's color mode
        //INT colorMode = IS_CM_CBYCRY_PACKED;
        INT colorMode = IS_CM_BGR8_PACKED;
        nRet = is_SetColorMode(hCam, colorMode);
        printf("Status SetColorMode %d\n", nRet);

        UINT formatID = 4;
        // Set the image size -> 2592x1944
        nRet = is_ImageFormat(hCam, IMGFRMT_CMD_SET_FORMAT, &formatID, 4);
        printf("Status ImageFormat %d\n", nRet);

        // Allocate memory for the image
        char* pMem = NULL;
        int memID = 0;
        nRet = is_AllocImageMem(hCam, 1280, 1024, 24, &pMem, &memID);
        printf("Status AllocImage %d\n", nRet);

        // Make this memory the active image memory
        nRet = is_SetImageMem(hCam, pMem, memID);
        printf("Status SetImageMem %d\n", nRet);

        // Keep images in camera memory (DIB mode)
        INT displayMode = IS_SET_DM_DIB;
        nRet = is_SetDisplayMode(hCam, displayMode);
        printf("Status displayMode %d\n", nRet);

        // Capture a single frame
        nRet = is_FreezeVideo(hCam, IS_WAIT);
        printf("Status is_FreezeVideo %d\n", nRet);

        // Read the image from memory and save it to a file
        String path = "./calib_en/snap_BGR" + conv.str() + ".png";
        std::wstring widepath;
        for (int i = 0; i < path.length(); ++i)
            widepath += wchar_t(path[i]);

        IMAGE_FILE_PARAMS ImageFileParams;
        ImageFileParams.pwchFileName = &widepath[0];
        ImageFileParams.pnImageID = NULL;
        ImageFileParams.ppcImageMem = NULL;
        ImageFileParams.nQuality = 0;
        ImageFileParams.nFileType = IS_IMG_PNG;
        nRet = is_ImageFile(hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ImageFileParams, sizeof(ImageFileParams));
        printf("Status is_ImageFile %d\n", nRet);

        // Release the camera again
        is_ExitCamera(hCam);

        cout << "To quit capturing calibration images, choose q. Else, choose any other letter." << endl;
        cin >> flag;
        i++;
    }
}
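// If the captured RGB frame should also be used in memory (rather than only saved via
// is_ImageFile above), the active buffer can be wrapped in a cv::Mat. This is a hedged,
// stand-alone sketch: it assumes OpenCV is available, the BGR8-packed color mode from the
// loop above, and that width/height match the is_AllocImageMem() call (1280 x 1024, 24 bpp).
cv::Mat wrapActiveBuffer(HIDS hCam, char* pMem, int width, int height)
{
    int pitch = 0;
    // The driver may pad each line, so query the real line pitch of the active memory
    is_GetImageMemPitch(hCam, &pitch);

    // No copy is made here: the Mat only references the uEye buffer, so clone() it
    // if the camera will overwrite the memory with the next is_FreezeVideo() call.
    return cv::Mat(height, width, CV_8UC3, pMem, pitch);
}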
//////////////////////////////////////////////////////////////////////////////////
// InitDisplayMode -----------------------------------------------------------------
//////////////////////////////////////////////////////////////////////////////////
int ofxUeye::InitDisplayMode()
{
    int nRet = IS_NO_SUCCESS;

    if (m_hCam == NULL)
        return IS_NO_SUCCESS;

    // if some image memory exists already then free it
    if (m_pcImageMemory != NULL)
        nRet = is_FreeImageMem(m_hCam, m_pcImageMemory, m_nMemoryId);
    m_pcImageMemory = NULL;

    nRet = is_SetDisplayMode(m_hCam, IS_SET_DM_DIB);

    if (nRet == IS_SUCCESS)
    {
        if (m_si.nColorMode == IS_COLORMODE_BAYER)
        {
            // for color camera models use RGB24 mode
            m_nColorMode = IS_SET_CM_RGB24;
            m_nBitsPerPixel = 24;
            _channels = 3;
            _bitsPerPixel = 24;
        }
        else if (m_si.nColorMode == IS_COLORMODE_CBYCRY)
        {
            // for CBYCRY camera models use RGB32 mode
            m_nColorMode = IS_SET_CM_RGB32;
            m_nBitsPerPixel = 32;
            _channels = 4;
            _bitsPerPixel = 32;
        }
        else
        {
            // for monochrome camera models use Y8 mode
            m_nColorMode = IS_SET_CM_Y8;
            m_nBitsPerPixel = 8;
            _channels = 1;
            _bitsPerPixel = 8;
        }

        _bytesPerPixel = _bitsPerPixel / 8;

        // allocate an image memory
        nRet = is_AllocImageMem(m_hCam, m_nSizeX, m_nSizeY, m_nBitsPerPixel,
                                &m_pcImageMemory, &m_nMemoryId);
        _fullPixels = (unsigned char*)malloc(sizeof(unsigned char) * m_nSizeX * m_nSizeY * _channels);
    }

    if (nRet == IS_SUCCESS)
    {
        // set the image memory only for the bitmap mode when allocated
        if (m_pcImageMemory != NULL)
            nRet = is_SetImageMem(m_hCam, m_pcImageMemory, m_nMemoryId);

        // set the desired color mode
        nRet = is_SetColorMode(m_hCam, m_nColorMode);

        // set the image size to capture
        // nRet = is_SetImageSize(m_hCam, m_nSizeX, m_nSizeY);
    }

    return nRet;
}
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    // CHECK ARGS
    if (nrhs != 0)
    {
        mexErrMsgIdAndTxt("Mscope:initialiseCamera:invalidNumInputs",
                          "No input arguments accepted.");
    }
    if (nlhs > 2)
    {
        mexErrMsgIdAndTxt("Mscope:initialiseCamera:maxlhs",
                          "Too many output arguments.");
    }

    HCAM hCam = 0;

    // CONNECT TO CAMERA AND GET THE HANDLE
    int rv = is_InitCamera(&hCam, NULL);

    // SET THE PIXEL CLOCK
    UINT pixelClock = DFT_PX_CLOCK;
    rv = is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET, (void*)&pixelClock, sizeof(pixelClock));

    // FRAME RATE
    double frameRate = DFT_FRAME_RATE;
    double actualFrameRate;
    rv = is_SetFrameRate(hCam, frameRate, &actualFrameRate);

    // EXPOSURE TIME
    double expTime = 10;    // exposure time in ms
    rv = is_Exposure(hCam, IS_EXPOSURE_CMD_SET_EXPOSURE, &expTime, sizeof(expTime));

    // TRIGGER MODE
    rv = is_SetExternalTrigger(hCam, IS_SET_TRIGGER_SOFTWARE);

    // COLOR MODE
    rv = is_SetColorMode(hCam, IS_CM_MONO8);    // 8-bit monochrome

    // SET THE SUBSAMPLING
    rv = is_SetSubSampling(hCam, IS_SUBSAMPLING_4X_VERTICAL | IS_SUBSAMPLING_4X_HORIZONTAL);

    // ALLOCATE MEMORY
    int bitDepth = 8;
    char* pcImgMem;
    int id;
    rv = is_AllocImageMem(hCam, H_PIX, V_PIX, bitDepth, &pcImgMem, &id);

    // CALCULATE THE LINE PITCH
    int linePitch;
    rv = is_GetImageMemPitch(hCam, &linePitch);
    std::printf("\nLine Pitch = %i\n", linePitch);

    // SET MEMORY
    rv = is_SetImageMem(hCam, pcImgMem, id);

    // START CAPTURING
    rv = is_CaptureVideo(hCam, IS_DONT_WAIT);

    // RETURN CAMERA HANDLE
    UINT8_T hCam8 = (UINT8_T)hCam;
    mwSize scalarDims[2] = {1, 1};    // 1x1 scalar outputs
    plhs[0] = mxCreateNumericArray(2, scalarDims, mxUINT8_CLASS, mxREAL);
    UINT8_T* hCamPtr = (UINT8_T*)mxGetData(plhs[0]);
    memcpy(hCamPtr, &hCam8, sizeof(hCam8));

    // RETURN MEMORY ID
    UINT32_T id32 = (UINT32_T)id;
    plhs[1] = mxCreateNumericArray(2, scalarDims, mxUINT32_CLASS, mxREAL);
    UINT32_T* mIdPtr = (UINT32_T*)mxGetData(plhs[1]);
    memcpy(mIdPtr, &id32, sizeof(id32));

    return;
}
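// Hedged sketch of a companion "getFrame" MEX file (a separate compilation unit) that uses the
// handle returned by the initialisation above. V_PIX is assumed to be the same macro as in the
// allocation, and the frame is returned as a raw uint8 matrix of linePitch x V_PIX bytes,
// without trimming the row padding or transposing into MATLAB's column-major layout; a
// production version would also lock the buffer to avoid tearing during the copy.
void mexFunction(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    if (nrhs != 1)
        mexErrMsgIdAndTxt("Mscope:getFrame:invalidNumInputs", "Camera handle expected.");

    // Recover the camera handle passed back from the initialisation MEX file
    HCAM hCam = (HCAM)(*(UINT8_T*)mxGetData(prhs[0]));

    // Locate the active image memory that is_CaptureVideo() is filling
    void* pMem = NULL;
    is_GetImageMem(hCam, &pMem);

    int linePitch = 0;
    is_GetImageMemPitch(hCam, &linePitch);

    // Return the buffer contents as a flat uint8 array
    mwSize dims[2] = {(mwSize)linePitch, (mwSize)V_PIX};
    plhs[0] = mxCreateNumericArray(2, dims, mxUINT8_CLASS, mxREAL);
    memcpy(mxGetData(plhs[0]), pMem, (size_t)linePitch * V_PIX);
}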