//////////////////////////////////////////////////////////////////////////////////
// InitCamera --------------------------------------------------------------------
//////////////////////////////////////////////////////////////////////////////////
INT ofxUeye::InitCamera(HIDS *m_hCam, HWND hWnd)
{
    INT nRet = is_InitCamera(m_hCam, hWnd);

    /********************************************************************************************/
    /*                                                                                          */
    /*  If the camera returns "IS_STARTER_FW_UPLOAD_NEEDED", an upload of new firmware is       */
    /*  necessary. This upload can take several seconds. We recommend checking the required     */
    /*  time with the function is_GetDuration().                                                */
    /*                                                                                          */
    /*  In this case, the camera can only be opened if the flag "IS_ALLOW_STARTER_FW_UPLOAD"    */
    /*  is OR-ed into m_hCam. This flag allows an automatic upload of the firmware.             */
    /*                                                                                          */
    /********************************************************************************************/
    if (nRet == IS_STARTER_FW_UPLOAD_NEEDED)
    {
        // Time for the firmware upload = 25 seconds by default
        INT nUploadTime = 25000;
        is_GetDuration(*m_hCam, IS_STARTER_FW_UPLOAD, &nUploadTime);

        // Try again to open the camera. This time we allow the automatic upload of the
        // firmware by OR-ing "IS_ALLOW_STARTER_FW_UPLOAD" into the handle.
        *m_hCam = (HIDS)(((INT)*m_hCam) | IS_ALLOW_STARTER_FW_UPLOAD);
        nRet = is_InitCamera(m_hCam, hWnd);
    }

    return nRet;
}
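A minimal caller-side sketch, not taken from any of the sources here, showing how a wrapper such as ofxUeye::InitCamera above is typically used: pass the address of a HIDS handle (0 selects the next free camera), check the return code, and release the handle with is_ExitCamera when done. The instance name "cam" is hypothetical.

// Hypothetical usage of the ofxUeye::InitCamera wrapper (assumed instance name "cam")
HIDS hCam = 0;                                   // 0 = open the next available uEye camera
ofxUeye cam;
if (cam.InitCamera(&hCam, NULL) == IS_SUCCESS)   // NULL window handle: bitmap (DIB) mode
{
    // ... configure the sensor, allocate image memory, capture frames ...
    is_ExitCamera(hCam);                         // release the camera handle when finished
}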
INT UEyeCamDriver::connectCam(int new_cam_ID) {
  INT is_err = IS_SUCCESS;
  int numCameras;

  // Terminate any existing opened cameras
  setStandbyMode();

  // Update camera ID if specified
  if (new_cam_ID >= 0) {
    cam_id_ = new_cam_ID;
  }

  // Query for number of connected cameras
  if ((is_err = is_GetNumberOfCameras(&numCameras)) != IS_SUCCESS) {
    std::cerr << "Failed query for number of connected UEye cameras ("
        << err2str(is_err) << ")" << std::endl;
    return is_err;
  } else if (numCameras < 1) {
    std::cerr << "No UEye cameras are connected" << std::endl;
    return IS_NO_SUCCESS;
  } // NOTE: previously checked if ID < numCameras, but turns out that ID can be arbitrary

  // Attempt to open camera handle, and handle case where camera requires a
  // mandatory firmware upload
  cam_handle_ = (HIDS) cam_id_;
  if ((is_err = is_InitCamera(&cam_handle_, NULL)) == IS_STARTER_FW_UPLOAD_NEEDED) {
    INT uploadTimeMSEC = 25000;
    is_GetDuration(cam_handle_, IS_STARTER_FW_UPLOAD, &uploadTimeMSEC);
    std::cout << "Uploading new firmware to UEye camera '" << cam_name_
        << "'; please wait for about " << uploadTimeMSEC / 1000.0
        << " seconds" << std::endl;

    // Attempt to re-open camera handle while triggering automatic firmware upload
    cam_handle_ = (HIDS) (((INT) cam_handle_) | IS_ALLOW_STARTER_FW_UPLOAD);
    is_err = is_InitCamera(&cam_handle_, NULL); // Will block for N seconds
  }
  if (is_err != IS_SUCCESS) {
    std::cerr << "Could not open UEye camera ID " << cam_id_ << " ("
        << err2str(is_err) << ")" << std::endl;
    return is_err;
  }

  // Set display mode to Device Independent Bitmap (DIB)
  is_err = is_SetDisplayMode(cam_handle_, IS_SET_DM_DIB);

  // Fetch sensor parameters
  is_err = is_GetSensorInfo(cam_handle_, &cam_sensor_info_);

  // Initialize local camera frame buffer
  reallocateCamBuffer();

  return is_err;
}
static int ids_core_Camera_init(ids_core_Camera *self, PyObject *args, PyObject *kwds) {
    static char *kwlist[] = {"handle", "color", NULL};
    int ret;

    self->handle = 0;
    self->bitdepth = 0;
    self->color = IS_CM_RGB8_PACKED;
    self->autofeatures = 0;
    self->ready = NOT_READY;

    LIST_INIT(&self->mem_list);

    /*
     * This means the definition is:
     * def __init__(self, handle=0, color=ids_core.COLOR_RGB8):
     */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwlist,
                                     &self->handle, &self->color)) {
        return -1;
    }

    ret = is_InitCamera(&self->handle, NULL);
    switch (ret) {
    case IS_SUCCESS:
        break;
    case IS_CANT_OPEN_DEVICE:
        PyErr_SetString(PyExc_IOError, "Unable to open camera. Camera not connected.");
        return -1;
    case IS_INVALID_HANDLE:
        PyErr_SetString(PyExc_IOError, "Unable to open camera. Invalid camera handle.");
        return -1;
    default:
        PyErr_Format(PyExc_IOError, "Unable to open camera (Error %d).", ret);
        return -1;
    }

    self->ready = CONNECTED;

    if (!set_color_mode(self, self->color)) {
        return -1;
    }

    /* Initialize image queue so we can WaitForNextImage */
    if (is_InitImageQueue(self->handle, 0) != IS_SUCCESS) {
        PyErr_SetString(PyExc_IOError, "Unable to start image queue.");
        return -1;
    }

    /* Look up maximum width, height, and name */
    ret = init_cam_info(self);
    if (ret) {
        return -1;
    }

    self->ready = READY;

    return 0;
}
INT WINAPI InitializeCamera(HIDS* m_hCam)
{
    // Open camera with the given ID
    INT result = is_InitCamera(m_hCam, NULL);
    if (result == IS_STARTER_FW_UPLOAD_NEEDED)
    {
        // Time for the firmware upload = 25 seconds by default
        INT nUploadTime = 25000;
        is_GetDuration(*m_hCam, IS_STARTER_FW_UPLOAD, &nUploadTime);

        /*CString Str1, Str2, Str3;
        Str1 = "This camera requires a new firmware. The upload will take about";
        Str2 = "seconds. Please wait ...";
        Str3.Format("%s %d %s", Str1, nUploadTime / 1000, Str2);
        AfxMessageBox(Str3, MB_ICONWARNING);*/

        // Try again to open the camera. This time we allow the automatic upload of the
        // firmware by OR-ing "IS_ALLOW_STARTER_FW_UPLOAD" into the handle.
        *m_hCam = (HIDS)(((INT)*m_hCam) | IS_ALLOW_STARTER_FW_UPLOAD);
        result = is_InitCamera(m_hCam, NULL);
    }
    return result;
}
bool FlosIDSAdaptor::openDevice()
{
    if (isOpen()) {
        return true;
    }

    INT nRet = is_InitCamera(&m_deviceID, NULL);
    if (nRet != IS_SUCCESS) {
        // Opening failed. IS_STARTER_FW_UPLOAD_NEEDED would require a firmware upload,
        // which this adaptor does not perform; bail out on any error so we never
        // continue with an invalid handle.
        return false;
    }

    IS_SIZE_2D imageSize;
    INT lMemoryId;
    char *pcImageMemory;

    is_AOI(m_deviceID, IS_AOI_IMAGE_GET_SIZE, (void *)&imageSize, sizeof(imageSize));

    // Allocate a ring of 25 image buffers and add them to the capture sequence
    for (unsigned int i = 0; i < 25; i++) {
        is_AllocImageMem(m_deviceID, imageSize.s32Width, imageSize.s32Height, 24,
                         &pcImageMemory, &lMemoryId);
        is_AddToSequence(m_deviceID, pcImageMemory, lMemoryId);
        is_SetImageMem(m_deviceID, pcImageMemory, lMemoryId);
    }

    if (is_InitImageQueue(m_deviceID, 0) != IS_SUCCESS) {
        imaqkit::adaptorWarn("FlosIDSAdaptor:openDevice",
                             "Something went wrong while allocating memory.");
        return false;
    }

    m_acquireThread = CreateThread(NULL, 0, acquireThread, this, 0, &m_acquireThreadID);
    if (m_acquireThread == NULL) {
        closeDevice();
        return false;
    }

    // Wait until the acquisition thread's message queue exists, then signal it
    while (PostThreadMessage(m_acquireThreadID, WM_USER + 1, 0, 0) == 0) {
        Sleep(10);
    }

    return true;
}
int CameraApi::openCamera()
{
    int is_init = IS_NO_SUCCESS;
    int is_mode = IS_NO_SUCCESS;
    int is_alcM = IS_NO_SUCCESS;
    int isLoad  = IS_NO_SUCCESS;

    is_init = is_InitCamera(&mhCam, 0);

    if (is_init == IS_SUCCESS) {
        is_mode = is_SetDisplayMode(mhCam, IS_SET_DM_DIB);
    }

    if (is_mode == IS_SUCCESS) {
        ringbuffer   = new char*[ringbufferSize];
        ringbufferId = new int[ringbufferSize];

        IS_SIZE_2D maxAoi;
        is_AOI(mhCam, IS_AOI_IMAGE_GET_SIZE_MAX, &maxAoi, sizeof(maxAoi));

        // Allocate the ring buffer at the sensor's maximum AOI size
        for (int i = 0; i < ringbufferSize; i++) {
            is_AllocImageMem(mhCam, maxAoi.s32Width, maxAoi.s32Height, bitspixel(),
                             &ringbuffer[i], &ringbufferId[i]);
            is_AddToSequence(mhCam, ringbuffer[i], ringbufferId[i]);
        }

        isLoad = is_LoadParameters(mhCam, "cameraParameter.ini");

        setAoi(0, 0, width(), height());
        is_SetExternalTrigger(mhCam, IS_SET_TRIGGER_OFF);
        is_CaptureVideo(mhCam, IS_WAIT);
    }

    if (isLoad == IS_SUCCESS)
        return IS_SUCCESS;
    else
        return IS_NO_SUCCESS;
}
bool IdsSourceSink::Init()
{
    PUEYE_CAMERA_LIST m_pCamList;
    UEYE_CAMERA_INFO m_CameraInfo;

    // Init the internal camera info structure
    ZeroMemory(&m_CameraInfo, sizeof(UEYE_CAMERA_INFO));

    // Get the camera list from the SDK
    m_pCamList = new UEYE_CAMERA_LIST;
    m_pCamList->dwCount = 0;

    if (is_GetCameraList(m_pCamList) == IS_SUCCESS) {
        DWORD dw = m_pCamList->dwCount;
        delete m_pCamList;

        // Reallocate the required camera list size
        m_pCamList = (PUEYE_CAMERA_LIST) new char[sizeof(DWORD) + dw * sizeof(UEYE_CAMERA_INFO)];
        m_pCamList->dwCount = dw;

        // Get the camera list and store it
        if (is_GetCameraList(m_pCamList) != IS_SUCCESS)
            return false;
    } else {
        return false;
    }

    if (m_pCamList->dwCount == 0) {
        qDebug() << "No camera found";
        return false;
    } else if (m_pCamList->dwCount > 1) {
        qDebug() << "More than 1 camera: " << m_pCamList->dwCount;
    }

    // Will use camera 0
    memcpy(&m_CameraInfo, &m_pCamList->uci[0], sizeof(UEYE_CAMERA_INFO));
    hCam = (HIDS)(m_CameraInfo.dwDeviceID | IS_USE_DEVICE_ID);

    if (is_InitCamera(&hCam, NULL) != IS_SUCCESS) {
        qDebug() << "init not successful";
        return false;
    }

    // double minFPS, maxFPS, FPSinterval;
    // is_GetFrameTimeRange(hCam, &minFPS, &maxFPS, &FPSinterval);
    // cout << fixed << setprecision(4) << minFPS << " MINFPS " << maxFPS << " MAXFPS " << FPSinterval << " FPSinterval " << endl;
    // myfile << fixed << setprecision(4) << minFPS << " MINFPS " << maxFPS << " MAXFPS " << FPSinterval << " FPSinterval " << endl;

    is_SetGainBoost(hCam, IS_SET_GAINBOOST_OFF);
    is_SetWhiteBalance(hCam, IS_SET_WB_DISABLE);
    // is_SetBrightness(hCam, 0);
    // is_SetContrast(hCam, 0);
    // is_SetGamma(hCam, 100); // Value multiplied by 100 (for the camera it goes from 0.01 to 10)
    is_SetHWGainFactor(hCam, IS_SET_MASTER_GAIN_FACTOR, 100);

    uint pixelC = 304;
    is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET, (void*)&pixelC, sizeof(pixelC));

    flagIDS = is_SetSubSampling(hCam, IS_SUBSAMPLING_2X_VERTICAL | IS_SUBSAMPLING_2X_HORIZONTAL); // Both are needed

    // Configuration section: it is very important to match img_bpp=8 with the characteristics of the
    // cv::Mat image in use; weird results like cropping or black lines can appear if they don't match.
    int img_width = 2048, img_height = 2048, img_bpp = 8, factorSMP = 2; // factorSMP accounts for the 2x subsampling (line skipping)
    // int img_step, img_data_size;

    imgMem = NULL;
    is_AllocImageMem(hCam, img_width / factorSMP, img_height / factorSMP, img_bpp, &imgMem, &memId);
    is_SetImageMem(hCam, imgMem, memId);
    // is_SetImageSize(hCam, img_width/factorSMP, img_height/factorSMP);
    is_SetColorMode(hCam, IS_CM_MONO8);
    is_SetDisplayMode(hCam, IS_SET_DM_DIB); // Direct buffer (DIB) mode writes to RAM, which is the only option on Linux

    // OpenCV buffer: REMEMBER THE SUBSAMPLING
    buffer = cv::Mat::zeros(img_width / factorSMP, img_height / factorSMP, CV_8UC1);

    return true;
}
// Open our camera
bool IDSCamera::OpenCamera()
{
    if (m_hCam != 0) {
        // Free old image memory
        is_FreeImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);
        is_ExitCamera(m_hCam);
    }

    // Init camera
    m_hCam = (HIDS) 0;                    // open next camera
    m_Ret = is_InitCamera(&m_hCam, NULL); // init camera

    if (m_Ret == IS_SUCCESS) {
        // Retrieve original image size
        SENSORINFO sInfo;
        is_GetSensorInfo(m_hCam, &sInfo);
        m_nSizeX = sInfo.nMaxWidth;
        m_nSizeY = sInfo.nMaxHeight;

        // Set up the color depth to the current Windows setting
        // is_GetColorDepth(m_hCam, &m_nBitsPerPixel, &m_nColorMode);
        is_SetColorMode(m_hCam, IS_SET_CM_Y8);
        // printf("m_nBitsPerPixel=%i m_nColorMode=%i\n", m_nBitsPerPixel, IS_SET_CM_Y8);

        // Memory initialization
        is_AllocImageMem(m_hCam, m_nSizeX, m_nSizeY, m_nBitsPerPixel, &m_pcImageMemory, &m_lMemoryId);
        // Set memory active
        is_SetImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);

        // Display initialization
        is_SetImageSize(m_hCam, m_nSizeX, m_nSizeY);
        is_SetDisplayMode(m_hCam, IS_SET_DM_DIB);

        // Reinit with a slower frame rate for testing on VMware with USB 1.1
        if (is_LoadParameters(m_hCam, config_file) == IS_SUCCESS) {
            // Realloc image memory with actual sizes and depth
            is_FreeImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);
            m_nSizeX = is_SetImageSize(m_hCam, IS_GET_IMAGE_SIZE_X, 0);
            m_nSizeY = is_SetImageSize(m_hCam, IS_GET_IMAGE_SIZE_Y, 0);

            switch (is_SetColorMode(m_hCam, IS_GET_COLOR_MODE)) {
            case IS_SET_CM_RGB32:
                m_nBitsPerPixel = 32;
                break;
            case IS_SET_CM_RGB24:
                m_nBitsPerPixel = 24;
                break;
            case IS_SET_CM_RGB16:
            case IS_SET_CM_UYVY:
                m_nBitsPerPixel = 16;
                break;
            case IS_SET_CM_RGB15:
                m_nBitsPerPixel = 15;
                break;
            case IS_SET_CM_Y8:
            case IS_SET_CM_RGB8:
            case IS_SET_CM_BAYER:
            default:
                m_nBitsPerPixel = 8;
                break;
            }

            // Memory initialization
            is_AllocImageMem(m_hCam, m_nSizeX, m_nSizeY, m_nBitsPerPixel, &m_pcImageMemory, &m_lMemoryId);
            is_SetImageMem(m_hCam, m_pcImageMemory, m_lMemoryId); // set memory active

            // Display initialization
            is_SetImageSize(m_hCam, m_nSizeX, m_nSizeY);
        }
    }
    return true;
}
void get_en_image(pcl::PointCloud<pcl::PointXYZ> &cloud)
{
    char flag = 'g';
    int i = 0;

    while (flag != 'q') {
        ostringstream conv;
        conv << i;

        cout << "Capturing new calibration image from the Ensenso stereo vision camera." << endl;

        /// Read the Ensenso stereo cameras:
        try {
            // Initialize NxLib and enumerate cameras
            nxLibInitialize(true);

            // Reference to the first camera in the node BySerialNo
            NxLibItem root;
            NxLibItem camera = root[itmCameras][itmBySerialNo][0];

            // Open the Ensenso
            NxLibCommand open(cmdOpen);
            open.parameters()[itmCameras] = camera[itmSerialNumber].asString();
            open.execute();

            // Capture an image
            NxLibCommand(cmdCapture).execute();

            // Stereo matching task
            NxLibCommand(cmdComputeDisparityMap).execute();

            // Convert disparity map into XYZ data for each pixel
            NxLibCommand(cmdComputePointMap).execute();

            // Get info about the computed point map and copy it into a std::vector
            double timestamp;
            std::vector<float> pointMap;
            int width, height;
            camera[itmImages][itmRaw][itmLeft].getBinaryDataInfo(0, 0, 0, 0, 0, &timestamp); // Get raw image timestamp
            camera[itmImages][itmPointMap].getBinaryDataInfo(&width, &height, 0, 0, 0, 0);
            camera[itmImages][itmPointMap].getBinaryData(pointMap, 0);

            // Copy point cloud and convert to meters
            // cloud.header.stamp = getPCLStamp(timestamp);
            cloud.resize(height * width);
            cloud.width = width;
            cloud.height = height;
            cloud.is_dense = false;

            // Copy data into the point cloud (and convert millimeters to meters)
            for (size_t j = 0; j < pointMap.size(); j += 3) {
                cloud.points[j / 3].x = pointMap[j] / 1000.0;
                cloud.points[j / 3].y = pointMap[j + 1] / 1000.0;
                cloud.points[j / 3].z = pointMap[j + 2] / 1000.0;
            }

            NxLibCommand(cmdRectifyImages).execute();

            // Save images
            NxLibCommand saveImage(cmdSaveImage);
            // raw left
            saveImage.parameters()[itmNode] = camera[itmImages][itmRaw][itmLeft].path;
            saveImage.parameters()[itmFilename] = "calib_en/raw_left" + conv.str() + ".png";
            saveImage.execute();
            // raw right
            /*saveImage.parameters()[itmNode] = camera[itmImages][itmRaw][itmRight].path;
            saveImage.parameters()[itmFilename] = "calib_en/raw_right.png";
            saveImage.execute();
            // rectified left
            saveImage.parameters()[itmNode] = camera[itmImages][itmRectified][itmLeft].path;
            saveImage.parameters()[itmFilename] = "calib_en/rectified_left.png";
            saveImage.execute();
            // rectified right
            saveImage.parameters()[itmNode] = camera[itmImages][itmRectified][itmRight].path;
            saveImage.parameters()[itmFilename] = "calib_en/rectified_right.png";
            saveImage.execute();*/
        } catch (NxLibException& e) {
            // Display NxLib API exceptions, if any
            printf("An NxLib API error with code %d (%s) occurred while accessing item %s.\n",
                   e.getErrorCode(), e.getErrorText().c_str(), e.getItemPath().c_str());
            if (e.getErrorCode() == NxLibExecutionFailed)
                printf("/Execute:\n%s\n", NxLibItem(itmExecute).asJson(true).c_str());
        }
        /*catch (NxLibException &ex) {
            ensensoExceptionHandling(ex, "grabSingleCloud");
        }*/
        catch (...) {
            // Display other exceptions
            printf("Something, somewhere went terribly wrong!\n");
        }

        /*cout << "Plug in the RGB camera and press any key to continue." << endl;
        cin.ignore();
        cin.get();*/

        cout << "Capturing new calibration image from the Ensenso RGB camera." << endl;

        /// Read the IDS RGB camera attached to the Ensenso stereo camera
        HIDS hCam = 0;
        printf("Success-Code: %d\n", IS_SUCCESS);

        // Open the camera
        INT nRet = is_InitCamera(&hCam, NULL);
        printf("Status Init %d\n", nRet);

        // Set the pixel clock
        UINT nPixelClockDefault = 9;
        nRet = is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET,
                             (void*)&nPixelClockDefault, sizeof(nPixelClockDefault));
        printf("Status is_PixelClock %d\n", nRet);

        // Set the camera's color mode
        // INT colorMode = IS_CM_CBYCRY_PACKED;
        INT colorMode = IS_CM_BGR8_PACKED;
        nRet = is_SetColorMode(hCam, colorMode);
        printf("Status SetColorMode %d\n", nRet);

        // Set the image size -> format ID 4 selects 2592x1944
        UINT formatID = 4;
        nRet = is_ImageFormat(hCam, IMGFRMT_CMD_SET_FORMAT, &formatID, 4);
        printf("Status ImageFormat %d\n", nRet);

        // Allocate memory for the image
        char* pMem = NULL;
        int memID = 0;
        nRet = is_AllocImageMem(hCam, 1280, 1024, 24, &pMem, &memID);
        printf("Status AllocImage %d\n", nRet);

        // Make this memory block active
        nRet = is_SetImageMem(hCam, pMem, memID);
        printf("Status SetImageMem %d\n", nRet);

        // Keep the images in memory (DIB display mode)
        INT displayMode = IS_SET_DM_DIB;
        nRet = is_SetDisplayMode(hCam, displayMode);
        printf("Status displayMode %d\n", nRet);

        // Capture an image
        nRet = is_FreezeVideo(hCam, IS_WAIT);
        printf("Status is_FreezeVideo %d\n", nRet);

        // Read the image from memory and save it as a file
        String path = "./calib_en/snap_BGR" + conv.str() + ".png";
        std::wstring widepath;
        for (int k = 0; k < path.length(); ++k)
            widepath += wchar_t(path[k]);

        IMAGE_FILE_PARAMS ImageFileParams;
        ImageFileParams.pwchFileName = &widepath[0];
        ImageFileParams.pnImageID = NULL;
        ImageFileParams.ppcImageMem = NULL;
        ImageFileParams.nQuality = 0;
        ImageFileParams.nFileType = IS_IMG_PNG;
        nRet = is_ImageFile(hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ImageFileParams, sizeof(ImageFileParams));
        printf("Status is_ImageFile %d\n", nRet);

        // Release the camera again
        is_ExitCamera(hCam);

        cout << "To quit capturing calibration images, choose q. Else, choose any other letter." << endl;
        cin >> flag;
        i++;
    }
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // CHECK ARGS
    if (nrhs != 0) {
        mexErrMsgIdAndTxt("Mscope:initialiseCamera:invalidNumInputs",
                          "No Input arguments accepted.");
    }
    if (nlhs > 2) {
        mexErrMsgIdAndTxt("Mscope:initialiseCamera:maxlhs",
                          "Too many output arguments.");
    }

    HCAM hCam = 0;

    // CONNECT TO CAMERA AND GET THE HANDLE
    int rv = is_InitCamera(&hCam, NULL);

    // SET THE PIXEL CLOCK
    UINT pixelClock = DFT_PX_CLOCK;
    rv = is_PixelClock(hCam, IS_PIXELCLOCK_CMD_SET, (void*)&pixelClock, sizeof(pixelClock));

    // FRAME RATE
    double frameRate = DFT_FRAME_RATE;
    double actualFrameRate;
    rv = is_SetFrameRate(hCam, frameRate, &actualFrameRate);

    // EXPOSURE TIME
    double expTime = 10; // exposure time in ms
    rv = is_Exposure(hCam, IS_EXPOSURE_CMD_SET_EXPOSURE, &expTime, 8);

    // TRIGGER MODE
    rv = is_SetExternalTrigger(hCam, IS_SET_TRIGGER_SOFTWARE);

    // COLOR MODE
    rv = is_SetColorMode(hCam, IS_CM_MONO8); // 8-bit monochrome

    // SET THE SUBSAMPLING
    rv = is_SetSubSampling(hCam, IS_SUBSAMPLING_4X_VERTICAL | IS_SUBSAMPLING_4X_HORIZONTAL);

    // ALLOCATE MEMORY
    int bitDepth = 8;
    char* pcImgMem;
    int id;
    rv = is_AllocImageMem(hCam, H_PIX, V_PIX, bitDepth, &pcImgMem, &id);

    // CALCULATE THE LINE PITCH
    int linePitch;
    rv = is_GetImageMemPitch(hCam, &linePitch);
    std::printf("\nLine Pitch = %i\n", linePitch);

    // SET MEMORY
    rv = is_SetImageMem(hCam, pcImgMem, id);

    // START CAPTURING
    rv = is_CaptureVideo(hCam, IS_DONT_WAIT);

    // RETURN CAMERA HANDLE
    UINT8_T hCam8 = (UINT8_T) hCam;
    mwSignedIndex scalarDims[2] = {1, 1}; // elements in image
    plhs[0] = mxCreateNumericArray(1, scalarDims, mxUINT8_CLASS, mxREAL);
    double *hCamPtr = mxGetPr(plhs[0]);
    memcpy(hCamPtr, &hCam8, sizeof(hCam8));

    // RETURN MEMORY ID
    UINT32_T id32 = (UINT32_T) id;
    plhs[1] = mxCreateNumericArray(1, scalarDims, mxUINT32_CLASS, mxREAL);
    double *mIdPtr = mxGetPr(plhs[1]);
    memcpy(mIdPtr, &id32, sizeof(id32));

    return;
}