Esempio n. 1
0
// Open the JAI camera: connect to the device, apply its settings, and
// begin streaming frames.
// NOTE(review): the results of openCamera()/settingCamera()/startCapture()
// are ignored, so this always reports success — confirm the helpers
// handle their own failures.
bool camera_jai::open()
{
	openCamera();
	settingCamera();
	startCapture();
	return true;
}
Esempio n. 2
0
// Construct a camera identified by its PvAPI GUID.
// The vendor-specific lookup/open calls are passed to openCamera() as
// bound functors (PvCameraInfo / PvCameraOpen writing into handle_),
// then the common setup() path runs.
Camera::Camera(unsigned long guid, size_t bufferSize)
  : bufferSize_(bufferSize), FSTmode_(None)
{
  openCamera(boost::bind(PvCameraInfo, guid, _1),
             boost::bind(PvCameraOpen, guid, _1, &handle_));
  
  setup();
}
Esempio n. 3
0
// Open the capture device and start the periodic grab timer:
// every 100 ms the timeout() signal drives the readFrame() slot.
void FFmpegVideo::init(const QString &dev)
{
    openCamera(dev);

    connect(m_timer, SIGNAL(timeout()), this, SLOT(readFrame()));
    m_timer->start(100);

}
Esempio n. 4
0
// Enumerate the available capture devices and, when at least one was
// found, open the first of them.  Returns the result of the device
// enumeration itself.
bool ImageReader::initReader() {
    const bool enumerated = listDevices(_devices);
    if (!_devices.isEmpty()) {
        openCamera(_devices.at(0));
    }
    return enumerated;
}
// Dynamic-reconfigure callback: applies a new parameter set to the UVC
// camera.  Closes/reopens the device when the changed parameters require
// it, then pushes focus controls that changed, and finally stores the
// accepted configuration in config_.
void HiwrCameraControllerNodelet::applyNewConfig(Config &new_config, uint32_t level)
{
    printf("### dynamic reconfig_ure level 0x%x \n", level);
    ROS_DEBUG("dynamic reconfig_ure level 0x%x", level);
    reconfig_number_++;
    // resolve frame ID using tf_prefix parameter
    if (new_config.frame_id == "")
        new_config.frame_id = "camera";

    ROS_DEBUG("dynamic reconfig_ure level 0x%x", level);
    std::string tf_prefix = tf::getPrefixParam(private_nh_);
    ROS_DEBUG_STREAM("tf_prefix: " << tf_prefix);
    new_config.frame_id = tf::resolve(tf_prefix, new_config.frame_id);

    if (state_ != Driver::CLOSED && (level & Levels::RECONFIGURE_CLOSE))
    {
        // must close the device before updating these parameters
        closeCamera();                  // state_ --> CLOSED
    }
    if (state_ == Driver::CLOSED)
    {
        // open with new values
        if (openCamera(new_config))
        {
            // update camera name string
            new_config.camera_name = camera_name_;
        }
    }


    // On the first reconfigure call, or whenever the value changed, push
    // the autofocus control.  0x009A090C is presumably the V4L2 focus_auto
    // control id — TODO confirm against the uvc_cam driver.
    if(reconfig_number_ == 1 || config_.focus_auto != new_config.focus_auto){
        try {
            NODELET_INFO("will try to set autofocus to %d \n " , new_config.focus_auto);
            cam_->set_control( 0x009A090C , new_config.focus_auto);
        } catch (uvc_cam::Exception& e) {
            ROS_ERROR_STREAM("Problem setting focus_auto. Exception was " << e.what());
        }
    }


    // Same pattern for the absolute focus position control.
    if(reconfig_number_ == 1 || config_.focus_absolute != new_config.focus_absolute){
        try {
            NODELET_INFO("will try to set focus_absolute to %d \n " , new_config.focus_absolute);
            cam_->set_control( 0x009A090A  , new_config.focus_absolute);
        } catch (uvc_cam::Exception& e) {
            ROS_ERROR_STREAM("Problem setting focus_absolute. Exception was " << e.what() << "value was" << new_config.focus_absolute ) ;
        }
    }

    config_ = new_config;

    config_width_ = new_config.width;
    config_height_ = new_config.height;

    next_config_update_ = false;
    printf("### dynamic reconfig_ure will unlock\n");

}
Esempio n. 6
0
// Construct a camera addressed by IP (PvAPI GigE path).
// The address is converted to its numeric form and the by-address
// lookup/open calls are handed to openCamera() as bound functors,
// followed by the common setup() path.
Camera::Camera(const char* ip_address, size_t bufferSize)
  : bufferSize_(bufferSize), FSTmode_(None)
{
  unsigned long addr = inet_addr(ip_address);
  tPvIpSettings settings;
  openCamera(boost::bind(PvCameraInfoByAddr, addr, _1, &settings),
             boost::bind(PvCameraOpenByAddr, addr, _1, &handle_));
  
  setup();
}
Esempio n. 7
0
// Connect to the QHY6 camera over USB.  A nonzero result from
// openCamera() is mapped to QHYCCD_ERROR_OPENCAM; zero means success.
int QHY6::ConnectCamera(libusb_device *d,qhyccd_handle **h)
{
    return openCamera(d, h) ? QHYCCD_ERROR_OPENCAM : QHYCCD_SUCCESS;
}
  // Nodelet initialization: loads parameters, sets up diagnostics,
  // creates the (initially stopped) polling timer, opens the camera,
  // advertises the image/point-cloud topics, and installs the dynamic
  // reconfigure callback.
  void onInitImpl()
  {
    ros::NodeHandle& nh = getNodeHandle();
    ros::NodeHandle& pn = getPrivateNodeHandle();


    // Retrieve parameters from server
    update_rate=30;
    pn.param<std::string>("frame_id", frame_id_, "/camera_optical_frame");
    NODELET_INFO("Loaded param frame_id: %s", frame_id_.c_str());
    pn.param<std::string>("device_serial", device_serial, "");
    NODELET_INFO("Loaded param device_serial: %s", device_serial.c_str());
    pn.param<double>("open_camera_retry_period", open_camera_retry_period, 3.);
    pn.param<std::string>("plugin_dir", plugin_dir, "/usr/local/pmd/plugins");
    NODELET_INFO("Loaded param plugin_dir: %s", plugin_dir.c_str());
    pn.param<std::string>("source_plugin", source_plugin, "camboardnano");
    NODELET_INFO("Loaded param source_plugin: %s", source_plugin.c_str());
    pn.param<std::string>("process_plugin", process_plugin, "camboardnanoproc");
    NODELET_INFO("Loaded param process_plugin: %s", process_plugin.c_str());

    // Setup updater
    camera_state_ = OPENING;
    state_info_ = "opening camera " + device_serial;
    updater.setHardwareIDf("%s", device_serial.c_str());
    if(device_serial.empty())
    {
        updater.setHardwareID("unknown");
    }
    updater.add(getName().c_str(), this, &DriverNodelet::getCurrentState);

    // Setup periodic callback to get new data from the camera.
    // Created stopped (oneshot=false, autostart=false); presumably
    // openCamera() starts it once the device is up — TODO confirm.
    update_timer_ = nh.createTimer(ros::Rate(update_rate).expectedCycleTime(), &DriverNodelet::updateCallback, this, false ,false);

    // Open camera
    openCamera(update_timer_);

    // Advertise topics
    ros::NodeHandle distance_nh(nh, "distance");
    image_transport::ImageTransport distance_it(distance_nh);
    distance_publisher_ = distance_it.advertiseCamera("image", 1);
    ros::NodeHandle depth_nh(nh, "depth");
    image_transport::ImageTransport depth_it(depth_nh);
    depth_publisher_ = depth_it.advertiseCamera("image", 1);
    ros::NodeHandle amplitude_nh(nh, "amplitude");
    image_transport::ImageTransport amplitude_it(amplitude_nh);
    amplitude_publisher_ = amplitude_it.advertiseCamera("image", 1);
    points_publisher_ = nh.advertise<sensor_msgs::PointCloud2>("points_unrectified", 1);

    // Setup dynamic reconfigure server
    reconfigure_server_.reset(new ReconfigureServer(config_mutex_, pn));
    ReconfigureServer::CallbackType f = boost::bind(&DriverNodelet::reconfigureCallback, this, _1, _2);
    reconfigure_server_->setCallback(f);
  }
Esempio n. 9
0
// Drive the camera through the QCamera state machine.
// Handled transitions:
//   Unloaded -> Loaded            : open camera
//   Unloaded -> Active            : open camera, then apply configuration (queued)
//   Loaded   -> Unloaded          : close camera
//   Loaded   -> Active            : apply configuration (queued)
//   Active   -> Loaded            : stop viewfinder
//   Active   -> Unloaded          : stop viewfinder, close camera
// The state is only committed when the transition succeeds (e.g. a
// failed openCamera() leaves m_state untouched), and stateChanged()
// fires only on an actual change.
void BbCameraSession::setState(QCamera::State state)
{
    if (m_state == state)
        return;

    const QCamera::State previousState = m_state;

    if (previousState == QCamera::UnloadedState) {
        if (state == QCamera::LoadedState) {
            if (openCamera()) {
                m_state = state;
            }
        } else if (state == QCamera::ActiveState) {
            if (openCamera()) {
                QMetaObject::invokeMethod(this, "applyConfiguration", Qt::QueuedConnection);
                m_state = state;
            }
        }
    } else if (previousState == QCamera::LoadedState) {
        if (state == QCamera::UnloadedState) {
            closeCamera();
            m_state = state;
        } else if (state == QCamera::ActiveState) {
            QMetaObject::invokeMethod(this, "applyConfiguration", Qt::QueuedConnection);
            m_state = state;
        }
    } else if (previousState == QCamera::ActiveState) {
        if (state == QCamera::LoadedState) {
            stopViewFinder();
            m_state = state;
        } else if (state == QCamera::UnloadedState) {
            stopViewFinder();
            closeCamera();
            m_state = state;
        }
    }

    if (m_state != previousState)
        emit stateChanged(m_state);
}
Esempio n. 10
0
  /** Device poll: publish one image per cycle while subscribers exist.
   *
   *  Closes the camera when nobody is subscribed; otherwise (re)opens
   *  it as needed, reads one frame under the reconfiguration lock and
   *  publishes it.  Sleeps one cycle when the device stayed closed.
   */
  void NaoqiCameraDriver::poll(void)
  {
    // Do not run concurrently with reconfig().
    //
    // The mutex lock should be sufficient, but the Linux pthreads
    // implementation does not guarantee fairness, and the reconfig()
    // callback thread generally suffers from lock starvation for many
    // seconds before getting to run.  So, we avoid acquiring the lock
    // if there is a reconfig() pending.
    bool do_sleep = true;
    
    // publish images only if someone subscribe to the topic
    uint32_t nbSubscribers = image_pub_.getNumSubscribers();

    if (nbSubscribers == 0)
    {
        if (state_ == Driver::OPENED)
        {
            closeCamera();
        }
    }
    else
    {
        boost::mutex::scoped_lock scopedLock(reconfiguration_mutex_);
        if (state_ == Driver::CLOSED)
        {
            openCamera(config_);        // open with current configuration
        }
        do_sleep = (state_ == Driver::CLOSED);
        if (!do_sleep)                  // openCamera() succeeded?
        {
            // driver is open, read the next image still holding lock
            sensor_msgs::ImagePtr image(new sensor_msgs::Image);
            if (read(image))
            {
                publish(image);
                // real_frame_rate_ paces publication at the configured rate
                real_frame_rate_.sleep();
            }
        }
    } // release mutex lock

    // Always run the diagnostics updater: no lock required.
    diagnostics_.update();

    if (do_sleep)
      {
        // device was closed or poll is not running, sleeping avoids
        // busy wait (DO NOT hold the lock while sleeping)
        cycle_.sleep();
      }
  }
Esempio n. 11
0
// For every camera, verify that the static available-stream-configuration
// entry, when present, has a length that is a multiple of 4 (each
// configuration is a 4-tuple: format, width, height, input/output flag).
TEST_F(CameraMetadataTest, AvailableStreamConfiguration) {
    TEST_EXTENSION_FORKING_INIT;
    
    for (int i = 0; i < getNumOfCams(); i++)
    {
        openCamera(i);
        int count = GetEntryCountFromStaticTag(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
        if (count > 0)
        {
            EXPECT_EQ(0, count % 4);
        }
        closeCamera();
    }

}
// Construct the 160x120 Microdia camera: reload the kernel video/driver
// modules, open the device, and start the capture thread.
// NOTE(review): the system() return values are ignored, so a failed
// rmmod/insmod is silently tolerated — presumably acceptable when the
// modules are already (un)loaded; confirm on the target device.
MicrodiaCamera::MicrodiaCamera()
    : Camera(160, 120),
    m_processOneFrame(false), 
    m_processContinuousFrames(false),
    m_camDevice(-1),
    m_thread(*this)
{
    system("rmmod microdia");
    system("rmmod videodev");
    system("insmod /mnt/kiss/drivers/videodev.ko");
    system("insmod /mnt/kiss/drivers/microdia.ko max_urbs=50 max_buffers=2 log_level=16");

    openCamera();
    m_thread.start();
}
Esempio n. 13
0
  /** Device poll: read and publish one frame per cycle.
   *
   *  Skipped entirely while a reconfig() is pending.  Reopens a closed
   *  device with the current configuration, publishes the next frame,
   *  and disconnects after too many consecutive read errors.  Sleeps
   *  one cycle when the device stayed closed.
   */
  void Camera1394Driver::poll(void)
  {
    // Do not run concurrently with reconfig().
    //
    // The mutex lock should be sufficient, but the Linux pthreads
    // implementation does not guarantee fairness, and the reconfig()
    // callback thread generally suffers from lock starvation for many
    // seconds before getting to run.  So, we avoid acquiring the lock
    // if there is a reconfig() pending.
    bool do_sleep = true;
    if (!reconfiguring_)
      {
        boost::mutex::scoped_lock lock(mutex_);
        if (state_ == Driver::CLOSED)
          {
            openCamera(config_);        // open with current configuration
          }
        do_sleep = (state_ == Driver::CLOSED);
        if (!do_sleep)                  // openCamera() succeeded?
          {
            // driver is open, read the next image still holding lock
            sensor_msgs::ImagePtr image(new sensor_msgs::Image);
            if (read(image))
              {
                publish(image);
                consecutive_read_errors_ = 0;
              }
            else if (++consecutive_read_errors_ > config_.max_consecutive_errors
                     && config_.max_consecutive_errors > 0)
            {
              ROS_WARN("reached %u consecutive read errrors, disconnecting",
                       consecutive_read_errors_ );
              closeCamera();
            }
          }
      } // release mutex lock

    // Always run the diagnostics updater: no lock required.
    diagnostics_.update();

    if (do_sleep)
      {
        // device was closed or poll is not running, sleeping avoids
        // busy wait (DO NOT hold the lock while sleeping)
        cycle_.sleep();
      }
  }
Esempio n. 14
0
// For every camera, verify the mandatory stream formats are advertised:
// JPEG (BLOB) and, since HAL3 requires it, flexible YUV (YCbCr_420_888).
TEST_F(CameraMetadataTest, RequiredFormats) {
    TEST_EXTENSION_FORKING_INIT;

    for (int i = 0; i < getNumOfCams(); i++)
    {
        openCamera(i);

        EXPECT_TRUE(
            HasElementInArrayFromStaticTag(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
                    HAL_PIXEL_FORMAT_BLOB)); // JPEG

        // HAL3 must support flexible YUV
        EXPECT_TRUE(HasElementInArrayFromStaticTag(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
                        HAL_PIXEL_FORMAT_YCbCr_420_888));
        closeCamera();
    }
}
Esempio n. 15
0
// Switch to the next capture device, wrapping around the device list.
//
// @param devices  list of usable device ids
// @param index    in/out: current position in `devices`; advanced modulo
//                 the list size
// @return always true (kept for interface compatibility)
//
// With fewer than two devices there is nothing to cycle to, so the
// function is a no-op.  (Replaces the original goto-based flow with a
// plain guard; behavior is unchanged.)
bool ImageReader::nextCamera(QList<int> &devices, int *index) {
    const int deviceMax = devices.size();

    if (deviceMax >= 2) {
        closeCamera();
        *index = (*index + 1) % deviceMax;
        openCamera(devices.at(*index));
    }

    return true;
}
Esempio n. 16
0
// Grab one 1280x960 RAW8 frame from camera 0 and save it as
// "sun_cam_2.jpg".  Exit status: 0 on success, 1 when the camera cannot
// be opened or initialized (numerically identical to the original
// `return true/false`, but explicit).
int main()
{
	const int WIDTH = 1280, HEIGHT = 960;

	if (!openCamera(0))
	{
		std::cout << "openCamera failed!" << std::endl;
		return 1;
	}

	if (!initCamera())
	{
		std::cout << "initCamera failed!" << std::endl;
		return 1;
	}

	bool autov;   // only read by the commented-out diagnostics below
	setImageFormat(WIDTH, HEIGHT, 1, IMG_RAW8);
	setValue(CONTROL_EXPOSURE, 400, true);   // exposure 400, auto enabled
	setValue(CONTROL_GAIN, 35, false);       // gain 35, auto disabled

	//int exposure_us = getValue(CONTROL_EXPOSURE, &autov);
	//int gain = getValue(CONTROL_GAIN, &autov);
	//int max_gain = getMax(CONTROL_GAIN);
	//std::cout << exposure_us << ", " << gain << ", " << max_gain << std::endl;

	IplImage *buffer = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	startCapture();

	bool captured = false;
	do
	{
		// BUG FIX: the original line `std::chrono::milliseconds(10);`
		// constructed a duration and discarded it — a no-op, so the loop
		// busy-polled.  Actually sleep between polls (requires <thread>).
		std::this_thread::sleep_for(std::chrono::milliseconds(10));
		captured = getImageData((unsigned char*)buffer->imageData, buffer->imageSize, -1);
	} while (!captured);

	cvSaveImage("sun_cam_2.jpg", buffer);
	stopCapture();
	closeCamera();

	return 0;
}
Esempio n. 17
0
// Probe device ids in [0, MAX_CAMERA_DEVICES) and collect every id that
// can be opened (each probe closes the device again immediately).
// Returns true when at least one device was found.
bool ImageReader::listDevices(QList<int> &devices) {
    for (int deviceId = 0; deviceId < MAX_CAMERA_DEVICES; deviceId++) {
        if (openCamera(deviceId)) {
            devices.append(deviceId);
            closeCamera();
        }
    }

    qDebug() << "Camera devices:" << devices;

    return devices.count() != 0;
}
// Capture one preview frame via libgphoto2 and emit it as a QImage.
// On success the fail counter is reset and status becomes Active; on
// failure the counter is incremented and, past the limit, the camera is
// closed and an error is reported.
void GPhotoCameraWorker::capturePreview()
{
    openCamera();

    if (m_status != QCamera::ActiveStatus) {
        setStatus(QCamera::StartingStatus);
    }

    // Reuse m_file for the new preview data.
    gp_file_clean(m_file);

    int ret = gp_camera_capture_preview(m_camera, m_file, m_context);
    if (GP_OK == ret) {
        const char* data;
        unsigned long int size = 0;
        ret = gp_file_get_data_and_size(m_file, &data, &size);
        if (GP_OK == ret) {
            m_capturingFailCount = 0;
            // Decode the raw preview bytes (typically JPEG) into an image.
            const QImage &result = QImage::fromData(QByteArray(data, int(size)));
            setStatus(QCamera::ActiveStatus);
            emit previewCaptured(result);
            return;
        }
    }

    // Either the capture or the data retrieval failed.
    qWarning() << "Failed retrieving preview" << ret;
    ++m_capturingFailCount;

    if (m_capturingFailCount >= capturingFailLimit)
    {
      qWarning() << "Closing camera because of capturing fail";
      emit error(QCamera::CameraError, tr("Unable to capture frame"));
      setStatus(QCamera::UnloadedStatus);
      closeCamera();
    }

}
Esempio n. 19
0
/*
 * Python __init__ for the SVS GigE camera extension type.
 *
 * Opens the camera at `ip` (streaming to `source_ip`), queries its
 * identity and imager geometry, forces 12-bit pixel depth, and opens an
 * enabled image stream.  Returns 0 on success, -1 with a Python
 * exception set on failure.  self->ready tracks how far initialization
 * got so the deallocator can tear down only what was created.
 */
static int svs_core_Camera_init(svs_core_Camera *self, PyObject *args, PyObject *kwds) {
    static char *kwlist[] = {
        "ip", "source_ip", "buffer_count", "packet_size", "queue_length", NULL
    };

    const char *ip = NULL;
    const char *source_ip = NULL;
    unsigned int buffer_count = 10;
    unsigned int packet_size = 9000;
    uint32_t ip_num, source_ip_num;
    char *manufacturer, *model;
    int ret;

    self->main_thread = PyGILState_GetThisThreadState();
    self->ready = NOT_READY;
    TAILQ_INIT(&self->images);
    self->images_length = 0;
    self->images_max = 50;

    /*
     * This means the definition is:
     * def __init__(self, ip, source_ip, buffer_count=10, packet_size=9000,
     *              queue_length=50):
     */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "ss|III", kwlist,
                &ip, &source_ip, &buffer_count, &packet_size,
                &self->images_max)) {
        return -1;
    }

    ip_num = ip_string_to_int(ip);
    source_ip_num = ip_string_to_int(source_ip);

    ret = openCamera(&self->handle, ip_num, source_ip_num, HEARTBEAT_TIMEOUT);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    self->ready = CONNECTED;

    /* Copy the vendor strings; the SDK presumably owns the originals. */
    manufacturer = strdup(Camera_getManufacturerName(self->handle));
    if (!manufacturer) {
        PyErr_SetString(PyExc_MemoryError, "Unable to allocate name");
        return -1;
    }

    model = strdup(Camera_getModelName(self->handle));
    if (!model) {
        free(manufacturer);
        PyErr_SetString(PyExc_MemoryError, "Unable to allocate name");
        return -1;
    }

    /* "<manufacturer> <model>" becomes the camera's display name. */
    self->name = PyBytes_FromFormat("%s %s", manufacturer, model);
    free(manufacturer);
    free(model);
    if (!self->name) {
        return -1;
    }

    self->ready = NAME_ALLOCATED;

    ret = Camera_getTimestampTickFrequency(self->handle, &self->tick_frequency);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    ret = Camera_getImagerWidth(self->handle, &self->width);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    ret = Camera_getImagerHeight(self->handle, &self->height);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    /* 12-bit pixel depth */
    self->depth = 12;
    ret = Camera_setPixelDepth(self->handle, SVGIGE_PIXEL_DEPTH_12);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    /* Image buffer size in bytes */
    ret = Camera_getBufferSize(self->handle, &self->buffer_size);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    /* Open stream; incoming frames arrive via the stream callback. */
    ret = addStream(self->handle, &self->stream, &self->stream_ip,
                    &self->stream_port, self->buffer_size, buffer_count,
                    packet_size, PACKET_RESEND_TIMEOUT,
                    svs_core_Camera_stream_callback, self);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    ret = enableStream(self->stream, 1);
    if (ret != SVGigE_SUCCESS) {
        raise_general_error(ret);
        return -1;
    }

    self->ready = READY;

    return 0;
}
Esempio n. 20
0
  /** Dynamic reconfigure callback
   *
   *  Called immediately when callback first defined. Called again
   *  when dynamic reconfigure starts or changes a parameter value.
   *
   *  @param newconfig new Config values
   *  @param level bit-wise OR of reconfiguration levels for all
   *               changed parameters (0xffffffff on initial call)
   **/
  void reconfig(Config &newconfig, uint32_t level)
  {
    ROS_DEBUG("dynamic reconfigure level 0x%x", level);

    // resolve frame ID using tf_prefix parameter
    if (newconfig.frame_id == "")
      newconfig.frame_id = "camera";
    std::string tf_prefix = tf::getPrefixParam(privNH_);
    ROS_DEBUG_STREAM("tf_prefix: " << tf_prefix);
    newconfig.frame_id = tf::resolve(tf_prefix, newconfig.frame_id);

    // RECONFIGURE_CLOSE-level parameters can only change while closed.
    if (state_ != Driver::CLOSED && (level & Levels::RECONFIGURE_CLOSE))
      {
        // must close the device before updating these parameters
        closeCamera();                  // state_ --> CLOSED
      }

    if (state_ == Driver::CLOSED)
      {
        // open with new values
        if (openCamera(newconfig))
          {
            // update GUID string parameter
            newconfig.guid = camera_name_;
          }
      }

    if (config_.camera_info_url != newconfig.camera_info_url)
      {
        // set the new URL and load CameraInfo (if any) from it
        if (cinfo_->validateURL(newconfig.camera_info_url))
          {
            cinfo_->loadCameraInfo(newconfig.camera_info_url);
          }
        else
          {
            // new URL not valid, use the old one
            newconfig.camera_info_url = config_.camera_info_url;
          }
      }

    if (state_ != Driver::CLOSED)       // openCamera() succeeded?
      {
        // configure IIDC features
        if (level & Levels::RECONFIGURE_CLOSE)
          {
            // initialize all features for newly opened device
            if (false == dev_->features_->initialize(&newconfig))
              {
                ROS_ERROR_STREAM("[" << camera_name_
                                 << "] feature initialization failure");
                closeCamera();          // can't continue
              }
          }
        else
          {
            // update any features that changed
            dev_->features_->reconfigure(&newconfig);
          }
      }

    config_ = newconfig;                // save new parameters

    ROS_DEBUG_STREAM("[" << camera_name_
                     << "] reconfigured: frame_id " << newconfig.frame_id
                     << ", camera_info_url " << newconfig.camera_info_url);
  }
Esempio n. 21
0
int main(int argc, const char** argv) {

	RectResizer resizer(*ORIG_SIZE, *NEW_SIZE);
	Segment scan(resizer.resize(cv::Rect(63, 112, 107, 48)), "SCAN", "    ");
	Segment hold(resizer.resize(cv::Rect(170, 108, 111, 51)), "HOLD", "    ");
	scan.setThreshold(0.01);
	hold.setThreshold(0.01);

	Digit* d[NUM_OF_DIGITS];

	d[0] = new Digit(resizer.resize(cv::Rect(45, 182, 112, 214)));
	d[1] = new Digit(resizer.resize(cv::Rect(156, 182, 112, 214)));
	d[2] = new Digit(resizer.resize(cv::Rect(272, 182, 103, 214)));
	d[3] = new Digit(resizer.resize(cv::Rect(419, 179, 105, 214)));

	cv::Mat image;

	/*
	 //From pictures
	 for (int im = 1; im < argc; im++) {
	 image = readImage(argc, argv, im);

	 readData(image, d, scan, hold);
	 printData(d, scan, hold);
	 showImage(image, d, scan, hold);

	 cv::waitKey(0);
	 }
	*/


	cv::VideoCapture cap;

	openCamera(cap, argc, argv);

	bool paused = false;

	cv::Mat frame;

	for (;;) {
		if (!paused) {
			cap >> frame;
			if (frame.empty()) {
				break;
			}
		}

		frame.copyTo(image);
		readData(image, d, scan, hold);
		printData(d, scan, hold);
		showImage(image, d, scan, hold);

		char c = (char) cv::waitKey(10);
		if (c == 27)
			break;
		switch (c) {
		case 'p':
			paused = !paused;
			break;
		default:
			;
		}
	}
Esempio n. 22
0
  /** Dynamic reconfigure callback
   *
   *  Called immediately when callback first defined. Called again
   *  when dynamic reconfigure starts or changes a parameter value.
   *
   *  @param newconfig new Config values
   *  @param level bit-wise OR of reconfiguration levels for all
   *               changed parameters (0xffffffff on initial call)
   **/
  void Camera1394Driver::reconfig(Config &newconfig, uint32_t level)
  {
    // Do not run concurrently with poll().  Tell it to stop running,
    // and wait on the lock until it does.
    reconfiguring_ = true;
    boost::mutex::scoped_lock lock(mutex_);
    ROS_DEBUG("dynamic reconfigure level 0x%x", level);

    // resolve frame ID using tf_prefix parameter
    if (newconfig.frame_id == "")
      newconfig.frame_id = "camera";
    std::string tf_prefix = tf::getPrefixParam(priv_nh_);
    ROS_DEBUG_STREAM("tf_prefix: " << tf_prefix);
    newconfig.frame_id = tf::resolve(tf_prefix, newconfig.frame_id);

    // RECONFIGURE_CLOSE-level parameters can only change while closed.
    if (state_ != Driver::CLOSED && (level & Levels::RECONFIGURE_CLOSE))
      {
        // must close the device before updating these parameters
        closeCamera();                  // state_ --> CLOSED
      }

    if (state_ == Driver::CLOSED)
      {
        // open with new values
        openCamera(newconfig);
      }

    if (config_.camera_info_url != newconfig.camera_info_url)
      {
        // set the new URL and load CameraInfo (if any) from it
        if (cinfo_->validateURL(newconfig.camera_info_url))
          {
            cinfo_->loadCameraInfo(newconfig.camera_info_url);
          }
        else
          {
            // new URL not valid, use the old one
            newconfig.camera_info_url = config_.camera_info_url;
          }
      }

    if (state_ != Driver::CLOSED)       // openCamera() succeeded?
      {
        // configure IIDC features
        if (level & Levels::RECONFIGURE_CLOSE)
          {
            // initialize all features for newly opened device
            if (false == dev_->features_->initialize(&newconfig))
              {
                ROS_ERROR_STREAM("[" << camera_name_
                                 << "] feature initialization failure");
                closeCamera();          // can't continue
              }
          }
        else
          {
            // update any features that changed
            // TODO replace this with a direct call to
            //   Feature::reconfigure(&newconfig);
            dev_->features_->reconfigure(&newconfig);
          }
      }

    config_ = newconfig;                // save new parameters
    reconfiguring_ = false;             // let poll() run again

    ROS_DEBUG_STREAM("[" << camera_name_
                     << "] reconfigured: frame_id " << newconfig.frame_id
                     << ", camera_info_url " << newconfig.camera_info_url);
  }
Esempio n. 23
0
//--------------------------------------------------------------
// Application setup: choose the input source (Kinect when one is
// available, otherwise a camera), allocate the image-processing
// buffers, start audio/FFT capture, and build the settings GUI.
void testApp::setup(){

	ofSetFrameRate(30);
    
    gpuBlur = NULL;
    useKinect = false;
    kinectOpen = false;
    cameraOpen = false;
    learnBG = false;
    showGui = false;
    inputWidth = 640;
    inputHeight = 480;
    
    guiPath = "guisettings.xml";
    
    // Prefer a Kinect if one is plugged in; otherwise fall back to a
    // regular camera.
    if (ofxKinect::numAvailableDevices() >= 1) {
        cout << "using kinect" << endl;
        openKinect();
        useKinect = true;
    } else {
        cout << "using camera" << endl;
        openCamera();
        useKinect = false;
    }
    
    // Working images for background subtraction / thresholding.
	colorImg.allocate(inputWidth, inputHeight);
	grayImage.allocate(inputWidth, inputHeight);
	grayImagePrev.allocate(inputWidth, inputHeight);
	grayThreshNear.allocate(inputWidth, inputHeight);
	grayThreshFar.allocate(inputWidth, inputHeight);
    grayBg.allocate(inputWidth, inputHeight);
    
	nearThreshold = 255;
	farThreshold = 45;
    
    setupGpuBlur(inputWidth, inputHeight);
    
#ifdef USE_OFX_SOUNDSTREAM
    xss.setDeviceId(0);
    xss.setup(0, 1, this, 44100, fftLive.getBufferSize(), 4);
#else
    xss.setup(this, 0, 1, 44100, fftLive.getBufferSize(), 4);
#endif

    fftLive.setMirrorData(audioMirror);
    
    lows = 0;
    
    // Half the FFT buffer holds usable spectrum bins.
    nbins = (int)(fftLive.getBufferSize() * 0.5);
    rawFFT = new float[nbins];
    
    setupColorMesh();
    
    colors.allocate(ofGetWidth(), ofGetHeight(), GL_RGBA);
    updateColors();
    drawmode = 0;
    
    // GUI sliders/toggles; values are then restored from guiPath.
    gui.setup("Settings", guiPath, 20, 20);
    gui.add(audioThreshold.setup("audioThreshold", 0.5, 0.0, 1.0));
    gui.add(audioPeakDecay.setup("audioPeakDecay", 0.95, 0.0, 1.0));
    gui.add(audioMaxDecay.setup("audioMaxDecay", 1.0, 0.0, 1.0));
    gui.add(audioMirror.setup("audioMirror", false));
    gui.add(nearThreshold.setup("nearThreshold", 255, 0, 255));
    gui.add(farThreshold.setup("farThreshold", 45, 0, 255));
    gui.add(blobMin.setup("blobMin", 2000, 200, 20000));
    gui.add(mirrorInput.setup("mirrorInput", true));
    gui.add(drawFPS.setup("FPS", true));
    gui.add(useGPU.setup("GPUBlur", true));
    gui.add(triangleMax.setup("triangleMax", 15, 3, 60));
    gui.add(glowExtra.setup("glowExtra", 1.0, 0.0, 10.0));
    gui.add(lineWidth.setup("lineWidth", 10.0, 1.0, 20.0));
    gui.add(jitterSize.setup("jitterSize", 0.5, 0.0, 2.0));
    
    gui.loadFromFile(guiPath);
    
   
}
Esempio n. 24
0
	/** Dynamic reconfigure callback
	 *
	 *  Called immediately when callback first defined. Called again
	 *  when dynamic reconfigure starts or changes a parameter value.
	 *
	 *  @param newconfig new Config values
	 *  @param level bit-wise OR of reconfiguration levels for all
	 *               changed parameters (0xffffffff on initial call)
	 **/
	void reconfig(Config &newconfig, uint32_t level)
	{
		ROS_DEBUG("dynamic reconfigure level 0x%x", level);

		// resolve frame ID using tf_prefix parameter
		if (newconfig.frame_id == "")
			newconfig.frame_id = "camera";
		std::string tf_prefix = tf::getPrefixParam(privNH_);
		ROS_DEBUG_STREAM("tf_prefix: " << tf_prefix);
		newconfig.frame_id = tf::resolve(tf_prefix, newconfig.frame_id);

		if (state_ != Driver::CLOSED && (level & Levels::RECONFIGURE_CLOSE))
		{
			// must close the device before updating these parameters
			closeCamera();                  // state_ --> CLOSED
		}

		if (state_ == Driver::CLOSED)
		{
			// open with new values
			if (openCamera(newconfig))
			{
				// update camera name string
				newconfig.camera_name = camera_name_;
			}
		}
		
		
		///THIS IS A QUICK HACK TO GET EXPOSURE ON OUR CAMERA'S THIS DOES NOT WORK FOR ALL CAMERAS
		// Each changed control is pushed individually; the hex ids are
		// presumably V4L2 control ids for this specific camera — confirm
		// against the uvc_cam driver before reusing on other hardware.

		if(config_.exposure != newconfig.exposure){
			try {
				  cam_->set_control(0x9a0901, newconfig.exposure);
			} catch (uvc_cam::Exception& e) {
				  ROS_ERROR_STREAM("Problem setting exposure. Exception was " << e.what());
			}
		}
		if(config_.absolute_exposure != newconfig.absolute_exposure){
			try {
		  cam_->set_control(0x9a0902, newconfig.absolute_exposure);
		  	} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting absolute exposure. Exception was " << e.what());
			}
		}
		if(config_.sharpness != newconfig.sharpness){
			try {
		  cam_->set_control(0x98091b, newconfig.sharpness);
			} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting sharpness. Exception was " << e.what());
			}
		}
		if(config_.power_line_frequency != newconfig.power_line_frequency){
			try {
		  cam_->set_control(0x980918, newconfig.power_line_frequency);
			} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting powerline frequency. Exception was " << e.what());
			}
		}
		if(config_.white_balance_temperature != newconfig.white_balance_temperature){
			try {
		  cam_->set_control(0x98090c, newconfig.white_balance_temperature);
		  	} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting white balance temperature. Exception was " << e.what());
			}
		}
		if(config_.gain != newconfig.gain){
			try {
		  cam_->set_control(0x980913, newconfig.gain);
		  	} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting gain. Exception was " << e.what());
			}
		}
		if(config_.saturation != newconfig.saturation){
			try {
		  cam_->set_control(0x980902, newconfig.saturation);
		  	} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting saturation. Exception was " << e.what());
			}
		}
		if(config_.contrast != newconfig.contrast){
			try {
		  cam_->set_control(0x980901, newconfig.contrast);
		  	} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting contrast. Exception was " << e.what());
			}
		}
		if(config_.brightness != newconfig.brightness){
			try {
		  cam_->set_control(0x980900, newconfig.brightness);
		  	} catch (uvc_cam::Exception& e) {
				ROS_ERROR_STREAM("Problem setting brightness. Exception was " << e.what());
			}
		}
		
		

		if (config_.camera_info_url != newconfig.camera_info_url)
		{
			// set the new URL and load CameraInfo (if any) from it
			if (cinfo_.validateURL(newconfig.camera_info_url))
			{
				cinfo_.loadCameraInfo(newconfig.camera_info_url);
			}
			else
			{
				// new URL not valid, use the old one
				newconfig.camera_info_url = config_.camera_info_url;
			}
		}

		//	    if (state_ != Driver::CLOSED)       // openCamera() succeeded?
		//	      {
		//	        // configure IIDC features
		//	        if (level & Levels::RECONFIGURE_CLOSE)
		//	          {
		//	            // initialize all features for newly opened device
		//	            if (false == dev_->features_->initialize(&newconfig))
		//	              {
		//	                ROS_ERROR_STREAM("[" << camera_name_
		//	                                 << "] feature initialization failure");
		//	                closeCamera();          // can't continue
		//	              }
		//	          }
		//	        else
		//	          {
		//	            // update any features that changed
		//	            dev_->features_->reconfigure(&newconfig);
		//	          }
		//	      }

		config_ = newconfig;                // save new parameters

		ROS_DEBUG_STREAM("[" << camera_name_
				<< "] reconfigured: frame_id " << newconfig.frame_id
				<< ", camera_info_url " << newconfig.camera_info_url);
	}
  /** Dynamic reconfigure callback
   *
   *  Called immediately when callback first defined. Called again
   *  when dynamic reconfigure starts or changes a parameter value.
   *
   *  Stereo variant: left and right camera_info URLs are validated and
   *  loaded independently.
   *
   *  @param newconfig new Config values
   *  @param level bit-wise OR of reconfiguration levels for all
   *               changed parameters (0xffffffff on initial call)
   **/
  void Camera1394StereoDriver::reconfig(Config &newconfig, uint32_t level)
  {
    // Do not run concurrently with poll().  Tell it to stop running,
    // and wait on the lock until it does.
    reconfiguring_ = true;
    boost::mutex::scoped_lock lock(mutex_);
    ROS_DEBUG("dynamic reconfigure level 0x%x", level);

    // resolve frame ID using tf_prefix parameter
    if (newconfig.frame_id == "")
      newconfig.frame_id = "camera";
    std::string tf_prefix = tf::getPrefixParam(priv_nh_);
    ROS_DEBUG_STREAM("tf_prefix: " << tf_prefix);
    newconfig.frame_id = tf::resolve(tf_prefix, newconfig.frame_id);

    if (state_ != Driver::CLOSED && (level & Levels::RECONFIGURE_CLOSE))
      {
        // must close the device before updating these parameters
        closeCamera();                  // state_ --> CLOSED
      }

    if (state_ == Driver::CLOSED)
      {
        // open with new values
        if (openCamera(newconfig))
          {
            // update GUID string parameter
            // TODO move into dev_->open()
            newconfig.guid = camera_name_;
          }
      }

    // Pair up old/new camera_info URLs so both eyes share one loop.
    std::string camera_info_url[NUM_CAMERAS];
    camera_info_url[LEFT] = config_.camera_info_url_left;
    camera_info_url[RIGHT] = config_.camera_info_url_right;
    std::string new_camera_info_url[NUM_CAMERAS];
    new_camera_info_url[LEFT] = newconfig.camera_info_url_left;
    new_camera_info_url[RIGHT] = newconfig.camera_info_url_right;

    for (int i=0; i<NUM_CAMERAS; i++)
      {
        if (camera_info_url[i] != new_camera_info_url[i])
          {
            // set the new URL and load CameraInfo (if any) from it
            if (cinfo_[i]->validateURL(new_camera_info_url[i]))
              {
                cinfo_[i]->loadCameraInfo(new_camera_info_url[i]);
              }
            else
              {
                // new URL not valid, use the old one
                ROS_WARN_STREAM("[" << camera_name_
                                    << "_" << CameraSelectorString[i]
                                    << "] not updating calibration to invalid URL "
                                    << new_camera_info_url[i] );
              }
          }
       }
    if (state_ != Driver::CLOSED)       // openCamera() succeeded?
      {
        // configure IIDC features
        if (level & Levels::RECONFIGURE_CLOSE)
          {
            // initialize all features for newly opened device
            if (false == dev_->features_->initialize(&newconfig))
              {
                ROS_ERROR_STREAM("[" << camera_name_
                                 << "] feature initialization failure");
                closeCamera();          // can't continue
              }
          }
        else
          {
            // update any features that changed
            // TODO replace this with a dev_->reconfigure(&newconfig);
            dev_->features_->reconfigure(&newconfig);
          }
      }

    config_ = newconfig;                // save new parameters
    reconfiguring_ = false;             // let poll() run again

    ROS_DEBUG_STREAM("[" << camera_name_
                     << "] reconfigured: frame_id " << newconfig.frame_id
                     << ", camera_info_url_left " << newconfig.camera_info_url_left
                     << ", camera_info_url_right " << newconfig.camera_info_url_right);
  }
Esempio n. 26
0
// Runs the dot detector and sends detected dots to the server on the given port.
// TODO: Implement headless mode. Needs more config options and/or possibly a config file first, though.
/**
 * Main dot-detection loop.
 *
 * Grabs frames from the camera, thresholds them for the configured color
 * range, detects dot-like contours, perspective-transforms their centers and
 * sends them to the server, while showing debug windows and handling
 * interactive keyboard commands.
 *
 * @param serverAddress Host to send detected dots to
 * @param serverPort    TCP port on serverAddress
 * @param headless      Currently unused — TODO implement headless mode
 * @return EXIT_SUCCESS on clean shutdown, EXIT_FAILURE on setup/grab errors
 */
int run( const char *serverAddress, const int serverPort, char headless ) {
    char calibrate_exposure = 0, show = ~0, flip = 0, vflip = 0, done = 0, warp = 0; //"Boolean" values used in this loop
    char noiceReduction = 2; //Small counter, so char is still ok.
    int i, sockfd; //Generic counter
    int minDotRadius = 1; //Minimum dot radius (in pixels) to accept as a detection
    int detected_dots; //Detected dot counter
    int returnValue = EXIT_SUCCESS;
    int captureControl; //File descriptor for low-level camera controls
    int currentExposure = 150;
    int maxExposure = 1250; //Maximum exposure supported by the camera TODO Get this from the actual camera
    Color min = { 0, 70, 0, 0 }; //Minimum color to detect
    Color max = { 255, 255, 255, 0 }; //Maximum color to detect
    CvScalar colorWhite = cvScalar( WHITE ); //Color to draw detected dots on black and white surface
    BoundingBox DD_mask; //The box indicating what should and what should not be considered for dot search
    BoundingBox DD_transform; //The box indicating the plane we are looking at( and as such is the plane we would transform from )
    BoundingBox DD_transform_to; //The plane we are transforming to
    CvCapture *capture = NULL; //The camera
    CvMemStorage *storage; //Low level memory area used for dynamic structures in OpenCV
    CvSeq *seq; //Sequence to store detected dots in
    IplImage *grabbedImage = NULL; //Raw image from camera( plus some overlay in the end )
    IplImage *imgThreshold = NULL; //Image with detected dots
    IplImage *mask = NULL; //Mask to be able to remove uninteresting areas
    IplImage *coloredMask = NULL; //Mask to be able to indicate above mask on output image
    CvFont font; //Font for drawing text on images
    SendQueue *queue; //Head of the linked list that is the send queue
    char strbuf[255]; //Generic buffer for text formatting( with sprintf())
    struct timeval oldTime, time, diff; //Structs for measuring FPS
    float lastKnownFPS = 0; //Calculated FPS
    CvMat* pointRealMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* pointTransMat = cvCreateMat( 1,1,CV_32FC2 ); //Single point matrix for point transformation
    CvMat* transMat = cvCreateMat( 3,3,CV_32FC1 ); //Translation matrix for transforming input to a straight rectangle
    ClickParams clickParams = { TOP_LEFT, NULL, &DD_transform_to, transMat }; //Struct holding data needed by mouse-click callback function

    // Set up network
    sockfd = initNetwork( serverAddress, serverPort );
    if( sockfd == -1 ) {
        fprintf( stderr, "ERROR: initNetwork returned -1\n");
        return EXIT_FAILURE;
    }
    queue = initSendQueue();

    if( openCamera( &capture, &captureControl ) == 0 ) {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        return EXIT_FAILURE;
    }

    if( ( disableAutoExposure( captureControl ) ) == -1 ) {
        fprintf( stderr, "ERROR: Cannot disable auto exposure \n" );
        //return EXIT_FAILURE;
    }

    if( ( updateAbsoluteExposure( captureControl, currentExposure ) ) == 0 ) {
        fprintf( stderr, "ERROR: Cannot set exposure\n");
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( imagewindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the configuration sliders and the detection frame TODO This is kind of a hack. Make a better solution
    cvNamedWindow( configwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create a window to hold the transformed image. Handy to see how the dots are translated, but not needed for functionality
    if( warp ) cvNamedWindow( warpwindowname, CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL );

    // Create sliders to adjust the lower color boundry
    cvCreateTrackbar( red_lable  , configwindowname, &min.red,   255, NULL );
    cvCreateTrackbar( green_lable, configwindowname, &min.green, 255, NULL );
    cvCreateTrackbar( blue_lable , configwindowname, &min.blue,  255, NULL );

    //Create sliders for the contour based dot detection
    cvCreateTrackbar( min_area_lable, configwindowname, &minDotRadius,255, NULL );

    /* Slider for manual exposure setting */
    cvCreateTrackbar( exposure_lable, configwindowname, &currentExposure, maxExposure, NULL );

    //Create the memory storage
    storage = cvCreateMemStorage( 0 );

    // void cvInitFont( font, font_face, hscale, vscale, shear=0, thickness=1, line_type=8 )
    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 8 );

    // Grab an initial image to be able to fetch image size before the main loop.
    grabbedImage = cvQueryFrame( capture );

    //Move the two windows so both are visible at the same time
    cvMoveWindow( imagewindowname, 0, 10 );
    cvMoveWindow( configwindowname, grabbedImage->width+2, 10 );

    //TODO Move these three inits to a function
    // Set masking defaults TODO load from file? Specify file for this file loading?
    DD_mask.topLeft.x = 0;
    DD_mask.topLeft.y = 0;

    DD_mask.topRight.x = grabbedImage->width-1;
    DD_mask.topRight.y = 0;

    DD_mask.bottomLeft.x = 0;
    DD_mask.bottomLeft.y = grabbedImage->height-1;

    DD_mask.bottomRight.x = grabbedImage->width-1;
    DD_mask.bottomRight.y = grabbedImage->height-1;

    // Set transformation defaults TODO load from file? Specify file for this file loading?
    DD_transform.topLeft.x = 0;
    DD_transform.topLeft.y = 0;

    DD_transform.topRight.x = grabbedImage->width-1;
    DD_transform.topRight.y = 0;

    DD_transform.bottomLeft.x = 0;
    DD_transform.bottomLeft.y = grabbedImage->height-1;

    DD_transform.bottomRight.x = grabbedImage->width-1;
    DD_transform.bottomRight.y = grabbedImage->height-1;

    // Set the transformation destination
    DD_transform_to.topLeft.x = 0;
    DD_transform_to.topLeft.y = 0;

    DD_transform_to.topRight.x = grabbedImage->width-1;
    DD_transform_to.topRight.y = 0;

    DD_transform_to.bottomLeft.x = 0;
    DD_transform_to.bottomLeft.y = grabbedImage->height-1;

    DD_transform_to.bottomRight.x = grabbedImage->width-1;
    DD_transform_to.bottomRight.y = grabbedImage->height-1;

    calculateTransformationMatrix( &DD_transform, &DD_transform_to, transMat );

    // Set callback function for mouse clicks
    cvSetMouseCallback( imagewindowname, calibrateClick, ( void* ) &clickParams );

    gettimeofday( &oldTime, NULL );

    // Main loop. Grabs an image from cam, detects dots, sends dots, and prints dots to images and shows to user
    while( !done ) {

        //PROFILING_PRO_STAMP(); //Uncomment this and the one in the end of the while-loop, and comment all other PROFILING_* to profile main-loop

        // ------ Common actions
        cvClearMemStorage( storage );
        detected_dots = 0;

        //Grab a frame from the camera
        PROFILING_PRO_STAMP();
        grabbedImage = cvQueryFrame( capture );
        PROFILING_POST_STAMP( "cvQueryFrame");

        if( grabbedImage == NULL ) {
            fprintf( stderr, "ERROR: frame is null...\n" );
            getchar();
            returnValue = EXIT_FAILURE;
            break;
        }

        //Flip images to act as a mirror.
        if( show && flip ) {
            cvFlip( grabbedImage, grabbedImage, 1 );
        }
        if( show && vflip ) {
            cvFlip( grabbedImage, grabbedImage, 0 );
        }

        // ------ State based actions
        switch( state ) {
            case GRAB_DOTS:

                //Create detection image
                imgThreshold = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvInRangeS( grabbedImage, cvScalar( DD_COLOR( min )), cvScalar( DD_COLOR( max )), imgThreshold );

                //Mask away anything not in our calibration area
                mask = cvCreateImage( cvGetSize( grabbedImage ), 8, 1 );
                cvZero( mask );
                // NOTE(review): assumes BoundingBox lays out its four corners as a
                // contiguous CvPoint[4] array — confirm the struct definition.
                cvFillConvexPoly( mask, ( CvPoint* ) &DD_mask, 4, cvScalar( WHITE ), 1, 0 );
                cvAnd( imgThreshold, mask, imgThreshold, NULL );

                // Invert mask, increase the number of channels in it and overlay on grabbedImage //TODO Tint the mask red before overlaying
                cvNot( mask, mask );
                coloredMask = cvCreateImage( cvGetSize( grabbedImage ), grabbedImage->depth, grabbedImage->nChannels );
                cvCvtColor( mask, coloredMask, CV_GRAY2BGR );
                cvAddWeighted( grabbedImage, 0.95, coloredMask, 0.05, 0.0, grabbedImage );


                // Reduce noise.
                // Erode is kind of floor() of pixels, dilate is kind of ceil()
                // I'm not sure which gives the best result.
                switch( noiceReduction ) {
                    case 0: break; //No noice reduction at all
                    case 1: cvErode( imgThreshold, imgThreshold, NULL, 2 ); break;
                    case 2: cvDilate( imgThreshold, imgThreshold, NULL, 2 ); break;
                }

                // Warp the warp-image. We are reusing the coloredMask variable to save some space
                PROFILING_PRO_STAMP();
                if( show && warp ) cvWarpPerspective( grabbedImage, coloredMask, transMat, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ));
                PROFILING_POST_STAMP( "Warping perspective" );


                // Find all dots in the image
                PROFILING_PRO_STAMP();

                // Clear old data from seq
                seq = 0;

                // Find the dots
                cvFindContours(
                        imgThreshold,
                        storage,
                        &seq,
                        sizeof( CvContour ),
                        CV_RETR_LIST,
                        CV_CHAIN_APPROX_SIMPLE,
                        cvPoint( 0,0 )
                        );
                // cvFindContours destroys the original image, so we wipe it here
                // and then repaint the detected dots later
                cvZero( imgThreshold );

                PROFILING_POST_STAMP( "Dot detection" );

                //Process all detected dots
                PROFILING_PRO_STAMP();
                for( ; seq != 0; seq = seq->h_next ) {

                    // Calculate radius of the detected contour
                    CvRect rect =( ( CvContour * )seq )->rect;
                    float relCenterX = rect.width / 2;
                    float relCenterY = rect.height / 2;

                    // Make sure the dot is big enough
                    if( relCenterX < minDotRadius || relCenterY < minDotRadius ) {
                        continue;
                    }

                    // Note that we have found another dot
                    ++detected_dots;

                    // Transform the detected dot according to transformation matrix.
                    // Copy the point into the matrix's own buffer instead of
                    // repointing data.fl at a stack array: the old assignment
                    // leaked the buffer allocated by cvCreateMat and left the
                    // matrix dangling after each loop iteration.
                    float absCenter[] = { rect.x + relCenterX, rect.y + relCenterY };
                    pointRealMat->data.fl[0] = absCenter[0];
                    pointRealMat->data.fl[1] = absCenter[1];
                    cvPerspectiveTransform( pointRealMat, pointTransMat, transMat );

                    // Draw the detected contour back to imgThreshold
                    // Draw the detected dot both to real image and to warped( if warp is active )
                    if( show ) {
                        cvDrawContours( imgThreshold, seq, colorWhite, colorWhite, -1, CV_FILLED, 8, cvPoint( 0,0 ) );
                        drawCircle( absCenter[0], absCenter[1], ( relCenterX + relCenterY ) / 2, grabbedImage );
                        if( warp ) {
                            drawCircle( pointTransMat->data.fl[0], pointTransMat->data.fl[1], ( relCenterX + relCenterY ) / 2, coloredMask );
                        }
                    }

                    // Add detected dot to send queue
                    addPointToSendQueue( pointTransMat->data.fl, queue );
                }

                PROFILING_POST_STAMP("Painting dots");

                //Calculate framerate
                gettimeofday( &time, NULL );
                timeval_subtract( &diff, &time, &oldTime );
                {
                    // Use the full elapsed time (seconds + microseconds) and
                    // guard against division by zero. The previous code used
                    // diff.tv_usec alone, which is wrong for frames slower than
                    // one second and divides by zero when tv_usec == 0.
                    long elapsedUsec = diff.tv_sec * 1000000L + diff.tv_usec;
                    if( elapsedUsec > 0 ) {
                        lastKnownFPS = lastKnownFPS * 0.7 + ( 1000000.0 / elapsedUsec ) * 0.3; //Exponential moving average
                    }
                }
                oldTime = time;

                //Send the dots detected this frame to the server
                PROFILING_PRO_STAMP();
                sendQueue( sockfd, queue );
                clearSendQueue( queue );
                PROFILING_POST_STAMP( "Sending dots" );

                /* If calibrating, do the calibration */
                if( calibrate_exposure ) {
                    int ret;
                    ret = calibrateExposureLow( captureControl, detected_dots, &currentExposure, DD_MAX_EXPOSURE, lastKnownFPS );
                    switch( ret ) {
                        case 0: // We are done. Let's leave calibration mode
                            calibrate_exposure = 0;
                            printf( "done\n" );
                            break;

                        case -1: // We hit the upper limit with no detected dots
                            fprintf( stderr, "Reached upper limit (%d). Aborting!\n", DD_MAX_EXPOSURE );
                            calibrate_exposure = 0;
                            break;

                        case -2: // We hit lower limit with more then one dot detected
                            fprintf( stderr, "Too bright. More then one dot found even with minimal exposure. Aborting!\n");
                            calibrate_exposure = 0;
                            break;

                        case -3: //No conclusive results.
                            fprintf( stderr, "No conclusive results. Giving up\n" );
                            calibrate_exposure = 0;
                            break;
                    }
                }

                break; //End of GRAB_DOTS

            case SELECT_TRANSFORM:
                //Falling through here. Poor man's multi-case clause. Not putting this in default as we might
                //want to do different things in these two some day.
            case SELECT_MASK:
                snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
                cvDisplayOverlay( imagewindowname, strbuf, 5 );
                break; //End of SELECT_MASK and SELECT_TRANSFORM
        }

        // Paint the corners of the detecting area and the calibration area
        paintOverlayPoints( grabbedImage, &DD_transform );

        //Print some statistics to the image
        if( show ) {
            snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
            snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
            cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
            cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
        }

        //Show images
        PROFILING_PRO_STAMP();
        if( show ) {
            cvShowImage( configwindowname, imgThreshold );
            cvShowImage( imagewindowname, grabbedImage );
            if( warp ) cvShowImage( warpwindowname, coloredMask );
        }
        PROFILING_POST_STAMP("Showing images");

        //Release the temporary images
        cvReleaseImage( &imgThreshold );
        cvReleaseImage( &mask );
        cvReleaseImage( &coloredMask );

        /* Update exposure if needed */
        updateAbsoluteExposure( captureControl, currentExposure );
        cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );

        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7( linux version ),
        //remove higher bits using AND operator
        i = ( cvWaitKey( 10 ) & 0xff );
        switch( i ) {
            case 'g':
                makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
                updateAbsoluteExposure( captureControl, currentExposure+1 );
                break;

            case 'e':
                toggleCalibrationMode( &calibrate_exposure, &currentExposure );
                break; /* Toggles calibration mode */

            case 'c':
                openCamera( &capture, &captureControl );
                break;

            case 's':
                show = ~show;
                break; //Toggles updating of the image. Can be useful for performance of slower machines... Or as frame freeze

            case 'm':
                state = SELECT_MASK;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_mask;
                break; //Starts selection of masking area. Will return to dot detection once all four points are set

            case 't':
                state = SELECT_TRANSFORM;
                clickParams.currentPoint = TOP_LEFT;
                clickParams.DD_box = &DD_transform;
                break; //Starts selection of the transformation area. Returns to dot detection when done.

            case 'f':
                flip = ~flip;
                break; //Toggles horizontal flipping of the image
            case 'v':
                vflip = ~vflip;
                break; //Toggles vertical flipping of the image

            case 'w':
                warp = ~warp;
                toggleWarpOutput( warp );
                break; //Toggles showing the warped image

            case 'n':
                noiceReduction = ( noiceReduction + 1 ) % 3;
                break; //Cycles noice reduction algorithm

            case 'q': //falling through here to quit

            case  27:
                done = 1;
                break; //ESC. Kills the whole thing( in a nice and controlled manner )
        }
        fflush( stdout ); //Make sure everything in the buffer is printed before we go on

        //PROFILING_POST_STAMP("Main loop");
    } //End of main while-loop

    // Release the capture device and do some housekeeping.
    // NOTE: grabbedImage is owned by the capture device (cvQueryFrame returns
    // an internal buffer) and must NOT be released here — doing so caused a
    // double free on shutdown.
    cvReleaseMat( &pointRealMat );
    cvReleaseMat( &pointTransMat );
    cvReleaseMat( &transMat );
    cvReleaseCapture( &capture );
    cvReleaseMemStorage( &storage );
    cvDestroyWindow( imagewindowname );
    cvDestroyWindow( configwindowname );
    if( warp ) cvDestroyWindow( warpwindowname ); //If no warp it is already destroyed
    destroySendQueue( queue );
    close( sockfd );
    close( captureControl );
    return returnValue;
}
Esempio n. 27
0
int  main()
{
	int width;
	char* bayer[] = {"RG","BG","GR","GB"};
	char* controls[MAX_CONTROL] = {"Exposure", "Gain", "Gamma", "WB_R", "WB_B", "Brightness", "USB Traffic"};

	int height;
	int i;
	char c;
	bool bresult;

	int time1,time2;
	int count=0;

	char buf[128]={0};

	int CamNum=0;
	
	///long exposure, exp_min, exp_max, exp_step, exp_flag, exp_default;
	//long gain, gain_min, gain_max,gain_step, gain_flag, gain_default;

	IplImage *pRgb;


	int numDevices = getNumberOfConnectedCameras();
	if(numDevices <= 0)
	{
		printf("no camera connected, press any key to exit\n");
		getchar();
		return -1;
	}
	else
		printf("attached cameras:\n");

	for(i = 0; i < numDevices; i++)
		printf("%d %s\n",i, getCameraModel(i));

	printf("\nselect one to privew\n");
	scanf("%d", &CamNum);


	bresult = openCamera(CamNum);
	if(!bresult)
	{
		printf("OpenCamera error,are you root?,press any key to exit\n");
		getchar();
		return -1;
	}

	printf("%s information\n",getCameraModel(CamNum));
	int iMaxWidth, iMaxHeight;
	iMaxWidth = getMaxWidth();
	iMaxHeight =  getMaxHeight();
	printf("resolution:%dX%d\n", iMaxWidth, iMaxHeight);
	if(isColorCam())
		printf("Color Camera: bayer pattern:%s\n",bayer[getColorBayer()]);
	else
		printf("Mono camera\n");
	
	for( i = 0; i < MAX_CONTROL; i++)
	{
			if(isAvailable((Control_TYPE)i))
				printf("%s support:Yes\n", controls[i]);
			else
				printf("%s support:No\n", controls[i]);
	}

	printf("\nPlease input the <width height bin image_type> with one space, ie. 640 480 2 0. use max resolution if input is 0. Press ESC when video window is focused to quit capture\n");
	int bin = 1, Image_type;
	scanf("%d %d %d %d", &width, &height, &bin, &Image_type);
	if(width == 0 || height == 0)
	{
		width = iMaxWidth;
		height = iMaxHeight;
	}

	initCamera(); //this must be called before camera operation. and it only need init once
	printf("sensor temperature:%02f\n", getSensorTemp());

//	IMG_TYPE image_type;
	
	while(!setImageFormat(width, height, bin, (IMG_TYPE)Image_type))//IMG_RAW8
	{
		printf("Set format error, please check the width and height\n ASI120's data size(width*height) must be integer multiple of 1024\n");
		printf("Please input the width and height again£¬ie. 640 480\n");
		scanf("%d %d %d %d", &width, &height, &bin, &Image_type);
	}
	printf("\nset image format %d %d %d %d success, start privew, press ESC to stop \n", width, height, bin, Image_type);

	
	if(Image_type == IMG_RAW16)
		pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_16U, 1);
	else if(Image_type == IMG_RGB24)
		pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 3);
	else
		pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 1);

	setValue(CONTROL_EXPOSURE, 100*1000, false); //ms//auto
	setValue(CONTROL_GAIN,getMin(CONTROL_GAIN), false); 
	setValue(CONTROL_BANDWIDTHOVERLOAD, getMin(CONTROL_BANDWIDTHOVERLOAD), false); //low transfer speed

	setValue(CONTROL_WB_B, 90, false);
 	setValue(CONTROL_WB_R, 48, false);
  	setAutoPara(getMax(CONTROL_GAIN)/2,10,150); //max auto gain and exposure and target brightness
//	EnableDarkSubtract("dark.bmp"); //dark subtract will be disabled when exposure set auto and exposure below 500ms
	startCapture(); //start privew


	

	bDisplay = 1;
#ifdef _LIN
	pthread_t thread_display;
	pthread_create(&thread_display, NULL, Display, (void*)pRgb);
#elif defined _WINDOWS
	HANDLE thread_setgainexp;
	thread_setgainexp = (HANDLE)_beginthread(Display,  NULL, (void*)pRgb);
#endif

	time1 = GetTickCount();
	int iStrLen = 0, iTextX = 40, iTextY = 60;
	void* retval;
//	int time0, iWaitMs = -1;
//	bool bGetImg;
	while(bMain)
	{

//		time0 = GetTickCount();
		getImageData((unsigned char*)pRgb->imageData, pRgb->imageSize, 200);

//		bGetImg = getImageData((unsigned char*)pRgb->imageData, pRgb->imageSize, iWaitMs);
		time2 = GetTickCount();
//		printf("waitMs%d, deltaMs%d, %d\n", iWaitMs, time2 - time0, bGetImg);
		count++;
		
		if(time2-time1 > 1000 )
		{
			sprintf(buf, "fps:%d dropped frames:%lu ImageType:%d",count, getDroppedFrames(), (int)getImgType());

			count = 0;
			time1=GetTickCount();	
			printf(buf);
			printf("\n");

		}
		if(Image_type != IMG_RGB24 && Image_type != IMG_RAW16)
		{
			iStrLen = strlen(buf);
			CvRect rect = cvRect(iTextX, iTextY - 15, iStrLen* 11, 20);
			cvSetImageROI(pRgb , rect);
			cvSet(pRgb, CV_RGB(180, 180, 180)); 
			cvResetImageROI(pRgb);
		}
		cvText(pRgb, buf, iTextX,iTextY );

		if(bChangeFormat)
		{
			bChangeFormat = 0;
			bDisplay = false;
			pthread_join(thread_display, &retval);
			cvReleaseImage(&pRgb);
			stopCapture();
			
			switch(change)
			{
				 case change_imagetype:
					Image_type++;
					if(Image_type > 3)
						Image_type = 0;
					
					break;
				case change_bin:
					if(bin == 1)
					{
						bin = 2;
						width/=2;
						height/=2;
					}
					else 
					{
						bin = 1;
						width*=2;
						height*=2;
					}
					break;
				case change_size_smaller:
					if(width > 320 && height > 240)
					{
						width/= 2;
						height/= 2;
					}
					break;
				
				case change_size_bigger:
				
					if(width*2*bin <= iMaxWidth && height*2*bin <= iMaxHeight)
					{
						width*= 2;
						height*= 2;
					}
					break;
			}
			setImageFormat(width, height, bin, (IMG_TYPE)Image_type);
			if(Image_type == IMG_RAW16)
				pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_16U, 1);
			else if(Image_type == IMG_RGB24)
				pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 3);
			else
				pRgb=cvCreateImage(cvSize(getWidth(),getHeight()), IPL_DEPTH_8U, 1);
			bDisplay = 1;
			pthread_create(&thread_display, NULL, Display, (void*)pRgb);
			startCapture(); //start privew
		}
	}
END:
	
	if(bDisplay)
	{
		bDisplay = 0;
#ifdef _LIN
   		pthread_join(thread_display, &retval);
#elif defined _WINDOWS
		Sleep(50);
#endif
	}
	
	stopCapture();
	closeCamera();
	cvReleaseImage(&pRgb);
	printf("main function over\n");
	return 1;
}
Esempio n. 28
0
  /** Dynamic reconfigure callback
   *
   *  Called immediately when callback first defined. Called again
   *  when dynamic reconfigure starts or changes a parameter value.
   *
   *  Statement order here is significant: the poll() thread is stopped
   *  first, the device is closed before close-requiring parameters are
   *  applied, publishers/CameraInfo managers are rebuilt on a
   *  mono<->stereo switch, and only then is the device reopened.
   *
   *  @param newconfig new Config values
   *  @param level bit-wise OR of reconfiguration levels for all
   *               changed parameters (0xffffffff on initial call)
   **/
  void Camera1394Driver::reconfig(Config &newconfig, uint32_t level)
  {
    // Do not run concurrently with poll().  Tell it to stop running,
    // and wait on the lock until it does.
    reconfiguring_ = true;
    boost::mutex::scoped_lock lock(mutex_);
    ROS_DEBUG("dynamic reconfigure level 0x%x", level);

    // resolve frame ID using tf_prefix parameter
    if (newconfig.frame_id == "")
      newconfig.frame_id = "camera";
    std::string tf_prefix = tf::getPrefixParam(priv_nh_);
    ROS_DEBUG_STREAM("tf_prefix: " << tf_prefix);
    newconfig.frame_id = tf::resolve(tf_prefix, newconfig.frame_id);

    // Only "mono" and "stereo_interlaced" are legal values; anything
    // else falls back to mono.
    if (newconfig.multicam != "mono" && newconfig.multicam != "stereo_interlaced")
    {
      ROS_WARN("Invalid value for parameter multicam.  Defaulting to 'mono'.");
      newconfig.multicam = "mono";
    }

    if (state_ != Driver::CLOSED && (level & Levels::RECONFIGURE_CLOSE))
      {
        // must close the device before updating these parameters
        closeCamera();                  // state_ --> CLOSED
      }

    if (newconfig.multicam == "mono")
      {
      if (config_.multicam != newconfig.multicam)
          {
        //switching from stereo
            // Rebuild a single CameraInfo manager / publisher on the
            // combined camera namespace and drop the right-side objects.
            left_cinfo_.reset(new camera_info_manager::CameraInfoManager(camera_nh_, camera_name_, newconfig.camera_info_url));
            right_cinfo_.reset();

            left_image_pub_ = image_transport::CameraPublisher();
            right_image_pub_ = image_transport::CameraPublisher();

            left_it_.reset(new image_transport::ImageTransport(camera_nh_));
            right_it_.reset();

            left_image_pub_ = left_it_->advertiseCamera("image_raw", 1);

          }

      if (config_.camera_info_url != newconfig.camera_info_url)
      {
      // set the new URL and load CameraInfo (if any) from it
      if (left_cinfo_->validateURL(newconfig.camera_info_url))
        {
        left_cinfo_->loadCameraInfo(newconfig.camera_info_url);
        }
      else
        {
        // new URL not valid, use the old one
        // NOTE(review): invalid URLs are reverted silently here (no
        // warning is logged) — confirm that is intended.
        newconfig.camera_info_url = config_.camera_info_url;
        }
      }
      }
    else
    {
      if (config_.multicam != newconfig.multicam)
      {
            // switching from mono
            // Rebuild separate left/right CameraInfo managers and
            // publishers on their own namespaces.
            left_cinfo_.reset(new camera_info_manager::CameraInfoManager(left_camera_nh_, camera_name_, newconfig.left_camera_info_url));
            right_cinfo_.reset(new camera_info_manager::CameraInfoManager(right_camera_nh_, camera_name_, newconfig.right_camera_info_url));

            left_image_pub_ = image_transport::CameraPublisher();
            right_image_pub_ = image_transport::CameraPublisher();

            left_it_.reset(new image_transport::ImageTransport(left_camera_nh_));
            right_it_.reset(new image_transport::ImageTransport(right_camera_nh_));

            left_image_pub_ = left_it_->advertiseCamera("image_raw", 1);
            right_image_pub_ = right_it_->advertiseCamera("image_raw", 1);
      }
      if (config_.left_camera_info_url != newconfig.left_camera_info_url)
      {
            // set the new URL and load CameraInfo (if any) from it
            if (left_cinfo_->validateURL(newconfig.left_camera_info_url))
              {
                left_cinfo_->loadCameraInfo(newconfig.left_camera_info_url);
              }
            else
              {
                // new URL not valid, use the old one
                newconfig.left_camera_info_url = config_.left_camera_info_url;
              }

            if (right_cinfo_->validateURL(newconfig.right_camera_info_url))
              {
                right_cinfo_->loadCameraInfo(newconfig.right_camera_info_url);
              }
            else
              {
                // new URL not valid, use the old one
                newconfig.right_camera_info_url = config_.right_camera_info_url;
              }

      }
    }

    if (state_ == Driver::CLOSED)
    {
      // open with new values
      if (openCamera(newconfig))
      {
        //update GUID string parameter
        newconfig.guid = camera_name_;
      }
    }

    if (state_ != Driver::CLOSED)       // openCamera() succeeded?
      {
        // configure IIDC features
        if (level & Levels::RECONFIGURE_CLOSE)
          {
            // initialize all features for newly opened device
            if (false == dev_->features_->initialize(&newconfig))
              {
                ROS_ERROR_STREAM("[" << camera_name_
                                 << "] feature initialization failure");
                closeCamera();          // can't continue
              }
          }
        else
          {
            // update any features that changed
            // TODO replace this with a dev_->reconfigure(&newconfig);
            dev_->features_->reconfigure(&newconfig);
          }
      }

    config_ = newconfig;                // save new parameters
    reconfiguring_ = false;             // let poll() run again

    if (config_.multicam == "mono")
      {
        ROS_DEBUG_STREAM("[" << camera_name_
                         << "] reconfigured: frame_id " << newconfig.frame_id
                         << ", camera_info_url " << newconfig.camera_info_url);
      }
    else
      {
        ROS_DEBUG_STREAM("[" << camera_name_
                         << "] reconfigured: frame_id " << newconfig.frame_id
                         << ", left_camera_info_url " << newconfig.left_camera_info_url
                         << ", right_camera_info_url " << newconfig.right_camera_info_url);
      }

  }
Esempio n. 29
0
/**
 * Constructs the NDK camera wrapper.
 *
 * Creates the ACameraManager, enumerates cameras, opens the chosen
 * back-facing camera, registers availability callbacks, and reads the
 * sensor's exposure-time and sensitivity ranges from the camera
 * characteristics (starting values are min + 2% of range). Sets valid_
 * to true only if all steps succeed (the CALL_* macros assert on error).
 */
NDKCamera::NDKCamera()
    : cameraMgr_(nullptr),
      activeCameraId_(""),
      outputContainer_(nullptr),
      captureSessionState_(CaptureSessionState::MAX_STATE),
      cameraFacing_(ACAMERA_LENS_FACING_BACK),
      cameraOrientation_(0),
      exposureTime_(static_cast<int64_t>(0)) {
  valid_ = false;
  requests_.resize(CAPTURE_REQUEST_COUNT);
  // Zero-fill the request slots; assumes the element type is POD-like —
  // TODO confirm it holds no owning members.
  memset(requests_.data(), 0, requests_.size() * sizeof(requests_[0]));
  cameras_.clear();
  cameraMgr_ = ACameraManager_create();
  ASSERT(cameraMgr_, "Failed to create cameraManager");

  // Pick up a back-facing camera to preview
  EnumerateCamera();
  ASSERT(activeCameraId_.size(), "Unknown ActiveCameraIdx");

  // Create back facing camera device
  CALL_MGR(openCamera(cameraMgr_, activeCameraId_.c_str(), GetDeviceListener(),
                      &cameras_[activeCameraId_].device_));

  CALL_MGR(registerAvailabilityCallback(cameraMgr_, GetManagerListener()));

  // Initialize camera controls(exposure time and sensitivity), pick
  // up value of 2% * range + min as starting value (just a number, no magic)
  ACameraMetadata* metadataObj;
  CALL_MGR(getCameraCharacteristics(cameraMgr_, activeCameraId_.c_str(),
                                    &metadataObj));
  ACameraMetadata_const_entry val = {
      0,
  };
  camera_status_t status = ACameraMetadata_getConstEntry(
      metadataObj, ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE, &val);
  if (status == ACAMERA_OK) {
    // Clamp the sensor-reported range to our supported window
    exposureRange_.min_ = val.data.i64[0];
    if (exposureRange_.min_ < kMinExposureTime) {
      exposureRange_.min_ = kMinExposureTime;
    }
    exposureRange_.max_ = val.data.i64[1];
    if (exposureRange_.max_ > kMaxExposureTime) {
      exposureRange_.max_ = kMaxExposureTime;
    }
    exposureTime_ = exposureRange_.value(2);
  } else {
    LOGW("Unsupported ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE");
    exposureRange_.min_ = exposureRange_.max_ = 0l;
    exposureTime_ = 0l;
  }
  status = ACameraMetadata_getConstEntry(
      metadataObj, ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE, &val);

  if (status == ACAMERA_OK){
    sensitivityRange_.min_ = val.data.i32[0];
    sensitivityRange_.max_ = val.data.i32[1];

    sensitivity_ = sensitivityRange_.value(2);
  } else {
    LOGW("failed for ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE");
    sensitivityRange_.min_ = sensitivityRange_.max_ = 0;
    sensitivity_ = 0;
  }
  // Characteristics metadata is owned by the caller and must be freed
  // (it was previously leaked on every construction).
  ACameraMetadata_free(metadataObj);
  valid_ = true;
}
Esempio n. 30
0
	/**
	 * Starts detection: opens the capture device at the requested
	 * resolution, then hands control to the detector's run loop.
	 *
	 * @param width  capture width in pixels
	 * @param height capture height in pixels
	 */
	void Detector::start(int width, int height)
	{
		openCamera(width, height);
		_dbt->run(); // presumably blocks until detection stops — TODO confirm
	}