/**
 * \brief default main function for focusing
 */
void	FocusWork::main(astro::thread::Thread<FocusWork>& thread) {
	if (!complete()) {
		debug(LOG_ERR, DEBUG_LOG, 0,
			"FocusWork is not completely configured");
		focusingstatus(Focusing::FAILED);
		return;
	}
	debug(LOG_DEBUG, DEBUG_LOG, 0, "starting focus process in [%d,%d]",
		min(), max());

	// prepare the set of focus items to base the focus computation on
	FocusItems	focusitems;

	// step through the focus positions and take a measurement at each one
	for (int step = 0; step < steps(); step++) {
		// find position
		unsigned short	position
			= min() + (step * (max() - min())) / (steps() - 1);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "next position: %hu", position);

		// move to this position
		moveto(position);

		// get an image
		focusingstatus(Focusing::MEASURING);
		ccd()->startExposure(exposure());
		usleep(1000000 * exposure().exposuretime());
		ccd()->wait();
		ImagePtr	image = ccd()->getImage();
		debug(LOG_DEBUG, DEBUG_LOG, 0, "got an image of size %s",
			image->size().toString().c_str());

		// evaluate the image
		double	value = (*evaluator())(image);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "evaluated to %f", value);

		// callback with the evaluated image
		callback(evaluator()->evaluated_image(), position, value);

		// add the information to a set
		focusitems.insert(FocusItem(position, value));
	}

	// now solve: the solver appropriate for the method computes the target position
	int	targetposition = solver()->position(focusitems);
	if ((targetposition < min()) || (targetposition > max())) {
		std::string	msg = stringprintf(
			"could not find a focus position: %d", targetposition);
		debug(LOG_ERR, DEBUG_LOG, 0, "%s", msg.c_str());
		focusingstatus(Focusing::FAILED);
		return;
	}

	// move to the final focus position
	focusingstatus(Focusing::MOVING);
	moveto(targetposition);
	focusingstatus(Focusing::FOCUSED);
}
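/*
 * A minimal standalone sketch of the spacing used above: steps() focuser
 * positions distributed evenly between min() and max(), both endpoints
 * included.  The helper below is hypothetical and only illustrates the
 * arithmetic of the loop (it assumes steps >= 2; complete() below even
 * requires steps >= 3).
 */
#include <vector>

static std::vector<unsigned short>	focus_grid(unsigned short minpos,
					unsigned short maxpos, int steps) {
	std::vector<unsigned short>	positions;
	for (int step = 0; step < steps; step++) {
		positions.push_back(minpos
			+ (step * (maxpos - minpos)) / (steps - 1));
	}
	return positions;
}
// e.g. focus_grid(1000, 5000, 5) yields 1000, 2000, 3000, 4000, 5000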
/**
 * \brief Analyze a single grid point
 *
 * Moves (relatively) to a grid point, takes an image and returns the
 * offset as measured by the tracker.
 */
Point	CalibrationProcess::starAt(double ra, double dec) {
	// move the telescope to the point
	moveto(grid * ra, grid * dec);

	// take an image at that position
	imager().startExposure(exposure());
	usleep(1000000 * exposure().exposuretime());
	ImagePtr	image = guider().getImage();

	// analyze the image
	Point	star = (*tracker())(image);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "tracker found star at %s",
		star.toString().c_str());
	return star;
}
/**
 * \brief Perform a measurement at a certain focus position
 */
FocusValue	MeasureFocusWork::measureat(unsigned short pos) {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "measurement at pos = %hu", pos);
	// move to the position
	focusingstatus(Focusing::MOVING);
	moveto(pos);

	// get an image
	focusingstatus(Focusing::MEASURING);
	ccd()->startExposure(exposure());
	ccd()->wait();
	ImagePtr	image = ccd()->getImage();

	// evaluate the image
	MeasureEvaluator	evaluator;
	double	value = evaluator(image);
	
	debug(LOG_DEBUG, DEBUG_LOG, 0, "pos = %hu, value = %g(%f)",
		pos, value, log10(value));

	// call the callback
	callback(evaluator.evaluated_image(), pos, value);

	// return the focus information
	return FocusValue(pos, value);
}
Example #4
File: widget.c  Project: cgbarnwell/mvpmc
int
mvpw_event_flush(void)
{
	GR_EVENT event;

	if (widget_count == 0)
		return -1;

	while (widget_count > 0) {
		GrCheckNextEvent(&event);
		switch (event.type) {
		case GR_EVENT_TYPE_EXPOSURE:
			exposure(&event.exposure);
			break;
		case GR_EVENT_TYPE_KEY_DOWN:
			keystroke(&event.keystroke);
			break;
		case GR_EVENT_TYPE_TIMER:
			timer(&event.timer);
			break;
		case GR_EVENT_TYPE_SCREENSAVER:
			screensaver(&event.screensaver);
			break;
		case GR_EVENT_TYPE_FDINPUT:
			fdinput(&event.fdinput);
			break;
		case GR_EVENT_TYPE_NONE:
			return 0;
			break;
		}
	}

	return 0;
}
Example #5
  double WP::loglike(const Vector &lam0_delta_weekday_weekend)const{
    const Mat &exposure(suf()->exposure());
    const Mat & count(suf()->count());

    double lambda0 = lam0_delta_weekday_weekend[0];
    Vector delta(7, 0.0);
    int pos = 1;
    VectorView(delta, 0, 6) = ConstVectorView(lam0_delta_weekday_weekend, pos, 6);
    delta[6] = 7.0 - sum(delta);
    pos += 6;

    Vector eta_weekday(24, 0.0);
    VectorView(eta_weekday, 0, 23) =
        ConstVectorView(lam0_delta_weekday_weekend, pos, 23);
    eta_weekday[23] = 24.0 - sum(eta_weekday);
    pos += 23;

    Vector eta_weekend(24, 0.0);
    VectorView(eta_weekend, 0, 23) =
        ConstVectorView(lam0_delta_weekday_weekend, pos, 23);
    eta_weekend[23] = 24.0 - sum(eta_weekend);

    double ans = 0;
    for(int d = 0; d < 7; ++d){
      const Vec &eta( (d==Sat || d==Sun) ? eta_weekend : eta_weekday);
      for(int h = 0; h < 24; ++h){
        double lam = lambda0 * delta[d] * eta[h] * exposure(d, h);
        ans += dpois(count(d, h), lam, true);
      }
    }
    return ans;
  }
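/*
 * Layout of lam0_delta_weekday_weekend assumed by loglike() above, read off
 * from the unpacking code (a sketch of the convention, not library
 * documentation):
 *
 *   [0]        lambda0, the overall daily rate
 *   [1..6]     first 6 day-of-week effects;   delta[6]        = 7  - sum(delta[0..5])
 *   [7..29]    first 23 weekday hour effects; eta_weekday[23] = 24 - sum(eta_weekday[0..22])
 *   [30..52]   first 23 weekend hour effects; eta_weekend[23] = 24 - sum(eta_weekend[0..22])
 *
 * so the vector carries 1 + 6 + 23 + 23 = 53 free parameters, and the
 * Poisson mean for cell (d, h) is lambda0 * delta[d] * eta[h] * exposure(d, h).
 */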
Example #6
 void WP::maximize_hourly_pattern(){
   const Mat &count(suf()->count());
   const Mat &exposure(suf()->exposure());
   const Vec &delta(day_of_week_pattern());
   double lambda = average_daily_rate();
   Vec eta_weekend(24, 0.0);
   Vec eta_weekday(24, 0.0);
   for(int h = 0; h < 24; ++h){
     double total_count_weekday = 0;
     double total_exposure_weekday = 0;
     double total_count_weekend = 0;
     double total_exposure_weekend = 0;
     double *total_count;
     double *total_exposure;
     for(int d = 0; d < 7; ++d){
       if(d==Sat || d==Sun){
         total_exposure = &total_exposure_weekend;
         total_count = &total_count_weekend;
       }else{
         total_exposure = &total_exposure_weekday;
         total_count = &total_count_weekday;
       }
       *total_count += count(d, h);
       *total_exposure += exposure(d, h) * lambda * delta[d];
     }
     eta_weekend[h] = total_count_weekend / total_exposure_weekend;
     eta_weekday[h] = total_count_weekday / total_exposure_weekday;
   }
   set_weekday_hourly_pattern(eta_weekday);
   set_weekend_hourly_pattern(eta_weekend);
 }
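/*
 * The update above is a closed-form ratio estimator: for each hour h,
 *
 *   eta[h] = sum_d count(d, h) / sum_d lambda * delta[d] * exposure(d, h)
 *
 * i.e. observed events divided by the events expected when eta[h] == 1,
 * accumulated separately over weekdays and weekend days.  A toy check with
 * made-up numbers: if two weekend cells contribute counts 6 and 4 with
 * expected-at-unit-eta values 2.5 and 2.5, then
 * eta_weekend[h] = (6 + 4) / (2.5 + 2.5) = 2.0.
 */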
Example #7
void
ri_tonemap_apply( const ri_display_t *disp, float result[3] )
{
    float      val;
    int        i;

    /* apply the gain/gamma exposure mapping and clamp each channel to [0, 1] */
    for ( i = 0; i < 3; i++ ) {
        val = exposure( result[i], disp->gain, disp->gamma );
        if ( val < 0.0 ) val = 0.0;
        if ( val > 1.0 ) val = 1.0;
        result[i] = ( RtFloat ) val;
    }
}
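/*
 * The exposure() helper used above is not part of this listing.  In a
 * RenderMan-style display pipeline (which the RtFloat cast and the
 * gain/gamma pair suggest) the exposure step is usually
 * out = (in * gain)^(1/gamma); the sketch below assumes that form and is
 * not the renderer's actual implementation.
 */
#include <math.h>

static float
display_exposure_sketch( float value, float gain, float gamma )
{
    /* gain first, then the inverse-gamma power curve (assumed form) */
    return powf( value * gain, 1.0f / gamma );
}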
Example #8
  MyClass() {
    QString xres("{ 'var' : 'xres', 'name' : 'Image Width', 'type' : 'int', 'min' : 1, 'max' : 4096, 'value' : 1024 }");
    QString yres("{ 'var' : 'yres', 'name' : 'Image Height', 'type' : 'int', 'min' : 1, 'max' : 4096, 'value' : 768 }");
    QString samplingWidth("{ 'var' : 'samplingWidth', 'name' : 'Sampling Width', 'type' : 'float', 'min' : 1, 'max' : 8, 'value' : 3 }");
    QString exposure("{ 'var' : 'exposure', 'name' : 'Exposure', 'type' : 'float', 'min' : 0.001, 'max' : 1000, 'value' : 1.0 }");

    QStringList atts;
    atts << xres << yres << samplingWidth << exposure;

    addAttributes(atts);
  }
Example #9
float GRExposureMap::exposure(float energy, GRLocation location) {
    int lowerIndex = findEnergy(energy);
    int upperIndex;
    if (lowerIndex == -1) {
        lowerIndex = 0;
        upperIndex = 1;
    } else if (lowerIndex == (int)energies.size()-1) {
        lowerIndex = (int)energies.size()-2;
        upperIndex = (int)energies.size()-1;
    } else {
        upperIndex = lowerIndex+1;
    }
    
    double lowerSpread = exposure(lowerIndex, location);
    double upperSpread = exposure(upperIndex, location);
    double lowerEnergy = energies[lowerIndex];
    double upperEnergy = energies[upperIndex];
    
    if (lowerEnergy == upperEnergy) return lowerSpread;
    else return lowerSpread + (upperSpread - lowerSpread) / (upperEnergy - lowerEnergy) * (energy - lowerEnergy);
}
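/*
 * The return statement above is plain linear interpolation between the two
 * bracketing grid points (E1, S1) and (E2, S2):
 *
 *     S(E) = S1 + (S2 - S1) * (E - E1) / (E2 - E1)
 *
 * For example, with S1 = 10 at E1 = 100 and S2 = 30 at E2 = 200 (in whatever
 * units the map stores), an energy of 150 gives 10 + 20 * 50 / 100 = 20.
 * The two edge cases at the top of the function simply extrapolate using the
 * first or last pair of grid points.
 */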
Example #10
int	main(int argc, char *argv[]) {
	// parse command line arguments
	int	c;
	while (EOF != (c = getopt(argc, argv, "d")))
		switch (c) {
		case 'd':
			debuglevel = LOG_DEBUG;
			break;
		}

	// create a camera instance
	SimCamera	camera;
	CcdPtr	ccd = camera.getCcd(0);
	GuiderPortPtr	guiderport = camera.getGuiderPort();

	// we will always use 1 sec exposures
	Exposure	exposure(ImageRectangle(ImagePoint(160,120),
		ImageSize(320, 240)), 1);

	// make 10 images 1 second apart (should give small drift)
	counter = 0;
	while (counter < 10) {
		ccd->startExposure(exposure);
		ImagePtr	image = ccd->getImage();
		writeimage(image);
	}

	// now move for 5 seconds
	guiderport->activate(5, 0, 0, 0);
	sleep(5);
	ccd->startExposure(exposure);
	writeimage(ccd->getImage());

	guiderport->activate(0, 5, 0, 0);
	sleep(5);
	ccd->startExposure(exposure);
	writeimage(ccd->getImage());
	
	guiderport->activate(0, 0, 5, 0);
	sleep(5);
	ccd->startExposure(exposure);
	writeimage(ccd->getImage());
	
	guiderport->activate(0, 0, 0, 5);
	sleep(5);
	ccd->startExposure(exposure);
	writeimage(ccd->getImage());
	

	return EXIT_SUCCESS;
}
Example #11
/*!
  \details
  No detailed description.
  */
void ToneMappingOperator::initialize(const System& system,
                                     const SettingNodeBase* settings) noexcept
{
  const auto system_settings = castNode<SystemSettingNode>(settings);

  // Gamma
  {
    inverse_gamma_ = zisc::invert(system.gamma());
  }
  // Exposure
  {
    exposure_ = zisc::cast<Float>(system_settings->exposure());
  }
}
Example #12
 void WP::maximize_average_daily_rate(){
   const Mat &count(suf()->count());
   const Mat &exposure(suf()->exposure());
   double total_count = 0;
   double total_exposure = 0;
   const Vec &delta(day_of_week_pattern());
   for(int d = 0; d < 7; ++d){
     const Vec &eta(hourly_pattern(d));
     for(int h = 0; h < 24; ++h){
       total_count += count(d, h);
       total_exposure += delta[d] * eta[h] * exposure(d, h);
     }
   }
   set_average_daily_rate(total_count / total_exposure);
 }
Example #13
/*!
  \details
  No detailed description.
  */
void ToneMappingOperator::map(System& system,
                              const HdrImage& hdr_image,
                              LdrImage* ldr_image) const noexcept
{
  ZISC_ASSERT(ldr_image != nullptr, "The LDR image is null.");
  ZISC_ASSERT(hdr_image.widthResolution() == ldr_image->widthResolution(),
              "The image width differs between the HDR and LDR images.");
  ZISC_ASSERT(hdr_image.heightResolution() == ldr_image->heightResolution(),
              "The image height differs between the HDR and LDR images.");
  auto map_luminance = [this, &system, &hdr_image, ldr_image](const uint task_id)
  {
    // Set the calculation range
    const auto range = system.calcTaskRange(hdr_image.numOfPixels(), task_id);
    // Apply tonemap to each pixel
    for (uint index = range[0]; index < range[1]; ++index) {
      auto rgba32 = Rgba32{};
      if (0.0 < hdr_image[index].y()) {
        auto xyz = hdr_image[index];
        // Tone mapping
        {
          auto yxy = ColorConversion::toYxy(xyz);
          const Float l = tonemap(exposure() * yxy.Y());
          yxy.Y() = zisc::clamp(l, 0.0, 1.0);
          xyz = ColorConversion::toXyz(yxy);
        }
        // Convert XYZ to RGB
        {
          const auto to_rgb_matrix = getXyzToRgbMatrix(system.colorSpace());
          auto rgb = ColorConversion::toRgb(xyz, to_rgb_matrix);
          rgb.clampAll(0.0, 1.0);
          rgb.correctGamma(inverseGamma());
          rgba32 = ColorConversion::toIntRgb(rgb);
        }
      }
      ldr_image->get(index) = rgba32;
    }
  };

  {
    auto& threads = system.threadManager();
    auto& work_resource = system.globalMemoryManager();
    constexpr uint begin = 0;
    const uint end = threads.numOfThreads();
    auto result = threads.enqueueLoop(map_luminance, begin, end, &work_resource);
    result.wait();
  }
}
Example #14
 void WP::maximize_daily_pattern(){
   const Mat &count(suf()->count());
   const Mat &exposure(suf()->exposure());
   Vec delta(7);
   double lambda = average_daily_rate();
   for(int d = 0; d < 7; ++d){
     const Vec &eta(hourly_pattern(d));
     double total_count = 0;
     double total_exposure = 0;
     for(int h = 0; h < 24; ++h){
       total_count += count(d, h);
       total_exposure += exposure(d, h) * lambda * eta[h];
     }
     delta[d] = total_count / total_exposure;
   }
   set_day_of_week_pattern(delta);
   // TODO(stevescott):  check that this enforces sum(delta) == 7
 }
Example #15
/**
 * \brief Check that the focusing parameters are all set
 */
bool	FocusWork::complete() {
	if (exposure().exposuretime() < 0) {
		debug(LOG_ERR, DEBUG_LOG, 0, "exposure time not set");
		return false;
	}
	if (_min == std::numeric_limits<unsigned short>::max()) {
		debug(LOG_ERR, DEBUG_LOG, 0, "minimum not set");
		return false;
	}
	if (_max == std::numeric_limits<unsigned short>::min()) {
		debug(LOG_ERR, DEBUG_LOG, 0, "maximum not set");
		return false;
	}
	if (_min >= _max) {
		debug(LOG_ERR, DEBUG_LOG, 0, "maximum <= minimum");
		return false;
	}
	if (steps() < 3) {
		debug(LOG_ERR, DEBUG_LOG, 0, "focusing needs at least 3 points");
		return false;
	}
	if (!_focusing.ccd()) {
		debug(LOG_ERR, DEBUG_LOG, 0, "ccd not set");
		return false;
	}
	if (!_focusing.focuser()) {
		debug(LOG_ERR, DEBUG_LOG, 0, "focuser not set");
		return false;
	}
	if (!_focusing.evaluator()) {
		debug(LOG_ERR, DEBUG_LOG, 0, "evaluator not set");
		return false;
	}
	if (!_focusing.solver()) {
		debug(LOG_ERR, DEBUG_LOG, 0, "solver not set");
		return false;
	}
	return true;
}
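/*
 * The _min and _max tests above rely on sentinel initial values: a minimum
 * that was never set stays at the largest representable value, and a maximum
 * that was never set stays at the smallest (i.e. 0).  A minimal sketch of
 * that convention with a hypothetical class, not the FocusWork source:
 */
#include <limits>

struct RangeSketch {
	// sentinels meaning "not configured yet"
	unsigned short	_min = std::numeric_limits<unsigned short>::max();
	unsigned short	_max = std::numeric_limits<unsigned short>::min();
	bool	configured() const {
		return (_min != std::numeric_limits<unsigned short>::max())
			&& (_max != std::numeric_limits<unsigned short>::min())
			&& (_min < _max);
	}
};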
Example #16
void	uvctest::testExposure() {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "get the first camera device");
	CameraPtr	camera = locator->getCamera(0);
	int	ccdindex = default_ccdid;
	debug(LOG_DEBUG, DEBUG_LOG, 0, "get the CCD no %d", ccdindex);
	CcdPtr	ccd = camera->getCcd(ccdindex);
	Exposure	exposure(ccd->getInfo().getFrame(),
		default_exposuretime);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "start an exposure: %s",
		exposure.toString().c_str());
	ccd->startExposure(exposure);
	ccd->exposureStatus();
	debug(LOG_DEBUG, DEBUG_LOG, 0, "retrieve an image");
	ImageSequence	imgseq = ccd->getImageSequence(2);
	ImagePtr	image = imgseq[imgseq.size() - 1];
	debug(LOG_DEBUG, DEBUG_LOG, 0, "image retrieved");
	// write the image to a file
	unlink("test.fits");
	FITSout	file("test.fits");
	file.write(image);

	if (ccdindex == 2) {
		DemosaicBilinear<unsigned char>        demosaicer;
		Image<unsigned char>	*mosaicimg
			= dynamic_cast<Image<unsigned char> *>(&*image);
		if (NULL != mosaicimg) {
			Image<RGB<unsigned char> >     *demosaiced
				= demosaicer(*mosaicimg);
			ImagePtr        demosaicedptr(demosaiced);
			unlink("test-demosaiced.fits");
			FITSout demosaicedfile("test-demosaiced.fits");
			demosaicedfile.write(demosaicedptr);
		} else {
			debug(LOG_ERR, DEBUG_LOG, 0, "not a mosaic image");
		}
	}
}
Example #17
//Function-------------------------------------------------------------------------
bool ImagePipeline::filmulate(matrix<float> &input_image,
                              matrix<float> &output_density,
                              ParameterManager * paramManager,
                              ImagePipeline * pipeline)
{
    FilmParams filmParam;
    AbortStatus abort;
    Valid valid;
    std::tie(valid, abort, filmParam) = paramManager->claimFilmParams(FilmFetch::initial);
    if(abort == AbortStatus::restart)
    {
        return true;
    }

    //Extract parameters from struct
    float initial_developer_concentration = filmParam.initialDeveloperConcentration;
    float reservoir_thickness = filmParam.reservoirThickness;
    float active_layer_thickness = filmParam.activeLayerThickness;
    float crystals_per_pixel = filmParam.crystalsPerPixel;
    float initial_crystal_radius = filmParam.initialCrystalRadius;
    float initial_silver_salt_density = filmParam.initialSilverSaltDensity;
    float developer_consumption_const = filmParam.developerConsumptionConst;
    float crystal_growth_const = filmParam.crystalGrowthConst;
    float silver_salt_consumption_const = filmParam.silverSaltConsumptionConst;
    float total_development_time = filmParam.totalDevelopmentTime;
    int agitate_count = filmParam.agitateCount;
    int development_steps = filmParam.developmentSteps;
    float film_area = filmParam.filmArea;
    float sigma_const = filmParam.sigmaConst;
    float layer_mix_const = filmParam.layerMixConst;
    float layer_time_divisor = filmParam.layerTimeDivisor;
    float rolloff_boundary = filmParam.rolloffBoundary;

    //Set up timers
    struct timeval initialize_start, development_start, develop_start,
               diffuse_start, agitate_start, layer_mix_start;
    double develop_dif = 0, diffuse_dif = 0, agitate_dif = 0, layer_mix_dif= 0;
    gettimeofday(&initialize_start,NULL);

    int nrows = (int) input_image.nr();
    int ncols = (int) input_image.nc()/3;
    int npix = nrows*ncols;

    //Now we activate some of the crystals on the film. This is literally
    //akin to exposing film to light.
    matrix<float> active_crystals_per_pixel;
    active_crystals_per_pixel = exposure(input_image, crystals_per_pixel,
                                         rolloff_boundary);

    //We set the crystal radius to a small seed value for each color.
    matrix<float> crystal_radius;
    crystal_radius.set_size(nrows,ncols*3);
    crystal_radius = initial_crystal_radius;

    //All layers share developer, so we only make it the original image size.
    matrix<float> developer_concentration;
    developer_concentration.set_size(nrows,ncols);
    developer_concentration = initial_developer_concentration;

    //Each layer gets its own silver salt which will feed crystal growth.
    matrix<float> silver_salt_density;
    silver_salt_density.set_size(nrows,ncols*3);
    silver_salt_density = initial_silver_salt_density;

    //Now, we set up the reservoir.
    //Because we don't want the film area to influence the brightness, we
    // increase the reservoir size in proportion.
#define FILMSIZE 864 //36x24mm
    reservoir_thickness *= film_area/FILMSIZE;
    float reservoir_developer_concentration = initial_developer_concentration;

    //This is a value used in diffuse to set the length scale.
    float pixels_per_millimeter = sqrt(npix/film_area);

    //Here we do some math for the control logic for the differential
    //equation approximation computations.
    float timestep = total_development_time/development_steps;
    int agitate_period;
    if(agitate_count > 0)
    {
        agitate_period = floor(development_steps/agitate_count);
    }
    else
    {
        agitate_period = 3*development_steps;
    }
    int half_agitate_period = floor(agitate_period/2);

    tout << "Initialization time: " << timeDiff(initialize_start)
         << " seconds" << endl;
    gettimeofday(&development_start,NULL);

    //Now we begin the main development/diffusion loop, which approximates the
    //differential equation of film development.
    for(int i = 0; i <= development_steps; i++)
    {
        //Check for cancellation
        std::tie(valid, abort, filmParam) = paramManager->claimFilmParams(FilmFetch::subsequent);
        if(abort == AbortStatus::restart)
        {
            return true;
        }

        //Updating for starting the development simulation. Valid is one too high here.
        pipeline->updateProgress(Valid::prefilmulation, float(i)/float(development_steps));

        gettimeofday(&develop_start,NULL);

        //This is where we perform the chemical reaction part.
        //The crystals grow.
        //The developer in the active layer is consumed.
        //So is the silver salt in the film.
        // The amount consumed increases as the crystals grow larger.
        //Because the developer and silver salts are consumed in bright regions,
        // this reduces the rate at which they grow. This gives us global
        // contrast reduction.
        develop(crystal_radius,crystal_growth_const,active_crystals_per_pixel,
                silver_salt_density,developer_concentration,
                active_layer_thickness,developer_consumption_const,
                silver_salt_consumption_const,timestep);

        develop_dif += timeDiff(develop_start);
        gettimeofday(&diffuse_start,NULL);

        //Check for cancellation
        std::tie(valid, abort, filmParam) = paramManager->claimFilmParams(FilmFetch::subsequent);
        if(abort == AbortStatus::restart)
        {
            return true;
        }

        //Updating for starting the diffusion simulation. Valid is one too high here.
        pipeline->updateProgress(Valid::prefilmulation, (float(i)+0.5)/float(development_steps));

        //Now, we are going to perform the diffusion part.
        //Here we mix the layer among itself, which grants us the
        // local contrast increases.
//        diffuse(developer_concentration,
//                sigma_const,
//                pixels_per_millimeter,
//                timestep);
        diffuse_short_convolution(developer_concentration,
                                  sigma_const,
                                  pixels_per_millimeter,
                                  timestep);

        diffuse_dif += timeDiff(diffuse_start);

        gettimeofday(&layer_mix_start,NULL);
        //This performs mixing between the active layer adjacent to the film
        // and the reservoir.
        //This keeps the effects from getting too crazy.
        layer_mix(developer_concentration,
                  active_layer_thickness,
                  reservoir_developer_concentration,
                  reservoir_thickness,
                  layer_mix_const,
                  layer_time_divisor,
                  pixels_per_millimeter,
                  timestep);

        layer_mix_dif += timeDiff(layer_mix_start);

        gettimeofday(&agitate_start,NULL);

        //I want agitation to only occur in the middle of development, not
        //at the very beginning or the ends. So, I add half the agitate
        //period to the current cycle count.
        if((i+half_agitate_period) % agitate_period ==0)
            agitate(developer_concentration, active_layer_thickness,
                    reservoir_developer_concentration, reservoir_thickness,
                    pixels_per_millimeter);

        agitate_dif += timeDiff(agitate_start);
    }
    tout << "Development time: " <<timeDiff(development_start)<< " seconds" << endl;
    tout << "Develop time: " << develop_dif << " seconds" << endl;
    tout << "Diffuse time: " << diffuse_dif << " seconds" << endl;
    tout << "Layer mix time: " << layer_mix_dif << " seconds" << endl;
    tout << "Agitate time: " << agitate_dif << " seconds" << endl;

    //Done filmulating, now do some housecleaning
    silver_salt_density.free();
    developer_concentration.free();


    //Now we compute the density (opacity) of the film.
    //We assume that overlapping crystals or dye clouds are
    //nonexistent. It works okay, for now...
    //The output is crystal_radius^2 * active_crystals_per_pixel
    struct timeval mult_start;
    gettimeofday(&mult_start,NULL);

    std::tie(valid, abort, filmParam) = paramManager->claimFilmParams(FilmFetch::subsequent);
    if(abort == AbortStatus::restart)
    {
        return true;
    }

    output_density = crystal_radius % crystal_radius % active_crystals_per_pixel;
    tout << "Output density time: "<<timeDiff(mult_start) << endl;
#ifdef DOUT
    debug_out.close();
#endif
    return false;
}
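/*
 * A worked example of the agitation schedule used in the loop above: with
 * development_steps = 12 and agitate_count = 3, agitate_period = 4 and
 * half_agitate_period = 2, so the condition (i + 2) % 4 == 0 fires at
 * i = 2, 6 and 10.  That places the three agitations in the middle of each
 * cycle rather than at the very beginning or end of development, which is
 * what the comment before the condition asks for.
 */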
Example #18
/***********************************************************************//**
 * @brief Return instrument response to elliptical source
 *
 * @param[in] event Observed event.
 * @param[in] source Source.
 * @param[in] obs Observation (not used).
 * @return Instrument response to elliptical source.
 *
 * Returns the instrument response to a specified elliptical source.
 ***************************************************************************/
double GCTAResponseCube::irf_elliptical(const GEvent&       event,
                                        const GSource&      source,
                                        const GObservation& obs) const
{
    // Initialise IRF
    double irf = 0.0;

    // Get pointer to CTA event bin
    if (!event.is_bin()) {
        std::string msg = "The current event is not a CTA event bin. "
                          "This method only works on binned CTA data. Please "
                          "make sure that a CTA observation containing binned "
                          "CTA data is provided.";
        throw GException::invalid_value(G_IRF_RADIAL, msg);
    }
    const GCTAEventBin* bin = static_cast<const GCTAEventBin*>(&event);

    // Get event attribute references
    const GSkyDir& obsDir  = bin->dir().dir();
    const GEnergy& obsEng  = bin->energy();
    const GTime&   obsTime = bin->time();

    // Get pointer to elliptical model
    const GModelSpatialElliptical* model = static_cast<const GModelSpatialElliptical*>(source.model());

    // Compute angle between model centre and measured photon direction and
    // position angle (radians)
    double rho_obs      = model->dir().dist(obsDir);
    double posangle_obs = model->dir().posang(obsDir);

    // Get livetime (in seconds)
    double livetime = exposure().livetime();

    // Continue only if livetime is >0 and if we're sufficiently close to
    // the model centre to get a non-zero response
    if ((livetime > 0.0) && (rho_obs <= model->theta_max()+psf().delta_max())) {

        // Get exposure
        irf = exposure()(obsDir, obsEng);

        // Continue only if exposure is positive
        if (irf > 0.0) {

            // Recover effective area from exposure
            irf /= livetime;

            // Get PSF component
            irf *= psf_elliptical(model, rho_obs, posangle_obs, obsDir, obsEng, obsTime);

            // Apply deadtime correction
            irf *= exposure().deadc();

        } // endif: exposure was positive
        
    } // endif: we were sufficiently close and livetime >0

    // Compile option: Check for NaN/Inf
    #if defined(G_NAN_CHECK)
    if (gammalib::is_notanumber(irf) || gammalib::is_infinite(irf)) {
        std::cout << "*** ERROR: GCTAResponseCube::irf_elliptical:";
        std::cout << " NaN/Inf encountered";
        std::cout << " irf=" << irf;
        std::cout << std::endl;
    }
    #endif

    // Return IRF value
    return irf;
}
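/*
 * Stripped of its guard conditions, the computation above factorises as
 *
 *   irf = exposure(obsDir, obsEng) / livetime   (effective area)
 *       * psf_elliptical(...)                   (PSF weight for the offset)
 *       * exposure().deadc()                    (deadtime correction)
 *
 * which is the same multiplicative structure used by irf_ptsrc() and irf()
 * further down in this listing.
 */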
/**
 * \brief Main function of the Focusing process
 */
void	VCurveFocusWork::main(astro::thread::Thread<FocusWork>& /* thread */) {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "start focusing work");
	if (!complete()) {
		focusingstatus(Focusing::FAILED);
		throw std::runtime_error("focuser not completely specified");
	}

	FocusCompute	fc;

	// make sure the configured minimum is within the focuser's range
	if (min() < focuser()->min()) {
		throw std::runtime_error("minimum too small");
	}

	// based on the exposure specification, build an evaluator
	ImageSize	size = exposure().size();
	int	radius = std::min(size.width(), size.height()) / 2;
	FWHM2Evaluator	evaluator(size.center(), radius);

	unsigned long	delta = max() - min();
	for (int i = 0; i < steps(); i++) {
		// compute new position
		unsigned short	position = min() + (i * delta) / (steps() - 1);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "measuring position %hu",
			position);

		// move to new position
		moveto(position);
		
		// get an image from the Ccd
		focusingstatus(Focusing::MEASURING);
		ccd()->startExposure(exposure());
		usleep(1000000 * exposure().exposuretime());
		ccd()->wait();
		ImagePtr	image = ccd()->getImage();
		
		// turn the image into a value
		FWHMInfo	fwhminfo = focusFWHM2_extended(image,
					size.center(), radius);
		double	value = fwhminfo.radius;

		// add the new value 
		fc.insert(std::pair<unsigned short, double>(position, value));

		// send the callback data
		callback(combine(image, fwhminfo), position, value);
	}

	// compute the best focus position
	double	focusposition = 0;
	try {
		focusposition = fc.focus();
	} catch (std::exception& x) {
		debug(LOG_DEBUG, DEBUG_LOG, 0, "no optimal focus position: %s",
			x.what());
		focusingstatus(Focusing::FAILED);
		return;
	}
	debug(LOG_DEBUG, DEBUG_LOG, 0, "optimal focus position: %f",
		focusposition);

	// plausibility check for the position
	if (!((focusposition >= min()) && (focusposition <= max()))) {
		focusingstatus(Focusing::FAILED);
		debug(LOG_DEBUG, DEBUG_LOG, 0, "focusing failed");
		return;
	}

	// move to the focus position
	unsigned short	targetposition = focusposition;
	moveto(targetposition);
	focusingstatus(Focusing::FOCUSED);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "target position reached");
}
Example #20
void djvColorProfileTest::operators()
{
    DJV_DEBUG("djvColorProfileTest::operators");
    
    {
        djvColorProfile a, b;
        a.type     = b.type     = djvColorProfile::LUT;
        a.gamma    = b.gamma    = 1.0;
        a.lut      = b.lut      = djvPixelData(djvPixelDataInfo(16, 1, djvPixel::L_U8));
        a.exposure = b.exposure = djvColorProfile::Exposure(1.0, 2.0, 3.0, 4.0);
        
        a.lut.zero();
        b.lut.zero();
        
        DJV_ASSERT(a.exposure == b.exposure);
        DJV_ASSERT(a.exposure != djvColorProfile::Exposure());
        
        DJV_ASSERT(a == b);
        DJV_ASSERT(a != djvColorProfile());
    }
    
    {
        djvColorProfile::Exposure exposure;
        
        QStringList s = QStringList() << "1.0" << "2.0" << "3.0" << "4.0";
        s >> exposure;
        
        DJV_ASSERT(djvMath::fuzzyCompare(1.0, exposure.value));
        DJV_ASSERT(djvMath::fuzzyCompare(2.0, exposure.defog));
        DJV_ASSERT(djvMath::fuzzyCompare(3.0, exposure.kneeLow));
        DJV_ASSERT(djvMath::fuzzyCompare(4.0, exposure.kneeHigh));
    }
    
    {
        djvColorProfile::Exposure exposure(1.0, 2.0, 3.0, 4.0);
        
        QStringList s;
        s << exposure;
        
        DJV_ASSERT((QStringList() << "1" << "2" << "3" << "4") == s);
    }
    
    {
        const djvColorProfile::Exposure a(1.0, 2.0, 3.0, 4.0);
        
        QStringList tmp;
        
        tmp << a;
        
        djvColorProfile::Exposure b;
        
        tmp >> b;
        
        DJV_ASSERT(a == b);
    }
    
    {
        const djvColorProfile::PROFILE a = djvColorProfile::LUT;
        
        QStringList tmp;
        
        tmp << a;
        
        djvColorProfile::PROFILE b = static_cast<djvColorProfile::PROFILE>(0);
        
        tmp >> b;
        
        DJV_ASSERT(a == b);
    }
    
    {
        DJV_DEBUG_PRINT(djvColorProfile::Exposure());
        
        DJV_DEBUG_PRINT(djvColorProfile::RAW);
        
        DJV_DEBUG_PRINT(djvColorProfile());
    }
}
Example #21
float exposure(const Camera& c) noexcept {
    const FCamera& camera = upcast(c);
    return exposure(camera.getAperture(), camera.getShutterSpeed(), camera.getSensitivity());
}
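/*
 * The three-argument overload called above is not shown in this listing.
 * A common photometric definition for such a helper (an assumption about
 * the implementation, not a quote of it) converts aperture N, shutter time
 * t and ISO S to an EV100 and then to a luminance scale factor:
 */
#include <cmath>

static float camera_exposure_sketch(float aperture, float shutterSpeed,
                                    float sensitivity) noexcept {
    // EV100 = log2(N^2 / t * 100 / S)   (assumed convention)
    const float ev100 = std::log2((aperture * aperture) / shutterSpeed
                                  * 100.0f / sensitivity);
    // photometric exposure scale: 1 / (1.2 * 2^EV100)
    return 1.0f / (1.2f * std::exp2(ev100));
}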
Example #22
File: ctmodel.cpp  Project: jdevin/ctools
/***********************************************************************//**
 * @brief Get observation container
 *
 * Get an observation container according to the user parameters. The method
 * supports loading of an individual FITS file or an observation definition
 * file in XML format.
 *
 * If the input filename is empty, the method checks for the existence of the
 * "expcube", "psfcube" and "bkgcube" parameters. If file names have been
 * specified, the method loads the files and creates a dummy events cube that
 * is appended to the observation container.
 *
 * If no file names are specified for the "expcube", "psfcube" or "bkgcube"
 * parameters, the method reads the necessary parameters to build a CTA
 * observation from scratch.
 *
 * The method sets m_append_cube = true and m_binned = true in case a
 * stacked observation is requested (as detected by the presence of the
 * "expcube", "psfcube", and "bkgcube" parameters). In that case, it appends
 * a dummy event cube to the observation container.
 ***************************************************************************/
void ctmodel::get_obs(void)
{
    // Get the filename from the input parameters
    std::string filename = (*this)["inobs"].filename();

    // If no observation definition file has been specified then read all
    // parameters that are necessary to create an observation from scratch
    if ((filename == "NONE") || (gammalib::strip_whitespace(filename) == "")) {

        // Get response cube filenames
        std::string expcube = (*this)["expcube"].filename();
        std::string psfcube = (*this)["psfcube"].filename();
        std::string bkgcube = (*this)["bkgcube"].filename();

        // If the filenames are valid then build an observation from cube
        // response information
        if ((expcube != "NONE") && (psfcube != "NONE") && (bkgcube != "NONE") &&
            (gammalib::strip_whitespace(expcube) != "") &&
            (gammalib::strip_whitespace(psfcube) != "") &&
            (gammalib::strip_whitespace(bkgcube) != "")) {

            // Get exposure, PSF and background cubes
            GCTACubeExposure   exposure(expcube);
            GCTACubePsf        psf(psfcube);
            GCTACubeBackground background(bkgcube);

            // Create energy boundaries
            GEbounds ebounds = create_ebounds();

            // Create dummy sky map cube
            GSkyMap map("CAR","GAL",0.0,0.0,1.0,1.0,1,1,ebounds.size());

            // Create event cube
            GCTAEventCube cube(map, ebounds, exposure.gti());

            // Create CTA observation
            GCTAObservation cta;
            cta.events(cube);
            cta.response(exposure, psf, background);

            // Append observation to container
            m_obs.append(cta);

            // Signal that we are in binned mode
            m_binned = true;

            // Signal that we appended a cube
            m_append_cube = true;

        } // endif: cube response information was available

        // ... otherwise build an observation from IRF response information
        else {

            // Create CTA observation
            GCTAObservation cta = create_cta_obs();

            // Set response
            set_obs_response(&cta);

            // Append observation to container
            m_obs.append(cta);

        }

    } // endif: filename was "NONE" or ""

    // ... otherwise we have a file name
    else {

        // If file is a FITS file then create an empty CTA observation
        // and load file into observation
        if (gammalib::is_fits(filename)) {

            // Allocate empty CTA observation
            GCTAObservation cta;

            // Load data
            cta.load(filename);

            // Set response
            set_obs_response(&cta);

            // Append observation to container
            m_obs.append(cta);

            // Signal that no XML file should be used for storage
            m_use_xml = false;

        }

        // ... otherwise load file into observation container
        else {

            // Load observations from XML file
            m_obs.load(filename);

            // For all observations that have no response, set the response
            // from the task parameters
            set_response(m_obs);

            // Set observation boundary parameters (emin, emax, rad)
            set_obs_bounds(m_obs);

            // Signal that XML file should be used for storage
            m_use_xml = true;

        } // endelse: file was an XML file

    }

    // Return
    return;

}
Example #23
/**
 * \brief main function for the focus program
 */
int	main(int argc, char *argv[]) {
	int	c;
	double	exposuretime = 0.1;
	unsigned int	cameraid = 0;
	unsigned int	ccdid = 0;
	int	length = 512;
	std::string	cameratype("uvc");

	while (EOF != (c = getopt(argc, argv, "de:m:c:C:l:")))
		switch (c) {
		case 'd':
			debuglevel = LOG_DEBUG;
			break;
		case 'm':
			cameratype = std::string(optarg);
			break;
		case 'C':
			cameraid = atoi(optarg);
			break;
		case 'c':
			ccdid = atoi(optarg);
			break;
		case 'e':
			exposuretime = atof(optarg);
			break;
		case 'l':
			length = atoi(optarg);
			break;
		}

	// load the camera driver module
	Repository	repository;
	debug(LOG_DEBUG, DEBUG_LOG, 0, "loading module %s",
		cameratype.c_str());
	ModulePtr	module = repository.getModule(cameratype);
	module->open();


	// get the camera
	DeviceLocatorPtr	locator = module->getDeviceLocator();
	std::vector<std::string>	cameras = locator->getDevicelist();
	if (0 == cameras.size()) {
		std::cerr << "no cameras found" << std::endl;
		return EXIT_FAILURE;
	}
	if (cameraid >= cameras.size()) {
		std::string	msg = stringprintf("camera %d out of range",
			cameraid);
		debug(LOG_ERR, DEBUG_LOG, 0, "%s\n", msg.c_str());
		throw std::range_error(msg);
	}
	std::string	cameraname = cameras[cameraid];
	CameraPtr	camera = locator->getCamera(cameraname);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "camera loaded: %s", cameraname.c_str());

	// get the ccd
	CcdPtr	ccd = camera->getCcd(ccdid);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "get a ccd: %s",
		ccd->getInfo().toString().c_str());

	// get a centered length x length frame
	ImageSize	framesize(length, length);
	ImageRectangle	frame = ccd->getInfo().centeredRectangle(framesize);
	Exposure	exposure(frame, exposuretime);
	debug(LOG_DEBUG, DEBUG_LOG, 0, "exposure prepared: %s",
		exposure.toString().c_str());

	// retrieve an image
	ccd->startExposure(exposure);
	ImagePtr	image = ccd->getImage();

	// write image
	unlink("test.fits");
	FITSout	out("test.fits");
	out.write(image);

	// apply a mask to keep the border out
	CircleFunction	circle(ImagePoint(length/2, length/2), length/2, 0.8);
	mask(circle, image);
	unlink("masked.fits");
	FITSout	maskout("masked.fits");
	maskout.write(image);

#if 0
	// compute the FOM
	double	fom = focusFOM(image, true,
		Subgrid(ImagePoint(1, 0), ImageSize(1, 1)));
	std::cout << "FOM: " << fom << std::endl;
#endif


	return EXIT_SUCCESS;
}
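/*
 * centeredRectangle() above picks a length x length window in the middle of
 * the CCD.  Assuming the obvious arithmetic (the helper's implementation is
 * not shown here), a CCD of size W x H and a frame of size w x h give an
 * origin of ((W - w) / 2, (H - h) / 2); for example a 512 x 512 frame on a
 * 1280 x 960 chip starts at (384, 224).
 */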
Example #24
/***********************************************************************//**
 * @brief Return instrument response to point source
 *
 * @param[in] event Observed event.
 * @param[in] source Source.
 * @param[in] obs Observation (not used).
 * @return Instrument response to point source.
 *
 * Returns the instrument response to a specified point source.
 ***************************************************************************/
double GCTAResponseCube::irf_ptsrc(const GEvent&       event,
                                   const GSource&      source,
                                   const GObservation& obs) const
{
    // Initialise IRF
    double irf = 0.0;

    // Get pointer to model source model
    const GModelSpatialPointSource* ptsrc = static_cast<const GModelSpatialPointSource*>(source.model());

    // Get point source direction
    GSkyDir srcDir = ptsrc->dir();

    // Get pointer to CTA event bin
    if (!event.is_bin()) {
        std::string msg = "The current event is not a CTA event bin. "
                          "This method only works on binned CTA data. Please "
                          "make sure that a CTA observation containing binned "
                          "CTA data is provided.";
        throw GException::invalid_value(G_IRF_PTSRC, msg);
    }
    const GCTAEventBin* bin = static_cast<const GCTAEventBin*>(&event);
    
    // Determine angular separation between true and measured photon
    // direction in radians
    double delta = bin->dir().dir().dist(srcDir);

    // Get maximum angular separation for PSF (in radians)
    double delta_max = psf().delta_max();

    // Get livetime (in seconds)
    double livetime = exposure().livetime();

    // Continue only if livetime is >0 and if we're sufficiently close
    // to the PSF
    if ((livetime > 0.0) && (delta <= delta_max)) {

        // Get exposure
        irf = exposure()(srcDir, source.energy());

        // Multiply-in PSF
        if (irf > 0.0) {

            // Recover effective area from exposure
            irf /= livetime;

            // Get PSF component
            irf *= psf()(srcDir, delta, source.energy());

            // Apply deadtime correction
            irf *= exposure().deadc();

        } // endif: exposure was non-zero

    } // endif: we were sufficiently close to PSF and livetime >0

    // Compile option: Check for NaN/Inf
    #if defined(G_NAN_CHECK)
    if (gammalib::is_notanumber(irf) || gammalib::is_infinite(irf)) {
        std::cout << "*** ERROR: GCTAResponseCube::irf_ptsrc:";
        std::cout << " NaN/Inf encountered";
        std::cout << " irf=" << irf;
        std::cout << std::endl;
    }
    #endif

    // Return IRF value
    return irf;
}
void	CcdTask::start() {
	_ccd->startExposure(exposure());
}
Example #26
/***********************************************************************//**
 * @brief Return instrument response
 *
 * @param[in] event Observed event.
 * @param[in] photon Incident photon.
 * @param[in] obs Observation (not used).
 * @return Instrument response.
 ***************************************************************************/
double GCTAResponseCube::irf(const GEvent&       event,
                             const GPhoton&      photon,
                             const GObservation& obs) const
{
    // Retrieve event instrument direction
    const GCTAInstDir& dir = retrieve_dir(G_IRF, event);

    // Get event attributes
    const GSkyDir& obsDir = dir.dir();
    //const GEnergy& obsEng = event.energy();

    // Get photon attributes
    const GSkyDir& srcDir  = photon.dir();
    const GEnergy& srcEng  = photon.energy();
    //const GTime&   srcTime = photon.time();

    // Determine angular separation between true and measured photon
    // direction in radians
    double delta = obsDir.dist(srcDir);

    // Get maximum angular separation for PSF (in radians)
    double delta_max = psf().delta_max();

    // Initialise IRF value
    double irf = 0.0;

    // Get livetime (in seconds)
    double livetime = exposure().livetime();

    // Continue only if livetime is >0 and if we're sufficiently close
    // to the PSF
    if ((livetime > 0.0) && (delta <= delta_max)) {

        // Get exposure
        irf = exposure()(srcDir, srcEng);

        // Multiply-in PSF
        if (irf > 0.0) {

            // Get PSF component
            irf *= psf()(srcDir, delta, srcEng);

            // Divide by livetime
            irf /= livetime;

            // Apply deadtime correction
            irf *= exposure().deadc();

        } // endif: Aeff was non-zero

    } // endif: we were sufficiently close to PSF and livetime was >0

    // Compile option: Check for NaN/Inf
    #if defined(G_NAN_CHECK)
    if (gammalib::is_notanumber(irf) || gammalib::is_infinite(irf)) {
        std::cout << "*** ERROR: GCTAResponseCube::irf:";
        std::cout << " NaN/Inf encountered";
        std::cout << " irf=" << irf;
        std::cout << std::endl;
    }
    #endif

    // Return IRF value
    return irf;
}
/**
 * \brief Main command interpreter function
 *
 * This operator analyzes the command arguments and calls the appropriate
 * subcommand method.
 */
void	guidercommand::operator()(const std::string& /* command */,
		const std::vector<std::string>& arguments) {
	if (arguments.size() < 2) {
		throw command_error("guider command requires more "
			"arguments");
	}
	std::string	guiderid = arguments[0];
	std::string	subcommand = arguments[1];
	debug(LOG_DEBUG, DEBUG_LOG, 0, "guiderid: %s", guiderid.c_str());

	Guiders	guiders;
	GuiderWrapper	guider = guiders.byname(guiderid);

	if (subcommand == "info") {
		info(guider, arguments);
		return;
	}

	if (subcommand == "exposure") {
		exposure(guider, arguments);
		return;
	}

	if (subcommand == "exposuretime") {
		exposuretime(guider, arguments);
		return;
	}

	if (subcommand == "binning") {
		binning(guider, arguments);
		return;
	}

	if (subcommand == "size") {
		size(guider, arguments);
		return;
	}

	if (subcommand == "offset") {
		offset(guider, arguments);
		return;
	}

	if (subcommand == "star") {
		star(guider, arguments);
		return;
	}

	if (subcommand == "calibration") {
		calibration(guider, arguments);
		return;
	}

	if (subcommand == "start") {
		start(guider, arguments);
		return;
	}

	if (subcommand == "stop") {
		stop(guider, arguments);
		return;
	}

	if (subcommand == "wait") {
		wait(guider, arguments);
		return;
	}

	if (subcommand == "image") {
		image(guider, arguments);
		return;
	}
}
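/*
 * The chain of string comparisons above is a hand-written dispatch.  The
 * same shape can be expressed as a lookup table; the sketch below uses free
 * functions with hypothetical names standing in for the guidercommand
 * member handlers, so it illustrates the pattern rather than this class.
 */
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

static void	info_sketch(const std::vector<std::string>&) {
	std::cout << "info" << std::endl;
}
static void	start_sketch(const std::vector<std::string>&) {
	std::cout << "start" << std::endl;
}

static void	dispatch_sketch(const std::string& subcommand,
		const std::vector<std::string>& arguments) {
	// table mapping subcommand names to their handlers
	static const std::map<std::string,
		std::function<void(const std::vector<std::string>&)> >	handlers = {
		{ "info",	info_sketch },
		{ "start",	start_sketch },
	};
	auto	h = handlers.find(subcommand);
	if (h != handlers.end()) {
		h->second(arguments);
	}
}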