int main(int argc, char** argv)
{
    //the options struct holds all run-time options. 
    options opt; 
    opt.overwrite = false ; 
    opt.gaussBool = false ; //Initialize --overwrite flag to false. Probably should handle 
                                //this in read_options. 
    opt.polyFit = false ; 

    vector<double> data ; 
    double avg, std ; 
    vector <vector<double> > hist ; 

    read_options(argc, argv, opt) ; 

    //Print run-time parameters
    cout << "inFile : " << opt.inFile << endl ; 
    cout << "outFile: " << opt.outFile << endl ; 
    cout << "numBins: " << opt.numBins << endl ; 
    cout << "overwrite : " ; 
    if (opt.overwrite) { cout << "True" << endl ; }
    else { cout << "False" << endl; }
    if ( opt.gaussBool ) {
        cout << "Gaussian fit file : " << opt.gaussFile << endl; 
    }
    if ( opt.polyFit ) {
        cout << "Polynomial fit file : " << opt.polyfitFile << endl; 
        cout << "Number of terms : " << opt.numTerms << endl ; 
    }

    //Begin program
    
    data = read_data(opt.inFile)  ; 

    cout << endl; 
    avg = average(data) ; 
    cout << "average = " << avg << endl; 

    std = stdDev(data,avg) ; 
    cout << "std     = " << std << endl ; 

    hist = histogram(data, opt.numBins) ; 
    print(hist,opt.outFile); 
    if (opt.gaussBool) 
        print_gauss(hist, opt.gaussFile, std, avg) ; 
    if (opt.polyFit) 
        fit_polynomial(hist,avg, opt.numTerms, opt.polyfitFile) ; 
    return 0; 
}
示例#2
0
// Find the (sub-sample) lag that maximises the cross-correlation of a[]
// against b[], both of length n. An exhaustive integer-lag search over
// [-n, n) is refined, when the peak is interior, by a quadratic fit of the
// three correlation values around it. Returns the estimated lag as a double.
static double find_max_correlation(double *a, double *b, uint16_t n)
{
	int32_t best_idx = 0;
	double best = -1;
	for (int32_t j = -n; j < n; j++)
	{
		double ret = 0;
		for (int32_t i = 0; i < n; i++)
		{
			// Assume data outside the array bounds are zero.
			// k must be signed: with an unsigned k the k >= 0 test
			// is vacuous and correctness relied on 16-bit wraparound
			// (which breaks for n >= 32768).
			int32_t k = i + j;
			ret += (k >= 0 && k < n) ? a[i]*b[k] : 0;
		}

		if (ret > best)
		{
			best = ret;
			best_idx = j;
		}
	}

	// Best fit is at the edge of the correlation.
	// Can't improve our estimate!
	if (best_idx == -n || best_idx == n - 1)
		return best_idx;

	// Improve estimate by doing a quadratic fit of
	// the three points around the peak. The correlations are normalised
	// by the peak value, so the centre sample is exactly 1.
	double offset[3] = { -1, 0, 1 };
	double corr[3] = { 0, 1, 0 };
	double p[3] = { 0, 0, 0 };
	for (int32_t i = 0; i < n; i++)
	{
		// Signed neighbour indices with >= 0 bounds; the previous
		// unsigned `k1 > 0` test wrongly excluded index 0 and was
		// inconsistent with the search loop above.
		int32_t k1 = i + best_idx - 1;
		corr[0] += (k1 >= 0 && k1 < n) ? a[i]*b[k1] : 0;

		int32_t k2 = i + best_idx + 1;
		corr[2] += (k2 >= 0 && k2 < n) ? a[i]*b[k2] : 0;
	}

	corr[0] /= best;
	corr[2] /= best;

	// Fall back to the integer lag if the quadratic fit fails.
	if (fit_polynomial(offset, corr, NULL, 3, p, 2))
		return best_idx;

	// Vertex of the fitted parabola p[0] + p[1]*x + p[2]*x^2.
	return best_idx - p[1] / (2 * p[2]);
}
// Build a closed-form "accelerator" for one path through the loop: choose a
// fresh symbolic loop counter, fit a polynomial in that counter to each
// scalar variable the path modifies, and emit a scratch program of the shape
//
//   assume(pre_guard);
//   loop_counter = *;
//   target_i = polynomial_i(loop_counter);
//   ...
//   assume(guard);
//
// Returns true and fills in `accelerator` on success; returns false when no
// unexplored paths remain, weakest-precondition computation fails, or the
// path's array assignments cannot be modelled.
bool disjunctive_polynomial_accelerationt::accelerate(
    path_acceleratort &accelerator) {
  std::map<exprt, polynomialt> polynomials;
  scratch_programt program(symbol_table);

  accelerator.clear();

#ifdef DEBUG
  std::cout << "Polynomial accelerating program:" << std::endl;

  for (goto_programt::instructionst::iterator it = goto_program.instructions.begin();
       it != goto_program.instructions.end();
       ++it) {
    if (loop.find(it) != loop.end()) {
      goto_program.output_instruction(ns, "scratch", std::cout, it);
    }
  }

  std::cout << "Modified:" << std::endl;

  for (expr_sett::iterator it = modified.begin();
       it != modified.end();
       ++it) {
    std::cout << expr2c(*it, ns) << std::endl;
  }
#endif

  // Lazily create the loop counter symbol shared by all fitted polynomials.
  if (loop_counter.is_nil()) {
    symbolt loop_sym = utils.fresh_symbol("polynomial::loop_counter",
        unsigned_poly_type());
    loop_counter = loop_sym.symbol_expr();
  }

  // Pick the next unexplored path through the loop; each call to this
  // function accelerates one path, so failure means all are enumerated.
  patht &path = accelerator.path;
  path.clear();

  if (!find_path(path)) {
    // No more paths!
    return false;
  }

#if 0
  for (expr_sett::iterator it = modified.begin();
       it != modified.end();
       ++it) {
    polynomialt poly;
    exprt target = *it;

    if (it->type().id() == ID_bool) {
      // Hack: don't try to accelerate booleans.
      continue;
    }

    if (target.id() == ID_index ||
        target.id() == ID_dereference) {
      // We'll handle this later.
      continue;
    }

    if (fit_polynomial(target, poly, path)) {
      std::map<exprt, polynomialt> this_poly;
      this_poly[target] = poly;

      if (utils.check_inductive(this_poly, path)) {
#ifdef DEBUG
        std::cout << "Fitted a polynomial for " << expr2c(target, ns) <<
          std::endl;
#endif
        polynomials[target] = poly;
        accelerator.changed_vars.insert(target);
        break;
      }
    }
  }

  if (polynomials.empty()) {
    return false;
  }
#endif

  // Fit polynomials for the other variables.
  expr_sett dirty;
  utils.find_modified(accelerator.path, dirty);
  polynomial_acceleratort path_acceleration(symbol_table, goto_functions,
      loop_counter);
  goto_programt::instructionst assigns;

  // Collect only the assignments/declarations on the chosen path; these are
  // what the polynomial fitter and the array handling below reason about.
  for (patht::iterator it = accelerator.path.begin();
       it != accelerator.path.end();
       ++it) {
    if (it->loc->is_assign() || it->loc->is_decl()) {
      assigns.push_back(*(it->loc));
    }
  }

  // Try to fit a closed form for every variable the path writes. Variables
  // we cannot model are recorded in dirty_vars so the caller can havoc them.
  for (expr_sett::iterator it = dirty.begin();
       it != dirty.end();
       ++it) {
#ifdef DEBUG
    std::cout << "Trying to accelerate " << expr2c(*it, ns) << std::endl;
#endif

    if (it->type().id() == ID_bool) {
      // Hack: don't try to accelerate booleans.
      accelerator.dirty_vars.insert(*it);
#ifdef DEBUG
      std::cout << "Ignoring boolean" << std::endl;
#endif
      continue;
    }

    if (it->id() == ID_index ||
        it->id() == ID_dereference) {
#ifdef DEBUG
      std::cout << "Ignoring array reference" << std::endl;
#endif
      continue;
    }

    if (accelerator.changed_vars.find(*it) != accelerator.changed_vars.end()) {
      // We've accelerated variable this already.
#ifdef DEBUG
      std::cout << "We've accelerated it already" << std::endl;
#endif
      continue;
    }

    // Hack: ignore variables that depend on array values..
    exprt array_rhs;

    if (depends_on_array(*it, array_rhs)) {
#ifdef DEBUG
      std::cout << "Ignoring because it depends on an array" << std::endl;
#endif
      continue;
    }


    polynomialt poly;
    exprt target(*it);

    // A fitted polynomial is only usable if it is inductive along the path.
    if (path_acceleration.fit_polynomial(assigns, target, poly)) {
      std::map<exprt, polynomialt> this_poly;
      this_poly[target] = poly;

      if (utils.check_inductive(this_poly, accelerator.path)) {
        polynomials[target] = poly;
        accelerator.changed_vars.insert(target);
        continue;
      }
    }

#ifdef DEBUG
    std::cout << "Failed to accelerate " << expr2c(*it, ns) << std::endl;
#endif

    // We weren't able to accelerate this target...
    accelerator.dirty_vars.insert(target);
  }


  /*
  if (!utils.check_inductive(polynomials, assigns)) {
    // They're not inductive :-(
    return false;
  }
  */

  // Stash pre-iteration copies of the accelerated variables so the guard
  // can be phrased in terms of their values before the loop runs.
  substitutiont stashed;
  utils.stash_polynomials(program, polynomials, stashed, path);

  exprt guard;
  bool path_is_monotone;

  try {
    path_is_monotone = utils.do_assumptions(polynomials, path, guard);
  } catch (std::string s) {
    // Couldn't do WP.
    std::cout << "Assumptions error: " << s << std::endl;
    return false;
  }

  exprt pre_guard(guard);

  // Rewrite the guard so it refers to the fitted closed forms.
  for (std::map<exprt, polynomialt>::iterator it = polynomials.begin();
       it != polynomials.end();
       ++it) {
    replace_expr(it->first, it->second.to_expr(), guard);
  }

  if (path_is_monotone) {
    // OK cool -- the path is monotone, so we can just assume the condition for
    // the last iteration.
    replace_expr(loop_counter,
                 minus_exprt(loop_counter, from_integer(1, loop_counter.type())),
                 guard);
  } else {
    // The path is not monotone, so we need to introduce a quantifier to ensure
    // that the condition held for all 0 <= k < n.
    symbolt k_sym = utils.fresh_symbol("polynomial::k", unsigned_poly_type());
    exprt k = k_sym.symbol_expr();

    exprt k_bound = and_exprt(binary_relation_exprt(from_integer(0, k.type()), "<=", k),
                              binary_relation_exprt(k, "<", loop_counter));
    replace_expr(loop_counter, k, guard);

    simplify(guard, ns);

    implies_exprt implies(k_bound, guard);

    // Build: forall k. (0 <= k < loop_counter) ==> guard[k].
    exprt forall(ID_forall);
    forall.type() = bool_typet();
    forall.copy_to_operands(k);
    forall.copy_to_operands(implies);

    guard = forall;
  }

  // All our conditions are met -- we can finally build the accelerator!
  // It is of the form:
  //
  // loop_counter = *;
  // target1 = polynomial1;
  // target2 = polynomial2;
  // ...
  // assume(guard);
  // assume(no overflows in previous code);

  program.add_instruction(ASSUME)->guard = pre_guard;
  program.assign(loop_counter, side_effect_expr_nondett(loop_counter.type()));

  for (std::map<exprt, polynomialt>::iterator it = polynomials.begin();
       it != polynomials.end();
       ++it) {
    program.assign(it->first, it->second.to_expr());
    accelerator.changed_vars.insert(it->first);
  }

  // Add in any array assignments we can do now.
  if (!utils.do_arrays(assigns, polynomials, loop_counter, stashed, program)) {
    // We couldn't model some of the array assignments with polynomials...
    // Unfortunately that means we just have to bail out.
    return false;
  }

  program.add_instruction(ASSUME)->guard = guard;
  program.fix_types();

  if (path_is_monotone) {
    utils.ensure_no_overflows(program);
  }

  // Hand the generated instructions over to the caller's accelerator.
  accelerator.pure_accelerator.instructions.swap(program.instructions);

  return true;
}
示例#4
0
// Follow the electron-density gradient uphill from r0 until a local maximum
// (or a nucleus) is reached. basis supplies the basis set, P the density
// matrix. nd and ng are incremented by the number of density and gradient
// evaluations performed. Returns the final position.
coords_t track_to_maximum(const BasisSet & basis, const arma::mat & P, const coords_t r0, size_t & nd, size_t & ng) {
  // Track density to maximum.
  coords_t r(r0);
  size_t iiter=0;

  // Amount of density and gradient evaluations
  size_t ndens=0;
  size_t ngrad=0;
  
  // Nuclear coordinates
  arma::mat nuccoord=basis.get_nuclear_coords();
    
  // Initial step size to use
  const double steplen=0.1;
  double dr(steplen);
  // Maximum amount of steps to take in line search
  const size_t nline=5;
    
  // Density and gradient
  double d;
  arma::vec g;
    
  while(true) {
    // Iteration number
    iiter++;
      
    // Compute density and gradient
    compute_density_gradient(P,basis,r,d,g);
    double gnorm=arma::norm(g,2);
    fflush(stdout);
    ndens++; ngrad++;
     
    // Normalize gradient and perform line search
    coords_t gn;
    gn.x=g(0)/gnorm;
    gn.y=g(1)/gnorm;
    gn.z=g(2)/gnorm;
    std::vector<double> len, dens;
#ifdef BADERDEBUG
    printf("Gradient norm %e. Normalized gradient at % f % f % f is % f % f % f\n",gnorm,r.x,r.y,r.z,gn.x,gn.y,gn.z);
#endif

    // Determine step size to use by finding out minimal distance to nuclei
    arma::rowvec rv(3);
    rv(0)=r.x; rv(1)=r.y; rv(2)=r.z;
    // and the closest nucleus
    double mindist=arma::norm(rv-nuccoord.row(0),2);
    arma::rowvec closenuc=nuccoord.row(0);
    for(size_t i=1;i<nuccoord.n_rows;i++) {
      double t=arma::norm(rv-nuccoord.row(i),2);
      if(t<mindist) {
	mindist=t;
	closenuc=nuccoord.row(i);
      }
    }
    //      printf("Minimal distance to nucleus is %e.\n",mindist);

    // Starting point
    len.push_back(0.0);
    dens.push_back(d);

#ifdef BADERDEBUG
    printf("Step length %e: % f % f % f, density %e, difference %e\n",len[0],r.x,r.y,r.z,dens[0],0.0);
#endif

    // Trace until density does not increase any more.
    // Step lengths grow as dr, 2*dr, 3*dr, ... along the normalized
    // gradient, up to at most nline samples.
    do {
      // Increase step size
      len.push_back(len.size()*dr);
      // New point
      coords_t pt=r+gn*len[len.size()-1];
      // and density
      dens.push_back(compute_density(P,basis,pt));
      ndens++;

#ifdef BADERDEBUG	
      printf("Step length %e: % f % f % f, density %e, difference %e\n",len[len.size()-1],pt.x,pt.y,pt.z,dens[dens.size()-1],dens[dens.size()-1]-dens[0]);
#endif
	
    } while(dens[dens.size()-1]>dens[dens.size()-2] && dens.size()<nline);

    // Optimal line length
    double optlen=0.0;
    if(dens[dens.size()-1]>=dens[dens.size()-2])
      // Maximum allowed
      optlen=len[len.size()-1];

    else {
      // Interpolate: bracket the maximum with three (length, density)
      // samples and locate the stationary point of a fitted polynomial.
      arma::vec ilen(3), idens(3);
      
      if(dens.size()==2) {
	// Only two samples available; evaluate the midpoint to get a third.
	ilen(0)=len[len.size()-2];
	ilen(2)=len[len.size()-1];
	ilen(1)=(ilen(0)+ilen(2))/2.0;
	
	idens(0)=dens[dens.size()-2];
	idens(2)=dens[dens.size()-1];
	idens(1)=compute_density(P,basis,r+gn*ilen(1));
	ndens++;
      } else {
	// Use the last three samples, which bracket the maximum.
	ilen(0)=len[len.size()-3];
	ilen(1)=len[len.size()-2];
	ilen(2)=len[len.size()-1];
	
	idens(0)=dens[dens.size()-3];
	idens(1)=dens[dens.size()-2];
	idens(2)=dens[dens.size()-1];
      }
      
#ifdef BADERDEBUG	
      arma::trans(ilen).print("Step lengths");
      arma::trans(idens).print("Densities");
#endif
      
      // Fit polynomial
      arma::vec p=fit_polynomial(ilen,idens);
      
      // and solve for the roots of its derivative
      arma::vec roots=solve_roots(derivative_coefficients(p));
      
      // The optimal step length is the first stationary point that lies
      // inside the bracketing interval; otherwise optlen stays 0.
      for(size_t i=0;i<roots.n_elem;i++)
	if(roots(i)>=ilen(0) && roots(i)<=ilen(2)) {
	  optlen=roots(i);
	  break;
	}
    }

#ifdef BADERDEBUG      
    printf("Optimal step length is %e.\n",optlen);
#endif

    // Snap to the nearest nucleus when we are within the convergence
    // threshold of it (nuclei are density maxima).
    if(std::min(optlen,mindist)<=CONVTHR) {
      // Converged at nucleus.
      r.x=closenuc(0);
      r.y=closenuc(1);
      r.z=closenuc(2);
    }

    if(optlen==0.0) {
      if(dr>=CONVTHR) {
	// Reduce step length.
	dr/=10.0;
	continue;
      } else {
	// Converged
	break;
      }
    } else if(optlen<=CONVTHR)
      // Converged
      break;
      
    // Update point
    r=r+gn*optlen;
  }

  // Report the evaluation counts back to the caller.
  nd+=ndens;
  ng+=ngrad;

#ifdef BADERDEBUG
  printf("Point % .3f % .3f %.3f tracked to maximum at % .3f % .3f % .3f with %s density evaluations.\n",r0.x,r0.y,r0.z,r.x,r.y,r.z,space_number(ndens).c_str());
#endif  
  
  return r;
}
示例#5
0
// Build a photometry_data structure from the observations in `data`:
// parse the raw observations, accumulate per-target statistics, compute the
// target/comparison intensity ratio, fit a polynomial to that ratio over
// time, and derive the mmi series from the fit residuals. Returns NULL
// (after reporting via error()) on allocation failure, insufficient data,
// or a failed fit. The caller owns the result and releases it with
// datafile_free_photometry().
struct photometry_data *datafile_generate_photometry(datafile *data)
{
    if (!data->obs_start)
        return NULL;

    // calloc zeroes every field, so counters/accumulators start at 0.
    struct photometry_data *p = calloc(1, sizeof(struct photometry_data));
    if (!p)
        return NULL;

    // Per-target series are stored in one flat array of obs_count-sized
    // columns: element (target j, sample s) lives at j*obs_count + s.
    p->target_time = calloc(data->obs_count*data->target_count, sizeof(double));
    p->target_intensity = calloc(data->obs_count*data->target_count, sizeof(double));
    p->target_noise = calloc(data->obs_count*data->target_count, sizeof(double));

    p->target_count = calloc(data->target_count, sizeof(size_t));
    p->target_snr = calloc(data->target_count, sizeof(double));

    p->raw_time = calloc(data->obs_count, sizeof(double));
    p->sky = calloc(data->obs_count, sizeof(double));
    p->fwhm = calloc(data->obs_count, sizeof(double));

    p->time = calloc(data->obs_count, sizeof(double));
    p->ratio = calloc(data->obs_count, sizeof(double));
    p->ratio_noise = calloc(data->obs_count, sizeof(double));
    p->ratio_fit = calloc(data->obs_count, sizeof(double));
    p->mmi = calloc(data->obs_count, sizeof(double));
    p->mmi_noise = calloc(data->obs_count, sizeof(double));

    // A degree-d polynomial has d+1 coefficients.
    p->fit_coeffs_count = data->ratio_fit_degree + 1;
    p->fit_coeffs = calloc(p->fit_coeffs_count, sizeof(double));

    // Bail out if any of the allocations above failed; the free routine
    // tolerates partially-populated structures.
    if (!p->target_time || !p->target_intensity || !p->target_noise ||
        !p->target_count || !p->target_snr ||
        !p->raw_time || !p->sky || !p->fwhm ||
        !p->time || !p->ratio || !p->ratio_noise ||
        !p->ratio_fit || !p->mmi || !p->mmi_noise ||
        !p->fit_coeffs)
    {
        datafile_free_photometry(p);
        error("Allocation error");
        return NULL;
    }

    //
    // Parse raw data
    //

    for (size_t i = 0; i < data->target_count; i++)
    {
        p->target_count[i] = 0;
        p->target_snr[i] = 0;
    }

    p->scaled_target_max = 0;
    p->filtered_count = 0;
    p->ratio_mean = 0;
    p->fwhm_mean = 0;

    // External code may modify obs_count to restrict data processing,
    // so both checks are required
    struct observation *obs = data->obs_start;
    for (size_t i = 0; obs && i < data->obs_count; obs = obs->next, i++)
    {
        p->raw_time[p->raw_count] = obs->time;
        p->sky[p->raw_count] = 0;
        p->fwhm[p->raw_count] = 0;
        size_t target_count = 0;

        double comparison_intensity = 0;
        double comparison_noise = 0;

        for (size_t j = 0; j < data->target_count; j++)
        {
            // Skip targets with any missing measurement in this frame.
            if (isnan(obs->star[j]) || isnan(obs->noise[j]) || isnan(obs->fwhm[j]))
                continue;

            size_t k = j*data->obs_count + p->target_count[j];
            p->target_time[k] = obs->time;
            p->target_intensity[k] = obs->star[j];
            p->target_noise[k] = obs->noise[j];

            p->target_count[j]++;
            p->target_snr[j] += obs->star[j] / obs->noise[j];

            p->sky[p->raw_count] += obs->sky[j];
            p->fwhm[p->raw_count] += obs->fwhm[j];
            target_count++;


            // Targets after index 0 act as comparison stars for the ratio
            // computed below.
            if (j > 0)
            {
                comparison_intensity += obs->star[j];
                comparison_noise += obs->noise[j];
            }

            double r = obs->star[j]*data->targets[j].scale;
            if (r > p->scaled_target_max)
                p->scaled_target_max = r;
        }

        // sky/fwhm for the frame are the mean over the targets present.
        if (target_count > 0)
        {
            p->sky[p->raw_count] /= target_count;
            p->fwhm[p->raw_count] /= target_count;
            p->fwhm_mean += p->fwhm[p->raw_count];
            p->raw_count++;
        }

        // Cannot calculate ratio if we've lost one or more targets
        // (each contributes a factor proportional to its relative intensity)
        if (target_count != data->target_count)
            continue;

        // Drop frames inside any user-blocked time range.
        bool skip = false;
        for (size_t j = 0; j < data->num_blocked_ranges; j++)
            if (obs->time >= data->blocked_ranges[j].x && obs->time <= data->blocked_ranges[j].y)
            {
                skip = true;
                break;
            }

        if (skip)
            continue;

        if (data->target_count > 1)
        {
            // Ratio of target (index 0) to the summed comparisons; the
            // noise combines the two relative errors.
            p->ratio[p->filtered_count] = obs->star[0] / comparison_intensity;
            p->ratio_noise[p->filtered_count] = (obs->noise[0]/obs->star[0] + comparison_noise/comparison_intensity)*p->ratio[p->filtered_count];
        }
        else
        {
            // No comparison stars: use the raw target intensity.
            p->ratio[p->filtered_count] = obs->star[0];
            p->ratio_noise[p->filtered_count] = obs->noise[0];
        }

        // Adjust ratio
        for (size_t j = 0; j < data->num_ratio_offsets; j++)
            if (obs->time >= data->ratio_offsets[j].x && obs->time <= data->ratio_offsets[j].y)
            {
                p->ratio[p->filtered_count] *= data->ratio_offsets[j].z;
                break;
            }

        p->ratio_mean += p->ratio[p->filtered_count];
        p->time[p->filtered_count] = obs->time;

        p->filtered_count++;
    }

    // NOTE(review): target_count[i] can be zero (target missing in every
    // frame), giving a division by zero here — confirm upstream guarantees.
    for (size_t i = 0; i < data->target_count; i++)
        p->target_snr[i] /= p->target_count[i];

    // NOTE(review): filtered_count/raw_count may be zero at this point
    // (0.0/0 -> NaN); the insufficient-data check only happens further down.
    p->ratio_mean /= p->filtered_count;
    p->fwhm_mean /= p->raw_count;

    // Ratio and fwhm standard deviation
    // NOTE(review): fwhm[] holds raw_count valid entries and fwhm_mean was
    // averaged over raw_count, but this loop and the divisor below use
    // filtered_count — the fwhm deviation mixes the two populations; verify
    // this is intended.
    p->ratio_std = 0;
    for (size_t i = 0; i < p->filtered_count; i++)
    {
        p->ratio_std += (p->ratio[i] - p->ratio_mean)*(p->ratio[i] - p->ratio_mean);
        p->fwhm_std += (p->fwhm[i] - p->fwhm_mean)*(p->fwhm[i] - p->fwhm_mean);
    }
    p->ratio_std = sqrt(p->ratio_std/p->filtered_count);
    p->fwhm_std = sqrt(p->fwhm_std/p->filtered_count);

    //
    // Calculate polynomial fit
    //
    if (p->filtered_count < p->fit_coeffs_count)
    {
        datafile_free_photometry(p);
        error("Insufficient data for polynomial fit");
        return NULL;
    }

    if (fit_polynomial(p->time, p->ratio, p->ratio_noise, p->filtered_count, p->fit_coeffs, data->ratio_fit_degree))
    {
        datafile_free_photometry(p);
        error("Polynomial fit failed");
        return NULL;
    }

    //
    // Calculate mmi
    //

    p->mmi_mean = 0;
    for (size_t i = 0; i < p->filtered_count; i++)
    {
        // Subtract polynomial fit and convert to mmi
        p->ratio_fit[i] = 0;
        // Horner-style accumulation of time powers; note this local `pow`
        // shadows the math library pow() within this loop body.
        double pow = 1;
        for (size_t j = 0; j < p->fit_coeffs_count; j++)
        {
            p->ratio_fit[i] += pow*p->fit_coeffs[j];
            pow *= p->time[i];
        }
        p->mmi[i] = 1000*(p->ratio[i] - p->ratio_fit[i])/p->ratio_fit[i];

        // Propagate the ratio noise through the residual expression.
        double numer_error = fabs(p->ratio_noise[i]/(p->ratio[i] - p->ratio_fit[i]));
        double denom_error = fabs(p->ratio_noise[i]/p->ratio[i]);
        p->mmi_noise[i] = (numer_error + denom_error)*fabs(p->mmi[i]);

        p->mmi_mean += p->mmi[i];
    }
    p->mmi_mean /= p->filtered_count;

    // mmi standard deviation
    p->mmi_std = 0;
    for (size_t i = 0; i < p->filtered_count; i++)
        p->mmi_std += (p->mmi[i] - p->mmi_mean)*(p->mmi[i] - p->mmi_mean);
    p->mmi_std = sqrt(p->mmi_std/p->filtered_count);

    double mmi_corrected_mean = 0;
    size_t mmi_corrected_count = 0;

    // Discard outliers and recalculate mean
    for (size_t i = 0; i < p->filtered_count; i++)
    {
        if (fabs(p->mmi[i] - p->mmi_mean) > data->mmi_filter_sigma*p->mmi_std)
        {
            if (verbosity >= 1)
                error("%f is an outlier, setting to 0", p->time[i]);
            p->mmi[i] = 0;
        }
        else
        {
            mmi_corrected_mean += p->mmi[i];
            mmi_corrected_count++;
        }
    }

    // NOTE(review): mmi_corrected_count is zero if every point was an
    // outlier (0.0/0 -> NaN) — confirm this cannot occur in practice.
    mmi_corrected_mean /= mmi_corrected_count;
    p->mmi_mean = mmi_corrected_mean;

    // Axis metadata for plotting: offset/scale the time axis to a
    // convenient power-of-ten range.
    p->time_offset = 3600*ts_time_to_utc_hour(data->reference_time);
    p->time_min = p->raw_time[0];
    p->time_max = p->raw_time[p->raw_count - 1];
    p->time_exponent = (int)(log10(p->time_max - p->time_min) / 3)*3;
    p->time_scale = 1.0/pow(10, p->time_exponent);

    return p;
}