Example #1
/**
 * Build any objects that will need to be utilised by this object.
 * Obtain smart_pointers to any objects that will be used by this object.
 */
void Data::DoBuild() {
  length_weight_ = model_->managers().length_weight()->GetLengthWeight(length_weight_label_);
  if (!length_weight_)
    LOG_ERROR_P(PARAM_LENGTH_WEIGHT) << "(" << length_weight_label_ << ") could not be found. Have you defined it?";
  if (!data_table_)
    LOG_CODE_ERROR() << "!data_table_";

  // basic validation
  const vector<string>& columns = data_table_->columns();
  if (columns.size() != model_->age_spread() + 1)
    LOG_ERROR_P(PARAM_DATA) << "column count (" << columns.size() << ") must be <year> <ages> for a total of " << model_->age_spread() + 1 << " columns";
  if (columns[0] != PARAM_YEAR)
    LOG_ERROR_P(PARAM_DATA) << "first column label must be 'year'. First column label was '" << columns[0] << "'";

  /**
   * Build our data_by_year map so we can fill the gaps
   * and use it in the model
   */
  vector<vector<string>>& data = data_table_->data();
  vector<Double> total_length(model_->age_spread(), 0.0);
  Double number_of_years = 0.0;

  for (const vector<string>& row : data) {
    if (row.size() != columns.size())
      LOG_CODE_ERROR() << "row.size() != columns.size()";
    number_of_years += 1;
    unsigned year = utilities::ToInline<string, unsigned>(row[0]);
    for (unsigned i = 1; i < row.size(); ++i) {
      data_by_year_[year].push_back(utilities::ToInline<string, Double>(row[i]));
      total_length[i - 1] += utilities::ToInline<string, Double>(row[i]);
    }
  }

  /*
   * Build our average map for use in initialisation and simulation phases
   */
  for (unsigned i = 0; i < model_->age_spread(); ++i)
    data_by_age_[model_->min_age() + i] = total_length[i] / number_of_years;

  /**
   * Check if we're using a mean method and build a vector of means now
   * before we modify the data_by_year object by filling the external
   * gaps
   */
  if (external_gaps_ == PARAM_MEAN || internal_gaps_ == PARAM_MEAN) {
    for (unsigned i = 0; i < model_->age_spread(); ++i) {
      Double total = 0.0;
      for (auto iter = data_by_year_.begin(); iter != data_by_year_.end(); ++iter)
        total += iter->second[i];
      means_.push_back(total / data_by_year_.size());
    }
  }

  // Fill our gaps
  FillExternalGaps();
  FillInternalGaps();
}
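A minimal standalone sketch of the per-age averaging step above; the function and parameter names (AverageByAge, data_by_year, min_age) are illustrative, not members of the class, and it assumes at least one year of data with age_spread values per year.

#include <map>
#include <vector>

// Average each age column across all years, mirroring how data_by_age_ is
// filled from total_length and data_by_year_ in Data::DoBuild() above.
std::map<unsigned, double> AverageByAge(const std::map<unsigned, std::vector<double>>& data_by_year,
                                        unsigned min_age, unsigned age_spread) {
  std::map<unsigned, double> data_by_age;
  for (unsigned i = 0; i < age_spread; ++i) {
    double total = 0.0;
    for (const auto& year_row : data_by_year)
      total += year_row.second[i];
    data_by_age[min_age + i] = total / data_by_year.size();
  }
  return data_by_age;
}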
/**
 * This method is called at the start of the targeted
 * time step for this observation.
 *
 * At this point we need to build our cache for the partition
 * structure to use with any interpolation
 */
void ProcessRemovalsByLength::PreExecute() {
  LOG_FINEST() << "Entering observation " << label_;

  cached_partition_->BuildCache();

  if (cached_partition_->Size() != proportions_[model_->current_year()].size())
    LOG_CODE_ERROR() << "cached_partition_->Size() != proportions_[model->current_year()].size()";
  if (partition_->Size() != proportions_[model_->current_year()].size())
    LOG_CODE_ERROR() << "partition_->Size() != proportions_[model->current_year()].size()";
}
/**
 * This method is called at the start of the targeted
 * time step for this observation.
 *
 * At this point we need to build our cache for the partition
 * structure to use with any interpolation
 */
void ProportionsMigrating::PreExecute() {
  cached_partition_->BuildCache();
  LOG_FINEST() << "Entering observation " << label_;


  if (cached_partition_->Size() != proportions_[model_->current_year()].size()) {
    LOG_MEDIUM() << "Cached size " << cached_partition_->Size() << " partition size = " << proportions_[model_->current_year()].size();
    LOG_CODE_ERROR() << "cached_partition_->Size() != proportions_[model->current_year()].size()";

  }
  if (partition_->Size() != proportions_[model_->current_year()].size())
    LOG_CODE_ERROR() << "partition_->Size() != proportions_[model->current_year()].size()";
}
/**
 * This method will take the current age population for this category stored
 * in this->data_ and populate this->length_data_ by using the age length
 * proportions generated and stored against the Partition class. The age
 * length proportions are generated during the build phase.
 *
 * @param selectivity The selectivity to apply to the age data
 */
void Category::PopulateAgeLengthMatrix(Selectivity* selectivity) {
  LOG_FINEST() << "About to populate the length data for category " << name_ << " in year " << model_->current_year();

  if (selectivity == nullptr)
    LOG_CODE_ERROR() << "selectivity == nullptr";
  if (age_length_ == nullptr)
    LOG_CODE_ERROR() << "In category " << name_ << " there is no age-length object to calculate the age-length proportions";
  if (age_length_matrix_.size() == 0)
    LOG_CODE_ERROR() << "No memory has been allocated for the age_length_matrix for category " << name_;

  auto& age_length_proportions = model_->partition().age_length_proportions(name_);
  unsigned year = model_->current_year() - model_->start_year();
  vector<unsigned> length_bins = model_->length_bins();
  unsigned time_step_index = model_->managers().time_step()->current_time_step();

  LOG_FINEST() << "Year: " << year << "; time_step: " << time_step_index << "; length_bins: " << length_bins.size();
  LOG_FINEST() << "Years in proportions: " << age_length_proportions.size();
  LOG_FINEST() << "Timesteps in current year: " << age_length_proportions[year].size();

  if (year >= age_length_proportions.size())
    LOG_CODE_ERROR() << "year >= age_length_proportions.size()";
  if (time_step_index >= age_length_proportions[year].size())
    LOG_CODE_ERROR() << "time_step_index >= age_length_proportions[year].size()";
  vector<vector<Double>>& proportions_for_now = age_length_proportions[year][time_step_index];

  unsigned size = model_->length_plus() ? model_->length_bins().size() : model_->length_bins().size() - 1;
  LOG_FINEST() << "Calculating age length data";
  for (unsigned age = min_age_; age <= max_age_; ++age) {
    unsigned i = age - min_age_;
    if (i >= proportions_for_now.size())
      LOG_CODE_ERROR() << "i >= proportions_for_now.size()";
    if (i >= data_.size())
      LOG_CODE_ERROR() << "i >= data_.size()";
    if (i >= age_length_matrix_.size())
      LOG_CODE_ERROR() << "(i >= age_length_matrix_.size())";

    vector<Double>& ages_at_length = proportions_for_now[i];

    for (unsigned bin = 0; bin < size; ++bin) {
      if (bin >= age_length_matrix_[i].size())
        LOG_CODE_ERROR() << "bin (" << bin << ") >= age_length_matrix_[i].size(" << age_length_matrix_[i].size() << ")";
      if (bin >= ages_at_length.size())
        LOG_CODE_ERROR() << "bin >= ages_at_length.size()";

      age_length_matrix_[i][bin] = selectivity->GetAgeResult(age, age_length_) * data_[i] * ages_at_length[bin];
    }
  }

  LOG_FINEST() << "Finished populating the length data for category " << name_ << " in year " << model_->current_year();
}
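For reference, a self-contained sketch of the product used in the inner loop above, matrix[age][bin] = selectivity(age) * numbers_at_age(age) * P(length bin | age); the function and parameter names here are illustrative only.

#include <cstddef>
#include <vector>

// Build an age x length matrix from numbers-at-age, selectivity-at-age and
// age-to-length proportions. Assumes one proportion row per age.
std::vector<std::vector<double>> BuildAgeLengthMatrix(
    const std::vector<double>& numbers_at_age,
    const std::vector<double>& selectivity_at_age,
    const std::vector<std::vector<double>>& age_length_proportions) {
  std::vector<std::vector<double>> matrix(numbers_at_age.size());
  for (std::size_t a = 0; a < numbers_at_age.size(); ++a) {
    const std::vector<double>& proportions = age_length_proportions[a];
    matrix[a].resize(proportions.size());
    for (std::size_t l = 0; l < proportions.size(); ++l)
      matrix[a][l] = selectivity_at_age[a] * numbers_at_age[a] * proportions[l];
  }
  return matrix;
}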
Example #5
/**
 *  This method will return a reference to one of our partition categories.
 *
 *  @param category_label The name of the category
 *  @return reference to the category
 */
partition::Category& Partition::category(const string& category_label) {
  auto find_iter = partition_.find(category_label);
  if (find_iter == partition_.end())
    LOG_CODE_ERROR() << "The partition does not have a category " << category_label;

  return (*find_iter->second);
}
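A hedged usage sketch of the accessor above; the category label "male" is purely illustrative, and model_ is assumed to be in scope as in the other examples.

  // Look up a category by label; LOG_CODE_ERROR() fires if the label is not in the partition.
  partition::Category& category = model_->partition().category("male");
  LOG_FINE() << "Category 'male' holds " << category.data_.size() << " age classes";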
Example #6
/**
 * Print out the chain after each iteration
 */
void MCMCObjective::DoExecute() {
    if (!mcmc_)
        LOG_CODE_ERROR() << "if (!mcmc_)";

    if (first_write_ && !model_->global_configuration().resume()) {

        cache_ << "starting_covariance_matrix {m}\n";
        auto covariance = mcmc_->covariance_matrix();
        for (unsigned i = 0; i < covariance.size1(); ++i) {
            for (unsigned j = 0; j < covariance.size2() - 1; ++j)
                cache_ << covariance(i,j) << " ";
            cache_ << covariance(i, covariance.size2() - 1) << "\n";
        }
        cache_ << "samples {d} \n";
        cache_ << "sample objective_score prior likelihood penalties additional_priors jacobians step_size acceptance_rate acceptance_rate_since_adapt\n";
    }

    auto chain = mcmc_->chain();
    unsigned element = chain.size() - 1;
    cache_ << chain[element].iteration_ << " "
           << chain[element].score_ << " "
           << chain[element].prior_ << " "
           << chain[element].likelihood_ << " "
           << chain[element].penalty_ << " "
           << chain[element].additional_priors_ << " "
           << chain[element].jacobians_ << " "
           << chain[element].step_size_ << " "
           << chain[element].acceptance_rate_ << " "
           << chain[element].acceptance_rate_since_adapt_ << "\n";

    ready_for_writing_ = true;
}
Example #7
/**
 * Do the conversion of the partition structure from age to length
 *
 * @param category The current category to convert
 * @param length_bins Vector of the length bins to map to
 * @param plus_grp Whether the final length bin is treated as a plus group
 * @param selectivity Pointer to the selectivity to apply (age-based selectivity)
 */
void AgeLength::DoAgeToLengthConversion(partition::Category* category, const vector<Double>& length_bins, bool plus_grp, Selectivity* selectivity) {
  LOG_TRACE();
  unsigned year = model_->current_year();
  unsigned size = length_bins.size();
  unsigned time_step = model_->managers().time_step()->current_time_step();
  if (!plus_grp)
    size = length_bins.size() - 1;

  category->age_length_matrix_.resize(category->data_.size());
  for (unsigned i = 0; i < category->data_.size(); ++i) {

    vector<Double> age_frequencies;
    unsigned age = category->min_age_ + i;

    if (cvs_[year][age][time_step] <= 0.0)
      LOG_CODE_ERROR() << "Identified a CV of 0.0. Please check parameters cv_first and cv_last in the @age_length block";

    Double mu = category->mean_length_per_[age];
    CummulativeNormal(mu, cvs_[year][age][time_step], age_frequencies, length_bins, distribution_, plus_grp);
    category->age_length_matrix_[i].resize(size);

    // Loop through the length bins and multiply the partition of the current age to go from
    // length frequencies to age-length numbers
    for (unsigned j = 0; j < size; ++j) {
      category->age_length_matrix_[i][j] = selectivity->GetResult(age, category->age_length_) * category->data_[i] * age_frequencies[j];
    }
  }
}
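The CummulativeNormal() call above bins a normal length-at-age distribution into the supplied length bins. Below is a standalone illustration of that binning using std::erf; it is a sketch of the idea, not the actual CummulativeNormal implementation, and the function name is illustrative.

#include <cmath>
#include <cstddef>
#include <vector>

// P(length in bin j) = F(upper_j) - F(lower_j) for Normal(mean, cv * mean),
// with the last bin optionally treated as a plus group (open upper bound).
std::vector<double> NormalBinProportions(double mean, double cv,
                                         const std::vector<double>& length_bins,
                                         bool plus_group) {
  double sigma = cv * mean;
  auto cdf = [&](double x) { return 0.5 * (1.0 + std::erf((x - mean) / (sigma * std::sqrt(2.0)))); };

  std::size_t n = plus_group ? length_bins.size() : length_bins.size() - 1;
  std::vector<double> proportions(n, 0.0);
  for (std::size_t j = 0; j < n; ++j) {
    double lower = cdf(length_bins[j]);
    double upper = (plus_group && j == n - 1) ? 1.0 : cdf(length_bins[j + 1]);
    proportions[j] = upper - lower;
  }
  return proportions;
}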
void Project::Build() {
  string error = "";
  if (!model_->objects().VerfiyAddressableForUse(parameter_, addressable::kProject, error)) {
    LOG_FATAL_P(PARAM_PARAMETER) << "could not be verified for use in a @project block. Error was " << error;
  }

  addressable::Type addressable_type = model_->objects().GetAddressableType(parameter_);
  switch(addressable_type) {
    case addressable::kInvalid:
      LOG_CODE_ERROR() << "Invalid addressable type: " << parameter_;
      break;
    case addressable::kSingle:
      LOG_FINEST() << "applying projection for parameter " << parameter_ << " is an single type";
      DoUpdateFunc_ = &Project::SetSingleValue;
      addressable_    = model_->objects().GetAddressable(parameter_);
      original_value_ = *addressable_;
      break;
    case addressable::kVector:
      LOG_FINEST() << "applying projection for parameter " << parameter_ << " is a vector";
      addressable_vector_ = model_->objects().GetAddressableVector(parameter_);
      DoUpdateFunc_ = &Project::SetVectorValue;
      break;
    case addressable::kUnsignedMap:
      LOG_FINEST() << "applying projection for parameter " << parameter_ << " is an unsigned map";
      DoUpdateFunc_ = &Project::SetMapValue;
      addressable_map_ = model_->objects().GetAddressableUMap(parameter_);
      break;
    default:
      LOG_ERROR() << "The addressable you have provided for use in a projection: " << parameter_ << " is not a type that is supported for projection modification";
      break;
  }
  DoBuild();
}
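The DoUpdateFunc_ assignments above bind a pointer-to-member-function once, so later updates can dispatch without re-checking the addressable type. A minimal sketch of that pattern with hypothetical names:

#include <iostream>

// Illustrative only: mirrors how Project::Build() binds DoUpdateFunc_ and
// Project::Update() later calls it through the member-function pointer.
class Updater {
public:
  void Select(bool single_value) {
    // Bind the handler once, then reuse it every update.
    do_update_func_ = single_value ? &Updater::SetSingleValue : &Updater::SetVectorValue;
  }
  void Update() {
    if (do_update_func_ != nullptr)
      (this->*do_update_func_)();
  }

private:
  void SetSingleValue() { std::cout << "single value update\n"; }
  void SetVectorValue() { std::cout << "vector update\n"; }

  void (Updater::*do_update_func_)() = nullptr;
};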
/**
 * This method will populate the age data from the length data. This is required
 * to transfer any changes in the length partition back to the age partition.
 */
void Category::CollapseAgeLengthData() {
  LOG_CODE_ERROR() << "This is hideously slow, do not allocate memory with the .push_back";
  data_.clear();

  for (auto age_row : age_length_matrix_) {
    Double total = 0;
    for (Double length_data : age_row)
      total += length_data;
    data_.push_back(total);
  }
}
void Project::Update(unsigned current_year) {
  LOG_TRACE();
  if (DoUpdateFunc_ == nullptr)
    LOG_CODE_ERROR() << "DoUpdateFunc_ == nullptr";
  if (std::find(years_.begin(), years_.end(), current_year) == years_.end()) {
    LOG_FINEST() << "Resetting parameter to original value as the year " << current_year << " not in years";
    RestoreOriginalValue(current_year);
  } else {
    LOG_FINEST() << "updating parameter";
    DoUpdate();
  }
}
/**
 * This method collapses the age-length matrix into numbers at length for a category
 */
void Category::CollapseAgeLengthDataToLength() {
  LOG_TRACE();

  if (age_length_matrix_.size() == 0)
    LOG_CODE_ERROR() << "if (age_length_matrix_.size() == 0)";

  LOG_FINE() << "age_length_matrix_.size(): " << age_length_matrix_.size();
  LOG_FINE() << "age_length_matrix_[0].size(): " << age_length_matrix_[0].size();
  length_data_.assign(model_->length_bins().size(), 0.0);
  for (unsigned i = 0; i < age_length_matrix_.size(); ++i) {
    for (unsigned j = 0; j < age_length_matrix_[i].size(); ++j) {
      if (j >= length_data_.size())
        LOG_CODE_ERROR() << "j >= length_data_.size()";

      length_data_[j] += age_length_matrix_[i][j];
    }
  }

  for (unsigned i = 0; i < length_data_.size(); ++i)
    LOG_FINEST() << "length_data_[" << i << "]: " << length_data_[i];
}
Double DoubleNormal::GetLengthBasedResult(unsigned age, AgeLength* age_length, unsigned year, int time_step_index) {
  LOG_TRACE();
  unsigned yearx = year == 0 ? model_->current_year() : year;
  unsigned time_step = time_step_index == -1 ? model_->managers().time_step()->current_time_step() : (unsigned)time_step_index;
  Double cv = age_length->cv(yearx, time_step, age);
  Double mean = age_length->mean_length(time_step, age);
  string dist = age_length->distribution_label();

  if (dist == PARAM_NONE || n_quant_ <= 1) {

    if (mean < mu_)
      return pow(2.0, -((mean - mu_) / sigma_l_ * (mean - mu_) / sigma_l_)) * alpha_;
    else
      return pow(2.0, -((mean - mu_) / sigma_r_ * (mean - mu_) / sigma_r_)) * alpha_;

  } else if (dist == PARAM_NORMAL) {

    Double sigma = cv * mean;
    Double size = 0.0;
    Double total = 0.0;

    for (unsigned j = 0; j < n_quant_; ++j) {
      size = mean + sigma * quantiles_at_[j];

      if (size < mu_)
        total += pow(2.0, -((size - mu_) / sigma_l_ * (size - mu_) / sigma_l_)) * alpha_;
      else
        total += pow(2.0, -((size - mu_) / sigma_r_ * (size - mu_) / sigma_r_)) * alpha_;
    }
    return total / n_quant_;

  } else if (dist == PARAM_LOGNORMAL) {
    // convert parameters to log space
    Double sigma = sqrt(log(1 + cv * cv));
    Double mu = log(mean) - sigma * sigma * 0.5;
    Double size = 0.0;
    Double total = 0.0;
    boost::math::lognormal lognormal_dist{AS_DOUBLE(mu), AS_DOUBLE(sigma)};

    for (unsigned j = 0; j < n_quant_; ++j) {
      size = mu + sigma * quantile(lognormal_dist, AS_DOUBLE(quantiles_[j]));

      if (size < mu_)
        total += pow(2.0, -((size - mu_) / sigma_l_ * (size - mu_) / sigma_l_)) * alpha_;
      else
        total += pow(2.0, -((size - mu_) / sigma_r_ * (size - mu_) / sigma_r_)) * alpha_;
    }
    return total / n_quant_;
  }
  LOG_CODE_ERROR() << "dist is invalid " << dist;
  return 0;
}
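The selectivity evaluated above is a double-normal curve, applied either to the mean length directly or averaged over quantiles of the length-at-age distribution. A standalone sketch of the curve itself, with parameters mirroring mu_, sigma_l_, sigma_r_ and alpha_ (the function name is illustrative):

#include <cmath>

// Double-normal selectivity: a left limb below mu and a right limb above it,
// each Gaussian-shaped but written with base 2 and scaled by alpha.
double DoubleNormalCurve(double x, double mu, double sigma_l, double sigma_r, double alpha) {
  double sigma = x < mu ? sigma_l : sigma_r;
  double z = (x - mu) / sigma;
  return alpha * std::pow(2.0, -(z * z));
}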
/**
 * Get the score for this penalty
 *
 * @return Penalty score
 */
Double ElementDifference::GetScore() {
  LOG_TRACE();
  vector<Double> values;
  vector<Double> second_values;
  // first parameter
  if (addressable_vector_ != nullptr)
    values.assign((*addressable_vector_).begin(), (*addressable_vector_).end());
  else if (addressable_ptr_vector_ != nullptr) {
    for (auto ptr : (*addressable_ptr_vector_))
      values.push_back((*ptr));
  } else if (addressable_map_ != nullptr) {
    for (auto iter : (*addressable_map_))
      values.push_back(iter.second);
  } else if (addressable_ != nullptr) {
    values.push_back((*addressable_));
  } else
    LOG_CODE_ERROR() << "(addressable_map_ != 0) && (addressable_vector_ != 0)";
  // Second parameter
  if (second_addressable_vector_ != nullptr)
    second_values.assign((*second_addressable_vector_).begin(), (*second_addressable_vector_).end());
  else if (second_addressable_ptr_vector_ != nullptr) {
    for (auto ptr : (*second_addressable_ptr_vector_))
      second_values.push_back((*ptr));
  } else if (second_addressable_map_ != nullptr) {
    for (auto iter : (*second_addressable_map_))
      second_values.push_back(iter.second);
  } else if (second_addressable_ != nullptr) {
      second_values.push_back((*second_addressable_));
  } else
    LOG_CODE_ERROR() << "(second_addressable_map_ != 0) && (second_addressable_vector_ != 0)";

  Double score = 0.0;
  LOG_FINEST() << "size of first vector = " << values.size() << " size of second vector";
  for(unsigned i = 0; i < values.size(); ++i)
    score += pow(values[i] - second_values[i], 2);
  return score * multiplier_;
}
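The score above reduces to a scaled sum of squared element-wise differences. A compact sketch of the same calculation, assuming the two vectors are the same length (as DoBuild() checks); the function name is illustrative:

#include <cmath>
#include <cstddef>
#include <vector>

// score = multiplier * sum_i (a_i - b_i)^2
double ElementDifferenceScore(const std::vector<double>& a, const std::vector<double>& b, double multiplier) {
  double score = 0.0;
  for (std::size_t i = 0; i < a.size(); ++i)
    score += std::pow(a[i] - b[i], 2);
  return score * multiplier;
}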
/**
 * Store the value from our addressable for this year
 */
void Project::StoreValue(unsigned current_year) {
  if (addressable_ != nullptr)
    stored_values_[current_year] = *addressable_;
  else if (addressable_map_ != nullptr)
    stored_values_[current_year] = (*addressable_map_)[current_year];
  else if (addressable_vector_ != nullptr) {
    unsigned index = current_year - model_->start_year();
    if (index >= addressable_vector_->size()) {
      LOG_CODE_ERROR() << "Could not store value for @project parameter " << parameter_ << " in year "
      << current_year << " because index exceeded size of vector " << index << " : " << addressable_vector_->size();
    }
    stored_values_[current_year] = addressable_vector_->at(index);
  }
  LOG_FINEST() << "Storing value = " << stored_values_[current_year];
}
Example #15
/**
 * Call the validation method for the child object of this process and
 * set some generic variables.
 */
void Process::Validate() {
  parameters_.Populate();

  if (block_type_ != PARAM_PROCESS && block_type_ != PARAM_PROCESSES) {
    if (type_ != "")
      type_ = block_type_ + "_" + type_;
    else
      type_ = block_type_;

    block_type_ = PARAM_PROCESS;
  }

  if (process_type_ == ProcessType::kUnknown)
    LOG_CODE_ERROR() << "process_type_ == ProcessType::kUnknown for label: " << label();

  DoValidate();
}
Example #16
/**
 * Build our reports then
 * organise the reports stored in our
 * object list into different containers
 * based on their type.
 */
void Manager::Build() {
  LOG_FINEST() << "objects_.size(): " << objects_.size();
  for (auto report : objects_) {
    report->Build();

    if ((RunMode::Type)(report->run_mode() & RunMode::kInvalid) == RunMode::kInvalid)
      LOG_CODE_ERROR() << "Report: " << report->label() << " has not been properly configured to have a run mode";

    if (report->model_state() != State::kExecute) {
      LOG_FINE() << "Adding report " << report->label() << " to state reports";
      state_reports_[report->model_state()].push_back(report);
    } else {
      LOG_FINE() << "Adding report " << report->label() << " to time step reports";
      time_step_reports_[report->time_step()].push_back(report);
    }
  }
}
Example #17
/**
 * Build our parameters
 */
void VectorSmoothing::DoBuild() {
  string type       = "";
  string label      = "";
  string parameter  = "";
  string index      = "";

  /**
   * Explode the parameter string so we can get the estimable
   * name (parameter) and the index
   */
  if (parameter_ == "") {
    parameters().Add(PARAM_PARAMETER, label_, parameters_.Get(PARAM_LABEL)->file_name(), parameters_.Get(PARAM_LABEL)->line_number());
    parameter_ = label_;
  }


  model_->objects().ExplodeString(parameter_, type, label, parameter, index);
  if (type == "" || label == "" || parameter == "") {
    LOG_ERROR_P(PARAM_PARAMETER) << ": parameter " << parameter_
        << " is not in the correct format. Correct format is object_type[label].estimable(array index)";
  }

  string error = "";
  base::Object* target = model_->objects().FindObject(parameter_, error);
  if (!target)
    LOG_ERROR_P(PARAM_PARAMETER) << " " << parameter_ << " is not a valid estimable in the system";


  Estimable::Type estimable_type = target->GetEstimableType(parameter);
  switch(estimable_type) {
    case Estimable::kInvalid:
      LOG_CODE_ERROR() << "Invalid estimable type: " << parameter_;
      break;
    case Estimable::kVector:
      estimable_vector_ = target->GetEstimableVector(parameter);
      break;
    case Estimable::kUnsignedMap:
      estimable_map_ = target->GetEstimableUMap(parameter);
      break;
    default:
      LOG_ERROR() << "The estimable you have provided for use in a additional priors: " << parameter_ << " is not a type that is supported for this class of additional prior";
      break;
  }
}
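For reference, the parameter string handled by ExplodeString() follows the format quoted in the error message above, object_type[label].estimable or object_type[label].estimable(array index). A hypothetical example (the object type, label and estimable name are illustrative, not taken from the source):

process[Recruitment].ycs_values
process[Recruitment].ycs_values(2)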
Example #18
/**
 * The energy function used to test the solution against
 * our model.
 *
 * @param test_solution The test solution to use
 * @return The score from the energy function
 */
double CallBack::EnergyFunction(vector<double> test_solution) {
  vector<Estimate*> estimates = model_->managers().estimate()->GetIsEstimated();

  if (test_solution.size() != estimates.size()) {
    LOG_CODE_ERROR() << "The number of enabled estimates does not match the number of test solution values";
  }

  for (unsigned i = 0; i < test_solution.size(); ++i)
    estimates[i]->set_value(test_solution[i]);

  model_->managers().estimate_transformation()->RestoreEstimates();
  model_->FullIteration();

  ObjectiveFunction& objective = model_->objective_function();
  objective.CalculateScore();

  model_->managers().estimate_transformation()->TransformEstimates();
  return objective.score();
}
Example #19
/**
 * Get the score for this penalty
 *
 * @return Penalty score
 */
Double VectorSmoothing::GetScore() {
  vector<Double> values;
  if (estimable_vector_ != 0)
    values.assign((*estimable_vector_).begin(), (*estimable_vector_).end());
  else if (estimable_map_ != 0) {
    for (auto iter : (*estimable_map_))
      values.push_back(iter.second);
  } else
    LOG_CODE_ERROR() << "(estimable_vector_ != 0) && (estimable_map_ != 0)";

  if (upper_ == lower_ && upper_ == 0u) {
    upper_ = values.size();
    lower_ = 1;
  }

  if (upper_ == lower_)
    LOG_FATAL_P(PARAM_UPPER_BOUND) << "Lower and upper bound cannot be equal";
  if (upper_ > values.size())
    LOG_FATAL_P(PARAM_UPPER_BOUND) << "The last element must not be greater than the size of the vector";
  if (lower_ < 1)
    LOG_FATAL_P(PARAM_LOWER_BOUND) << "The first element must not be less than 1";

  if (r_ >= (upper_ - lower_))
    LOG_FATAL_P(PARAM_R) << PARAM_R << " cannot be greater than or equal to the size of the vector minus 1";

  Double score = 0.0;
  if (log_scale_) {
    for (Double& value : values)
      value = log(value);
  }
  for (unsigned i = 1; i <= r_; ++i) {
    for (unsigned j = (lower_ - 1); j <= ((upper_ - 1) - i); ++j) {
      values[j] = values[j + 1] - values[j];
    }
    values[(upper_ - 1) - i + 1] = 0;
  }

  for (unsigned k = (lower_ - 1); k <= (upper_ - 1); ++k)
    score += values[k] * values[k];
  return score * multiplier_;
}
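The differencing loop above computes r-th order forward differences over the [lower_, upper_] window and then sums their squares. A standalone sketch of the same penalty applied to a whole vector (the function name is illustrative; it assumes r < values.size()):

#include <cmath>
#include <cstddef>
#include <vector>

// Apply r rounds of first differencing, zeroing the trailing element each
// round, then return multiplier * sum of squared differences.
double SmoothingPenalty(std::vector<double> values, unsigned r, bool log_scale, double multiplier) {
  if (log_scale)
    for (double& value : values)
      value = std::log(value);

  for (unsigned i = 1; i <= r; ++i) {
    for (std::size_t j = 0; j + i < values.size(); ++j)
      values[j] = values[j + 1] - values[j];
    values[values.size() - i] = 0.0;
  }

  double score = 0.0;
  for (double value : values)
    score += value * value;
  return score * multiplier;
}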
Example #20
//**********************************************************************
// double CGammaDiffCallback::operator()(const vector<double>& Parameters)
// Operator() for Minimiser CallBack
//**********************************************************************
adouble CallBack::operator()(const vector<adouble>& Parameters) {

  // Update our Components with the New Parameters
  auto estimates = model_->managers().estimate()->GetIsEstimated();

  if (Parameters.size() != estimates.size()) {
    LOG_CODE_ERROR() << "The number of enabled estimates does not match the number of test solution values";
  }

  for (unsigned i = 0; i < Parameters.size(); ++i)
    estimates[i]->set_value(Parameters[i]);

  model_->managers().estimate_transformation()->RestoreEstimates();
  model_->FullIteration();

  ObjectiveFunction& objective = model_->objective_function();
  objective.CalculateScore();

  model_->managers().estimate_transformation()->TransformEstimates();
  return objective.score();
}
Example #21
Double Callback::operator()(const ::dlib::matrix<double, 0, 1>& Parameters) const {
  // Update our Components with the New Parameters
  vector<Estimate*> estimates = model_->managers().estimate()->GetIsEstimated();

  if (Parameters.size() != (int)estimates.size()) {
    LOG_CODE_ERROR() << "The number of enabled estimates does not match the number of test solution values";
  }

  double penalty = 0;
  for (int i = 0; i < Parameters.size(); ++i) {
    Double value = utilities::math::unscale_value(Parameters(i), penalty, estimates[i]->lower_bound(), estimates[i]->upper_bound());
    estimates[i]->set_value(value);
  }

  model_->managers().estimate_transformation()->RestoreEstimates();
  model_->FullIteration();
  LOG_MEDIUM() << "Iteration Complete";
  ObjectiveFunction& objective = model_->objective_function();
  objective.CalculateScore();

  model_->managers().estimate_transformation()->TransformEstimates();
  return objective.score() + penalty;
}
Example #22
void Estimable::DoBuild() {
  string type       = "";
  string label      = "";
  string parameter  = "";
  string index      = "";

  /**
   * Explode the parameter string so we can get the estimable
   * name (parameter) and the index
   */
  if (parameter_ == "") {
    parameters().Add(PARAM_PARAMETER, label_, parameters_.Get(PARAM_LABEL)->file_name(), parameters_.Get(PARAM_LABEL)->line_number());
    parameter_ = label_;
  }

  model_->objects().ExplodeString(parameter_, type, label, parameter, index);
  if (type == "" || label == "" || parameter == "") {
    LOG_ERROR_P(PARAM_PARAMETER) << ": parameter " << parameter_
        << " is not in the correct format. Correct format is object_type[label].estimable(array index)";
  }
  model_->objects().ImplodeString(type, label, parameter, index, parameter_);

  string error = "";
  base::Object* target = model_->objects().FindObject(parameter_, error);
  if (!target) {
    LOG_ERROR_P(PARAM_PARAMETER) << ": parameter " << parameter_ << " is not a valid estimable in the system";
  }

  if (index != "")
    target_ = target->GetEstimable(parameter, index);
  else
    target_ = target->GetEstimable(parameter);

  if (target_ == 0)
    LOG_CODE_ERROR() << "if (target_ == 0)";
}
void ProcessRemovalsByLength::Execute() {
  LOG_TRACE();
  /**
   * Verify our cached partition and partition sizes are correct
   */
//  auto categories = model_->categories();
  unsigned year = model_->current_year();
  unsigned year_index = year - model_->start_year();
  unsigned time_step = model_->managers().time_step()->current_time_step();
  auto cached_partition_iter = cached_partition_->Begin();
  auto partition_iter = partition_->Begin(); // vector<vector<partition::Category> >
  map<unsigned, map<string, map<string, vector<Double>>>> &Removals_at_age = mortality_instantaneous_->catch_at();

  /**
   * Loop through the provided categories. Each provided category (combination) will have a list of observations
   * with it. We need to build a vector of proportions for each length using that combination and then
   * compare it to the observations.
   */
  for (unsigned category_offset = 0; category_offset < category_labels_.size(); ++category_offset, ++partition_iter, ++cached_partition_iter) {
    LOG_FINEST() << "category: " << category_labels_[category_offset];
    Double start_value = 0.0;
    Double end_value = 0.0;
    Double number_at_age = 0.0;

//    LOG_WARNING() << "This is bad code because it allocates memory in the middle of an execute";
    vector<Double> expected_values(number_bins_, 0.0);
    vector<Double> numbers_at_length;
    vector<vector<Double>> age_length_matrix;

    /**
     * Loop through the 2 combined categories building up the
     * expected proportions values.
     */
    auto category_iter = partition_iter->begin();
    auto cached_category_iter = cached_partition_iter->begin();
    for (; category_iter != partition_iter->end(); ++cached_category_iter, ++category_iter) {
//      AgeLength* age_length = categories->age_length((*category_iter)->name_);

//      LOG_WARNING() << "This is bad code because it allocates memory in the middle of an execute";
      age_length_matrix.resize((*category_iter)->data_.size());

      vector<Double> age_frequencies(length_bins_.size(), 0.0);
      const auto& age_length_proportions = model_->partition().age_length_proportions((*category_iter)->name_)[year_index][time_step];

      for (unsigned data_offset = 0; data_offset < (*category_iter)->data_.size(); ++data_offset) {
        unsigned age = ((*category_iter)->min_age_ + data_offset);

        // Calculate the age structure removed from the fishing process
        number_at_age = Removals_at_age[year][method_][(*category_iter)->name_][data_offset];
        LOG_FINEST() << "Numbers at age = " << age << " = " << number_at_age << " start value : " << start_value << " end value : " << end_value;
        // Implement an algorithm similar to DoAgeLengthConversion() to convert numbers at age to numbers at length
        // This is different to DoAgeLengthConversion as this number is now not related to the partition
//        Double mu= (*category_iter)->mean_length_by_time_step_age_[time_step][age];

//        LOG_FINEST() << "mean = " << mu << " cv = " << age_length->cv(year, time_step, age) << " distribution = " << age_length->distribution_label() << " and length plus group = " << length_plus_;
//        age_length->CummulativeNormal(mu, age_length->cv(year, time_step, age), age_frequencies, length_bins_, length_plus_);

//        LOG_WARNING() << "This is bad code because it allocates memory in the middle of an execute";
        age_length_matrix[data_offset].resize(number_bins_);

        // Loop through the length bins and multiply the partition of the current age to go from
        // length frequencies to age-length numbers
        for (unsigned j = 0; j < number_bins_; ++j) {
          age_length_matrix[data_offset][j] = number_at_age * age_length_proportions[data_offset][j];
          LOG_FINEST() << "The proportion of fish in length bin : " << length_bins_[j] << " = " << age_frequencies[j];
        }
      }

      if (age_length_matrix.size() == 0)
        LOG_CODE_ERROR() << "if (age_length_matrix.size() == 0)";

      numbers_at_length.assign(age_length_matrix[0].size(), 0.0);
      for (unsigned i = 0; i < age_length_matrix.size(); ++i) {
        for (unsigned j = 0; j < age_length_matrix[i].size(); ++j) {
          numbers_at_length[j] += age_length_matrix[i][j];
        }
      }

      for (unsigned length_offset = 0; length_offset < number_bins_; ++length_offset) {
        LOG_FINEST() << " numbers for length bin : " << length_bins_[length_offset] << " = " << numbers_at_length[length_offset];
        expected_values[length_offset] += numbers_at_length[length_offset];

        LOG_FINE() << "----------";
        LOG_FINE() << "Category: " << (*category_iter)->name_ << " at length " << length_bins_[length_offset];
        LOG_FINE() << "start_value: " << start_value << "; end_value: " << end_value << "; final_value: " << numbers_at_length[length_offset];
        LOG_FINE() << "expected_value becomes: " << expected_values[length_offset];
      }
    }

    if (expected_values.size() != proportions_[model_->current_year()][category_labels_[category_offset]].size())
      LOG_CODE_ERROR() << "expected_values.size(" << expected_values.size() << ") != proportions_[category_offset].size("
          << proportions_[model_->current_year()][category_labels_[category_offset]].size() << ")";

    /**
     * save our comparisons so we can use them to generate the score from the likelihoods later
     */
    for (unsigned i = 0; i < expected_values.size(); ++i) {
      SaveComparison(category_labels_[category_offset], 0, length_bins_[i], expected_values[i], proportions_[model_->current_year()][category_labels_[category_offset]][i],
          process_errors_by_year_[model_->current_year()], error_values_[model_->current_year()][category_labels_[category_offset]][i], 0.0, delta_, 0.0);
    }
  }
}
Example #24
void MCMCObjective::DoBuild() {
    mcmc_ = model_->managers().mcmc()->active_mcmc();
    if (!mcmc_)
        LOG_CODE_ERROR() << "mcmc_ = model_->managers().mcmc()->active_mcmc();";
}
/**
 * Validate any loaded minimisers we have.
 */
void Manager::Validate() {
  LOG_TRACE();
  LOG_CODE_ERROR() << "This method is not supported";
}
Example #26
/**
 * Parse the configuration file. Creating objects and loading
 * the parameter objects
 */
void File::Parse() {
  LOG_TRACE();

  if (file_.fail() || !file_.is_open())
    LOG_CODE_ERROR() << "Unable to parse the configuration file because a previous error has not been reported.\nFile: " << file_name_;

  /**
   * Iterate through our file parsing the contents
   */
  string    current_line        = "";
  while (getline(file_, current_line)) {
    ++line_number_;

    if (current_line.length() == 0)
      continue;

    // Handle comments
    HandleComments(current_line);

    if (current_line.length() == 0)
      continue;

    /**
     * Change tabs to spaces, remove any leading/trailing or multiple spaces
     * so we can be sure the input is nicely formatted
     */
    boost::replace_all(current_line, "\t", " ");
    boost::trim_all(current_line);
    LOG_FINEST() << "current_line == '" << current_line << "'";

    /**
     * Now we need to check if this line is an include line for a new
     * file.
     */
    if (current_line.length() > strlen(CONFIG_INCLUDE) + 2) {
      string lower_line = util::ToLowercase(current_line);
      if (current_line.substr(0, strlen(CONFIG_INCLUDE)) == CONFIG_INCLUDE) {
        string include_name = current_line.substr(strlen(CONFIG_INCLUDE));
        LOG_FINEST() << "Loading new configuration file via include " << include_name;

        boost::replace_all(include_name, "\"", "");
        boost::trim_all(include_name);
        File include_file(loader_);

        if (include_name.find('\\') == string::npos && file_name_.find('\\') != string::npos)
          include_name = file_name_.substr(0, file_name_.find_last_of('\\') + 1) + include_name;
        if (include_name.find('/') == string::npos && file_name_.find('/') != string::npos)
          include_name = file_name_.substr(0, file_name_.find_last_of('/') + 1) + include_name;

        if (!include_file.OpenFile(include_name))
          LOG_FATAL() << "At line: " << line_number_ << " of " << file_name_
              << ": Include file '" << include_name << "' could not be opened. Does this file exist?";

        include_file.Parse();
        continue;
      }
    }


    /**
     * At this point everything is standard. We have a simple line of text that we now need to parse. All
     * comments etc have been removed and we've gone through any include_file directives
     */
    FileLine current_file_line;
    current_file_line.file_name_    = file_name_;
    current_file_line.line_number_  = line_number_;
    current_file_line.line_         = current_line;

    loader_.AddFileLine(current_file_line);
  } // while(get_line())
}
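For context, an include line in an input file looks something like the line below (assuming CONFIG_INCLUDE expands to !include; the file name is illustrative). The surrounding quotes are stripped, and for relative paths the directory of the including file is prepended before the included file is parsed.

!include "population.csl"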
/**
 * Build our parameters
 */
void ElementDifference::DoBuild() {
  LOG_TRACE();
  string error = "";
  if (!model_->objects().VerfiyAddressableForUse(second_parameter_, addressable::kLookup, error)) {
    LOG_FATAL_P(PARAM_SECOND_PARAMETER) << "could not be verified for use in additional_prior.element_difference. Error was " << error;
  }
  error = "";
  if (!model_->objects().VerfiyAddressableForUse(parameter_, addressable::kLookup, error)) {
    LOG_FATAL_P(PARAM_PARAMETER) << "could not be verified for use in additional_prior.element_difference. Error was " << error;
  }
  // first parameter
  addressable::Type addressable_type = model_->objects().GetAddressableType(parameter_);
  LOG_FINEST() << "addressable type = " << addressable_type;
  switch(addressable_type) {
    case addressable::kInvalid:
      LOG_CODE_ERROR() << "Invalid addressable type: " << parameter_;
      break;
    case addressable::kMultiple:
      addressable_ptr_vector_ = model_->objects().GetAddressables(parameter_);
      break;
    case addressable::kVector:
      addressable_vector_ = model_->objects().GetAddressableVector(parameter_);
      break;
    case addressable::kUnsignedMap:
      addressable_map_ = model_->objects().GetAddressableUMap(parameter_);
      break;
    case addressable::kSingle:
      addressable_ = model_->objects().GetAddressable(parameter_);
      break;
    default:
      LOG_ERROR() << "The addressable you have provided for use in a additional priors: " << parameter_
        << " is not a type that is supported for vector smoothing additional priors";
      break;
  }
  // Get second parameter estimates
  addressable_type = model_->objects().GetAddressableType(second_parameter_);
  LOG_FINEST() << "addressable type = " << addressable_type;
  switch(addressable_type) {
    case addressable::kInvalid:
      LOG_CODE_ERROR() << "Invalid addressable type: " << second_parameter_;
      break;
    case addressable::kMultiple:
      second_addressable_ptr_vector_ = model_->objects().GetAddressables(second_parameter_);
      break;
    case addressable::kVector:
      second_addressable_vector_ = model_->objects().GetAddressableVector(second_parameter_);
      break;
    case addressable::kUnsignedMap:
      second_addressable_map_ = model_->objects().GetAddressableUMap(second_parameter_);
      break;
    case addressable::kSingle:
      second_addressable_ = model_->objects().GetAddressable(second_parameter_);
      break;
    default:
      LOG_ERROR() << "The addressable you have provided for use in a additional priors: " << second_parameter_
        << " is not a type that is supported for difference element additional priors";
      break;
  }

  // Check the two parameters are the same length
  vector<Double> values;
  vector<Double> second_values;
  // Load first parameter
  if (addressable_vector_ != nullptr)
    values.assign((*addressable_vector_).begin(), (*addressable_vector_).end());
  else if (addressable_ptr_vector_ != nullptr) {
    for (auto ptr : (*addressable_ptr_vector_))
      values.push_back((*ptr));
  } else if (addressable_map_ != nullptr) {
    for (auto iter : (*addressable_map_))
      values.push_back(iter.second);
  } else if (addressable_ != nullptr) {
    values.push_back((*addressable_));
  } else
    LOG_CODE_ERROR() << "(addressable_map_ != 0) && (addressable_vector_ != 0)";
  // Load second parameter
  if (second_addressable_vector_ != nullptr)
    second_values.assign((*second_addressable_vector_).begin(), (*second_addressable_vector_).end());
  else if (second_addressable_ptr_vector_ != nullptr) {
    for (auto ptr : (*second_addressable_ptr_vector_))
      second_values.push_back((*ptr));
  } else if (second_addressable_map_ != nullptr) {
    for (auto iter : (*second_addressable_map_))
      second_values.push_back(iter.second);
  } else if (second_addressable_ != nullptr) {
    second_values.push_back((*second_addressable_));
  } else
    LOG_CODE_ERROR() << "(second_addressable_map_ != 0) && (second_addressable_vector_ != 0) && (second_addressable_ != 0)";

  if (second_values.size() != values.size())
    LOG_ERROR_P(PARAM_SECOND_PARAMETER) << "The parameters must be the same length; the second parameter has " << second_values.size()
        << " elements whereas the first parameter has " << values.size() << " elements";

}
void ProportionsMigrating::Execute() {
  LOG_TRACE();

  /**
   * Verify our cached partition and partition sizes are correct
   */
  auto cached_partition_iter  = cached_partition_->Begin();
  auto partition_iter         = partition_->Begin(); // vector<vector<partition::Category> >

  /**
   * Loop through the provided categories. Each provided category (combination) will have a list of observations
   * with it. We need to build a vector of proportions for each age using that combination and then
   * compare it to the observations.
   */
  LOG_FINEST() << "Number of categories " << category_labels_.size();
  for (unsigned category_offset = 0; category_offset < category_labels_.size(); ++category_offset, ++partition_iter, ++cached_partition_iter) {
    Double      start_value        = 0.0;
    Double      end_value          = 0.0;


    vector<Double> expected_values(age_spread_, 0.0);
    vector<Double> numbers_age_before((model_->age_spread() + 1), 0.0);
    vector<Double> numbers_age_after((model_->age_spread() + 1), 0.0);

    /**
     * Loop through the 2 combined categories building up the
     * expected proportions values.
     */
    auto category_iter = partition_iter->begin();
    auto cached_category_iter = cached_partition_iter->begin();
    for (; category_iter != partition_iter->end(); ++cached_category_iter, ++category_iter) {
      for (unsigned data_offset = 0; data_offset < (*category_iter)->data_.size(); ++data_offset) {
        // We now need to loop through all ages to apply ageing misclassification matrix to account
        // for ages older than max_age_ that could be classified as an individual within the observation range
        unsigned age = ( (*category_iter)->min_age_ + data_offset);

        start_value   = (*cached_category_iter).data_[data_offset];
        end_value     = (*category_iter)->data_[data_offset];

        numbers_age_before[data_offset] += start_value;
        numbers_age_after[data_offset] += end_value;

        LOG_FINE() << "----------";
        LOG_FINE() << "Category: " << (*category_iter)->name_ << " at age " << age;
        LOG_FINE() << "start_value: " << start_value << "; end_value: " << end_value;
      }
    }

    /*
    *  Apply Ageing error on numbers at age before and after
    */
    if (ageing_error_label_ != "") {
      vector<vector<Double>>& mis_matrix = ageing_error_->mis_matrix();
      vector<Double> temp_before(numbers_age_before.size(), 0.0);
      vector<Double> temp_after(numbers_age_after.size(), 0.0);

      for (unsigned i = 0; i < mis_matrix.size(); ++i) {
        for (unsigned j = 0; j < mis_matrix[i].size(); ++j) {
          temp_before[j] += numbers_age_before[i] * mis_matrix[i][j];
          temp_after[j] += numbers_age_after[i] * mis_matrix[i][j];
        }
      }
      numbers_age_before = temp_before;
      numbers_age_after = temp_after;
    }


    /*
     *  Now collapse the numbers at age into our expected values
     */
    Double plus_before = 0, plus_after = 0;
    for (unsigned k = 0; k < numbers_age_before.size(); ++k) {
      // age_offset is the difference between the observation's min_age and the model's min_age
      unsigned age_offset = min_age_ - model_->min_age();
      if (numbers_age_before[k] > 0) {
        if (k >= age_offset && (k - age_offset + min_age_) <= max_age_) {
          expected_values[k - age_offset] = (numbers_age_before[k] - numbers_age_after[k]) / numbers_age_before[k];
          LOG_FINEST() << "Numbers before migration = " << numbers_age_before[k] << " numbers after migration = " << numbers_age_after[k]
                   << " proportion migrated = " <<   expected_values[k - age_offset];
        }
        if (((k - age_offset + min_age_) > max_age_) && age_plus_) {
          plus_before += numbers_age_before[k];
          plus_after += numbers_age_after[k];
        }
      } else {
          if (k >= age_offset && (k - age_offset + min_age_) <= max_age_)
            expected_values[k - age_offset] = 0;
          if (((k - age_offset + min_age_) > max_age_) && age_plus_) {
            plus_before += 0;
            plus_after += 0;
          }
      }
    }
    LOG_FINEST() << "Plus group before migration = " << plus_before << " Plus group after migration = " << plus_after;
    if (age_plus_)
      expected_values[age_spread_ - 1] = (plus_before - plus_after) / plus_before;


    if (expected_values.size() != proportions_[model_->current_year()][category_labels_[category_offset]].size())
      LOG_CODE_ERROR() << "expected_values.size(" << expected_values.size() << ") != proportions_[category_offset].size("
        << proportions_[model_->current_year()][category_labels_[category_offset]].size() << ")";

    /**
     * save our comparisons so we can use them to generate the score from the likelihoods later
     */

    for (unsigned i = 0; i < expected_values.size(); ++i) {
      LOG_FINEST() << " Numbers at age " << min_age_ + i << " = " << expected_values[i];
      SaveComparison(category_labels_[category_offset], min_age_ + i ,0.0 ,expected_values[i], proportions_[model_->current_year()][category_labels_[category_offset]][i],
          process_errors_by_year_[model_->current_year()], error_values_[model_->current_year()][category_labels_[category_offset]][i], delta_, 0.0);
    }
  }
}
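Two pieces of arithmetic above may be easier to read in isolation: redistributing numbers at age through the ageing misclassification matrix, and the proportion migrated at each age. A minimal sketch with illustrative names:

#include <cstddef>
#include <vector>

// n'_j = sum_i n_i * M[i][j]: redistribute numbers at age through an
// ageing misclassification matrix, as in the ageing-error step above.
std::vector<double> ApplyAgeingError(const std::vector<double>& numbers_at_age,
                                     const std::vector<std::vector<double>>& mis_matrix) {
  std::vector<double> result(numbers_at_age.size(), 0.0);
  for (std::size_t i = 0; i < mis_matrix.size(); ++i)
    for (std::size_t j = 0; j < mis_matrix[i].size(); ++j)
      result[j] += numbers_at_age[i] * mis_matrix[i][j];
  return result;
}

// Proportion migrated at one age: (before - after) / before, or 0 when nothing was there.
double ProportionMigrated(double before, double after) {
  return before > 0.0 ? (before - after) / before : 0.0;
}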
/**
 * Validate configuration file parameters
 */
void ProportionsMigrating::DoValidate() {
  age_spread_ = (max_age_ - min_age_) + 1;
  map<unsigned, vector<Double>> error_values_by_year;
  map<unsigned, vector<Double>> obs_by_year;

  /**
   * Do some simple checks
   */
  if (min_age_ < model_->min_age())
    LOG_ERROR_P(PARAM_MIN_AGE) << ": min_age (" << min_age_ << ") is less than the model's min_age (" << model_->min_age() << ")";
  if (max_age_ > model_->max_age())
    LOG_ERROR_P(PARAM_MAX_AGE) << ": max_age (" << max_age_ << ") is greater than the model's max_age (" << model_->max_age() << ")";
  if (process_error_values_.size() != 0 && process_error_values_.size() != years_.size()) {
    LOG_ERROR_P(PARAM_PROCESS_ERRORS) << " number of values provided (" << process_error_values_.size() << ") does not match the number of years provided ("
        << years_.size() << ")";
  }
  for (Double process_error : process_error_values_) {
    if (process_error < 0.0)
      LOG_ERROR_P(PARAM_PROCESS_ERRORS) << ": process_error (" << AS_DOUBLE(process_error) << ") cannot be less than 0.0";
  }
  if (process_error_values_.size() != 0)
    process_errors_by_year_ = utilities::Map::create(years_, process_error_values_);
  if (delta_ < 0.0)
    LOG_ERROR_P(PARAM_DELTA) << ": delta (" << AS_DOUBLE(delta_) << ") cannot be less than 0.0";

  /**
   * Validate the number of obs provided matches age spread * category_labels * years
   * This is because we'll have 1 set of obs per category collection provided.
   * categories male+female male = 2 collections
   */
  unsigned obs_expected = age_spread_ * category_labels_.size() + 1;
  vector<vector<string>>& obs_data = obs_table_->data();
  if (obs_data.size() != years_.size()) {
    LOG_ERROR_P(PARAM_OBS) << " has " << obs_data.size() << " rows defined, but we expected " << years_.size()
        << " to match the number of years provided";
  }

  for (vector<string>& obs_data_line : obs_data) {
    if (obs_data_line.size() != obs_expected) {
      LOG_ERROR_P(PARAM_OBS) << " has " << obs_data_line.size() << " values defined, but we expected " << obs_expected
          << " to match the age speard * categories + 1 (for year)";
    }

    unsigned year = 0;
    if (!utilities::To<unsigned>(obs_data_line[0], year))
      LOG_ERROR_P(PARAM_OBS) << " value " << obs_data_line[0] << " could not be converted in to an unsigned integer. It should be the year for this line";
    if (std::find(years_.begin(), years_.end(), year) == years_.end())
      LOG_ERROR_P(PARAM_OBS) << " value " << year << " is not a valid year for this observation";

    for (unsigned i = 1; i < obs_data_line.size(); ++i) {
      Double value = 0;
      if (!utilities::To<Double>(obs_data_line[i], value))
        LOG_ERROR_P(PARAM_OBS) << " value (" << obs_data_line[i] << ") could not be converted to a double";
      obs_by_year[year].push_back(value);
    }
    if (obs_by_year[year].size() != obs_expected - 1)
      LOG_CODE_ERROR() << "obs_by_year_[year].size() (" << obs_by_year[year].size() << ") != obs_expected - 1 (" << obs_expected -1 << ")";
  }

  /**
   * Build our error value map
   */
  vector<vector<string>>& error_values_data = error_values_table_->data();
  if (error_values_data.size() != years_.size()) {
    LOG_ERROR_P(PARAM_ERROR_VALUES) << " has " << error_values_data.size() << " rows defined, but we expected " << years_.size()
        << " to match the number of years provided";
  }

  for (vector<string>& error_values_data_line : error_values_data) {
    if (error_values_data_line.size() != 2 && error_values_data_line.size() != obs_expected) {
      LOG_ERROR_P(PARAM_ERROR_VALUES) << " has " << error_values_data_line.size() << " values defined, but we expected " << obs_expected
          << " to match the age speard * categories + 1 (for year)";
    }

    unsigned year = 0;
    if (!utilities::To<unsigned>(error_values_data_line[0], year))
      LOG_ERROR_P(PARAM_ERROR_VALUES) << " value " << error_values_data_line[0] << " could not be converted in to an unsigned integer. It should be the year for this line";
    if (std::find(years_.begin(), years_.end(), year) == years_.end())
      LOG_ERROR_P(PARAM_ERROR_VALUES) << " value " << year << " is not a valid year for this observation";
    for (unsigned i = 1; i < error_values_data_line.size(); ++i) {
      Double value = 0;

      if (!utilities::To<Double>(error_values_data_line[i], value))
        LOG_ERROR_P(PARAM_ERROR_VALUES) << " value (" << error_values_data_line[i] << ") could not be converted to a double";
      if (likelihood_type_ == PARAM_LOGNORMAL && value <= 0.0) {
        LOG_ERROR_P(PARAM_ERROR_VALUES) << ": error_value (" << AS_DOUBLE(value) << ") cannot be equal to or less than 0.0";
      } else if ((likelihood_type_ == PARAM_MULTINOMIAL && value < 0.0) || (likelihood_type_ == PARAM_DIRICHLET && value < 0.0)) {
        LOG_ERROR_P(PARAM_ERROR_VALUES) << ": error_value (" << AS_DOUBLE(value) << ") cannot be less than 0.0";
      }

      error_values_by_year[year].push_back(value);
    }
    if (error_values_by_year[year].size() == 1) {
      error_values_by_year[year].assign(obs_expected - 1, error_values_by_year[year][0]);
    }
    if (error_values_by_year[year].size() != obs_expected - 1)
      LOG_CODE_ERROR() << "error_values_by_year_[year].size() (" << error_values_by_year[year].size() << ") != obs_expected - 1 (" << obs_expected -1 << ")";
  }

  /**
   * Validate likelihood type
   */
  if (likelihood_type_ != PARAM_LOGNORMAL && likelihood_type_ != PARAM_MULTINOMIAL && likelihood_type_ != PARAM_DIRICHLET)
    LOG_ERROR_P(PARAM_LIKELIHOOD) << ": likelihood " << likelihood_type_ << " is not supported by the proportions at age observation. "
        << "Supported types are " << PARAM_LOGNORMAL << ", " << PARAM_MULTINOMIAL << " and " << PARAM_DIRICHLET;

  /**
   * Build our proportions and error values for use in the observation
   * If the proportions for a given observation do not sum to 1.0
   * and the difference exceeds the tolerance, rescale them.
   */
  Double value = 0.0;
  for (auto iter = obs_by_year.begin(); iter != obs_by_year.end(); ++iter) {

    for (unsigned i = 0; i < category_labels_.size(); ++i) {
      for (unsigned j = 0; j < age_spread_; ++j) {
        unsigned obs_index = i * age_spread_ + j;
        if (!utilities::To<Double>(iter->second[obs_index], value))
          LOG_ERROR_P(PARAM_OBS) << ": obs_ value (" << iter->second[obs_index] << ") at index " << obs_index + 1
              << " in the definition could not be converted to a numeric double";

        Double error_value = error_values_by_year[iter->first][obs_index];
        error_values_[iter->first][category_labels_[i]].push_back(error_value);
        proportions_[iter->first][category_labels_[i]].push_back(value);
      }
    }
  }
}
Example #30
/**
 * Build
 */
void Manager::Build() {
  LOG_CODE_ERROR() << "This method is not supported";
}