// Construct a composite surface closure from an OSL closure tree.
// The tree is processed with a unit weight, after which the sampling
// CDF over the gathered closures is prepared.
CompositeSurfaceClosure::CompositeSurfaceClosure(
    const BSDF*                 osl_bsdf,
    const OSL::ClosureColor*    ci)
  : m_osl_bsdf(osl_bsdf)
{
    // A null BSDF is a programmer error, not a runtime condition.
    assert(m_osl_bsdf);

    const Color3f unit_weight(1.0f);
    process_closure_tree(ci, unit_weight);
    compute_cdf();
}
// Construct a composite subsurface closure from an OSL closure tree.
// Mirrors CompositeSurfaceClosure's construction: process the tree with
// a unit weight, then build the sampling CDF.
CompositeSubsurfaceClosure::CompositeSubsurfaceClosure(
    const OSL::ClosureColor*    ci)
{
    const Color3f unit_weight(1.0f);
    process_closure_tree(ci, unit_weight);
    compute_cdf();
}
// Train a WaldBoost cascade of up to weak_count_ decision stumps.
//
// data_pos: F x N_pos   (one feature per row, one sample per column)
// data_neg: F x N_neg   (same layout; may be shrunk in place as easy
//                        negatives are pruned between boosting rounds)
//
// Each round: pick the (feature, threshold, polarity) stump with minimal
// weighted error, update sample weights AdaBoost-style, record a cascade
// rejection threshold from the positive traces, and drop negatives that
// the cascade already rejects. Stops early when the loss vanishes or no
// better-than-chance stump remains.
//
// NOTE(review): samples are read via at<unsigned char>, so the data must
// be 8-bit by the time the weight-update loops run — either CV_8U on
// entry or converted by quantize_data. TODO confirm quantize_data
// produces CV_8U output.
void WaldBoost::fit(Mat& data_pos, Mat& data_neg)
{
    // data_pos: F x N_pos
    // data_neg: F x N_neg
    // every feature corresponds to row
    // every sample corresponds to column
    assert(data_pos.rows >= weak_count_);
    assert(data_pos.rows == data_neg.rows);

    // Each feature may be selected at most once; chosen features are
    // masked out of later rounds.
    std::vector<bool> feature_ignore;
    for (int i = 0; i < data_pos.rows; ++i)
    {
        feature_ignore.push_back(false);
    }

    // Uniform initial weights: each class carries total mass 1/2.
    Mat1f pos_weights(1, data_pos.cols, 1.0f / (2 * data_pos.cols));
    Mat1f neg_weights(1, data_neg.cols, 1.0f / (2 * data_neg.cols));
    // Per-sample running score sum(alpha * h(x)) across rounds.
    Mat1f pos_trace(1, data_pos.cols, 0.0f);
    Mat1f neg_trace(1, data_neg.cols, 0.0f);

    // Non-8-bit input is quantized into n_bins levels so thresholds can
    // be searched over a fixed histogram.
    bool quantize = false;
    if (data_pos.type() != CV_8U)
    {
        std::cerr << "quantize" << std::endl;
        quantize = true;
    }

    Mat1f data_min, data_step;
    int n_bins = 256;
    if (quantize)
    {
        compute_min_step(data_pos, data_neg, n_bins, data_min, data_step);
        quantize_data(data_pos, data_min, data_step);
        quantize_data(data_neg, data_min, data_step);
    }

    std::cerr << "pos=" << data_pos.cols << " neg=" << data_neg.cols << std::endl;
    for (int i = 0; i < weak_count_; ++i)
    {
        // Train weak learner with lowest error using weights
        double min_err = DBL_MAX;
        int min_feature_ind = -1;
        int min_polarity = 0;
        int threshold_q = 0;      // threshold as a bin index
        float min_threshold = 0;  // threshold in original feature units
        //#pragma omp parallel for
        for (int feat_i = 0; feat_i < data_pos.rows; ++feat_i)
        {
            if (feature_ignore[feat_i])
                continue;

            // Construct cdf
            // Presumably cumulative weight mass per quantized bin for
            // this feature — TODO confirm compute_cdf's contract.
            Mat1f pos_cdf(1, n_bins), neg_cdf(1, n_bins);
            compute_cdf(data_pos.row(feat_i), pos_weights, pos_cdf);
            compute_cdf(data_neg.row(feat_i), neg_weights, neg_cdf);

            // Weighted error of thresholding at each bin, for both
            // polarities: err_direct predicts +1 above the threshold,
            // err_backward is its complement.
            float neg_total = (float)sum(neg_weights)[0];
            Mat1f err_direct = pos_cdf + neg_total - neg_cdf;
            Mat1f err_backward = 1.0f - err_direct;
            int idx1[2], idx2[2];
            double err1, err2;
            // Only the minimum and its location are requested.
            minMaxIdx(err_direct, &err1, NULL, idx1);
            minMaxIdx(err_backward, &err2, NULL, idx2);
            //#pragma omp critical
            {
                if (min(err1, err2) < min_err)
                {
                    if (err1 < err2)
                    {
                        min_err = err1;
                        min_polarity = +1;
                        threshold_q = idx1[1];  // column index = bin
                    }
                    else
                    {
                        min_err = err2;
                        min_polarity = -1;
                        threshold_q = idx2[1];
                    }
                    min_feature_ind = feat_i;
                    // Map the bin index back to feature units; +.5f
                    // places the threshold at the bin center.
                    if (quantize)
                    {
                        min_threshold = data_min(feat_i, 0) + data_step(feat_i, 0) * (threshold_q + .5f);
                    }
                    else
                    {
                        min_threshold = threshold_q + .5f;
                    }
                }
            }
        }

        // Standard AdaBoost stump weight; min_err > 0.5 (checked below)
        // would make this negative and triggers early stopping.
        float alpha = .5f * (float)log((1 - min_err) / min_err);
        alphas_.push_back(alpha);
        feature_indices_.push_back(min_feature_ind);
        thresholds_.push_back(min_threshold);
        polarities_.push_back(min_polarity);
        feature_ignore[min_feature_ind] = true;

        double loss = 0;
        // Update positive weights
        // Misclassified positives (label == -1) gain weight via
        // exp(-alpha * label); traces accumulate the margin.
        for (int j = 0; j < data_pos.cols; ++j)
        {
            int val = data_pos.at<unsigned char>(min_feature_ind, j);
            int label = min_polarity * (val - threshold_q) >= 0 ? +1 : -1;
            pos_weights(0, j) *= exp(-alpha * label);
            pos_trace(0, j) += alpha * label;
            loss += exp(-pos_trace(0, j)) / (2.0f * data_pos.cols);
        }

        // Update negative weights
        // Sign conventions mirror the positive loop: a negative scored
        // +1 is a mistake, so its weight grows via exp(+alpha * label).
        for (int j = 0; j < data_neg.cols; ++j)
        {
            int val = data_neg.at<unsigned char>(min_feature_ind, j);
            int label = min_polarity * (val - threshold_q) >= 0 ? +1 : -1;
            neg_weights(0, j) *= exp(alpha * label);
            neg_trace(0, j) += alpha * label;
            loss += exp(+neg_trace(0, j)) / (2.0f * data_neg.cols);
        }

        // Cascade threshold = minimum positive trace (only the min
        // output of minMaxIdx is requested): no positive sample would
        // be rejected at this stage.
        double cascade_threshold = -1;
        minMaxIdx(pos_trace, &cascade_threshold);
        cascade_thresholds_.push_back((float)cascade_threshold);

        std::cerr << "i=" << std::setw(4) << i;
        std::cerr << " feat=" << std::setw(5) << min_feature_ind;
        std::cerr << " thr=" << std::setw(3) << threshold_q;
        std::cerr << " casthr=" << std::fixed << std::setprecision(3) << cascade_threshold;
        std::cerr << " alpha=" << std::fixed << std::setprecision(3) << alpha
            << " err=" << std::fixed << std::setprecision(3) << min_err
            << " loss=" << std::scientific << loss << std::endl;

        // Positive-sample pruning intentionally disabled (kept for
        // reference); only negatives are pruned below.
        //int pos = 0;
        //for (int j = 0; j < data_pos.cols; ++j) {
        //    if (pos_trace(0, j) > cascade_threshold - 0.5) {
        //        pos_trace(0, pos) = pos_trace(0, j);
        //        data_pos.col(j).copyTo(data_pos.col(pos));
        //        pos_weights(0, pos) = pos_weights(0, j);
        //        pos += 1;
        //    }
        //}
        //std::cerr << "pos " << data_pos.cols << "/" << pos << std::endl;
        //pos_trace = pos_trace.colRange(0, pos);
        //data_pos = data_pos.colRange(0, pos);
        //pos_weights = pos_weights.colRange(0, pos);

        // Compact negatives in place, keeping only those whose trace is
        // still within 0.5 of the cascade threshold — i.e. negatives the
        // cascade has not already safely rejected.
        int pos = 0;
        for (int j = 0; j < data_neg.cols; ++j)
        {
            if (neg_trace(0, j) > cascade_threshold - 0.5)
            {
                neg_trace(0, pos) = neg_trace(0, j);
                data_neg.col(j).copyTo(data_neg.col(pos));
                neg_weights(0, pos) = neg_weights(0, j);
                pos += 1;
            }
        }
        std::cerr << "neg " << data_neg.cols << "/" << pos << std::endl;
        // colRange views the surviving prefix; headers now report the
        // reduced column count on the next round.
        neg_trace = neg_trace.colRange(0, pos);
        data_neg = data_neg.colRange(0, pos);
        neg_weights = neg_weights.colRange(0, pos);

        // Stop when the loss has effectively vanished or the best
        // remaining stump is worse than chance; truncate the ensemble.
        if (loss < 1e-50 || min_err > 0.5)
        {
            std::cerr << "Stopping early" << std::endl;
            weak_count_ = i + 1;
            break;
        }

        // Normalize weights
        // so the combined positive+negative mass sums to 1.
        double z = (sum(pos_weights) + sum(neg_weights))[0];
        pos_weights /= z;
        neg_weights /= z;
    }
}