double CMT::ExponentialFunction::inverse(double data) const { return log(data - mEpsilon); }
void colorize(const Image<uint32_t> &input, const Image<uint32_t> &strokes, Image<uint32_t> &output) { int w = input.width(); int h = input.height(); int x,y; int max_d = floor(log(min(h,w))/log(2)-2); float scale_factor = 1.0f/( pow(2,max_d-1) ); int padded_w = ceil(w*scale_factor)*pow(2,max_d-1); int padded_h = ceil(h*scale_factor)*pow(2,max_d-1); // RGB 2 YUV and padarray Image<float> yuv_fused(padded_w,padded_h,3); hl_fuse_yuv(input, strokes, yuv_fused); // Extract Strokes mask Image<float> stroke_mask(padded_w, padded_h); hl_nonzero(strokes,stroke_mask); Image<float> result(padded_w,padded_h,3); int n = padded_h; int m = padded_w; int k = 1; Tensor3d D,G,I; Tensor3d Dx,Dy,iDx,iDy; MG smk; G.set(n,m,k); D.set(n,m,k); I.set(n,m,k); int in_itr_num = 5; int out_itr_num = 1; Dx.set(n,m,k-1); Dy.set(n,m,k-1); iDx.set(n,m,k-1); iDy.set(n,m,k-1); // Fill in label mask and luminance channels for ( y = 0; y<n; y++){ for ( x = 0; x<m; x++){ I(y,x,0) = stroke_mask(x,y); G(y,x,0) = yuv_fused(x,y,0); I(y,x,0) = !I(y,x,0); } } // Write output luminance for ( y=0; y<n; y++){ for ( x=0; x<m; x++){ result(x,y,0)=G(y,x,0); } } smk.set(n,m,k,max_d); smk.setI(I) ; smk.setG(G); smk.setFlow(Dx,Dy,iDx,iDy); // Solve chrominance for (int t=1; t<3; t++){ for ( y=0; y<n; y++){ for ( x=0; x<m; x++){ D(y,x,0) = yuv_fused(x,y,t); smk.P()(y,x,0) = yuv_fused(x,y,t); D(y,x,0) *= (!I(y,x,0)); } } smk.Div() = D ; Tensor3d tP2; for (int itr=0; itr<out_itr_num; itr++){ smk.setDepth(max_d); Field_MGN(&smk, in_itr_num, 2) ; smk.setDepth(ceil(max_d/2)); Field_MGN(&smk, in_itr_num, 2) ; smk.setDepth(2); Field_MGN(&smk, in_itr_num, 2) ; smk.setDepth(1); Field_MGN(&smk, in_itr_num, 4) ; } tP2 = smk.P(); for ( y=0; y<n; y++){ for ( x=0; x<m; x++){ result(x,y,t) = tP2(y,x,0); } } } hl_yuv2rgb(result,output); }
typename return_type<T_y, T_loc, T_scale>::type normal_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { static const char* function("stan::math::normal_ccdf_log"); typedef typename stan::partials_return_type<T_y, T_loc, T_scale>::type T_partials_return; using stan::math::check_positive; using stan::math::check_finite; using stan::math::check_not_nan; using stan::math::check_consistent_sizes; using stan::math::value_of; using stan::math::INV_SQRT_2; using std::log; using std::exp; T_partials_return ccdf_log(0.0); // check if any vectors are zero length if (!(stan::length(y) && stan::length(mu) && stan::length(sigma))) return ccdf_log; check_not_nan(function, "Random variable", y); check_finite(function, "Location parameter", mu); check_not_nan(function, "Scale parameter", sigma); check_positive(function, "Scale parameter", sigma); check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); OperandsAndPartials<T_y, T_loc, T_scale> operands_and_partials(y, mu, sigma); VectorView<const T_y> y_vec(y); VectorView<const T_loc> mu_vec(mu); VectorView<const T_scale> sigma_vec(sigma); size_t N = max_size(y, mu, sigma); double log_half = std::log(0.5); const double SQRT_TWO_OVER_PI = std::sqrt(2.0 / stan::math::pi()); for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = value_of(y_vec[n]); const T_partials_return mu_dbl = value_of(mu_vec[n]); const T_partials_return sigma_dbl = value_of(sigma_vec[n]); const T_partials_return scaled_diff = (y_dbl - mu_dbl) / (sigma_dbl * SQRT_2); T_partials_return one_m_erf; if (scaled_diff < -37.5 * INV_SQRT_2) one_m_erf = 2.0; else if (scaled_diff < -5.0 * INV_SQRT_2) one_m_erf = 2.0 - erfc(-scaled_diff); else if (scaled_diff > 8.25 * INV_SQRT_2) one_m_erf = 0.0; else one_m_erf = 1.0 - erf(scaled_diff); // log ccdf ccdf_log += log_half + log(one_m_erf); // gradients if (contains_nonconstant_struct<T_y, T_loc, T_scale>::value) { const T_partials_return rep_deriv_div_sigma = scaled_diff > 8.25 * INV_SQRT_2 ? std::numeric_limits<double>::infinity() : SQRT_TWO_OVER_PI * exp(-scaled_diff * scaled_diff) / one_m_erf / sigma_dbl; if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] -= rep_deriv_div_sigma; if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x2[n] += rep_deriv_div_sigma; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n] += rep_deriv_div_sigma * scaled_diff * stan::math::SQRT_2; } } return operands_and_partials.value(ccdf_log); }
TEST(prob_transform, lb_f) { EXPECT_FLOAT_EQ(log(3.0 - 2.0), stan::prob::lb_free(3.0,2.0)); EXPECT_FLOAT_EQ(1.7, stan::prob::lb_free(1.7, -std::numeric_limits<double>::infinity())); }
typename return_type<T_y, T_loc, T_scale>::type lognormal_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { static const char* function("stan::math::lognormal_log"); typedef typename stan::partials_return_type<T_y, T_loc, T_scale>::type T_partials_return; using stan::is_constant_struct; using stan::math::check_not_nan; using stan::math::check_finite; using stan::math::check_positive_finite; using stan::math::check_nonnegative; using stan::math::check_consistent_sizes; using stan::math::value_of; using stan::math::include_summand; // check if any vectors are zero length if (!(stan::length(y) && stan::length(mu) && stan::length(sigma))) return 0.0; // set up return value accumulator T_partials_return logp(0.0); // validate args (here done over var, which should be OK) check_not_nan(function, "Random variable", y); check_nonnegative(function, "Random variable", y); check_finite(function, "Location parameter", mu); check_positive_finite(function, "Scale parameter", sigma); check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); VectorView<const T_y> y_vec(y); VectorView<const T_loc> mu_vec(mu); VectorView<const T_scale> sigma_vec(sigma); size_t N = max_size(y, mu, sigma); for (size_t n = 0; n < length(y); n++) if (value_of(y_vec[n]) <= 0) return LOG_ZERO; OperandsAndPartials<T_y, T_loc, T_scale> operands_and_partials(y, mu, sigma); using stan::math::square; using std::log; using stan::math::NEG_LOG_SQRT_TWO_PI; using std::log; VectorBuilder<include_summand<propto, T_scale>::value, T_partials_return, T_scale> log_sigma(length(sigma)); if (include_summand<propto, T_scale>::value) { for (size_t n = 0; n < length(sigma); n++) log_sigma[n] = log(value_of(sigma_vec[n])); } VectorBuilder<include_summand<propto, T_y, T_loc, T_scale>::value, T_partials_return, T_scale> inv_sigma(length(sigma)); VectorBuilder<include_summand<propto, T_y, T_loc, T_scale>::value, T_partials_return, T_scale> inv_sigma_sq(length(sigma)); if (include_summand<propto, T_y, T_loc, T_scale>::value) { for (size_t n = 0; n < length(sigma); n++) inv_sigma[n] = 1 / value_of(sigma_vec[n]); } if (include_summand<propto, T_y, T_loc, T_scale>::value) { for (size_t n = 0; n < length(sigma); n++) inv_sigma_sq[n] = inv_sigma[n] * inv_sigma[n]; } VectorBuilder<include_summand<propto, T_y, T_loc, T_scale>::value, T_partials_return, T_y> log_y(length(y)); if (include_summand<propto, T_y, T_loc, T_scale>::value) { for (size_t n = 0; n < length(y); n++) log_y[n] = log(value_of(y_vec[n])); } VectorBuilder<!is_constant_struct<T_y>::value, T_partials_return, T_y> inv_y(length(y)); if (!is_constant_struct<T_y>::value) { for (size_t n = 0; n < length(y); n++) inv_y[n] = 1 / value_of(y_vec[n]); } if (include_summand<propto>::value) logp += N * NEG_LOG_SQRT_TWO_PI; for (size_t n = 0; n < N; n++) { const T_partials_return mu_dbl = value_of(mu_vec[n]); T_partials_return logy_m_mu(0); if (include_summand<propto, T_y, T_loc, T_scale>::value) logy_m_mu = log_y[n] - mu_dbl; T_partials_return logy_m_mu_sq = logy_m_mu * logy_m_mu; T_partials_return logy_m_mu_div_sigma(0); if (contains_nonconstant_struct<T_y, T_loc, T_scale>::value) logy_m_mu_div_sigma = logy_m_mu * inv_sigma_sq[n]; // log probability if (include_summand<propto, T_scale>::value) logp -= log_sigma[n]; if (include_summand<propto, T_y>::value) logp -= log_y[n]; if (include_summand<propto, T_y, T_loc, T_scale>::value) logp -= 0.5 * logy_m_mu_sq * inv_sigma_sq[n]; // gradients if (!is_constant_struct<T_y>::value) 
operands_and_partials.d_x1[n] -= (1 + logy_m_mu_div_sigma) * inv_y[n]; if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x2[n] += logy_m_mu_div_sigma; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n] += (logy_m_mu_div_sigma * logy_m_mu - 1) * inv_sigma[n]; } return operands_and_partials.to_var(logp, y, mu, sigma); }
inline typename boost::math::tools::promote_args<T>::type log2(const T a) { using std::log; return log(a) / LOG_2; }
typename return_type<T_y,T_loc,T_scale,T_shape>::type skew_normal_log(const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) { static const char* function("stan::prob::skew_normal_log"); typedef typename stan::partials_return_type<T_y,T_loc, T_scale,T_shape>::type T_partials_return; using std::log; using stan::is_constant_struct; using stan::math::check_positive; using stan::math::check_finite; using stan::math::check_not_nan; using stan::math::check_consistent_sizes; using stan::math::value_of; using stan::prob::include_summand; // check if any vectors are zero length if (!(stan::length(y) && stan::length(mu) && stan::length(sigma) && stan::length(alpha))) return 0.0; // set up return value accumulator T_partials_return logp(0.0); // validate args (here done over var, which should be OK) check_not_nan(function, "Random variable", y); check_finite(function, "Location parameter", mu); check_finite(function, "Shape parameter", alpha); check_positive(function, "Scale parameter", sigma); check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma, "Shape parameter", alpha); // check if no variables are involved and prop-to if (!include_summand<propto,T_y,T_loc,T_scale,T_shape>::value) return 0.0; // set up template expressions wrapping scalars into vector views agrad::OperandsAndPartials<T_y, T_loc, T_scale, T_shape> operands_and_partials(y, mu, sigma, alpha); using boost::math::erfc; using boost::math::erf; VectorView<const T_y> y_vec(y); VectorView<const T_loc> mu_vec(mu); VectorView<const T_scale> sigma_vec(sigma); VectorView<const T_shape> alpha_vec(alpha); size_t N = max_size(y, mu, sigma, alpha); VectorBuilder<true, T_partials_return, T_scale> inv_sigma(length(sigma)); VectorBuilder<include_summand<propto,T_scale>::value, T_partials_return, T_scale> log_sigma(length(sigma)); for (size_t i = 0; i < length(sigma); i++) { inv_sigma[i] = 1.0 / value_of(sigma_vec[i]); if (include_summand<propto,T_scale>::value) log_sigma[i] = log(value_of(sigma_vec[i])); } for (size_t n = 0; n < N; n++) { // pull out values of arguments const T_partials_return y_dbl = value_of(y_vec[n]); const T_partials_return mu_dbl = value_of(mu_vec[n]); const T_partials_return sigma_dbl = value_of(sigma_vec[n]); const T_partials_return alpha_dbl = value_of(alpha_vec[n]); // reusable subexpression values const T_partials_return y_minus_mu_over_sigma = (y_dbl - mu_dbl) * inv_sigma[n]; const double pi_dbl = stan::math::pi(); // log probability if (include_summand<propto>::value) logp -= 0.5 * log(2.0 * pi_dbl); if (include_summand<propto, T_scale>::value) logp -= log(sigma_dbl); if (include_summand<propto,T_y, T_loc, T_scale>::value) logp -= y_minus_mu_over_sigma * y_minus_mu_over_sigma / 2.0; if (include_summand<propto,T_y,T_loc,T_scale,T_shape>::value) logp += log(erfc(-alpha_dbl * y_minus_mu_over_sigma / std::sqrt(2.0))); // gradients T_partials_return deriv_logerf = 2.0 / std::sqrt(pi_dbl) * exp(-alpha_dbl * y_minus_mu_over_sigma / std::sqrt(2.0) * alpha_dbl * y_minus_mu_over_sigma / std::sqrt(2.0)) / (1 + erf(alpha_dbl * y_minus_mu_over_sigma / std::sqrt(2.0))); if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] += -y_minus_mu_over_sigma / sigma_dbl + deriv_logerf * alpha_dbl / (sigma_dbl * std::sqrt(2.0)); if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x2[n] += y_minus_mu_over_sigma / sigma_dbl + deriv_logerf * -alpha_dbl / (sigma_dbl * std::sqrt(2.0)); if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n]
+= -1.0 / sigma_dbl + y_minus_mu_over_sigma * y_minus_mu_over_sigma / sigma_dbl - deriv_logerf * y_minus_mu_over_sigma * alpha_dbl / (sigma_dbl * std::sqrt(2.0)); if (!is_constant_struct<T_shape>::value) operands_and_partials.d_x4[n] += deriv_logerf * y_minus_mu_over_sigma / std::sqrt(2.0); } return operands_and_partials.to_var(logp,y,mu,sigma,alpha); }
template <typename Scalar> Scalar log2(Scalar v) { using std::log; return log(v)/log(Scalar(2)); }
typename return_type<T_y,T_loc,T_scale>::type double_exponential_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { static const char* function = "stan::prob::double_exponential_ccdf_log(%1%)"; using stan::math::check_finite; using stan::math::check_not_nan; using stan::math::check_positive; using stan::math::check_consistent_sizes; using stan::math::value_of; double ccdf_log(0.0); // check if any vectors are zero length if (!(stan::length(y) && stan::length(mu) && stan::length(sigma))) return ccdf_log; if(!check_not_nan(function, y, "Random variable", &ccdf_log)) return ccdf_log; if(!check_finite(function, mu, "Location parameter", &ccdf_log)) return ccdf_log; if(!check_finite(function, sigma, "Scale parameter", &ccdf_log)) return ccdf_log; if(!check_positive(function, sigma, "Scale parameter", &ccdf_log)) return ccdf_log; if (!(check_consistent_sizes(function, y, mu, sigma, "Random variable", "Location parameter", "Scale Parameter", &ccdf_log))) return ccdf_log; using std::log; using std::exp; agrad::OperandsAndPartials<T_y, T_loc, T_scale> operands_and_partials(y, mu, sigma); VectorView<const T_y> y_vec(y); VectorView<const T_loc> mu_vec(mu); VectorView<const T_scale> sigma_vec(sigma); const double log_half = std::log(0.5); size_t N = max_size(y, mu, sigma); for (size_t n = 0; n < N; n++) { const double y_dbl = value_of(y_vec[n]); const double mu_dbl = value_of(mu_vec[n]); const double sigma_dbl = value_of(sigma_vec[n]); const double scaled_diff = (y_dbl - mu_dbl) / sigma_dbl; const double inv_sigma = 1.0 / sigma_dbl; if(y_dbl < mu_dbl) { //log ccdf ccdf_log += log(1.0 - 0.5 * exp(scaled_diff)); //gradients const double rep_deriv = 1.0 / (2.0 * exp(-scaled_diff) - 1.0); if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] -= rep_deriv * inv_sigma; if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x2[n] += rep_deriv * inv_sigma; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n] += rep_deriv * scaled_diff * inv_sigma; } else { // log ccdf ccdf_log += log_half - scaled_diff; // gradients if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] -= inv_sigma; if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x2[n] += inv_sigma; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n] += scaled_diff * inv_sigma; } } return operands_and_partials.to_var(ccdf_log); }
/** * Returns the base two logarithm of the argument (C99, C++11). * * The function is defined by: * * <code>log2(a) = log(a) / std::log(2.0)</code>. * * @param[in] u argument * @return base two logarithm of argument */ inline double log2(double u) { using std::log; return log(u) / LOG_2; }
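/* A minimal worked check of the identity documented above (a sketch, assuming LOG_2 is the precomputed std::log(2.0) constant used by the snippet; it is defined locally here so the example is self-contained, and log2_via_log is a hypothetical name for this example only). */
#include <cassert>
#include <cmath>
static const double LOG_2 = std::log(2.0); /* assumed stand-in for the library constant */
inline double log2_via_log(double u) { using std::log; return log(u) / LOG_2; }
int main() { assert(std::fabs(log2_via_log(8.0) - 3.0) < 1e-12); /* log2(8) = 3 */ assert(log2_via_log(1.0) == 0.0); /* log2(1) = 0 */ return 0; }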
void BaseReverseMode<Base>::reverse_local_computation(size_t ind_num, size_t dep_num) { using std::sin; using std::cos; using std::acos; using std::sqrt; using std::pow; using std::log; using std::exp; DerivativeInfo<locint, Base> info; if (!trace) { warning_NoTraceSet(); } if (ind_num != trace->get_num_ind()) { warning_NumberInconsistent("independent", ind_num, trace->get_num_ind()); } if (dep_num != trace->get_num_dep()) { warning_NumberInconsistent("dependent", dep_num, trace->get_num_dep()); } size_t ind_count = trace->get_num_ind(); size_t dep_count = trace->get_num_dep(); locint res; double coval; trace->init_reverse(); opbyte op = trace->get_next_op_r(); while (op != start_of_tape) { info.clear(); info.opcode = op; switch (op) { case start_of_tape: case end_of_tape: break; case assign_ind: res = trace->get_next_loc_r();; trace->get_next_val_r(); if (ind_count == 0) { // TODO(warning) } ind_count--; indep_index_map[res] = ind_count; break; case assign_dep: res = trace->get_next_loc_r(); dep_value[res] = trace->get_next_val_r(); init_dep_deriv(res); if (dep_count == 0) { // TODO(warning) } dep_count--; dep_index_map[res] = dep_count; break; case assign_param: info.r = trace->get_next_loc_r(); trace->get_next_param_r(); break; case assign_d: info.r = trace->get_next_loc_r(); trace->get_next_coval_r(); break; case assign_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.dx = 1.0; break; case comp_eq: case comp_lt: trace->get_next_loc_r(); trace->get_next_loc_r(); trace->get_next_coval_r(); break; case eq_plus_a: case plus_a_a: info.r = trace->get_next_loc_r(); info.y = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.dx = 1.0; info.dy = 1.0; PSEUDO_BINARY break; case eq_plus_d: case plus_d_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); trace->get_next_coval_r(); info.dx = 1.0; break; case eq_minus_a: case minus_a_a: info.r = trace->get_next_loc_r(); info.y = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.dx = 1.0; info.dy = -1.0; PSEUDO_BINARY break; case minus_d_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); trace->get_next_coval_r(); info.dx = -1.0; break; case eq_mult_a: case mult_a_a: info.r = trace->get_next_loc_r(); info.y = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vy = trace->get_next_val_r(); info.vx = trace->get_next_val_r(); info.dx = info.vy; info.dy = info.vx; info.pxy = 1.0; PSEUDO_BINARY break; case eq_mult_d: case mult_d_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.dx = trace->get_next_coval_r(); break; case eq_div_a: case div_a_a: info.r = trace->get_next_loc_r(); info.y = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vy = trace->get_next_val_r(); info.vx = trace->get_next_val_r(); info.dx = 1.0 / info.vy; info.dy = -info.vx / (info.vy*info.vy); info.pxy = -1.0 / (info.vy*info.vy); info.pyy = 2.0 * info.vx / (info.vy*info.vy*info.vy); info.pxyy = 2.0 / (info.vy * info.vy * info.vy); info.pyyy = -6.0 * info.vx / (info.vy*info.vy*info.vy*info.vy); PSEUDO_BINARY break; case div_d_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); coval = trace->get_next_coval_r(); info.dx = -coval / (info.vx*info.vx); info.pxx = 2.0 * coval / (info.vx*info.vx*info.vx); info.pxxx = -6.0 * coval / (info.vx*info.vx*info.vx*info.vx); break; case sin_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); info.dx = 
cos(info.vx); info.pxx = -sin(info.vx); info.pxxx = -cos(info.vx); break; case cos_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); info.dx = -sin(info.vx); info.pxx = -cos(info.vx); info.pxxx = sin(info.vx); break; case asin_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); { Base t = sqrt(1.0 - info.vx * info.vx); info.dx = 1.0 / t; info.pxx = info.vx / (t * t * t); info.pxxx = (2.0*info.vx*info.vx+1.0) / (t*t*t*t*t); } break; case acos_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); { Base t = -sqrt(1.0 - info.vx * info.vx); info.dx = 1.0 / t; info.pxx = info.vx / (t * t * t); info.pxxx = (2.0*info.vx*info.vx+1.0) / (t*t*t*t*t); } break; case atan_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); { Base t = 1.0 + info.vx * info.vx; info.dx = 1.0 / t; info.pxx = -2.0 * info.vx / (t * t); info.pxxx = (6.0*info.vx*info.vx-2.0)/(t*t*t); } break; case sqrt_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); if (info.vx != 0.0) { info.dx = 0.5/sqrt(info.vx); info.pxx = -0.5 * info.dx / info.vx; info.pxxx = -1.5 * info.pxx / info.vx; } break; case exp_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); info.dx = exp(info.vx); info.pxx = info.dx; info.pxxx = info.dx; break; case log_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); info.dx = 1.0 / info.vx; info.pxx = - info.dx * info.dx; info.pxxx = -2.0 * info.pxx / info.vx; break; case pow_a_a: info.r = trace->get_next_loc_r(); info.y = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vy = trace->get_next_val_r(); info.vx = trace->get_next_val_r(); { Base t = pow(info.vx, info.vy); info.dx = info.vy * t / info.vx; info.pxx = (info.vy - 1) * info.dx / info.vx; info.dy = log(info.vx) * t; info.pyy = log(info.vx) * info.dy; info.pxy = (info.vy * log(info.vx) + 1) * t / info.vx; info.pxxx = (info.vy - 2) * info.pxx / info.vx; info.pxxy = (info.vy-1)*info.pxy/info.vx + info.vy*t/(info.vx*info.vx); info.pxyy = info.dx*log(info.vx)*log(info.vx) + 2*log(info.vx)*t/info.vx; info.pyyy = log(info.vx) * info.pyy; } PSEUDO_BINARY break; case pow_a_d: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); coval = trace->get_next_coval_r(); info.coval = coval; { Base t = pow(info.vx, coval); info.dx = coval * t / info.vx; info.pxx = (coval - 1) * info.dx / info.vx; info.pxxx = (coval - 2) * info.pxx / info.vx; } break; case pow_d_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); coval = trace->get_next_coval_r(); info.coval = coval; { Base t = pow(coval, info.vx); info.dx = log(coval) * t; info.pxx = log(coval) * info.dx; info.pxxx = log(coval) * info.pxx; } break; case erf_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); info.dx = 2.0/sqrt(PI)*exp(-info.vx*info.vx); info.pxx = info.dx * (-2.0 * info.vx); info.pxxx = info.dx * (4.0 * info.vx * info.vx - 2); break; case fabs_a: info.r = trace->get_next_loc_r(); info.x = trace->get_next_loc_r(); info.vx = trace->get_next_val_r(); if (info.vx > 0) { info.dx = 1.0; } else if (info.vx < 0) { info.dx = -1.0; } else { // TODO(muwang) : warning message } 
break; case rmpi_send: case rmpi_recv: break; default: warning_UnrecognizedOpcode((int)op); } // call to inherited virtual functions process_sac(info); op = trace->get_next_op_r(); } // this is only for preaccumulation info.clear(); info.opcode = op; process_sac(info); trace->end_reverse(); return; }
typename return_type<T_y, T_scale_succ, T_scale_fail>::type beta_ccdf_log(const T_y& y, const T_scale_succ& alpha, const T_scale_fail& beta) { typedef typename stan::partials_return_type<T_y, T_scale_succ, T_scale_fail>::type T_partials_return; // Size checks if ( !( stan::length(y) && stan::length(alpha) && stan::length(beta) ) ) return 0.0; // Error checks static const char* function("stan::math::beta_ccdf_log"); using stan::math::check_positive_finite; using stan::math::check_not_nan; using stan::math::check_nonnegative; using stan::math::check_less_or_equal; using boost::math::tools::promote_args; using stan::math::check_consistent_sizes; using stan::math::value_of; T_partials_return ccdf_log(0.0); check_positive_finite(function, "First shape parameter", alpha); check_positive_finite(function, "Second shape parameter", beta); check_not_nan(function, "Random variable", y); check_nonnegative(function, "Random variable", y); check_less_or_equal(function, "Random variable", y, 1); check_consistent_sizes(function, "Random variable", y, "First shape parameter", alpha, "Second shape parameter", beta); // Wrap arguments in vectors VectorView<const T_y> y_vec(y); VectorView<const T_scale_succ> alpha_vec(alpha); VectorView<const T_scale_fail> beta_vec(beta); size_t N = max_size(y, alpha, beta); OperandsAndPartials<T_y, T_scale_succ, T_scale_fail> operands_and_partials(y, alpha, beta); // Compute CCDF and its gradients using stan::math::inc_beta; using stan::math::digamma; using stan::math::lbeta; using std::pow; using std::exp; using std::log; // Cache a few expensive function calls if alpha or beta is a parameter VectorBuilder<contains_nonconstant_struct<T_scale_succ, T_scale_fail>::value, T_partials_return, T_scale_succ, T_scale_fail> digamma_alpha_vec(max_size(alpha, beta)); VectorBuilder<contains_nonconstant_struct<T_scale_succ, T_scale_fail>::value, T_partials_return, T_scale_succ, T_scale_fail> digamma_beta_vec(max_size(alpha, beta)); VectorBuilder<contains_nonconstant_struct<T_scale_succ, T_scale_fail>::value, T_partials_return, T_scale_succ, T_scale_fail> digamma_sum_vec(max_size(alpha, beta)); if (contains_nonconstant_struct<T_scale_succ, T_scale_fail>::value) { for (size_t i = 0; i < N; i++) { const T_partials_return alpha_dbl = value_of(alpha_vec[i]); const T_partials_return beta_dbl = value_of(beta_vec[i]); digamma_alpha_vec[i] = digamma(alpha_dbl); digamma_beta_vec[i] = digamma(beta_dbl); digamma_sum_vec[i] = digamma(alpha_dbl + beta_dbl); } } // Compute vectorized CDFLog and gradient for (size_t n = 0; n < N; n++) { // Pull out values const T_partials_return y_dbl = value_of(y_vec[n]); const T_partials_return alpha_dbl = value_of(alpha_vec[n]); const T_partials_return beta_dbl = value_of(beta_vec[n]); const T_partials_return betafunc_dbl = exp(lbeta(alpha_dbl, beta_dbl)); // Compute const T_partials_return Pn = 1.0 - inc_beta(alpha_dbl, beta_dbl, y_dbl); ccdf_log += log(Pn); if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] -= pow(1-y_dbl, beta_dbl-1) * pow(y_dbl, alpha_dbl-1) / betafunc_dbl / Pn; T_partials_return g1 = 0; T_partials_return g2 = 0; if (contains_nonconstant_struct<T_scale_succ, T_scale_fail>::value) { stan::math::grad_reg_inc_beta(g1, g2, alpha_dbl, beta_dbl, y_dbl, digamma_alpha_vec[n], digamma_beta_vec[n], digamma_sum_vec[n], betafunc_dbl); } if (!is_constant_struct<T_scale_succ>::value) operands_and_partials.d_x2[n] -= g1 / Pn; if (!is_constant_struct<T_scale_fail>::value) operands_and_partials.d_x3[n] -= g2 / Pn; } return
operands_and_partials.to_var(ccdf_log, y, alpha, beta); }
bool fabric_t::build_lid_map(bool determine_lmc) { ///Always start clean clear_lidmap(); { const lmc_t max_lmc_lid = lmc > 0 ? (1 << lmc) - 1 : 0; /** * Walk every entity and build lid map */ for( entities_t::iterator itr = entities.begin(), eitr = entities.end(); itr != eitr; ++itr ) { assert(itr->second.lid() > 0); entity_t &entity = itr->second; const lid_t blid = entity.lid(); assert(blid > 0); if(entity.get_type() == port_type::HCA) for(lmc_t i = 0; i <= max_lmc_lid; ++i) { #ifndef NDEBUG std::cerr << "set HCA lid " << entity.label() << "(" << itr->first << std::hex << ") = " << regex::string_cast_uint(blid + i) << std::endl; #endif assert(lidmap.find(blid + i) == lidmap.end()); lidmap[blid + i] = &entity; } else if(entity.get_type() == port_type::TCA) { ///Switchs do not get a second LID #ifndef NDEBUG { entitiesmap_lid_t::iterator itr = lidmap.find(blid); if(itr != lidmap.end()) { std::cerr << "attempting fabric lmc = " << regex::string_cast_uint(lmc) << std::endl; std::cerr << "found existing port " << itr->second->label() << " on lid " << blid << std::endl; std::cerr << "was going to set port " << entity.label() << " on lid " << blid << std::endl; abort(); } } std::cerr << "set TCA lid " << entity.label() << "(" << itr->first << std::hex << ") = " << regex::string_cast_uint(blid) << std::endl; #endif lidmap[blid] = &entity; } else abort(); ///unknown port type? } } /** * attempt to determine LMC value of the subnet * this can be done with reasonable accuracy since * all lmc lid values are sequential for lmc > 0 * * this is the brute force solution O(ports)*lmc * basically, walk every port and see if there are any other lid+lmc * until the smalled lid, lid+lmc*, lid sequence is found * then use lmc=log2(found) * * LIDs = BASELID to BASELID + (2^LMC - 1) */ if(determine_lmc) { using std::log; const lmc_t current_lmc = lmc; assert(portmap.size() > 1); ///Start off assuming max LMC value lmc_t max_lmc_lid = (1 << MAX_LMC_VALUE) - 1; entitiesmap_lid_t::const_iterator lid_end = lidmap.end(); for( portmap_guidport_t::const_iterator itr = portmap.begin(), eitr = portmap.end(); itr != eitr && max_lmc_lid > 0; ++itr ) ///Only search LIDs of HCAs if(itr->second->type == port_type::HCA) { #ifndef NDEBUG std::cerr << "search port " << itr->second->label() << std::endl; #endif ///walk until highest seen lmc value offset for(lmc_t i = 1; i <= max_lmc_lid; ++i) { assert(itr->second->lid > 0); assert(lidmap.find(itr->second->lid) != lid_end); ///is there lid on base lid + lmc offset if(lidmap.find(itr->second->lid + i) != lid_end) { #ifndef NDEBUG std::cerr << "found base lid " << itr->second->lid << " + " << regex::string_cast_uint(i) << " = " << itr->second->lid + i << " => collision\n"; #endif ///found collision, found new max lid offset max_lmc_lid = i - 1; break; } #ifndef NDEBUG else std::cerr << "found base lid " << itr->second->lid << " + " << regex::string_cast_uint(i) << " = " << itr->second->lid + i << " => no collision\n"; #endif } } #if __cplusplus <= 199711L lmc = (log(max_lmc_lid) / log(2)) + 1; #else lmc = std::log2(max_lmc_lid) + 1; #endif assert(lmc <= MAX_LMC_VALUE); #ifndef NDEBUG std::cerr << "fabric lmc = " << regex::string_cast_uint(lmc) << " max lmc offset = " << regex::string_cast_uint(max_lmc_lid) << std::endl; #endif ///LMC is new number so lid map is incomplete if(current_lmc != lmc) return build_lid_map(false); } return true; }
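/* A small self-contained sketch of the LMC arithmetic described in the comments above (hypothetical numbers: with LMC = 2 a port owns LIDs base .. base + 3, so the collision scan stops at max_lmc_lid = 3 = 2^LMC - 1; the pre-C++11 branch then recovers LMC as log(max)/log(2) + 1, truncated on assignment). */
#include <cassert>
#include <cmath>
int main() { const unsigned max_lmc_lid = 3; /* largest collision-free offset found by the scan */ const unsigned lmc = static_cast<unsigned>(std::log(static_cast<double>(max_lmc_lid)) / std::log(2.0) + 1); assert(lmc == 2); /* recovers the original LMC */ assert((1u << lmc) - 1 == max_lmc_lid); /* LID range is BASELID .. BASELID + (2^LMC - 1) */ return 0; }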
typename return_type<T_y,T_loc,T_scale>::type normal_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { static const char* function = "stan::prob::normal_log(%1%)"; using std::log; using stan::is_constant_struct; using stan::math::check_positive; using stan::math::check_finite; using stan::math::check_not_nan; using stan::math::check_consistent_sizes; using stan::math::value_of; using stan::prob::include_summand; // check if any vectors are zero length if (!(stan::length(y) && stan::length(mu) && stan::length(sigma))) return 0.0; // set up return value accumulator double logp(0.0); // validate args (here done over var, which should be OK) if (!check_not_nan(function, y, "Random variable", &logp)) return logp; if (!check_finite(function, mu, "Location parameter", &logp)) return logp; if (!check_positive(function, sigma, "Scale parameter", &logp)) return logp; if (!(check_consistent_sizes(function, y,mu,sigma, "Random variable","Location parameter","Scale parameter", &logp))) return logp; // check if no variables are involved and prop-to if (!include_summand<propto,T_y,T_loc,T_scale>::value) return 0.0; // set up template expressions wrapping scalars into vector views agrad::OperandsAndPartials<T_y, T_loc, T_scale> operands_and_partials(y, mu, sigma); VectorView<const T_y> y_vec(y); VectorView<const T_loc> mu_vec(mu); VectorView<const T_scale> sigma_vec(sigma); size_t N = max_size(y, mu, sigma); DoubleVectorView<true,is_vector<T_scale>::value> inv_sigma(length(sigma)); DoubleVectorView<include_summand<propto,T_scale>::value,is_vector<T_scale>::value> log_sigma(length(sigma)); for (size_t i = 0; i < length(sigma); i++) { inv_sigma[i] = 1.0 / value_of(sigma_vec[i]); if (include_summand<propto,T_scale>::value) log_sigma[i] = log(value_of(sigma_vec[i])); } for (size_t n = 0; n < N; n++) { // pull out values of arguments const double y_dbl = value_of(y_vec[n]); const double mu_dbl = value_of(mu_vec[n]); // reusable subexpression values const double y_minus_mu_over_sigma = (y_dbl - mu_dbl) * inv_sigma[n]; const double y_minus_mu_over_sigma_squared = y_minus_mu_over_sigma * y_minus_mu_over_sigma; static double NEGATIVE_HALF = - 0.5; // log probability if (include_summand<propto>::value) logp += NEG_LOG_SQRT_TWO_PI; if (include_summand<propto,T_scale>::value) logp -= log_sigma[n]; if (include_summand<propto,T_y,T_loc,T_scale>::value) logp += NEGATIVE_HALF * y_minus_mu_over_sigma_squared; // gradients double scaled_diff = inv_sigma[n] * y_minus_mu_over_sigma; if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] -= scaled_diff; if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x2[n] += scaled_diff; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n] += -inv_sigma[n] + inv_sigma[n] * y_minus_mu_over_sigma_squared; } return operands_and_partials.to_var(logp); }
bool CMT::Mixture::train( const MatrixXd& data, const Parameters& parameters, const Component::Parameters& componentParameters) { if(data.rows() != dim()) throw Exception("Data has wrong dimensionality."); if(parameters.initialize && !initialized()) initialize(data, parameters, componentParameters); ArrayXXd logJoint(numComponents(), data.cols()); Array<double, Dynamic, 1> postSum; Array<double, 1, Dynamic> logLik; ArrayXXd post; ArrayXXd weights; double avgLogLoss = numeric_limits<double>::infinity(); double avgLogLossNew; for(int i = 0; i < parameters.maxIter; ++i) { // compute joint probability of data and assignments (E) #pragma omp parallel for for(int k = 0; k < numComponents(); ++k) logJoint.row(k) = mComponents[k]->logLikelihood(data) + log(mPriors[k]); // compute normalized posterior (E) logLik = logSumExp(logJoint); // average negative log-likelihood in bits per component avgLogLossNew = -logLik.mean() / log(2.) / dim(); if(parameters.verbosity > 0) cout << setw(6) << i << setw(14) << setprecision(7) << avgLogLossNew << endl; // test for convergence if(avgLogLoss - avgLogLossNew < parameters.threshold) return true; avgLogLoss = avgLogLossNew; // compute normalized posterior (E) post = (logJoint.rowwise() - logLik).exp(); postSum = post.rowwise().sum(); weights = post.colwise() / postSum; // optimize prior weights (M) if(parameters.trainPriors) { mPriors = postSum / data.cols() + parameters.regularizePriors; mPriors /= mPriors.sum(); } // optimize components (M) if(parameters.trainComponents) { #pragma omp parallel for for(int k = 0; k < numComponents(); ++k) mComponents[k]->train(data, weights.row(k), componentParameters); } else { return true; } } if(parameters.verbosity > 0) cout << setw(6) << parameters.maxIter << setw(14) << setprecision(7) << evaluate(data) << endl; return false; }
typename return_type<T_y, T_scale>::type rayleigh_log(const T_y& y, const T_scale& sigma) { static const char* function("stan::math::rayleigh_log"); typedef typename stan::partials_return_type<T_y, T_scale>::type T_partials_return; using std::log; using stan::is_constant_struct; using stan::math::check_positive; using stan::math::check_not_nan; using stan::math::check_consistent_sizes; using stan::math::value_of; using stan::math::include_summand; // check if any vectors are zero length if (!(stan::length(y) && stan::length(sigma))) return 0.0; // set up return value accumulator T_partials_return logp(0.0); // validate args (here done over var, which should be OK) check_not_nan(function, "Random variable", y); check_positive(function, "Scale parameter", sigma); check_positive(function, "Random variable", y); check_consistent_sizes(function, "Random variable", y, "Scale parameter", sigma); // check if no variables are involved and prop-to if (!include_summand<propto, T_y, T_scale>::value) return 0.0; // set up template expressions wrapping scalars into vector views OperandsAndPartials<T_y, T_scale> operands_and_partials(y, sigma); VectorView<const T_y> y_vec(y); VectorView<const T_scale> sigma_vec(sigma); size_t N = max_size(y, sigma); VectorBuilder<true, T_partials_return, T_scale> inv_sigma(length(sigma)); VectorBuilder<include_summand<propto, T_scale>::value, T_partials_return, T_scale> log_sigma(length(sigma)); for (size_t i = 0; i < length(sigma); i++) { inv_sigma[i] = 1.0 / value_of(sigma_vec[i]); if (include_summand<propto, T_scale>::value) log_sigma[i] = log(value_of(sigma_vec[i])); } for (size_t n = 0; n < N; n++) { // pull out values of arguments const T_partials_return y_dbl = value_of(y_vec[n]); // reusable subexpression values const T_partials_return y_over_sigma = y_dbl * inv_sigma[n]; static double NEGATIVE_HALF = -0.5; // log probability if (include_summand<propto, T_scale>::value) logp -= 2.0 * log_sigma[n]; if (include_summand<propto, T_y>::value) logp += log(y_dbl); /* if (include_summand<propto, T_y, T_scale>::value) */ logp += NEGATIVE_HALF * y_over_sigma * y_over_sigma; // gradients T_partials_return scaled_diff = inv_sigma[n] * y_over_sigma; if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] += 1.0 / y_dbl - scaled_diff; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x2[n] += y_over_sigma * scaled_diff - 2.0 * inv_sigma[n]; } return operands_and_partials.to_var(logp, y, sigma); }
bool CMT::Mixture::train( const MatrixXd& data, const MatrixXd& dataValid, const Parameters& parameters, const Component::Parameters& componentParameters) { if(parameters.initialize && !initialized()) initialize(data, parameters, componentParameters); ArrayXXd logJoint(numComponents(), data.cols()); Array<double, Dynamic, 1> postSum; Array<double, 1, Dynamic> logLik; ArrayXXd post; ArrayXXd weights; // training and validation log-loss for checking convergence double avgLogLoss = numeric_limits<double>::infinity(); double avgLogLossNew; double avgLogLossValid = evaluate(dataValid); double avgLogLossValidNew = avgLogLossValid; int counter = 0; // backup model parameters VectorXd priors = mPriors; vector<Component*> components; for(int k = 0; k < numComponents(); ++k) components.push_back(mComponents[k]->copy()); for(int i = 0; i < parameters.maxIter; ++i) { // compute joint probability of data and assignments (E) #pragma omp parallel for for(int k = 0; k < numComponents(); ++k) logJoint.row(k) = mComponents[k]->logLikelihood(data) + log(mPriors[k]); // compute normalized posterior (E) logLik = logSumExp(logJoint); // average negative log-likelihood in bits per component avgLogLossNew = -logLik.mean() / log(2.) / dim(); if(parameters.verbosity > 0) { if(i % parameters.valIter == 0) { // print training and validation error cout << setw(6) << i; cout << setw(14) << setprecision(7) << avgLogLossNew; cout << setw(14) << setprecision(7) << avgLogLossValidNew << endl; } else { // print training error cout << setw(6) << i << setw(14) << setprecision(7) << avgLogLossNew << endl; } } // test for convergence if(avgLogLoss - avgLogLossNew < parameters.threshold) return true; avgLogLoss = avgLogLossNew; // compute normalized posterior (E) post = (logJoint.rowwise() - logLik).exp(); postSum = post.rowwise().sum(); weights = post.colwise() / postSum; // optimize prior weights (M) if(parameters.trainPriors) { mPriors = postSum / data.cols() + parameters.regularizePriors; mPriors /= mPriors.sum(); } // optimize components (M) if(parameters.trainComponents) { #pragma omp parallel for for(int k = 0; k < numComponents(); ++k) mComponents[k]->train(data, weights.row(k), componentParameters); } else { return true; } if((i + 1) % parameters.valIter == 0) { // check validation error avgLogLossValidNew = evaluate(dataValid); if(avgLogLossValidNew < avgLogLossValid) { // backup new found model parameters priors = mPriors; for(int k = 0; k < numComponents(); ++k) *components[k] = *mComponents[k]; avgLogLossValid = avgLogLossValidNew; } else { counter++; if(parameters.valLookAhead > 0 && counter >= parameters.valLookAhead) { // set parameters to best parameters found during training mPriors = priors; for(int k = 0; k < numComponents(); ++k) { *mComponents[k] = *components[k]; delete components[k]; } return true; } } } } if(parameters.verbosity > 0) cout << setw(6) << parameters.maxIter << setw(11) << setprecision(5) << evaluate(data) << endl; return false; }
typename return_type<T_y,T_loc,T_scale>::type double_exponential_log(const T_y& y, const T_loc& mu, const T_scale& sigma) { static const char* function("stan::prob::double_exponential_log"); typedef typename stan::partials_return_type<T_y,T_loc,T_scale>::type T_partials_return; using stan::is_constant_struct; using stan::math::check_finite; using stan::math::check_positive_finite; using stan::math::check_consistent_sizes; using stan::math::value_of; using stan::prob::include_summand; using std::log; using std::fabs; using stan::math::sign; // check if any vectors are zero length if (!(stan::length(y) && stan::length(mu) && stan::length(sigma))) return 0.0; // set up return value accumulator T_partials_return logp(0.0); check_finite(function, "Random variable", y); check_finite(function, "Location parameter", mu); check_positive_finite(function, "Scale parameter", sigma); check_consistent_sizes(function, "Random variable", y, "Location parameter", mu, "Scale parameter", sigma); // check if no variables are involved and prop-to if (!include_summand<propto,T_y,T_loc,T_scale>::value) return 0.0; // set up template expressions wrapping scalars into vector views VectorView<const T_y> y_vec(y); VectorView<const T_loc> mu_vec(mu); VectorView<const T_scale> sigma_vec(sigma); size_t N = max_size(y, mu, sigma); agrad::OperandsAndPartials<T_y,T_loc,T_scale> operands_and_partials(y, mu, sigma); VectorBuilder<include_summand<propto,T_y,T_loc,T_scale>::value, T_partials_return, T_scale> inv_sigma(length(sigma)); VectorBuilder<!is_constant_struct<T_scale>::value, T_partials_return, T_scale> inv_sigma_squared(length(sigma)); VectorBuilder<include_summand<propto,T_scale>::value, T_partials_return, T_scale> log_sigma(length(sigma)); for (size_t i = 0; i < length(sigma); i++) { const T_partials_return sigma_dbl = value_of(sigma_vec[i]); if (include_summand<propto,T_y,T_loc,T_scale>::value) inv_sigma[i] = 1.0 / sigma_dbl; if (include_summand<propto,T_scale>::value) log_sigma[i] = log(value_of(sigma_vec[i])); if (!is_constant_struct<T_scale>::value) inv_sigma_squared[i] = inv_sigma[i] * inv_sigma[i]; } for (size_t n = 0; n < N; n++) { const T_partials_return y_dbl = value_of(y_vec[n]); const T_partials_return mu_dbl = value_of(mu_vec[n]); // reusable subexpression values const T_partials_return y_m_mu = y_dbl - mu_dbl; const T_partials_return fabs_y_m_mu = fabs(y_m_mu); // log probability if (include_summand<propto>::value) logp += NEG_LOG_TWO; if (include_summand<propto,T_scale>::value) logp -= log_sigma[n]; if (include_summand<propto,T_y,T_loc,T_scale>::value) logp -= fabs_y_m_mu * inv_sigma[n]; // gradients T_partials_return sign_y_m_mu_times_inv_sigma(0); if (contains_nonconstant_struct<T_y,T_loc>::value) sign_y_m_mu_times_inv_sigma = sign(y_m_mu) * inv_sigma[n]; if (!is_constant_struct<T_y>::value) { operands_and_partials.d_x1[n] -= sign_y_m_mu_times_inv_sigma; } if (!is_constant_struct<T_loc>::value) { operands_and_partials.d_x2[n] += sign_y_m_mu_times_inv_sigma; } if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n] += -inv_sigma[n] + fabs_y_m_mu * inv_sigma_squared[n]; } return operands_and_partials.to_var(logp,y,mu,sigma); }
T log2(const T &v)/*{{{*/ { using std::log; return log(v)/log(2.0); }/*}}}*/
typename return_type<T_shape, T_inv_scale>::type neg_binomial_cdf_log(const T_n& n, const T_shape& alpha, const T_inv_scale& beta) { static const char* function("stan::math::neg_binomial_cdf_log"); typedef typename stan::partials_return_type<T_n, T_shape, T_inv_scale>::type T_partials_return; using stan::math::check_positive_finite; using stan::math::check_nonnegative; using stan::math::check_consistent_sizes; using stan::math::include_summand; // Ensure non-zero arugment lengths if (!(stan::length(n) && stan::length(alpha) && stan::length(beta))) return 0.0; T_partials_return P(0.0); // Validate arguments check_positive_finite(function, "Shape parameter", alpha); check_positive_finite(function, "Inverse scale parameter", beta); check_consistent_sizes(function, "Failures variable", n, "Shape parameter", alpha, "Inverse scale parameter", beta); // Wrap arguments in vector views VectorView<const T_n> n_vec(n); VectorView<const T_shape> alpha_vec(alpha); VectorView<const T_inv_scale> beta_vec(beta); size_t size = max_size(n, alpha, beta); // Compute vectorized cdf_log and gradient using stan::math::value_of; using stan::math::inc_beta; using stan::math::digamma; using stan::math::lbeta; using std::exp; using std::pow; using std::log; using std::exp; OperandsAndPartials<T_shape, T_inv_scale> operands_and_partials(alpha, beta); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::length(n); i++) { if (value_of(n_vec[i]) < 0) return operands_and_partials.value(stan::math::negative_infinity()); } // Cache a few expensive function calls if alpha is a parameter VectorBuilder<!is_constant_struct<T_shape>::value, T_partials_return, T_shape> digammaN_vec(stan::length(alpha)); VectorBuilder<!is_constant_struct<T_shape>::value, T_partials_return, T_shape> digammaAlpha_vec(stan::length(alpha)); VectorBuilder<!is_constant_struct<T_shape>::value, T_partials_return, T_shape> digammaSum_vec(stan::length(alpha)); if (!is_constant_struct<T_shape>::value) { for (size_t i = 0; i < stan::length(alpha); i++) { const T_partials_return n_dbl = value_of(n_vec[i]); const T_partials_return alpha_dbl = value_of(alpha_vec[i]); digammaN_vec[i] = digamma(n_dbl + 1); digammaAlpha_vec[i] = digamma(alpha_dbl); digammaSum_vec[i] = digamma(n_dbl + alpha_dbl + 1); } } for (size_t i = 0; i < size; i++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (value_of(n_vec[i]) == std::numeric_limits<int>::max()) return operands_and_partials.value(0.0); const T_partials_return n_dbl = value_of(n_vec[i]); const T_partials_return alpha_dbl = value_of(alpha_vec[i]); const T_partials_return beta_dbl = value_of(beta_vec[i]); const T_partials_return p_dbl = beta_dbl / (1.0 + beta_dbl); const T_partials_return d_dbl = 1.0 / ( (1.0 + beta_dbl) * (1.0 + beta_dbl) ); const T_partials_return Pi = inc_beta(alpha_dbl, n_dbl + 1.0, p_dbl); const T_partials_return beta_func = exp(lbeta(n_dbl + 1, alpha_dbl)); P += log(Pi); if (!is_constant_struct<T_shape>::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; stan::math::grad_reg_inc_beta(g1, g2, alpha_dbl, n_dbl + 1, p_dbl, digammaAlpha_vec[i], digammaN_vec[i], digammaSum_vec[i], beta_func); operands_and_partials.d_x1[i] += g1 / Pi; } if (!is_constant_struct<T_inv_scale>::value) operands_and_partials.d_x2[i] += d_dbl * pow(1-p_dbl, n_dbl) * pow(p_dbl, alpha_dbl-1) / beta_func / Pi; } return operands_and_partials.value(P); }
TEST(prob_transform, positive_f) { EXPECT_FLOAT_EQ(log(0.5), stan::prob::positive_free(0.5)); }
int main( int argc, const char * argv[] ) { args_t args = args_t( argc, argv ); coverage_t coverage; vector< cov_t > variants; vector< pair< int, int > > data; // accumulate the data at each position in a linked list { cov_citer cit; bam1_t * in_bam = bam_init1(); while ( args.bamin->next( in_bam ) ) { aligned_t read( in_bam ); coverage.include( read ); } for ( cit = coverage.begin(); cit != coverage.end(); ++cit ) { int cov = 0; for ( obs_citer it = cit->obs.begin(); it != cit->obs.end(); ++it ) cov += it->second; for ( obs_citer it = cit->obs.begin(); it != cit->obs.end(); ++it ) if ( it->second ) data.push_back( make_pair( cov, it->second ) ); #if 0 obs_citer it = cit->obs.begin(); int cov = 0, maj; if ( it == cit->obs.end() ) continue; maj = it->second; cov += maj; for ( ++it; it != cit->obs.end(); ++it ) { if ( it->second > maj ) maj = it->second; cov += it->second; } data.push_back( make_pair( cov, maj ) ); #endif } bam_destroy1( in_bam ); } // learn a joint multi-binomial model for the mutation rate classes { cov_iter cit; double lg_L, aicc, bg, lg_bg, lg_invbg; rateclass_t rc( data ); vector< pair< double, double > > params; rc( lg_L, aicc, params ); bg = params[ 0 ].second; lg_bg = log( bg ); lg_invbg = log( 1.0 - bg ); params_json_dump( stderr, lg_L, aicc, params ); // cerr << "background: " << bg << endl; // determine which variants are above background and those which are not for ( cit = coverage.begin(); cit != coverage.end(); ++cit ) { if ( cit->op == INS ) continue; int cov = 0; for ( obs_citer it = cit->obs.begin(); it != cit->obs.end(); ++it ) cov += it->second; for ( obs_iter it = cit->obs.begin(); it != cit->obs.end(); ++it ) { const double p = prob_background( lg_bg, lg_invbg, cov, it->second ); if ( p < args.cutoff ) { cout << cit->col << "\t" << cov << "\t" << it->second; for ( unsigned i = 0; i < it->first.size(); ++i ) cout << bits2nuc( it->first[ i ] ); cout << ":" << p << endl; it->second = 1; } else { it->second = 0; } } #if 0 variants.push_back( *cit ); #endif } } return 0; // write out the input reads, but only with "real" variants this time { bam1_t * const in_bam = bam_init1(); if ( !args.bamin->seek0() ) { cerr << "unable to seek( 0 )" << endl; exit( 1 ); } if ( !args.bamout->write_header( args.bamin->hdr ) ) { cerr << "error writing out BAM header" << endl; exit( 1 ); } while ( args.bamin->next( in_bam ) ) { aligned_t read( in_bam ); bam1_t * const out_bam = punchout_read( in_bam, variants, read ); if ( !out_bam->core.l_qseq ) continue; if ( !args.bamout->write( out_bam ) ) { cerr << "error writing out read" << endl; exit( 1 ); } bam_destroy1( out_bam ); } bam_destroy1( in_bam ); } return 0; }
typename return_type<T_y, T_dof, T_scale>::type scaled_inv_chi_square_ccdf_log(const T_y& y, const T_dof& nu, const T_scale& s) { typedef typename stan::partials_return_type<T_y, T_dof, T_scale>::type T_partials_return; if (!(stan::length(y) && stan::length(nu) && stan::length(s))) return 0.0; static const char* function("scaled_inv_chi_square_ccdf_log"); using std::exp; T_partials_return P(0.0); check_not_nan(function, "Random variable", y); check_nonnegative(function, "Random variable", y); check_positive_finite(function, "Degrees of freedom parameter", nu); check_positive_finite(function, "Scale parameter", s); check_consistent_sizes(function, "Random variable", y, "Degrees of freedom parameter", nu, "Scale parameter", s); VectorView<const T_y> y_vec(y); VectorView<const T_dof> nu_vec(nu); VectorView<const T_scale> s_vec(s); size_t N = max_size(y, nu, s); OperandsAndPartials<T_y, T_dof, T_scale> operands_and_partials(y, nu, s); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::length(y); i++) { if (value_of(y_vec[i]) == 0) return operands_and_partials.value(0.0); } using std::exp; using std::pow; using std::log; VectorBuilder<!is_constant_struct<T_dof>::value, T_partials_return, T_dof> gamma_vec(stan::length(nu)); VectorBuilder<!is_constant_struct<T_dof>::value, T_partials_return, T_dof> digamma_vec(stan::length(nu)); if (!is_constant_struct<T_dof>::value) { for (size_t i = 0; i < stan::length(nu); i++) { const T_partials_return half_nu_dbl = 0.5 * value_of(nu_vec[i]); gamma_vec[i] = tgamma(half_nu_dbl); digamma_vec[i] = digamma(half_nu_dbl); } } for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (value_of(y_vec[n]) == std::numeric_limits<double>::infinity()) { return operands_and_partials.value(negative_infinity()); } const T_partials_return y_dbl = value_of(y_vec[n]); const T_partials_return y_inv_dbl = 1.0 / y_dbl; const T_partials_return half_nu_dbl = 0.5 * value_of(nu_vec[n]); const T_partials_return s_dbl = value_of(s_vec[n]); const T_partials_return half_s2_overx_dbl = 0.5 * s_dbl * s_dbl * y_inv_dbl; const T_partials_return half_nu_s2_overx_dbl = 2.0 * half_nu_dbl * half_s2_overx_dbl; const T_partials_return Pn = gamma_p(half_nu_dbl, half_nu_s2_overx_dbl); const T_partials_return gamma_p_deriv = exp(-half_nu_s2_overx_dbl) * pow(half_nu_s2_overx_dbl, half_nu_dbl-1) / tgamma(half_nu_dbl); P += log(Pn); if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] -= half_nu_s2_overx_dbl * y_inv_dbl * gamma_p_deriv / Pn; if (!is_constant_struct<T_dof>::value) operands_and_partials.d_x2[n] -= (0.5 * grad_reg_inc_gamma(half_nu_dbl, half_nu_s2_overx_dbl, gamma_vec[n], digamma_vec[n]) - half_s2_overx_dbl * gamma_p_deriv) / Pn; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x3[n] += 2.0 * half_nu_dbl * s_dbl * y_inv_dbl * gamma_p_deriv / Pn; } return operands_and_partials.value(P); }
typename return_type<T_y, T_dof, T_loc, T_scale>::type student_t_cdf_log(const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) { typedef typename stan::partials_return_type<T_y, T_dof, T_loc, T_scale>::type T_partials_return; // Size checks if (!(stan::length(y) && stan::length(nu) && stan::length(mu) && stan::length(sigma))) return 0.0; static const char* function("stan::math::student_t_cdf_log"); using stan::math::check_positive_finite; using stan::math::check_finite; using stan::math::check_not_nan; using stan::math::check_consistent_sizes; using stan::math::value_of; using std::exp; T_partials_return P(0.0); check_not_nan(function, "Random variable", y); check_positive_finite(function, "Degrees of freedom parameter", nu); check_finite(function, "Location parameter", mu); check_positive_finite(function, "Scale parameter", sigma); // Wrap arguments in vectors VectorView<const T_y> y_vec(y); VectorView<const T_dof> nu_vec(nu); VectorView<const T_loc> mu_vec(mu); VectorView<const T_scale> sigma_vec(sigma); size_t N = max_size(y, nu, mu, sigma); OperandsAndPartials<T_y, T_dof, T_loc, T_scale> operands_and_partials(y, nu, mu, sigma); // Explicit return for extreme values // The gradients are technically ill-defined, but treated as zero for (size_t i = 0; i < stan::length(y); i++) { if (value_of(y_vec[i]) == -std::numeric_limits<double>::infinity()) return operands_and_partials.value(stan::math::negative_infinity()); } using stan::math::digamma; using stan::math::lbeta; using stan::math::inc_beta; using std::pow; using std::exp; using std::log; // Cache a few expensive function calls if nu is a parameter T_partials_return digammaHalf = 0; VectorBuilder<!is_constant_struct<T_dof>::value, T_partials_return, T_dof> digamma_vec(stan::length(nu)); VectorBuilder<!is_constant_struct<T_dof>::value, T_partials_return, T_dof> digammaNu_vec(stan::length(nu)); VectorBuilder<!is_constant_struct<T_dof>::value, T_partials_return, T_dof> digammaNuPlusHalf_vec(stan::length(nu)); if (!is_constant_struct<T_dof>::value) { digammaHalf = digamma(0.5); for (size_t i = 0; i < stan::length(nu); i++) { const T_partials_return nu_dbl = value_of(nu_vec[i]); digammaNu_vec[i] = digamma(0.5 * nu_dbl); digammaNuPlusHalf_vec[i] = digamma(0.5 + 0.5 * nu_dbl); } } // Compute vectorized cdf_log and gradient for (size_t n = 0; n < N; n++) { // Explicit results for extreme values // The gradients are technically ill-defined, but treated as zero if (value_of(y_vec[n]) == std::numeric_limits<double>::infinity()) { continue; } const T_partials_return sigma_inv = 1.0 / value_of(sigma_vec[n]); const T_partials_return t = (value_of(y_vec[n]) - value_of(mu_vec[n])) * sigma_inv; const T_partials_return nu_dbl = value_of(nu_vec[n]); const T_partials_return q = nu_dbl / (t * t); const T_partials_return r = 1.0 / (1.0 + q); const T_partials_return J = 2 * r * r * q / t; const T_partials_return betaNuHalf = exp(lbeta(0.5, 0.5 * nu_dbl)); T_partials_return zJacobian = t > 0 ? - 0.5 : 0.5; if (q < 2) { T_partials_return z = inc_beta(0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r); const T_partials_return Pn = t > 0 ? 
1.0 - 0.5 * z : 0.5 * z; const T_partials_return d_ibeta = pow(r, -0.5) * pow(1.0 - r, 0.5*nu_dbl - 1) / betaNuHalf; P += log(Pn); if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] += - zJacobian * d_ibeta * J * sigma_inv / Pn; if (!is_constant_struct<T_dof>::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; stan::math::grad_reg_inc_beta(g1, g2, 0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r, digammaNu_vec[n], digammaHalf, digammaNuPlusHalf_vec[n], betaNuHalf); operands_and_partials.d_x2[n] += zJacobian * (d_ibeta * (r / t) * (r / t) + 0.5 * g1) / Pn; } if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x3[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x4[n] += zJacobian * d_ibeta * J * sigma_inv * t / Pn; } else { T_partials_return z = 1.0 - inc_beta((T_partials_return)0.5, 0.5*nu_dbl, r); zJacobian *= -1; const T_partials_return Pn = t > 0 ? 1.0 - 0.5 * z : 0.5 * z; T_partials_return d_ibeta = pow(1.0-r, 0.5*nu_dbl-1) * pow(r, -0.5) / betaNuHalf; P += log(Pn); if (!is_constant_struct<T_y>::value) operands_and_partials.d_x1[n] += zJacobian * d_ibeta * J * sigma_inv / Pn; if (!is_constant_struct<T_dof>::value) { T_partials_return g1 = 0; T_partials_return g2 = 0; stan::math::grad_reg_inc_beta(g1, g2, (T_partials_return)0.5, 0.5 * nu_dbl, r, digammaHalf, digammaNu_vec[n], digammaNuPlusHalf_vec[n], betaNuHalf); operands_and_partials.d_x2[n] += zJacobian * (- d_ibeta * (r / t) * (r / t) + 0.5 * g2) / Pn; } if (!is_constant_struct<T_loc>::value) operands_and_partials.d_x3[n] += - zJacobian * d_ibeta * J * sigma_inv / Pn; if (!is_constant_struct<T_scale>::value) operands_and_partials.d_x4[n] += - zJacobian * d_ibeta * J * sigma_inv * t / Pn; } } return operands_and_partials.value(P); }
typename return_type<T_y, T_dof>::type
inv_chi_square_cdf_log(const T_y& y, const T_dof& nu) {
  typedef typename stan::partials_return_type<T_y, T_dof>::type T_partials_return;

  // Size checks
  if (!(stan::length(y) && stan::length(nu)))
    return 0.0;

  // Error checks
  static const char* function("stan::math::inv_chi_square_cdf_log");

  using stan::math::check_positive_finite;
  using stan::math::check_not_nan;
  using stan::math::check_consistent_sizes;
  using stan::math::check_nonnegative;
  using boost::math::tools::promote_args;
  using stan::math::value_of;
  using std::exp;

  T_partials_return P(0.0);

  check_positive_finite(function, "Degrees of freedom parameter", nu);
  check_not_nan(function, "Random variable", y);
  check_nonnegative(function, "Random variable", y);
  check_consistent_sizes(function, "Random variable", y,
                         "Degrees of freedom parameter", nu);

  // Wrap arguments in vectors
  VectorView<const T_y> y_vec(y);
  VectorView<const T_dof> nu_vec(nu);
  size_t N = max_size(y, nu);

  OperandsAndPartials<T_y, T_dof> operands_and_partials(y, nu);

  // Explicit return for extreme values
  // The gradients are technically ill-defined, but treated as zero
  for (size_t i = 0; i < stan::length(y); i++)
    if (value_of(y_vec[i]) == 0)
      return operands_and_partials.to_var(stan::math::negative_infinity(), y, nu);

  // Compute cdf_log and its gradients
  using stan::math::gamma_q;
  using stan::math::digamma;
  using boost::math::tgamma;
  using std::pow;
  using std::log;

  // Cache a few expensive function calls if nu is a parameter
  VectorBuilder<!is_constant_struct<T_dof>::value, T_partials_return, T_dof>
    gamma_vec(stan::length(nu));
  VectorBuilder<!is_constant_struct<T_dof>::value, T_partials_return, T_dof>
    digamma_vec(stan::length(nu));

  if (!is_constant_struct<T_dof>::value) {
    for (size_t i = 0; i < stan::length(nu); i++) {
      const T_partials_return nu_dbl = value_of(nu_vec[i]);
      gamma_vec[i] = tgamma(0.5 * nu_dbl);
      digamma_vec[i] = digamma(0.5 * nu_dbl);
    }
  }

  // Compute vectorized cdf_log and gradient
  for (size_t n = 0; n < N; n++) {
    // Explicit results for extreme values
    // The gradients are technically ill-defined, but treated as zero
    if (value_of(y_vec[n]) == std::numeric_limits<double>::infinity()) {
      continue;
    }

    // Pull out values
    const T_partials_return y_dbl = value_of(y_vec[n]);
    const T_partials_return y_inv_dbl = 1.0 / y_dbl;
    const T_partials_return nu_dbl = value_of(nu_vec[n]);

    // Compute
    const T_partials_return Pn = gamma_q(0.5 * nu_dbl, 0.5 * y_inv_dbl);

    P += log(Pn);

    if (!is_constant_struct<T_y>::value)
      operands_and_partials.d_x1[n] += 0.5 * y_inv_dbl * y_inv_dbl
        * exp(-0.5*y_inv_dbl) * pow(0.5*y_inv_dbl, 0.5*nu_dbl-1)
        / tgamma(0.5*nu_dbl) / Pn;
    if (!is_constant_struct<T_dof>::value)
      operands_and_partials.d_x2[n]
        += 0.5 * stan::math::grad_reg_inc_gamma(0.5 * nu_dbl, 0.5 * y_inv_dbl,
                                                gamma_vec[n], digamma_vec[n]) / Pn;
  }
  return operands_and_partials.to_var(P, y, nu);
}
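// Sketch of the relation computed above (a standalone cross-check, not library code):
// for an inverse chi-square variate with nu degrees of freedom,
//   F(y) = Q(nu / 2, 1 / (2 y)),
// where Q is the regularized upper incomplete gamma function, i.e. exactly the
// gamma_q(0.5 * nu_dbl, 0.5 * y_inv_dbl) term whose log is accumulated into P.
// The helper name is hypothetical.
#include <boost/math/special_functions/gamma.hpp>
#include <cmath>

inline double inv_chi_square_cdf_log_reference(double y, double nu) {
  return std::log(boost::math::gamma_q(0.5 * nu, 0.5 / y));
}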
typename return_type<T_y, T_loc, T_scale, T_shape>::type
skew_normal_ccdf_log(const T_y& y, const T_loc& mu, const T_scale& sigma, const T_shape& alpha) {
  static const char* function("stan::math::skew_normal_ccdf_log");
  typedef typename stan::partials_return_type<T_y, T_loc, T_scale, T_shape>::type
    T_partials_return;

  using stan::math::check_positive;
  using stan::math::check_finite;
  using stan::math::check_not_nan;
  using stan::math::check_consistent_sizes;
  using stan::math::owens_t;
  using stan::math::value_of;

  T_partials_return ccdf_log(0.0);

  // check if any vectors are zero length
  if (!(stan::length(y) && stan::length(mu) && stan::length(sigma) && stan::length(alpha)))
    return ccdf_log;

  check_not_nan(function, "Random variable", y);
  check_finite(function, "Location parameter", mu);
  check_not_nan(function, "Scale parameter", sigma);
  check_positive(function, "Scale parameter", sigma);
  check_finite(function, "Shape parameter", alpha);
  check_not_nan(function, "Shape parameter", alpha);
  check_consistent_sizes(function, "Random variable", y,
                         "Location parameter", mu,
                         "Scale parameter", sigma,
                         "Shape parameter", alpha);

  OperandsAndPartials<T_y, T_loc, T_scale, T_shape>
    operands_and_partials(y, mu, sigma, alpha);

  using stan::math::SQRT_2;
  using stan::math::pi;
  using std::log;
  using std::exp;

  VectorView<const T_y> y_vec(y);
  VectorView<const T_loc> mu_vec(mu);
  VectorView<const T_scale> sigma_vec(sigma);
  VectorView<const T_shape> alpha_vec(alpha);
  size_t N = max_size(y, mu, sigma, alpha);
  const double SQRT_TWO_OVER_PI = std::sqrt(2.0 / stan::math::pi());

  for (size_t n = 0; n < N; n++) {
    const T_partials_return y_dbl = value_of(y_vec[n]);
    const T_partials_return mu_dbl = value_of(mu_vec[n]);
    const T_partials_return sigma_dbl = value_of(sigma_vec[n]);
    const T_partials_return alpha_dbl = value_of(alpha_vec[n]);
    const T_partials_return alpha_dbl_sq = alpha_dbl * alpha_dbl;
    const T_partials_return diff = (y_dbl - mu_dbl) / sigma_dbl;
    const T_partials_return diff_sq = diff * diff;
    const T_partials_return scaled_diff = diff / SQRT_2;
    const T_partials_return scaled_diff_sq = diff_sq * 0.5;
    const T_partials_return ccdf_log_ = 1.0 - 0.5 * erfc(-scaled_diff)
      + 2 * owens_t(diff, alpha_dbl);

    // ccdf_log
    ccdf_log += log(ccdf_log_);

    // gradients
    const T_partials_return deriv_erfc = SQRT_TWO_OVER_PI * 0.5
      * exp(-scaled_diff_sq) / sigma_dbl;
    const T_partials_return deriv_owens = erf(alpha_dbl * scaled_diff)
      * exp(-scaled_diff_sq) / SQRT_TWO_OVER_PI / (-2.0 * pi()) / sigma_dbl;
    const T_partials_return rep_deriv = (-2.0 * deriv_owens + deriv_erfc) / ccdf_log_;

    if (!is_constant_struct<T_y>::value)
      operands_and_partials.d_x1[n] -= rep_deriv;
    if (!is_constant_struct<T_loc>::value)
      operands_and_partials.d_x2[n] += rep_deriv;
    if (!is_constant_struct<T_scale>::value)
      operands_and_partials.d_x3[n] += rep_deriv * diff;
    if (!is_constant_struct<T_shape>::value)
      operands_and_partials.d_x4[n] -= -2.0 * exp(-0.5 * diff_sq * (1.0 + alpha_dbl_sq))
        / ((1 + alpha_dbl_sq) * 2.0 * pi()) / ccdf_log_;
  }
  return operands_and_partials.to_var(ccdf_log, y, mu, sigma, alpha);
}
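// A hedged cross-check of the survival function used above (a standalone sketch, not
// library code): the skew-normal CDF is Phi(d) - 2 * T(d, alpha) with d = (y - mu) / sigma,
// where T is Owen's T function, so the log-CCDF accumulated above is
//   log(1 - Phi(d) + 2 * T(d, alpha)).
// The helper name is hypothetical; it omits the gradient bookkeeping.
#include <boost/math/special_functions/owens_t.hpp>
#include <cmath>

inline double skew_normal_ccdf_log_reference(double y, double mu, double sigma, double alpha) {
  const double d = (y - mu) / sigma;
  const double Phi = 0.5 * std::erfc(-d / std::sqrt(2.0));  // standard normal CDF
  return std::log(1.0 - Phi + 2.0 * boost::math::owens_t(d, alpha));
}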
inline float degreeToRadian(float deg){ return deg/180*M_PI; }

const float a = 6378137;          // semi-major axis of the ellipsoid
const float e = 0.08181919106;    // first eccentricity of the ellipsoid
const float lc = degreeToRadian(3.f);
const float l0 = degreeToRadian(3.f);
const float phi1 = degreeToRadian(44.f);  // 1st automecoic parallel
const float phi2 = degreeToRadian(49.f);  // 2nd automecoic parallel
const float phi0 = degreeToRadian(46.5f); // latitude of origin
const float X0 = 700000;          // x coordinate at origin
const float Y0 = 6600000;         // y coordinate at origin

// Normals
const float gN1 = a/sqrt(1-e*e*sin(phi1)*sin(phi1));
const float gN2 = a/sqrt(1-e*e*sin(phi2)*sin(phi2));

// Isometric latitudes
const float gl1=log(tan(M_PI/4+phi1/2)*pow((1-e*sin(phi1))/(1+e*sin(phi1)),e/2));
const float gl2=log(tan(M_PI/4+phi2/2)*pow((1-e*sin(phi2))/(1+e*sin(phi2)),e/2));
const float gl0=log(tan(M_PI/4+phi0/2)*pow((1-e*sin(phi0))/(1+e*sin(phi0)),e/2));

// Projection exponent
const float n = (log((gN2*cos(phi2))/(gN1*cos(phi1))))/(gl1-gl2);

// Projection constant
const float c = ((gN1*cos(phi1))/n)*exp(n*gl1);

// Coordinate
const float ys = Y0 + c*exp(-n*gl0);

// Convert geographic coordinates (latitude, longitude in degrees) into
// cartesian coordinates (in kilometers) using the Lambert 93 projection.
pair<float,float> geoToLambert93(float latitude,float longitude)
{
  float phi = degreeToRadian(latitude);
  float l = degreeToRadian(longitude);
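  // The original body stops after the degree-to-radian conversion; the remainder below
  // is a hedged completion using the standard Lambert conformal conic direct formulas
  // and the constants (e, n, c, ys, X0, lc) defined above. The division by 1000 follows
  // the "(in kilometers)" comment on the function and is an assumption.

  // Isometric latitude of the point
  float gl = log(tan(M_PI/4 + phi/2) * pow((1 - e*sin(phi))/(1 + e*sin(phi)), e/2));

  // Radius and bearing in the projection plane
  float R = c * exp(-n * gl);
  float theta = n * (l - lc);

  // Projected coordinates in meters, relative to the false origin
  float x93 = X0 + R * sin(theta);
  float y93 = ys - R * cos(theta);

  return make_pair(x93 / 1000.f, y93 / 1000.f);  // scaled to kilometers
}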
typename return_type<T_y, T_shape, T_inv_scale>::type
gamma_log(const T_y& y, const T_shape& alpha, const T_inv_scale& beta) {
  static const char* function("gamma_log");
  typedef typename stan::partials_return_type<T_y, T_shape, T_inv_scale>::type
    T_partials_return;

  using stan::is_constant_struct;

  if (!(stan::length(y) && stan::length(alpha) && stan::length(beta)))
    return 0.0;

  T_partials_return logp(0.0);

  check_not_nan(function, "Random variable", y);
  check_positive_finite(function, "Shape parameter", alpha);
  check_positive_finite(function, "Inverse scale parameter", beta);
  check_consistent_sizes(function, "Random variable", y,
                         "Shape parameter", alpha,
                         "Inverse scale parameter", beta);

  if (!include_summand<propto, T_y, T_shape, T_inv_scale>::value)
    return 0.0;

  VectorView<const T_y> y_vec(y);
  VectorView<const T_shape> alpha_vec(alpha);
  VectorView<const T_inv_scale> beta_vec(beta);

  for (size_t n = 0; n < length(y); n++) {
    const T_partials_return y_dbl = value_of(y_vec[n]);
    if (y_dbl < 0)
      return LOG_ZERO;
  }

  size_t N = max_size(y, alpha, beta);
  OperandsAndPartials<T_y, T_shape, T_inv_scale>
    operands_and_partials(y, alpha, beta);

  using boost::math::lgamma;
  using boost::math::digamma;
  using std::log;

  VectorBuilder<include_summand<propto, T_y, T_shape>::value,
                T_partials_return, T_y> log_y(length(y));
  if (include_summand<propto, T_y, T_shape>::value) {
    for (size_t n = 0; n < length(y); n++) {
      if (value_of(y_vec[n]) > 0)
        log_y[n] = log(value_of(y_vec[n]));
    }
  }

  VectorBuilder<include_summand<propto, T_shape>::value,
                T_partials_return, T_shape> lgamma_alpha(length(alpha));
  VectorBuilder<!is_constant_struct<T_shape>::value,
                T_partials_return, T_shape> digamma_alpha(length(alpha));
  for (size_t n = 0; n < length(alpha); n++) {
    if (include_summand<propto, T_shape>::value)
      lgamma_alpha[n] = lgamma(value_of(alpha_vec[n]));
    if (!is_constant_struct<T_shape>::value)
      digamma_alpha[n] = digamma(value_of(alpha_vec[n]));
  }

  VectorBuilder<include_summand<propto, T_shape, T_inv_scale>::value,
                T_partials_return, T_inv_scale> log_beta(length(beta));
  if (include_summand<propto, T_shape, T_inv_scale>::value) {
    for (size_t n = 0; n < length(beta); n++)
      log_beta[n] = log(value_of(beta_vec[n]));
  }

  for (size_t n = 0; n < N; n++) {
    const T_partials_return y_dbl = value_of(y_vec[n]);
    const T_partials_return alpha_dbl = value_of(alpha_vec[n]);
    const T_partials_return beta_dbl = value_of(beta_vec[n]);

    if (include_summand<propto, T_shape>::value)
      logp -= lgamma_alpha[n];
    if (include_summand<propto, T_shape, T_inv_scale>::value)
      logp += alpha_dbl * log_beta[n];
    if (include_summand<propto, T_y, T_shape>::value)
      logp += (alpha_dbl-1.0) * log_y[n];
    if (include_summand<propto, T_y, T_inv_scale>::value)
      logp -= beta_dbl * y_dbl;

    if (!is_constant_struct<T_y>::value)
      operands_and_partials.d_x1[n] += (alpha_dbl-1)/y_dbl - beta_dbl;
    if (!is_constant_struct<T_shape>::value)
      operands_and_partials.d_x2[n] += -digamma_alpha[n] + log_beta[n] + log_y[n];
    if (!is_constant_struct<T_inv_scale>::value)
      operands_and_partials.d_x3[n] += alpha_dbl / beta_dbl - y_dbl;
  }
  return operands_and_partials.value(logp);
}
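// For reference, the target accumulated term by term above is the gamma log-density
// with rate (inverse scale) beta:
//   log p(y | alpha, beta) = alpha * log(beta) - lgamma(alpha) + (alpha - 1) * log(y) - beta * y.
// The double-only helper below is a hypothetical sketch; it keeps all constants rather
// than dropping them the way propto/include_summand does.
#include <cmath>

inline double gamma_log_reference(double y, double alpha, double beta) {
  return alpha * std::log(beta) - std::lgamma(alpha)
         + (alpha - 1.0) * std::log(y) - beta * y;
}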
typename return_type<T_y, T_shape, T_inv_scale>::type
gamma_ccdf_log(const T_y& y, const T_shape& alpha, const T_inv_scale& beta) {
  if (!(stan::length(y) && stan::length(alpha) && stan::length(beta)))
    return 0.0;

  typedef typename stan::partials_return_type<T_y, T_shape, T_inv_scale>::type
    T_partials_return;

  static const char* function("gamma_ccdf_log");

  using boost::math::tools::promote_args;
  using std::exp;

  T_partials_return P(0.0);

  check_positive_finite(function, "Shape parameter", alpha);
  check_positive_finite(function, "Inverse scale parameter", beta);
  check_not_nan(function, "Random variable", y);
  check_nonnegative(function, "Random variable", y);
  check_consistent_sizes(function, "Random variable", y,
                         "Shape parameter", alpha,
                         "Inverse scale parameter", beta);

  VectorView<const T_y> y_vec(y);
  VectorView<const T_shape> alpha_vec(alpha);
  VectorView<const T_inv_scale> beta_vec(beta);
  size_t N = max_size(y, alpha, beta);

  OperandsAndPartials<T_y, T_shape, T_inv_scale>
    operands_and_partials(y, alpha, beta);

  // Explicit return for extreme values
  // The gradients are technically ill-defined, but treated as zero
  for (size_t i = 0; i < stan::length(y); i++) {
    if (value_of(y_vec[i]) == 0)
      return operands_and_partials.value(0.0);
  }

  using boost::math::tgamma;
  using std::pow;
  using std::log;

  VectorBuilder<!is_constant_struct<T_shape>::value, T_partials_return, T_shape>
    gamma_vec(stan::length(alpha));
  VectorBuilder<!is_constant_struct<T_shape>::value, T_partials_return, T_shape>
    digamma_vec(stan::length(alpha));

  if (!is_constant_struct<T_shape>::value) {
    for (size_t i = 0; i < stan::length(alpha); i++) {
      const T_partials_return alpha_dbl = value_of(alpha_vec[i]);
      gamma_vec[i] = tgamma(alpha_dbl);
      digamma_vec[i] = digamma(alpha_dbl);
    }
  }

  for (size_t n = 0; n < N; n++) {
    // Explicit results for extreme values
    // The gradients are technically ill-defined, but treated as zero
    if (value_of(y_vec[n]) == std::numeric_limits<double>::infinity())
      return operands_and_partials.value(negative_infinity());

    const T_partials_return y_dbl = value_of(y_vec[n]);
    const T_partials_return alpha_dbl = value_of(alpha_vec[n]);
    const T_partials_return beta_dbl = value_of(beta_vec[n]);

    const T_partials_return Pn = gamma_q(alpha_dbl, beta_dbl * y_dbl);

    P += log(Pn);

    if (!is_constant_struct<T_y>::value)
      operands_and_partials.d_x1[n] -= beta_dbl * exp(-beta_dbl * y_dbl)
        * pow(beta_dbl * y_dbl, alpha_dbl-1) / tgamma(alpha_dbl) / Pn;
    if (!is_constant_struct<T_shape>::value)
      operands_and_partials.d_x2[n]
        += grad_reg_inc_gamma(alpha_dbl, beta_dbl * y_dbl,
                              gamma_vec[n], digamma_vec[n]) / Pn;
    if (!is_constant_struct<T_inv_scale>::value)
      operands_and_partials.d_x3[n] -= y_dbl * exp(-beta_dbl * y_dbl)
        * pow(beta_dbl * y_dbl, alpha_dbl-1) / tgamma(alpha_dbl) / Pn;
  }
  return operands_and_partials.value(P);
}
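// Sketch of the survival function whose log is accumulated above (a standalone
// cross-check, not library code): for a Gamma(alpha, beta) variate with rate beta,
//   S(y) = Q(alpha, beta * y),
// where Q is the regularized upper incomplete gamma function (the gamma_q call above).
// The helper name is hypothetical.
#include <boost/math/special_functions/gamma.hpp>
#include <cmath>

inline double gamma_ccdf_log_reference(double y, double alpha, double beta) {
  return std::log(boost::math::gamma_q(alpha, beta * y));
}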
double CMT::LogisticFunction::inverse(double data) const { return log((data - mEpsilon / 2.) / (1. - data - mEpsilon / 2.)); }