void init(
        const Shared & shared,
        const Group & group,
        rng_t &) {
    Shared post = shared.plus_group(group);
    score_coeff = -fast_log(1.f + post.inv_beta);
    score = -fast_lgamma(post.alpha)
          + post.alpha * (fast_log(post.inv_beta) + score_coeff);
    post_alpha = post.alpha;
}
float score_data(
        const Shared & shared,
        rng_t &) const {
    Shared post = shared.plus_group(*this);
    float score = fast_lgamma(post.alpha) - fast_lgamma(shared.alpha);
    score += shared.alpha * fast_log(shared.inv_beta)
           - post.alpha * fast_log(post.inv_beta);
    score += -log_prod;
    return score;
}
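For reference, the two snippets above appear to compute a Gamma-Poisson marginal likelihood. Assuming (not shown in the snippets) that plus_group accumulates the sufficient statistics as alpha_n = alpha + sum(x_i) and inv_beta_n = inv_beta + n, with inv_beta playing the role of the Gamma rate beta, and that log_prod = sum(log x_i!), the quantity returned by score_data is

\[
\log p(x_{1:n}) \;=\; \log\Gamma(\alpha_n) - \log\Gamma(\alpha)
\;+\; \alpha \log\beta - \alpha_n \log\beta_n
\;-\; \sum_{i=1}^{n} \log x_i!
\]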
/* Update embeddings & return likelihood */
real Update(real *vec_u, real *vec_v, real *vec_error, int label) {
    real x = 0, f, g;
    for (int c = 0; c != dim; c++) x += vec_u[c] * vec_v[c];
    f = FastSigmoid(x);
    g = (label - f) * rho;
    for (int c = 0; c != dim; c++) vec_error[c] += g * vec_v[c];
    for (int c = 0; c != dim; c++) vec_v[c] += g * vec_u[c];
    return label > 0 ? fast_log(f + LOG_MIN) : fast_log(1 - f + LOG_MIN);
}
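The snippet relies on FastSigmoid, rho (the learning rate), and dim, none of which are shown. As a rough sketch of how trainers in this style (word2vec/LINE-like) commonly approximate the sigmoid with a precomputed table, under the assumption that a clamped table lookup is acceptable:

#include <cmath>
#include <vector>

// Hypothetical stand-in for the FastSigmoid used above: a precomputed
// sigmoid table over [-kMaxExp, kMaxExp], clamped outside that range.
namespace sketch {

constexpr int   kTableSize = 1000;
constexpr float kMaxExp    = 6.0f;

inline const std::vector<float> & sigmoid_table() {
    static const std::vector<float> table = [] {
        std::vector<float> t(kTableSize);
        for (int i = 0; i < kTableSize; ++i) {
            // map slot i to a point x in [-kMaxExp, kMaxExp]
            float x = (2.0f * i / kTableSize - 1.0f) * kMaxExp;
            t[i] = 1.0f / (1.0f + std::exp(-x));
        }
        return t;
    }();
    return table;
}

inline float FastSigmoid(float x) {
    if (x >= kMaxExp)  return 1.0f;
    if (x <= -kMaxExp) return 0.0f;
    int i = static_cast<int>((x + kMaxExp) * (kTableSize / (2.0f * kMaxExp)));
    return sigmoid_table()[i];
}

}  // namespace sketch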
inline float fast_score_student_t(
        float x,
        float nu,
        float mu,
        float lambda) {
    // \cite{murphy2007conjugate}, Eq. 304
    float p = 0.f;
    p += fast_lgamma_nu(nu);
    p += 0.5f * fast_log(lambda / (M_PIf * nu));
    p += (-0.5f * nu - 0.5f) * fast_log(1.f + (lambda * sqr(x - mu)) / nu);
    return p;
}
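For reference, the cited equation is the log density of the three-parameter Student-t, which is what the terms above add up to (presumably fast_lgamma_nu(nu) returns log Gamma((nu+1)/2) - log Gamma(nu/2); that helper is not shown):

\[
\log p(x \mid \nu, \mu, \lambda) \;=\;
\log\Gamma\!\left(\tfrac{\nu+1}{2}\right) - \log\Gamma\!\left(\tfrac{\nu}{2}\right)
\;+\; \tfrac{1}{2}\log\frac{\lambda}{\pi\nu}
\;-\; \frac{\nu+1}{2}\,\log\!\left(1 + \frac{\lambda (x-\mu)^2}{\nu}\right)
\]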
float score_data(
        const Shared & shared,
        const std::vector<Group> & groups,
        rng_t &) const {
    const float alpha_part = fast_lgamma(shared.alpha);
    const float beta_part = shared.alpha * fast_log(shared.inv_beta);
    float score = 0;
    for (auto const & group : groups) {
        if (group.count) {
            Shared post = shared.plus_group(group);
            score += fast_lgamma(post.alpha) - alpha_part;
            score += beta_part - post.alpha * fast_log(post.inv_beta);
            score += -group.log_prod;
        }
    }
    return score;
}
/* This routine will scan over the sieve for values that look promising, and then attempt to factor
 * them over the factor base. If it finds one that factors (or is a partial), it will add it to the
 * list that we're accumulating for this poly_group.
*/
void extract_relations (block_data_t *data, poly_group_t *pg, poly_t *p, nsieve_t *ns, int block_start){
    /* The sieve now contains estimates of the size (in bits) of the unfactored portion of the
     * polynomial values. We scan the sieve for values less than this cutoff, and trial divide
     * the ones that pass the test.
    */
    mpz_t temp;
    mpz_init (temp);
    poly (temp, p, block_start + BLOCKSIZE/2);
    mpz_abs (temp, temp);
    uint8_t logQ = (uint8_t) mpz_sizeinbase (temp, 2);
    int cutoff = (int) (fast_log(ns->lp_bound) * ns->T);

    /* To accelerate the sieve scanning for promising values, instead of comparing each 8-bit entry
     * one at a time, we instead look at 64 bits at a time. We are looking for sieve values such that
     * sieve[x] > logQ - cutoff. We can use as a first approximation to this the test that
     * sieve[x] > S, where S is the smallest power of 2 larger than (logQ - cutoff). Create an 8-bit
     * mask - say S is 32 - the mask is 11100000. Then if you AND the mask with any sieve value, and
     * the result is nonzero, it cannot possibly be below the cutoff. Make 8 copies of the mask, put
     * it in a 64-bit int, cast the pointer to the sieve block, and do this AND on 64-bit chunks (8
     * sieve locations at a time). Since it will be relatively rare for a value to pass the cutoff,
     * most of the time this test will immediately reject all 8 values. If it doesn't, test each one
     * against the cutoff individually.
    */
    uint64_t *chunk = (uint64_t *) data->sieve;
    uint32_t nchunks = BLOCKSIZE/8;
    uint8_t maskchar = 1;   // this is the 8-bit version of the mask
    while (maskchar < logQ - cutoff){
        maskchar *= 2;
    }
    maskchar /= 2;
    maskchar = ~ (maskchar - 1);
    uint64_t mask = maskchar;
    for (int i=0; i<8; i++){    // make 8 copies of it
        mask = (mask << 8) | maskchar;
    }
    // now loop over the sieve, a chunk at a time
    for (int i=0; i < nchunks; i++){
        if (mask & chunk[i]){   // then some value *might* have passed the test
            for (int j=0; j<8; j++){
                if (logQ - data->sieve[i*8+j] < cutoff){    // check them all
                    poly (temp, p, block_start + i*8 + j);
                    construct_relation (temp, block_start + i*8 + j, p, ns);
                }
            }
        }
    }
    mpz_clear (temp);
}
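The broadcast-mask trick described in the long comment can be exercised in isolation. A standalone toy (all names hypothetical, not from the original source) that broadcasts a one-byte threshold mask into a 64-bit word, ANDs it against 8 sieve bytes at once, and only falls back to per-byte comparisons when the AND is nonzero:

#include <cstdint>
#include <cstdio>

int main() {
    alignas(8) uint8_t sieve[16] = {3, 7, 12, 40, 9, 2, 63, 5, 1, 0, 4, 8, 2, 2, 2, 2};
    int threshold = 33;                             // report bytes >= threshold

    uint8_t maskchar = 1;
    while (maskchar < threshold) maskchar *= 2;     // smallest power of two >= threshold
    maskchar /= 2;                                  // largest power of two  <  threshold
    maskchar = ~(uint8_t)(maskchar - 1);            // any of these bits set => value >= 32

    uint64_t mask = maskchar;
    for (int i = 0; i < 8; i++) mask = (mask << 8) | maskchar;   // 8 copies of the byte

    uint64_t *chunk = (uint64_t *) sieve;           // same cast pattern as the original scan
    for (int i = 0; i < 2; i++) {
        if (mask & chunk[i]) {                      // some byte here *might* qualify
            for (int j = 0; j < 8; j++)
                if (sieve[i * 8 + j] >= threshold)
                    std::printf("hit at %d (value %d)\n", i * 8 + j, sieve[i * 8 + j]);
        }
    }
    return 0;
}

The fast test can accept a few false positives (here, a byte equal to 32 when the threshold is 33), but never rejects a qualifying byte, so the per-byte check only runs on chunks that survive the AND.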
void score_value(const Model & model, AlignedFloats scores) const {
    if (DIST_DEBUG_LEVEL >= 1) {
        DIST_ASSERT_EQ(scores.size(), counts().size());
    }
    const size_t size = counts().size();
    const float shift = -fast_log(sample_size() + model.alpha);
    const float * __restrict__ in = VectorFloat_data(shifted_scores_);
    float * __restrict__ out = VectorFloat_data(scores);
    for (size_t i = 0; i < size; ++i) {
        out[i] = in[i] + shift;
    }
}
float score_add_value(
        count_t group_size,
        count_t nonempty_group_count,
        count_t sample_size,
        count_t empty_group_count = 1) const {
    // What is the probability (score) of adding a customer
    // to a table which currently has:
    //
    //   group_size people sitting at it (can be zero)
    //   nonempty_group_count tables that have people sitting at them
    //   sample_size people seated total
    //
    // In particular, if group_size == 0, this is the prob of sitting
    // at a new table. In that case, nonempty_group_count does not
    // include this "new" table, as it is obviously unoccupied.
    if (group_size == 0) {
        float numer = alpha + d * nonempty_group_count;
        float denom = (sample_size + alpha) * empty_group_count;
        return fast_log(numer / denom);
    } else {
        return fast_log((group_size - d) / (sample_size + alpha));
    }
}
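A quick standalone sanity check (a hypothetical driver, not part of the original class) that these scores behave like a Pitman-Yor Chinese restaurant process: exponentiated, the add-value probabilities over all occupied tables plus one new table sum to 1 (with empty_group_count = 1):

#include <cstdio>
#include <vector>

int main() {
    const float alpha = 1.5f;                   // concentration
    const float d = 0.2f;                       // discount
    std::vector<int> table_sizes = {4, 2, 1};   // occupied tables
    int sample_size = 0;
    for (int s : table_sizes) sample_size += s;

    float total = 0.0f;
    for (int s : table_sizes)                   // existing tables: (n_k - d) / (n + alpha)
        total += (s - d) / (sample_size + alpha);
    total += (alpha + d * table_sizes.size())   // new table: (alpha + d*K) / (n + alpha)
             / (sample_size + alpha);

    std::printf("total probability = %f\n", total);   // prints 1.000000
    return 0;
}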
/* update the switching posterior weights */
void SkipCTS::updatePosteriors(SkipNode &n, int n_submodels, double alpha, double log_alpha,
                               double log_stop_mul, double log_split_mul) {
    // update switching log-posteriors
    double dn = static_cast<double>(n_submodels);
    double K = (1.0 - alpha) * dn - 1.0;
    double log_K = fast_log(K);
    double log_scale = -s_log_tbl[n_submodels-1];

    posteriorUpdate(n, log_scale, log_alpha, log_K, log_stop_mul, n.m_log_prob_est);
    posteriorUpdate(n, log_scale, log_alpha, log_K, log_split_mul, n.m_log_prob_split);

    for (int k=0; k < m_log_skip_preds.size(); k++) {
        posteriorUpdate(n, log_scale, log_alpha, log_K, m_log_skip_preds[k], n.m_log_skip_lik[k]);
    }
}
static int set_auto_gain(sensor_t *sensor, int enable, float gain_db, float gain_db_ceiling)
{
    uint8_t reg;
    int ret = cambus_readb(sensor->slv_addr, BANK_SEL, &reg);
    ret |= cambus_writeb(sensor->slv_addr, BANK_SEL, reg | BANK_SEL_SENSOR);
    ret |= cambus_readb(sensor->slv_addr, COM8, &reg);
    ret |= cambus_writeb(sensor->slv_addr, COM8, (reg & (~COM8_AGC_EN)) | ((enable != 0) ? COM8_AGC_EN : 0));

    if ((enable == 0) && (!isnanf(gain_db)) && (!isinff(gain_db))) {
        float gain = IM_MAX(IM_MIN(fast_expf((gain_db / 20.0) * fast_log(10.0)), 32.0), 1.0);

        int gain_temp = fast_roundf(fast_log2(IM_MAX(gain / 2.0, 1.0)));
        int gain_hi = 0xF >> (4 - gain_temp);
        int gain_lo = IM_MIN(fast_roundf(((gain / (1 << gain_temp)) - 1.0) * 16.0), 15);

        ret |= cambus_writeb(sensor->slv_addr, GAIN, (gain_hi << 4) | (gain_lo << 0));
    } else if ((enable != 0) && (!isnanf(gain_db_ceiling)) && (!isinff(gain_db_ceiling))) {
static int set_auto_gain(sensor_t *sensor, int enable, float gain_db, float gain_db_ceiling)
{
    uint8_t reg;
    int ret = cambus_readb(sensor->slv_addr, REG_COM8, &reg);
    ret |= cambus_writeb(sensor->slv_addr, REG_COM8, (reg & (~REG_COM8_AGC)) | ((enable != 0) ? REG_COM8_AGC : 0));

    if ((enable == 0) && (!isnanf(gain_db)) && (!isinf(gain_db))) {
        float gain = IM_MAX(IM_MIN(fast_expf((gain_db / 20.0) * fast_log(10.0)), 128.0), 1.0);

        int gain_temp = fast_roundf(fast_log2(IM_MAX(gain / 2.0, 1.0)));
        int gain_hi = 0x3F >> (6 - gain_temp);
        int gain_lo = IM_MIN(fast_roundf(((gain / (1 << gain_temp)) - 1.0) * 16.0), 15);

        ret |= cambus_writeb(sensor->slv_addr, REG_GAIN, ((gain_hi & 0x0F) << 4) | (gain_lo << 0));
        ret |= cambus_readb(sensor->slv_addr, REG_VREF, &reg);
        ret |= cambus_writeb(sensor->slv_addr, REG_VREF, ((gain_hi & 0x30) << 2) | (reg & 0x3F));
    } else if ((enable != 0) && (!isnanf(gain_db_ceiling)) && (!isinf(gain_db_ceiling))) {
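The dB-to-register math shared by both variants above is easier to see in isolation. A hedged sketch (assuming the usual OV-sensor gain format, where each set high bit doubles the analog gain and the low nibble adds a fine step of up to 15/16 on top; the 1x..128x clamp is taken from the second variant):

#include <cmath>
#include <cstdio>

int main() {
    float gain_db = 18.0f;

    // 10^(dB/20), clamped to the assumed usable analog gain range
    float gain = std::fmax(std::fmin(std::exp((gain_db / 20.0f) * std::log(10.0f)), 128.0f), 1.0f);

    // number of doubling stages, then a 1/16-step fine adjustment on top
    int gain_temp = (int) std::lround(std::log2(std::fmax(gain / 2.0f, 1.0f)));
    int gain_hi = 0x3F >> (6 - gain_temp);
    long gain_lo = std::lround((gain / (1 << gain_temp) - 1.0f) * 16.0f);
    if (gain_lo > 15) gain_lo = 15;
    if (gain_lo < 0) gain_lo = 0;

    // the analog gain the register fields actually encode
    float effective = (float)(1 << gain_temp) * (1.0f + gain_lo / 16.0f);
    std::printf("%.1f dB -> %.2fx, hi=0x%02X lo=%ld (~%.2fx)\n",
                gain_db, gain, gain_hi, gain_lo, effective);
    return 0;
}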
JNIEXPORT jfloat JNICALL Java_jist_swans_misc_Util_fast_1log
  (JNIEnv *env, jclass cl, jfloat n)
{
    return fast_log(n);
}
void imlib_illuminvar(image_t *img) // http://ai.stanford.edu/~alireza/publication/cic15.pdf
{
    switch(img->bpp) {
        case IMAGE_BPP_BINARY: {
            break;
        }
        case IMAGE_BPP_GRAYSCALE: {
            break;
        }
        case IMAGE_BPP_RGB565: {
            for (int y = 0, yy = img->h; y < yy; y++) {
                uint16_t *row_ptr = IMAGE_COMPUTE_RGB565_PIXEL_ROW_PTR(img, y);
                for (int x = 0, xx = img->w; x < xx; x++) {
                    int pixel = IMAGE_GET_RGB565_PIXEL_FAST(row_ptr, x);
#ifdef IMLIB_ENABLE_INVARIANT_TABLE
                    int rgb565 = invariant_table[pixel];
#else
                    float r_lin = xyz_table[COLOR_RGB565_TO_R8(pixel)] + 1.0;
                    float g_lin = xyz_table[COLOR_RGB565_TO_G8(pixel)] + 1.0;
                    float b_lin = xyz_table[COLOR_RGB565_TO_B8(pixel)] + 1.0;

                    float r_lin_sharp = (r_lin * 0.9968f) + (g_lin * 0.0228f) + (b_lin * 0.0015f);
                    float g_lin_sharp = (r_lin * -0.0071f) + (g_lin * 0.9933f) + (b_lin * 0.0146f);
                    float b_lin_sharp = (r_lin * 0.0103f) + (g_lin * -0.0161f) + (b_lin * 0.9839f);

                    float lin_sharp_avg = r_lin_sharp * g_lin_sharp * b_lin_sharp;
                    lin_sharp_avg = (lin_sharp_avg > 0.0f) ? fast_cbrtf(lin_sharp_avg) : 0.0f;

                    float r_lin_sharp_div = 0.0f;
                    float g_lin_sharp_div = 0.0f;
                    float b_lin_sharp_div = 0.0f;

                    if (lin_sharp_avg > 0.0f) {
                        lin_sharp_avg = 1.0f / lin_sharp_avg;
                        r_lin_sharp_div = r_lin_sharp * lin_sharp_avg;
                        g_lin_sharp_div = g_lin_sharp * lin_sharp_avg;
                        b_lin_sharp_div = b_lin_sharp * lin_sharp_avg;
                    }

                    float r_lin_sharp_div_log = (r_lin_sharp_div > 0.0f) ? fast_log(r_lin_sharp_div) : 0.0f;
                    float g_lin_sharp_div_log = (g_lin_sharp_div > 0.0f) ? fast_log(g_lin_sharp_div) : 0.0f;
                    float b_lin_sharp_div_log = (b_lin_sharp_div > 0.0f) ? fast_log(b_lin_sharp_div) : 0.0f;

                    float chi_x = (r_lin_sharp_div_log * 0.7071f) + (g_lin_sharp_div_log * -0.7071f) + (b_lin_sharp_div_log * 0.0000f);
                    float chi_y = (r_lin_sharp_div_log * 0.4082f) + (g_lin_sharp_div_log * 0.4082f) + (b_lin_sharp_div_log * -0.8164f);

                    float e_t_x = 0.9326f;
                    float e_t_y = -0.3609f;

                    float p_th_00 = e_t_x * e_t_x;
                    float p_th_01 = e_t_x * e_t_y;
                    float p_th_10 = e_t_y * e_t_x;
                    float p_th_11 = e_t_y * e_t_y;

                    float x_th_x = (p_th_00 * chi_x) + (p_th_01 * chi_y);
                    float x_th_y = (p_th_10 * chi_x) + (p_th_11 * chi_y);

                    float r_chi = (x_th_x * 0.7071f) + (x_th_y * 0.4082f);
                    float g_chi = (x_th_x * -0.7071f) + (x_th_y * 0.4082f);
                    float b_chi = (x_th_x * 0.0000f) + (x_th_y * -0.8164f);

                    float r_chi_invariant = fast_expf(r_chi);
                    float g_chi_invariant = fast_expf(g_chi);
                    float b_chi_invariant = fast_expf(b_chi);

                    float chi_invariant_sum = r_chi_invariant + g_chi_invariant + b_chi_invariant;

                    float r_chi_invariant_m = 0.0f;
                    float g_chi_invariant_m = 0.0f;
                    float b_chi_invariant_m = 0.0f;

                    if (chi_invariant_sum > 0.0f) {
                        chi_invariant_sum = 1.0f / chi_invariant_sum;
                        r_chi_invariant_m = r_chi_invariant * chi_invariant_sum;
                        g_chi_invariant_m = g_chi_invariant * chi_invariant_sum;
                        b_chi_invariant_m = b_chi_invariant * chi_invariant_sum;
                    }

                    int r_chi_invariant_m_int = IM_MAX(IM_MIN(r_chi_invariant_m * 255.0f, COLOR_R8_MAX), COLOR_R8_MIN);
                    int g_chi_invariant_m_int = IM_MAX(IM_MIN(g_chi_invariant_m * 255.0f, COLOR_G8_MAX), COLOR_G8_MIN);
                    int b_chi_invariant_m_int = IM_MAX(IM_MIN(b_chi_invariant_m * 255.0f, COLOR_B8_MAX), COLOR_B8_MIN);

                    int rgb565 = COLOR_R8_G8_B8_TO_RGB565(r_chi_invariant_m_int, g_chi_invariant_m_int, b_chi_invariant_m_int);
#endif
                    IMAGE_PUT_RGB565_PIXEL_FAST(row_ptr, x, rgb565);
                }
            }
            break;
        }
        default: {
            break;
        }
    }
}
/* compute the logarithm of the KT-estimator update multiplier */
inline double SkipNode::logKTMul(bit_t b) const {
    return fast_log(ktMul(b));
}
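ktMul(b) itself is not shown; presumably it is the Krichevsky-Trofimov predictive probability of the next bit given the counts seen so far. A minimal sketch under that assumption:

// Hypothetical stand-in for ktMul: the KT estimator predicts bit b with
// probability (count_b + 1/2) / (count_0 + count_1 + 1).
inline double ktMul_sketch(const int counts[2], int b) {
    return (counts[b] + 0.5) / (counts[0] + counts[1] + 1.0);
}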
/* process a new piece of sensory experience */
void SkipCTS::update(bit_t b) {

    getContext();

    double alpha = switchRate(m_history.size());
    double log_alpha = fast_log(alpha);
    zobhash_t hash = 0;
    int skips_left, last_idx;

    // update nodes from deepest to shallowest
    for (int i=m_depth; i >= 0; i--) {

        // update the KT statistics, then the weighted
        // probability for every node on this level
        const indices_list_t &il = m_indices[i];
        for (int j=0; j < il.size(); j++) {

            m_log_skip_preds.clear();
            getContextInfo(hash, il[j]);
            skips_left = m_auxinfo[i][j].skips_left;
            last_idx = m_auxinfo[i][j].last_idx;

            // update the node
            int n_submodels = numSubmodels(last_idx, skips_left);
            SkipNode &n = getNode(hash, i, n_submodels);
            n.m_buf = n.m_log_prob_weighted;

            // lazy allocation of skipping prior weights
            if (n_submodels > 2 && n.m_log_skip_lik == NULL)
                lazyAllocate(n, n_submodels, skips_left);

            // handle the stop case
            double log_est_mul = n.logKTMul(b);

            if (n_submodels == 1) {
                n.updateKT(b, log_est_mul);
                n.m_log_prob_weighted += log_est_mul;
                n.m_buf = log_est_mul;
                continue;
            }

            double log_acc = n.m_log_prob_est + log_est_mul;
            n.updateKT(b, log_est_mul);

            // handle the split case
            zobhash_t delta = s_zobtbl[last_idx+1][m_context[last_idx+1]];
            const SkipNode &nn = getNode(hash ^ delta, i+1, numSubmodels(last_idx+1, skips_left));
            double log_split_pred = nn.m_buf;
            log_acc = fast_logadd(log_acc, n.m_log_prob_split + log_split_pred);

            // handle the skipping case
            if (n_submodels > 2) {

                // update the skipping models
                for (int k=last_idx+2; k < m_depth; k++) {
                    zobhash_t h = hash ^ s_zobtbl[k][m_context[k]];
                    SkipNode &sn = getNode(h, i+1, numSubmodels(k, skips_left - 1));
                    double log_skip_pred = sn.m_buf;
                    m_log_skip_preds.push_back(log_skip_pred);
                    int z = k - last_idx - 2;
                    log_acc = fast_logadd(log_acc, n.m_log_skip_lik[z] + log_skip_pred);
                }
            }

            // store the weighted probability
            n.m_log_prob_weighted = log_acc;
            assert(n.m_log_prob_weighted < n.m_buf);

            // Store the *difference* in log probability in m_buf
            n.m_buf = n.m_log_prob_weighted - n.m_buf;

            updatePosteriors(n, n_submodels, alpha, log_alpha, log_est_mul, log_split_pred);
        }
    }

    m_history.push_back(b != 0);
}
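The mixture accumulation above leans on fast_logadd, which presumably computes log(exp(a) + exp(b)). A minimal numerically stable sketch under that assumption:

#include <cmath>
#include <utility>

// Hypothetical stand-in for fast_logadd: log(exp(a) + exp(b)) without overflow,
// by factoring out the larger argument.
inline double logadd_sketch(double a, double b) {
    if (a < b) std::swap(a, b);            // ensure a >= b so exp(b - a) <= 1
    return a + std::log1p(std::exp(b - a));
}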