/*
 * factors() - prime factorization of n by trial division.
 *
 * Returns a struct factors whose vector holds the prime factors of n in
 * nondecreasing order; the final add_vec() stores whatever is left of n,
 * so factors(1) yields {1} and factors(p) for prime p yields {p}, exactly
 * as before.
 *
 * Fix: the previous version rescanned candidates from i = 2 after every
 * factor found (worst-case quadratic in n).  We instead keep dividing by
 * the current candidate and stop once i * i > n -- at that point the
 * remaining n has no divisor in [2, n) and is emitted as the last factor,
 * which reproduces the original output for every input.
 */
struct factors factors(long n) {
    struct factors ret;
    long i;

    ret.size = 0;
    ret.vecsize = 10;
    /* NOTE(review): malloc result is unchecked, as in the original. */
    ret.vector = (long*)malloc(sizeof(long) * ret.vecsize);
    i = 2;
    while (i * i <= n) {
        if (n % i == 0) {
            add_vec(&ret, i);   /* record the smallest remaining factor */
            n = n / i;
        } else {
            i++;
        }
    }
    add_vec(&ret, n);           /* remaining cofactor (prime), or n itself */
    return ret;
}
int main(){ std::vector<double> vec1(10); std::vector<double> vec2(10); // fill vec1 with 1 to 10 // fill vec2 with 10 to 19 for(int i = 0; i < vec1.size(); i++){ vec1[i] = i + 1; vec2[i] = i + 10; } std::vector<double> vec3(10); vec3 = add_vec(vec1, vec2); std::vector<double> vec4(10); vec4 = add_vec(vec1, 5.5); std::cout << "Result of add_vec(vec1, vec2)" << std::endl; for(int i = 0; i < vec3.size(); i++){ std::cout << vec3[i] << " "; } std::cout << std::endl; std::cout << "Result of add_vec(vec1, 5.5)" << std::endl; for(int i = 0; i < vec4.size(); i++){ std::cout << vec4[i] << " "; } std::cout << std::endl; return 0; }
void validate_dims(const std::string& stage, const std::string& name, const std::string& base_type, const std::vector<size_t>& dims_declared) const { bool is_int_type = base_type == "int"; if (is_int_type) { if (!contains_i(name)) { std::stringstream msg; msg << (contains_r(name) ? "int variable contained non-int values" : "variable does not exist" ) << "; processing stage=" << stage << "; variable name=" << name << "; base type=" << base_type; throw std::runtime_error(msg.str()); } } else { if (!contains_r(name)) { std::stringstream msg; msg << "variable does not exist" << "; processing stage=" << stage << "; variable name=" << name << "; base type=" << base_type; throw std::runtime_error(msg.str()); } } std::vector<size_t> dims = dims_r(name); if (dims.size() != dims_declared.size()) { std::stringstream msg; msg << "mismatch in number dimensions declared and found in context" << "; processing stage=" << stage << "; variable name=" << name << "; dims declared="; add_vec(msg,dims_declared); msg << "; dims found="; add_vec(msg,dims); throw std::runtime_error(msg.str()); } for (size_t i = 0; i < dims.size(); ++i) { if (dims_declared[i] != dims[i]) { std::stringstream msg; msg << "mismatch in dimension declared and found in context" << "; processing stage=" << stage << "; variable name=" << name << "; position=" << i << "; dims declared="; add_vec(msg,dims_declared); msg << "; dims found="; add_vec(msg,dims); throw std::runtime_error(msg.str()); } } }
/* Smoke test for the generic vector container: builds a vector of int
 * pointers, bulk-adds eight predefined values plus one extra, deletes the
 * first element, and prints the result.
 * NOTE(review): `q` is allocated but never used or freed; v, y and z are
 * also never freed -- tolerable in a short-lived test, but worth fixing.
 * NOTE(review): malloc return values are unchecked. */
int main() {
    srand(time(NULL));  /* seed kept for the disabled rand() path below */
    vector* v = new_vector(2, sizeof(int), compare_integer);
    vector* q = new_vector(2, sizeof(int), compare_integer);
    int **y = malloc(sizeof(int*)*8);
    int i;
    int predef[] = {1, 50, 12, 68, 3, 4, 78, 2};
    for (i = 0; i < 8; i++) {
        y[i] = malloc(sizeof(int));
        // *y[i] = rand()%1000;
        *y[i] = predef[i];
    }
    int *z = malloc(sizeof(int));
    *z = 888;
    add_all_vec(v, (void**) y, 8);  /* bulk insert the eight values */
    add_vec(v, z);                  /* then a single append */
    delete_vec(v, &y[0]);           /* remove the first inserted element */
    print_vector(v);
    return 0;
}
/* Trace one primary ray: find the nearest intersection (first_loop),
 * report its distance through params->distance, then accumulate shading
 * from every light in the scene list plus reflection and refraction.
 * Returns the resulting color; ray->color unchanged when nothing was hit,
 * pure white (1,1,1) for a direct hit on a light source.
 * NOTE(review): `ray` comes from init_ray() and is never freed here --
 * presumably owned or pooled elsewhere; confirm. */
t_vec raytracer(t_rayparams *params)
{
	t_raytracer	*ray;

	ray = init_ray(params->over.l);
	first_loop(ray, params->dir, params->o);
	if (!ray->ret)
		return (ray->color);          /* no intersection */
	*(params->distance) = ray->ret2;  /* ret2 appears to hold the hit distance */
	if (ray->ret->light == TRUE)
		return (set_vec(1, 1, 1));    /* direct hit on a light source */
	/* Intersection point: origin + direction * distance. */
	ray->pi = add_vec(params->o, mul_vec(params->dir, ray->ret2));
	ray->tmp = params->over.l;
	while (ray->tmp)
	{
		/* Shade against each light-emitting object in the scene list. */
		if (ray->tmp->light == TRUE)
		{
			set_nray(ray, params->over.l, params->dir, params->o);
			shade(ray, params->dir);
		}
		ray->tmp = ray->tmp->next;
	}
	reflexion(ray, params);
	refraction(ray, params);
	return (ray->color);
}
/* Moving the stars * ---------------- * * update_star() moves a star on the screen using the star * motion vector and tests for screen bounds collisions. */ static void update_star(struct star* star, float elapsed_ms) { bbox screen_bbox = { { 0, 0 }, { 192, 108 } }; star->star_pos = add_vec(star->star_pos, star->star_vec); star->star_bbox = translate_bbox(star->star_bbox, star->star_pos); if (bbox_in_bbox(star->star_bbox, screen_bbox) == 0) { if (star->star_bbox.p1.x < screen_bbox.p1.x) star->star_vec.x = 1; if (star->star_bbox.p1.y < screen_bbox.p1.y) star->star_vec.y = 1; if (star->star_bbox.p2.x > screen_bbox.p2.x) star->star_vec.x = -1; if (star->star_bbox.p2.y > screen_bbox.p2.y) star->star_vec.y = -1; } UNUSED(elapsed_ms); }
/* * Web begin by creating, positioning * and propelling the stars. */ static void* create_sample(void) { int i; struct state* state = malloc(sizeof(struct state)); state->star_img = create_image("res/star.png"); for (i = 0; i < MAX_STARS; i++) { state->stars[i].star_pos = xy_vec(i * 16, i * 16); state->stars[i].star_vec = xy_vec(2 - rand() % 4, 2 - rand() % 4); state->stars[i].star_bbox.p1 = state->stars[i].star_pos; state->stars[i].star_bbox.p2 = add_vec(state->stars[i].star_pos, xy_vec(16, 16)); } return state; }
/* Sums NUM_VECTORS rows of array_of_vec into vec_sum using a user-defined
 * OpenMP reduction named `add_vec`.  A matching
 * `#pragma omp declare reduction(add_vec : ...)` must exist elsewhere in
 * the project -- confirm.  add_vec(n, dst, src) presumably performs
 * dst += src element-wise over n floats; verify against its definition.
 * NOTE(review): array_of_vec is never initialized, so the sum is over
 * indeterminate values -- acceptable only if this is a pragma demo. */
void f1(void)
{
    float array_of_vec[NUM_VECTORS][VECTOR_SIZE];
    float vec_sum[VECTOR_SIZE];
    memset(vec_sum, 0, sizeof(vec_sum));  /* identity value for the reduction */
    int i;
#pragma omp parallel for reduction(add_vec : vec_sum)
    for (i = 0; i < NUM_VECTORS; i++) {
        add_vec(VECTOR_SIZE, &vec_sum[0], &array_of_vec[i][0]);
    }
}
static void test_vec_array() { array_t *pos, *rpos; pos = CEX_make_vec_array(0); add_vec(pos, 0,0,0); add_vec(pos, 5,0,M_PI); add_vec(pos, 23e12,-5e-13,34.23); rpos = CEX_reversed_array(pos); CEX_extend_array(pos, rpos); CEX_extend_array(pos, pos); CEX_free_array(rpos); msg_t *msg = CEX_make_write_msg(0); CEX_msg_write_vec_array(msg, pos); setup_read(msg); array_t *pos2 = CEX_msg_read_vec_array(msg); REQ_MSG_EOFP(msg); CEX_free_msg(msg); REQ_VARR(pos2); assert(ARR_LENGTH(pos) == ARR_LENGTH(pos2)); for (int i=0; i <ARR_LENGTH(pos); i++) { vec_t v1 = ARR_INDEX_AS(vec_t, pos, i); vec_t v2 = ARR_INDEX_AS(vec_t, pos2, i); #if 0 xprintf("%d " Vec3_FRMT("%.5e") " " Vec3_FRMT("%.5e"), i, Vec3_ARGS(v1), Vec3_ARGS(v2)); #endif EXPECT_DOUBLE(1e-10, v1.x, v2.x); EXPECT_DOUBLE(1e-10, v2.y, v2.y); EXPECT_DOUBLE(1e-10, v1.z, v2.z); } CEX_free_array(pos); CEX_free_array(pos2); }
/* Transform pass: while stacks a and b differ in length, recognize the
 * next 8-element pattern of `a` against the precomputed pattern file
 * "patterns/IN8", execute the matching instruction kit, then advance the
 * abcd boundary sequence by (2, -4, 2, 4).
 * NOTE(review): the t_kit from new_kit() and the temporary sequence from
 * make_sequence(2, -4, 2, 4) are not freed here -- presumably consumed by
 * do_ops_kit()/add_vec(); confirm to rule out per-iteration leaks. */
void do_tr4_in8(t_list **a, t_list **b, t_list **in)
{
	char	**patterns;
	int		*abcd;
	t_kit	*k;

	patterns = load_file("patterns/IN8");
	/* abcd: running boundary values sized from the stack length;
	 * exact semantics defined by make_sequence -- see its definition. */
	abcd = make_sequence(lstlen(*a) / 4 + 1, -1, 1, 4);
	while (lstlen(*a) != lstlen(*b))
	{
		k = new_kit(a, b, in, abcd);
		do_ops_kit(make_instr_arr(recognize_8pat(patterns, \
			grab_next_n(*a, 8), abcd)), k, 8, 0);
		add_vec(abcd, make_sequence(2, -4, 2, 4), 4);
	}
	free(abcd);
	free_split(patterns);
}
/*
 * Specular highlight: when the hit material has a specular coefficient,
 * reflect the light vector about the normal, and if the reflection faces
 * the viewing direction, add a Phong term (exponent 20) scaled by the
 * material's specular coefficient, the shade factor and the light color.
 */
void specular(t_raytracer *ray, t_vec dir)
{
	t_specular	s;

	if (!(ray->ret->specular > 0))
		return ;
	/* r = l - 2 (l . n) n : reflection of the light vector. */
	s.r_spect = 2.0f * DOT(ray->l, ray->n);
	s.r_spec = sub_vec(ray->l, mul_vec(ray->n, s.r_spect));
	s.dot_spec = DOT(dir, s.r_spec);
	if (s.dot_spec > 0)
	{
		s.spect_diff = powf(s.dot_spec, 20) * ray->ret->specular * ray->shade;
		ray->color = add_vec(ray->color, mul_vec(ray->tmp->col, s.spect_diff));
	}
}
// One gradient-ascent step on the baseline probability masses baseP.
// Builds a zero-sum unit proposal direction from the (numeric) derivative
// over the "active" components, picks a Newton step length from central
// finite differences of the log-likelihood along that direction, and
// backtracks if the likelihood got worse.  On any failure the cumulative
// hazard baseCH is rolled back from backupCH and failedGA_counts is
// bumped; after 500 failures the routine becomes a permanent no-op.
void icm_Abst::gradientDescent_step(){
    if(failedGA_counts > 500){return;}
    double org_llk = sum_llk();
    backupCH = baseCH;                 // snapshot so a failed step can roll back
    baseCH_2_baseS(); baseS_2_baseP(); // refresh S and P from the cumulative hazard
    numeric_dobs_dp(true);             // numeric derivative of llk w.r.t. baseP
    int k = base_p_derv.size();
    prop_p.resize(k);
    double prop_mean = 0;
    int act_sum = 0;
    double new_llk;
    vector<bool> isActive(k);
    // Active set: strictly positive masses with a usable derivative.
    for(int i = 0; i < k; i++){
        if(baseP[i] > 0 && !ISNAN(base_p_derv[i]) ){ isActive[i] = true; act_sum++; }
        else { isActive[i] = false; }
    }
    for(int i = 0; i < k; i++){ if(isActive[i]){ prop_mean += base_p_derv[i]; } }
    prop_mean = prop_mean / act_sum;
    // Center the proposal over the active set so it sums to zero
    // (keeps sum(baseP) invariant), then normalize to unit length.
    for(int i = 0; i < k; i++){
        if(isActive[i]){ prop_p[i] = base_p_derv[i] - prop_mean;}
        else {prop_p[i] = 0.0;}
    }
    makeUnitVector(prop_p);
    // Largest feasible step in either direction along prop_p.
    double scale_max = getMaxScaleSize(baseP, prop_p);
    for(int i = 0; i < k; i++){ prop_p[i] *= -1.0; }
    scale_max = min(scale_max, getMaxScaleSize(baseP, prop_p));
    for(int i = 0; i < k; i++){ prop_p[i] *= -1.0; }
    double delta_val = scale_max/2.0;
    delta_val = min(delta_val, h);
    delta_val = delta_val/10.0;        // small probe step for finite differences
    double analytic_dd = directional_derv(base_p_derv, prop_p);
    if(delta_val == 0){ failedGA_counts++; baseCH = backupCH; new_llk = sum_llk(); return; }
    // Central finite differences of the log-likelihood along prop_p.
    add_vec(delta_val, prop_p, baseP);        double llk_h = llk_from_p();
    add_vec(-2.0 * delta_val, prop_p, baseP); double llk_l = llk_from_p();
    add_vec(delta_val, prop_p, baseP);        double llk_0 = llk_from_p();
    double d1 = ( llk_h - llk_l ) / ( 2 * delta_val );
    double d2 = (llk_h + llk_l - 2.0 * llk_0 ) / (delta_val * delta_val);
    // Alternate between numeric and analytic first derivative by iteration.
    if(iter % 2 ==0){ d1 = analytic_dd; }
    delta_val = -d1/d2;   // Newton step length along the proposal
    // Reject non-positive or NaN step lengths (also catches d2 == 0).
    if(!(delta_val > 0)){ failedGA_counts++; baseCH = backupCH; new_llk = sum_llk(); return; }
    if(ISNAN(delta_val)){ failedGA_counts++; baseCH= backupCH; new_llk = sum_llk(); return; }
    scale_max = getMaxScaleSize(baseP, prop_p);
    delta_val = min( delta_val, scale_max );  // clamp to the feasible region
    add_vec(delta_val, prop_p, baseP);
    new_llk = llk_from_p();
    // Backtracking: flip the direction, then halve the step up to 5 times
    // while the likelihood is worse than the probe baseline llk_0.
    mult_vec(-1.0, prop_p);
    int tries = 0;
    double this_delta = delta_val;
    while(tries < 5 && new_llk < llk_0){
        tries++;
        this_delta = this_delta/2;
        add_vec(this_delta, prop_p, baseP);
        new_llk = llk_from_p();
    }
    if(new_llk < llk_0){
        failedGA_counts++;
        baseCH = backupCH;
        new_llk = sum_llk(); //Should NOT be llk_from_p(), since we are resetting the CH
        return;
    }
    if(org_llk > new_llk){ failedGA_counts++; baseCH = backupCH; new_llk = sum_llk(); }
    // Rprintf("change in llk in CGA step = %f\n", new_llk - org_llk);
}
/* add_vec3() - 3-component wrapper around the generic add_vec().
 * Presumably computes the element-wise sum of a and b with the result
 * written through one of the arguments (likely c) -- confirm against the
 * generic add_vec() signature. */
void add_vec3(vec3 a, vec3 b, vec3 c) { add_vec(a,b,c,3); }
/* Mirror helper: returns normalize(norm + normalize(ray + norm)).
 * NOTE(review): this is not the standard reflection formula
 * (r = d - 2*(d.n)*n); presumably intentional for this renderer's
 * mirror effect -- confirm with the call site. */
t_vec miroiratorvcalculator2(t_vec ray, t_vec norm)
{
	return (normalizator_ret(add_vec(norm,
		normalizator_ret(add_vec(ray, norm)))));
}
/* Evaluation driver for the playlist-embedding transition model: loads
 * song embeddings and test playlists, builds a transition-count table,
 * and reports the average per-transition log-likelihood (plus optional
 * per-song / per-transition distribution files and an unseen-song
 * uniform baseline).  See the inline NOTE(review) comments for two
 * suspected out-of-bounds indexing bugs. */
int main(int argc, char* argv[])
{
    TEST_PARAS myparas = parse_test_paras(argc, argv, testfile, embeddingfile, trainfile);
    printf("Predicting...\n");
    if(!myparas.allow_self_transition) printf("Do not allow self-transtion.\n");
    if (!myparas.underflow_correction) printf("Underflow correction disabled\n");
    /* Testing on unseen songs requires a tag file to embed them. */
    int new_test_song_exp = (myparas.train_test_hash_file[0] != '\0');
    if(myparas.tagfile[0] == '\0' && new_test_song_exp) { printf("Have to support with a tag file if you want to test on unseen songs.\n"); exit(1); }
    int d; int m; int l; int i; int j; int s; int fr; int to;
    double* bias_terms = 0;
    double** X = read_matrix_file(embeddingfile, &l, &d, &bias_terms);
    double** realX;
    PDATA pd = read_playlists_data(testfile);
    //int k = pd.num_songs;
    int k;
    double llhood = 0.0;
    double uniform_llhood = 0.0;   /* baseline when testing on unseen songs */
    double realn = 0.0;            /* weight of transitions the model covers */
    double not_realn= 0.0;         /* weight of transitions outside the collection */
    int* train_test_hash;
    int k_train; int k_test;
    TDATA td;
    if(!new_test_song_exp)
    {
        /* Standard experiment: every test song was seen during training. */
        k = pd.num_songs;
        if(myparas.tagfile[0] != '\0')
        {
            td = read_tag_data(myparas.tagfile);
            m = td.num_tags;
            myparas.num_points = l / (k + m);
            realX = zerosarray(k * myparas.num_points, d);
            calculate_realX(X, realX, td, k, m, d, myparas.num_points);
            free_tag_data(td);
            if(myparas.tag_ebd_filename[0] != '\0') write_embedding_to_file(X + k * myparas.num_points, m * myparas.num_points, d, myparas.tag_ebd_filename, 0);
        }
        else
        {
            myparas.num_points = l / k;
            realX = zerosarray(k * myparas.num_points, d);
            Array2Dcopy(X, realX, l, d);
        }
        Array2Dfree(X, l, d);
    }
    else
    {
        /* Unseen-song experiment: song embeddings derived from tag data. */
        printf("Prediction on unseen songs.\n");
        td = read_tag_data(myparas.tagfile);
        m = td.num_tags;
        k = td.num_songs;
        train_test_hash = read_hash(myparas.train_test_hash_file, &k_train);
        k_test = k - k_train;
        printf("Number of new songs %d.\n", k_test);
        myparas.num_points = l / (k_train + m);
        realX = zerosarray(k * myparas.num_points, d);
        calculate_realX_with_hash(X, realX, td, k, m, d, myparas.num_points, k_train, train_test_hash);
        free_tag_data(td);
        Array2Dfree(X, l, d);
    }
    /* Optional dumps of the song embedding and per-song bias terms. */
    if(myparas.song_ebd_filename[0] != '\0') write_embedding_to_file(realX, k * myparas.num_points, d, myparas.song_ebd_filename, 0);
    if(myparas.bias_ebd_filename[0] != '\0')
    {
        FILE* fp = fopen(myparas.bias_ebd_filename, "w");
        for( i = 0; i < k ;i++)
        {
            fprintf(fp, "%f", bias_terms[i]);
            if ( i != k - 1) fputc('\n', fp);
        }
        fclose(fp);
    }
    double** square_dist;
    if(myparas.square_dist_filename[0] != '\0') square_dist = zerosarray(k, k);
    /* Count usable transitions in the test playlists. */
    int n = 0;
    for(i = 0; i < pd.num_playlists; i ++) if(pd.playlists_length[i] > 0) n += pd.playlists_length[i] - 1;
    printf("Altogether %d transitions.\n", n);fflush(stdout);
    /* Transition count table: hash-based or dense k x k matrix. */
    PHASH* tcount;
    PHASH* tcount_train;
    double** tcount_full;
    double** tcount_full_train;
    if(myparas.use_hash_TTable) tcount = create_empty_hash(2 * n); else tcount_full = zerosarray(k, k);
    HELEM temp_elem;
    TPAIR temp_pair;
    int idx;
    double temp_val;
    for(i = 0; i < pd.num_playlists; i ++)
    {
        if(pd.playlists_length[i] > myparas.range)
        {
            for(j = 0; j < pd.playlists_length[i] - 1; j++)
            {
                /* Transition (fr -> to) at playlist distance myparas.range. */
                temp_pair.fr = pd.playlists[i][j];
                temp_pair.to = pd.playlists[i][j + myparas.range];
                //printf("(%d, %d)\n", temp_pair.fr, temp_pair.to);
                if(temp_pair.fr >= 0 && temp_pair.to >= 0)
                {
                    if(myparas.use_hash_TTable)
                    {
                        idx = exist_in_hash(tcount, temp_pair);
                        if(idx < 0) { temp_elem.key = temp_pair; temp_elem.val = 1.0; add_entry(tcount, temp_elem); }
                        else update_with(tcount, idx, 1.0);
                    }
                    else tcount_full[temp_pair.fr][temp_pair.to] += 1.0;
                }
            }
        }
    }
    TRANSITIONTABLE ttable;
    TRANSITIONTABLE BFStable;
    //Need to use the training file
    if(myparas.output_distr)
    {
        /* Same counting pass over the training playlists (distance 1). */
        PDATA pd_train = read_playlists_data(trainfile);
        if(myparas.use_hash_TTable) tcount_train = create_empty_hash(2 * n); else tcount_full_train = zerosarray(k, k);
        for(i = 0; i < pd_train.num_playlists; i ++)
        {
            if(pd_train.playlists_length[i] > 1)
            {
                for(j = 0; j < pd_train.playlists_length[i] - 1; j++)
                {
                    temp_pair.fr = pd_train.playlists[i][j];
                    temp_pair.to = pd_train.playlists[i][j + 1];
                    if(myparas.use_hash_TTable)
                    {
                        idx = exist_in_hash(tcount_train, temp_pair);
                        if(idx < 0) { temp_elem.key = temp_pair; temp_elem.val = 1.0; add_entry(tcount_train, temp_elem); }
                        else update_with(tcount_train, idx, 1.0);
                    }
                    else tcount_full_train[temp_pair.fr][temp_pair.to] += 1.0;
                }
            }
        }
    }
    FILE* song_distr_file;
    FILE* trans_distr_file;
    double* song_sep_ll;
    if(myparas.output_distr)
    {
        printf("Output likelihood distribution file turned on.\n");
        if(myparas.output_distr)
        {
            song_distr_file = fopen(songdistrfile, "w");
            trans_distr_file = fopen(transdistrfile, "w");
            song_sep_ll = (double*)calloc(k, sizeof(double));
        }
    }
    int* test_ids_for_new_songs;
    if(new_test_song_exp) test_ids_for_new_songs = get_test_ids(k, k_train, train_test_hash);
    /* Main loop: for each source song fr, compute the log-probability of
     * every candidate destination, then accumulate the test likelihood. */
    for(fr = 0; fr < k; fr++)
    {
        /* Candidate destinations: BFS collection, unseen songs, or all k. */
        int collection_size;
        int* collection_idx;
        if(myparas.fast_collection)
        {
            collection_size = (BFStable.parray)[fr].length;
            if (collection_size == 0) continue;
            collection_idx = (int*)malloc(collection_size * sizeof(int));
            LINKEDELEM* tempp = (BFStable.parray)[fr].head;
            for(i = 0; i < collection_size; i++) { collection_idx[i] = tempp -> idx; tempp = tempp -> pnext; }
        }
        else if(new_test_song_exp)
        {
            collection_size = k_test;
            collection_idx = (int*)malloc(collection_size * sizeof(int));
            int_list_copy(test_ids_for_new_songs, collection_idx, k_test);
        }
        else collection_size = k;
        double** delta = zerosarray(collection_size, d);
        double* p = (double*)calloc(collection_size, sizeof(double));
        double** tempkd = zerosarray(collection_size, d);
        double* tempk = (double*)calloc(collection_size, sizeof(double));
        double** mid_delta = 0;
        double* mid_p = 0;
        double** mid_tempkd = 0;
        // I get a seg fault when these get freed. Don't understand.
        if (myparas.num_points == 3)
        {
            mid_delta = zerosarray(collection_size, d);
            mid_p = (double*)calloc(collection_size, sizeof(double));
            mid_tempkd = zerosarray(collection_size, d);
        }
        for(j = 0; j < collection_size; j++)
        {
            /* delta[j] = entry embedding of fr minus exit embedding of j. */
            for(i = 0; i < d; i++)
            {
                if(myparas.fast_collection || new_test_song_exp) delta[j][i] = realX[fr][i] - realX[(myparas.num_points - 1) * k + collection_idx[j]][i];
                else delta[j][i] = realX[fr][i] - realX[(myparas.num_points - 1) * k + j][i];
            }
            /* NOTE(review): at this point i == d, yet the block below indexes
             * mid_delta[j][i] and realX[...][i] -- an out-of-bounds access.
             * This if-block most likely belongs inside the i-loop above. */
            if(myparas.num_points == 3)
            {
                if(myparas.fast_collection || new_test_song_exp) mid_delta[j][i] = realX[k + fr][i] - realX[k + collection_idx[j]][i];
                else mid_delta[j][i] = realX[k + fr][i] - realX[k + j][i];
            }
        }
        /* p[j] = -||delta_j||^2 : unnormalized log transition score. */
        mat_mult(delta, delta, tempkd, collection_size, d);
        scale_mat(tempkd, collection_size, d, -1.0);
        sum_along_direct(tempkd, p, collection_size, d, 1);
        /* NOTE(review): this loop runs i over k, but p only has
         * collection_size entries when a sub-collection is in use. */
        if(myparas.square_dist_filename[0] != '\0') for(i = 0; i < k; i++) square_dist[fr][i] = -p[i];
        if (bias_terms != 0) add_vec(p, bias_terms, collection_size, 1.0);
        if (myparas.num_points == 3)
        {
            // Just use the mid_deltas (midpoint differences): square them,
            // then sum and add to the p vector directly, then the midpoint
            // probability is incorporated
            mat_mult(mid_delta, mid_delta, mid_tempkd, collection_size, d);
            scale_mat(mid_tempkd, collection_size, d, -1.0);
            sum_along_direct(mid_tempkd, mid_p, collection_size, d, 1);
            add_vec(p, mid_p, collection_size, 1.0);
        }
        /* Softmax normalization in log space (optionally max-shifted). */
        if (myparas.underflow_correction == 1)
        {
            double max_val = p[0];
            for(i = 0; i < collection_size; i++) max_val = p[i] > max_val? p[i] : max_val;
            vec_scalar_sum(p, -max_val, collection_size);
        }
        Veccopy(p, tempk, collection_size);
        exp_on_vec(tempk, collection_size);
        //exp_on_vec(p, collection_size);
        // underflow checking:
        // for (i = 0; i < collection_size; i++)
        // if (p[i] < 0.000001)
        // p[i] = 0.000001;
        double temp_sum;
        if(myparas.allow_self_transition) temp_sum = sum_vec(tempk, collection_size);
        else
        {
            /* Exclude the self transition from the partition sum. */
            temp_sum = 0.0;
            for(i = 0; i < collection_size; i++)
                if(!myparas.fast_collection || new_test_song_exp) temp_sum += (i != fr)? tempk[i] : 0.0;
                else temp_sum += (collection_idx[i] != fr)? tempk[i] : 0.0;
        }
        vec_scalar_sum(p, -log(temp_sum), collection_size);
        //scale_vec(p, collection_size, 1.0 / temp_sum);
        //printf("done...\n");
        /* Accumulate count-weighted log-likelihood over observed transitions. */
        for(to = 0; to < k; to++)
        {
            if(myparas.allow_self_transition || (!myparas.allow_self_transition && fr != to))
            {
                temp_pair.fr = fr; temp_pair.to = to;
                //printf("(%d, %d)\n", fr, to);
                if(myparas.use_hash_TTable) idx = exist_in_hash(tcount, temp_pair);
                else idx = tcount_full[fr][to] > 0.0? 1 : -1;
                //printf("%d\n", idx);fflush(stdout);
                int idx_train;
                //printf("done...\n");fflush(stdout);
                if(myparas.output_distr)
                {
                    if(myparas.use_hash_TTable) idx_train = exist_in_hash(tcount_train, temp_pair);
                    else idx_train = tcount_full_train[fr][to] > 0.0? 1 : -1;
                }
                if(idx >= 0)
                {
                    /* Map destination `to` to its index within the collection. */
                    if(myparas.fast_collection || new_test_song_exp)
                    {
                        s = -1;
                        for(i = 0; i < collection_size; i++) { if(collection_idx[i] == to) { s = i; break; } }
                    }
                    else s = to;
                    //printf("%d\n", idx);fflush(stdout);
                    if(myparas.use_hash_TTable) temp_val = retrieve_value_with_idx(tcount, idx);
                    else temp_val = tcount_full[fr][to];
                    if(s < 0) not_realn += temp_val;   /* destination not in collection */
                    else
                    {
                        //printf("s = %d\n", s);
                        llhood += temp_val * p[s];
                        if(new_test_song_exp) uniform_llhood += temp_val * log(1.0 / (double) k_test);
                        realn += temp_val;
                        if(myparas.output_distr)
                        {
                            //double temp_val_train = idx_train >= 0? retrieve_value_with_idx(tcount_train, idx_train): 0.0;
                            double temp_val_train;
                            if(idx_train < 0) temp_val_train = 0.0;
                            else temp_val_train = myparas.use_hash_TTable ? retrieve_value_with_idx(tcount_train, idx_train) : tcount_full_train[fr][to];
                            song_sep_ll[fr] += temp_val * p[s];
                            song_sep_ll[to] += temp_val * p[s];
                            fprintf(trans_distr_file, "%d %d %f\n", (int)temp_val_train, (int)temp_val, temp_val * p[s]);
                        }
                    }
                }
            }
        }
        /* Per-source scratch cleanup. */
        Array2Dfree(delta, collection_size, d);
        free(p);
        Array2Dfree(tempkd, collection_size, d);
        free(tempk);
        if (myparas.num_points == 3)
        {
            Array2Dfree(mid_delta, collection_size, d);
            free(mid_p);
            Array2Dfree(mid_tempkd, collection_size, d);
        }
        if(myparas.fast_collection || new_test_song_exp) free(collection_idx);
    }
    /* Reporting and global cleanup. */
    if(myparas.output_distr)
    {
        printf("Writing song distr.\n");
        for(i = 0; i < k; i++) fprintf(song_distr_file, "%d %f\n", (int)(pd.id_counts[i]), song_sep_ll[i]);
        fclose(song_distr_file);
        fclose(trans_distr_file);
        free(song_sep_ll);
    }
    llhood /= realn;
    printf("Avg log-likelihood on test: %f\n", llhood);
    if(myparas.fast_collection) printf("Ratio of transitions that do not appear in the training set: %f\n", not_realn / (realn + not_realn));
    if(new_test_song_exp)
    {
        uniform_llhood /= realn;
        printf("Avg log-likelihood for uniform baseline: %f\n", uniform_llhood);
    }
    if(myparas.use_hash_TTable) free_hash(tcount); else Array2Dfree(tcount_full, k, k);
    free_playlists_data(pd);
    if(myparas.output_distr)
    {
        if(myparas.use_hash_TTable) free_hash(tcount_train); else Array2Dfree(tcount_full_train, k, k);
    }
    Array2Dfree(realX, k * myparas.num_points, d);
    if(new_test_song_exp) { free(train_test_hash); free(test_ids_for_new_songs); }
    if(myparas.square_dist_filename[0] != '\0')
    {
        write_embedding_to_file(square_dist, k, k, myparas.square_dist_filename, 0);
        Array2Dfree(square_dist, k, k);
    }
}
//solves the CG algorithm int CGSolver(std::vector<double> &val, std::vector<int> &row_ptr, std::vector<int> &col_idx, std::vector<double> &b, std::vector<double> &x, double tol, std::string soln_prefix) { //initializes variables std::vector<double> r; std::vector<double> Ax = multiply_vec(val, col_idx, row_ptr, x); //make multiply vec double l2norm0; r=subtract_vec(b,Ax); l2norm0 = l2normer(r);// finds norm of l std::vector<double> p=r; int niter = 1; // write the initial guess std::stringstream s; s << std::setfill('0') << std::setw(3) << 0; //--compile_0 //--Originally, this was left `std::string filename = soln_` where soln_ undefined //--and the line missing a closing semi-colon. Also, `ix` not defined below... std::string filename = soln_prefix; //ix + s.str() + ".txt"; // This line was left-in (not commented upon submission) but it doesn't perform any assignment or operation! WriteSoln(x, filename); //--END while (niter<row_ptr.size()){ niter = niter +1; //multiply p from first 3 inpouts std::vector<double> Ap = multiply_vec(val, col_idx, row_ptr,p); //get dot product double r_initial = dot_product(r,r); double alpha = r_initial/dot_product(p,Ap); //multiply vector by coeffifient alpha, also single for loop x= add_vec(x, multiply_coeff(alpha,p)); // add vector adds two vectors . will be similar to dot product but will add instead r = add_vec(r,multiply_coeff(-alpha,Ap)); double l2normr=l2normer(r); //returns the number of iterations if (l2normr/l2norm0 < tol){ std::stringstream s; s << std::setfill('0') << std::setw(3) << niter; std::string filename = soln_prefix + s.str() + ".txt"; WriteSoln(x, filename); return int(niter); } double beta = dot_product(r,r)/r_initial; p=add_vec(r, multiply_coeff(beta,p)); if (niter % 10==0) { std::stringstream s; s << std::setfill('0') << std::setw(3) << niter; std::string filename = soln_prefix + s.str() + ".txt"; WriteSoln(x, filename); } } // otherwise the algorithm diverges return -1; }
/* add_vec4() - 4-component wrapper around the generic add_vec().
 * Presumably computes the element-wise sum of a and b with the result
 * written through one of the arguments (likely c) -- confirm against the
 * generic add_vec() signature. */
void add_vec4(vec4 a, vec4 b, vec4 c) { add_vec(a,b,c,4); }
// Experimental Newton-type step on the baseline probability masses baseP
// (companion to gradientDescent_step, but the proposal direction scales
// each component by its inverse numeric second derivative).  Rolls the
// cumulative hazard baseCH back from backupCH whenever the step fails;
// after 500 failures the routine becomes a permanent no-op.
void icm_Abst::experimental_step(){
    if(failedGA_counts > 500){return;}
    double org_llk = sum_llk();
    backupCH = baseCH;                 // snapshot for rollback on failure
    baseCH_2_baseS(); baseS_2_baseP(); // refresh S and P from the cumulative hazard
    numeric_dobs2_d2p();               // numeric 1st/2nd derivatives w.r.t. baseP
    int k = base_p_derv.size();
    prop_p.resize(k);
    double prop_mean = 0;
    int act_sum = 0;
    double new_llk;
    vector<bool> isActive(k);
    // Active set: positive mass, usable derivative, and a sufficiently
    // negative (concave) second derivative for a stable Newton ratio.
    for(int i = 0; i < k; i++){
        if(baseP[i] > 0 && !ISNAN(base_p_derv[i]) && base_p_2ndDerv[i] < -0.001){ isActive[i] = true; act_sum++; }
        else { isActive[i] = false; }
    }
    for(int i = 0; i < k; i++){ if(isActive[i]){ prop_mean += -base_p_derv[i]/base_p_2ndDerv[i]; } }
    prop_mean = prop_mean / act_sum;
    // Center the per-component Newton ratios so the proposal sums to zero
    // (preserves sum(baseP)), then normalize to a unit vector.
    for(int i = 0; i < k; i++){
        if(isActive[i]){ prop_p[i] = -base_p_derv[i]/base_p_2ndDerv[i] - prop_mean;}
        else {prop_p[i] = 0.0;}
    }
    makeUnitVector(prop_p);
    // Largest feasible step in either direction along prop_p.
    double scale_max = getMaxScaleSize(baseP, prop_p);
    for(int i = 0; i < k; i++){ prop_p[i] *= -1.0; }
    scale_max = min(scale_max, getMaxScaleSize(baseP, prop_p));
    for(int i = 0; i < k; i++){ prop_p[i] *= -1.0; }
    double delta_val = scale_max/2.0;
    delta_val = min(delta_val, h);
    delta_val = delta_val/10.0;        // small probe step for finite differences
    // double analytic_dd = directional_derv(base_p_derv, prop_p);
    if(delta_val == 0){ failedGA_counts++; baseCH = backupCH; new_llk = sum_llk(); Rprintf("Exit 1\n"); return; }
    // Central finite differences of the log-likelihood along prop_p.
    add_vec(delta_val, prop_p, baseP);        double llk_h = llk_from_p();
    add_vec(-2.0 * delta_val, prop_p, baseP); double llk_l = llk_from_p();
    add_vec(delta_val, prop_p, baseP);        double llk_0 = llk_from_p();
    double d1 = ( llk_h - llk_l ) / ( 2 * delta_val );
    double d2 = (llk_h + llk_l - 2.0 * llk_0 ) / (delta_val * delta_val);
    delta_val = -d1/d2;   // Newton step length along the proposal
    if(ISNAN(delta_val)){
        failedGA_counts++;
        baseCH= backupCH;
        new_llk = sum_llk();
        Rprintf("warning: delta_val is nan in GA step. llk_h = %f, llk_l = %f, llk_0 = %f, scale_max = %f\n", llk_h, llk_l, llk_0, scale_max);
        Rprintf("Exit 3\n");
        return;
    }
    scale_max = getMaxScaleSize(baseP, prop_p);
    delta_val = min( delta_val, scale_max );  // clamp to the feasible region
    add_vec(delta_val, prop_p, baseP);
    new_llk = llk_from_p();
    // Backtracking: flip the direction, then halve the step up to 5 times
    // while the likelihood is worse than the probe baseline llk_0.
    mult_vec(-1.0, prop_p);
    int tries = 0;
    double this_delta = delta_val;
    while(tries < 5 && new_llk < llk_0){
        tries++;
        this_delta = this_delta/2;
        add_vec(this_delta, prop_p, baseP);
        new_llk = llk_from_p();
    }
    if(new_llk < llk_0){
        failedGA_counts++;
        baseCH = backupCH;
        new_llk = sum_llk(); //Should NOT be llk_from_p(), since we are resetting the CH
        Rprintf("Exit 4\n");
        return;
    }
    if(org_llk > new_llk){ failedGA_counts++; baseCH = backupCH; new_llk = sum_llk(); }
}
/* makedic: build a character dictionary from feature-vector files.
 * Parses switches (-h, -v, -dim N, -F type, -o outfile), loads the first
 * vec file with load_vec(), accumulates the remaining files with
 * add_vec(), normalizes every category vector by the global set count
 * n_set, and writes the dictionary with save_dic().
 * Returns 0 on success, 1 on usage error, or a loader error code. */
int main(int ac, char **av){
    char *outfile = NULL;
    char *infile = NULL;
    int i,k, cid;
    int rcode;
    RecBase Rec;
    int sw_err = 0;
    /* Switch parsing; stops at the first non-option (the first vec file). */
    for ( k=1 ; k<ac ; k++ ){
        if ( 0 == strcmp(av[k],"-h") ){ sw_help = 1; continue; }
        if ( 0 == strcmp(av[k],"-v") ){ sw_verbose = 1; continue; }
        if ( 0 == strcmp(av[k],"-dim") ){
            if ( ++k >= ac ){ sw_err = 1; break; }
            sw_dim = atoi(av[k]);
            if ( sw_dim < 1 ){ sw_err = 1; break; }
            continue;
        }
        if ( 0 == strcmp(av[k],"-F") ){
            /* Feature type selects a predefined vector dimension. */
            if ( ++k >= ac ){ sw_err = 1; break; }
#ifdef Use_DEfeature
            if ( 0 == strcmp(av[k],"DEF") ){ sw_dim = FVECDIM_DEF; continue; }
            if ( 0 == strcmp(av[k],"DEFOL") ){ sw_dim = FVECDIM_DEFOL; continue; }
#endif
            if ( 0 == strcmp(av[k],"P-LOVE") ){ sw_dim = FVECDIM_PLOVE; continue; }
            if ( 0 == strcmp(av[k],"PLOVE") ){ sw_dim = FVECDIM_PLOVE; continue; }
            if ( 0 == strcmp(av[k],"P-LM") ){ sw_dim = FVECDIM_PLM; continue; }
            if ( 0 == strcmp(av[k],"PLM") ){ sw_dim = FVECDIM_PLM; continue; }
            sw_err = 1; break;
        }
        if ( 0 == strcmp(av[k],"-o") ){
            if ( ++k >= ac ){ sw_err = 1; break; }
            outfile = av[k];
            continue;
        }
        /* First non-switch argument is the initial vec file; `break`
         * leaves k pointing at it so the loop below resumes at k+1. */
        if ( infile == NULL ){ infile = av[k]; break; }
    }
    if ( sw_err || sw_help || outfile == NULL || infile == NULL ){
        /* Usage / help text, then exit with status 1. */
        fputs("Make character dictionary\n",stderr);
        fputs("makedic v1.1 Copyright (C) 2005-2010 Hideaki Goto\n",stderr);
        fputs("usage: makedic [options] -o out_dic_file vec_file1 vec_file2 ...\n",stderr);
#ifdef Use_DEfeature
        fputs(" -F type : feature type PLOVE/PLM(default)/DEF/DEFOL\n",stderr);
#else
        fputs(" -F type : feature type PLOVE/PLM(default)\n",stderr);
#endif
        fprintf(stderr," -dim N : dimension of feature vector (default:%d)\n",FVECDIM_PLM);
        fputs(" -v : verbose mode\n",stderr);
        return(1);
    }
    /* First file initializes the record; the rest are accumulated. */
    rcode = load_vec(&Rec, infile, sw_dim);
    if ( rcode ) return(rcode);
    for ( k++ ; k<ac ; k++ ){
        rcode = add_vec(&Rec, av[k]);
        if ( rcode ) return(rcode);
    }
    if ( sw_verbose ){ fprintf(stderr,"%d sets loaded.\n",n_set); }
    /* normalization */
    for ( cid=0 ; cid<Rec.n_cat ; cid++ ){
        for ( i=0 ; i<Rec.dic[cid].dim ; i++ ){
            Rec.dic[cid].e[i] /= (double)n_set;  /* average over loaded sets */
        }
    }
    save_dic(&Rec, outfile);
    return(rcode);
}
/* GLUT keyboard handler: dispatches on the pressed key to toggle render
 * modes, run a Phong midpoint test computation ('k'), build the initial
 * surface of revolution ('w'), and perform horizontal ('s') / vertical
 * ('a') subdivision of the model.
 * NOTE(review): in case 'k' the two `while` loops are commented out,
 * which also comments out their opening `{` braces -- the matching
 * closing braces further down are still present, so the braces in this
 * function are unbalanced as written.  Restore the loops (or comment out
 * the stray closing braces) before compiling. */
void myKeyHandler(unsigned char ch, int x, int y) {
	int i;
	static int subdiv=0;       /* horizontal subdivision level counter */
	struct point_t *slice;
	struct point_t *linecur;
	struct point_t *cur;
	struct point_t *new_points;
	struct slice_t *cur_slice,*cur2_slice;
	struct point_t *cur2;
	struct point_t points[5];  /* right/top/left/bottom/center test vertices */
	double a,b,c;
	GLfloat v1[3],v2[3],v3[3];
	double deginc;
	// struct slice_t *cur_slice;
	switch(ch) {
	case 'q':
		endSubdiv(0);
		break;
	case 'z':
		/* Toggle between 2D and 3D view. */
		mode=(~mode)&1;
		printf("%s\n",mode?"3D mode":"2D mode");
		switch(mode) {
		case 0: resetCamera(); break;
		case 1: reset3DCamera(); break;
		}
		break;
	case 'k':
		/* test phong stuff */
		cur_slice = slices;
		cur2_slice = slices->n;
		//while(cur_slice!=NULL) {
		cur = cur_slice->line;
		cur2 = cur2_slice->line;
		//while(cur->n!=NULL) {
		/* right vertex */
		add_vec(&(cur->nx),&(cur->n->nx),&(points[0].nx));
		normalize(&(points[0].nx));
		sub_vec(&(cur->n->x),&(cur->x),v1);
		v1[0] /= 2; v1[1] /= 2; v1[2] /= 2;  /* halve: midpoint offset */
		add_vec(&(cur->x),v1,&(points[0].x));
		/* top vertex */
		add_vec(&(cur->nx),&(cur2->nx),&(points[1].nx));
		normalize(&(points[1].nx));
		sub_vec(&(cur2->x),&(cur->x),v1);
		v1[0] /= 2; v1[1] /= 2; v1[2] /= 2;
		add_vec(&(cur->x),v1,&(points[1].x));
		/* left vertex */
		add_vec(&(cur2->nx),&(cur2->n->nx),&(points[2].nx));
		normalize(&(points[2].nx));
		sub_vec(&(cur2->n->x),&(cur2->x),v1);
		v1[0] /= 2; v1[1] /= 2; v1[2] /= 2;
		add_vec(&(cur2->x),v1,&(points[2].x));
		/* bottom vertex */
		add_vec(&(cur2->n->nx),&(cur->n->nx),&(points[3].nx));
		normalize(&(points[3].nx));
		sub_vec(&(cur->n->x),&(cur2->n->x),v1);
		v1[0] /= 2; v1[1] /= 2; v1[2] /= 2;
		add_vec(&(cur2->n->x),v1,&(points[3].x));
		/* center vertex */
		add_vec(&(points[0].nx),&(points[1].nx),v1);
		add_vec(&(points[2].nx),&(points[3].nx),v2);
		add_vec(v1,v2,&(points[4].nx));
		normalize(&(points[4].nx));
		sub_vec(&(points[3].x),&(cur2->n->x),v1);
		sub_vec(&(points[2].x),&(cur2->n->x),v2);
		add_vec(v1,v2,v3);
		normalize(v3);
		/* scale the diagonal direction by the quad's edge lengths */
		a=sqrt(v1[0]*v1[0]+v1[1]*v1[1]+v1[2]*v1[2]);
		b=sqrt(v2[0]*v2[0]+v2[1]*v2[1]+v2[2]*v2[2]);
		c=sqrt(a*a+b*b);
		v3[0] *= c; v3[1] *= c; v3[2] *= c;
		add_vec(&(cur2->n->x),v3,&(points[4].x));
		printf("v2[0]=%f,v2[1]=%f,v2[2]=%f\nv3[0]=%f,v3[1]=%f,v3[2]=%f\n",v2[0],v2[1],v2[2],v3[0],v3[1],v3[2]);
		for(i=0; i<5; i++)
			printf("points[%d]->x=%f,points[%d]->y=%f,points[%d]->z=%f\n", i,points[i].x,i,points[i].y,i,points[i].z);
		printf("cur->x=%f,cur->y=%f,cur->z=%f\ncur->n->x=%f,cur->n->y=%f,cur->n->z=%f\n", cur->x,cur->y,cur->z,cur->n->x,cur->n->y,cur->n->z);
		printf("cur2->x=%f,cur2->y=%f,cur2->z=%f\ncur2->n->x=%f,cur2->n->y=%f,cur2->n->z=%f\n", cur2->x,cur2->y,cur2->z,cur2->n->x,cur2->n->y,cur2->n->z);
		cur = cur->n;
		cur2 = cur2->n;
		} /* NOTE(review): stray brace from the commented-out inner while */
		cur_slice = cur_slice->n;
		cur2_slice = cur2_slice->n != NULL ? cur2_slice->n : slices; /* circle around */
		} /* NOTE(review): stray brace from the commented-out outer while */
		break;
	case 'n':
		normals=(~normals)&1;
		printf("Normal mode %s\n",normals?"on":"off");
		break;
	case 'e':
		/* Toggle solid fill vs wireframe rendering. */
		solid=(~solid)&1;
		printf("%s\n",solid?"Solid mode":"Wireframe mode");
		switch(solid) {
		case 0: glPolygonMode(GL_FRONT_AND_BACK,GL_LINE); break;
		case 1: glPolygonMode(GL_FRONT_AND_BACK,GL_FILL); break;
		}
		break;
	case 'r':
		faces=(~faces)&1;
		printf("%s\n",faces?"Faces mode":"Control points mode");
		break;
	case 'w':
		/* calculate initial 3d object */
		if(num<5) printf("There must be at least 5 control points.\n");
		else if(!mode) {
			mode=(~mode)&1;
			printf("%s\n",mode?"3D mode":"2D mode");
			switch(mode) {
			case 0: resetCamera(); break;
			case 1: reset3DCamera(); break;
			}
			freeModel();
			subdiv_v = 0;
			subdiv_h = NUMSLICES;
			/* the radius of the circle for each of the points is x */
			for(i=0;i<subdiv_h;i++) {
				ALLOC_POINT(slice);
				cur=slice;
				linecur=line;
				while(linecur!=NULL) {
					/* revolve the 2D profile point around the Y axis */
					cur->z = linecur->x*sin(DEGINC*i);
					cur->x = linecur->x*cos(DEGINC*i);
					cur->y = linecur->y;
					linecur = linecur->n;
					if(linecur!=NULL) { ALLOC_POINT(cur->n); cur = cur->n; }
				}
				addSlice(slice);
			}
		}
		recompute_normals();
		break;
	case 's':
		/* horizontal subdivision */
		if(!mode || slices==NULL) break;
		/* backup the original slice */
		new_points = duplicate_slice(slices->line);
		freeModel();
		subdiv_h<<=1;   /* double the number of slices */
		subdiv++;
		printf("Horizontal subdivision level %d\n",subdiv);
		deginc = 2*M_PI/subdiv_h;
		for(i=0;i<subdiv_h;i++) {
			ALLOC_POINT(slice);
			cur=slice;
			linecur=new_points;
			while(linecur!=NULL) {
				cur->z = linecur->x*sin(deginc*i);
				cur->x = linecur->x*cos(deginc*i);
				cur->y = linecur->y;
				linecur = linecur->n;
				if(linecur!=NULL) { ALLOC_POINT(cur->n); cur = cur->n; }
			}
			addSlice(slice);
		}
		recompute_normals();
		break;
	case 'a':
		/* vertical subdivision */
		if(!mode || slices==NULL) break;
		cur_slice=slices;
		subdiv_v++;
		printf("Vertical subdivision level %d\n",subdiv_v);
		linecur = cur_slice->line;
		/* calc the first point */
		cur = new_points = calc_point(linecur,linecur,linecur->n,linecur->n->n);
		/* calc middle and last points */
		while(linecur->n->n!=NULL) {
			if(linecur->n->n->n!=NULL) /* middle points */
				cur->n = calc_point(linecur,linecur->n,linecur->n->n,linecur->n->n->n);
			else
				cur->n = calc_point(linecur,linecur->n,linecur->n->n,linecur->n->n);
			cur = cur->n;
			linecur = linecur->n;
		}
		interleave(cur_slice->line,new_points);
		new_points = duplicate_slice(cur_slice->line);
		deginc = 2*M_PI/subdiv_h;
		freeModel();
		for(i=0;i<subdiv_h;i++) {
			ALLOC_POINT(slice);
			cur=slice;
			linecur=new_points;
			while(linecur!=NULL) {
				cur->z = linecur->x*sin(deginc*i);
				cur->x = linecur->x*cos(deginc*i);
				cur->y = linecur->y;
				linecur = linecur->n;
				if(linecur!=NULL) { ALLOC_POINT(cur->n); cur = cur->n; }
			}
			addSlice(slice);
		}
		recompute_normals();
		break;
	case 'd':
		shading=(~shading)&1;
		printf("%s shading\n",shading?"Phong":"Gouraud");
		break;
	case '<':
		if(mode) { glMatrixMode(GL_MODELVIEW); glRotatef(1,0.0,1.0,0.0); }
		break;
	case '>':
		if(mode) { glMatrixMode(GL_MODELVIEW); glRotatef(-1,0.0,1.0,0.0); }
		break;
	default:
		/* Unrecognized keypress */
		return;
	}
	glutPostRedisplay();
	return;
}