/*
 * regularization_path: sweep lambda over `nval` logarithmically spaced values
 * from lambda_max down to epsilon*lambda_max, training with warm starts, and
 * return the lambda that achieves the lowest MAE on the training data.
 *
 * prob    - training problem (X: n x dim, y: n); read-only here
 * epsilon - ratio lambda_min / lambda_max for the sweep
 * nval    - number of lambda values to evaluate
 * returns - lambda with the best (lowest) training MAE
 */
double regularization_path(problem *prob, double epsilon, int nval)
{
    double llog, error, best_error = DBL_MAX, lambda, best_lambda;
    double lmax, lmin, lstep;
    int nzerow;
    double *y_hat = dvector(1, prob->n);
    double *w = dvector(1, prob->dim);

    /* compute maximum lambda for which all weights are 0 (Osborne et al. 1999)
     * lambda_max = ||X'y||_inf. According to scikit-learn source code, you can
     * divide by npatterns and it still works */
    dmvtransmult(prob->X, prob->n, prob->dim, prob->y, prob->n, w);
    lmax = dvnorm(w, prob->dim, INF) / prob->n;
    lmin = epsilon*lmax;
    lstep = (log2(lmax)-log2(lmin))/nval;

    /* sane default in case the loop body never executes (e.g. degenerate
     * lmax/lmin); previously best_lambda could be returned uninitialized */
    best_lambda = lmax;

    fprintf(stdout, "lmax=%g lmin=%g epsilon=%g nval=%d\n",
            lmax, lmin, epsilon, nval);

    /* warm-starts: weights are set to 0 only at the begining */
    dvset(w, prob->dim, 0);
    for (llog = log2(lmax); llog >= log2(lmin); llog -= lstep)
    {
        lambda = pow(2, llog);
        /*cross_validation(prob, w, lambda, 0, nr_folds, y_hat);*/
        /*******************************************************/
        int iter = 1000;
        double tol = 0, fret;
        fista(prob, w, lambda, 0, tol, 0, &iter, &fret);
        fista_predict(prob, w, y_hat);
        /*******************************************************/
        error = mae(prob->y, prob->n, y_hat);
        fprintf(stdout, "   lambda %10.6lf MAE %7.6lf active weights %d/%d\n",
                lambda, error, dvnotzero(w, prob->dim), prob->dim);
        dvprint(stdout, w, prob->dim);
        if (error < best_error)
        {
            best_error = error;
            best_lambda = lambda;
        }
    }

    /* BUG FIX: query the active-weight count BEFORE freeing w — the original
     * called dvnotzero(w, ...) after free_dvector(w, ...), a use-after-free */
    nzerow = dvnotzero(w, prob->dim);

    free_dvector(y_hat, 1, prob->n);
    free_dvector(w, 1, prob->dim);

    print_line(60);
    fprintf(stdout, "\nBest: lambda=%g MAE=%g active weights=%d/%d\n",
            best_lambda, best_error, nzerow, prob->dim);

    return best_lambda;
}
/*
 * dcovariance: sample cross-covariance of two data sets.
 * Each COLUMN of A and B is one observation; A is (p x m), B is (q x m).
 * Cov(i,j) = sum_k (A(i,k) - mean_A(i)) * (B(j,k) - mean_B(j)) / (m - 1).
 *
 * Cov may be NULL, in which case a (p x q) matrix is allocated and returned;
 * otherwise its dimensions are checked. Caller owns the returned matrix.
 * Exits with nonzero status on dimension mismatch.
 */
gsl_matrix * dcovariance(gsl_matrix *A, gsl_matrix *B, gsl_matrix *Cov)
{
    int i, j, k, m;
    double x, y, u, v, sum;
    gsl_vector *mean1 = NULL, *mean2 = NULL;

    m = A->size2; /* each column of A,B is one data point */
    if (A->size2 != B->size2)
    {
        /* %zu matches size_t; %d was undefined behavior. exit(1): error path
         * should not report success */
        fprintf(stderr, "dcovariance: inconsistent dimensions: %zu %zu\n",
                A->size2, B->size2);
        exit(1);
    }

    if (Cov == NULL)
    {
        Cov = gsl_matrix_calloc(A->size1, B->size1);
    }
    else if (Cov->size1 != A->size1 || Cov->size2 != B->size1)
    {
        fprintf(stderr, "dcovariance: inconsistent dimensions (%zu %zu), (%zu %zu).\n",
                Cov->size1, A->size1, Cov->size2, B->size1);
        exit(1);
    }

    /* row means of A */
    mean1 = gsl_vector_calloc(A->size1);
    for (i = 0; i < A->size1; i++)
    {
        sum = 0;
        for (j = 0; j < A->size2; j++)
            sum += dmget(A, i, j);
        u = sum / (double)A->size2;
        /* BUG FIX: the original never stored u (missing dvset), so mean1
         * stayed all-zero and every covariance w.r.t. A was wrong */
        dvset(mean1, i, u);
    }

    /* row means of B */
    mean2 = gsl_vector_calloc(B->size1);
    for (i = 0; i < B->size1; i++)
    {
        sum = 0;
        for (j = 0; j < B->size2; j++)
            sum += dmget(B, i, j);
        u = sum / (double)B->size2;
        dvset(mean2, i, u);
    }

    /* centered cross-products, normalized by (m - 1) (unbiased estimator) */
    for (i = 0; i < Cov->size1; i++)
    {
        u = dvget(mean1, i);
        for (j = 0; j < Cov->size2; j++)
        {
            v = dvget(mean2, j);
            sum = 0;
            for (k = 0; k < m; k++)
            {
                x = dmget(A, i, k);
                y = dmget(B, j, k);
                sum += (x - u) * (y - v);
            }
            sum /= (double)(m - 1); /* double, not float: keep full precision */
            dmset(Cov, i, j, sum);
        }
    }

    gsl_vector_free(mean1);
    gsl_vector_free(mean2);
    return Cov;
}
/*
 * vServiceTask: FreeRTOS housekeeping task. Loops forever, waking every
 * 500 ticks to refresh the status display (clock, supply voltage, debug
 * counters), step the Ethernet PHY autonegotiation state machine, and
 * propagate link state to the DHCP client.
 *
 * NOTE(review): this definition is truncated in the visible source (the
 * trailing `else` branch and the function's close are not in view).
 */
static void vServiceTask( void *pvParameters )
{
	int last_backlight = -1;
	int last_contrast = -1;
	char last_repeater_mode = 0;
	char last_parrot_mode = 0;
	char dcs_boot_timer = 8;
	// bool update = true;
	bool last_rmu_enabled = false;

	for (;;)
	{
		vTaskDelay(500);
		// gpio_toggle_pin(AVR32_PIN_PB28);
		//gpio_toggle_pin(AVR32_PIN_PB18);
		// x_counter ++;
		// rtclock_disp_xy(84, 0, x_counter & 0x02, 1);
		rtclock_disp_xy(84, 0, 2, 1);

		/* format supply voltage as "x.yV": shift the digits apart and
		 * insert the decimal point (assumes voltage is in 1/100 V units
		 * — TODO confirm against the ADC scaling) */
		vdisp_i2s( tmp_buf, 5, 10, 0, voltage);
		tmp_buf[3] = tmp_buf[2];
		tmp_buf[2] = '.';
		tmp_buf[4] = 'V';
		tmp_buf[5] = 0;
		vdisp_prints_xy( 55, 0, VDISP_FONT_4x6, 0, tmp_buf );

		/* debug counters on the debug layer, one per row */
		// vdisp_i2s( tmp_buf, 5, 10, 0, serial_rx_error );
		// vd_prints_xy(VDISP_DEBUG_LAYER, 108, 28, VDISP_FONT_4x6, 0, tmp_buf );
		vdisp_i2s( tmp_buf, 5, 10, 0, serial_rx_ok );
		vd_prints_xy(VDISP_DEBUG_LAYER, 108, 34, VDISP_FONT_4x6, 0, tmp_buf );
		// vdisp_i2s( tmp_buf, 5, 10, 0, serial_timeout_error );
		vdisp_i2s( tmp_buf, 5, 10, 0, dstar_pos_not_correct );
		vd_prints_xy(VDISP_DEBUG_LAYER, 108, 40, VDISP_FONT_4x6, 0, tmp_buf );
		vdisp_i2s( tmp_buf, 5, 10, 0, serial_putc_q_full );
		vd_prints_xy(VDISP_DEBUG_LAYER, 108, 46, VDISP_FONT_4x6, 0, tmp_buf );
		vdisp_i2s( tmp_buf, 5, 10, 0, initialHeapSize );
		vd_prints_xy(VDISP_DEBUG_LAYER, 108, 52, VDISP_FONT_4x6, 0, tmp_buf );
		vdisp_i2s( tmp_buf, 5, 10, 0, xPortGetFreeHeapSize() );
		vd_prints_xy(VDISP_DEBUG_LAYER, 108, 58, VDISP_FONT_4x6, 0, tmp_buf );

		/* Ethernet PHY autonegotiation state machine: one MDIO access per
		 * 500-tick wakeup. After state 3 it keeps re-reading PHY register
		 * 0x10 every iteration (state stays 3). */
		int v = 0;

		switch (eth_autoneg_state)
		{
		case 0:
			if (SETTING_BOOL(B_ONLY_TEN_MBIT))
			{
				AVR32_MACB.man = 0x50920061; // write register 0x04, advertise only 10MBit/s for autoneg
			}
			eth_autoneg_state = 1;
			break;
		case 1:
			AVR32_MACB.man = 0x50821200; // write register 0x00, power on, autoneg, restart autoneg
			eth_autoneg_state = 2;
			break;
		case 2:
			AVR32_MACB.man = 0x60C20000; // read register 0x10
			eth_autoneg_state = 3;
			break;
		case 3:
			v = AVR32_MACB.MAN.data; // read data from previously read register 0x10
			AVR32_MACB.man = 0x60C20000; // read register 0x10
			break;
		}

		dvset();
		nodeinfo_print();

		/* print RMU status only on change */
		if (last_rmu_enabled != rmu_enabled)
		{
			rmuset_print();
			last_rmu_enabled = rmu_enabled;
		}

		/* blank unless the link is up */
		const char * net_status = "     ";

		/* bit 0 of PHY reg 0x10: link up — presumably; verify against the
		 * PHY datasheet */
		dhcp_set_link_state( v & 1 );

		if (v & 1) // Ethernet link is active
		{
			/* bits 2:1 encode speed/duplex; XOR reorders them so that
			 * bit0 = speed, bit1 = duplex for the MACB NCFGR bits below */
			v = ((v >> 1) & 0x03) ^ 0x01;
			switch (v)
			{
			case 0:
				net_status = " 10HD";
				break;
			case 1:
				net_status = "100HD";
				break;
			case 2:
				net_status = " 10FD";
				break;
			case 3:
				net_status = "100FD";
				break;
			}
			AVR32_MACB.ncfgr = 0x00000800 | v; // SPD, FD, CLK = MCK / 32 -> 1.875 MHz
			/* inverted video while DHCP is not yet ready */
			vdisp_prints_xy( 28, 0, VDISP_FONT_4x6, (dhcp_is_ready() != 0) ? 0 : 1, net_status );
		}
		else
		{
		/* (source truncated here in the visible chunk) */
int main(int argc, char *argv[]) { char *ftest = NULL; struct timeval t0, t1, diff; problem *train, *test; int regpath_flag = 0, backtracking_flag = 0, std_flag = 1, verbose_flag = 0; int iter = 1000, c, crossval_flag = 0, nr_folds = 10, nval = 100, nzerow; double *w, *y_hat, *mean, *var; double lambda_1 = 1e-6, lambda_2 = 0, tol = 1e-9, epsilon, fret; while (1) { static struct option long_options[] = { /* These options don't set a flag. We distinguish them by their indices. */ {"help", no_argument, 0, 'h'}, {"verbose", no_argument, 0, 'v'}, {"backtracking", no_argument, 0, 'b'}, {"original", no_argument, 0, 'o'}, {"test", required_argument, 0, 't'}, {"l1", required_argument, 0, 'l'}, {"l2", required_argument, 0, 'r'}, {"cross-validation", optional_argument, 0, 'c'}, {"tolerance ", optional_argument, 0, 'e'}, {"regpath", optional_argument, 0, 'p'}, /*{"stop", optional_argument, 0, 's'},*/ {"max-iters", optional_argument, 0, 'i'}, {0, 0, 0, 0} }; int option_index = 0; c = getopt_long (argc, argv, "vhbot:r:l:p::c::e::s::i::", long_options, &option_index); /* Detect the end of the options. 
*/ if (c == -1) break; switch(c) { case 'h': exit_with_help(argv[PROG]); break; case 'b': backtracking_flag = 1; break; case 'v': verbose_flag = 1; break; case 'o': std_flag = 0; break; case 't': ftest = optarg; break; case 'c': crossval_flag = 1; if (optarg) if (sscanf(optarg, "%d", &nr_folds) != 1) { fprintf(stderr, "%s: option -c requires an int\n", argv[PROG]); exit_without_help(argv[PROG]); } break; case 'e': if (optarg) if (sscanf(optarg, "%lf", &tol) != 1) { fprintf(stderr, "%s: option -e requires a double\n", argv[PROG]); exit_without_help(argv[PROG]); } break; case 'p': regpath_flag = 1; if (optarg) if (sscanf(optarg, "%d", &nval) != 1) { fprintf(stderr, "%s: option -p requires an int\n", argv[PROG]); exit_without_help(argv[PROG]); } break; //case 's': // search_flag = 1; // if (optarg) // if (sscanf(optarg, "%lf:%d:%lf", &lmax, &nval, &lmin) != 3) // { // printf("%s\n", optarg); // fprintf(stderr, "%s: option -s requires a range in the format MAX:NVAL:MIN\n", argv[PROG]); // exit_without_help(argv[PROG]); // } // break; case 'l': if (sscanf(optarg, "%lf", &lambda_1) != 1) { fprintf(stderr, "%s: option -l requires a float\n", argv[PROG]); exit_without_help(argv[PROG]); } break; case 'r': if (sscanf(optarg, "%lf", &lambda_2) != 1) { fprintf(stderr, "%s: option -r requires a float\n", argv[PROG]); exit_without_help(argv[PROG]); } break; case 'i': if (optarg) if (sscanf(optarg, "%d", &iter) != 1) { fprintf(stderr, "%s: option -i requires an int\n", argv[PROG]); exit_without_help(argv[PROG]); } break; case '?': /* getopt_long already printed an error message. */ exit_without_help(argv[PROG]); break; default: printf("?? 
getopt returned character code 0%o ??\n", c); } } if ((argc - optind) < ARGC_MIN || (argc - optind) > ARGC_MAX) { fprintf(stderr, "%s: missing file operand\n", argv[PROG]); exit_without_help(argv[PROG]); } /* start time */ gettimeofday(&t0, 0); train = read_problem(argv[optind]); fprintf(stdout, "n:%d dim:%d\n", train->n, train->dim); /* alloc vector for means and variances, plus 1 for output */ if (std_flag) { fprintf(stdout, "Standarizing train set...\n"); mean = dvector(1, train->dim+1); var = dvector(1, train->dim+1); standarize(train, 1, mean, var); } if (ftest) { test = read_problem(ftest); if (std_flag) standarize(test, 0, mean, var); } if (regpath_flag) { fprintf(stdout, "Regularization path...\n"); /* in glmnet package they use 0.0001 instead of 0.001 ? */ epsilon = train->n > train->dim ? 0.001 : 0.01; lambda_1 = regularization_path(train, epsilon, nval); } fprintf(stdout, "lambda_1: %g\n", lambda_1); fprintf(stdout, "lambda_2: %g\n", lambda_2); /* initialize weight vector to 0 */ w = dvector(1, train->dim); dvset(w, train->dim, 0); fprintf(stdout, "Training model...\n"); if (backtracking_flag) /*fista_backtrack(train, w, lambda_1, lambda_2, tol, &iter, &fret);*/ fista_nocov(train, w, lambda_1, lambda_2, tol, &iter, &fret); else fista(train, w, lambda_1, lambda_2, tol, verbose_flag, &iter, &fret); y_hat = dvector(1, train->n); fista_predict(train, w, y_hat); nzerow = dvnotzero(w, train->dim); fprintf(stdout, "Iterations: %d\n", iter); fprintf(stdout, "Active weights: %d/%d\n", nzerow, train->dim); if (std_flag) fprintf(stdout, "MAE train: %g\n", var[train->dim+1]*mae(train->y, train->n, y_hat)); fprintf(stdout, "MAE train (standarized): %g\n", mae(train->y, train->n, y_hat)); free_dvector(y_hat, 1, train->n); if (crossval_flag) { dvset(w, train->dim, 0); y_hat = dvector(1, train->n); cross_validation(train, w, lambda_1, lambda_2, nr_folds, y_hat); fprintf(stdout, "MAE cross-validation: %lf\n", mae(train->y, train->n, y_hat)); free_dvector(y_hat, 1, 
train->n); } if (ftest) { /* we alloc memory again since test size is different from train size */ y_hat = dvector(1, test->n); fista_predict(test, w, y_hat); fprintf(stdout, "MAE test: %g\n", mae(test->y, test->n, y_hat)); free_dvector(y_hat, 1, test->n); } /* stop time */ gettimeofday(&t1, 0); timeval_subtract(&t1, &t0, &diff); fprintf(stdout, "Time(h:m:s.us): %02d:%02d:%02d.%06ld\n", diff.tv_sec/3600, (diff.tv_sec/60), diff.tv_sec%60, diff.tv_usec); if (verbose_flag) { fprintf(stdout, "Weights: "); dvprint(stdout, w, train->dim); } free_dvector(w, 1, train->dim); if (std_flag) { free_dvector(mean, 1, train->dim+1); free_dvector(var, 1, train->dim+1); } if (ftest) { free_dvector(test->y, 1, test->n); free_dmatrix(test->X, 1, test->n, 1, test->dim); free(test); } free_dvector(train->y, 1, train->n); free_dmatrix(train->X, 1, train->n, 1, train->dim); free(train); return 0; }