Example #1
int main(int argc, char * argv[]){

    /* MPI Initialization */
    int nprocs, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (argc!=4){
        if(rank==0) fprintf(stderr, "Usage: %s max_steps param_flag output_filename\n", argv[0]);
        MPI_Finalize();
        exit(EXIT_FAILURE);
    }

    /* number of steps in mcmc */
    int max_steps;
    /* flag to indicate starting parameters */
    int param_flag;
    /* name of file to output -- not including path */
    char file_string[256];

    sscanf(argv[1], "%d", &max_steps);
    sscanf(argv[2], "%d", &param_flag);
    if(rank==0) fprintf(stderr, "%d steps in mcmc chain.\n", max_steps);
    sscanf(argv[3], "%s", file_string);

    /* -- Initialize parameters --*/
    STEP_DATA initial;
    load_step_data(&initial, param_flag, rank);

    /* -- Load data from various files --*/
    int i, j;
    int N_plist;
    int N_bins;
    POINTING *plist;

    /* have each process separately access these files */
    int current_rank = 0;
    while ( current_rank < nprocs ){
        if (current_rank == rank) {
            load_pointingID(&N_plist, &plist);
            if(rank == 0) fprintf(stderr, "%d pointings to do\n", N_plist);
            N_bins = load_Nbins();
            if(rank == 0) fprintf(stderr, "%d bins per pointing\n", N_bins);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        current_rank++;
    }

    /* Establish slice of pointings for each process to handle */
    int slice_length;
    int remain = N_plist % nprocs;
    int lower_ind, upper_ind;

    /* Make slices as even as possible */
    slice_length = N_plist / nprocs;
    lower_ind = rank * slice_length;
    if (rank < remain){
        lower_ind += rank;
        slice_length++;
    }
    else lower_ind += remain;
    upper_ind = lower_ind + slice_length;

    /* Each process now loads data for its slice only */
    load_ZRW(plist, lower_ind, upper_ind, rank);
    load_rbins(plist, N_bins, lower_ind, upper_ind, rank);
    load_pairs(plist, N_bins, lower_ind, upper_ind, rank);
    // load_covariance(plist, N_bins, lower_ind, upper_ind, rank);
    // load_correlation(plist, N_bins, lower_ind, upper_ind, rank);
    load_inv_covariance(plist, N_bins, lower_ind, upper_ind, rank);

    /* test loading of covariance */
    // if(rank==0){
    //     for(i=0; i<N_bins; i++){
    //         for(j=0; j<N_bins; j++){
    //             fprintf(stderr, "Value: %le, Row: %d, Col: %d \n",
    //                 plist[1].cov_row[i].cov_col[j], i, j);
    //         }
    //     }

    // }
    // MPI_Barrier(MPI_COMM_WORLD);

    /* Calculate DD/RR */
    /* Only needs to be done once */
    calculate_DD_RR(plist, N_bins, lower_ind, upper_ind);

    /* Run mcmc */
    run_mcmc(plist, initial, N_bins, max_steps, lower_ind, upper_ind,
        rank, nprocs, file_string);

    /* Free allocated values */
    for(i=lower_ind; i<upper_ind; i++){
        for(j=0; j<N_bins; j++){
            free(plist[i].rbin[j].pair1);
            free(plist[i].rbin[j].pair2);
            // free(plist[i].cov_row[j].cov_col);
            // free(plist[i].cor_row[j].cor_col);
            free(plist[i].invcov_row[j].invcov_col);
        }
        free(plist[i].rbin);
        // free(plist[i].cov_row);
        // free(plist[i].cor_row);
        free(plist[i].invcov_row);
        free(plist[i].Z);
        free(plist[i].R);
        free(plist[i].weight);
    }
    free(plist);
    if(rank==0) fprintf(stderr, "Allocated space cleared. \n");

    /* barrier to ensure all procs clear space before MPI_Finalize */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();

    return EXIT_SUCCESS;

}
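
The slice bookkeeping above splits the N_plist pointings as evenly as possible across ranks: every rank gets N_plist / nprocs pointings, and the first N_plist % nprocs ranks take one extra. Below is a minimal standalone sketch of that formula (no MPI required; the values chosen for N_plist and nprocs are hypothetical), printing the [lower_ind, upper_ind) range each rank would receive.

#include <stdio.h>

/* Standalone sketch of the slice computation used in main() above:
 * the first (N_plist % nprocs) ranks receive one extra pointing so the
 * work is spread as evenly as possible. Example values are hypothetical. */
int main(void)
{
    int N_plist = 10;   /* hypothetical number of pointings */
    int nprocs  = 4;    /* hypothetical number of MPI processes */
    int rank;

    for (rank = 0; rank < nprocs; rank++) {
        int slice_length = N_plist / nprocs;
        int remain = N_plist % nprocs;
        int lower_ind = rank * slice_length;
        if (rank < remain) {
            lower_ind += rank;      /* earlier ranks shift by one per extra pointing */
            slice_length++;         /* and take one extra pointing themselves */
        } else {
            lower_ind += remain;    /* later ranks shift past all the extras */
        }
        int upper_ind = lower_ind + slice_length;
        printf("rank %d: pointings [%d, %d)\n", rank, lower_ind, upper_ind);
    }
    return 0;
}
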
Example #2
int main(int argc, char * argv[]){

    /* MPI Initialization */
    int nprocs, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* parse command line for starting params, steps, and filename */
    ARGS cl = parse_command_line( argc, argv );

    if(rank==0){
        fprintf(stderr, "N_parameters: %d\n", cl.N_params);
        fprintf(stderr, "Starting parameters: r0_thin = %lf , z0_thin = %lf , r0_thick = %lf , z0_thick = %lf , ratio = %lf\n",
            cl.r0_thin, cl.z0_thin, cl.r0_thick, cl.z0_thick, cl.ratio);
        fprintf(stderr, "%d steps in MCMC chain...\n", cl.max_steps);
        fprintf(stderr, "Results will be output to %s\n", cl.filename);
    }

    /* Assign cl arguments to initial parameters */
    STEP_DATA initial;
    initial.r0_thin = cl.r0_thin;
    initial.z0_thin = cl.z0_thin;
    initial.r0_thick = cl.r0_thick;
    initial.z0_thick = cl.z0_thick;
    initial.ratio_thick_thin = cl.ratio;
    initial.chi2 = 0.0;
    initial.chi2_reduced = 0.0;

    /* -- Load data from various files --*/
    int i, j;
    int N_plist;
    int N_bins;
    POINTING *plist;

    /* have each process separately access these files */
    int current_rank = 0;
    while ( current_rank < nprocs ){
        if (current_rank == rank){
            load_pointingID(&N_plist, &plist);
            if(rank == 0) fprintf(stderr, "%d pointings to do\n", N_plist);
            N_bins = load_Nbins();
            if(rank == 0) fprintf(stderr, "%d bins per pointing\n", N_bins);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        current_rank++;
    }

    /* Establish slice of pointings for each process to handle */
    int slice_length;
    int remain = N_plist % nprocs;
    int lower_ind, upper_ind;

    /* Make slices as even as possible */
    slice_length = N_plist / nprocs;
    lower_ind = rank * slice_length;
    if (rank < remain){
        lower_ind += rank;
        slice_length++;
    }
    else lower_ind += remain;
    upper_ind = lower_ind + slice_length;

    /* Each process now loads data for its slice only */
    load_ZRW(plist, lower_ind, upper_ind, rank);
    load_rbins(plist, N_bins, lower_ind, upper_ind, rank);
    load_pairs(plist, N_bins, lower_ind, upper_ind, rank);
    load_inv_correlation(plist, N_bins, lower_ind, upper_ind, rank);

    /* Run mcmc */
    run_mcmc(plist, cl.N_params, initial, N_bins, cl.max_steps, lower_ind, upper_ind,
        rank, nprocs, cl.filename);

    /* Free allocated values */
    for(i=lower_ind; i<upper_ind; i++){
        for(j=0; j<N_bins; j++){
            free(plist[i].rbin[j].pair1);
            free(plist[i].rbin[j].pair2);
        }
        free(plist[i].rbin);
        free(plist[i].Z);
        free(plist[i].R);
        free(plist[i].weight);
    }
    free(plist);
    if(rank==0) fprintf(stderr, "Allocated space cleared. \n");

    /* barrier to ensure all procs clear space before MPI_Finalize */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();

    return EXIT_SUCCESS;

}
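
Both examples read the shared pointing-ID and bin-count files one rank at a time, with a barrier after every turn so processes do not touch the same files simultaneously. Here is a minimal sketch of that serialized-access pattern in isolation; load_shared_inputs() is a hypothetical stand-in for load_pointingID()/load_Nbins().

#include <stdio.h>
#include <mpi.h>

/* Hypothetical stand-in for the project's file-loading routines. */
static void load_shared_inputs(int rank)
{
    fprintf(stderr, "rank %d reading shared input files\n", rank);
}

int main(int argc, char *argv[])
{
    int nprocs, rank, current_rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* One rank at a time: only the rank whose turn it is reads,
     * and everyone waits at the barrier before the next turn. */
    for (current_rank = 0; current_rank < nprocs; current_rank++) {
        if (current_rank == rank)
            load_shared_inputs(rank);
        MPI_Barrier(MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}
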
Example #3
int main(int argc, char * argv[]){

    /* MPI Initialization */
    int nprocs, rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // if (argc!=4){
    //     fprintf(stderr, "Usage: %s\n", argv[0]);
    //     exit(EXIT_FAILURE);
    // }

    // /* number of steps in mcmc */
    // int max_steps;
    // /* flag to indicate starting parameters */
    // int param_flag;
    // /* name of file to output -- not including path */
    // char file_string[256];

    // sscanf(argv[1], "%d", &max_steps);
    // sscanf(argv[2], "%d", &param_flag);
    // if(rank==0) fprintf(stderr, "%d steps in mcmc chain.\n", max_steps);
    // sscanf(argv[3], "%s", file_string);

    // /* -- Initialize parameters --*/
    // STEP_DATA initial;
    // load_step_data(&initial, param_flag, rank);

    /* parse command line for starting params, steps, and filename */
    ARGS cl = parse_command_line( argc, argv );

    if(rank==0){
        fprintf(stderr, "N_parameters: %d\n", cl.N_params);
        fprintf(stderr, "Starting parameters: r0_thin = %lf , z0_thin = %lf , r0_thick = %lf , z0_thick = %lf , ratio = %lf\n", cl.r0_thin, cl.z0_thin, cl.r0_thick, cl.z0_thick, cl.ratio);
        fprintf(stderr, "%d steps in MCMC chain...\n", cl.max_steps);
        fprintf(stderr, "Results will be output to %s\n", cl.filename);
    }

    /* Assign cl arguments to initial parameters */
    STEP_DATA initial;
    initial.r0_thin = cl.r0_thin;
    initial.z0_thin = cl.z0_thin;
    initial.r0_thick = cl.r0_thick;
    initial.z0_thick = cl.z0_thick;
    initial.ratio_thick_thin = cl.ratio;
    initial.chi2 = 0.0;
    initial.chi2_reduced = 0.0;

    /* -- Load data from various files --*/
    int i, j;
    int N_plist;
    int N_bins;
    POINTING *plist;

    /* have each process separately access these files */
    int current_rank = 0;
    while ( current_rank < nprocs ){
        if (current_rank == rank) {
            load_pointingID(&N_plist, &plist);
            if(rank == 0) fprintf(stderr, "%d pointings to do\n", N_plist);
            N_bins = load_Nbins();
            if(rank == 0) fprintf(stderr, "%d bins per pointing\n", N_bins);
        }
        MPI_Barrier(MPI_COMM_WORLD);
        current_rank++;
    }

    /* Establish slice of pointings for each process to handle */
    int slice_length;
    int remain = N_plist % nprocs;
    int lower_ind, upper_ind;

    /* Make slices as even as possible */
    slice_length = N_plist / nprocs;
    lower_ind = rank * slice_length;
    if (rank < remain){
        lower_ind += rank;
        slice_length++;
    }
    else lower_ind += remain;
    upper_ind = lower_ind + slice_length;

    /* Each process now loads data for its slice only */
    load_ZRW(plist, lower_ind, upper_ind, rank);
    load_rbins(plist, N_bins, lower_ind, upper_ind, rank);
    load_pairs(plist, N_bins, lower_ind, upper_ind, rank);

    /* Calculate fractional error in DD/MM */
    /* Only needs to be done once */
    calculate_frac_error(plist, N_bins, lower_ind, upper_ind);

    /* Run mcmc */
    run_mcmc(plist, cl.N_params, initial, N_bins, cl.max_steps, lower_ind, upper_ind,
        rank, nprocs, cl.filename);

    /* Free allocated values */
    for(i=lower_ind; i<upper_ind; i++){
        for(j=0; j<N_bins; j++){
            free(plist[i].rbin[j].pair1);
            free(plist[i].rbin[j].pair2);
        }
        free(plist[i].rbin);
        free(plist[i].Z);
        free(plist[i].R);
        free(plist[i].weight);
    }
    free(plist);
    if(rank==0) fprintf(stderr, "Allocated space cleared. \n");

    /* barrier to ensure all procs clear space before MPI_Finalize */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_Finalize();

    return EXIT_SUCCESS;

}
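
The ARGS struct and parse_command_line() are defined elsewhere in the project and are not shown in these excerpts. The following is a hypothetical sketch of what they might look like, with the fields inferred from how cl is used above (cl.N_params, cl.r0_thin, cl.z0_thin, cl.r0_thick, cl.z0_thick, cl.ratio, cl.max_steps, cl.filename); the default values and argument handling are placeholders, not the project's actual interface.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical reconstruction of ARGS: field names taken from their use
 * in main(); comments describe the apparent meaning of each parameter. */
typedef struct {
    int N_params;        /* number of free parameters in the fit */
    double r0_thin;      /* thin-disk radial scale length */
    double z0_thin;      /* thin-disk scale height */
    double r0_thick;     /* thick-disk radial scale length */
    double z0_thick;     /* thick-disk scale height */
    double ratio;        /* thick-to-thin density normalization */
    int max_steps;       /* number of MCMC steps */
    char filename[256];  /* output file name */
} ARGS;

/* Hypothetical parser: placeholder defaults, optionally overridden by
 * positional arguments. The real project's options may differ. */
ARGS parse_command_line(int argc, char *argv[])
{
    ARGS cl;

    cl.N_params  = 5;
    cl.r0_thin   = 2.5;   /* placeholder starting values */
    cl.z0_thin   = 0.3;
    cl.r0_thick  = 2.5;
    cl.z0_thick  = 0.9;
    cl.ratio     = 0.1;
    cl.max_steps = 10000;
    strcpy(cl.filename, "mcmc_output.dat");

    if (argc > 1) cl.max_steps = atoi(argv[1]);
    if (argc > 2) {
        strncpy(cl.filename, argv[2], sizeof(cl.filename) - 1);
        cl.filename[sizeof(cl.filename) - 1] = '\0';
    }
    return cl;
}

int main(int argc, char *argv[])
{
    ARGS cl = parse_command_line(argc, argv);
    printf("%d steps, output to %s\n", cl.max_steps, cl.filename);
    return 0;
}
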
Example #4
pred_model_t   *
train(data_t *train_data, int initialization, int method, params_t *params)
{
	int chain, default_rule;
	pred_model_t *pred_model;
	ruleset_t *rs, *rs_temp;
	double max_pos, pos_temp, null_bound;

	pred_model = NULL;
	rs = NULL;
	if (compute_pmf(train_data->nrules, params) != 0)
		goto err;
	compute_cardinality(train_data->rules, train_data->nrules);

	if (compute_log_gammas(train_data->nsamples, params) != 0)
		goto err;

	if ((pred_model = calloc(1, sizeof(pred_model_t))) == NULL)
		goto err;

	default_rule = 0;
	if (ruleset_init(1,
	    train_data->nsamples, &default_rule, train_data->rules, &rs) != 0)
	    	goto err;

	max_pos = compute_log_posterior(rs, train_data->rules,
	    train_data->nrules, train_data->labels, params, 1, -1, &null_bound);
	if (permute_rules(train_data->nrules) != 0)
		goto err;

	for (chain = 0; chain < params->nchain; chain++) {
		rs_temp = run_mcmc(params->iters,
		    train_data->nsamples, train_data->nrules,
		    train_data->rules, train_data->labels, params, max_pos);
		pos_temp = compute_log_posterior(rs_temp, train_data->rules,
		    train_data->nrules, train_data->labels, params, 1, -1,
		    &null_bound);

		if (pos_temp >= max_pos) {
			ruleset_destroy(rs);
			rs = rs_temp;
			max_pos = pos_temp;
		} else {
			ruleset_destroy(rs_temp);
		}
	}

	pred_model->theta =
	    get_theta(rs, train_data->rules, train_data->labels, params);
	pred_model->rs = rs;
	rs = NULL;

	/*
	 * THIS IS INTENTIONAL -- makes error handling localized.
	 * If we branch to err, then we want to free an allocated model;
	 * if we fall through naturally, then we don't.
	 */
	if (0) {
err:
		if (pred_model != NULL)
			free (pred_model);
	}
	/* Free allocated memory. */
	if (log_lambda_pmf != NULL)
		free(log_lambda_pmf);
	if (log_eta_pmf != NULL)
		free(log_eta_pmf);
	if (rule_permutation != NULL)
		free(rule_permutation);
	if (log_gammas != NULL)
		free(log_gammas);
	if (rs != NULL)
		ruleset_destroy(rs);
	return (pred_model);
}
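
Example #4 funnels every failure through the err: label, which sits inside an if (0) block so the success path falls past the model-freeing code while both paths still run the shared tail cleanup. A minimal sketch of the same idiom, independent of the ruleset types used above:

#include <stdio.h>
#include <stdlib.h>

/* Minimal sketch of the "if (0) { err: ... }" cleanup idiom from train():
 * failure paths goto err and free the result; the success path falls
 * through, skips that block, and both paths share the tail cleanup. */
static int *make_buffer(size_t n, int fail)
{
    int *result = NULL, *scratch = NULL;

    if ((result = calloc(n, sizeof(*result))) == NULL)
        goto err;
    if ((scratch = calloc(n, sizeof(*scratch))) == NULL)
        goto err;
    if (fail)                   /* simulate a later failure */
        goto err;

    /* Success: fall through past the error block. */
    if (0) {
err:
        free(result);           /* only freed on the error path */
        result = NULL;
    }
    free(scratch);              /* shared cleanup on both paths */
    return result;
}

int main(void)
{
    int *ok = make_buffer(8, 0);
    int *bad = make_buffer(8, 1);
    printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
    free(ok);
    return 0;
}
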