/* Driver for the parameter-file (par) package: opens the par file named on
 * the command line, merges command-line overrides, optionally looks up one
 * block/parameter pair as string/int/double, then dumps the whole database.
 * Returns 0 on success; exits early with usage text when no file is given. */
int main(int argc, char *argv[])
{
  if (argc == 1) {
    printf("Usage: %s par-file [block-name par-name]\n", argv[0]);
    exit(0);
  }

  par_debug(0);
  par_open(argv[1]);            /* read the parameter file */
  par_cmdline(argc, argv);      /* let command-line args override entries */

  if (argc == 4) {
    /* Look up argv[2].argv[3] three ways to exercise the typed getters.
     * par_gets() returns a malloc'd copy that we own and must free. */
    char *cp = par_gets(argv[2], argv[3]);
    printf("PAR_GETS: %s.%s = \"%s\"\n", argv[2], argv[3], cp);
    printf("PAR_GETI: %s.%s = \"%d\" as integer\n",
           argv[2], argv[3], par_geti(argv[2], argv[3]));
    printf("PAR_GETD: %s.%s = \"%g\" as double\n",
           argv[2], argv[3], par_getd(argv[2], argv[3]));
    free(cp);
  }

  par_dump(0, stdout);          /* show the final merged database */
  par_close();

  return 0;
}
/* List the contents of archive `file` in long-ls format.
 * Opens the archive read-only, prints one entry per header read, and skips
 * over the data of regular files. Returns 0 on success, -1 on any error
 * (with a message on stderr); the archive handle is closed on all paths. */
static int par_list(char *file)
{
  PAR *t;
  int i;

  if (par_open(&t, file, 1, O_RDONLY, 0, 0) == -1) {
    fprintf(stderr, "par_open(): %s\n", strerror(errno));
    return -1;
  }

  if (par_read_header(t) != 0) {
    fprintf(stderr, "par_read_header(): %s\n", strerror(errno));
    par_close(t);
    return -1;
  }

  /* th_read() returns 0 per entry, 1 at EOF, -1 on error. */
  while ((i = th_read(t)) == 0) {
    th_print_long_ls(t);
#ifdef DEBUG
    th_print(t);
#endif
    /* Regular files carry data blocks after the header; skip past them. */
    if (TH_ISREG(t) && par_skip_regfile(t) != 0) {
      fprintf(stderr, "par_skip_regfile(): %s\n", strerror(errno));
      par_close(t);   /* was leaked on this path before */
      return -1;
    }
  }

  /* Distinguish a read error from normal EOF. */
  if (i == -1) {
    fprintf(stderr, "th_read(): %s\n", strerror(errno));
    par_close(t);
    return -1;
  }

  if (par_close(t) != 0) {
    fprintf(stderr, "par_close(): %s\n", strerror(errno));
    return -1;
  }

  return 0;
}
/* Extract all entries of archive `file` beneath directory `rootdir`.
 * Returns 0 on success, -1 on any error (with a message on stderr);
 * the archive handle is closed on all paths. */
static int par_extract(char *file, char *rootdir)
{
  PAR *t;

  if (par_open(&t, file, 1, O_RDONLY, 0, 0) == -1) {
    fprintf(stderr, "par_open(): %s\n", strerror(errno));
    return -1;
  }

  if (par_read_header(t) != 0) {
    fprintf(stderr, "par_read_header(): %s\n", strerror(errno));
    par_close(t);
    return -1;
  }

  if (par_extract_all(t, rootdir) != 0) {
    fprintf(stderr, "par_extract_all(): %s\n", strerror(errno));
    par_close(t);
    return -1;
  }

  if (par_close(t) != 0) {
    fprintf(stderr, "par_close(): %s\n", strerror(errno));
    return -1;
  }

  return 0;
}
int main(int argc, char *argv[]) { int mytid; /* int numprocs; */ int namelen; char processor_name[MPI_MAX_PROCESSOR_NAME]; par_debug(0); if(MPI_SUCCESS != MPI_Init(&argc,&argv)) ath_error("Error on calling MPI_Init\n"); /* Get the number of processes */ /* MPI_Comm_size(MPI_COMM_WORLD,&numprocs); */ /* Get my task id, or rank as it is called in MPI */ MPI_Comm_rank(MPI_COMM_WORLD,&mytid); /* Get the name of the processor or machine name */ MPI_Get_processor_name(processor_name,&namelen); printf("My task id / rank = %d on %s\n",mytid, processor_name); /* Parent and child have different jobs */ if(mytid != 0) printf("My Parent's task id / rank = 0\n"); else{ printf("I am the Parent\n"); if (argc == 1) { printf("Usage: %s par-file [block-name par-name]\n",argv[0]); exit(0); } par_open(argv[1]); par_cmdline(argc,argv); if (argc == 4) { char *cp = par_gets(argv[2],argv[3]); printf("PAR_GETS: %s.%s = \"%s\"\n",argv[2],argv[3],cp); printf("PAR_GETI: %s.%s = \"%d\" as integer\n", argv[2],argv[3],par_geti(argv[2],argv[3])); printf("PAR_GETD: %s.%s = \"%g\" as double\n", argv[2],argv[3],par_getd(argv[2],argv[3])); free(cp); } } par_dist_mpi(mytid,MPI_COMM_WORLD); par_dump(0,stdout); par_close(); MPI_Finalize(); return 0; }
/* Create archive `file` from the pathnames listed in `l`.
 * Relative paths are prefixed with `rootdir` (when non-NULL) to locate the
 * data on disk, while the original relative name is stored in the archive.
 * Returns 0 on success, -1 on any error (with a message on stderr); the
 * archive handle is closed on all paths. */
static int par_create(char *file, char *rootdir, libtar_list_t *l)
{
  PAR *t;
  char *pathname;
  char buf[MAXPATHLEN];
  libtar_listptr_t lp;

  if (par_open(&t, file, 1, O_WRONLY | O_CREAT, 0644, 0) == -1) {
    fprintf(stderr, "par_open(): %s\n", strerror(errno));
    return -1;
  }

  if (par_write_header(t) == -1) {
    fprintf(stderr, "par_write_header(): %s\n", strerror(errno));
    par_close(t);
    return -1;
  }

  libtar_listptr_reset(&lp);
  while (libtar_list_next(l, &lp) != 0) {
    pathname = (char *)libtar_listptr_data(&lp);

    /* Resolve where to read the data from: absolute names are used as-is,
     * relative names are looked up beneath rootdir (if given). */
    if (pathname[0] != '/' && rootdir != NULL)
      snprintf(buf, sizeof(buf), "%s/%s", rootdir, pathname);
    else
      strlcpy(buf, pathname, sizeof(buf));

    /* Store under the original (relative) name, reading from buf. */
    if (par_append_tree(t, buf, pathname) != 0) {
      fprintf(stderr, "par_append_tree(\"%s\", \"%s\"): %s\n",
              buf, pathname, strerror(errno));
      par_close(t);
      return -1;
    }
  }

  /* Terminate the archive with the end-of-file marker blocks. */
  if (par_append_eof(t) != 0) {
    fprintf(stderr, "par_append_eof(): %s\n", strerror(errno));
    par_close(t);
    return -1;
  }

  if (par_close(t) != 0) {
    fprintf(stderr, "par_close(): %s\n", strerror(errno));
    return -1;
  }

  return 0;
}
/* One-time setup for the code: record this process's MPI identity (or the
 * serial defaults), open a dummy parameter file so the par package is in a
 * valid state, and register the configure-time options.  Always returns 0. */
int initialize_code(){
#ifdef MPI_PARALLEL
/* Get my task id (rank in MPI) */
  if(MPI_SUCCESS != MPI_Comm_rank(MPI_COMM_WORLD,&(level0_Grid.my_id)))
    ath_error("Error on calling MPI_Comm_rank\n");

/* Get the number of processes */
  if(MPI_SUCCESS != MPI_Comm_size(MPI_COMM_WORLD,&(level0_Grid.nproc)))
    ath_error("Error on calling MPI_Comm_size\n");
#else
  /* Serial build: single process, rank 0. */
  level0_Grid.my_id = 0;
  level0_Grid.nproc = 1;
#endif
  par_open("/dev/null");   /* to trick athena into thinking it has opened a parameter file, will not work on windows */
  is_restart = 0;          /* NOTE(review): fresh start assumed; restart handling presumably set elsewhere */
  show_config_par();   /* Add the configure block to the parameter database */

  return 0;
}