/* temporal update of states --------------------------------------------------*/
static void udstate(const obsd_t *obs, int n, const nav_t *nav,
                    double *x, double *P, int nx, ssat_t *ssat)
{
    gtime_t time;
    double tt;
    int i,sat;

    for (i=0;i<n;i++) {
        sat=obs[i].sat;                 /* fixed: missing semicolon */
        time=ssat[sat-1].time;

        if (!time.time) {
            /* first epoch for this satellite: initialize iono and bias states */
            init_iono(obs+i,nav,x,P,nx);
            init_bias(obs+i,nav,x,P,nx);
        }
        else {
            /* inflate iono state variance (diagonal of P) by elapsed time */
            tt=timediff(obs[i].time,time);
            P[II(sat)*(nx+1)]+=PRN_IONO*fabs(tt);

            /* re-initialize bias on cycle slip or after a long data gap */
            if (det_slip(obs+i,nav,ssat)||fabs(tt)>MAXGAP_BIAS) {
                init_bias(obs+i,nav,x,P,nx);
            }
        }
        ssat[sat-1].time=obs[i].time;   /* fixed: was =time, which never advanced the stored epoch */
    }
}
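/*
 * A minimal standalone sketch (not RTKLIB code) of why udstate() can address
 * the ionosphere state's variance as P[II(sat)*(nx+1)]: for an nx-by-nx
 * covariance matrix stored row-major in a flat array, the diagonal element
 * (k,k) sits at index k*nx + k == k*(nx+1). NX and the values are made up.
 */
#include <stdio.h>

#define NX 4

int main(void)
{
    double P[NX*NX] = {0};
    int k;

    for (k = 0; k < NX; k++) {
        P[k*(NX+1)] += 1.0;             /* same diagonal indexing as udstate() */
    }
    for (k = 0; k < NX; k++) {
        printf("P[%d][%d] = %.1f\n", k, k, P[k*NX+k]);
    }
    return 0;
}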
int main(int argc,const char *argv[])
{
    Net *net;
    Group *input,*hidden,*output,*bias;
    ExampleSet *examples, *test;
    Example *ex;
    Connections *c1,*c2,*c3,*c4;
    char * fileName = "input";
    int i,count,j, dataCount =0, testCount = 0;
    int dataCountArray[6] = {0};
    char * testFileArray[6] = {NULL};
    int inputCount = 0,outputCount = 0, hiddenCount = 200;
    int batchFlag = 0;
    Real error, correct;
    Real epsilon,range, hiddenRatio = 0;
    unsigned long seed = 0;

    /* don't buffer output */
    setbuf(stdout,NULL);

    /* set seed to unique number */
    mikenet_set_seed(seed);

    /* a default learning rate */
    epsilon=0.1;

    /* default weight range */
    range=0.5;

    default_errorRadius=0.0;

    const char *usage =
        "mike_childes [options]\n"
        "-seed\t\tRandom seed (default 0)\n"
        "-i\t\tNumber of input units (default 0)\n"
        "-h\t\tNumber of hidden units (default 200)\n"
        "-o\t\tNumber of output units (default 0)\n"
        "-r\t\tRatio of hidden units to input units (default 0: use -h)\n"
        "-b\t\tEnable batch learning (default: online learning)\n"
        "-epsilon\tBack-propagation epsilon (default 0.1)\n"
        "-help\t\tPrint this help\n";

    /* what are the command line arguments? */
    if (argc == 1) {
        fprintf(stderr, "%s", usage);
        exit(1);
    }
    for(i=1;i<argc;i++) {
        if (strcmp(argv[i],"-seed")==0) {
            seed = atol(argv[i+1]);
            mikenet_set_seed(seed);
            i++;
        }
        else if (strncmp(argv[i],"-epsilon",5)==0) {
            epsilon=atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i],"-range")==0) {
            range=atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i],"-errorRadius")==0) {
            default_errorRadius=atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i],"-t")==0) {
            testFileArray[testCount] = (char *)argv[i+1];
            testCount++;
            i++;
        }
        else if (strcmp(argv[i],"-d")==0) {
            dataCountArray[dataCount]= atoi(argv[i+1]);
            dataCount++;
            i++;
        }
        else if (strcmp(argv[i], "-iter")==0) {
            ITER = atoi(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-f") == 0) {
            fileName = (char*)argv[i+1];
            i++;
        }
        else if (strcmp(argv[i], "-v") == 0) {
            VERBOSE = 1;
        }
        else if (strcmp(argv[i], "-i") == 0) {
            inputCount = atoi(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-o") == 0) {
            outputCount = atoi(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-h") == 0) {
            hiddenCount = atoi(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-r") == 0) {
            hiddenRatio = atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-b") == 0) {
            batchFlag = 1;
        }
        else {
            fprintf(stderr,"unknown argument: %s\n%s\n",argv[i],usage);
            exit(-1);
        }
    }

    /* build a network, with TIME number of time ticks */
    net=create_net(TIME);

    /* learning rate */
    default_epsilon=epsilon;

    /* hidden ratio: derive the hidden layer size from the input layer */
    if (hiddenRatio > 0 && hiddenRatio <= 1) {
        hiddenCount = (int)(inputCount * hiddenRatio);  /* fixed: cast the product, not inputCount */
        if(VERBOSE)
            fprintf(stderr, "number of hidden units is set to %d\n",hiddenCount);
    }
    else if(hiddenRatio > 1) {
        fprintf(stderr, "%s", usage);
        exit(1);
    }

    /* first stdout of this program is the number of hidden units */
    printf("%d\t",hiddenCount);

    /* create our groups. format is: name, num of units, ticks */
    input=init_group("Input",inputCount,TIME);
    hidden=init_group("hidden",hiddenCount,TIME);
    output=init_group("Output",outputCount,TIME);

    /* bias is special. format is: value, ticks */
    bias=init_bias(1.0,TIME);

    /* now add our groups to the network object */
    bind_group_to_net(net,input);
    bind_group_to_net(net,hidden);
    bind_group_to_net(net,output);
    bind_group_to_net(net,bias);

    /* now connect our groups, instantiating connection objects c1 through c4 */
    c1=connect_groups(input,hidden);
    c2=connect_groups(hidden,output);
    c3=connect_groups(bias,hidden);
    c4=connect_groups(bias,output);

    /* add connections to our network */
    bind_connection_to_net(net,c1);
    bind_connection_to_net(net,c2);
    bind_connection_to_net(net,c3);
    bind_connection_to_net(net,c4);

    /* randomize the weights in the connection objects.
       2nd argument is weight range. */
    randomize_connections(c1,range);
    randomize_connections(c2,range);
    randomize_connections(c3,range);
    randomize_connections(c4,range);

    /* how to load and save weights */
    /* load_weights(net,"init.weights"); */

    /* erase old initial weight file */
    /* system("rm -f init.weights.Z"); */

    /* save out our weights to file 'init.weights' */

    /* load in our example set */
    if (VERBOSE) {
        fprintf(stderr, "Reading %s Iter:%d ",fileName, ITER);
        for(i=0; i < 6; i++) {
            if (testFileArray[i] == NULL)
                break;
            fprintf(stderr, "TestSet:%s\n", testFileArray[i]);
        }
    }
    examples=load_examples(fileName,TIME);
    if (VERBOSE) {
        fprintf(stderr, "size:%d\n", examples->numExamples);
        for(i=0; i < dataCount; i++) {
            if (i == 0) {
                fprintf(stderr, "DataCounts[%d] start:0 end:%d size:%d\n",
                        i,dataCountArray[i],dataCountArray[i]);
            } else {
                fprintf(stderr, "DataCounts[%d] start:%d end:%d size:%d\n",
                        i,dataCountArray[i - 1], dataCountArray[i],
                        dataCountArray[i] - dataCountArray[i - 1]);
            }
        }
    }

    error=0.0;
    count=0;

    /* loop for ITER number of times */
    /* reset the seed so every run draws the same training sequence */
    fprintf(stderr, "Training: %s size:%d\n", fileName, examples->numExamples);
    mikenet_set_seed(seed);
    for(i=0;i<ITER;i++) {
        /* draw a random example from the example set */
        int ridx = (int) (mikenet_random() * (Real)examples->numExamples);
        ex = &examples->examples[ridx];
        /* original: ex = get_random_example(examples); */

        /* do forward propagation */
        bptt_forward(net,ex);

        /* backward pass: compute gradients */
        bptt_compute_gradients(net,ex);

        /* sum up error for this example */
        error+=compute_error(net,ex);

        /* online learning: apply the deltas from previous call to compute_gradients */
        if(batchFlag == 0)
            bptt_apply_deltas(net);

        /* is it time to write status? */
        if (count==REP) {
            /* average error over last 'count' iterations */
            error = error/(float)count;
            count=0;

            /* print a message about average error so far */
            if (VERBOSE)
                fprintf(stderr, "%d\t%f\n",i,error);
            if (error < 0.1) {
                break;
            }

            /* zero error; start counting again */
            error=0.0;
        }
        count++;
    }
    /* batch learning: apply the accumulated deltas once at the end */
    if(batchFlag == 1)
        bptt_apply_deltas(net);

    /* done training. write out results for each example */
    if (testCount == 0) {
        int dc = 0;     /* fixed: separate segment index; the original reused
                           dataCount, making the boundary test dead code */
        correct = 0;
        for(i=0;i<examples->numExamples;i++) {
            if (VERBOSE && i % 1000 == 0)
                fprintf(stderr,".");
            /* at a segment boundary, report accuracy for the finished segment */
            if (dataCount && i == dataCountArray[dc]) {
                if (dc == 0)
                    printf("%f\t", correct / dataCountArray[dc]);
                else
                    printf("%f\t", correct / (dataCountArray[dc] - dataCountArray[dc - 1]));
                correct = 0;
                dc++;
            }
            ex=&examples->examples[i];
            bptt_forward(net,ex);
            int maxj = -1;
            Real maxx = 0;
            for(j=0 ; j < outputCount; j++) {
                if (output->outputs[TIME-1][j] > maxx) {
                    maxj = j;
                    maxx = output->outputs[TIME-1][j];
                }
                /* printf("%d:%f ",j,output->outputs[TIME-1][j]); */
            }
            if (maxj >= 0 && get_value(ex->targets,output->index,TIME-1,maxj) == 1)
                correct += 1;
        }
        if (dataCount == 0)
            printf("%f\n", correct / examples->numExamples);  /* fixed: avoid indexing dataCountArray[-1] */
        else
            printf("%f\n", correct / (dataCountArray[dc] - dataCountArray[dc - 1]));
    }
    else {
        int tt = 0, dc = 0;
        correct = 0;
        for(tt = 0; tt < testCount; tt++) {
            test = load_examples(testFileArray[tt],TIME);
            if (VERBOSE)
                fprintf(stderr,"Testing:%s size:%d\n",testFileArray[tt],test->numExamples);
            correct = 0;
            for(i=0;i<test->numExamples;i++) {
                if (dataCount && i == dataCountArray[dc]) {
                    if (dc == 0)
                        printf("%f\t", correct / dataCountArray[dc]);
                    else
                        printf("%f\t", correct / (dataCountArray[dc] - dataCountArray[dc - 1]));
                    correct = 0;
                    dc++;
                }
                if (VERBOSE && i % 1000 == 0)
                    fprintf(stderr,".");
                ex=&test->examples[i];
                bptt_forward(net,ex);
                int maxj = -1;
                Real maxx = 0;
                int goldIdx = -1;
                for(j=0 ; j < outputCount; j++) {
                    if (output->outputs[TIME-1][j] > maxx) {
                        maxj = j;
                        maxx = output->outputs[TIME-1][j];
                    }
                    if (get_value(ex->targets,output->index,TIME-1,j) == 1) {
                        if(goldIdx != -1) {
                            fprintf(stderr,
                                    "Multiple active output units: instance:%d unit:%d in test set!\n",
                                    i,j);
                        }
                        goldIdx = j;
                    }
                    /* printf("%d:%f ",j,output->outputs[TIME-1][j]); */
                }
                if (goldIdx != -1 || maxj != -1) {
                    // print the gold tag and the answer tag
                    fprintf(stderr, "%d %d\n", goldIdx, maxj);
                }
                else {
                    fprintf(stderr, "No active output units in test set\n");
                    exit(-1);
                }
                if (maxj >= 0 && get_value(ex->targets,output->index,TIME-1,maxj) == 1) {
                    correct += 1;
                }
            }
            if (dataCount == 0)
                printf("%f %d %d\t", correct / test->numExamples, (int)correct, test->numExamples);
            else
                printf("%f\n", correct / (dataCountArray[dc] - dataCountArray[dc - 1]));
        }
    }
    return 0;
}
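/*
 * Hypothetical helper (not part of Mikenet) isolating the winner-take-all
 * scoring used by both evaluation loops above: the most active output unit
 * at the final tick wins, and the example counts as correct when the target
 * for that unit is 1. Returns -1 when no activation exceeds 0, which the
 * callers must guard against before indexing the targets.
 */
typedef double MyReal;   /* stand-in for Mikenet's Real */

static int argmax_output(const MyReal *acts, int n)
{
    int j, maxj = -1;
    MyReal maxx = 0;

    for (j = 0; j < n; j++) {
        if (acts[j] > maxx) {
            maxj = j;
            maxx = acts[j];
        }
    }
    return maxj;
}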
int main(int argc,char *argv[])
{
    int j;
    float e;
    int dbd=0;
    Net *net;
    float momentum=0.0;
    Group *input,*hidden,*output,*bias;
    ExampleSet *examples;
    Example *ex;
    Connections *c1,*c2,*c3,*c4;
    int i,count;
    Real error;
    int seed=666;
    Real epsilon,range,tolerance;

    /* don't buffer output */
    setbuf(stdout,NULL);

    announce_version();

    /* set random number seed to process id
       (only unix machines have the getpid call) */
    seed=getpid();

    /* how low must error get before we quit? */
    tolerance=0.01;

    default_temperature=1.0;
    default_temporg=50;
    default_tempmult=0.9;

    /* a default learning rate */
    epsilon=0.1;

    /* default weight range */
    range=0.5;

    default_errorRadius=0.1;

    /* what are the command line arguments? */
    for(i=1;i<argc;i++) {
        if (strncmp(argv[i],"-epsilon",5)==0) {
            epsilon=atof(argv[i+1]);
            i++;
        }
        else if (strncmp(argv[i],"-momentum",5)==0) {
            momentum=atof(argv[i+1]);
            i++;
        }
        else if (strncmp(argv[i],"-seed",5)==0) {
            seed=atoi(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i],"-dbd")==0) {
            dbd=1;
        }
        else if (strcmp(argv[i],"-range")==0) {
            range=atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i],"-errorRadius")==0) {
            default_errorRadius=atof(argv[i+1]);
            i++;
        }
        else if (strncmp(argv[i],"-tolerance",4)==0) {
            tolerance=atof(argv[i+1]);
            i++;
        }
        else {
            fprintf(stderr,"unknown option: %s\n",argv[i]);
            exit(-1);
        }
    }
    mikenet_set_seed(seed);

    default_momentum=0.0;

    if ((sizeof(Real)==4 && tolerance < 0.001) ||
        (sizeof(Real)==8 && tolerance < 0.00001)) {
        fprintf(stderr,"careful; your tolerance is probably ");
        fprintf(stderr,"too tight for this machine's precision\n");
    }

    /* build a network, with TIME number of time ticks */
    net=create_net(TIME);
    net->integrationConstant=0.5;

    /* learning rate */
    default_epsilon=epsilon;

    /* create our groups. format is: name, num of units, ticks */
    input=init_group("Input",2,TIME);
    hidden=init_group("hidden",10,TIME);
    output=init_group("Output",21,TIME);

    /* bias is special. format is: value, ticks */
    bias=init_bias(1.0,TIME);

    /* now add our groups to the network object */
    bind_group_to_net(net,input);
    bind_group_to_net(net,hidden);
    bind_group_to_net(net,output);
    bind_group_to_net(net,bias);

    /* now connect our groups, instantiating connection objects c1 through c4 */
    c1=connect_groups(input,hidden);
    c2=connect_groups(hidden,output);
    c3=connect_groups(bias,hidden);
    c4=connect_groups(bias,output);

    /* add connections to our network */
    bind_connection_to_net(net,c1);
    bind_connection_to_net(net,c2);
    bind_connection_to_net(net,c3);
    bind_connection_to_net(net,c4);

    /* randomize the weights in the connection objects.
       2nd argument is weight range. */
    randomize_connections(c1,range);
    randomize_connections(c2,range);
    randomize_connections(c3,range);
    randomize_connections(c4,range);

    /* load in our example set */
    examples=load_examples("xor.ex",TIME);

    error=0.0;
    count=0;

    /* loop for ITER number of times */
    for(i=0;i<ITER;i++) {
        for(j=0;j<examples->numExamples;j++) {
            ex=&examples->examples[j];
            dbm_positive(net,ex);   /* clamped (positive) phase */
            dbm_negative(net,ex);   /* free-running (negative) phase */
            dbm_update(net);        /* accumulate weight deltas */
        }
        /* note: squared error is measured on the last example of the sweep only */
        e = output->outputs[TIME-1][0] - get_value(ex->targets,output->index,0,0);
        error += e * e;
        dbm_apply_deltas(net);

        if (count==REP) {
            /* average error over last 'count' iterations */
            error = error/(float)count;
            count=0;

            /* print a message about average error so far */
            printf("%d\t%f\n",i,error);

            /* are we done? */
            if (error < tolerance) {
                printf("quitting... error low enough\n");
                /* pop out of loop */
                break;
            }

            /* zero error; start counting again */
            error=0.0;
        }
        count++;
    }

    system("rm -f out.weights*");   /* fixed: -f avoids an error when no old file exists */
    save_weights(net,"out.weights");
    return 0;
}
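/*
 * Standalone sketch of the REP-interval error reporting shared by these
 * training loops (the per-step error values here are made up). Note that, as
 * in the originals, each reported window actually covers REP+1 iterations
 * because count is incremented after the count==REP check.
 */
#include <stdio.h>

#define REP 100

int main(void)
{
    double error = 0.0;
    int i, count = 0;

    for (i = 0; i < 1000; i++) {
        error += 1.0 / (i + 1);         /* stand-in for compute_error() */
        if (count == REP) {
            printf("%d\t%f\n", i, error / count);
            error = 0.0;
            count = 0;
        }
        count++;
    }
    return 0;
}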
int main(int argc, char **argv)
{
    char *rawfilename = NULL;
    int numiter = 250;
    int use_apc = 1;
    int use_normalization = 0;
    conjugrad_float_t lambda_single = F001; // 0.01
    conjugrad_float_t lambda_pair = FInf;
    conjugrad_float_t lambda_pair_factor = F02; // 0.2
    int conjugrad_k = 5;
    conjugrad_float_t conjugrad_eps = 0.01;

    parse_option *optList, *thisOpt;

    /* build the option string, extending it per compile-time feature */
    char *optstr;
    char *old_optstr = malloc(1);
    old_optstr[0] = 0;
    optstr = concat("r:i:n:w:k:e:l:ARh?", old_optstr);
    free(old_optstr);

#ifdef OPENMP
    int numthreads = 1;
    old_optstr = optstr;
    optstr = concat("t:", optstr);
    free(old_optstr);
#endif

#ifdef CUDA
    int use_def_gpu = 0;
    old_optstr = optstr;
    optstr = concat("d:", optstr);
    free(old_optstr);
#endif

#ifdef MSGPACK
    char* msgpackfilename = NULL;
    old_optstr = optstr;
    optstr = concat("b:", optstr);
    free(old_optstr);
#endif

    optList = parseopt(argc, argv, optstr);
    free(optstr);

    char* msafilename = NULL;
    char* matfilename = NULL;
    char* initfilename = NULL;
    conjugrad_float_t reweighting_threshold = F08; // 0.8

    while(optList != NULL) {
        thisOpt = optList;
        optList = optList->next;

        switch(thisOpt->option) {
#ifdef OPENMP
            case 't':
                numthreads = atoi(thisOpt->argument);
#ifdef CUDA
                use_def_gpu = -1; // automatically disable GPU if number of threads specified
#endif
                break;
#endif
#ifdef CUDA
            case 'd':
                use_def_gpu = atoi(thisOpt->argument);
                break;
#endif
#ifdef MSGPACK
            case 'b':
                msgpackfilename = thisOpt->argument;
                break;
#endif
            case 'r':
                rawfilename = thisOpt->argument;
                break;
            case 'i':
                initfilename = thisOpt->argument;
                break;
            case 'n':
                numiter = atoi(thisOpt->argument);
                break;
            case 'w':
                reweighting_threshold = (conjugrad_float_t)atof(thisOpt->argument);
                break;
            case 'l':
                lambda_pair_factor = (conjugrad_float_t)atof(thisOpt->argument);
                break;
            case 'k':
                conjugrad_k = atoi(thisOpt->argument);
                break;
            case 'e':
                conjugrad_eps = (conjugrad_float_t)atof(thisOpt->argument);
                break;
            case 'A':
                use_apc = 0;
                break;
            case 'R':
                use_normalization = 1;
                break;
            case 'h':
            case '?':
                usage(argv[0], 1);
                break;
            case 0:
                /* positional arguments: MSA file first, then output matrix file */
                if(msafilename == NULL) {
                    msafilename = thisOpt->argument;
                } else if(matfilename == NULL) {
                    matfilename = thisOpt->argument;
                } else {
                    usage(argv[0], 0);
                }
                break;
            default:
                die("Unknown argument");
        }
        free(thisOpt);
    }

    if(msafilename == NULL || matfilename == NULL) {
        usage(argv[0], 0);
    }

    FILE *msafile = fopen(msafilename, "r");
    if(msafile == NULL) {
        printf("Cannot open %s!\n\n", msafilename);
        return 2;
    }

#ifdef JANSSON
    char* metafilename = malloc(2048);
    snprintf(metafilename, 2048, "%s.meta.json", msafilename);

    FILE *metafile = fopen(metafilename, "r");
    json_t *meta;
    if(metafile == NULL) {
        // Cannot find .meta.json file - create new empty metadata
        meta = meta_create();
    } else {
        // Load metadata from matfile.meta.json
        meta = meta_read_json(metafile);
        fclose(metafile);
    }

    json_object_set(meta, "method", json_string("ccmpred"));

    json_t *meta_step = meta_add_step(meta, "ccmpred");
    json_object_set(meta_step, "version", json_string(__VERSION));

    json_t *meta_parameters = json_object();
    json_object_set(meta_step, "parameters", meta_parameters);

    json_t *meta_steps = json_array();
    json_object_set(meta_step, "iterations", meta_steps);

    json_t *meta_results = json_object();
    json_object_set(meta_step, "results", meta_results);
#endif

    int ncol, nrow;
    unsigned char* msa = read_msa(msafile, &ncol, &nrow);
    fclose(msafile);

    /* single-site and pairwise variable counts, plus padded sizes so each
       site's block starts on an N_ALPHA_PAD boundary */
    int nsingle = ncol * (N_ALPHA - 1);
    int nvar = nsingle + ncol * ncol * N_ALPHA * N_ALPHA;
    int nsingle_padded = nsingle + N_ALPHA_PAD - (nsingle % N_ALPHA_PAD);
    int nvar_padded = nsingle_padded + ncol * ncol * N_ALPHA * N_ALPHA_PAD;

#ifdef CURSES
    bool color = detect_colors();
#else
    bool color = false;
#endif

    logo(color);

#ifdef CUDA
    int num_devices, dev_ret;
    struct cudaDeviceProp prop;
    dev_ret = cudaGetDeviceCount(&num_devices);
    if(dev_ret != CUDA_SUCCESS) {
        num_devices = 0;
    }

    if(num_devices == 0) {
        printf("No CUDA devices available, ");
        use_def_gpu = -1;
    } else if (use_def_gpu < -1 || use_def_gpu >= num_devices) {
        printf("Error: %d is not a valid device number. Please choose a number between 0 and %d\n",
               use_def_gpu, num_devices - 1);
        exit(1);
    } else {
        printf("Found %d CUDA devices, ", num_devices);
    }

    if (use_def_gpu != -1) {
        cudaError_t err = cudaSetDevice(use_def_gpu);
        if(cudaSuccess != err) {
            printf("Error setting device: %d\n", err);
            exit(1);
        }
        cudaGetDeviceProperties(&prop, use_def_gpu);
        printf("using device #%d: %s\n", use_def_gpu, prop.name);

        size_t mem_free, mem_total;
        err = cudaMemGetInfo(&mem_free, &mem_total);
        if(cudaSuccess != err) {
            printf("Error getting memory info: %d\n", err);
            exit(1);
        }

        size_t mem_needed = nrow * ncol * 2 + // MSAs
            sizeof(conjugrad_float_t) * nrow * ncol * 2 + // PC, PCS
            sizeof(conjugrad_float_t) * nrow * ncol * N_ALPHA_PAD + // PCN
            sizeof(conjugrad_float_t) * nrow + // Weights
            (sizeof(conjugrad_float_t) * ((N_ALPHA - 1) * ncol + ncol * ncol * N_ALPHA * N_ALPHA_PAD)) * 4;

        setlocale(LC_NUMERIC, "");
        printf("Total GPU RAM:  %'17lu\n", mem_total);
        printf("Free GPU RAM:   %'17lu\n", mem_free);
        printf("Needed GPU RAM: %'17lu ", mem_needed);

        if(mem_needed <= mem_free) {
            printf("✓\n");
        } else {
            printf("⚠\n");
        }

#ifdef JANSSON
        json_object_set(meta_parameters, "device", json_string("gpu"));
        json_t* meta_gpu = json_object();
        json_object_set(meta_parameters, "gpu_info", meta_gpu);

        json_object_set(meta_gpu, "name", json_string(prop.name));
        json_object_set(meta_gpu, "mem_total", json_integer(mem_total));
        json_object_set(meta_gpu, "mem_free", json_integer(mem_free));
        json_object_set(meta_gpu, "mem_needed", json_integer(mem_needed));
#endif
    } else {
        printf("using CPU");
#ifdef JANSSON
        json_object_set(meta_parameters, "device", json_string("cpu"));
#endif
#ifdef OPENMP
        printf(" (%d thread(s))", numthreads);
#ifdef JANSSON
        json_object_set(meta_parameters, "cpu_threads", json_integer(numthreads));
#endif
#endif
        printf("\n");
    }
#else // CUDA
    printf("using CPU");
#ifdef JANSSON
    json_object_set(meta_parameters, "device", json_string("cpu"));
#endif
#ifdef OPENMP
    printf(" (%d thread(s))", numthreads);
#ifdef JANSSON
    json_object_set(meta_parameters, "cpu_threads", json_integer(numthreads));
#endif
#endif // OPENMP
    printf("\n");
#endif // CUDA

    conjugrad_float_t *x = conjugrad_malloc(nvar_padded);
    if(x == NULL) {
        die("ERROR: Not enough memory to allocate variables!");
    }
    memset(x, 0, sizeof(conjugrad_float_t) * nvar_padded);

    // Auto-set lambda_pair
    if(isnan(lambda_pair)) {
        lambda_pair = lambda_pair_factor * (ncol - 1);
    }

    // fill up user data struct for passing to evaluate
    userdata *ud = (userdata *)malloc(sizeof(userdata));
    if(ud == NULL) {
        die("Cannot allocate memory for user data!");
    }
    ud->msa = msa;
    ud->ncol = ncol;
    ud->nrow = nrow;
    ud->nsingle = nsingle;
    ud->nvar = nvar;
    ud->lambda_single = lambda_single;
    ud->lambda_pair = lambda_pair;
    ud->weights = conjugrad_malloc(nrow);
    ud->reweighting_threshold = reweighting_threshold;

    if(initfilename == NULL) {
        // Initialize emissions to pwm
        init_bias(x, ud);
    } else {
        // Load potentials from file
        read_raw(initfilename, ud, x);
    }

    // optimize with default parameters
    conjugrad_parameter_t *param = conjugrad_init();

    param->max_iterations = numiter;
    param->epsilon = conjugrad_eps;
    param->k = conjugrad_k;
    param->max_linesearch = 5;
    param->alpha_mul = F05;
    param->ftol = 1e-4;
    param->wolfe = F02;

    /* pick the CPU, OpenMP or CUDA backend for init/evaluate/destroy */
    int (*init)(void *) = init_cpu;
    int (*destroy)(void *) = destroy_cpu;
    conjugrad_evaluate_t evaluate = evaluate_cpu;

#ifdef OPENMP
    omp_set_num_threads(numthreads);
    if(numthreads > 1) {
        init = init_cpu_omp;
        destroy = destroy_cpu_omp;
        evaluate = evaluate_cpu_omp;
    }
#endif

#ifdef CUDA
    if(use_def_gpu != -1) {
        init = init_cuda;
        destroy = destroy_cuda;
        evaluate = evaluate_cuda;
    }
#endif

    init(ud);

#ifdef JANSSON
    json_object_set(meta_parameters, "reweighting_threshold", json_real(ud->reweighting_threshold));
    json_object_set(meta_parameters, "apc", json_boolean(use_apc));
    json_object_set(meta_parameters, "normalization", json_boolean(use_normalization));

    json_t *meta_regularization = json_object();
    json_object_set(meta_parameters, "regularization", meta_regularization);

    json_object_set(meta_regularization, "type", json_string("l2"));
    json_object_set(meta_regularization, "lambda_single", json_real(lambda_single));
    json_object_set(meta_regularization, "lambda_pair", json_real(lambda_pair));
    json_object_set(meta_regularization, "lambda_pair_factor", json_real(lambda_pair_factor));

    json_t *meta_opt = json_object();
    json_object_set(meta_parameters, "optimization", meta_opt);

    json_object_set(meta_opt, "method", json_string("libconjugrad"));
    json_object_set(meta_opt, "float_bits", json_integer((int)sizeof(conjugrad_float_t) * 8));
    json_object_set(meta_opt, "max_iterations", json_integer(param->max_iterations));
    json_object_set(meta_opt, "max_linesearch", json_integer(param->max_linesearch));
    json_object_set(meta_opt, "alpha_mul", json_real(param->alpha_mul));
    json_object_set(meta_opt, "ftol", json_real(param->ftol));
    json_object_set(meta_opt, "wolfe", json_real(param->wolfe));

    json_t *meta_msafile = meta_file_from_path(msafilename);
    json_object_set(meta_parameters, "msafile", meta_msafile);
    json_object_set(meta_msafile, "ncol", json_integer(ncol));
    json_object_set(meta_msafile, "nrow", json_integer(nrow));

    if(initfilename != NULL) {
        json_t *meta_initfile = meta_file_from_path(initfilename);
        json_object_set(meta_parameters, "initfile", meta_initfile);
        json_object_set(meta_initfile, "ncol", json_integer(ncol));
        json_object_set(meta_initfile, "nrow", json_integer(nrow));
    }

    double neff = 0;
    for(int i = 0; i < nrow; i++) {
        neff += ud->weights[i];
    }
    json_object_set(meta_msafile, "neff", json_real(neff));

    ud->meta_steps = meta_steps;
#endif

    printf("\nWill optimize %d %ld-bit variables\n\n", nvar, sizeof(conjugrad_float_t) * 8);

    if(color) { printf("\x1b[1m"); }
    printf("iter\teval\tf(x)    \t║x║     \t║g║     \tstep\n");
    if(color) { printf("\x1b[0m"); }

    conjugrad_float_t fx;
    int ret;

#ifdef CUDA
    if(use_def_gpu != -1) {
        /* mirror x on the GPU, optimize there, then copy the result back */
        conjugrad_float_t *d_x;
        cudaError_t err = cudaMalloc((void **) &d_x, sizeof(conjugrad_float_t) * nvar_padded);
        if (cudaSuccess != err) {
            printf("CUDA error No. %d while allocating memory for d_x\n", err);
            exit(1);
        }
        err = cudaMemcpy(d_x, x, sizeof(conjugrad_float_t) * nvar_padded, cudaMemcpyHostToDevice);
        if (cudaSuccess != err) {
            printf("CUDA error No. %d while copying parameters to GPU\n", err);
            exit(1);
        }
        ret = conjugrad_gpu(nvar_padded, d_x, &fx, evaluate, progress, ud, param);
        err = cudaMemcpy(x, d_x, sizeof(conjugrad_float_t) * nvar_padded, cudaMemcpyDeviceToHost);
        if (cudaSuccess != err) {
            printf("CUDA error No. %d while copying parameters back to CPU\n", err);
            exit(1);
        }
        err = cudaFree(d_x);
        if (cudaSuccess != err) {
            printf("CUDA error No. %d while freeing memory for d_x\n", err);
            exit(1);
        }
    } else {
        ret = conjugrad(nvar_padded, x, &fx, evaluate, progress, ud, param);
    }
#else
    ret = conjugrad(nvar_padded, x, &fx, evaluate, progress, ud, param);
#endif

    printf("\n");
    printf("%s with status code %d - ", (ret < 0 ? "Exit" : "Done"), ret);

    if(ret == CONJUGRAD_SUCCESS) {
        printf("Success!\n");
    } else if(ret == CONJUGRAD_ALREADY_MINIMIZED) {
        printf("Already minimized!\n");
    } else if(ret == CONJUGRADERR_MAXIMUMITERATION) {
        printf("Maximum number of iterations reached.\n");
    } else {
        printf("Unknown status code!\n");
    }

    printf("\nFinal fx = %f\n\n", fx);

    FILE* out = fopen(matfilename, "w");
    if(out == NULL) {
        printf("Cannot open %s for writing!\n\n", matfilename);
        return 3;
    }

    conjugrad_float_t *outmat = conjugrad_malloc(ncol * ncol);

    FILE *rawfile = NULL;
    if(rawfilename != NULL) {
        printf("Writing raw output to %s\n", rawfilename);
        rawfile = fopen(rawfilename, "w");
        if(rawfile == NULL) {
            printf("Cannot open %s for writing!\n\n", rawfilename);
            return 4;
        }
        write_raw(rawfile, x, ncol);
    }

#ifdef MSGPACK
    FILE *msgpackfile = NULL;
    if(msgpackfilename != NULL) {
        printf("Writing msgpack raw output to %s\n", msgpackfilename);
        msgpackfile = fopen(msgpackfilename, "w");
        if(msgpackfile == NULL) {
            printf("Cannot open %s for writing!\n\n", msgpackfilename);
            return 4;
        }
#ifndef JANSSON
        void *meta = NULL;
#endif
    }
#endif

    sum_submatrices(x, outmat, ncol);

    if(use_apc) {
        apc(outmat, ncol);
    }

    if(use_normalization) {
        normalize(outmat, ncol);
    }

    write_matrix(out, outmat, ncol, ncol);

#ifdef JANSSON
    json_object_set(meta_results, "fx_final", json_real(fx));
    json_object_set(meta_results, "num_iterations", json_integer(json_array_size(meta_steps)));
    json_object_set(meta_results, "opt_code", json_integer(ret));

    json_t *meta_matfile = meta_file_from_path(matfilename);
    json_object_set(meta_results, "matfile", meta_matfile);

    if(rawfilename != NULL) {
        json_object_set(meta_results, "rawfile", meta_file_from_path(rawfilename));
    }
#ifdef MSGPACK
    /* fixed: msgpackfilename only exists when MSGPACK is defined */
    if(msgpackfilename != NULL) {
        json_object_set(meta_results, "msgpackfile", meta_file_from_path(msgpackfilename));
    }
#endif

    fprintf(out, "#>META> %s", json_dumps(meta, JSON_COMPACT));
    if(rawfile != NULL) {
        fprintf(rawfile, "#>META> %s", json_dumps(meta, JSON_COMPACT));
    }
#endif

    if(rawfile != NULL) {
        fclose(rawfile);
    }

#ifdef MSGPACK
    if(msgpackfile != NULL) {
        write_raw_msgpack(msgpackfile, x, ncol, meta);
        fclose(msgpackfile);
    }
#endif

    fflush(out);
    fclose(out);

    destroy(ud);

    conjugrad_free(outmat);
    conjugrad_free(x);
    conjugrad_free(ud->weights);
    free(ud);
    free(msa);
    free(param);

    printf("Output can be found in %s\n", matfilename);

    return 0;
}
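/*
 * A minimal sketch of the standard average-product correction (Dunn et al.,
 * 2008) that apc(outmat, ncol) presumably applies; CCMpred's own
 * implementation may differ in details. Here `double` stands in for
 * conjugrad_float_t and the score matrix is ncol-by-ncol, row-major.
 */
#include <stdlib.h>

static void apc_sketch(double *mat, int ncol)
{
    double *colmean = calloc(ncol, sizeof(double));
    double mean = 0.0;
    int i, j;

    if (colmean == NULL) return;

    /* per-column means and the grand mean of the score matrix */
    for (i = 0; i < ncol; i++) {
        for (j = 0; j < ncol; j++) {
            colmean[j] += mat[i*ncol + j] / ncol;
            mean += mat[i*ncol + j] / (ncol*ncol);
        }
    }

    /* corrected(i,j) = s(i,j) - mean_i * mean_j / mean */
    for (i = 0; i < ncol; i++) {
        for (j = 0; j < ncol; j++) {
            mat[i*ncol + j] -= colmean[i] * colmean[j] / mean;
        }
    }
    free(colmean);
}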
int main(int argc,const char *argv[])
{
    Net *net;
    Group *input,*hidden,*output,*bias;
    ExampleSet *examples;
    Example *ex;
    Connections *c1,*c2,*c3,*c4;
    char * fileName = "input";
    int i,count,j;
    int inputCount = 0,outputCount = 0, hiddenCount = 200;
    Real error, correct;
    Real epsilon,range;

    /* don't buffer output */
    setbuf(stdout,NULL);

    /* set seed to unique number */
    mikenet_set_seed(0);

    /* a default learning rate */
    epsilon=0.1;

    /* default weight range */
    range=0.5;

    default_errorRadius=0.0;

    /* what are the command line arguments? */
    for(i=1;i<argc;i++) {
        if (strcmp(argv[i],"-seed")==0) {
            mikenet_set_seed(atol(argv[i+1]));
            i++;
        }
        else if (strncmp(argv[i],"-epsilon",5)==0) {
            epsilon=atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i],"-range")==0) {
            range=atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i],"-errorRadius")==0) {
            default_errorRadius=atof(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-f") == 0) {
            fileName = (char*)argv[i+1];
            i++;
        }
        else if (strcmp(argv[i], "-i") == 0) {
            inputCount = atoi(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-o") == 0) {
            outputCount = atoi(argv[i+1]);
            i++;
        }
        else if (strcmp(argv[i], "-h") == 0) {
            hiddenCount = atoi(argv[i+1]);
            i++;
        }
        else {
            fprintf(stderr,"unknown argument: %s\n",argv[i]);
            exit(-1);
        }
    }

    /* build a network, with TIME number of time ticks */
    net=create_net(TIME);

    /* learning rate */
    default_epsilon=epsilon;

    /* create our groups. format is: name, num of units, ticks */
    input=init_group("Input",inputCount,TIME);
    hidden=init_group("hidden",hiddenCount,TIME);
    output=init_group("Output",outputCount,TIME);

    /* bias is special. format is: value, ticks */
    bias=init_bias(1.0,TIME);

    /* now add our groups to the network object */
    bind_group_to_net(net,input);
    bind_group_to_net(net,hidden);
    bind_group_to_net(net,output);
    bind_group_to_net(net,bias);

    /* now connect our groups, instantiating connection objects c1 through c4 */
    c1=connect_groups(input,hidden);
    c2=connect_groups(hidden,output);
    c3=connect_groups(bias,hidden);
    c4=connect_groups(bias,output);

    /* add connections to our network */
    bind_connection_to_net(net,c1);
    bind_connection_to_net(net,c2);
    bind_connection_to_net(net,c3);
    bind_connection_to_net(net,c4);

    /* randomize the weights in the connection objects.
       2nd argument is weight range. */
    randomize_connections(c1,range);
    randomize_connections(c2,range);
    randomize_connections(c3,range);
    randomize_connections(c4,range);

    /* how to load and save weights */
    /* load_weights(net,"init.weights"); */

    /* erase old initial weight file */
    /* system("rm -f init.weights.Z"); */

    /* save out our weights to file 'init.weights' */
    /* save_weights(net,"init.weights"); */

    /* load in our example set */
    fprintf(stderr, "Reading %s\n",fileName);
    examples=load_examples(fileName,TIME);
    /* fixed: was fprint(...) with missing arguments */
    fprintf(stderr, "Input: %d, Output: %d\n", inputCount, outputCount);

    error=0.0;
    count=0;

    /* loop for ITER number of times */
    for(i=0;i<ITER;i++) {
        /* get a random example from the example set */
        ex=get_random_example(examples);

        /* do forward propagation */
        bptt_forward(net,ex);

        /* backward pass: compute gradients */
        bptt_compute_gradients(net,ex);

        /* sum up error for this example */
        error+=compute_error(net,ex);

        /* online learning: apply the deltas from previous call to compute_gradients */
        bptt_apply_deltas(net);

        /* is it time to write status? */
        if (count==REP) {
            /* average error over last 'count' iterations */
            error = error/(float)count;
            count=0;

            /* print a message about average error so far */
            fprintf(stderr, "%d\t%f\n",i,error);
            if (error < 0.01) {
                break;
            }

            /* zero error; start counting again */
            error=0.0;
        }
        count++;
    }

    /* done training. write out results for each example */
    correct = 0;
    for(i=0;i<examples->numExamples;i++) {
        ex=&examples->examples[i];
        bptt_forward(net,ex);
        int maxj = -1;
        Real maxx = 0;
        for(j=0 ; j < outputCount; j++) {
            if (output->outputs[TIME-1][j] > maxx) {
                maxj = j;
                maxx = output->outputs[TIME-1][j];
            }
            /* printf("%d:%f ",j,output->outputs[TIME-1][j]); */
        }
        /* printf("max:%d\n",maxj); */
        /* fixed: guard maxj, which is -1 when no output unit exceeds 0 */
        Real target = (maxj >= 0) ? get_value(ex->targets,output->index,TIME-1,maxj) : 0;
        if (target == 1)
            correct += 1;
        printf("i:%d g:%f cor:%f\n",i, target, correct / (i+1));
        /* printf("example %d\tinputs %f\t%f\toutput %f\ttarget %f\n",
                  i,
                  get_value(ex->inputs,input->index,TIME-1,0),
                  get_value(ex->inputs,input->index,TIME-1,1),
                  output->outputs[TIME-1][0],
                  get_value(ex->targets,output->index,TIME-1,0)); */
    }
    fprintf(stderr,"Acc:%f\n", correct / examples->numExamples);
    return 0;
}
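/*
 * Example invocation (the binary name, example file, and unit counts are
 * hypothetical; they depend on how this file is built and on your data):
 *
 *   ./bptt_tagger -f train.ex -i 50 -o 10 -h 100 -seed 1
 *
 * This trains on train.ex with 50 input, 100 hidden, and 10 output units,
 * prints per-example running accuracy to stdout, and writes the final
 * Acc: line to stderr.
 */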