/*
 * Estimate how well the benchmarked operation scales with load.
 *
 * Runs benchmarks[0..MAX_LOAD_PARALLELISM-1], takes the first
 * (single-instance) run as the baseline cost per operation, and returns
 * the best speedup ratio observed relative to that baseline.
 *
 * Returns -1.0 if any timing run reports zero elapsed time (measurement
 * failure); otherwise the maximum observed parallelism, never below 1.0.
 *
 * FIX: removed unused locals (j, k, results, r_save) and gave baseline
 * an explicit initializer.
 */
double
max_parallelism(benchmp_f* benchmarks, int warmup, int repetitions, void* cookie)
{
	int	i;
	double	baseline = 0.;
	double	max_load_parallelism, load_parallelism;

	max_load_parallelism = 1.;

	for (i = 0; i < MAX_LOAD_PARALLELISM; ++i) {
		benchmp(initialize, benchmarks[i], cleanup,
			0, 1, warmup, repetitions, cookie);
		save_minimum();

		if (gettime() == 0)
			return -1.;

		if (i == 0) {
			/* single stream: time per operation is the baseline */
			baseline = (double)gettime() / (double)get_n();
		} else {
			/* speedup = baseline / (time per op with i+1 streams) */
			load_parallelism = baseline;
			load_parallelism /= (double)gettime();
			load_parallelism *= (double)((i + 1) * get_n());
			if (load_parallelism > max_load_parallelism) {
				max_load_parallelism = load_parallelism;
			}
		}
	}
	return max_load_parallelism;
}
//FUNCION DE LA FASE 3 void sparse_matrix_t::mostrarMatrizDensa_2(void) { char aux[80]; /*Declaracion de los 3 vectores de punteros con el tamaño del numero de columnas de la matriz y asignación de memoria dinámica*/ vector_inx_t** matind_ptr = NULL; vector_inx_t** matind_end_ptr = NULL; matrix_item_t** matval_ptr = NULL; matind_ptr = new vector_inx_t* [n_]; matind_end_ptr = new vector_inx_t* [n_]; matval_ptr = new matrix_item_t*[n_]; // //Inicializacion de valores de los vectores de punteros tal como en el PDF for(int j=0;j<get_n();j++){ matind_ptr[j] = matind_ + matbeg_[j]; matval_ptr[j] = matval_ + matbeg_[j]; matind_end_ptr[j] = matind_ptr[j] + matcnt_[j]; } // //Dos bucles para recorrer filas y columnas de la matriz for(int i=0;i<get_m();i++){//Recorrido filas for(int j=0;j<get_n();j++){//Recorrido columnas if (matind_ptr[j]==matind_end_ptr[j]) { //Primer condicional sprintf(aux," %10.6lf ",0.0); cout << aux; } else { if ((*matind_ptr[j])==i) { //Segundo condicional sprintf(aux," %10.6lf ",*matval_ptr[j]); cout << aux; matind_ptr[j] ++; //Ponemos el puntero en la siguiente direccion matval_ptr[j] ++; //Ponemos el puntero en la siguiente direccion } else { sprintf(aux," %10.6lf ",0.0); cout << aux; } } } std::cout << endl; } //Borramos la memoria dinamica de los punteros y les quitamos la direccion (NULL) delete [] matind_ptr; delete [] matval_ptr; delete [] matind_end_ptr; matind_ptr=NULL; matval_ptr=NULL; matind_end_ptr=NULL; // }
// Map a 0-based (row, column) pair to its linear offset in the row-major
// storage of this get_m() x get_n() matrix.
//
// On an out-of-range pair an error is printed to cerr and offset 0 is
// returned (note: callers cannot distinguish this from element (0,0)).
index RAM::position(index i, index j)
{
  // Valid 0-based indices satisfy 0 <= i < get_m() and 0 <= j < get_n().
  // BUG FIX: the previous check used i>get_m() / j>get_n(), wrongly
  // accepting i == get_m() and j == get_n() (one past the end).
  if ((i < 0) || (i >= get_m()) || (j < 0) || (j >= get_n()))
  {
    cerr << "Error en los índices de la matriz." << endl;
    return 0;
  }
  return i*get_n() + j; // (i - 1)*get_n() + (j - 1) if indexing from (1,1) instead of (0,0)
}
// Evaluate the trained multinomial Naive Bayes model on the TEST portion
// of the data reached through `da`, writing the classification accuracy
// (fraction of correctly classified patterns, in [0,1]) into `result`.
// Returns false if any individual pattern fails to classify.
bool Classifier_Multinomial_NaiveBayes<RETURNTYPE,DATATYPE,REALTYPE,IDXTYPE,DIMTYPE,SUBSET,DATAACCESSOR>::test(RETURNTYPE &result, const PDataAccessor da)
{
	// NOTE: mean[] and cov[] must be pre-computed using initialize()
	// NOTE: _inverse[] _det[] and _constant[] must be pre-computed
	notify("Classifier_Multinomial_NaiveBayes::test().");
	// Consistency checks: the model must exist and its class count and
	// (sub)feature dimensionality must agree with the data accessor.
	assert(_model);
	if(_prelearn_mode) assert(get_n()>0);
	assert(_model->get_classes()>1);
	assert(_classes==_model->get_classes());
	assert(da);
	if(_prelearn_mode) assert(da->getNoOfFeatures()==get_n());
	assert(da->getNoOfClasses()>0);
	assert(da->getNoOfClasses()==_classes);
	assert(_index);
	assert(_subfeatures==_model->get_d());
	if(!_prelearn_mode) assert(_subfeatures==_model->get_n());
	assert(_subfeatures>0);
	typename DATAACCESSOR::PPattern p;
	IDXTYPE s,i;
	IDXTYPE count, correct;
	DIMTYPE _features=da->getNoOfFeatures();
	DIMTYPE clstmp;
	if(_subfeatures==1)
	{
		// NOTE: single features are unusable for classification because theta=1 in each class,
		// thus log(theta)=0 and consequently Pcd[] does not depend on feature frequency
		correct=0; count=1; // consider all single features equally unusable
	} else {
		bool b;
		const DIMTYPE da_test_loop=1; // to avoid mixup of get*Block() loops of different types
		count=0; correct=0;
		// For every class, walk all TEST data blocks of that class and
		// classify each pattern; tally correct decisions.
		for(DIMTYPE c_test=0;c_test<_classes;c_test++)
		{
			da->setClass(c_test);
			for(b=da->getFirstBlock(TEST,p,s,da_test_loop);b==true;b=da->getNextBlock(TEST,p,s,da_test_loop)) for(i=0;i<s;i++)
			{
				// p holds s contiguous patterns, _features values each;
				// &p[i*_features] is the i-th pattern in the block.
				if(!classify(clstmp,&p[i*_features])) return false;
				if(clstmp==c_test) correct++;
				count++;
			}
		}
	}
	assert(count>0);
	result=(RETURNTYPE)correct/(RETURNTYPE)count;
#ifdef DEBUG
	{
		ostringstream sos; sos << " result=" << result << std::endl;
		syncout::print(std::cout,sos);
	}
#endif
	return true;
}
int main(int ac, char **av) { int parallel = 1; int warmup = 0; int repetitions = -1; int c; char* usage = "[-P <parallelism>] [-W <warmup>] [-N <repetitions>] procedure|fork|exec|shell\n"; while (( c = getopt(ac, av, "P:W:N:")) != EOF) { switch(c) { case 'P': parallel = atoi(optarg); if (parallel <= 0) lmbench_usage(ac, av, usage); break; case 'W': warmup = atoi(optarg); break; case 'N': repetitions = atoi(optarg); break; default: lmbench_usage(ac, av, usage); break; } } if (optind + 1 != ac) { /* should have one argument left */ lmbench_usage(ac, av, usage); } if (!strcmp("procedure", av[optind])) { benchmp(NULL, do_procedure, cleanup, 0, parallel, warmup, repetitions, &ac); micro("Procedure call", get_n()); } else if (!strcmp("fork", av[optind])) { benchmp(NULL, do_fork, cleanup, 0, parallel, warmup, repetitions, NULL); micro(STATIC_PREFIX "Process fork+exit", get_n()); } else if (!strcmp("exec", av[optind])) { benchmp(NULL, do_forkexec, cleanup, 0, parallel, warmup, repetitions, NULL); micro(STATIC_PREFIX "Process fork+execve", get_n()); } else if (!strcmp("shell", av[optind])) { benchmp(NULL, do_shell, cleanup, 0, parallel, warmup, repetitions, NULL); micro(STATIC_PREFIX "Process fork+/bin/sh -c", get_n()); } else { lmbench_usage(ac, av, usage); } return(0); }
/*
 * Complex refractive index (n - i*k) of a sampled dispersion table at
 * wavelength `lam`.  Wavelengths outside the tabulated range are clamped
 * to the nearest endpoint sample; inside the range, n and k are obtained
 * by independent GSL interpolation.
 */
cmpl
disp_sample_table_n_value(const disp_t *disp, double lam)
{
    const struct disp_sample_table *dt = &disp->disp.sample_table;
    const int last = dt->len - 1;
    double n_interp, k_interp;

    /* below the first sample: constant extrapolation from the left edge */
    if (lam <= get_wavelength(dt, 0)) {
        return get_n(dt, 0) - get_k(dt, 0) * I;
    }
    /* above the last sample: constant extrapolation from the right edge */
    if (lam >= get_wavelength(dt, last)) {
        return get_n(dt, last) - get_k(dt, last) * I;
    }

    n_interp = gsl_interp_eval(dt->interp_n, wavelength_const_array(dt),
                               n_const_array(dt), lam, dt->accel);
    k_interp = gsl_interp_eval(dt->interp_k, wavelength_const_array(dt),
                               k_const_array(dt), lam, dt->accel);
    return n_interp - k_interp * I;
}
/*
 * Simulated parallel-make load.  For each usecs value on the command
 * line: calibrate how many iterations of work() take `usecs`
 * microseconds, then run `bench` with that iteration count across
 * state.jobs jobs and report the average time per run.
 * usage: [-P <parallelism>] [-W <warmup>] [-N <repetitions>] Njobs usecs...
 */
int
main(int ac, char **av)
{
	state_t state;
	int parallel = 1;
	int warmup = 0;
	int repetitions = -1;
	int c;
	double time;
	uint64 usecs;
	char buf[1024];  /* NOTE(review): appears unused in this function */
	char* usage = "[-P <parallelism>] [-W <warmup>] [-N <repetitions>] Njobs usecs...\n";

	while (( c = getopt(ac, av, "P:W:N:")) != EOF) {
		switch(c) {
		case 'P':
			parallel = atoi(optarg);
			if (parallel <= 0) lmbench_usage(ac, av, usage);
			break;
		case 'W':
			warmup = atoi(optarg);
			break;
		case 'N':
			repetitions = atoi(optarg);
			break;
		default:
			lmbench_usage(ac, av, usage);
			break;
		}
	}
	/* need at least Njobs plus one usecs argument */
	if (ac < optind + 2) {
		lmbench_usage(ac, av, usage);
	}
	state.jobs = atoi(av[optind]);
	state.pids = NULL;
	fprintf(stderr, "\"pmake jobs=%d\n", state.jobs);
	while (++optind < ac) {
		usecs = bytes(av[optind]);
		/* calibration pass: single stream, no warmup */
		benchmp(setup, work, NULL, 0, 1, 0, TRIES, &state);
		if (gettime() == 0) exit(1);
		/* scale iteration count so one bench() run lasts ~usecs */
		state.iterations = (iter_t)((usecs * get_n()) / gettime());
		benchmp(setup, bench, NULL, 0, parallel, warmup, repetitions, &state);
		time = gettime();
		time /= get_n();  /* average time per bench() invocation */
		if (time > 0.0)
			fprintf(stderr, "%llu %.2f\n", usecs, time);
	}
	return (0);
}
/*
 * Latency of the standard C random-number generators.
 * usage: [-P <parallelism>] [-W <warmup>] [-N <repetitions>]
 * Reports nanoseconds per call for whichever of drand48/lrand48/rand/
 * random the build configuration (HAVE_* macros) provides.
 */
int
main(int ac, char **av)
{
	int parallel = 1;
	int warmup = 0;
	int repetitions = TRIES;
	int c;
	char* usage = "[-P <parallelism>] [-W <warmup>] [-N <repetitions>]\n";

	while (( c = getopt(ac, av, "P:W:N:")) != EOF) {
		switch(c) {
		case 'P':
			parallel = atoi(optarg);
			if (parallel <= 0) lmbench_usage(ac, av, usage);
			break;
		case 'W':
			warmup = atoi(optarg);
			break;
		case 'N':
			repetitions = atoi(optarg);
			break;
		default:
			lmbench_usage(ac, av, usage);
			break;
		}
	}
	/* this benchmark takes no positional arguments */
	if (optind < ac) {
		lmbench_usage(ac, av, usage);
	}

#ifdef HAVE_DRAND48
	benchmp(NULL, bench_drand48, NULL, 0, parallel, warmup, repetitions, NULL);
	nano("drand48 latency", get_n());

	benchmp(NULL, bench_lrand48, NULL, 0, parallel, warmup, repetitions, NULL);
	nano("lrand48 latency", get_n());
#endif
#ifdef HAVE_RAND
	benchmp(NULL, bench_rand, NULL, 0, parallel, warmup, repetitions, NULL);
	nano("rand latency", get_n());
#endif
#ifdef HAVE_RANDOM
	benchmp(NULL, bench_random, NULL, 0, parallel, warmup, repetitions, NULL);
	nano("random latency", get_n());
#endif
	return (0);
}
// Write the matrix to `os`: a header line with the dimensions (m, n)
// followed by one fixed-width line per row (1-based element access).
// Returns the stream so calls can be chained.
ostream& matrix_t::write(ostream& os)
{
  char aux[80];
  sprintf(aux, " %10d %10d ", get_m(), get_n());
  os << aux << endl;
  for (int i = 1; i <= get_m(); i++) {
    for (int j = 1; j <= get_n(); j++) {
      sprintf(aux, " %10.6lf ", get_matrix_item(i, j));
      os << aux;
    }
    os << endl;
  }
  // BUG FIX: the function is declared to return ostream& but had no
  // return statement, which is undefined behavior in C++.
  return os;
}
/*
 * Time the memory-load benchmark over a buffer of `len` bytes and return
 * the cost in nanoseconds per load.
 * Returns -0.0/garbage only if gettime()/get_n() misbehave; normally a
 * positive latency in ns.
 */
double
loads(benchmp_f initialize, int len, int warmup, int repetitions, void* cookie)
{
	double result;
	int count;
	int parallel = 1;
	struct mem_state* state = (struct mem_state*)cookie;

	state->len = len;
	state->maxlen = len;
	/* loads performed per benchmark iteration: ~100 per cache line
	 * group, rounded up so count >= 100.
	 * NOTE(review): must match what benchmark_loads actually executes
	 * per iteration — confirm against its definition. */
	count = 100 * (state->len / (state->line * 100) + 1);

	/*
	 * Now walk them and time it.
	 */
	benchmp(initialize, benchmark_loads, mem_cleanup, 0, parallel, warmup, repetitions, cookie);

	/* We want to get to nanoseconds / load. */
	result = (1000. * (double)gettime()) / (double)(count * get_n());
	/* fprintf(stderr, "%.5f %.3f\n", len / (1024. * 1024.), result); /**/

	return result;
}
// Print the sparse (column-compressed) matrix in dense form: for every
// (i,j) position, linearly scan column j's stored entries for row index i
// and print the stored value, or 0.0 when the position is not stored.
void sparse_matrix_t::mostrarMatrizDensa_1(void)
{
	char aux[80];
	for(int i=0;i<get_m();i++){   // row sweep
		for(int j=0;j<get_n();j++){   // column sweep
			bool encontrado=false;   // "found": is row i stored in column j?
			// column j's entries occupy matind_/matval_ positions
			// [matbeg_[j], matbeg_[j] + matcnt_[j])
			int l=matbeg_[j];
			for(;(l<matbeg_[j]+matcnt_[j]) && (!encontrado);l++)
				if (matind_[l]==i) encontrado=true;
			// NOTE: when a match is found the for-loop's l++ still runs
			// once before the condition re-check, so the matching value
			// is at index l-1, not l.
			if (encontrado)
				sprintf(aux," %10.6lf ",matval_[l-1]);
			else
				sprintf(aux," %10.6lf ",0.0);
			cout << aux;
		}
		std::cout << endl;
	}
}
/*
** Copy elements (1[f], ..., 1[e]) into (tt[t], tt[t+1], ...). Whenever
** possible, copy in increasing order, which is better for rehashing.
** "possible" means destination after original range, or smaller
** than origin, or copying to another table.
*/
static int tmove (lua_State *L) {
  lua_Integer f = luaL_checkinteger(L, 2);  /* first source index */
  lua_Integer e = luaL_checkinteger(L, 3);  /* last source index */
  lua_Integer t = luaL_checkinteger(L, 4);  /* destination start index */
  int tt = !lua_isnoneornil(L, 5) ? 5 : 1;  /* destination table */
  checktab(L, 1, TAB_R);
  checktab(L, tt, TAB_W);
  if (e >= f) {  /* otherwise, nothing to move */
    lua_Integer n, i;
    lua_Integer size = get_n(L, tt);
    /* reject ranges where e - f + 1 would overflow lua_Integer */
    luaL_argcheck(L, f > 0 || e < LUA_MAXINTEGER + f, 3,
                  "too many elements to move");
    n = e - f + 1;  /* number of elements to move */
    /* reject destinations where t + n - 1 would overflow */
    luaL_argcheck(L, t <= LUA_MAXINTEGER - n + 1, 4,
                  "destination wrap around");
    /* extend the destination's "n" field when the copy goes past it */
    if (size >= 0 && t+n-1 > size) {
      lua_pushinteger(L, t+n-1);
      lua_setfield(L, tt, "n" );
    }
    if (t > e || t <= f || (tt != 1 && !lua_compare(L, 1, tt, LUA_OPEQ))) {
      /* ranges don't overlap dangerously (or tables differ):
       * copy front-to-back */
      for (i = 0; i < n; i++) {
        lua_geti(L, 1, f + i);
        lua_seti(L, tt, t + i);
      }
    }
    else {
      /* overlapping ranges within the same table: copy back-to-front so
       * sources are read before being overwritten */
      for (i = n - 1; i >= 0; i--) {
        lua_geti(L, 1, f + i);
        lua_seti(L, tt, t + i);
      }
    }
  }
  lua_pushvalue(L, tt);  /* return destination table */
  return 1;
}
//вывод в консоль void cnsl_output(NODE* list) { system("cls"); int i = 0; int n = get_n(amt(list), "Введите количество записей, выводимых на одной странице", "out"); while (list) { if (!(i % n)) { if (i && list) { printf_s("\n Далее - нажмите Enter"); rewind(stdin); getchar(); } system("cls"); printf_s("\n-------------------------------------------------------------------------------- \n"); printf_s(" | %3s | %20s | %20s | %6s | %10s, USD |", "№", "Исполнитель", "Альбом", "Год", "Цена"); printf_s("\n-------------------------------------------------------------------------------- \n"); } printf_s(" | %3d | %20s | %20s | %6d | %10.2f |", (i++) + 1, list->data->artist, list->data->album, list->data->year, list->data->price); printf_s("\n-------------------------------------------------------------------------------- \n"); list = list->next; } printf_s("\n Возврат в меню - нажмите Enter"); rewind(stdin); getchar(); }
/*
 * Benchmark reading a file through mmap.
 * usage: prog [-r] size file      (-r selects random access order)
 * Reports megabytes per second via micromb().
 */
int
main(int ac, char **av)
{
	int fd;
	int size;
	int random = 0;
	char *prog = av[0];

	if (ac != 3 && ac != 4) {
		fprintf(stderr, "usage: %s [-r] size file\n", prog);
		exit(1);
	}
	/* consume the optional -r flag so av[1]/av[2] become size/file */
	if (strcmp("-r", av[1]) == 0) {
		random = 1;
		ac--, av++;
	}
	size = bytes(av[1]);
	if (size < MINSIZE) {
		return (1);
	}
	/* create/extend the file to the requested size before mapping */
	CHK(fd = open(av[2], O_CREAT|O_RDWR, 0666));
	CHK(ftruncate(fd, size));
	BENCH(mapit(fd, size, random), 0);
	micromb(size, get_n());
	return(0);
}
/*
 * Client side of the TCP latency test: connect to the host named in
 * av[1], time round-trip transactions, and report microseconds per
 * transaction.  A leading '-' on the host name is the shutdown request:
 * connect (which makes the server exit) and quit without benchmarking.
 */
void
client_main(int ac, char **av)
{
	char	title[100];
	char	*hostname;
	int	fd;

	if (ac != 2) {
		fprintf(stderr, "usage: %s host\n", av[0]);
		exit(1);
	}

	hostname = av[1];
	if (hostname[0] == '-') {
		hostname++;	/* skip the shutdown marker */
	}
	fd = tcp_connect(hostname, TCP_XACT, SOCKOPT_NONE);

	/*
	 * Stop server code.
	 */
	if (av[1][0] == '-') {
		close(fd);
		exit(0);
	}

	BENCH(doclient(fd), MEDIUM);
	sprintf(title, "TCP latency using %s", av[1]);
	micro(title, get_n());
	exit(0);
	/* NOTREACHED */
}
/*
 * UNIX-domain socket connection latency.
 *   -s  start the server (forked into the background)
 *   -S  shut a running server down
 * otherwise run the client benchmark:
 *   [-P <parallelism>] [-W <warmup>] [-N <repetitions>]
 */
int
main(int ac, char **av)
{
	int parallel = 1;
	int warmup = 0;
	int repetitions = TRIES;
	char *usage = "-s\n OR [-P <parallelism>] [-W <warmup>] [-N <repetitions>]\n OR -S\n";
	int c;

	/* Start the server "-s" or Shut down the server "-S" */
	if (ac == 2) {
		if (!strcmp(av[1], "-s")) {
#ifdef CONFIG_NOMMU
			/* MMU-less targets: the child must _exit explicitly */
			if (fork() == 0) {
				server_main();
				_exit(0);
			}
#else
			if (fork() == 0) {
				server_main();
			}
#endif
			exit(0);
		}
		if (!strcmp(av[1], "-S")) {
			/* a single "0" byte tells the server to shut down */
			int sock = unix_connect(CONNAME);
			write(sock, "0", 1);
			close(sock);
			exit(0);
		}
	}

	/*
	 * Rest is client
	 */
	while (( c = getopt(ac, av, "P:W:N:")) != EOF) {
		switch(c) {
		case 'P':
			parallel = atoi(optarg);
			if (parallel <= 0) lmbench_usage(ac, av, usage);
			break;
		case 'W':
			warmup = atoi(optarg);
			break;
		case 'N':
			repetitions = atoi(optarg);
			break;
		default:
			lmbench_usage(ac, av, usage);
			break;
		}
	}
	/* the client mode takes no positional arguments */
	if (optind != ac) {
		lmbench_usage(ac, av, usage);
	}
	benchmp(NULL, benchmark, NULL, 0, parallel, warmup, repetitions, NULL);
	micro("UNIX connection cost", get_n());
}
// Root-function wrapper for the chemical-potential search: on input V[0]
// carries a trial mu; on output it carries trial mu plus the occupation
// residual get_n(r->G) - r->n, so a fixed point V[0] == mu means the
// computed occupation matches the target r->n.
// NOTE(review): assumes the parameterless get_G() reads r->mu — confirm
// against the class definition.
void IASIAM::get_G(complex<double>* V)
{
  r->mu = real(V[0]);      // adopt the trial chemical potential

  get_G();                 // recompute the Green's function at this mu

  V[0] = r->mu + get_n(r->G) - r->n;
}
// Root-function wrapper for mu0: V[0] holds a trial mu0 on input and, on
// output, trial mu0 plus the residual get_n(G0_f) - n.  A fixed point
// V[0] == mu0 means the bath occupation matches the target n.
// NOTE(review): assumes the parameterless get_G0_f() reads this->mu0 —
// confirm against the class definition.
void IASIAM::get_G0_f(complex<double>* V)
{
  mu0 = real(V[0]);        // adopt the trial mu0

  get_G0_f();              // recompute G0_f at this mu0

  V[0] = mu0 + get_n(G0_f) - n;
}
// Root-function wrapper (CHM variant): V[0] holds a trial chemical
// potential on input and, on output, trial mu plus the occupation
// residual get_n(G) - n; a fixed point means the occupation is on target.
// NOTE(review): assumes the parameterless get_G_CHM() reads this->mu —
// confirm against the class definition.
void SIAM::get_G_CHM(complex<double>* V)
{
  mu = real(V[0]);         // adopt the trial chemical potential

  get_G_CHM();             // recompute G at this mu

  V[0] = mu + get_n(G) - n;
}
// Root-function wrapper for mu0: V[0] holds a trial mu0 on input and, on
// output, trial mu0 plus the residual get_n(G0) - n; a fixed point means
// the G0 occupation matches the target n.
// NOTE(review): assumes the parameterless get_G0() reads this->mu0 —
// confirm against the class definition.
void SIAM::get_G0(complex<double>* V)
{
  mu0 = real(V[0]);        // adopt the trial mu0

  get_G0();                // recompute G0 at this mu0

  V[0] = mu0 + get_n(G0) - n;
}
int main(int argc, char *argv[]) { struct _state state; int parallel = 1; int warmup = 0; int repetitions = TRIES; int c; char* usage = "[-m <message size>] [-M <total bytes>] [-P <parallelism>] [-W <warmup>] [-N <repetitions>]\n"; state.xfer = XFERSIZE; /* per-packet size */ state.bytes = XFER; /* total bytes per call */ while (( c = getopt(argc,argv,"m:M:P:W:N:")) != EOF) { switch(c) { case 'm': state.xfer = bytes(optarg); break; case 'M': state.bytes = bytes(optarg); break; case 'P': parallel = atoi(optarg); if (parallel <= 0) lmbench_usage(argc, argv, usage); break; case 'W': warmup = atoi(optarg); break; case 'N': repetitions = atoi(optarg); break; default: lmbench_usage(argc, argv); break; } } if (optind == argc - 1) { state.bytes = bytes(argv[optind]); } else if (optind < argc - 1) { lmbench_usage(argc, argv); } state.pid = 0; /* round up total byte count to a multiple of xfer */ if (state.bytes % state.xfer) { state.bytes += state.bytes - state.bytes % state.xfer; } benchmp(initialize, reader, cleanup, MEDIUM, parallel, warmup, repetitions, &state); if (gettime() > 0) { fprintf(stderr, "AF_UNIX sock stream bandwidth: "); mb(get_n() * parallel * XFER); } return(0); }
/*
 * Signal-handling latency benchmarks.
 * usage: [-P <parallelism>] [-W <warmup>] [-N <repetitions>]
 *        install|catch|prot [file]
 *   install — cost of installing a signal handler
 *   catch   — cost of catching a signal
 *   prot    — cost of a protection fault (requires the file argument)
 */
int
main(int ac, char **av)
{
	int parallel = 1;
	int warmup = 0;
	int repetitions = TRIES;
	int c;
	char* usage = "[-P <parallelism>] [-W <warmup>] [-N <repetitions>] install|catch|prot [file]\n";

	while (( c = getopt(ac, av, "P:W:N:")) != EOF) {
		switch(c) {
		case 'P':
			parallel = atoi(optarg);
			if (parallel <= 0) lmbench_usage(ac, av, usage);
			break;
		case 'W':
			warmup = atoi(optarg);
			break;
		case 'N':
			repetitions = atoi(optarg);
			break;
		default:
			lmbench_usage(ac, av, usage);
			break;
		}
	}
	/* one mode name, plus an optional file argument for "prot" */
	if (optind != ac - 1 && optind != ac - 2) {
		lmbench_usage(ac, av, usage);
	}

	if (!strcmp("install", av[optind])) {
		benchmp(NULL, do_install, NULL, 0, parallel, warmup, repetitions, NULL);
		micro("Signal handler installation", get_n());
	} else if (!strcmp("catch", av[optind])) {
		bench_catch(parallel, warmup, repetitions);
		micro("Signal handler overhead", get_n());
	} else if (!strcmp("prot", av[optind]) && optind == ac - 2) {
		bench_prot(av[optind+1], parallel, warmup, repetitions);
		micro("Protection fault", get_n());
	} else {
		lmbench_usage(ac, av, usage);
	}
	return(0);
}
/* Test driver exercising side effects of frob()/get_n() on the globals
 * e, g, and p.  The || chain is evaluated strictly left to right, so g
 * and p are checked twice: once right after frob() (expected g==1, p==2)
 * and again after get_n() (expected g==2, p==2).
 * NOTE(review): presumably frob() and get_n() mutate g/p — their
 * definitions are not visible here; confirm before editing the order. */
int
main (void)
{
	e = 3;
	if (frob (0, 2) != 0 || g != 1 || p != 2 || e != 3
	    || get_n () != 1 || g != 2 || p != 2)
		abort ();
	exit (0);
}
// Train the multinomial Naive Bayes classifier on the data reached
// through `da`, restricted to the feature subset `sub`.  Fills the
// _index[] buffer with the selected feature indices, (re)learns or
// narrows the model, and precomputes the theta parameters used by
// classify()/test().  Always returns true; assertion failures signal
// inconsistent model/data/subset state.
bool Classifier_Multinomial_NaiveBayes<RETURNTYPE,DATATYPE,REALTYPE,IDXTYPE,DIMTYPE,SUBSET,DATAACCESSOR>::train(const PDataAccessor da, const PSubset sub)
{
	// NOTE: mean[] and cov[] must be pre-computed using initialize()
	// NOTE: explicit call is needed here to ensure wrapper functionality
	//       but a work-around can be implemented to optionally disable the call
	//       (would make sense when testing different subsets for the same da split)
	initialize(da);
	notify("Classifier_Multinomial_NaiveBayes::train().");
	assert(_model);
	assert(da);
	if(_prelearn_mode) assert(get_n()>0);
	assert(da->getNoOfFeatures()>0);
	assert(sub);
	assert(sub->get_frozen_mode()==false);
	if(_prelearn_mode) assert(sub->get_n_raw()==get_n());
	//{
	//	ostringstream sos;
	//	sos << "sub->get_n_raw()="<<sub->get_n_raw() << " da->getNoOfFeatures()="<<da->getNoOfFeatures() << std::endl;
	//	syncout::print(std::cout,sos);
	//}
	assert(sub->get_n_raw()==da->getNoOfFeatures());

	if(_prelearn_mode) _model->narrow_to(sub); // assume _n-dimensional model is pre-learned and needs to be narrow()ed only (NOTE: training data must not change since)
	else {_model->learn(da,sub); _model->denarrow();} // re-learn from scratch, training data has changed (i.e., after switch to next data split)

	// if(sub->get_d_raw()==1) // ??? single feature subsets can not be used for multinomial model based classification
	// {
	// }

	// prepare feature subset index buffering
	DIMTYPE f;
	bool b;
	for(b=sub->getFirstFeature(f),_subfeatures=0;b==true;b=sub->getNextFeature(f),_subfeatures++) {assert(_subfeatures<get_n()); _index[_subfeatures]=f;}
	_model->compute_theta(); //thetas to be used in classify() and test()
#ifdef DEBUG
	{ostringstream sos; sos << "index: "; for(f=0;f<_subfeatures;f++) sos << _index[f] << " "; syncout::print(std::cout,sos);}
#endif
	assert(_subfeatures==sub->get_d_raw());
	return true;
}
/* Merge several samples into one newly allocated sample.
 * in:    array of nsamp sample pointers to merge (order preserved)
 * nsamp: number of input samples
 * par:   unused here; kept for interface compatibility with callers
 * Returns a fresh sample holding deep copies of every pathogen and the
 * matching popid entries; caller owns (and must free) the result. */
struct sample * merge_samples(struct sample **in, int nsamp, struct param *par){
	int i, j, n_i, newsize=0, counter=0;

	/* total size of the output */
	for(i=0;i<nsamp;i++) newsize += get_n(in[i]);
	struct sample * out = create_sample(newsize);

	/* fill in output */
	for(i=0;i<nsamp;i++){
		/* FIX: hoist the loop-invariant size out of the inner-loop
		 * condition (was re-evaluated on every iteration) */
		n_i = get_n(in[i]);
		for(j=0;j<n_i;j++){
			out->pathogens[counter] = copy_pathogen(in[i]->pathogens[j]);
			out->popid[counter++] = in[i]->popid[j];
		}
	}

	out->n = newsize;
	return out;
}
/* Free a sample and everything it owns (pathogens, popid array, and the
 * sample struct itself).  Safe to call with NULL. */
void free_sample(struct sample *in){
	int i, n;

	/* FIX: guard against NULL — the old code dereferenced `in` via
	 * get_n() before any check, crashing on a NULL sample */
	if(in == NULL) return;

	n = get_n(in);
	if(in->pathogens != NULL) {
		for(i=0;i<n;i++) {
			if(in->pathogens[i] != NULL) free_pathogen(in->pathogens[i]);
		}
		free(in->pathogens);
	}
	if(in->popid != NULL) free(in->popid);
	free(in);
}
int process_option(int option, int* n) { int a; int b; int c; gcd_struct g; switch(option) { case 1: *n = get_n(); break; case 2: if(*n > 1) { printf("Please enter the first integer representative: "); a = get_representative(); printf("Please enter the second integer representative: "); b = get_representative(); printf("%d + %d (mod %d) = %d.\n", a, b, *n, add(a, b, *n)); printf("%d x %d (mod %d) = %d.\n", a, b, *n, multiply(a, b, *n)); } else { fprintf(stderr, "\nError: You must first set the value of n.\n\n"); } break; case 3: if(*n > 1) { printf("Please enter the representative: "); c = get_representative(); g = gcd(c, *n); if(g.gcd != 1) { fprintf(stderr, "\nError: %d does not have an inverse.\n\n", c); } else { while(g.y >= *n) { g.y -= *n; } while(g.y < 0) { g.y += *n; } printf("The inverse of %d is %d.\n", c, g.y); } } else { fprintf(stderr, "\nError: You must first set the value of n.\n\n"); } break; case 4: return 0; } return 1; }
// Root-function wrapper for mu0: V[0] holds a trial mu0 on input; the
// resulting G0 occupation is stored in r->n0 and V[0] is set to trial
// mu0 plus the residual r->n0 - r->n.  A fixed point V[0] == mu0 means
// the occupation matches the target r->n.  Progress is traced to stdout.
// NOTE(review): assumes the parameterless get_G0() reads this->mu0 —
// confirm against the class definition.
void IASIAM::get_G0(complex<double>* V)
{
  mu0 = real(V[0]);           // adopt the trial mu0

  get_G0();                   // recompute G0 at this mu0

  r->n0 = get_n(r->G0);       // cache the resulting occupation
  V[0] = mu0 + r->n0 - r->n;

  printf("get_G0: V[0] = %.5f\n",real(V[0]));
}
/*
 * Reed-Solomon encode: produce the parity blocks data[k..n-1], where k
 * (source block count) and n (total block count) come from the fec code
 * descriptor.  Each parity block is `size` bytes and is generated from
 * the k source blocks already present in data[].
 */
void rs_encode(void *code, char *data[], int size)
{
    const int first_parity = get_k(code);
    const int total_blocks = get_n(code);
    int block;

    for (block = first_parity; block < total_blocks; block++)
        fec_encode(code, (void **)data, data[block], block, size);
}
// Classify a single pattern with the trained multinomial Naive Bayes
// model: compute log P(c) + sum_f pattern[f] * log(theta[c][f]) over the
// selected sub-features for every class and return (in `cls`) the class
// with the largest score.  Returns false (with cls=0) when only one
// sub-feature is selected, since such a model cannot discriminate.
bool Classifier_Multinomial_NaiveBayes<RETURNTYPE,DATATYPE,REALTYPE,IDXTYPE,DIMTYPE,SUBSET,DATAACCESSOR>::classify(DIMTYPE &cls, const PPattern &pattern)
{
	// NOTE: mean[] and cov[] must be pre-computed using initialize()
	// NOTE: _inverse[] _det[] and _constant[] must be pre-computed
	notify("Classifier_Multinomial_NaiveBayes::test().");
	assert(_model);
	if(_prelearn_mode) assert(get_n()>0);
	assert(_model->get_classes()>1);
	assert(_classes==_model->get_classes());
	assert(_index);
	assert(_subfeatures==_model->get_d());
	if(!_prelearn_mode) assert(_subfeatures==_model->get_n());
	assert(_subfeatures>0);
	DIMTYPE f;
	DIMTYPE c_cand;
	REALTYPE res;
	DIMTYPE wTH;  // offset of the current class's theta row (row-major, _subfeatures per class)
	REALTYPE *theta=&(_model->get_theta()[0]); // dirty, but this object owns _model thus no memory corruption is possible
	REALTYPE *theta_tmp;
	if(_subfeatures==1)
	{
		// NOTE: single features are unusable for classification because theta=1 in each class,
		// thus log(theta)=0 and consequently Pcd[] does not depend on feature frequency
		cls=0; // consider all single features equally unusable
		return false;
	} else {
		// compute P(c|pattern) for each class
		wTH=0;
		for(c_cand=0;c_cand<_classes;c_cand++)
		{
			theta_tmp=&theta[wTH];
			res=0.0;
			for(f=0;f<_subfeatures;f++)
			{
				//{
				//	ostringstream sos; sos << "f:" << _index[f] << " ptmp[]=" << (REALTYPE)ptmp[_index[f]] << ", theta=" << _model->get_theta()[wTH+f] << ", log=" << log(_model->get_theta()[wTH+f]) << std::endl;
				//	syncout::print(std::cout,sos);
				//}
				// accumulate frequency-weighted log-likelihood of the
				// f-th selected feature (_index maps to raw feature id)
				res+=(REALTYPE)pattern[_index[f]] * log(theta_tmp[f]);
			}
			_Pcd[c_cand]=log(_model->get_Pc(c_cand))+res;
			//{
			//	ostringstream sos; sos << " Pc["<<c_cand<<"]=" << _model->get_Pc(c_cand) << ", log(Pc)=" << log(_model->get_Pc(c_cand)) << ", res="<< res << ", _Pcd[]=" << _Pcd[c_cand] << std::endl << std::endl;
			//	syncout::print(std::cout,sos);
			//}
			wTH+=_subfeatures;  // advance to the next class's theta row
		}
		// find maximum P[c|pattern]
		res=_Pcd[0]; cls=0;
		for(c_cand=1;c_cand<_classes;c_cand++)
			if(_Pcd[c_cand]>res) {res=_Pcd[c_cand]; cls=c_cand;}
	}
	return true;
}