/*
 * Rescale a 2D vector so that its length becomes newLength.
 *
 * vec       : input vector (unchanged)
 * newLength : desired length of the result
 * vDest     : output vector, receives vec scaled to newLength
 *
 * If vec has zero length the direction is undefined, so vDest is
 * left untouched (same behavior as scaling by length 0 is avoided).
 * NOTE(review): assumes vec_length2() returns the Euclidean length
 * of the 2D vector — confirm it is not the squared length.
 */
void vec_scaleTo(Vector2* vec, float newLength, Vector2* vDest) {
  float curLength = vec_length2(vec);

  if (curLength == 0) {
    return; /* cannot rescale the zero vector */
  }
  vec_mult_scalar(vec, newLength / curLength, vDest);
}
/*
 * Calculate the desired racket velocity from the incoming and outgoing
 * ball velocities, assuming a mirror law with restitution coefficient
 * CRR along the racket normal.
 *
 * Mirror law along the normal:
 *   vOut·n = (1+eps)*vRacket·n - eps*vIn·n
 * solved for the racket speed along the normal:
 *   vRacket·n = (vOut·n + eps*vIn·n) / (1+eps)
 *
 * No spin is desired, i.e. the racket velocity component along the
 * racket plane is zero: the output is purely along the normal.
 *
 * velBallIn    : ball velocity before impact
 * velBallOut   : desired ball velocity after impact
 * normalRacket : racket normal (assumed unit length)
 * velRacket    : output, the desired racket velocity
 *
 * TODO: is the CRR value accurate?
 */
void calc_racket_vel(Vector velBallIn, Vector velBallOut, Vector normalRacket, Vector velRacket) {

  double eps = CRR;

  /* project both ball velocities onto the racket normal */
  double vInNormal  = vec_mult_inner(velBallIn, normalRacket);
  double vOutNormal = vec_mult_inner(velBallOut, normalRacket);

  /* mirror law: racket speed along the normal */
  vInNormal  = eps * vInNormal;
  vOutNormal = (vOutNormal + vInNormal) / (1+eps);

  /* racket velocity is purely along the normal (no spin) */
  vec_mult_scalar(normalRacket, vOutNormal, velRacket);
}
/*!*****************************************************************************
*******************************************************************************
\note  parm_opt
\date  10/20/91

\remarks

Constrained parameter optimization by gradient descent with Lagrange
multipliers, optionally switching to a Newton step once the cost change
falls below NEWTON_THRESHOLD. Always MINIMIZES the cost.

*******************************************************************************
Function Parameters: [in]=input,[out]=output
(documented in actual signature order; note the original header listed
f_dLdada before f_dMdada, which is the REVERSE of the signature order)

\param[in,out] a        : initial parameter vector; returns the optimum
                          (must be a vector, even if scalar)
\param[in]     n_parm   : number of parameters to be optimized
\param[in]     n_con    : number of constraints to be taken into account
\param[in,out] tol      : error tolerance to be achieved; returns the
                          final |cost change|
\param[in]     f_dLda   : derivative of the cost w.r.t. the parameters;
                          must return a vector
\param[in]     f_dMda   : derivative of the constraints w.r.t. the
                          parameters; must return a matrix
\param[in]     f_M      : constraint function; must be formulated to
                          return 0 for properly fulfilled constraints
\param[in]     f_L      : simple cost (constraint cost NOT included; the
                          constraint costs are added by this program
                          automatically); returns a double scalar
\param[in]     f_dMdada : second derivative of M w.r.t. the parameters;
                          must be a matrix of dim n_con*n_parm x n_parm
\param[in]     f_dLdada : second derivative of L w.r.t. the parameters;
                          must be a matrix of dim n_parm x n_parm
\param[in]     use_newton: TRUE/FALSE; second derivatives are given and
                          the Newton algorithm may be used
\param[out]    final_cost: the final cost
\param[out]    err      : sqrt of the squared error of all constraints

NOTE: returns TRUE if everything correct, otherwise FALSE

- algorithms come from Dyer & McReynolds

NOTE: besides the possibility of a bug, the Newton method seems to
sacrifice the validity of the constraint a little up to quite a bit,
and should be used prudently

******************************************************************************/
int parm_opt(double *a,int n_parm, int n_con, double *tol, void (*f_dLda)(),
             void (*f_dMda)(), void (*f_M)(), double (*f_L)(), void (*f_dMdada)(),
             void (*f_dLdada)(), int use_newton, double *final_cost, double *err)
{
  register int i,j,n;
  double cost= 999.e30;           /* start huge so the loop always runs once */
  double last_cost = 0.0;
  double *mult=NULL, *new_mult=NULL;  /* this is the vector of Lagrange multipliers */
  double **dMda=NULL, **dMda_t=NULL;  /* constraint Jacobian and its transpose */
  double *dLda;                       /* cost gradient */
  double *K=NULL;                 /* the error in the constraints */
  double eps = 0.025;             /* the learning rate (adapted below) */
  double **aux_mat=NULL;          /* needed for inversion of matrix */
  double *aux_vec=NULL;
  double *new_a;                  /* candidate parameter update */
  double **dMdada=NULL;           /* stacked constraint Hessians (n_con*n_parm x n_parm) */
  double **dLdada=NULL;           /* cost Hessian */
  double **A=NULL;                /* big KKT matrix, a combination of several other matrices */
  double *B=NULL;                 /* a big vector (KKT right-hand side) */
  double **A_inv=NULL;
  int rc=TRUE;
  long count = 0;
  int last_sign = 1;              /* unused */
  int pending1 = FALSE, pending2 = FALSE; /* eps halving/restoring state */
  int firsttime = TRUE;
  int newton_active = FALSE;

  /* work space; constraint-related buffers only exist when n_con > 0 */
  dLda = my_vector(1,n_parm);
  new_a = my_vector(1,n_parm);
  if (n_con > 0) {
    mult = my_vector(1,n_con);
    dMda = my_matrix(1,n_con,1,n_parm);
    dMda_t = my_matrix(1,n_parm,1,n_con);
    K = my_vector(1,n_con);
    aux_mat = my_matrix(1,n_con,1,n_con);
    aux_vec = my_vector(1,n_con);
  }

  if (use_newton) {
    dLdada = my_matrix(1,n_parm,1,n_parm);
    A = my_matrix(1,n_parm+n_con,1,n_parm+n_con);
    A_inv = my_matrix(1,n_parm+n_con,1,n_parm+n_con);
    B = my_vector(1,n_parm+n_con);
    if (n_con > 0) {
      dMdada = my_matrix(1,n_con*n_parm,1,n_parm);
      new_mult = my_vector(1,n_con);
    }
    /* the lower-right n_con x n_con corner of the KKT matrix stays zero */
    for (i=1+n_parm; i<=n_con+n_parm; ++i) {
      for (j=1+n_parm; j<=n_con+n_parm; ++j) {
        A[i][j] = 0.0;
      }
    }
  }

  /* iterate until the cost change drops below the tolerance */
  while (fabs(cost-last_cost) > *tol) {

    ++count;
    pending1 = FALSE;
    pending2 = FALSE;

  AGAIN:     /* re-entry point after an eps adjustment (see below) */

    /* calculate the current Lagrange multipliers */
    if (n_con > 0) {
      (*f_M)(a,K);       /* takes the parameters, returns residuals */
      (*f_dMda)(a,dMda); /* takes the parameters, returns the Jacobian */
    }
    (*f_dLda)(a,dLda);   /* takes the parameters, returns the gradient */
    if (n_con > 0) {
      mat_trans(dMda,dMda_t);
    }

    if (newton_active) {
      if (n_con > 0) {
        (*f_dMdada)(a,dMdada);
      }
      (*f_dLdada)(a,dLdada);
    }

    /* the first step is always a gradient step */
    if (newton_active) {

      if (firsttime) {
        firsttime = FALSE;
        eps = 0.1; /* Newton steps use a larger step size */
      }

      /* build the A matrix: Hessian of the Lagrangian in the upper-left
	 block, constraint Jacobian in the off-diagonal blocks */
      for (i=1; i<=n_parm; ++i) {
        for (j=1; j<=n_parm; ++j) {
          A[i][j] = dLdada[i][j];
          for (n=1; n<=n_con; ++n) {
	    /* NOTE(review): this assumes dMdada stores the Hessian rows
	       interleaved as [n + (i-1)*n_con]; the header says the matrix
	       is n_con*n_parm x n_parm — confirm the row layout produced
	       by f_dMdada matches this indexing */
            A[i][j] += mult[n]*dMdada[n+(i-1)*n_con][j];
          }
        }
      }
      for (i=1+n_parm; i<=n_con+n_parm; ++i) {
        for (j=1; j<=n_parm; ++j) {
          A[j][i] = A[i][j] = dMda[i-n_parm][j];
        }
      }

      /* build the B vector */
      if (n_con > 0) {
	/* NOTE(review): B[1..n_parm] = dMda_t * mult only; the gradient of
	   the Lagrangian would also include dLda, and with n_con == 0
	   B[1..n_parm] is never written at all. This may be the bug the
	   header warns about — verify against Dyer & McReynolds before
	   using the Newton branch */
        mat_vec_mult(dMda_t,mult,B);
      }
      for (i=1; i<=n_con; ++i) {
        B[i+n_parm] = K[i];
      }

      /* invert the A matrix */
      if (!my_inv_ludcmp(A, n_con+n_parm, A_inv)) {
        rc = FALSE;
        break;
      }

      /* B is used as both input and output here */
      mat_vec_mult(A_inv,B,B);
      vec_mult_scalar(B,eps,B);

      for (i=1; i<=n_parm; ++i) {
        new_a[i] = a[i] + B[i];
      }
      for (i=1; i<=n_con; ++i) {
        new_mult[i] = mult[i] + B[n_parm+i];
      }

    } else { /* gradient step */

      if (n_con > 0) {
        /* the multipliers are updated according to:
	   mult = (dMda dMda_t)^(-1) (K/eps - dMda dLda_t) */
        mat_mult(dMda,dMda_t,aux_mat);
        if (!my_inv_ludcmp(aux_mat, n_con, aux_mat)) { /* in-place inversion */
          rc = FALSE;
          break;
        }
        mat_vec_mult(dMda,dLda,aux_vec);
        vec_mult_scalar(K,1./eps,K); /* K is scaled in place */
        vec_sub(K,aux_vec,aux_vec);
        mat_vec_mult(aux_mat,aux_vec,mult);
      }

      /* the update step looks the following:
	 a_new = a - eps * (dLda + mult_t * dMda)_t */
      if (n_con > 0) {
        vec_mat_mult(mult,dMda,new_a);
        vec_add(dLda,new_a,new_a);
      } else {
        vec_equal(dLda,new_a);
      }
      vec_mult_scalar(new_a,eps,new_a);
      vec_sub(a,new_a,new_a);

    }

    /* on the very first pass the reference cost must be evaluated at the
       current parameters; afterwards the previous iteration's cost is used */
    if (count == 1 && !pending1) {
      last_cost = (*f_L)(a);
      if (n_con > 0) {
        (*f_M)(a,K);
        last_cost += vec_mult_inner(K,mult); /* add constraint cost */
      }
    } else {
      last_cost = cost;
    }

    /* calculate the updated cost */
    cost = (*f_L)(new_a);
    /*printf(" %f\n",cost);*/
    if (n_con > 0) {
      (*f_M)(new_a,K);
      if (newton_active) {
        cost += vec_mult_inner(K,new_mult);
      } else {
        cost += vec_mult_inner(K,mult);
      }
    }
    /* printf("last=%f new=%f\n",last_cost,cost); */

    /* check out whether we reduced the cost */
    if (cost > last_cost && fabs(cost-last_cost) > *tol) {

      /* reduce the gradient climbing rate: sometimes a reduction of eps
	 causes an increase in cost, thus leave an option to increase eps */

      cost = last_cost; /* reset last_cost */

      if (pending1 && pending2) {
        /* this means that neither increase nor decrease of eps helps,
	   ==> leave the program */
        rc = TRUE;
        break;
      } else if (pending1) {
        eps *= 4.0; /* the last cutting by half did not help, thus multiply
		       by 2 to get to previous value, and one more time by
		       2 to get new value */
        pending2 = TRUE;
      } else {
        eps /= 2.0;
        pending1 = TRUE;
      }
      goto AGAIN;

    } else {

      /* accept the step */
      vec_equal(new_a,a);
      if (newton_active && n_con > 0) {
        vec_equal(new_mult,mult);
      }
      /* switch to Newton once gradient descent has nearly converged */
      if (use_newton && fabs(cost-last_cost) < NEWTON_THRESHOLD)
        newton_active = TRUE;

    }

  }

  /* release all work space */
  my_free_vector(dLda,1,n_parm);
  my_free_vector(new_a,1,n_parm);
  if (n_con > 0) {
    my_free_vector(mult,1,n_con);
    my_free_matrix(dMda,1,n_con,1,n_parm);
    my_free_matrix(dMda_t,1,n_parm,1,n_con);
    my_free_vector(K,1,n_con);
    my_free_matrix(aux_mat,1,n_con,1,n_con);
    my_free_vector(aux_vec,1,n_con);
  }
  if (use_newton) {
    my_free_matrix(dLdada,1,n_parm,1,n_parm);
    my_free_matrix(A,1,n_parm+n_con,1,n_parm+n_con);
    my_free_matrix(A_inv,1,n_parm+n_con,1,n_parm+n_con);
    my_free_vector(B,1,n_parm+n_con);
    if (n_con > 0) {
      my_free_matrix(dMdada,1,n_con*n_parm,1,n_parm);
      my_free_vector(new_mult,1,n_con);
    }
  }

  *final_cost = cost;
  *tol = fabs(cost-last_cost);
  if (n_con > 0) {
    *err = sqrt(vec_mult_inner(K,K));
  } else {
    *err = 0.0;
  }
  /* printf("count=%ld rc=%d\n",count,rc); */
  return rc;
}
/*
 * Calculate the desired racket normal using the mirror law:
 * the normal is the (normalized) difference between the outgoing
 * and incoming ball velocities.
 *
 * bin    : ball velocity before impact
 * bout   : desired ball velocity after impact
 * normal : output, unit vector along (bout - bin)
 *
 * Fix: guard against bout == bin, which previously divided by
 * sqrt(0) and filled the output with NaN/Inf. In that degenerate
 * case the (zero) difference vector is returned unnormalized.
 */
void calc_racket_normal(Vector bin, Vector bout, Vector normal) {

  double len;

  vec_sub(bout, bin, normal);

  /* normalize, but only if the direction is well defined */
  len = sqrt(vec_mult_inner(normal, normal));
  if (len > 0.) {
    vec_mult_scalar(normal, 1./len, normal);
  }
}
void read_script(void) { int i,j,k,m,rc; char fname[100]; double dummy; int idummy; FILE *in,*out; int n_train_data_columns; int n_test_data_columns; Vector row; char identification_string[100]; char string[100]; int num; char vnames[50][100]; int ans = 0; double o_noise, c_noise; int old_indx_flag = FALSE; Matrix D_train=NULL; char **vnames_train=NULL; char **units_train=NULL; double freq_train; int n_cols_train; int n_rows_train; Matrix D_test=NULL; char **vnames_test=NULL; char **units_test=NULL; double freq_test; int n_cols_test; int n_rows_test; /* I need the filename of the script file: first check whether the user provided it in the argv_global variables */ if (argc_global > 0) { in = fopen(argv_global[1],"r"); if (in != NULL) { fclose(in); strcpy(fname,argv_global[1]); } else { if (!getFile(fname)) exit(-1); } } else { if (!getFile(fname)) exit(-1); } /* this allows to generate the LWPR */ if (argc_global > 1) { sscanf(argv_global[2],"%d",&new_model); } else { get_int("Generate new LWPR = 1; Read from file = 0",ans,&ans); if (ans) new_model = TRUE; } if (!readLWPRScript(fname,new_model,LWPR1)) { fprintf(stderr,"Error when reading script file >%s<\n",fname); exit(-1); } /* now read additional variables from the script */ in = fopen_strip(fname); /* check for included files */ if (find_keyword(in,"include")) { char fname_include[100]; FILE *fp_include; rc=fscanf(in,"%s",fname_include); fp_include = fopen_strip(fname_include); fseek(fp_include, 0, SEEK_END); rewind(in); while ((rc=fgetc(in)) != EOF) fputc(rc,fp_include); fclose(in); in = fp_include; rewind(in); } /* All the names in file will be parsed. Here I define the names of the variables. 
Note that the variables defining the dimensionality of the LWPR must come first since we need them to allocate other variables */ i=0; /* this block has all variables needed to create a LWPR */ sprintf(vnames[++i],"n_in_w"); sprintf(vnames[++i],"n_in_reg"); sprintf(vnames[++i],"n_out"); sprintf(vnames[++i],"lwpr_name"); sprintf(vnames[++i],"n_in_reg_2nd"); sprintf(vnames[++i],"n_out_2nd"); /* this block specifies all variables needed to get the data for training and testing, as well as some other parameters */ sprintf(vnames[++i],"sampling_method"); sprintf(vnames[++i],"index_function"); sprintf(vnames[++i],"max_iterations"); sprintf(vnames[++i],"eval_time"); sprintf(vnames[++i],"cutoff"); sprintf(vnames[++i],"blending"); sprintf(vnames[++i],"file_name_train_data"); sprintf(vnames[++i],"file_name_test_data"); sprintf(vnames[++i],"name_train_in_w_columns"); sprintf(vnames[++i],"name_train_in_reg_columns"); sprintf(vnames[++i],"name_train_out_columns"); sprintf(vnames[++i],"name_test_in_w_columns"); sprintf(vnames[++i],"name_test_in_reg_columns"); sprintf(vnames[++i],"name_test_out_columns"); sprintf(vnames[++i],"name_train_in_reg_2nd_columns"); sprintf(vnames[++i],"name_train_out_2nd_columns"); sprintf(vnames[++i],"name_test_in_reg_2nd_columns"); sprintf(vnames[++i],"name_test_out_2nd_columns"); out = fopen("lwpr_test_varnames","w"); for (j=1; j<=i; ++j) fprintf(out,"%s\n",vnames[j]); fclose(out); remove_temp_file(); /* parse keywords */ i = 0; if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%d",&n_in_w); if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%d",&n_in_reg); if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%d",&n_out); if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%s",lwpr_name); if 
(!find_keyword(in,vnames[++i])) { if (lwprs[LWPR1].use_reg_2nd) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } } else { rc=fscanf(in,"%d",&n_in_reg_2nd); } if (!find_keyword(in,vnames[++i])) { if (lwprs[LWPR1].use_reg_2nd) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } } else { rc=fscanf(in,"%d",&n_out_2nd); } /* at last the parameters need to steer the training and testing of the LWPR */ if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%d",&sampling_method); if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%d",&index_function); if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%ld",&max_iterations); if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } rc=fscanf(in,"%ld",&eval_time); if (find_keyword(in,vnames[++i])) { rc=fscanf(in,"%lf",&cutoff); } if (find_keyword(in,vnames[++i])) { rc=fscanf(in,"%d",&blending); } if (!find_keyword(in,vnames[++i]) && argc_global <= 2) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } if (argc_global > 2) { strcpy(fname_train_data,argv_global[3]); } else { rc=fscanf(in,"%s",fname_train_data); } if (!find_keyword(in,vnames[++i]) && argc_global <= 3) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } if (argc_global > 3) { strcpy(fname_test_data,argv_global[4]); } else { rc=fscanf(in,"%s",fname_test_data); } // at this point the data files can be read -- they are expected to be in // MRDPLOT format printf("Reading training data from >%s<...",fname_train_data); if (!mrdplot_convert(fname_train_data, &D_train, &vnames_train, &units_train, &freq_train, &n_cols_train, &n_rows_train)) { printf("Problems reading MRDPLOT file >%s<\n",fname_train_data); exit(-999); } printf("done\n"); printf("%d rows with %d columns 
read\n",n_rows_train,n_cols_train); printf("Reading test data from >%s<...",fname_test_data); if (!mrdplot_convert(fname_test_data, &D_test, &vnames_test, &units_test, &freq_test, &n_cols_test, &n_rows_test)) { printf("Problems reading MRDPLOT file >%s<\n",fname_test_data); exit(-999); } printf("done\n"); printf("%d rows with %d columns read\n",n_rows_test,n_cols_test); // allocate memory for all arrays Xw_train = my_matrix(1,n_rows_train,1,n_in_w); Xreg_train = my_matrix(1,n_rows_train,1,n_in_reg); Xreg_train_2nd = my_matrix(1,n_rows_train,1,n_in_reg_2nd); Xw_test = my_matrix(1,n_rows_test,1,n_in_w); Xreg_test = my_matrix(1,n_rows_test,1,n_in_reg); Xreg_test_2nd = my_matrix(1,n_rows_test,1,n_in_reg_2nd); Y_train = my_matrix(1,n_rows_train,1,n_out); Y_train_2nd = my_matrix(1,n_rows_train,1,n_out_2nd); Y_test = my_matrix(1,n_rows_test,1,n_out); Y_test_2nd = my_matrix(1,n_rows_test,1,n_out_2nd); x_w = my_vector(1,n_in_w); x_reg = my_vector(1,n_in_reg); x_reg_2nd = my_vector(1,n_in_reg_2nd); y = my_vector(1,n_out); y_2nd = my_vector(1,n_out_2nd); conf = my_vector(1,n_out); conf_2nd = my_vector(1,n_out_2nd); var_y = my_vector(1,n_out); var_y_2nd = my_vector(1,n_out_2nd); mean_y = my_vector(1,n_out); mean_y_2nd = my_vector(1,n_out_2nd); // sort the test and training data into the appropriate arrays if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_in_w; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_train,n_cols_train))) { printf("Couldn't find column >%s< in training data\n",string); exit(-i); } else { for (m=1; m<=n_rows_train; ++m) Xw_train[m][j] = D_train[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_in_reg; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_train,n_cols_train))) { printf("Couldn't find column >%s< in training data\n",string); exit(-i); } else { for (m=1; m<=n_rows_train; 
++m) Xreg_train[m][j] = D_train[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_out; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_train,n_cols_train))) { printf("Couldn't find column >%s< in training data\n",string); exit(-i); } else { for (m=1; m<=n_rows_train; ++m) Y_train[m][j] = D_train[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_in_w; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_test,n_cols_test))) { printf("Couldn't find column >%s< in test data\n",string); exit(-i); } else { for (m=1; m<=n_rows_test; ++m) Xw_test[m][j] = D_test[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_in_reg; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_test,n_cols_test))) { printf("Couldn't find column >%s< in test data\n",string); exit(-i); } else { for (m=1; m<=n_rows_test; ++m) Xreg_test[m][j] = D_test[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_out; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_test,n_cols_test))) { printf("Couldn't find column >%s< in test data\n",string); exit(-i); } else { for (m=1; m<=n_rows_test; ++m) Y_test[m][j] = D_test[m][k]; } } if (lwprs[LWPR1].use_reg_2nd) { if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_in_reg_2nd; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_train,n_cols_train))) { printf("Couldn't find column >%s< in training data\n",string); exit(-i); } else { for (m=1; m<=n_rows_train; ++m) Xreg_train_2nd[m][j] = D_train[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_out_2nd; ++j) 
{ rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_train,n_cols_train))) { printf("Couldn't find column >%s< in training data\n",string); exit(-i); } else { for (m=1; m<=n_rows_train; ++m) Y_train_2nd[m][j] = D_train[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_in_reg_2nd; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_test,n_cols_test))) { printf("Couldn't find column >%s< in test data\n",string); exit(-i); } else { for (m=1; m<=n_rows_test; ++m) Xreg_test_2nd[m][j] = D_test[m][k]; } } if (!find_keyword(in,vnames[++i])) { printf("Could not find variable >%s<\n",vnames[i]); exit(-i); } for (j=1; j<=n_out_2nd; ++j) { rc=fscanf(in,"%s",string); if (!(k=findIndex(string,vnames_test,n_cols_test))) { printf("Couldn't find column >%s< in test data\n",string); exit(-i); } else { for (m=1; m<=n_rows_test; ++m) Y_test_2nd[m][j] = D_test[m][k]; } } } fclose(in); n_train_data = n_rows_train; n_test_data = n_rows_test; if (NORMALIZE_BY_TRAIN) { for (i=1; i<=n_train_data; ++i) { vec_add_size(mean_y,Y_train[i],n_out,mean_y); } vec_mult_scalar(mean_y,1./(double)n_train_data,mean_y); for (i=1; i<=n_train_data; ++i) { for (j=1; j<=n_out; ++j) { var_y[j] += sqr(Y_train[i][j]-mean_y[j]); } } vec_mult_scalar(var_y,1./(double)n_train_data,var_y); if (lwprs[LWPR1].use_reg_2nd) { for (i=1; i<=n_train_data; ++i) { vec_add_size(mean_y_2nd,Y_train_2nd[i],n_out,mean_y_2nd); } vec_mult_scalar(mean_y_2nd,1./(double)n_train_data,mean_y_2nd); for (i=1; i<=n_train_data; ++i) { for (j=1; j<=n_out_2nd; ++j) { var_y_2nd[j] += sqr(Y_train_2nd[i][j]-mean_y_2nd[j]); } } vec_mult_scalar(var_y_2nd,1./(double)n_train_data,var_y_2nd); } } else { for (i=1; i<=n_test_data; ++i) { vec_add_size(mean_y,Y_test[i],n_out,mean_y); } vec_mult_scalar(mean_y,1./(double)n_test_data,mean_y); for (i=1; i<=n_test_data; ++i) { for (j=1; j<=n_out; ++j) { var_y[j] += sqr(Y_test[i][j]-mean_y[j]); } } 
vec_mult_scalar(var_y,1./(double)n_test_data,var_y); if (lwprs[LWPR1].use_reg_2nd) { for (i=1; i<=n_test_data; ++i) { vec_add_size(mean_y_2nd,Y_test_2nd[i],n_out,mean_y_2nd); } vec_mult_scalar(mean_y_2nd,1./(double)n_test_data,mean_y_2nd); for (i=1; i<=n_train_data; ++i) { for (j=1; j<=n_out_2nd; ++j) { var_y_2nd[j] += sqr(Y_test_2nd[i][j]-mean_y_2nd[j]); } } vec_mult_scalar(var_y_2nd,1./(double)n_train_data,var_y_2nd); } } }