/*
 * rec_stalta(a, Nsta, Nlta) -> ndarray of doubles, same length as a.
 *
 * Recursive STA/LTA characteristic function: for each sample the
 * short-term and long-term averages of the squared trace are updated
 * with exponential smoothing (coefficients 1/Nsta and 1/Nlta) and the
 * ratio sta/lta is stored.  The first min(Nlta, ndat) output samples
 * are zeroed because the recursive averages have not stabilized yet.
 *
 * Arguments (parsed with type checking):
 *   a    - 1-D NPY_DOUBLE array (the trace)
 *   Nsta - short-term window length in samples (> 0)
 *   Nlta - long-term window length in samples (> 0)
 *
 * Returns a new 1-D double array, or NULL with a Python exception set.
 */
static PyObject *rec_stalta(PyObject *self, PyObject *args)
{
    PyArrayObject *a, *b;
    int ndat, Nsta, Nlta, i, ntrans;
    double Csta, Clta, sta, lta, sq;
    int b_dims[1];

    if (!PyArg_ParseTuple(args, "O!ii", &PyArray_Type, &a, &Nsta, &Nlta))
        return NULL;
    NDIM_CHECK(a, 1);
    TYPE_CHECK(a, NPY_DOUBLE);
    ndat = a->dimensions[0];
    DIM_CHECK(a, 0, ndat);

    /* Guard the window lengths: Nsta/Nlta <= 0 would silently give
     * inf/NaN smoothing coefficients below. */
    if (Nsta <= 0 || Nlta <= 0) {
        PyErr_SetString(PyExc_ValueError,
                        "Nsta and Nlta must be positive");
        return NULL;
    }

    /* Create the output array (zero-filled by PyArray_FromDims). */
    b_dims[0] = ndat;
    b = (PyArrayObject *) PyArray_FromDims(1, b_dims, PyArray_DOUBLE);
    if (b == NULL)
        return NULL;  /* PyArray_FromDims has already set the exception */

    Csta = 1. / Nsta;
    Clta = 1. / Nlta;
    sta = 0.;
    lta = 0.;
    for (i = 1; i < ndat; i++) {
        sq = IND1(a, i) * IND1(a, i);  /* x*x is cheaper than pow(x, 2) */
        sta = Csta * sq + (1 - Csta) * sta;
        lta = Clta * sq + (1 - Clta) * lta;
        /* NOTE(review): if the trace starts with >= i zeros, lta is
         * still 0 here and sta/lta yields NaN/inf — the zeroing of the
         * transient below masks this only for i < Nlta; confirm whether
         * callers guarantee non-zero leading data. */
        IND1(b, i) = sta / lta;
    }

    /* The first Nlta samples are transient — zero min(Nlta, ndat) of
     * them.  (The previous code zeroed nothing at all when
     * Nlta >= ndat, leaving the whole output as unstable ratios.) */
    ntrans = (Nlta < ndat) ? Nlta : ndat;
    for (i = 0; i < ntrans; i++)
        IND1(b, i) = 0.;

    return PyArray_Return(b);
}
/*
 * nipals(Scores, Loadings, E, PCs, threshold) -> explained_var ndarray
 *
 * Estimation of PC components with the iterative NIPALS method:
 *   E[0] = mean_center(X)          (the E-matrix for the zero-th PC)
 *   t = E(:, 0)                    (a column of X is the starting t vector)
 *   for i = 1 to PCs:
 *     1  p = (E[i-1]' t) / (t' t)  Project X onto t to find loading p
 *     2  p = p * (p' p)^-0.5       Normalise loading vector p to length 1
 *     3  t = (E[i-1] p) / (p' p)   Project X onto p to find score vector t
 *     4  Check convergence: if |eigenval_new - eigenval_old| is larger
 *        than threshold*eigenval_new, return to step 1
 *     5  E[i] = E[i-1] - t p'      Remove the estimated PC from E[i-1]
 *
 * Fills the Scores (rows x PCs) and Loadings (PCs x cols) matrices
 * in-place (all three arrays are modified by this call; E is consumed
 * as working storage) and returns a new 1-D array of per-PC explained
 * variance, or NULL with a Python exception set.
 */
static PyObject *nipals(PyObject *self, PyObject *args)
{
    PyArrayObject *Scores, *Loadings, *E;
    PyArrayObject *explained_var = NULL;
    double threshold, eigenval_t, eigenval_p, eigenval_new;
    double eigenval_old = 0.0;
    double e_tot0, e_tot, tot_explained_var, temp;
    int i, j, PCs, cols, rows, cols_t, rows_t;
    int convergence, ready_for_compare;
    int dims[2];                 /* for explained_var creation */
    double *data_ptr;
    double *t = NULL, *p = NULL; /* heap instead of VLAs: rows/cols are
                                  * user-controlled, VLAs risk stack
                                  * overflow */
    double **e = NULL, **e_transposed = NULL;

    /* Get arguments: */
    if (!PyArg_ParseTuple(args, "O!O!O!id:nipals",
                          &PyArray_Type, &Scores,
                          &PyArray_Type, &Loadings,
                          &PyArray_Type, &E,
                          &PCs, &threshold)) {
        return NULL;
    }

    /* safety checks */
    if (NULL == Scores) return NULL;
    if (NULL == Loadings) return NULL;
    if (NULL == E) return NULL;
    if (Scores->nd != 2) {
        PyErr_Format(PyExc_ValueError,
                     "Scores array has wrong dimension (%d)", Scores->nd);
        return NULL;
    }
    if (Loadings->nd != 2) {
        PyErr_Format(PyExc_ValueError,
                     "Loadings array has wrong dimension (%d)", Loadings->nd);
        return NULL;
    }
    if (E->nd != 2) {
        PyErr_Format(PyExc_ValueError,
                     "E array has wrong dimension (%d)", E->nd);
        return NULL;
    }

    rows = E->dimensions[0];
    cols = E->dimensions[1];

    /* Row-pointer view e[i] into E->data (no copy; E is modified
     * in-place by remove_tp below). */
    data_ptr = (double *) E->data;
    e = (double **) malloc(rows * sizeof(double *));
    if (e == NULL) goto fail_nomem;
    for (i = 0; i < rows; i++) {
        e[i] = &(data_ptr[i * cols]);  /* point at row no. i of E->data */
    }

    /* Work vectors (checked heap allocation instead of VLAs). */
    t = (double *) malloc(rows * sizeof(double));
    p = (double *) malloc(cols * sizeof(double));
    if (t == NULL || p == NULL) goto fail_nomem;

    /* Starting t vector: a column of E. */
    get_column(t, e, cols, rows);

    /* Create explained variance array — previously unchecked, which
     * dereferenced NULL on allocation failure. */
    dims[0] = PCs;
    explained_var =
        (PyArrayObject *) PyArray_FromDims(1, dims, PyArray_DOUBLE);
    if (explained_var == NULL) goto fail;  /* exception already set */

    e_tot0 = total_residual_obj_var_e0(e, cols, rows);
    tot_explained_var = 0;

    /* Scratch matrix for transposed E[i]. */
    cols_t = rows;
    rows_t = cols;
    e_transposed = (double **) malloc(rows_t * sizeof(double *));
    if (e_transposed == NULL) goto fail_nomem;
    for (i = 0; i < rows_t; i++)
        e_transposed[i] = NULL;  /* so the cleanup loop is always safe */
    for (i = 0; i < rows_t; i++) {
        e_transposed[i] = (double *) malloc(cols_t * sizeof(double));
        if (e_transposed[i] == NULL) goto fail_nomem;
    }

    /* Do iterations (0, PCs) */
    for (i = 0; i < PCs; i++) {
        convergence = 0;
        ready_for_compare = 0;
        transpose(e, e_transposed, cols, rows);
        while (convergence == 0) {
            /* 1  p = (E[i-1]' t) / (t' t) */
            matrix_vector_prod(e_transposed, cols_t, rows_t, t, p);
            eigenval_t = vector_eigenval(t, rows);
            vector_div(p, cols, eigenval_t);

            /* 2  p = p * (p' p)^-0.5 : normalise p to length 1 */
            eigenval_p = vector_eigenval(p, cols);
            temp = pow(eigenval_p, (-0.5));
            vector_mul(p, cols, temp);

            /* 3  t = (E[i-1] p) / (p' p) */
            matrix_vector_prod(e, cols, rows, p, t);
            eigenval_p = vector_eigenval(p, cols);
            vector_div(t, rows, eigenval_p);

            /* 4  Check for convergence.  fabs() added: without it a
             * *decrease* of the eigenvalue (negative difference) was
             * always treated as converged, contrary to the algorithm
             * description above. */
            eigenval_new = vector_eigenval(t, rows);
            if (ready_for_compare == 0) {
                ready_for_compare = 1;  /* need two values to compare */
            } else if (fabs(eigenval_new - eigenval_old)
                       < threshold * eigenval_new) {
                convergence = 1;
            }
            eigenval_old = eigenval_new;
        }

        /* 5  E[i] = E[i-1] - t p' : deflate E in-place */
        remove_tp(e, cols, rows, t, p);

        /* Add current Scores and Loadings to collection */
        for (j = 0; j < rows; j++) {
            IND2(Scores, j, i) = t[j];
        }
        for (j = 0; j < cols; j++) {
            IND2(Loadings, i, j) = p[j];
        }

        /* Update explained variance array */
        e_tot = total_residual_obj_var(e, cols, rows, e_tot0);  /* for E[i] */
        IND1(explained_var, i) = 1 - e_tot - tot_explained_var;
        tot_explained_var += IND1(explained_var, i);
    }

    /* success: release scratch storage, keep explained_var */
    free(t);
    free(p);
    free(e);
    for (i = 0; i < rows_t; i++)
        free(e_transposed[i]);
    free(e_transposed);
    return PyArray_Return(explained_var);

fail_nomem:
    PyErr_NoMemory();
fail:
    Py_XDECREF(explained_var);
    free(t);
    free(p);
    free(e);
    if (e_transposed != NULL) {
        for (i = 0; i < rows_t; i++)
            free(e_transposed[i]);
        free(e_transposed);
    }
    return NULL;
}