Example #1
File: plsa.cpp  Project: luxox20/PLSA
/******* M step: maximize the log-likelihood *******/
void Plsa::M_step(MatrixXd &data)
{
    for (int i = 0; i < K; i++)
    {
      Pw_z.col(i) = (Pz_wd[i].cwiseProduct(data)).rowwise().sum();             // row sums: one entry per word
      Pd_z.col(i) = (Pz_wd[i].cwiseProduct(data)).colwise().sum().transpose(); // column sums: one entry per document
    }

    // Normalize: each column of Pd_z and Pw_z must be a probability
    // distribution, and the topic priors P_z must sum to one.
    RowVectorXd C = Pd_z.colwise().sum();        // column sums (1 x K)
    P_z = C;                                     // unnormalized topic priors
    Pd_z = Pd_z * C.cwiseInverse().asDiagonal();

    C = Pw_z.colwise().sum();
    Pw_z = Pw_z * C.cwiseInverse().asDiagonal();

    P_z /= P_z.sum();                            // normalize the priors

}
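
The normalization above is the pattern worth remembering: right-multiplying a matrix by the diagonal of its inverted column sums turns every column into a probability distribution. A minimal self-contained sketch of just that pattern, with hypothetical toy counts:

#include <iostream>
#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::RowVectorXd;

int main()
{
    // Hypothetical 4x3 matrix of non-negative co-occurrence counts.
    MatrixXd P(4, 3);
    P << 1, 2, 3,
         4, 5, 6,
         7, 8, 9,
         1, 1, 1;

    // Right-multiply by diag(1 / column sums) so each column sums to 1.
    RowVectorXd colSums = P.colwise().sum();
    P = P * colSums.cwiseInverse().asDiagonal();

    std::cout << P.colwise().sum() << std::endl; // prints: 1 1 1
    return 0;
}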
Example #2
File: NumInt.cpp  Project: koecher/FEAPB
MatrixXd NumInt::GuassLobattoQaudrature(const int &N) {

    int N0=N;
    double a=1.0; double b=1.0;
    double a1=2.0; double b1 = 2.0;
    if (N0>=1)
    {
        N0=N0-2;
    }

    // Build the pointers
    int* _N = &N0;
    double* _a=&a;     double* _b=&b;
    double* _a1=&a1;     double* _b1=&b1;

    // Initial Guess - Chebyshev-Gauss-Lobatto points
    ArrayXd xu;
    xu.setLinSpaced(N0+2,0.0,N0+1);
    ArrayXd x=-cos(xu/(N0+1)*M_PI);

    // Allocate space for points and weights
    VectorXd z = VectorXd::Zero(x.size());
    VectorXd w = VectorXd::Zero(x.size());

    double x0,x1,del; double* _x0 = &x0; double* _x1 = &x1;
    double deps = std::numeric_limits<double>::epsilon();
    for (int k=0; k<=x.size()-1; k++)
    {
        x0=x(k);
        del=2.0;
        while (fabs(del) > deps)
        {
            // Polynomial Deflation: Exclude the already determined roots
            VectorXd s1 = x.head(k).matrix();
            VectorXd ones = VectorXd::Constant(s1.size(), 1.0);
            double s = (ones.cwiseQuotient((x0 - s1.array()).matrix())).sum();
            // Compute Jacobi polynomial p(a,b)
            JacobiPolynomials J(_N,_a,_b,_x0,false);
            VectorXd p = J.getJacobiPolynomials();
            // Compute Jacobi polynomial p(a+1,b+1) for derivative dp(a,b)
            JacobiPolynomials J1(_N,_a1,_b1,_x0,false);
            VectorXd p1 = J1.getJacobiPolynomials();
            VectorXd dp = VectorXd::Zero(p1.size()); // derivative coefficients; dp(0) stays 0
            // Compute derivative of Jacobi polynomial p(a,b)
            for (int j=0; j<=*_N-1; j++)
            {
                dp(j+1) = 0.5*(*_a+*_b+j+2)*p1(j);
            }
            // Gauss-Lobatto points are the roots of (1 - x^2) * dp, hence:
            double nom  = (1 - x0*x0)*p(N0);
            double dnom = -2*x0*p(N0) + (1 - x0*x0)*dp(N0);
            del = - nom/(dnom-nom*s);
            x1 = x0+del;
            x0=x1;
        }
        z(k)=x1;
        double a2=0; double b2=0; int N1=N0+1;
        double* _a2=&a2; double* _b2=&b2; int* _N1 = &N1;
        JacobiPolynomials J(_N1,_a2,_b2,_x1,false);
        VectorXd p = J.getJacobiPolynomials();

        w(k) = 2.0/((N1)*(N1+1)*p(N1)*p(N1));

    }

    // Store points (column 0) and weights (column 1)
    Matrix<double,Dynamic,Dynamic> zw(N0+2,2);
    zw.col(0)=z;
    zw.col(1)=w;

    return zw;
}
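
Routines like this are easy to sanity-check against a closed-form rule: the 3-point Gauss-Lobatto rule on [-1, 1] has nodes {-1, 0, 1} and weights {1/3, 4/3, 1/3}, and is exact for polynomials up to degree 3. A minimal sketch, independent of the NumInt and JacobiPolynomials classes:

#include <cstdio>

int main()
{
    // Closed-form 3-point Gauss-Lobatto rule on [-1, 1].
    const double z[3] = {-1.0, 0.0, 1.0};
    const double w[3] = {1.0 / 3.0, 4.0 / 3.0, 1.0 / 3.0};

    // Integrate f(x) = x^2; the exact value over [-1, 1] is 2/3.
    double sum = 0.0;
    for (int k = 0; k < 3; ++k)
        sum += w[k] * z[k] * z[k];

    std::printf("quadrature = %.15f, exact = %.15f\n", sum, 2.0 / 3.0);
    return 0;
}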
Example #3
MNEInverseOperator MNEInverseOperator::prepare_inverse_operator(qint32 nave ,float lambda2, bool dSPM, bool sLORETA) const
{
    if(nave <= 0)
    {
        printf("The number of averages should be positive\n");
        return MNEInverseOperator();
    }
    printf("Preparing the inverse operator for use...\n");
    MNEInverseOperator inv(*this);
    //
    //   Scale some of the stuff
    //
    float scale     = ((float)inv.nave)/((float)nave);
    inv.noise_cov->data  *= scale;
    inv.noise_cov->eig   *= scale;
    inv.source_cov->data *= scale;
    //
    if (inv.eigen_leads_weighted)
        inv.eigen_leads->data *= sqrt(scale);
    //
    printf("\tScaled noise and source covariance from nave = %d to nave = %d\n",inv.nave,nave);
    inv.nave = nave;
    //
    //   Create the diagonal matrix for computing the regularized inverse
    //
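    //   reginv(k) = sing(k) / (sing(k)^2 + lambda2), i.e. a Tikhonov-
    //   regularized inverse of each singular value
    //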
    VectorXd tmp = inv.sing.cwiseProduct(inv.sing) + VectorXd::Constant(inv.sing.size(), lambda2);
    inv.reginv = VectorXd(inv.sing.cwiseQuotient(tmp));
    printf("\tCreated the regularized inverter\n");
    //
    //   Create the projection operator
    //

    qint32 ncomp = FiffProj::make_projector(inv.projs, inv.noise_cov->names, inv.proj);
    if (ncomp > 0)
        printf("\tCreated an SSP operator (subspace dimension = %d)\n",ncomp);

    //
    //   Create the whitener
    //
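    //   For a full noise covariance C = U * Lambda * U^T this builds
    //   W = Lambda^(-1/2) * U^T (rows of eigvec hold the eigenvectors),
    //   so that W * C * W^T = I on the non-projected subspace
    //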
    inv.whitener = MatrixXd::Zero(inv.noise_cov->dim, inv.noise_cov->dim);

    qint32 nnzero, k;
    if (inv.noise_cov->diag == 0)
    {
        //
        //   Omit the zeroes due to projection
        //
        nnzero = 0;

        for (k = ncomp; k < inv.noise_cov->dim; ++k)
        {
            if (inv.noise_cov->eig[k] > 0)
            {
                inv.whitener(k,k) = 1.0/sqrt(inv.noise_cov->eig[k]);
                ++nnzero;
            }
        }
        //
        //   Rows of eigvec are the eigenvectors
        //
        inv.whitener *= inv.noise_cov->eigvec;
        printf("\tCreated the whitener using a full noise covariance matrix (%d small eigenvalues omitted)\n", inv.noise_cov->dim - nnzero);
    }
    else
    {
        //
        //   No need to omit the zeroes due to projection
        //
        for (k = 0; k < inv.noise_cov->dim; ++k)
            inv.whitener(k,k) = 1.0/sqrt(inv.noise_cov->data(k,0));

        printf("\tCreated the whitener using a diagonal noise covariance matrix (%d small eigenvalues discarded)\n",ncomp);
    }
    //
    //   Finally, compute the noise-normalization factors
    //
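    //   dSPM weights each component by reginv(k); sLORETA additionally
    //   multiplies by sqrt(1 + sing(k)^2 / lambda2)
    //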
    if (dSPM || sLORETA)
    {
        VectorXd noise_norm = VectorXd::Zero(inv.eigen_leads->nrow);
        VectorXd noise_weight;
        if (dSPM)
        {
           printf("\tComputing noise-normalization factors (dSPM)...");
           noise_weight = VectorXd(inv.reginv);
        }
        else
        {
           printf("\tComputing noise-normalization factors (sLORETA)...");
           VectorXd tmp = (VectorXd::Constant(inv.sing.size(), 1) + inv.sing.cwiseProduct(inv.sing)/lambda2);
           noise_weight = inv.reginv.cwiseProduct(tmp.cwiseSqrt());
        }
        VectorXd one;
        if (inv.eigen_leads_weighted)
        {
           for (k = 0; k < inv.eigen_leads->nrow; ++k)
           {
              one = inv.eigen_leads->data.row(k).transpose().cwiseProduct(noise_weight);
              noise_norm[k] = sqrt(one.dot(one));
           }
        }
        else
        {
            double c;
            for (k = 0; k < inv.eigen_leads->nrow; ++k)
            {
                c = sqrt(inv.source_cov->data(k,0));
                one = c*(inv.eigen_leads->data.row(k).transpose()).cwiseProduct(noise_weight);//ToDo eigenleads data -> pointer
                noise_norm[k] = sqrt(one.dot(one));
            }
        }


        //
        //   Compute the final result
        //
        VectorXd noise_norm_new = noise_norm; // fixed orientations use noise_norm directly
        if (inv.source_ori == FIFFV_MNE_FREE_ORI)
        {
            //
            //   The three-component case is a little bit more involved
            //   The variances at three consecutive entries must be squared and
            //   added together
            //
            //   Even in this case return only one noise-normalization factor
            //   per source location
            //
            VectorXd* t = MNEMath::combine_xyz(noise_norm.transpose());
            noise_norm_new = t->cwiseSqrt(); // keep double precision, otherwise the values get too small
            delete t;
            //
            //   This would replicate the same value on three consecutive
            //   entries
            //
            //   noise_norm = kron(sqrt(mne_combine_xyz(noise_norm)),ones(3,1));
        }
        VectorXd vOnes = VectorXd::Ones(noise_norm_new.size());
        VectorXd tmp = vOnes.cwiseQuotient(noise_norm_new.cwiseAbs());

        typedef Eigen::Triplet<double> T;
        std::vector<T> tripletList;
        tripletList.reserve(noise_norm_new.size());
        for(qint32 i = 0; i < noise_norm_new.size(); ++i)
            tripletList.push_back(T(i, i, tmp[i]));

        inv.noisenorm = SparseMatrix<double>(noise_norm_new.size(),noise_norm_new.size());
        inv.noisenorm.setFromTriplets(tripletList.begin(), tripletList.end());

        printf("[done]\n");
    }
    else
    {
        inv.noisenorm = SparseMatrix<double>();
    }

    return inv;
}
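
The heart of the routine is the regularized inverse built from the singular values: reginv(k) = s(k) / (s(k)^2 + lambda2). Large singular values are inverted almost exactly while small ones are damped instead of blowing up. A minimal Eigen sketch of that step alone, with made-up singular values (the names sing, lambda2 and reginv mirror the members used above):

#include <iostream>
#include <Eigen/Dense>
using Eigen::VectorXd;

int main()
{
    // Hypothetical singular values and regularization parameter.
    VectorXd sing(4);
    sing << 10.0, 5.0, 1.0, 0.1;
    double lambda2 = 1.0 / 9.0; // e.g. 1 / SNR^2 with SNR = 3

    // reginv(k) = s(k) / (s(k)^2 + lambda2)
    VectorXd tmp = sing.cwiseProduct(sing) + VectorXd::Constant(sing.size(), lambda2);
    VectorXd reginv = sing.cwiseQuotient(tmp);

    // For s >> sqrt(lambda2) this approaches 1/s; for s << sqrt(lambda2)
    // it approaches s / lambda2, i.e. small components are suppressed.
    std::cout << reginv.transpose() << std::endl;
    return 0;
}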