Ejemplo n.º 1
0
 void SparseVlatCluster::setEigenDecomposition(const float* eigenVectors , const float* eigenValues, size_t vectorsDim, size_t vectorsSize){
     // Install an externally computed eigen-decomposition for this cluster:
     // U receives the vectorsDim x vectorsDim eigenvector matrix and L the
     // vectorsDim eigenvalues. The matrix must be square and match the
     // order-1 tensor dimension.
     if (vectorsDim != vectorsSize)
             retinThrowException("Not square matrix");
     if (vectorsDim != getTensorDim(1))
             retinThrowException2("Invalid dim %d != %d", vectorsDim, getTensorDim(1));

     const size_t matrixSize = vectorsDim * vectorsDim;
     U.assign(eigenVectors, eigenVectors + matrixSize);
     L.assign(eigenValues, eigenValues + vectorsDim);
 }
Ejemplo n.º 2
0
void    PackedVlatCluster::add(const float* feature,size_t dim)
{
    size_t meanDim = getTensorDim(1);
    if (dim != meanDim)
        retinThrowException2("Invalid feature dim %d != %d",dim,meanDim);
    
    vector<double> temp(meanDim);
    const float* mean = getMeanTensor(1);
    if (mean) {
        for (size_t i=0;i<dim;i++)
            temp[i] = feature[i] - mean[i];
    }
    else {
        for (size_t i=0;i<dim;i++)
            temp[i] = feature[i];
    }

    pca.add(&temp[0],dim);
}
Ejemplo n.º 3
0
    void SparseVlatCluster::init(){
        // Prepare this cluster for accumulation: lazily diagonalize the
        // order-2 mean tensor, zero the dual-variance buffer, and reset the
        // sample counter.
        StandardVlatCluster::init();
        size_t dim = getTensorDim(1);
        
        // First call only (L still empty): copy the order-2 mean tensor into
        // U, compute its symmetric eigen-decomposition in place (U becomes
        // the eigenvectors, L the eigenvalues) and sort the eigenpairs.
        if(meanTensors[1] && L.size() == 0){
            U.resize(dim*dim);
            L.resize(dim);
            memcpy(&U[0], meanTensors[1], dim*dim*sizeof(float));
            matrix_eigSym_float (&U[0], &L[0], dim);
            matrix_sortEig_float (&U[0], &L[0], dim);
        }

        // BUG FIX: the original used reserve() followed by memset on
        // &varDual[0]. reserve() does not change size(), so &varDual[0] on an
        // empty vector is undefined behavior and the memset wrote into
        // unowned capacity. assign() sizes the buffer AND zeroes it.
        if(mainOrder == 1 || sparsityOn == SparseVlatCluster::sparsityOnDiag){
            varDual.assign(dim, 0.0f);
        }
        else if(sparsityOn == SparseVlatCluster::sparsityOnFull){
            varDual.assign(dim*dim, 0.0f);
        }
        
        counter = 0;
    }
Ejemplo n.º 4
0
    size_t SparseVlatCluster::getVlat(float* buf,const std::string& format) const{
        // Export the sparsified VLAT signature of this cluster into buf and
        // return the number of floats written. Supported formats for order 2:
        // "full" (dim*dim), "LD" (upper triangle incl. diagonal) and "L"
        // (strict upper triangle). When buf is NULL only the size is returned.
        size_t featureDim = getTensorDim(1);
        size_t vlatDim = getTensorDim(mainOrder);
        vector<float> sparseVlat(vlatDim);
        vector<int> sparseMask;
        
        if (buf){
            memcpy(&sparseVlat[0], &vlat[0], vlatDim*sizeof(float));
            // Rank the dual-variance entries; the mask built below keeps the
            // highest-variance dimensions and zeroes the rest.
            vector<pair<size_t, float> > sortTable;
            if(mainOrder == 1 || sparsityOn == SparseVlatCluster::sparsityOnDiag){
                // BUG FIX: the original called reserve(), which leaves
                // size()==0, so sortTable[i]/sparseMask[...] indexed past the
                // end and sort() sorted an empty range. resize() is required;
                // it also zero-fills sparseMask.
                sortTable.resize(featureDim);
                sparseMask.resize(featureDim);
                for(size_t i = 0 ; i < featureDim ; i++){
                    sortTable[i].first = i;
                    sortTable[i].second = varDual[i];
                }
                sort(sortTable.begin(), sortTable.end(), myfunction);
            }
            else if(sparsityOn == SparseVlatCluster::sparsityOnFull){
                // Same reserve()->resize() fix as above, on the full
                // featureDim*featureDim dual-variance matrix.
                sortTable.resize(featureDim*featureDim);
                sparseMask.resize(featureDim*featureDim);
                for(size_t i = 0 ; i < featureDim*featureDim ; i++){
                    sortTable[i].first = i;
                    sortTable[i].second = varDual[i];
                }
                sort(sortTable.begin(), sortTable.end(), myfunction);
            }

            if(sparsityStrategy == SparseVlatCluster::sparsityStrategyDim){
                // Keep exactly sparsityDim entries (the best-ranked ones).
                for(size_t i = 0 ; i < sparsityDim ; i++)
                    sparseMask[sortTable[i].first] = 1;
                for(size_t i = sparsityDim ; i < sortTable.size() ; i++)
                    sparseMask[sortTable[i].first] = 0;
            }
            else if(sparsityStrategy == SparseVlatCluster::sparsityStrategyEnergy){
                // Keep the smallest prefix of ranked entries whose variance
                // sum reaches sparsityEnergy * (total variance). Summing
                // varDual[i] over all i equals summing sortTable[i].second,
                // since sortTable is a permutation of the same values.
                float sumVar = 0;
                for(size_t i = 0 ; i < sortTable.size() ; i++)
                    sumVar += varDual[i];
                size_t i;
                float sum;
                for(i=0, sum = 0; i < sortTable.size() && sum<sumVar*sparsityEnergy ; i++){
                    sparseMask[sortTable[i].first] = 1;
                    sum += sortTable[i].second;
                }
                for( ; i < sortTable.size() ; i++)
                    sparseMask[sortTable[i].first] = 0;
            }
        }
        
        if (mainOrder == 1) {
            if (buf){
                // Zero every masked-out coordinate, then emit the vector.
                memcpy(&sparseVlat[0], &vlat[0], featureDim*sizeof(float));
                for(size_t i = 0 ; i < featureDim; i++){
                    if(sparseMask[i] == 0){
                        sparseVlat[i] = 0.0f;
                    }
                }
            
                memcpy(buf, &sparseVlat[0], featureDim*sizeof(float));
            }
            return featureDim;
        }
        else if (mainOrder == 2) {
            if (buf){
                if(sparsityOn == SparseVlatCluster::sparsityOnDiag){
                    // Diagonal sparsity: a masked-out dimension zeroes its
                    // whole row and column of the matrix.
                    memcpy(&sparseVlat[0], &vlat[0], vlatDim*sizeof(float));
                    for(size_t i = 0 ; i < featureDim; i++){
                        if(sparseMask[i] == 0){
                            for(size_t j = 0 ; j < featureDim ; j++){
                                sparseVlat[j+featureDim*i] = 0.0f;
                                sparseVlat[i+featureDim*j] = 0.0f;
                            }
                        }
                    }
                }
                else if(sparsityOn == SparseVlatCluster::sparsityOnFull){
                    // Full sparsity: each matrix entry is masked individually.
                    memcpy(&sparseVlat[0], &vlat[0], vlatDim*sizeof(float));
                    for(size_t i = 0 ; i < vlatDim; i++){
                         if(sparseMask[i] == 0){
                             sparseVlat[i] = 0.0f;
                        }
                    }
                }
                // Subtract the expected diagonal contribution (counter
                // samples times the eigenvalues in L).
                for(size_t i = 0; i < featureDim ; i++)
                        sparseVlat[i*featureDim + i] -= counter*L[i];
            }
            
            if (format == "full") {
                if (buf)
                    memcpy(buf,&sparseVlat[0],featureDim*featureDim*sizeof(float));
                return featureDim*featureDim;
            }
            else if (format == "LD") {
                // Upper triangle including the diagonal, column by column.
                if (buf) {
                    for (size_t i=0;i<featureDim;i++) {
                        for (size_t j=i;j<featureDim;j++)
                            *buf++ = sparseVlat[i+j*featureDim];
                    }
                }
                return (featureDim*(featureDim+1))/2;
            }
            else if (format == "L") {
                // Strict upper triangle (diagonal excluded).
                if (buf) {
                    for (size_t i=0;i<featureDim;i++) {
                        for (size_t j=i+1;j<featureDim;j++)
                            *buf++ = sparseVlat[i+j*featureDim];
                    }
                }
                return (featureDim*(featureDim-1))/2;
            }
            else {
                retinThrowException1("Unsupported format %s", format.c_str());
            }
        }
        else {
            retinThrowException1("Unsupported order %d", mainOrder);
        }
    }
Ejemplo n.º 5
0
    void SparseVlatCluster::add(const float* feature, size_t featureDim){
        // Accumulate one feature into this cluster's VLAT: centre it on the
        // cluster mean, project it onto the eigenbasis U, update the dual
        // variance statistics, then add its (possibly sparsified)
        // contribution to vlat.
        size_t meanDim = getTensorDim(1);
        if (featureDim != meanDim)
            retinThrowException2("Invalid feature dim %d != %d", featureDim, meanDim);

        // featureCentred = feature - mean (or a plain copy when no mean yet).
        vector<float> featureCentred(meanDim);
        const float* mean = getMeanTensor(1);
        if (mean)
            vector_linear_float(&featureCentred[0], feature, -1, mean, meanDim);
        else
            memcpy(&featureCentred[0],feature,meanDim*sizeof(float));
        
        // Project into the eigenbasis: featureDual = U^T * featureCentred.
        vector<float> featureDual(meanDim);
        matrix_CpAtB_float(&featureDual[0], &U[0], &featureCentred[0], meanDim, meanDim, 1);
        
        if(mainOrder == 1 || sparsityOn == SparseVlatCluster::sparsityOnDiag){
            // Diagonal statistics: accumulate squared dual coordinates.
            for(size_t i = 0 ; i < meanDim ; i++)
                varDual[i] += featureDual[i]*featureDual[i];
        }
        else if(sparsityOn == SparseVlatCluster::sparsityOnFull){
            // Full statistics: accumulate the element-wise square of the
            // outer product featureDual * featureDual^T.
            vector<float> temp(meanDim*meanDim);
            matrix_Cpaat_float(&temp[0], &featureDual[0], meanDim);
            for(size_t i = 0 ; i < meanDim*meanDim ; i++)
                temp[i] *= temp[i];
            // BUG FIX: the original added &featureDual[0] here with length
            // meanDim*meanDim, reading far past the end of featureDual
            // (which holds only meanDim floats) and discarding the squared
            // outer product just computed. Accumulate temp instead.
            vector_add_float (&varDual[0], &temp[0], meanDim*meanDim);
        }
        
        vector<pair<size_t, float> > sortTable;
        
        if (mainOrder == 1) {
            if(sparsityOn == sparsityMaxValues){
                // Keep only the sparsityDim largest dual coordinates.
                sortTable.resize(meanDim);
                for(size_t i = 0 ; i < meanDim ; i++){
                    sortTable[i].first = i;
                    sortTable[i].second = featureDual[i];
                }
                sort(sortTable.begin(), sortTable.end(), myfunction);
                for(size_t i = sparsityDim ; i < meanDim ; i++){
                    featureDual[sortTable[i].first] = 0;
                }
            }
            vector_add_float (&vlat[0], &featureDual[0], meanDim);
        }
        else if (mainOrder == 2) {
            if(sparsityOn == sparsityMaxValues){
                // Keep only the sparsityDim largest entries of the outer
                // product before accumulating it. (The vector is already
                // value-initialized to zero; no memset needed.)
                vector<float> temp(meanDim*meanDim);
                matrix_Cpaat_float(&temp[0], &featureDual[0], meanDim);
                sortTable.resize(meanDim*meanDim);
                for(size_t i = 0 ; i < meanDim*meanDim ; i++){
                    sortTable[i].first = i;
                    sortTable[i].second = temp[i];
                }
                sort(sortTable.begin(), sortTable.end(), myfunction);
                for(size_t i = sparsityDim ; i < meanDim*meanDim ; i++){
                    temp[sortTable[i].first] = 0;
                }
                vector_add_float (&vlat[0], &temp[0], meanDim*meanDim);
            }
            else
                matrix_Cpaat_float(&vlat[0], &featureDual[0], meanDim);
        }
        counter++;
    }
Ejemplo n.º 6
0
void    PackedVlatCluster::init() {
    pca.init(getTensorDim(1));
}