Code example #1
File: Test.cpp Project: utpalchakraborty/CppLearning
// <iostream> is required for std::cout; the original file is also assumed to include the
// project header that declares Mycode::Vector, Mycode::LessThan and Mycode::countLessThan
#include <iostream>

int main()
{
    Mycode::Vector<int> intVector(2);
    Mycode::Vector<char> charVector(2);
    Mycode::Vector<double> doubleVector(2);
    Mycode::Vector<float> floatVector(2);


    //Mycode::print_vector(intVector);
    std::cout << charVector;

    // calling the variadic.
    //printVectors(intVector, charVector, doubleVector, floatVector);

    Mycode::Vector<int> copy(intVector);
    // printVectors(copy);

    Mycode::LessThan<int>  t(42);

    // assuming t(x) tests x < 42, t(43) is false and this branch runs
    if(!t(43))
    {
        std::cout << "Obviously 43 is not less than 42." << std::endl;
    }

    std::cout << Mycode::countLessThan(intVector, t) << std::endl;

}
Code example #2
void CSamplingInfEngine::
CreateSamplingPotentials( potsPVector* potsToSampling )
{
    
    CModelDomain *pMD = GetModel()->GetModelDomain();
    
    intVector ndsForSampling;
    GetNdsForSampling( &ndsForSampling );
    potsToSampling->resize( ndsForSampling.size() );
    
    CPotential *tmpPot;
    
    int i;
    for( i = 0; i < ndsForSampling.size(); i++ )
    {
        const CNodeType* nt = pMD->GetVariableType( ndsForSampling[i]);
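        // discrete nodes get a tabular potential filled with ones (the temporary floatVector
        // supplies the table data); continuous nodes get a unit-function Gaussian potential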
        if( nt->IsDiscrete() )
        {
            tmpPot = CTabularPotential::
                Create( &ndsForSampling[i], 1, pMD, &floatVector(nt->GetNodeSize(), 1.0f).front() );
        }
        else
        {
            tmpPot = CGaussianPotential::
                CreateUnitFunctionDistribution( &ndsForSampling[i], 1, pMD );
        }
        (*potsToSampling)[i] = tmpPot;
    }
}
Code example #3
File: main.cpp Project: Chhabi/cppexamples
int main(int argc, char **argv)
{
	vector3<float> floatVector(1.0f, 2.0f, 1.0f);
	vector3<double> doubleVector(2.0, 2.0, 2.0);

	vector3<float> b = floatVector;
	
	// reduce typing
	typedef vector3<float> vector3f;

	vector3f a = b;

	return 0;
}
Code example #4
File: AGibbsInfEngine.cpp Project: billryan/OpenPNL
int GibbsForScalarGaussianBNet( float eps)
{
    std::cout<<std::endl<<" Scalar gaussian BNet (5 nodes)"<< std::endl;
    CBNet *pBnet;
    pEvidencesVector evidences;

    CGibbsSamplingInfEngine *pGibbsInf;
    const CPotential *pQueryPot1, *pQueryPot2;
    int i, ret;

    ////////////////////////////////////////////////////////////////////////
    //Do the example from Satnam Alag's PhD thesis, UCB ME dept 1996 p46
    //Make the following polytree, where all arcs point down
    //
    // 0   1
    //  \ /
    //   2
    //  / \
    // 3   4
    //
    //////////////////////////////////////////////////////////////////////

    int nnodes = 5;
    int numnt = 1;
    CNodeType *nodeTypes = new CNodeType[numnt];
    nodeTypes[0] = CNodeType(0,1);

    intVector nodeAssociation = intVector(nnodes,0);

    int nbs0[] = { 2 };
    int nbs1[] = { 2 };
    int nbs2[] = { 0, 1, 3, 4 };
    int nbs3[] = { 2 };
    int nbs4[] = { 2 };
    ENeighborType ori0[] = { ntChild };
    ENeighborType ori1[] = { ntChild };
    ENeighborType ori2[] = { ntParent, ntParent, ntChild, ntChild };
    ENeighborType ori3[] = { ntParent };
    ENeighborType ori4[] = { ntParent };
    int *nbrs[] = { nbs0, nbs1, nbs2, nbs3, nbs4 };
    ENeighborType *orient[] = { ori0, ori1, ori2, ori3, ori4 };
    intVector numNeighb = intVector(5,1);
    numNeighb[2] = 4;
    CGraph *graph;
    graph = CGraph::Create(nnodes, &numNeighb.front(), nbrs, orient);

    pBnet = CBNet::Create( nnodes, numnt, nodeTypes, &nodeAssociation.front(),graph );
    pBnet->AllocFactors();

    for( i = 0; i < nnodes; i++ )
    {
        pBnet->AllocFactor(i);
    }
    //now we need to create data for factors - we'll create matrices
    floatVector smData = floatVector(1,0.0f);
    floatVector bigData = floatVector(1,1.0f);
    intVector ranges = intVector(2, 1);
    ranges[0] = 1;
    smData[0] = 1.0f;
    CNumericDenseMatrix<float> *mean0 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &smData.front());
    bigData[0] = 4.0f;

    CNumericDenseMatrix<float> *cov0 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &bigData.front());
    pBnet->GetFactor(0)->AttachMatrix(mean0, matMean);
    pBnet->GetFactor(0)->AttachMatrix(cov0, matCovariance);

    float val = 1.0f;

    CNumericDenseMatrix<float> *mean1 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &val );
    CNumericDenseMatrix<float> *cov1 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &val );
    pBnet->GetFactor(1)->AttachMatrix(mean1, matMean);
    pBnet->GetFactor(1)->AttachMatrix(cov1, matCovariance);
    smData[0] = 0.0f;

    CNumericDenseMatrix<float> *mean2 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());
    smData[0] = 2.0f;

    CNumericDenseMatrix<float> *w21 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());
    bigData[0] = 2.0f;

    CNumericDenseMatrix<float> *cov2 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    bigData[0] = 1.0f;

    CNumericDenseMatrix<float> *w20 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    pBnet->GetFactor(2)->AttachMatrix( mean2, matMean );
    pBnet->GetFactor(2)->AttachMatrix( cov2, matCovariance );
    pBnet->GetFactor(2)->AttachMatrix( w20, matWeights,0 );
    pBnet->GetFactor(2)->AttachMatrix( w21, matWeights,1 );

    val = 0.0f;

    CNumericDenseMatrix<float> *mean3 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &val);
    val = 4.0f;
    CNumericDenseMatrix<float> *cov3 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &val);

    smData[0] = 1.1f;

    CNumericDenseMatrix<float> *w30 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());
    pBnet->GetFactor(3)->AttachMatrix( mean3, matMean );
    pBnet->GetFactor(3)->AttachMatrix( cov3, matCovariance );
    pBnet->GetFactor(3)->AttachMatrix( w30, matWeights,0 );


    smData[0] = -0.8f;

    CNumericDenseMatrix<float> *mean4 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());

    bigData[0] = 1.2f;

    CNumericDenseMatrix<float> *cov4 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    bigData[0] = 2.0f;

    CNumericDenseMatrix<float> *w40 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    pBnet->GetFactor(4)->AttachMatrix( mean4, matMean );
    pBnet->GetFactor(4)->AttachMatrix( cov4, matCovariance );
    pBnet->GetFactor(4)->AttachMatrix( w40, matWeights,0 );


    evidences.clear();
    pBnet->GenerateSamples( &evidences, 1 );

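    // hide nodes 0, 1 and 2 in the generated sample, so that only nodes 3 and 4 stay observed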
    const int ndsToToggle2[] = { 0, 1, 2 };
    evidences[0]->ToggleNodeState( 3, ndsToToggle2 );
    const int *flags1 = evidences[0]->GetObsNodesFlags();
    std::cout<<"observed nodes"<<std::endl;
    for( i = 0; i < pBnet->GetNumberOfNodes(); i++ )
    {
        if ( flags1[i] )
        {
            std::cout<<"node "<<i<<"; ";
        }
    }
    std::cout<<std::endl<<std::endl;

    const int querySz2 = 1;
    const int query2[] = { 0 };

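    // exact inference over the same evidence provides the reference marginal for node 0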
    CNaiveInfEngine *pNaiveInf = CNaiveInfEngine::Create(pBnet);
    pNaiveInf->EnterEvidence( evidences[0] );
    pNaiveInf->MarginalNodes( query2,querySz2 );

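    // Gibbs sampling: a single chain, at most 10000 sampling iterations, the first 1000 discarded as burn-in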
    pGibbsInf = CGibbsSamplingInfEngine::Create( pBnet );
    pGibbsInf->SetNumStreams( 1 );
    pGibbsInf->SetMaxTime( 10000 );
    pGibbsInf->SetBurnIn( 1000 );
    


    intVecVector queries(1);
    queries[0].clear();
    queries[0].push_back( 0 );
    //queries[0].push_back( 2 );
    pGibbsInf->SetQueries( queries );

    pGibbsInf->EnterEvidence( evidences[0] );
    pGibbsInf->MarginalNodes( query2, querySz2 );

    pQueryPot1 = pGibbsInf->GetQueryJPD();
    pQueryPot2 = pNaiveInf->GetQueryJPD();
    std::cout<<"result of gibbs"<<std::endl<<std::endl;
    pQueryPot1->Dump();
    std::cout<<"result of naive"<<std::endl;
    pQueryPot2->Dump();

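    // the return value reports whether the Gibbs and exact marginals agree within eps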
    ret = pQueryPot1->IsFactorsDistribFunEqual( pQueryPot2, eps, 0 );

    delete evidences[0];
    delete pNaiveInf;
    delete pGibbsInf;
    delete pBnet;

    return ret;

    ////////////////////////////////////////////////////////////////////////////////////////
}
Code example #5
File: AMRF2PearlInf.cpp Project: JacobCWard/PyPNL
int testPearlInfEngForMRF2()
{
    
    int ret = TRS_OK; // of course ;)


    int numOfRows = 0;

    while( ( numOfRows < 1 ) || ( numOfRows > MAX_NUM_OF_ROWS ) )
    {
        trsiRead( &numOfRows, "3", "Number of rows in a 2-layered MRF2" );
    }
    
	
    int numOfCols = 0;

    while( ( numOfCols < 1 ) || ( numOfCols > MAX_NUM_OF_COLS ) )
    {
        trsiRead( &numOfCols, "3", "Number of columns in a 2-layered MRF2" );
    }


    int numOfNodeVals = 0;

    while( ( numOfNodeVals < 1 ) || ( numOfNodeVals > MAX_NUM_OF_NODE_VALS ) )
    {
        trsiRead( &numOfNodeVals, "10",
            "Number of values each node in a 2-layered MRF2 can take" );
    }


    int numOfNodeValDims = 0;

    while( ( numOfNodeValDims < 1 )
        || ( numOfNodeValDims > MAX_NUM_OF_NODE_VAL_DIMS ) )
    {
        trsiRead( &numOfNodeValDims, "10",
            "Number of dimensions of each node value in a 2-layered MRF2" );
    }


    int equalValNum = -2;

    while( ( equalValNum < -1 ) || ( equalValNum >= numOfNodeVals  ) )
    {
        trsiRead( &equalValNum, "0",
            "values of this number will be equal for all nodes MRF2\n"
            "if you choose value -1 (negative one), there will be no\n"
            "equal values for all nodes");
    }

    int maxNumOfIters = 0;

    while( ( maxNumOfIters < 1 ) || ( maxNumOfIters > MAX_NUM_OF_ITERS ) )
    {
        trsiRead( &maxNumOfIters, "3",
            "Number of iterations Pearl Inference engine will run for" );
    }

    int chosenType = 1;
    char *strVal = trsInt(chosenType);
    int typeOfPotential = -1;
    
    while( ( typeOfPotential < 0 )  )
    {
        trsiRead( &typeOfPotential, strVal, "Type of potential in created model" );
    }


    const int numOfNds    = 2*numOfRows*numOfCols;

    int numOfObsNds = numOfNds/2;


    nodeTypeVector nodeTypes(2);
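    // two node types: hidden top-layer nodes taking numOfNodeVals values and
    // observed bottom-layer nodes with a single dummy value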

    nodeTypes[0].SetType( true, numOfNodeVals );

    nodeTypes[1].SetType( true, 1 );

    intVector nodeAssociation( numOfNds, 0 );

    std::fill( nodeAssociation.begin() + numOfNds/2,
        nodeAssociation.end(), 1 );


    CModelDomain* pModelDomain = CModelDomain::Create( nodeTypes,
        nodeAssociation );

    pnlVector< floatVecVector > nodeVals( numOfNds/2,
        floatVecVector( numOfNodeVals,
        floatVector(numOfNodeValDims) ) );

    int i;

    for( i = 0; i < numOfNds/2; ++i )
    {
        int j;

        for( j = 0; j < numOfNodeVals; ++j )
        {
            if( j != equalValNum )
            {
                super_helper::GetRandomNodeVal( &nodeVals[i][j] );
            }
            else
            {
                super_helper::GetEqualNodeVal( &nodeVals[i][j] );
            }
        }
    }

//    for( i = 0 ; i < numOfNds/2; ++i )
//    {
//        for_each( nodeVals[i].begin(), nodeVals[i].end(),
//            super_helper::PrintVector<float>() );
//    }

    CMRF2* p2LMRF2Model = SuperResolution2lMRF2( numOfRows, numOfCols,
        pModelDomain, nodeVals );


    intVector   obsNds(numOfObsNds);

    valueVector obsNdsVals(numOfObsNds);


    for( i = 0; i < numOfObsNds; ++i )
    {
        obsNds[i] = numOfObsNds + i;

        obsNdsVals[i].SetInt(0);
    }

    CEvidence* pEvidence = CEvidence::Create( p2LMRF2Model,
        obsNds, obsNdsVals );

    CPearlInfEngine* pPearlInfEngine = 
        CPearlInfEngine::Create(p2LMRF2Model);

    CSpecPearlInfEngine* pSpecPearlInfEngine = 
       CSpecPearlInfEngine::Create(p2LMRF2Model);


    pPearlInfEngine->SetMaxNumberOfIterations(maxNumOfIters);
    pSpecPearlInfEngine->SetMaxNumberOfIterations(maxNumOfIters);

    pPearlInfEngine->EnterEvidence( pEvidence, true );
    pSpecPearlInfEngine->EnterEvidence( pEvidence, true );

    intVector maxIndices(numOfNds/2);
    intVector specMaxIndices(numOfNds/2);


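    // query the most probable value of every hidden node with both engines and store it for comparison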
    for( i = 0; i < numOfNds/2; ++i )
    {
        pPearlInfEngine->MarginalNodes( &i, 1 );

        const CEvidence* pMPE = pPearlInfEngine->GetMPE();

        pSpecPearlInfEngine->MarginalNodes( &i, 1 );

        const CEvidence* pSpecMPE = pSpecPearlInfEngine->GetMPE();

        intVector MPEobsNdsNums;

        pConstValueVector MPEobsNdsVals;

        pConstNodeTypeVector MPENodeTypes;

        pMPE->GetObsNodesWithValues( &MPEobsNdsNums, &MPEobsNdsVals,
            &MPENodeTypes );

        maxIndices[i] = MPEobsNdsVals[0]->GetInt();

        pSpecMPE->GetObsNodesWithValues( &MPEobsNdsNums, &MPEobsNdsVals,
            &MPENodeTypes );

        specMaxIndices[i] = MPEobsNdsVals[0]->GetInt();
    }

    std::cout << "Here're the numbers of node values chosen"
        " by Pearl (max_indices)" << std::endl;

    std::for_each( maxIndices.begin(), maxIndices.end(),
        super_helper::Print<int>() );
    //compare results
    int numIndices = maxIndices.size();
    for( i = 0; i < numIndices; i++ )
    {
        if( specMaxIndices[i] != maxIndices[i] )
        {
            ret = TRS_FAIL;
        }
    }

    if( ( equalValNum != -1 )
        && ( std::count_if( maxIndices.begin(), maxIndices.end(),
        super_helper::NotEqual<int>(equalValNum) ) > 0 ) )
    {
        ret = TRS_FAIL;
    }

    //add the code for testing specPearl
    CMRF2* pModelToWorkWith = pnlExCreateBigMRF2( typeOfPotential, 15, 15, 4, 1.0f, 1.0f );

    const int numNodes = pModelToWorkWith->GetNumberOfNodes();

    maxNumOfIters = numNodes/2;

    numOfObsNds = rand()%( numNodes - 2 );

    intVector   obsNdsSp(numOfObsNds);
  
    valueVector obsNdsValsSp(numOfObsNds);

    SetRndObsNdsAndVals( pModelToWorkWith->GetModelDomain(), &obsNdsSp,
        &obsNdsValsSp );
    
    CEvidence* pEvidenceSp = CEvidence::Create( pModelToWorkWith,
        obsNdsSp, obsNdsValsSp );


    CPearlInfEngine* pPearlEng = CPearlInfEngine::Create(pModelToWorkWith);
    
    pPearlEng->SetMaxNumberOfIterations(maxNumOfIters);
    
//	trsTimerStart(0);
    
    pPearlEng->EnterEvidence(pEvidenceSp);

//    trsTimerStop(0);
    
//     double timeOfEnterEvidenceForPearl = trsTimerSec(0);
    
    int numProvIters = pPearlEng->GetNumberOfProvideIterations();
    
    CSpecPearlInfEngine* pPearlEng1 = CSpecPearlInfEngine::Create(pModelToWorkWith);
    
    pPearlEng1->SetMaxNumberOfIterations(maxNumOfIters);
    
//     trsTimerStart(0);
    
    pPearlEng1->EnterEvidence(pEvidenceSp);
    
//     trsTimerStop(0);
    
//     double timeOfEnterEvidenceForPearl1 = trsTimerSec(0);
    
    int numProvIters1 = pPearlEng1->GetNumberOfProvideIterations();
    
    //check whether the potentials are the same

    int potsAreTheSame = 1;
    float eps = 1e-5f;
    float maxDiff = 0.0f;
    const CPotential* potPearl = NULL;
    const CPotential* pot1Pearl = NULL;
    for( i = 0; i < numNodes; i++ )
    {
        pPearlEng->MarginalNodes(&i, 1);
        potPearl = pPearlEng->GetQueryJPD();
        pPearlEng1->MarginalNodes(&i, 1);
        pot1Pearl = pPearlEng1->GetQueryJPD();
        if( !potPearl->IsFactorsDistribFunEqual(pot1Pearl, eps, 0, &maxDiff ) )
        {
            potsAreTheSame = 0;
        }
    }
    // potsAreTheSame was computed but never checked; fail the test if any marginal differed
    if( !potsAreTheSame )
    {
        ret = TRS_FAIL;
    }
    std::cout<<"num iterations pearl: "<<numProvIters<<std::endl;
    std::cout<<"num iterations spec pearl: "<<numProvIters1<<std::endl;
//  std::cout<<"time pearl: "<<timeOfEnterEvidenceForPearl<<std::endl;
//  std::cout<<"time spec pearl: "<<timeOfEnterEvidenceForPearl1<<std::endl;

    delete pPearlEng;

    delete pPearlEng1;
    delete pModelToWorkWith;

    delete pEvidenceSp;

    //create another model and repeat the comparison of the two Pearl engines

    CMRF2* pOtherModel = pnlExCreateBigMRF2( 5, 5, 5, 6, 1.0f, 1.0f );

    const int numOtherNodes = pOtherModel->GetNumberOfNodes();

    numOfObsNds = rand()%( numOtherNodes - 2 );

    intVector   obsNdsOtherSp(numOfObsNds);
  
    valueVector obsNdsValsOtherSp(numOfObsNds);

    SetRndObsNdsAndVals( pOtherModel->GetModelDomain(), &obsNdsOtherSp,
        &obsNdsValsOtherSp );
    
    CEvidence* pEvidenceOtherSp = CEvidence::Create( pOtherModel,
        obsNdsOtherSp, obsNdsValsOtherSp );

    CPearlInfEngine* pOtherPearlEng = CPearlInfEngine::Create(pOtherModel);
    
    pOtherPearlEng->SetMaxNumberOfIterations(maxNumOfIters);
    
    pOtherPearlEng->EnterEvidence(pEvidenceOtherSp);
    
    CSpecPearlInfEngine* pOtherPearlEng1 = CSpecPearlInfEngine::Create(pOtherModel);
    
    pOtherPearlEng1->SetMaxNumberOfIterations(maxNumOfIters);
    
    pOtherPearlEng1->EnterEvidence(pEvidenceOtherSp);
    
    //check whether the potentials are the same
    potsAreTheSame = 1;
    maxDiff = 0.0f;
    for( i = 0; i < numOtherNodes; i++ )
    {
        pOtherPearlEng->MarginalNodes(&i, 1);
        potPearl = pOtherPearlEng->GetQueryJPD();
        pOtherPearlEng1->MarginalNodes(&i, 1);
        pot1Pearl = pOtherPearlEng1->GetQueryJPD();
        if( !potPearl->IsFactorsDistribFunEqual(pot1Pearl, eps, 0, &maxDiff ) )
        {
            potsAreTheSame = 0;
        }
    }
    if( !potsAreTheSame )
    {
        ret = TRS_FAIL;
    }

    delete pEvidenceOtherSp;
    delete pOtherPearlEng;
    delete pOtherPearlEng1;
    delete pOtherModel;

    delete pEvidence;
    //CPearlInfEngine::Release(&pPearlInfEngine);
    delete pPearlInfEngine;
    delete pSpecPearlInfEngine;
    delete p2LMRF2Model;
    delete pModelDomain;


    return ret;
}
Code example #6
File: learn_param.cpp Project: JacobCWard/PyPNL
// assumed includes for this snippet: <iostream> for std::cout and the OpenPNL umbrella header
#include <iostream>
#include "pnl_dll.hpp"

int main()
{
    PNL_USING
	//we create a very small model to start inference on it
	// the model is from Kevin Murphy's BNT\examples\static\belprop_polytree_gaussain
	/*
	Do the example from Satnam Alag's PhD thesis, UCB ME dept 1996 p46
	Make the following polytree, where all arcs point down
	
	 0   1
	  \ /
	   2
	  / \
	 3   4


	*/
	int i;
	//create this model
	int nnodes = 5;
	int numnt = 2;
	CNodeType *nodeTypes = new CNodeType[numnt];
 	nodeTypes[0] = CNodeType(0,2);
	nodeTypes[1] = CNodeType(0,1);
	
	intVector nodeAssociation = intVector(nnodes,0);
	nodeAssociation[1] = 1;
	nodeAssociation[3] = 1;
	int nbs0[] = { 2 };
	int nbs1[] = { 2 };
	int nbs2[] = { 0, 1, 3, 4 };
	int nbs3[] = { 2 };
	int nbs4[] = { 2 };
	int *nbrs[] = { nbs0, nbs1, nbs2, nbs3, nbs4 };
	int numNeighb[] = {1, 1, 4, 1, 1};

	
	ENeighborType ori0[] = { ntChild };
	ENeighborType ori1[] = { ntChild };
	ENeighborType ori2[] = { ntParent, ntParent, ntChild, ntChild };
	ENeighborType ori3[] = { ntParent };
	ENeighborType ori4[] = { ntParent };
	ENeighborType *orient[] = { ori0, ori1, ori2, ori3, ori4 }; 
	
	
	CGraph *pGraph;
	pGraph = CGraph::Create(nnodes, numNeighb, nbrs, orient);
	
	CBNet *pBNet;
	
	pBNet = CBNet::Create( nnodes, numnt, nodeTypes, &nodeAssociation.front(), pGraph );
	//Allocate space for all factors of the model
	pBNet->AllocFactors();
	
	for( i = 0; i < nnodes; i++ )
	{
	    //Allocate space for all matrices of the CPD
	    pBNet->AllocFactor(i);
	}
	
	//now we need to create data for CPDs - we'll create matrices
	CFactor *pCPD;
	floatVector smData = floatVector(2,0.0f);
	floatVector bigData = floatVector(4,1.0f);
	intVector ranges = intVector(2, 1);
	ranges[0] = 2;
	smData[0] = 1.0f;
	CNumericDenseMatrix<float> *mean0 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &smData.front());
	bigData[0] = 4.0f;
	bigData[3] = 4.0f;
	ranges[1] = 2;
	CNumericDenseMatrix<float> *cov0 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &bigData.front());
	pCPD = pBNet->GetFactor(0);
	pCPD->AttachMatrix(mean0, matMean);
	pCPD->AttachMatrix(cov0, matCovariance);
	ranges[0] = 1;
	ranges[1] = 1;
	float val = 1.0f;
	CNumericDenseMatrix<float> *mean1 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &val );
	CNumericDenseMatrix<float> *cov1 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &val );
	pCPD = pBNet->GetFactor(1);
	pCPD->AttachMatrix(mean1, matMean);
	pCPD->AttachMatrix(cov1, matCovariance);
	smData[0] = 0.0f;
	smData[1] = 0.0f;
	ranges[0] = 2;
	CNumericDenseMatrix<float> *mean2 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	smData[0] = 2.0f;
	smData[1] = 1.0f;
	CNumericDenseMatrix<float> *w21 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	bigData[0] = 2.0f;
	bigData[1] = 1.0f;
	bigData[2] = 1.0f;
	bigData[3] = 1.0f;
	ranges[1] = 2;
	CNumericDenseMatrix<float> *cov2 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	bigData[0] = 1.0f;
	bigData[1] = 2.0f;
	bigData[2] = 1.0f;
	bigData[3] = 0.0f;
	CNumericDenseMatrix<float> *w20 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	pCPD = pBNet->GetFactor(2);
	pCPD->AttachMatrix( mean2, matMean );
	pCPD->AttachMatrix( cov2, matCovariance );
	pCPD->AttachMatrix( w20, matWeights,0 );
	pCPD->AttachMatrix( w21, matWeights,1 );
	
	val = 0.0f;
	ranges[0] = 1;
	ranges[1] = 1;
	CNumericDenseMatrix<float> *mean3 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &val);
	val = 1.0f;
	CNumericDenseMatrix<float> *cov3 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &val);
	ranges[1] = 2;
	smData[0] = 1.0f;
	smData[1] = 1.0f;
	CNumericDenseMatrix<float> *w30 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	pCPD = pBNet->GetFactor(3);
	pCPD->AttachMatrix( mean3, matMean );
	pCPD->AttachMatrix( cov3, matCovariance );
	pCPD->AttachMatrix( w30, matWeights,0 );

	ranges[0] = 2; 
	ranges[1] = 1;
	smData[0] = 0.0f;
	smData[1] = 0.0f;
	CNumericDenseMatrix<float> *mean4 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	ranges[1] = 2;
	bigData[0] = 1.0f;
	bigData[1] = 0.0f;
	bigData[2] = 0.0f;
	bigData[3] = 1.0f;
	CNumericDenseMatrix<float> *cov4 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	bigData[2] = 1.0f;
	CNumericDenseMatrix<float> *w40 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	pCPD = pBNet->GetFactor(4);
	pCPD->AttachMatrix( mean4, matMean );
	pCPD->AttachMatrix( cov4, matCovariance );
	pCPD->AttachMatrix( w40, matWeights,0 );

	//Generate random evidences for the model
	int nEv = 1000;
	pEvidencesVector evid;
	pBNet->GenerateSamples( &evid, nEv );
	/////////////////////////////////////////////////////////////////////
		
	//Create copy of initial model with random matrices 
	CGraph *pGraphCopy = CGraph::Copy(pGraph); 
	CBNet *pLearnBNet = CBNet::CreateWithRandomMatrices(pGraphCopy, pBNet->GetModelDomain() );
	
	// Create the learning process
	CEMLearningEngine *pLearn = CEMLearningEngine::Create(pLearnBNet);

	pLearn->SetData(nEv, &evid.front());
	pLearn->Learn();
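	// the printout below compares the matrices of the generating network with those recovered by EM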
	CNumericDenseMatrix<float> *pMatrix;
	int length = 0;
	const float *output;
	
	///////////////////////////////////////////////////////////////////////
	std::cout<<" results of learning (number of evidences = "<<nEv<<std::endl;
	for (i = 0; i < nnodes; i++ )
	{
	    int j;
	    std::cout<<"\n matrix mean for node "<<i;
	    std::cout<<"\n initial BNet \n";
	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pBNet->GetFactor(i)->GetMatrix(matMean));
	    pMatrix->GetRawData(&length, &output);
	    for ( j = 0; j < length; j++ )
	    {
		std::cout<<" "<<output[j];
	    }
	    std::cout<<"\n BNet with random matrices after learning \n ";
	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pLearnBNet->GetFactor(i)->GetMatrix(matMean));
	    pMatrix->GetRawData(&length, &output);
	    for ( j = 0; j < length; j++)
	    {
		std::cout<<" "<<output[j];
	    }
	    
    	    std::cout<<"\n \n matrix covariance for node "<<i<<'\n';
	    std::cout<<"\n initial BNet \n";

	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pBNet->GetFactor(i)->GetMatrix(matCovariance));
	    pMatrix->GetRawData(&length, &output);
	    for (j = 0; j < length; j++ )
	    {
		std::cout<<" "<<output[j];
	    }
    	    std::cout<<"\n BNet with random matrices after learning \n ";
	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pLearnBNet->GetFactor(i)->GetMatrix(matCovariance));
	    pMatrix->GetRawData(&length, &output);
	    for ( j = 0; j < length; j++ )
	    {
		std::cout<<" "<<output[j];
	    }

	    std::cout<<"\n ___________________________\n";
	    
	}
	
	
	for( i = 0; i < nEv; i++)
	{
	    delete evid[i];
	}
	delete pLearn;
	delete pLearnBNet;
	delete pBNet;
	
	

	return 0;
}
Code example #7
File: pnlTabularCPD.hpp Project: JacobCWard/PyPNL
#ifdef PNL_RTTI
#include "pnlpnlType.hpp"
#endif 
PNL_BEGIN

#ifdef SWIG
%rename(CreateUnitF) CTabularCPD::CreateUnitFunctionCPD( const intVector& domainIn, CModelDomain* pMD);
#endif

class PNL_API CTabularCPD : public CCPD
{
public:
    static CTabularCPD* Copy( const CTabularCPD* pTabCPD );
#ifndef SWIG
    static CTabularCPD* Create( const intVector& domainIn, CModelDomain* pMD,
	const floatVector& dataIn = floatVector() );
#endif
    static CTabularCPD* Create( CModelDomain* pMD, const intVector& domainIn, 
	CMatrix<float>* dataIn = NULL);
    

    static CTabularCPD* CreateUnitFunctionCPD( const intVector& domainIn,
	CModelDomain* pMD);

    float GetMatrixValue(const CEvidence *pEv);

    virtual void CreateAllNecessaryMatrices(int typeOfMatrices = 1);
    //typeOfMatrices = 1 - all matrices are random,
    //only the Gaussian covariance matrix is a unit matrix;
    //for ConditionalGaussianDistribution
    //the matrix of Gaussian distribution functions is dense