コード例 #1
0
ファイル: HashSetTest.cpp プロジェクト: schidler/MeCloud
//----------------------------------------
//	main
//----------------------------------------
int main(int /*argc*/, char** /*argv*/)
{
	// RAII logger setup; stays alive for the duration of the benchmark run.
	PrepareConsoleLogger logger(Poco::Logger::ROOT, Poco::Message::PRIO_INFORMATION);

	// Number of random keys fed to each container comparison.
	// FIX: use std::size_t for an element count — the original used
	// std::string::size_type, which happens to be the same integer type on
	// common platforms but is semantically a string-length type and
	// misleads the reader.
	const std::size_t kNumKeys = 262144;

	std::vector<Poco::UInt32>	intVector(kNumKeys);
	std::vector<std::string>	strVector(kNumKeys);

	Poco::Random random;

	// Generate random integer keys and their 8-digit hex string twins, so
	// both runs below operate on the same key material.
	for(std::size_t i=0; i<kNumKeys; ++i)
	{
		intVector[i] = random.next();
		strVector[i] = Poco::format("%08x", intVector[i]);
	}

	std::cout << "------------------------------------" << std::endl;
	std::cout << "Comparison for key type Poco::UInt32" << std::endl;
	std::cout << "------------------------------------" << std::endl;
	TestAll(intVector);

	std::cout << "----------------------------------------------" << std::endl;
	std::cout << "Comparison for key type std::string (length=8)" << std::endl;
	std::cout << "----------------------------------------------" << std::endl;
	TestAll(strVector);

	return 0;
}
コード例 #2
0
ファイル: Test.cpp プロジェクト: utpalchakraborty/CppLearning
int main()
{
    // Instantiate the project's vector template for several element types.
    Mycode::Vector<int>    intVector(2);
    Mycode::Vector<char>   charVector(2);
    Mycode::Vector<double> doubleVector(2);
    Mycode::Vector<float>  floatVector(2);

    //Mycode::print_vector(intVector);
    std::cout << charVector;

    // calling the variadic.
    //printVectors(intVector, charVector, doubleVector, floatVector);

    // Exercise the copy constructor.
    Mycode::Vector<int> copy(intVector);
    // printVectors(copy);

    // Comparison functor bound to the threshold 42.
    Mycode::LessThan<int> t(42);

    const bool below = t(43);
    if (!below)
    {
	std::cout << "Obviously 42 is not less than 43." << std::endl;
    }

    // Count how many elements of intVector satisfy the functor.
    std::cout << Mycode::countLessThan(intVector, t) << std::endl;

}
コード例 #3
0
CSoftMaxCPD::CSoftMaxCPD(const CSoftMaxCPD& SMCPD):
CCPD(dtSoftMax, ftCPD, SMCPD.GetModelDomain())
{
    // Discard whatever distribution base-class construction left behind
    // and install a deep copy of the source CPD's distribution function,
    // dispatching on its concrete type.
    if (SMCPD.m_CorrespDistribFun->GetDistributionType() == dtSoftMax)
    {
        delete m_CorrespDistribFun;
        m_CorrespDistribFun = CSoftMaxDistribFun::Copy(
            static_cast<CSoftMaxDistribFun*>(SMCPD.m_CorrespDistribFun));
    }
    else if (SMCPD.m_CorrespDistribFun->GetDistributionType() == dtCondSoftMax)
    {
        delete m_CorrespDistribFun;
        m_CorrespDistribFun = CCondSoftMaxDistribFun::Copy(
            static_cast<CCondSoftMaxDistribFun*>(SMCPD.m_CorrespDistribFun));
    }
    else
    {
        PNL_THROW(CInconsistentType,
            "distribution must be SoftMax or conditional SoftMax")
    }

    // Duplicate the node domain and the learning configuration.
    m_Domain = intVector(SMCPD.m_Domain);
    m_MaximizingMethod = SMCPD.m_MaximizingMethod;
}
コード例 #4
0
CGaussianCPD::CGaussianCPD( const CGaussianCPD& GauCPD )
:CCPD( dtGaussian, ftCPD, GauCPD.GetModelDomain() )
{
    // Discard the distribution left by base-class construction and install
    // a deep copy of the source CPD's, dispatching on its concrete type.
    if( GauCPD.m_CorrespDistribFun->GetDistributionType() == dtGaussian )
    {
        delete m_CorrespDistribFun;
        m_CorrespDistribFun = CGaussianDistribFun::Copy(
            static_cast<CGaussianDistribFun*>( GauCPD.m_CorrespDistribFun ) );
    }
    else if( GauCPD.m_CorrespDistribFun->GetDistributionType() == dtCondGaussian )
    {
        delete m_CorrespDistribFun;
        m_CorrespDistribFun = CCondGaussianDistribFun::Copy(
            static_cast<CCondGaussianDistribFun*>( GauCPD.m_CorrespDistribFun ) );
    }
    else
    {
        PNL_THROW( CInconsistentType,
            "distribution must be gaussian or conditional gaussian" )
    }

    // Duplicate the node domain of the source CPD.
    m_Domain = intVector( GauCPD.m_Domain );
}
コード例 #5
0
CGraphicalModel::CGraphicalModel(int numberOfNodes,
                                 int numberOfNodeTypes,
                                 const CNodeType *nodeTypes,
                                 const int *nodeAssociation )
{
    // Wrap the two raw C arrays in vectors and hand them, together with a
    // back-pointer to this model, to the model-domain factory.
    nodeTypeVector typeVec( nodeTypes, nodeTypes + numberOfNodeTypes );
    intVector assocVec( nodeAssociation, nodeAssociation + numberOfNodes );
    m_pMD = CModelDomain::Create( typeVec, assocVec, this );
}
コード例 #6
0
CTreeCPD::CTreeCPD( const CTreeCPD& TreeCPD )
:CCPD( dtTree, ftCPD, TreeCPD.GetModelDomain() )
{
    // Only a tree distribution can be deep-copied into a tree CPD.
    if( TreeCPD.m_CorrespDistribFun->GetDistributionType() != dtTree )
    {
        PNL_THROW( CInconsistentType, "distribution must be tree" );
    }

    // Discard the distribution left by base-class construction and install
    // a deep copy of the source CPD's tree distribution.
    delete m_CorrespDistribFun;
    m_CorrespDistribFun = CTreeDistribFun::Copy(
        static_cast<CTreeDistribFun*>( TreeCPD.m_CorrespDistribFun ) );

    // Duplicate the node domain of the source CPD.
    m_Domain = intVector( TreeCPD.m_Domain );
}
コード例 #7
0
ファイル: AlgorithmTests.cpp プロジェクト: vcellucci/Pal
TEST_F(AlgorithmTests, testForEach)
{
    // 257 elements (deliberately not a round number) of value 1.
    std::vector<int,Pal::aligned_allocator<int>> data(257, 1);

    // Overwrite every element with 2 via the parallel algorithm under test.
    Pal::parallel_for_each(data.begin(), data.end(), [](int& element)
    {
        element = 2;
    });

    // Sequentially verify that no element was missed.
    std::for_each(data.begin(), data.end(), [](int& element)
    {
        ASSERT_EQ(2, element);
    });
}
コード例 #8
0
ファイル: AlgorithmTests.cpp プロジェクト: vcellucci/Pal
TEST_F(AlgorithmTests, testBlockedRange)
{
    using IntVector = std::vector<int,Pal::aligned_allocator<int>>;

    // 257 elements of value 1; the ranged variant hands each worker a chunk.
    IntVector data(257, 1);

    // Each invocation receives a [begin, end) chunk and fills it with 2.
    Pal::parallel_for_each_range(data.begin(), data.end(),
    [](Pal::chunk_range<IntVector::iterator> chunk)
    {
        for(auto cur = chunk.begin; cur != chunk.end; ++cur)
        {
            *cur = 2;
        }
    });

    // Sequentially verify that every element was written.
    std::for_each(data.begin(), data.end(), [](int& element)
    {
        ASSERT_EQ(2, element);
    });
}
コード例 #9
0
CGaussianCPD*
CGaussianCPD::CreateUnitFunctionCPD(const int *domain, int nNodes, CModelDomain* pMD)
{
    // Validate arguments before allocating anything.
    PNL_CHECK_IS_NULL_POINTER( domain );
    PNL_CHECK_IS_NULL_POINTER( pMD );
    PNL_CHECK_LEFT_BORDER( nNodes, 1 );

    // Look up the node types of every variable in the domain.
    intVector domainVec( domain, domain + nNodes );
    pConstNodeTypeVector nodeTypes;
    pMD->GetVariableTypes( domainVec, &nodeTypes );

    // Build the CPD, then swap its default distribution for a unit function.
    CGaussianCPD* pCPD = new CGaussianCPD( domain, nNodes, pMD );
    CGaussianDistribFun* pUnitFun =
	CGaussianDistribFun::CreateUnitFunctionDistribution( nNodes,
	&nodeTypes.front(), 0, 0);
    delete pCPD->m_CorrespDistribFun;
    pCPD->m_CorrespDistribFun = pUnitFun;
    return pCPD;
}
コード例 #10
0
// Builds a node-values container over nNodes observed nodes: records the node
// types, copies the raw values, marks every node as currently observed, and
// computes per-node offsets into the flat value array while validating sizes.
CNodeValues::CNodeValues( int nNodes, const CNodeType * const*ObsNodeTypes,
			 const valueVector& pValues )
			 :m_numberObsNodes(nNodes),
			 m_NodeTypes( ObsNodeTypes, ObsNodeTypes+nNodes ),
			 m_rawValues( pValues.begin(), pValues.end() )
{
    int i;
    int maxValue;
    int numValuesForNodes = 0;
    /*Create vector for NodeTypes of observed nodes, numbers of really
    observed nodes and offsets*/
    // Every node starts out flagged as observed (value 1).
    m_isObsNow = intVector( m_numberObsNodes, 1 );
    m_offset.assign( nNodes + 1, 0 );//last is for determinig the size of last node
    for( i = 0; i < m_numberObsNodes; i++ )
    {

	if ( ( m_NodeTypes[i] )->IsDiscrete() )
	{
	    /*checking up the values from pValues*/
	    // NOTE(review): only the first value at this node's offset is
	    // range-checked here; a discrete node contributes exactly one
	    // value to the flat array, so this appears to cover it — confirm.
	    maxValue = m_NodeTypes[i]->GetNodeSize();
	    if( pValues[m_offset[i]].GetInt() >= maxValue )
	    {
		PNL_THROW( COutOfRange, "value of node is more than range" );
		/*every observed value should be less maxValue*/
	    }
	    else
	    {
		numValuesForNodes++;
	    }
	}
	else
	{
	    // Continuous node: contributes GetNodeSize() values (no range check).
	    numValuesForNodes += (m_NodeTypes[i]->GetNodeSize());
	}
        // Offset of the NEXT node; m_offset[nNodes] ends up as the total size.
        m_offset[i + 1] = numValuesForNodes;
    }
    //check validity of input vector
    if( numValuesForNodes != int(m_rawValues.size()) )
    {
        PNL_THROW( CInconsistentSize, "size of vector with values"
	    " should corresponds node sizes of observed values" );
    }
}
コード例 #11
0
// Interactive console test for CPotential::ShrinkObservedNodes: reads a factor
// description (distribution/factor type, domain, node types, data) from the
// console via trs*Read, builds a tabular factor and an evidence over a subset
// of its nodes, shrinks the factor over that evidence, then runs a second,
// hard-coded Gaussian scenario (multiply, marginalize, specificity check).
// Returns a trsResult code; TRS_FAIL on guarded-memory corruption.
//
// NOTE(review): several heap objects created below are never deleted on the
// normal path — pEvidence (L~421), multFact, nextMultFact and marginalized
// from the Gaussian section — likely leaks; confirm ownership conventions.
int testShrinkObservedNodes()
{
    int i/*,j*/;
    int ret = TRS_OK;
    /*prepare to read the values from console*/
    EDistributionType dt;
    int disType = -1;
    EFactorType pt;
    int paramType = -1;
    /*read int disType corresponding DistributionType*/
    // Loop accepts only disType == 0 (tabular).
    while((disType<0)||(disType>0))/*now we have only Tabulars&Gaussian*/
    {
	trsiRead( &disType, "0", "DistributionType");
    }
    /*read int paramType corresponding FactorType*/
    while((paramType<0)||(paramType>2))
    {
	trsiRead( &paramType, "0", "FactorType");
    }
    dt = EDistributionType(disType);
    pt = EFactorType(paramType);
    int numberOfNodes = 0;
    /*read number of nodes in Factor domain*/
    while(numberOfNodes<=0)
    {
	trsiRead( &numberOfNodes, "1", "Number of Nodes in domain");
    }
    int numNodeTypes = 0;
    /*read number of node types in model*/
    while(numNodeTypes<=0)
    {
	trsiRead( &numNodeTypes, "1", "Number of node types in Domain");
    }
    //int seed1 = pnlTestRandSeed()/*%100000*/;
    /*create string to display the value*/
    /*	char *value = new char[20];
    value = _itoa(seed1, value, 10);
    trsiRead(&seed1, value, "Seed for srand to define NodeTypes etc.");
    delete []value;
    trsWrite(TW_CON|TW_RUN|TW_DEBUG|TW_LST, "seed for rand = %d\n", seed1);
    int *domain = (int *)trsGuardcAlloc(numberOfNodes, sizeof(int));
    CNodeType * allNodeTypes = (CNodeType*)trsGuardcAlloc(numNodeTypes,
    sizeof(CNodeType));
    //To generate the NodeTypes we use rand()% and creates only Tabular now
    for(i=0; i<numNodeTypes; i++)
    {
    allNodeTypes[i] = CNodeType(1, 1+rand()%(numNodeTypes+3));
    }
    */	
    
    /*load data for parameter::ShrinkObservedNodes from console*/
    intVector domain;
    domain.assign( numberOfNodes, 0 );
    nodeTypeVector allNodeTypes;
    allNodeTypes.assign( numNodeTypes, CNodeType() );
    /*read node types*/
    for(i=0; i < numNodeTypes; i++)
    {
	int IsDiscrete = -1;
	int NodeSize = -1;
	while((IsDiscrete<0)||(IsDiscrete>1))
	    /*now we have tabular & Gaussian nodes!! */
	    trsiRead(&IsDiscrete, "1", "Is the node discrete?");
	while(NodeSize<0)
	    trsiRead(&NodeSize, "2", "NodeSize of node");
	allNodeTypes[i] = CNodeType( IsDiscrete != 0, NodeSize );
    }
    const CNodeType **nodeTypesOfDomain = (const CNodeType**)
	trsGuardcAlloc(numberOfNodes, sizeof(CNodeType*));
    int numData = 1;
    int *Ranges = (int*)trsGuardcAlloc(numberOfNodes, sizeof(int));
    /*associate nodes to node types*/
    // Each node i gets a user-chosen node type; numData accumulates the
    // product of node sizes, i.e. the size of the tabular data array.
    for(i=0; i<numberOfNodes; i++)
    {
	domain[i] = i;
	int nodeAssociationToNodeType = -1;
	while((nodeAssociationToNodeType<0)||(nodeAssociationToNodeType>=
	    numNodeTypes))
	    trsiRead(&nodeAssociationToNodeType, "0", 
	    "node i has type nodeAssociationToNodeType");
	nodeTypesOfDomain[i] = &allNodeTypes[nodeAssociationToNodeType];
	//	nodeTypesOfDomain[i] = &allNodeTypes[rand()%numNodeTypes];
	Ranges[i] = nodeTypesOfDomain[i]->GetNodeSize();
	numData=numData*Ranges[i];
    }
    
    CModelDomain* pMD = CModelDomain::Create( allNodeTypes, domain );
    
    /*create factor according all information*/
    CFactor *pMyParam = NULL;
    float *data = (float *)trsGuardcAlloc(numData, sizeof(float));
    char *stringVal;/* = (char*)trsGuardcAlloc(50, sizeof(char));*/
    double val=0;
    /*read the values from console*/
    if(pt == ftPotential)
    {
	pMyParam = CTabularPotential::Create( &domain.front(), numberOfNodes, pMD );
	/*here we can create data by multiply on 0.1 - numbers are nonnormalized*/
	for(i=0; i<numData; i++)
	{
	    val = 0.1*i;
	    stringVal = trsDouble(val);
	    trsdRead(&val, stringVal, "value of i's data position");
	    data[i] = (float)val;
	    //data[i] = (float)rand()/1000;
	}
    }
    else
    {
    /*we can only read data from console - it must be normalized!!
	(according their dimensions) - or we can normalize it by function!*/
	// NOTE(review): if pt is neither ftPotential nor ftCPD, pMyParam
	// stays NULL and AllocMatrix below dereferences it — confirm that
	// paramType==1 cannot reach here without creating a factor.
	if(pt == ftCPD)
	    pMyParam = CTabularCPD::Create( &domain.front(), numberOfNodes, pMD );
	for(i=0; i<numData; i++)
	{
	    val = -1;
	    while((val<0)||(val>1))
	    {
		trsdRead(&val, "-1", "value of (2*i)'s data position");
	    }
	    data[i] = (float)val;
	}
    }
    //trsWrite(TW_CON|TW_RUN|TW_DEBUG|TW_LST, "data for Factor = %d\n", data[i]);
    pMyParam->AllocMatrix(data,matTable);
    int nObsNodes = 0;	/*rand()%numberOfNodes;*/
    while((nObsNodes<=0)||(nObsNodes>numberOfNodes))
    {
	trsiRead(&nObsNodes, "1", "Number of Observed Nodes");
    }
    // myHelpForEvidence tracks nodes not yet chosen, to forbid duplicates.
    intVector myHelpForEvidence = intVector(domain.begin(), domain.end() );
    int *ObsNodes = (int *)trsGuardcAlloc(nObsNodes, sizeof(int));
    valueVector TabularValues;
    TabularValues.assign( nObsNodes, (Value)0 );
    char *strVal;
    for(i=0; i<nObsNodes; i++)
    {
	//fixme - we need to have noncopy only different ObsNodes
	/*		j = rand()%(numberOfNodes-i);*/
	int numberOfObsNode = -1;
	strVal = trsInt(i);
        intVector::iterator j = std::find( myHelpForEvidence.begin(), myHelpForEvidence.end(), numberOfObsNode );
	// Re-prompt until the chosen node is in range and still unused.
	while((numberOfObsNode<0)||(numberOfObsNode>numberOfNodes)||
	    (j==myHelpForEvidence.end()))
	{
	    trsiRead(&numberOfObsNode, strVal,"Number of i's observed node");
	    j = std::find(myHelpForEvidence.begin(), myHelpForEvidence.end(),
		numberOfObsNode);
	}
	//ObsNodes[i] = myHelpForEvidence[j];
	myHelpForEvidence.erase( j );
	ObsNodes[i] = numberOfObsNode;
	int valueOfNode = -1;
	int maxValue = (*nodeTypesOfDomain[ObsNodes[i]]).GetNodeSize();
	while((valueOfNode<0)||(valueOfNode>=maxValue))
	{
	    trsiRead(&valueOfNode,"0","this is i's observed node value");
	}
	TabularValues[i].SetInt(valueOfNode);
	/*rand()%((*nodeTypesOfDomain[ObsNodes[i]]).pgmGetNodeSize());*/
    }
    // NOTE(review): pEvidence is never deleted — apparent leak.
    CEvidence* pEvidence = CEvidence::Create( pMD, nObsNodes, ObsNodes, TabularValues );
    myHelpForEvidence.clear();
    CNodeType *ObservedNodeType = (CNodeType*)trsGuardcAlloc(1, 
	sizeof(CNodeType));
    *ObservedNodeType = CNodeType(1,1);
    // The operation under test: shrink the factor over the observed nodes.
    CPotential *myTakedInFactor = static_cast<CPotential*>(pMyParam)->ShrinkObservedNodes(pEvidence);
    const int *myfactorDomain;
    int factorDomSize ;
    myTakedInFactor->GetDomain(&factorDomSize, &myfactorDomain);
#if 0
    CNumericDenseMatrix<float> *mySmallMatrix = static_cast<
        CNumericDenseMatrix<float>*>(myTakedInFactor->GetMatrix(matTable));
    int n;
    const float* mySmallData;
    mySmallMatrix->GetRawData(&n, &mySmallData);
    int nDims; // = mySmallMatrix->GetNumberDims();
    const int * mySmallRanges;
    mySmallMatrix->GetRanges(&nDims, &mySmallRanges);
    
    if(nDims!=numberOfNodes)
    {
	ret = TRS_FAIL;
	trsWrite(TW_CON|TW_RUN|TW_DEBUG|TW_LST, "nDims = %d\n", nDims);
    }
    else
    {
	int numSmallData = 1;
	for(i=0; i<nDims; i++)
	{
	    numSmallData = numSmallData*mySmallRanges[i];
	    trsWrite(TW_CON|TW_RUN|TW_DEBUG|TW_LST, "Range[%d] = %d\n", i, 
		mySmallRanges[i]);
	}
	for(i=0; i<numSmallData; i++)
	{	
	    trsWrite(TW_CON|TW_RUN|TW_DEBUG|TW_LST, "mySmallData[%d] = %f ",
		i, mySmallData[i]);
	}
    }
#endif
    //getchar();
    delete(myTakedInFactor);
    delete (pMyParam);
    delete pMD;
    //test gaussian parameter
    // Second scenario: two continuous node types (sizes 2 and 1), domain of
    // three nodes, exercising multiply/marginalize of Gaussian potentials.
    nodeTypeVector nTypes;
    nTypes.assign( 2, CNodeType() );
    nTypes[0] = CNodeType( 0, 2 );
    nTypes[1] = CNodeType( 0,1 );
    intVector domn = intVector(3,0);
    domn[1] = 1;
    domn[2] = 1;
    
    CModelDomain* pMD1 = CModelDomain::Create( nTypes, domn );
    
    domn[2] = 2;
    
    CPotential *BigFactor = CGaussianPotential::CreateUnitFunctionDistribution( 
	&domn.front(), domn.size(), pMD1,0 );
    float mean[] = { 1.0f, 3.2f};
    CPotential *SmallDelta = CGaussianPotential::CreateDeltaFunction( &domn.front(), 1, pMD1, mean, 1 );
    domn.resize( 2 );
    domn[0] = 1;
    domn[1] = 2;
    CPotential *SmallFunct = CGaussianPotential::Create( &domn.front(),
	domn.size(),  pMD1);
    float datH[] = { 1.1f, 2.2f, 3.3f };
    float datK[] = { 1.2f, 2.3f, 2.3f, 3.4f, 5.6f, 6.7f, 3.4f, 6.7f, 9.0f };
    SmallFunct->AllocMatrix( datH, matH );
    SmallFunct->AllocMatrix( datK, matK );
    static_cast<CGaussianPotential*>(SmallFunct)->SetCoefficient( 0.2f, 1 );
    // NOTE(review): multFact, nextMultFact and marginalized are never
    // deleted — apparent leaks.
    CPotential* multFact = BigFactor->Multiply( SmallDelta );
    CPotential* nextMultFact = multFact->Multiply( SmallFunct );
    domn[0] = 0;
    domn[1] = 1;
    CPotential *marginalized = static_cast<CPotential*>(nextMultFact->Marginalize( &domn.front(), domn.size() ));
    int isSpecific = marginalized->IsDistributionSpecific();
    if( isSpecific )
    {
	trsWrite(TW_CON|TW_RUN|TW_DEBUG|TW_LST, "\nGaussian Distribution is specific");
    }
    delete BigFactor;
    delete SmallFunct;
    delete SmallDelta;
    delete pMD1;
    
    // Check the guard zones around every trsGuardcAlloc'ed buffer; any
    // corruption fails the test and deliberately skips the frees.
    int ranges_memory_flag = trsGuardCheck(Ranges);
    int data_memory_flag = trsGuardCheck(data);
    int nodeTypesOfDomain_mem_b = trsGuardCheck(nodeTypesOfDomain);
    int ObsNodes_mem_b = trsGuardCheck(ObsNodes);
    int ObsNodeType_mem_b = trsGuardCheck(ObservedNodeType);
    if(((ranges_memory_flag)||(data_memory_flag)||
	(nodeTypesOfDomain_mem_b)||
	(ObsNodes_mem_b)||(ObsNodeType_mem_b)))
    {
	ret = TRS_FAIL;
	return trsResult( ret, ret == TRS_OK ? "No errors" : 
	"Bad test on ShrinkObservedNodes Method - memory");
    }
    else
    {
	trsGuardFree(ObservedNodeType);
	trsGuardFree(ObsNodes);
	trsGuardFree(nodeTypesOfDomain);
	trsGuardFree(data);
	trsGuardFree(Ranges);
    }			
    return trsResult( ret, ret == TRS_OK ? "No errors" : 
    "Bad test on ShrinkObservedNodes Method");
}
コード例 #12
0
// Initializes the engine for the given evidence: collects the really-observed
// nodes, determines the model's distribution type with and without evidence,
// and fills m_connNodes with the nodes the engine will operate on.
void CSpecPearlInfEngine::InitEngine(const CEvidence* pEvidence)
{
    //determine distribution type for messages
//    intVector         obsNds;
//    pConstValueVector obsNdsVals;

    const int nObsNodes = pEvidence->GetNumberObsNodes();
    
    const int *ObsNodes = pEvidence->GetAllObsNodes();
    
    const int *ReallyObs = pEvidence->GetObsNodesFlags();
    
    intVector ReallyObsNodes;

    int i = 0;

    // Keep only the nodes whose observation flag is set.
    for ( ; i < nObsNodes; i++ )
    {
        if( ReallyObs[i] )
        {
            ReallyObsNodes.push_back(ObsNodes[i]);
        }
    }
    
    int NumReallyObs = ReallyObsNodes.size();

    pConstNodeTypeVector nodeTypes(m_numOfNdsInModel);

    for ( i = 0; i < m_numOfNdsInModel; i++ )
    {
        nodeTypes[i] = m_pGraphicalModel->GetNodeType(i);
    }

    // NOTE(review): &ReallyObsNodes.front() is undefined behavior when the
    // vector is empty (no observed nodes) — confirm callers guarantee
    // non-empty evidence or that the callee ignores the pointer when the
    // count is 0.
    m_modelDt = pnlDetermineDistributionType( m_numOfNdsInModel, NumReallyObs,
                                              &ReallyObsNodes.front(), &nodeTypes.front() );
    

    // Same query with 0 observed nodes: the model's intrinsic type.
    EDistributionType dtWithoutEv = pnlDetermineDistributionType( m_numOfNdsInModel, 0,
                                                                  &ReallyObsNodes.front(), &nodeTypes.front() );

    switch (dtWithoutEv)
    {
        //create vector of connected nodes
        case dtTabular: case dtGaussian:
        {
            // Homogeneous model: every node participates.
            m_connNodes = intVector(m_numOfNdsInModel);

            for( i = 0; i < m_numOfNdsInModel; ++i )
            {
                m_connNodes[i] = i;
            }

            break;
        }
        case dtCondGaussian:
        {	
            int loc;
            
            // Keep only nodes that are observed AND discrete.
            for( i = 0; i < m_numOfNdsInModel; i++ )
            {
                loc = std::find( ReallyObsNodes.begin(), ReallyObsNodes.end(),
                    i ) - ReallyObsNodes.begin();
            
                // NOTE(review): signed/unsigned comparison (int vs size())
                // below; also ReallyObsNodes[loc] == i when found, so the
                // type check is effectively on node i.
                if(( loc < ReallyObsNodes.size() )&&
                    ( nodeTypes[ReallyObsNodes[loc]]->IsDiscrete() ))
                {
                    m_connNodes.push_back(i);
                }
            }
            
            break;
        }
        default:
        {
            PNL_THROW( CInconsistentType, "only for fully tabular or fully gaussian models" )
                break;
        }
    }
}
コード例 #13
0
// Assembles the most-probable-explanation evidence for the whole model by
// querying each active component engine and translating component-local node
// indices back to model indices via the decomposition tables.
// (Template header for CExInfEngine is above this excerpt.)
CEvidence const *CExInfEngine< INF_ENGINE, MODEL, FLAV, FALLBACK_ENGINE1, FALLBACK_ENGINE2 >::GetMPE() const
{
    int i, j, k, s;
    intVector conv_nodes;
    valueVector conv_values;

    intVector nodes;
    pConstValueVector values;
    pConstNodeTypeVector node_types;

    CEvidence const *ev;

    if ( PNL_IS_EXINFENGINEFLAVOUR_JTREEKLUDGE( FLAV ) )
    {
        // Junction-tree workaround: a query may not fit in one clique, so
        // greedily cover the query with cliques and run MarginalNodes once
        // per partition cell.
        intVecVector clqs;
        intVector dummy;
        intVector mask;
        intVector tmpdom;
        int clqs_lim, coun;
        int best_clq, best_val;
        intVecVector partition;
        intVector partition_clq;

        dummy.resize( 1 );

        for ( i = active_components.size(); i--; )
        {
            intVector tmpclq;
            intVector tmpmask;

            k = active_components[i];
            if ( engines[k]->m_InfType != itJtree )
            {
                // Non-jtree engine: take its MPE directly.
                ev = engines[k]->GetMPE();
                ev->GetObsNodesWithValues( &nodes, &values, &node_types );
                for ( j = nodes.size(); j--; )
                {
                    conv_nodes.push_back( decomposition[k][nodes[j]] );
                    conv_values.push_back( Value( *values[j] ) );
                }
                continue;
            }
            // clqs[j] = cliques containing the j-th query node.
            clqs.resize( 0 );
            clqs.resize( query_dispenser[k].size() );

            // tmpmask[node] = 1-based query position of that node, 0 if absent.
            tmpmask.resize( 0 );
            tmpmask.assign( decomposition[k].size(), 0 );

            for ( j = query_dispenser[k].size(), clqs_lim = 0; j--; )
            {
                tmpmask[query_dispenser[k][j]] = j + 1;
                dummy[0] = query_dispenser[k][j];
                ((CJtreeInfEngine *)engines[k])->GetClqNumsContainingSubset( dummy, &clqs[j] );

                for ( s = clqs[j].size(); s--; )
                {
                    if ( clqs[j][s] > clqs_lim )
                    {
                        clqs_lim = clqs[j][s];
                    }
                }
            }

            // mask[clique] = how many uncovered query nodes it contains.
            mask.resize( 0 );
            mask.assign( ++clqs_lim, 0 );

            for ( j = query_dispenser[k].size(); j--; )
            {
                for ( s = clqs[j].size(); s--; )
                {
                    ++mask[clqs[j][s]];
                }
            }

            partition.resize( 0 );
            partition_clq.resize( 0 );

            // Greedy set cover: repeatedly pick the clique covering the most
            // remaining query nodes.
            for ( coun = query_dispenser[k].size(); coun; )
            {
                best_clq = clqs_lim;
                best_val = 0;
                for ( j = clqs_lim; j--; )
                {
                    if ( mask[j] > best_val )
                    {
                        best_val = mask[best_clq = j];
                    }
                }

                if ( best_val == query_dispenser[k].size() )
                {
                    // query fits into single clique.  No workaround needed for this component.
                    engines[k]->MarginalNodes( &query_dispenser[k].front(), query_dispenser[k].size() );
                    // NOTE(review): jumps into the body of the loop below
                    // with j == 0 so the harvest code runs exactly once.
                    j = 0; goto brk;
                }

                coun -= best_val;

                partition_clq.push_back( best_clq );
                partition.push_back( intVector() );
                tmpclq.resize( 0 );
                ((CJtreeInfEngine *)engines[k])->GetJTreeNodeContent( best_clq, &tmpclq );

                // Claim the query nodes in this clique and decrement their
                // counts in every other clique; negate tmpmask to mark done.
                for ( j = tmpclq.size(); j--; )
                {
                    if ( tmpmask[tmpclq[j]] > 0 )
                    {
                        partition.back().push_back( tmpclq[j] );
                        for ( s = clqs[tmpmask[tmpclq[j]] - 1].size(); s--; )
                        {
                            --mask[clqs[tmpmask[tmpclq[j]] - 1][s]];
                        }
                        tmpmask[tmpclq[j]] = - tmpmask[tmpclq[j]];
                    }
                }
            }

            // Harvest the MPE of each partition cell.
            for ( j = partition.size(); j--; )
            {
                engines[k]->MarginalNodes( &partition[j].front(), partition[j].size() );
brk:
                ev = engines[k]->GetMPE();
                ev->GetObsNodesWithValues( &nodes, &values, &node_types );
                for ( s = nodes.size(); s--; )
                {
                    conv_nodes.push_back( decomposition[k][nodes[s]] );
                    conv_values.push_back( Value( *values[s] ) );
                }
            }
        }
    }
    else
    {
        // Simple path: concatenate each component engine's MPE, translating
        // node indices back to the full model.
        for ( i = active_components.size(); i--; )
        {
            ev = engines[active_components[i]]->GetMPE();
            ev->GetObsNodesWithValues( &nodes, &values, &node_types );
            for ( j = nodes.size(); j--; )
            {
                conv_nodes.push_back( decomposition[active_components[i]][nodes[j]] );
                conv_values.push_back( Value( *values[j] ) );
            }
        }
    }

    // Cache and return the combined evidence (mutable member in const method).
    return MPE_ev = CEvidence::Create( graphical_model, conv_nodes, conv_values );
}
コード例 #14
0
// Marginalizes over the given query nodes: dispatches sub-queries to the
// component engines, then multiplies their query JPDs together (over the
// full model domain) and normalizes the result into query_JPD.
// (Template header for CExInfEngine is above this excerpt.)
void CExInfEngine< INF_ENGINE, MODEL, FLAV, FALLBACK_ENGINE1, FALLBACK_ENGINE2 >::MarginalNodes( int const *query, int querySize, int notExpandJPD )
{
    int i, j;

    if( querySize== 0 )
    {
        PNL_THROW( CInconsistentSize, "query nodes vector should not be empty" );
    }

    saved_query.assign( query, query + querySize );

    // Lazily enter an empty evidence if the caller never provided one.
    if ( evidence == 0 )
    {
        evidence_mine = true;
        EnterEvidence( evidence = CEvidence::Create( graphical_model, intVector(), valueVector() ) );
    }

    active_components.resize( 0 );

    // Split the query per component using the orig->component index maps.
    query_dispenser.resize( 0 );
    query_dispenser.resize( decomposition.size() );

    for ( i = 0; i < querySize; ++i )
    {
        query_dispenser[orig2comp[query[i]]].push_back( orig2idx[query[i]] );
    }

    for ( i = decomposition.size(); i--; )
    {
        if ( query_dispenser[i].size() )
        {
            active_components.push_back( i );
        }
    }

	// Run each component's marginalization and determine the common
	// distribution type, ignoring scalar results until a real type shows up.
	EDistributionType dt=dtTabular;
	bool determined = false;
	
	for ( i = active_components.size(); i--; )
    {
        j = active_components[i];
		engines[j] -> MarginalNodes( &query_dispenser[j].front(), query_dispenser[j].size(), 1 );
		if(!determined)
		{
			dt = engines[j]->GetQueryJPD()->GetDistributionType();
			if(dt != dtScalar)
				determined = true;
		}
    }
	if(!(dt == dtTabular || dt == dtGaussian || dt == dtScalar))
		PNL_THROW(CNotImplemented, "we can not support this type of potentials");

    CPotential *pot1;
    CPotential const *pot2=NULL;
    intVector dom;
	// Positions within the query that are observed under the evidence.
	intVector obsIndices;
	for(i=0; i<querySize; i++)
	{
		if(evidence->IsNodeObserved(query[i]))
		{
			obsIndices.push_back(i);
		}
	}

	if(query_JPD)
		delete query_JPD;
	if ( active_components.size() > 1 )
    {
        // Multi-component query: start from a unit potential over the query
        // and multiply in each component's (re-domained) JPD.
        if((dt == dtTabular) || (dt == dtScalar))
		{
			query_JPD = CTabularPotential::CreateUnitFunctionDistribution( saved_query, graphical_model->GetModelDomain(), 1, obsIndices );
		}
		else if(dt == dtGaussian)
        {
			query_JPD = CGaussianPotential::CreateUnitFunctionDistribution( saved_query, graphical_model->GetModelDomain(), 1, obsIndices );
		}

//        query_JPD = pot2->ShrinkObservedNodes( evidence );
//        delete( pot2 );

        for ( i = active_components.size(); i--; )
        {
            pot2 = engines[active_components[i]]->GetQueryJPD();
            pot2->GetDomain( &dom );
            // Translate component-local node indices to model indices.
            for ( j = dom.size(); j--; )
            {
                dom[j] = decomposition[active_components[i]][dom[j]];
            }
            pot1 = (CPotential *)CPotential::CopyWithNewDomain( pot2, dom, graphical_model->GetModelDomain() );
            *query_JPD *= *pot1;
            delete( pot1 );
        }
    }
    else
    {
        // Single component: just copy its JPD onto the model domain.
        pot2 = engines[active_components[0]]->GetQueryJPD();
        pot2->GetDomain( &dom );

        for ( j = dom.size(); j--; )
        {
           dom[j] = decomposition[active_components[0]][dom[j]];
        }
        query_JPD = (CPotential *)CPotential::CopyWithNewDomain( pot2, dom, graphical_model->GetModelDomain() );
    }
	query_JPD->Normalize();
}
コード例 #15
0
ファイル: AMRF2PearlInf.cpp プロジェクト: JacobCWard/PyPNL
// Builds a two-layer pairwise MRF for super-resolution on a numOfRows x
// numOfCols grid: layer 0 (indices 0..R*C-1) and layer 1 (R*C..2*R*C-1).
// Cliques: one vertical inter-layer edge per pixel (direction 2), plus
// right-neighbor (direction 0) and down-neighbor (direction 1) edges within
// layer 0, enumerated corner/side/interior case by case.
CMRF2* 
SuperResolution2lMRF2( int numOfRows, int numOfCols, 
                       CModelDomain* pModelDomain,
                       const pnlVector< floatVecVector >& nodeVals )
{
    const int numOfNds  = 2*numOfRows*numOfCols;
    // (R-1)*C down edges + (C-1)*R right edges + R*C inter-layer edges.
    const int numOfClqs = ( numOfRows - 1 )*numOfCols
        + ( numOfCols - 1 )*numOfRows + numOfRows*numOfCols;
    
    intVecVector clqs( numOfClqs, intVector(2) );
    intVector    clqDirs(numOfClqs);
    
    // Degenerate 1-column / 1-row grids have no right / down neighbors.
    bool right = numOfCols == 1 ? false : true;
    bool down  = numOfRows == 1 ? false : true;
    
    int i = 0, j = 0;
    
    for( ; i < numOfNds/2; ++i )
    {
        bool set = false;
        
        // Row and column of pixel i within the grid.
        int check1 = i/numOfCols;
        int check2 = i%numOfCols;
        
        // Inter-layer clique: pixel i with its counterpart in layer 1.
        clqs[j][0] = i;
        clqs[j][1] = i + numOfCols*numOfRows;
        clqDirs[j] = 2;
        
        j++;
        
        //upper left corner
        if( (check1 == 0) && (check2 == 0) )
        {
            if(right)
            {
                clqs[j][0] = i;
                clqs[j][1] = i + 1;
                clqDirs[j] = 0;
                
                j++;
            }
            
            if(down)
            {
                clqs[j][0] = i;
                clqs[j][1] = i + numOfCols;
                clqDirs[j] = 1;
                
                j++;
            }
            
            set = true;
        }
        
        //upper right corner
        if( ( check1 == 0 ) && ( check2 == ( numOfCols - 1 ) ) && ( !set ) )
        {
            if(down)
            {
                clqs[j][0] = i;
                clqs[j][1] = i + numOfCols;
                clqDirs[j] = 1;
                
                j++;
            }
            
            set = true;
        }
        
        //lower left corner
        if( ( check1 == (numOfRows - 1) ) && ( check2 == 0 ) && ( !set ) )
        {
            if(right)
            {
                clqs[j][0] = i;
                clqs[j][1] = i + 1;
                clqDirs[j] = 0;
                
                j++;
            }
            
            set = true;
        }
        
        //lower right corner
        if( ( check1 == ( numOfRows - 1 ) ) && ( check2 == ( numOfCols - 1 ) )
            && ( !set ) )
        {
            // No right or down neighbor; only the inter-layer clique applies.
            set = true;
        }
        
        //left side
        if( ( check2 == 0 ) && ( !set ) )
        {
            if( right )
            {
                clqs[j][0] = i;
                clqs[j][1] = i + 1;
                clqDirs[j] = 0;
                
                j++;
            }
            
            clqs[j][0] = i;
            clqs[j][1] = i + numOfCols;
            clqDirs[j] = 1;
            
            j++;
            
            set = true;
        }
        
        //right side
        if( ( check2 == ( numOfCols - 1 ) ) && ( !set ) )
        {
            clqs[j][0] = i;
            clqs[j][1] = i + numOfCols;
            clqDirs[j] = 1;
            
            j++;
            
            set = true;
        }
        
        //upper side
        if( ( check1 == 0 ) && ( !set ) )
        {
            clqs[j][0] = i;
            clqs[j][1] = i + 1;
            clqDirs[j] = 0;
            
            j++;
            
            if(down)
            {
                clqs[j][0] = i;
                clqs[j][1] = i + numOfCols;
                clqDirs[j] = 1;
                
                j++;
            }
            
            set = true;
        }
        
        //lower side
        if( ( check1 == ( numOfRows - 1 ) ) && ( !set ) )
        {
            clqs[j][0] = i;
            clqs[j][1] = i + 1;
            clqDirs[j] = 0;
            
            j++;
            
            set = true;
        }
        
        //inner part of the matrix
        if(!set)
        {
            clqs[j][0] = i;
            clqs[j][1] = i + 1;
            clqDirs[j] = 0;
            
            j++;
            
            clqs[j][0] = i;
            clqs[j][1] = i + numOfCols;
            clqDirs[j] = 1;
            
            j++;
        }
    }

    CMRF2* pModel = CMRF2::Create( clqs, pModelDomain );

    // allocate potentials and generate table for them

    for( i = 0; i < numOfClqs; ++i )
    {
        // Potential for clique i, built from the per-node value tables and
        // the clique's direction (0 = right, 1 = down, 2 = inter-layer).
        CTabularPotential* pTPot = SuperResolutionTabPot( clqs[i],
            clqDirs[i], nodeVals, pModelDomain );

        pModel->AttachFactor(pTPot);
    }

    return pModel;
}
コード例 #16
0
ファイル: AGibbsInfEngine.cpp プロジェクト: billryan/OpenPNL
// Smoke test: build a BNet with a single 2-D Gaussian node, draw Gibbs
// samples from it and dump the resulting query marginal.
// There is no reference engine to compare against here, so 'eps' is kept
// only for signature symmetry with the other Gibbs tests; the function
// always reports success (returns 1).
int GibbsForSingleGaussian(float eps)
{
    (void)eps; // unused: no reference result to compare within tolerance

    std::cout<<std::endl<<"Using Gibbs for testing samples from gaussian"<<std::endl;

    // Model: one continuous node of dimension 2, no edges.
    int nnodes = 1;
    int numnt = 1;
    CNodeType *nodeTypes = new CNodeType[numnt];
    nodeTypes[0] = CNodeType(0,2); // continuous, dimension 2

    intVector nodeAssociation = intVector(nnodes,0);

    CGraph *graph = CGraph::Create(nnodes, 0, NULL, NULL);

    CBNet *pBnet = CBNet::Create( nnodes, numnt, nodeTypes,
        &nodeAssociation.front(), graph );
    pBnet->AllocFactors();
    pBnet->AllocFactor(0);

    // N(mean, cov) with mean = (0,0) and mild positive correlation.
    float mean[2] = {0.0f, 0.0f};
    intVector ranges(2,1);
    ranges[0] = 2;

    CNumericDenseMatrix<float> *mean0 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), mean);

    ranges[1] = 2;
    float cov[4] = {1.0f, 0.3f, 0.3f, 1.0f};
    CNumericDenseMatrix<float> *cov0 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), cov);

    pBnet->GetFactor(0)->AttachMatrix( mean0, matMean );
    pBnet->GetFactor(0)->AttachMatrix( cov0, matCovariance );

    CGibbsSamplingInfEngine *pGibbsInf = CGibbsSamplingInfEngine::Create( pBnet );
    pGibbsInf->SetBurnIn( 100 );
    pGibbsInf->SetMaxTime( 5000 );

    // Generate one sample, then hide node 0 so the engine must infer it.
    pEvidencesVector evidences;
    pBnet->GenerateSamples( &evidences, 1 );

    const int ndsToToggle[] = { 0 };
    evidences[0]->ToggleNodeState( 1, ndsToToggle );

    intVector query(1,0);

    intVecVector queryes(1);
    queryes[0].push_back(0);
    pGibbsInf->SetQueries( queryes );
    pGibbsInf->EnterEvidence( evidences[0] );
    pGibbsInf->MarginalNodes( &query.front(), query.size() );

    const CPotential *pQueryPot1 = pGibbsInf->GetQueryJPD();

    std::cout<<"result of gibbs"<<std::endl<<std::endl;
    pQueryPot1->Dump();

    delete evidences[0];
    delete pGibbsInf;
    delete pBnet;
    delete []nodeTypes;

    return 1;

}
コード例 #17
0
ファイル: AGibbsInfEngine.cpp プロジェクト: billryan/OpenPNL
// Compare Gibbs sampling against exact naive inference on a tiny 3-node
// Gaussian polytree (0 -> 1 -> 2).  Returns the (non-zero on success)
// result of comparing the two query marginals within tolerance 'eps'.
int GibbsForSimplestGaussianBNet( float eps)
{
    std::cout<<std::endl<<"Gibbs for simplest gaussian BNet (3 nodes) "<<std::endl;

    // Node 0 is a scalar Gaussian; nodes 1 and 2 are 2-D Gaussians.
    int nnodes = 3;
    int numnt = 2;
    CNodeType *nodeTypes = new CNodeType[numnt];
    nodeTypes[0] = CNodeType(0,1);
    nodeTypes[1] = CNodeType(0,2);
    intVector nodeAssociation = intVector(nnodes,1);
    nodeAssociation[0] = 0;
    int nbs0[] = { 1 };
    int nbs1[] = { 0, 2 };
    int nbs2[] = { 1 };
    ENeighborType ori0[] = { ntChild };
    ENeighborType ori1[] = { ntParent, ntChild  };
    ENeighborType ori2[] = { ntParent };
    int *nbrs[] = { nbs0, nbs1, nbs2 };
    ENeighborType *orient[] = { ori0, ori1, ori2 };

    intVector numNeighb = intVector(3);
    numNeighb[0] = 1;
    numNeighb[1] = 2;
    numNeighb[2] = 1;

    CGraph *graph = CGraph::Create(nnodes, &numNeighb.front(), nbrs, orient);

    CBNet *pBnet = CBNet::Create( nnodes, numnt, nodeTypes,
        &nodeAssociation.front(), graph );
    pBnet->AllocFactors();

    for(int i = 0; i < nnodes; i++ )
    {
        pBnet->AllocFactor(i);
    }

    // Node 0: scalar N(0.0, 0.3).
    floatVector data(1,0.0f);
    intVector ranges(2,1);

    CNumericDenseMatrix<float> *mean0 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    data[0] = 0.3f;
    CNumericDenseMatrix<float> *cov0 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    pBnet->GetFactor(0)->AttachMatrix( mean0, matMean );
    pBnet->GetFactor(0)->AttachMatrix( cov0, matCovariance );

    // Node 1: 2-D Gaussian with the scalar node 0 as parent.
    ranges[0] = 2;
    data.resize(2);
    data[0] = -1.0f;
    data[1] = 0.0f;
    CNumericDenseMatrix<float> *mean1 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    ranges[1] = 2;
    data.resize(4);
    data[0] = 1.0f;
    data[1] = 0.1f;
    data[3] = 3.0f;
    data[2] = 0.1f;
    CNumericDenseMatrix<float> *cov1 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    ranges[1] =1;
    data.resize(2);
    data[0] = 1.0f;
    data[1] = 0.5f;
    CNumericDenseMatrix<float> *weight1 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    pBnet->GetFactor(1)->AttachMatrix( mean1, matMean );
    pBnet->GetFactor(1)->AttachMatrix( cov1, matCovariance );
    pBnet->GetFactor(1)->AttachMatrix( weight1, matWeights,0 );

    // Node 2: 2-D Gaussian with the 2-D node 1 as parent.
    ranges[0] = 2;
    data.resize(2);
    data[0] = 1.0f;
    data[1] = 20.5f;
    CNumericDenseMatrix<float> *mean2 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    ranges[1] = 2;
    data.resize(4);
    data[0] = 1.0f;
    data[1] = 0.0f;
    data[3] = 9.0f;
    data[2] = 0.0f;
    CNumericDenseMatrix<float> *cov2 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    // BUG FIX: the weight matrix is 2x2 here (ranges is still {2,2}), so
    // 'data' must hold 4 values.  The original resized to 2 and then wrote
    // data[2]/data[3] out of bounds (undefined behavior), and the matrix
    // constructor would read 4 floats from a 2-element vector.
    data.resize(4);
    data[0] = 1.0f;
    data[1] = 3.5f;
    data[2] = 1.0f;
    data[3] = 0.5f;
    CNumericDenseMatrix<float> *weight2 =
        CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &data.front());

    pBnet->GetFactor(2)->AttachMatrix( mean2, matMean );
    pBnet->GetFactor(2)->AttachMatrix( cov2, matCovariance );
    pBnet->GetFactor(2)->AttachMatrix( weight2, matWeights,0 );

    // Sample once, hide nodes 0 and 1, then query node 1 with both engines.
    pEvidencesVector evidences;
    pBnet->GenerateSamples( &evidences, 1 );

    const int ndsToToggle[] = { 0, 1 };
    evidences[0]->ToggleNodeState( 2, ndsToToggle );

    intVector query(1,1);

    CNaiveInfEngine *pNaiveInf = CNaiveInfEngine::Create(pBnet);
    pNaiveInf->EnterEvidence( evidences[0] );
    pNaiveInf->MarginalNodes( &query.front(), query.size() );

    CGibbsSamplingInfEngine *pGibbsInf = CGibbsSamplingInfEngine::Create( pBnet );
    intVecVector queryes(1);
    queryes[0].push_back(1);
    pGibbsInf->SetQueries( queryes );
    pGibbsInf->EnterEvidence( evidences[0] );
    pGibbsInf->MarginalNodes( &query.front(), query.size() );

    const CPotential *pQueryPot1 = pGibbsInf->GetQueryJPD();
    const CPotential *pQueryPot2 = pNaiveInf->GetQueryJPD();
    std::cout<<"result of gibbs"<<std::endl<<std::endl;
    pQueryPot1->Dump();
    std::cout<<"result of naive"<<std::endl;
    pQueryPot2->Dump();

    int ret = pQueryPot1->IsFactorsDistribFunEqual( pQueryPot2, eps, 0 );

    delete evidences[0];
    delete pNaiveInf;
    delete pGibbsInf;
    delete pBnet;
    // BUG FIX: nodeTypes was leaked in the original (the sibling test
    // GibbsForSingleGaussian releases it the same way).
    delete []nodeTypes;

    return ret;

}
コード例 #18
0
ファイル: AMarginalize.cpp プロジェクト: JacobCWard/PyPNL
// Tests CTabularPotential::Marginalize on a 4-node tabular potential:
// sum-marginalization (maximize=0) and max-marginalization (maximize=1)
// down to the sub-domain {1,3}, each checked against hand-computed values.
// Afterwards exercises assorted CTabularPotential methods (clone, normalize,
// dense/sparse conversion, IsValid on a deliberately damaged matrix).
int testMarginalize()
{
    int ret = TRS_OK;
    
    const int nnodes = 4;
    const int numnt = 2;
    
    intVector nodeAssociation = intVector( nnodes );
    
    nodeTypeVector nodeTypes;
    nodeTypes.assign( numnt, CNodeType() );
    
    nodeTypes[0].SetType(1, 2);
    nodeTypes[1].SetType(1, 3);
    
    // Nodes 0-2 are binary, node 3 is ternary -> table of 2*2*2*3 = 24 cells.
    nodeAssociation[0] = 0;
    nodeAssociation[1] = 0;
    nodeAssociation[2] = 0;
    nodeAssociation[3] = 1;
    
    CModelDomain* pMD = CModelDomain::Create( nodeTypes, nodeAssociation );
    
    int *domain = (int*)trsGuardcAlloc( nnodes, sizeof(int) );
    domain[0]=0; domain[1]=1; domain[2]=2; domain[3]=3;
    
    // Fill the table with 0.00, 0.01, ... 0.23 for easy hand verification.
    float* data = (float*)trsGuardcAlloc( 24, sizeof(float) );
    for (int i0=0; i0<24; data[i0]=i0*0.01f, i0++){};
    
    EMatrixType mType=matTable;
    
    CTabularPotential *pxParam1=CTabularPotential::Create( domain, nnodes, pMD); 
    
    pxParam1->AllocMatrix(data, mType);
    pxParam1->Dump();
    
    int domSize=2;
    int *pSmallDom = (int*)trsGuardcAlloc( domSize, sizeof(int) );
    pSmallDom[0] = 1;	pSmallDom[1] = 3;
    
    // Sum-marginalize to nodes {1,3}.
    CFactor *pxParam2=pxParam1->Marginalize(pSmallDom, domSize, 0);
    
    const CNumericDenseMatrix<float> *pxMatrix = static_cast<
        CNumericDenseMatrix<float>*>(pxParam2->GetMatrix(mType));
    
    const float* dmatrix1;
    int n;
    pxMatrix->GetRawData(&n, &dmatrix1);
    
    float *testdata1=new float[6];
    testdata1[0]=0.30f; testdata1[1]=0.34f; testdata1[2]=0.38f;
    testdata1[3]=0.54f; testdata1[4]=0.58f; testdata1[5]=0.62f;
    
    for(int i1 = 0; i1 < n; i1++)
    {	// Test the values...
        if(fabs(testdata1[i1] - dmatrix1[i1]) > eps)
        {
            // NOTE(review): this early return leaks the potentials and the
            // trsGuard allocations; tolerable in a test, but worth knowing.
            return trsResult(TRS_FAIL, "data doesn't agree at max=0");
        }
    }
    
    // Max-marginalize to nodes {1,3}.
    CFactor *pxParam3=pxParam1->Marginalize(pSmallDom, domSize, 1);
    
    const CNumericDenseMatrix<float> *pxMatrix1 = static_cast<
        CNumericDenseMatrix<float>*>(pxParam3->GetMatrix(mType));
    
    const float *dmatrix2;
    pxMatrix1->GetRawData(&n, &dmatrix2);
    float *testdata2 = new float[6];
    testdata2[0]=0.15f;
    testdata2[1]=0.16f; testdata2[2]=0.17f;
    testdata2[3]=0.21f; testdata2[4]=0.22f; testdata2[5]=0.23f;
    
    for(int i2 = 0; i2 < 6; i2++)
    {	// Test the values...
        if( fabs(dmatrix2[i2] - testdata2[i2]) > eps)
        {
            return trsResult(TRS_FAIL, "data doesn't agree at max=1");
        }
    }

    //we can check some methods of Tabular
    CTabularPotential* pUniPot = 
        CTabularPotential::CreateUnitFunctionDistribution( domain, nnodes,
        pMD, 1 );
    CTabularPotential* pCopyUniPot = static_cast<CTabularPotential*>(
        pUniPot->CloneWithSharedMatrices());
    CTabularPotential* pNormUniPot = static_cast<CTabularPotential*>(
        pUniPot->GetNormalized());
    pUniPot->Dump();
    pUniPot->ConvertToDense();
    pUniPot->ConvertToSparse();
    (*pCopyUniPot) = (*pCopyUniPot); // self-assignment must be a no-op
    pxParam1->AllocMatrix(data, matTable);
    intVector indices;
    indices.assign(4,0);
    // A negative probability makes the potential invalid on purpose.
    pxParam1->GetMatrix(matTable)->SetElementByIndexes(-1.0f,&indices.front());
    //we've just damaged the potential
    std::string s;
    if( pxParam1->IsValid(&s) )
    {
        ret = TRS_FAIL;
    }
    else
    {
        std::cout<<s<<std::endl;
    }
    intVector pos;
    floatVector vals;
    intVector offsets;
    pUniPot->GetMultipliedDelta(&pos, &vals, &offsets);

    delete pNormUniPot;
    delete pCopyUniPot;
    delete pUniPot;
    
    delete pxParam1; 
    delete pxParam2; 
    delete pxParam3;
    // BUG FIX: testdata1/testdata2 were allocated with new[], so they must be
    // released with delete[] (plain delete on a new[] pointer is UB).
    delete [] testdata1; 
    delete [] testdata2;
    delete pMD;
    
    int data_memory_flag = trsGuardCheck( data );
    int domain_memory_flag = trsGuardCheck( domain );
    int Smalldomain_memory_flag = trsGuardCheck( pSmallDom );
    
    trsGuardFree( data );
    trsGuardFree( domain );
    trsGuardFree( pSmallDom );
    
    if( data_memory_flag || domain_memory_flag || Smalldomain_memory_flag )
    {
        return trsResult( TRS_FAIL, "Dirty memory");
    }
    
    return trsResult( ret, ret == TRS_OK ? "No errors" : "Marginalize FAILED");
}
コード例 #19
0
// Initializes all Pearl message structures for a new piece of evidence:
//  - m_selfMessages[i]: delta potential for observed nodes, unit potential
//    for hidden ones;
//  - m_beliefs[0/1][i]: clones of the self message;
//  - m_curMessages[0/1][i][j]: unit potentials per neighbor (for Gaussian
//    models, moment form for parent (pi) messages, canonical otherwise);
//  - family distributions (BNet) or edge potentials (MRF2).
void CSpecPearlInfEngine::InitMessages( const CEvidence* evidence )
{
    int i,j;

    const int  nObsNodes = evidence->GetNumberObsNodes();

    const int* ObsNodes = evidence->GetAllObsNodes();
    
    const int* ReallyObs = evidence->GetObsNodesFlags();
    
    // Mark which model nodes actually carry evidence.
    m_areReallyObserved = intVector( m_numOfNdsInModel, 0) ;
    
    for ( i =0; i < nObsNodes; i++ )
    {
        if( ReallyObs[i] )
        {
            m_areReallyObserved[ObsNodes[i]] = 1;
        }
    }
    
    intVector obsNds = intVector( 1, 0 );
    int                  numOfNeighb;
    const int*           neighbors;
    const ENeighborType* orientation;
    intVector dom;
    dom.assign(1,0);
    
    //init messages to nbrs
    if( m_modelDt == dtTabular )
    {
        for( i = 0; i < m_numOfNdsInModel; i++ )
        {
            dom[0] = i;
            if( m_areReallyObserved[i] )
            {
                // Observed tabular node: delta distribution on its value.
                const Value* valP = evidence->GetValue(i);
                int val = valP->GetInt();
                int nodeSize = m_pModelDomain->GetVariableType(i)->GetNodeSize();
                floatVector prob;
                prob.assign( nodeSize, 0 );
                prob[val] = 1.0f;
                
                m_selfMessages[i] = CTabularPotential::Create( dom,
                    m_pModelDomain, &prob.front() );
            }
            else
            {
                m_selfMessages[i] = 
                    CTabularPotential::CreateUnitFunctionDistribution( dom,
                    m_pModelDomain, m_bDense );   
            }
            m_beliefs[0][i] = static_cast<CPotential*>(m_selfMessages[i]->Clone());
            m_beliefs[1][i] = static_cast<CPotential*>(m_selfMessages[i]->Clone());


            m_pModelGraph->GetNeighbors( i,  &numOfNeighb, &neighbors, &orientation );

            for( j = 0; j < numOfNeighb; j++ )
            {
                m_curMessages[0][i][j] = 
                    CTabularPotential::CreateUnitFunctionDistribution( dom,
                    m_pModelDomain, m_bDense );
                m_curMessages[1][i][j] = static_cast<CPotential*>(m_curMessages[0][i][j]->Clone());
            }
        }
    }
    else //m_modelDt == dtGaussian
    {
        for( i = 0; i < m_numOfNdsInModel; i++ )
        {
            dom[0] = i;
            if( m_areReallyObserved[i] )
            {
                //in canonical form
                const Value* valP = evidence->GetValue(i);
                int nodeSize = m_pModelDomain->GetVariableType(i)->GetNodeSize();
                floatVector mean;
                mean.resize(nodeSize);
                for( int k = 0; k < nodeSize; k++ )
                {
                    // BUG FIX: the original wrote mean[i] = (valP[i]).GetFlt(),
                    // indexing with the node counter 'i' instead of the value
                    // component counter, which filled the wrong slot (and read
                    // past the value array for nodes with i >= nodeSize).
                    mean[k] = valP[k].GetFlt();
                }
                m_selfMessages[i] = CGaussianPotential::CreateDeltaFunction( dom,
                    m_pModelDomain, mean, 0 );
            }
            else
            {
                //in canonical form
                m_selfMessages[i] = 
                    CGaussianPotential::CreateUnitFunctionDistribution( dom,
                    m_pModelDomain, 1 );   
            }
            m_beliefs[0][i] = static_cast<CPotential*>(m_selfMessages[i]->Clone());
            m_beliefs[1][i] = static_cast<CPotential*>(m_selfMessages[i]->Clone());
            
            m_pModelGraph->GetNeighbors( i,  &numOfNeighb, &neighbors, &orientation );
            
            for( j = 0; j < numOfNeighb; j++ )
            {
                if( orientation[j] == ntParent )//message from parent (pi) - it has another type
                {
                    //in moment form for Gaussian
                    m_curMessages[0][i][j] = 
                        CGaussianPotential::CreateUnitFunctionDistribution( dom, 
                        m_pModelDomain, 0 );
                }
                else
                {
                    //in Canonical form for Gaussian
                    m_curMessages[0][i][j] = CGaussianPotential::CreateUnitFunctionDistribution( dom,
                        m_pModelDomain, 1 );
                }
                m_curMessages[1][i][j] = static_cast<CPotential*>(m_curMessages[0][i][j]->Clone());
            }
        }
    }
    //init distribution functions on families (if model type is BNet)
    if( m_modelType == mtBNet )
    {
        for( i = 0; i < m_numOfNdsInModel; i++ )
        {
            m_familyDistributions[i] = 
                m_pGraphicalModel->GetFactor(i)->GetDistribFun()->Clone();   
        }
    }
    //init potentials on edges (if model type is MNet)
    if( m_modelType == mtMRF2 )
    {
        int numOfParams; CFactor**params;
        for( i = 0; i < m_numOfNdsInModel; i++ )
        {
            m_pGraphicalModel->GetFactors(1, &i, &numOfParams, &params );
            for( j = 0; j < numOfParams; j++ )
            {
                intVector domain;
                params[j]->GetDomain(&domain);
                //here should be only two nodes in domain - the model is MRF2
                if( domain.size() != 2 )
                {
                    PNL_THROW( CInconsistentSize, "all factors shold be of size 2" );
                }
                // Store the pairwise potential in both directions so either
                // endpoint can look it up by (from, to).
                m_nbrDistributions[domain[0]][domain[1]] = 
                    static_cast<CPotential*>(params[j]->Clone());
                m_nbrDistributions[domain[1]][domain[0]] = 
                    static_cast<CPotential*>(params[j]->Clone());
            }
        }
    }
}
コード例 #20
0
ファイル: learn_param.cpp プロジェクト: JacobCWard/PyPNL
int main()
{
    PNL_USING
	//we create very small model to start inference on it
	// the model is from Kevin Murphy's BNT\examples\static\belprop_polytree_gaussain
	/*
	Do the example from Satnam Alag's PhD thesis, UCB ME dept 1996 p46
	Make the following polytree, where all arcs point down
	
	 0   1
	  \ /
	   2
	  / \
	 3   4


	*/
	int i;
	//create this model
	int nnodes = 5;
	int numnt = 2;
	CNodeType *nodeTypes = new CNodeType[numnt];
 	nodeTypes[0] = CNodeType(0,2);
	nodeTypes[1] = CNodeType(0,1);
	
	intVector nodeAssociation = intVector(nnodes,0);
	nodeAssociation[1] = 1;
	nodeAssociation[3] = 1;
	int nbs0[] = { 2 };
	int nbs1[] = { 2 };
	int nbs2[] = { 0, 1, 3, 4 };
	int nbs3[] = { 2 };
	int nbs4[] = { 2 };
	int *nbrs[] = { nbs0, nbs1, nbs2, nbs3, nbs4 };
	int numNeighb[] = {1, 1, 4, 1, 1};

	
	ENeighborType ori0[] = { ntChild };
	ENeighborType ori1[] = { ntChild };
	ENeighborType ori2[] = { ntParent, ntParent, ntChild, ntChild };
	ENeighborType ori3[] = { ntParent };
	ENeighborType ori4[] = { ntParent };
	ENeighborType *orient[] = { ori0, ori1, ori2, ori3, ori4 }; 
	
	
	CGraph *pGraph;
	pGraph = CGraph::Create(nnodes, numNeighb, nbrs, orient);
	
	CBNet *pBNet;
	
	pBNet = CBNet::Create( nnodes, numnt, nodeTypes, &nodeAssociation.front(), pGraph );
	//Allocation space for all factors of the model
	pBNet->AllocFactors();
	
	for( i = 0; i < nnodes; i++ )
	{
	    //Allocation space for all matrices of CPD
	    pBNet->AllocFactor(i);
	}
	
	//now we need to create data for CPDs - we'll create matrices
	CFactor *pCPD;
	floatVector smData = floatVector(2,0.0f);
	floatVector bigData = floatVector(4,1.0f);
	intVector ranges = intVector(2, 1);
	ranges[0] = 2;
	smData[0] = 1.0f;
	CNumericDenseMatrix<float> *mean0 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &smData.front());
	bigData[0] = 4.0f;
	bigData[3] = 4.0f;
	ranges[1] = 2;
	CNumericDenseMatrix<float> *cov0 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &bigData.front());
	pCPD = pBNet->GetFactor(0);
	pCPD->AttachMatrix(mean0, matMean);
	pCPD->AttachMatrix(cov0, matCovariance);
	ranges[0] = 1;
	ranges[1] = 1;
	float val = 1.0f;
	CNumericDenseMatrix<float> *mean1 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &val );
	CNumericDenseMatrix<float> *cov1 = CNumericDenseMatrix<float>::
        Create( 2, &ranges.front(), &val );
	pCPD = pBNet->GetFactor(1);
	pCPD->AttachMatrix(mean1, matMean);
	pCPD->AttachMatrix(cov1, matCovariance);
	smData[0] = 0.0f;
	smData[1] = 0.0f;
	ranges[0] = 2;
	CNumericDenseMatrix<float> *mean2 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	smData[0] = 2.0f;
	smData[1] = 1.0f;
	CNumericDenseMatrix<float> *w21 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	bigData[0] = 2.0f;
	bigData[1] = 1.0f;
	bigData[2] = 1.0f;
	bigData[3] = 1.0f;
	ranges[1] = 2;
	CNumericDenseMatrix<float> *cov2 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	bigData[0] = 1.0f;
	bigData[1] = 2.0f;
	bigData[2] = 1.0f;
	bigData[3] = 0.0f;
	CNumericDenseMatrix<float> *w20 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	pCPD = pBNet->GetFactor(2);
	pCPD->AttachMatrix( mean2, matMean );
	pCPD->AttachMatrix( cov2, matCovariance );
	pCPD->AttachMatrix( w20, matWeights,0 );
	pCPD->AttachMatrix( w21, matWeights,1 );
	
	val = 0.0f;
	ranges[0] = 1;
	ranges[1] = 1;
	CNumericDenseMatrix<float> *mean3 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &val);
	val = 1.0f;
	CNumericDenseMatrix<float> *cov3 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &val);
	ranges[1] = 2;
	smData[0] = 1.0f;
	smData[1] = 1.0f;
	CNumericDenseMatrix<float> *w30 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	pCPD = pBNet->GetFactor(3);
	pCPD->AttachMatrix( mean3, matMean );
	pCPD->AttachMatrix( cov3, matCovariance );
	pCPD->AttachMatrix( w30, matWeights,0 );

	ranges[0] = 2; 
	ranges[1] = 1;
	smData[0] = 0.0f;
	smData[1] = 0.0f;
	CNumericDenseMatrix<float> *mean4 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &smData.front());
	ranges[1] = 2;
	bigData[0] = 1.0f;
	bigData[1] = 0.0f;
	bigData[2] = 0.0f;
	bigData[3] = 1.0f;
	CNumericDenseMatrix<float> *cov4 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	bigData[2] = 1.0f;
	CNumericDenseMatrix<float> *w40 = CNumericDenseMatrix<float>::
        Create(2, &ranges.front(), &bigData.front());
	pCPD = pBNet->GetFactor(4);
	pCPD->AttachMatrix( mean4, matMean );
	pCPD->AttachMatrix( cov4, matCovariance );
	pCPD->AttachMatrix( w40, matWeights,0 );

	//Generate random evidences for the modes
	int nEv = 1000;
	pEvidencesVector evid;
	pBNet->GenerateSamples( &evid, nEv );
	/////////////////////////////////////////////////////////////////////
		
	//Create copy of initial model with random matrices 
	CGraph *pGraphCopy = CGraph::Copy(pGraph); 
	CBNet *pLearnBNet = CBNet::CreateWithRandomMatrices(pGraphCopy, pBNet->GetModelDomain() );
	
	// Creating learning process	
	CEMLearningEngine *pLearn = CEMLearningEngine::Create(pLearnBNet);

	pLearn->SetData(nEv, &evid.front());
	pLearn->Learn();
	CNumericDenseMatrix<float> *pMatrix;
	int length = 0;
	const float *output;
	
	///////////////////////////////////////////////////////////////////////
	std::cout<<" results of learning (number of evidences = "<<nEv<<std::endl;
	for (i = 0; i < nnodes; i++ )
	{
	    int j;
	    std::cout<<"\n matrix mean for node "<<i;
	    std::cout<<"\n initial BNet \n";
	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pBNet->GetFactor(i)->GetMatrix(matMean));
	    pMatrix->GetRawData(&length, &output);
	    for ( j = 0; j < length; j++ )
	    {
		std::cout<<" "<<output[j];
	    }
	    std::cout<<"\n BNet with random matrices after learning \n ";
	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pLearnBNet->GetFactor(i)->GetMatrix(matMean));
	    pMatrix->GetRawData(&length, &output);
	    for ( j = 0; j < length; j++)
	    {
		std::cout<<" "<<output[j];
	    }
	    
    	    std::cout<<"\n \n matrix covariance for node "<<i<<'\n';
	    std::cout<<"\n initial BNet \n";

	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pBNet->GetFactor(i)->GetMatrix(matCovariance));
	    pMatrix->GetRawData(&length, &output);
	    for (j = 0; j < length; j++ )
	    {
		std::cout<<" "<<output[j];
	    }
    	    std::cout<<"\n BNet with random matrices after learning \n ";
	    pMatrix = static_cast<CNumericDenseMatrix<float>*>
		(pLearnBNet->GetFactor(i)->GetMatrix(matCovariance));
	    pMatrix->GetRawData(&length, &output);
	    for ( j = 0; j < length; j++ )
	    {
		std::cout<<" "<<output[j];
	    }

	    std::cout<<"\n ___________________________\n";
	    
	}
	
	
	for( i = 0; i < nEv; i++)
	{
	    delete evid[i];
	}
	delete pLearn;
	delete pLearnBNet;
	delete pBNet;
	
	

return 1;
}
コード例 #21
0
ファイル: pnlFactor.cpp プロジェクト: JacobCWard/PyPNL
// Common CFactor constructor: records the domain and bookkeeping fields, then
// builds the distribution function matching the requested distribution type
// (scalar / tree / tabular / Gaussian / mixture Gaussian / softmax).
// domain/nNodes  - variable indices (in pMD) this factor is defined over
// pMD            - model domain that owns node-type information
// obsIndices     - positions within the domain that are observed; for
//                  potentials, those nodes get the special "observed" types
// Throws PNL exceptions when node types are inconsistent with the requested
// distribution, or the (dt, pt) combination is unsupported.
CFactor::CFactor( EDistributionType dt,
                  EFactorType pt,
                  const int *domain, int nNodes, CModelDomain* pMD,
                  const intVector& obsIndices )
                  : m_Domain( domain, domain + nNodes )
{	
    /*fill enum fields:*/
    m_DistributionType = dt;
    m_FactorType = pt;
    m_pMD = pMD;
    // register with the model domain and remember our slot in its heap
    m_factNumInHeap = m_pMD->AttachFactor(this);
    int i;
    pConstNodeTypeVector nt;
    intVector dom = intVector( domain, domain+nNodes );
    pMD->GetVariableTypes( dom, &nt );
    m_obsPositions.assign( obsIndices.begin(), obsIndices.end() );
    int numObsNodesHere = obsIndices.size();
    switch (dt)
    {
    case dtScalar:
        {
            if( pt == ftCPD )
            {
                PNL_THROW( CInvalidOperation, "scalar is only potential - to multiply" );
            }
            //if there are observed nodes - get corresponding node types
            if( numObsNodesHere )
            {
                if ( numObsNodesHere != nNodes )
                {
                    PNL_THROW( CInconsistentType,
                        "all nodes in scalar distribution must be observed" )
                }
                //need to find observed nodes in domain and check including their changed types
                for( i = 0; i < numObsNodesHere; i++ )
                {
                    nt[obsIndices[i]] = nt[obsIndices[i]]->IsDiscrete() ? pMD->GetObsTabVarType():
                        pMD->GetObsGauVarType();
                }
            }
            m_CorrespDistribFun = CScalarDistribFun::Create(nNodes, &nt.front());
            break;
        }
	case dtTree:
        {
            if( pt != ftCPD )
            {
                PNL_THROW( CInvalidOperation, "Tree is only CPD" );
            }
            m_CorrespDistribFun = CTreeDistribFun::Create(nNodes, &nt.front());
            break;
        }
    case dtTabular:
        {
            
            if(( pt == ftPotential )&&( numObsNodesHere ))
            {
                //need to find observed nodes in domain and check including their changed types
                for( i = 0; i < numObsNodesHere; i++ )
                {
                    //change node type for this node
                    nt[obsIndices[i]] = nt[obsIndices[i]]->IsDiscrete() ? pMD->GetObsTabVarType():
                    pMD->GetObsGauVarType();
                }
            }
            //check all node types corresponds Tabular distribution
            // (discrete, or a continuous node collapsed to size 0 by observation)
            for( i = 0; i < nNodes; i++ )
            {
                if((!(( nt[i]->IsDiscrete() )||
                    ( !nt[i]->IsDiscrete()&&(nt[i]->GetNodeSize() == 0)))))
                {
                    PNL_THROW( CInconsistentType, 
                        "node types must corresponds Tabular type" );
                }
            }
            m_CorrespDistribFun = CTabularDistribFun::Create( nNodes,
                &nt.front(), NULL );
            break;
        }
    case dtGaussian:
        {
            switch (pt)
            {
            case ftPotential:
                {
                    //need to find observed nodes in domain and check including their changed types
                    for( i = 0; i < numObsNodesHere; i++ )
                    {
                        //change node type for this node
                        nt[obsIndices[i]] = nt[obsIndices[i]]->IsDiscrete() ?
                                pMD->GetObsTabVarType():pMD->GetObsGauVarType();
                    }
                    // discrete nodes of size 1 are tolerated (they carry no state)
                    for( i = 0; i < nNodes; i++ )
                    {
                        if( nt[i]->IsDiscrete() && (nt[i]->GetNodeSize() != 1))
                        {
                            PNL_THROW( CInvalidOperation,
                                "Gaussian potential must be of Gaussian nodes only" )
                        }
                    }
                    m_CorrespDistribFun = 
                        CGaussianDistribFun::CreateInMomentForm( 1, nNodes,
                        &nt.front(), NULL, NULL, NULL  );
                    break;
                }
            case ftCPD:
                {
                    //can check if there are both Continuous & Discrete nodes
                    // any discrete parent turns this into a conditional Gaussian
                    int noDiscrete = 1;
                    for( int i = 0; i < nNodes; i++ )
                    {
                        if( nt[i]->IsDiscrete() )
                        {
                            noDiscrete = 0;
                            break;
                        }
                    }
                    if( noDiscrete )
                    {
                        m_CorrespDistribFun = 
                            CGaussianDistribFun::CreateInMomentForm( 0, nNodes,
                            &nt.front(), NULL, NULL, NULL );
                        break;
                    }
                    else
                    {
                        m_CorrespDistribFun = 
                            CCondGaussianDistribFun::Create( 0, nNodes, &nt.front() );
                        break;
                    }
                }
            default:
                {
                    PNL_THROW( CBadConst, 
                        "no competent type as EFactorType" );
                    break;
                }
            }
            break;
        }
    case dtMixGaussian:
        {
            switch(pt)
            {
            case ftCPD:
                {
                    //check if where is discrete node - mixture node
                    int noDiscrete = 1;
                    for( int i = 0; i < nNodes; i++ )
                    {
                        if( nt[i]->IsDiscrete() )
                        {
                            noDiscrete = 0;
                            break;
                        }
                    }
                    if( !noDiscrete  )
                    {
                        m_CorrespDistribFun = CCondGaussianDistribFun::Create( 0,
                            nNodes, &nt.front() );
                    }
                    else
                    {
                        PNL_THROW( CInconsistentType, 
                            "mixture Gaussian CPD must have mixture node - discrete" );
                    }
                    break;
                }
            default:
                {
                    PNL_THROW( CNotImplemented, "mixture gaussian potential" );  
                }
            }
            break;
        }
    case dtSoftMax:
        {
            switch (pt)
            {
            case ftPotential:
                {
                  PNL_THROW( CNotImplemented, "only CPD yet" );
                    break;
                }
            case ftCPD:
                {
                    //can check if there are both Continuous & Discrete nodes
                    // NOTE(review): only the first nNodes-1 entries are scanned;
                    // presumably the last domain node is the (discrete) softmax
                    // child itself — confirm against callers.
                    int noDiscrete = 1;
                    for( int i = 0; i < nNodes-1; i++ )
                    {
                        if( nt[i]->IsDiscrete() )
                        {
                            noDiscrete = 0;
                            break;
                        }
                    }
                    if( noDiscrete )
                    {
                        m_CorrespDistribFun = 
                            CSoftMaxDistribFun::Create( nNodes,
                            &nt.front(), NULL, NULL );
                        break;
                    }
                    else
                    {
                        m_CorrespDistribFun = 
                            CCondSoftMaxDistribFun::Create( nNodes, &nt.front() );
                        break;
                    }
/*
                  //can check if there are both Continuous & Discrete nodes
                        m_CorrespDistribFun = 
                            CSoftMaxDistribFun::CreateUnitFunctionDistribution( nNodes, &nt.front() );
                        break;
*/
                }
            default:
                {
                    PNL_THROW( CBadConst, 
                        "no competent type as EFactorType" );
                    break;
                }
            }
            break;
        }
    default:
        {
            PNL_THROW ( CBadConst,
                "we have no such factor type at EDistributionType");
        }
    }
}
コード例 #22
0
ファイル: pnlFactor.cpp プロジェクト: JacobCWard/PyPNL
// Creates a deep copy of 'factor' attached to the model domain 'pMDNew'
// over the node set 'domain'.  Throws CBadArg when the new domain has a
// different number of nodes than the factor's old domain, and
// CInconsistentType when the variable types of corresponding nodes in
// the old and new model domains disagree.  'obsIndices' is currently
// unused.  Returns the newly allocated factor; the caller owns it.
CFactor* CFactor::CopyWithNewDomain(const CFactor *factor, intVector &domain, 
                                                 CModelDomain *pMDNew,
                                                 const intVector& /* obsIndices */)
{
    int domSize = domain.size();
    intVector domOld;
    factor->GetDomain( &domOld );
    // the new domain must contain exactly as many nodes as the old one
    if( int(domOld.size()) != domSize )
    {
        PNL_THROW( CBadArg, "number of nodes" );
    }
    CModelDomain *pMDOld = factor->GetModelDomain();
    
    //check is the types are the same
    const pConstNodeTypeVector* ntFactor = factor->GetArgType();
    
    /*
    const CNodeType *nt;
    
    for( int i = 0; i < domSize; i++ )
    {
        nt = (*ntFactor)[i];
        if( nt->IsDiscrete() )
        {
            if( nt->GetNodeSize() == 1 )
            {
                if( *pMDOld->GetVariableType(domOld[i]) != 
                    *pMDNew->GetVariableType(domain[i]))
                {
                    PNL_THROW(CInconsistentType, "types of variables should correspond");
                }
                
            }
            else
            {
                if( *nt != *pMDNew->GetVariableType(domain[i]) )
                {
                    PNL_THROW(CInconsistentType, "types of variables should correspond");
                }
            }
        }
        else
        {
            if( nt->GetNodeSize() == 0 )
            {
                if( *pMDOld->GetVariableType(domOld[i]) != 
                    *pMDNew->GetVariableType(domain[i]))
                {
                    PNL_THROW(CInconsistentType, "types of variables should correspond");
                }
                
            }
            else
            {
                if( *nt != *pMDNew->GetVariableType(domain[i]) )
                {
                    PNL_THROW(CInconsistentType, "types of variables should correspond");
                }
            }
        }

        
    }
    */
    const CNodeType *nt;
    int i;
    intVector obsPositions;
    factor->GetObsPositions(&obsPositions);
    if( obsPositions.size() )
    {
	intVector::iterator iterEnd = obsPositions.end();
        // factor has observed positions: compare the types recorded in
        // the old model domain against the new one, but only for the
        // positions that are actually observed
        for( i = 0; i < domSize; i++)
        {
            if( std::find( obsPositions.begin(),iterEnd, i) != iterEnd )
            {
                if( *pMDOld->GetVariableType(domOld[i]) != 
                    *pMDNew->GetVariableType(domain[i]))
                {
                    PNL_THROW(CInconsistentType, "types of variables should correspond");
                }
            }
        }
    }
    else
    {
        // no observed positions: every argument type of the factor must
        // match the corresponding variable type in the new model domain
        for( i = 0; i < domSize; i++ )
        {
            nt = (*ntFactor)[i];
            if( *nt != *pMDNew->GetVariableType(domain[i]) )
            {
                PNL_THROW(CInconsistentType, "types of variables should correspond");
            }
            
        }
    }
    
    

    // clone the factor via the Copy() of its concrete class, dispatching
    // on factor type (potential vs. CPD) and distribution type
    CFactor *pNewFactor;
    switch ( factor->GetFactorType() )
    {
    case ftPotential:
            {
                switch ( factor->GetDistributionType() )
                {
            case dtTabular:
                {
                    pNewFactor = CTabularPotential::
                        Copy(static_cast<const CTabularPotential*>(factor) );
                    break;
                }
            case dtGaussian:
                {
                    pNewFactor = CGaussianPotential::
                        Copy(static_cast<const CGaussianPotential*>(factor));
                    break;
                }
            case dtScalar:
                {
                    pNewFactor = CScalarPotential::
                        Copy( static_cast<const CScalarPotential*>(factor) );
                    break;
                }
            default:
                {
                    PNL_THROW(CNotImplemented, "distribution type" );
                }
            }
            break;
        }
    case ftCPD:
        {
            switch ( factor->GetDistributionType() )
            {
            case dtTabular:
                {
                    pNewFactor = CTabularCPD::
                        Copy(static_cast<const CTabularCPD*>(factor));
                    break;
                }
            case dtGaussian:
                {
                    pNewFactor = CGaussianCPD::
                        Copy(static_cast<const CGaussianCPD*>(factor));
                    break;
                }
            // conditional Gaussian CPDs are copied through CGaussianCPD as well
            case dtCondGaussian:
                {
                    pNewFactor = CGaussianCPD::
                        Copy(static_cast<const CGaussianCPD*>(factor));
                    break;
                }
            case dtSoftMax:
                {
                    pNewFactor = CSoftMaxCPD::
                        Copy(static_cast<const CSoftMaxCPD*>(factor));
                    break;
                }
            // conditional SoftMax CPDs are copied through CSoftMaxCPD as well
            case dtCondSoftMax:
                {
                    pNewFactor = CSoftMaxCPD::
                        Copy(static_cast<const CSoftMaxCPD*>(factor));
                    break;
                }
            case dtMixGaussian:
                {
                    pNewFactor = CMixtureGaussianCPD::Copy( 
                        static_cast<const CMixtureGaussianCPD*>(factor));
                    break;
                }
            default:
                {
                    PNL_THROW(CNotImplemented, "distribution type" );
                }
            }
            break;
        }
    default:
        {
            PNL_THROW(CNotImplemented, "factor type" );
        }
    }
    PNL_CHECK_IF_MEMORY_ALLOCATED(pNewFactor);
    /*
    if( pMDNew == factor->GetModelDomain())
    {
        return pNewFactor;
    }
    else*/
    {
        // re-attach the copy to the requested domain and model domain
        pNewFactor->m_Domain = intVector(domain);
        pNewFactor->SetModelDomain(pMDNew, 0);
        return pNewFactor;
    }
    
}
コード例 #23
0
ファイル: pnlFactor.hpp プロジェクト: JacobCWard/PyPNL
bool PNL_API pnlIsIdentical(int size1, int* Domain1, int size2, int* Domain2);

class CDistribFun;
class CTabularDistribFun ;
class CPotential;
class CEvidence;
class CInfEngine;
class CModelDomain;

class PNL_API CFactor : public CPNLBase
{
public:
    //create new factor in other domain and model domain if node types corresponds
    static CFactor* CopyWithNewDomain(const CFactor *factor, intVector &domain,
        CModelDomain *pModelDomain,
        const intVector& obsIndices = intVector());
    virtual CFactor* Clone() const = 0;

    virtual CFactor* CloneWithSharedMatrices() = 0;

    virtual void CreateAllNecessaryMatrices(int typeOfMatrices = 1);
    //typeOfMatrices = 1 - all matrices are random
    //only Gaussian covariance matrix is matrix unit
    //for ConditionalGaussianDistribution
    //the matrix of Gaussian distribution functions is dense

    //methods for work with Model Domain
    //return factor number in factor heap
    int GetNumInHeap() const;
    //release model domain from this factor
    void ChangeOwnerToGraphicalModel() const;
コード例 #24
0
ファイル: pnlGaussianCPD.hpp プロジェクト: JacobCWard/PyPNL

class PNL_API CGaussianCPD : public CCPD
{
public:
    static CGaussianCPD* Create( const intVector& domainIn, CModelDomain* pMD );
    static CGaussianCPD* Copy( const CGaussianCPD* pGauCPD );
    static CGaussianCPD* CreateUnitFunctionCPD( const intVector& domainIn,
            CModelDomain* pMD);
    virtual CFactor* Clone() const;
    virtual CFactor* CloneWithSharedMatrices();
#ifndef SWIG
    void AllocDistribution( const floatVector& meanIn, const floatVector& covIn,
                            float normCoeff,
                            const floatVecVector& weightsIn,
                            const intVector& parentCombination = intVector());
#endif
    void AllocDistribution( const C2DNumericDenseMatrix<float>* meanMat,
                            const C2DNumericDenseMatrix<float>* covMat, float normCoeff,
                            const p2DDenseMatrixVector& weightsMat,
                            const intVector& parentCombination  = intVector());
    void SetCoefficientVec( float coeff, const intVector& parentCombinationIn
                            = intVector() );
    float GetCoefficientVec( const intVector& parentCombinationIn = intVector() );
#ifdef PNL_OBSOLETE
    static CGaussianCPD* Create( const int *domain, int nNodes,
                                 CModelDomain* pMD );
    static CGaussianCPD* CreateUnitFunctionCPD( const int *domain, int nNodes,
            CModelDomain* pMD );
    void AllocDistribution( const float* pMean, const float* pCov,
                            float normCoeff, const float* const* pWeightsIn,
コード例 #25
0
ファイル: AGibbsInfEngine.cpp プロジェクト: billryan/OpenPNL
// Regression test: builds a 5-node scalar Gaussian polytree BNet
// (Satnam Alag's thesis example), observes nodes 3 and 4, and compares
// the Gibbs-sampling marginal of node 0 against exact naive inference.
// Returns nonzero when the two query distributions agree within 'eps'.
int GibbsForScalarGaussianBNet( float eps)
{
    std::cout<<std::endl<<" Scalar gaussian BNet (5 nodes)"<< std::endl;
    CBNet *pBnet;
    pEvidencesVector evidences;

    CGibbsSamplingInfEngine *pGibbsInf;
    const CPotential *pQueryPot1, *pQueryPot2;
    int i, ret;

    ////////////////////////////////////////////////////////////////////////
    //Do the example from Satnam Alag's PhD thesis, UCB ME dept 1996 p46
    //Make the following polytree, where all arcs point down
    //
    // 0   1
    //  \ /
    //   2
    //  / \
    // 3   4
    //
    //////////////////////////////////////////////////////////////////////

    int nnodes = 5;
    int numnt = 1;
    // single node type shared by all nodes: continuous, dimension 1
    CNodeType *nodeTypes = new CNodeType[numnt];
    nodeTypes[0] = CNodeType(0,1);

    intVector nodeAssociation = intVector(nnodes,0);

    // adjacency of the polytree above; node 2 has four neighbors
    int nbs0[] = { 2 };
    int nbs1[] = { 2 };
    int nbs2[] = { 0, 1, 3, 4 };
    int nbs3[] = { 2 };
    int nbs4[] = { 2 };
    ENeighborType ori0[] = { ntChild };
    ENeighborType ori1[] = { ntChild };
    ENeighborType ori2[] = { ntParent, ntParent, ntChild, ntChild };
    ENeighborType ori3[] = { ntParent };
    ENeighborType ori4[] = { ntParent };
    int *nbrs[] = { nbs0, nbs1, nbs2, nbs3, nbs4 };
    ENeighborType *orient[] = { ori0, ori1, ori2, ori3, ori4 };
    intVector numNeighb = intVector(5,1);
    numNeighb[2] = 4;
    CGraph *graph;
    graph = CGraph::Create(nnodes, &numNeighb.front(), nbrs, orient);

    pBnet = CBNet::Create( nnodes, numnt, nodeTypes, &nodeAssociation.front(),graph );
    pBnet->AllocFactors();

    for( i = 0; i < nnodes; i++ )
    {
	pBnet->AllocFactor(i);
    }
    //now we need to create data for factors - we'll create matrices
    // reusable 1x1 data buffers for the scalar mean/covariance matrices
    floatVector smData = floatVector(1,0.0f);
    floatVector bigData = floatVector(1,1.0f);
    intVector ranges = intVector(2, 1);
    ranges[0] = 1;
    // node 0: N(mean=1, cov=4)
    smData[0] = 1.0f;
    CNumericDenseMatrix<float> *mean0 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &smData.front());
    bigData[0] = 4.0f;

    CNumericDenseMatrix<float> *cov0 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &bigData.front());
    pBnet->GetFactor(0)->AttachMatrix(mean0, matMean);
    pBnet->GetFactor(0)->AttachMatrix(cov0, matCovariance);

    // node 1: N(mean=1, cov=1)
    float val = 1.0f;

    CNumericDenseMatrix<float> *mean1 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &val );
    CNumericDenseMatrix<float> *cov1 = CNumericDenseMatrix<float>::Create( 2, &ranges.front(), &val );
    pBnet->GetFactor(1)->AttachMatrix(mean1, matMean);
    pBnet->GetFactor(1)->AttachMatrix(cov1, matCovariance);
    // node 2: mean=0, cov=2, weights 1.0 (from node 0) and 2.0 (from node 1)
    smData[0] = 0.0f;

    CNumericDenseMatrix<float> *mean2 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());
    smData[0] = 2.0f;

    CNumericDenseMatrix<float> *w21 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());
    bigData[0] = 2.0f;

    CNumericDenseMatrix<float> *cov2 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    bigData[0] = 1.0f;

    CNumericDenseMatrix<float> *w20 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    pBnet->GetFactor(2)->AttachMatrix( mean2, matMean );
    pBnet->GetFactor(2)->AttachMatrix( cov2, matCovariance );
    pBnet->GetFactor(2)->AttachMatrix( w20, matWeights,0 );
    pBnet->GetFactor(2)->AttachMatrix( w21, matWeights,1 );

    // node 3: mean=0, cov=4, weight 1.1 (from node 2)
    val = 0.0f;

    CNumericDenseMatrix<float> *mean3 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &val);
    val = 4.0f;
    CNumericDenseMatrix<float> *cov3 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &val);

    smData[0] = 1.1f;

    CNumericDenseMatrix<float> *w30 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());
    pBnet->GetFactor(3)->AttachMatrix( mean3, matMean );
    pBnet->GetFactor(3)->AttachMatrix( cov3, matCovariance );
    pBnet->GetFactor(3)->AttachMatrix( w30, matWeights,0 );


    // node 4: mean=-0.8, cov=1.2, weight 2.0 (from node 2)
    smData[0] = -0.8f;

    CNumericDenseMatrix<float> *mean4 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &smData.front());

    bigData[0] = 1.2f;

    CNumericDenseMatrix<float> *cov4 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    bigData[0] = 2.0f;

    CNumericDenseMatrix<float> *w40 = CNumericDenseMatrix<float>::Create(2, &ranges.front(), &bigData.front());
    pBnet->GetFactor(4)->AttachMatrix( mean4, matMean );
    pBnet->GetFactor(4)->AttachMatrix( cov4, matCovariance );
    pBnet->GetFactor(4)->AttachMatrix( w40, matWeights,0 );


    // sample one full evidence, then hide nodes 0, 1, 2 so that only
    // the leaves 3 and 4 remain observed
    evidences.clear();
    pBnet->GenerateSamples( &evidences, 1 );

    const int ndsToToggle2[] = { 0, 1, 2 };
    evidences[0]->ToggleNodeState( 3, ndsToToggle2 );
    const int *flags1 = evidences[0]->GetObsNodesFlags();
    std::cout<<"observed nodes"<<std::endl;
    for( i = 0; i < pBnet->GetNumberOfNodes(); i++ )
    {
	if ( flags1[i] )
	{
	    std::cout<<"node "<<i<<"; ";
	}
    }
    std::cout<<std::endl<<std::endl;

    const int querySz2 = 1;
    const int query2[] = { 0 };

    // exact reference answer
    CNaiveInfEngine *pNaiveInf = CNaiveInfEngine::Create(pBnet);
    pNaiveInf->EnterEvidence( evidences[0] );
    pNaiveInf->MarginalNodes( query2,querySz2 );

    pGibbsInf = CGibbsSamplingInfEngine::Create( pBnet );
    pGibbsInf->SetNumStreams( 1 );
    pGibbsInf->SetMaxTime( 10000 );
    pGibbsInf->SetBurnIn( 1000 );
    


    intVecVector queries(1);
    queries[0].clear();
    queries[0].push_back( 0 );
    //queries[0].push_back( 2 );
    pGibbsInf->SetQueries( queries );

    pGibbsInf->EnterEvidence( evidences[0] );
    pGibbsInf->MarginalNodes( query2, querySz2 );

    pQueryPot1 = pGibbsInf->GetQueryJPD();
    pQueryPot2 = pNaiveInf->GetQueryJPD();
    std::cout<<"result of gibbs"<<std::endl<<std::endl;
    pQueryPot1->Dump();
    std::cout<<"result of naive"<<std::endl;
    pQueryPot2->Dump();

    ret = pQueryPot1->IsFactorsDistribFunEqual( pQueryPot2, eps, 0 );

    delete evidences[0];
    delete pNaiveInf;
    delete pGibbsInf;
    delete pBnet;
    // the net copies the node types on creation, so the local array must
    // be freed here (same cleanup pattern as testEvidence); previously it
    // leaked
    delete [] nodeTypes;

    return ret;

    ////////////////////////////////////////////////////////////////////////////////////////
}
コード例 #26
0
//  Author(s):                                                             //
//                                                                         //
/////////////////////////////////////////////////////////////////////////////
#include "pnl_dll.hpp"

PNL_USING

void tCreateRandomPermutation( int n, int buf[] );

CGraph *tCreateRandomDAG( int num_nodes, int num_edges, bool top_sorted );

CBNet *tCreateRandomBNet( int num_nodes, int num_edges,
                          int max_states, int max_dim,
                          int gaussian_seed,
                          bool no_gaussian_parent_with_discrete_child_please,
                          bool no_gaussian_child_with_discrete_parent_please );

// Returns a pseudo-random integer in the half-open range [0, k),
// drawn from the C library generator (seed with srand()).
inline int tTurboRand( int k )
{
    const int sample = rand();
    return sample % k;
}

// Returns a pseudo-random integer in the closed range [lo, hi],
// by shifting a draw from the single-argument overload.
inline int tTurboRand( int lo, int hi )
{
    const int span = hi - lo + 1;
    return lo + tTurboRand( span );
}

CEvidence *tCreateRandomEvidence( CGraphicalModel *model,
                                  int num_nodes,
                                  intVector const &candidates = intVector() );
コード例 #27
0
ファイル: SEEvidence.cpp プロジェクト: JacobCWard/PyPNL
// TRS regression test for CEvidence: builds a random DAG model with
// alternating discrete/continuous nodes, creates an evidence over a
// random subset of nodes, and checks that the stored values, the
// observed-node bookkeeping, and ToggleNodeState() behave as expected.
// Returns a TRS result code (TRS_OK on success, TRS_FAIL otherwise).
int testEvidence()
{
    int ret = TRS_OK;
    int nnodes = 0;
    int nObsNodes = 0;
    int i,j;
    // read test parameters interactively (with defaults) via the TRS harness
    while(nnodes <= 0)
    {
	trsiRead( &nnodes, "10", "Number of nodes in Model" );
    }
    while((nObsNodes <= 0)||(nObsNodes>nnodes))
    {
        trsiRead( &nObsNodes, "2", "Number of Observed nodes from all nodes in model");
    }
    int seed1 = pnlTestRandSeed();
    /*create string to display the value*/
    char value[42];

    sprintf(value, "%i", seed1);
    trsiRead(&seed1, value, "Seed for srand to define NodeTypes etc.");
    trsWrite(TW_CON|TW_RUN|TW_DEBUG|TW_LST, "seed for rand = %d\n", seed1);
    // two node types: type 0 is discrete with 4 states, type 1 is
    // continuous with dimension 3; nodes alternate between them
    CNodeType *modelNodeType = new CNodeType[2];
    modelNodeType[0] = CNodeType( 1, 4 );
    modelNodeType[1] = CNodeType( 0, 3 );
    int *NodeAssociat=new int [nnodes+1];
    for(i=0; i<(nnodes+1)/2; i++)
    {
	NodeAssociat[2*i]=0;
	NodeAssociat[2*i+1]=1;
    }
    //create random graph - number of nodes for every node is rand too
    int lowBorder = nnodes - 1;
    int upperBorder = int((nnodes * (nnodes - 1)) / 2);
    int numEdges = rand()%(upperBorder - lowBorder)+lowBorder;
    CGraph* theGraph = tCreateRandomDAG( nnodes, numEdges, 1 );

    CBNet *grModel = CBNet::Create(nnodes, 2, modelNodeType, NodeAssociat,theGraph);
    int *obsNodes = (int*)trsGuardcAlloc(nObsNodes, sizeof(int));
    srand ((unsigned int)seed1);
    // pick nObsNodes distinct random nodes to observe, removing each
    // chosen node from the residuary pool
    intVector residuaryNodes;
    for (i=0; i<nnodes; i++)
    {
	residuaryNodes.push_back(i);
    }
    int num = 0;
    valueVector Values;
    Values.reserve(3*nnodes);
    Value val;
    for (i = 0; i<nObsNodes; i++)
    {
	num = rand()%(nnodes-i);
	obsNodes[i] = residuaryNodes[num];
	residuaryNodes.erase(residuaryNodes.begin()+num);
	CNodeType nt = modelNodeType[NodeAssociat[obsNodes[i]]];
	// discrete nodes get a single int value 1; continuous nodes get
	// the three floats 1.0, 2.0, 3.0
	if(nt.IsDiscrete())
	{
            val.SetInt(1);
            Values.push_back(val);
	}
	else
	{
	    val.SetFlt(1.0f);
            Values.push_back(val);
            val.SetFlt(2.0f);
            Values.push_back(val);
            val.SetFlt(3.0f);
            Values.push_back(val);
	}

    }
    residuaryNodes.clear();
    CEvidence *pMyEvid = CEvidence::Create(grModel,
	nObsNodes, obsNodes, Values) ;
    int nObsNodesFromEv = pMyEvid->GetNumberObsNodes();
    const int *pObsNodesNow = pMyEvid->GetObsNodesFlags();
    //	const int *myOffset = pMyEvid->GetOffset();
    const int *myNumAllObsNodes = pMyEvid->GetAllObsNodes();
    valueVector ev;
    pMyEvid->GetRawData(&ev);
    // check that the first observed node's value(s) round-trip through
    // the evidence exactly as they were written above
    const Value* vall = pMyEvid->GetValue(obsNodes[0]);
    if( NodeAssociat[obsNodes[0]] == 0 )
    {
	if( (vall)[0].GetInt() != 1 )
	{
	    ret = TRS_FAIL;
	}
    }
    else
    {
	for( j=0; j<3; j++)
	{
	    if( (vall)[j].GetFlt() != (j+1)*1.0f )
	    {
		ret = TRS_FAIL;
		break;
	    }
	}
    }
    if(nObsNodesFromEv == nObsNodes)
    {
	intVector numbersOfReallyObsNodes;
	int numReallyObsNodes=0;
	for ( i=0; i<nObsNodesFromEv; i++)
	{
	    if (pObsNodesNow[i])
	    {
		numbersOfReallyObsNodes.push_back(myNumAllObsNodes[i]);
		numReallyObsNodes++;
	    }
	}
#if 0
	const CNodeType ** AllNodeTypesFromModel= new const CNodeType*[nnodes];
	for (i=0; i<nnodes; i++)
	{
	    AllNodeTypesFromModel[i] = grModel->GetNodeType(i);
	}
	for (i=0; i<nObsNodesFromEv; i++)
	{
	    //Test the values which are keep in Evidence
	    CNodeType nt = *AllNodeTypesFromModel[myNumAllObsNodes[i]];
	    int IsDiscreteNode = nt.IsDiscrete();
	    if(IsDiscreteNode)
	    {
		int valFromEv = (ev[myOffset[i]].GetInt());
		if(!(Values[i].GetInt() == valFromEv))
		{
		    ret=TRS_FAIL;
		    break;
		}
	    }
	    else
	    {
		;
		for (j=0; j<3; j++)
		{
		    if(!((ev[myOffset[i]+j]).GetFlt() == Values[i+j].GetFlt()))
		    {
			ret=TRS_FAIL;
			break;
		    }
		}
	    }
	}
	delete []AllNodeTypesFromModel;
#endif
    }
    else
    {
	ret = TRS_FAIL;
    }
    //Toggle some Node
    // toggle a random subset of the observed nodes to hidden, then
    // verify none of the toggled nodes is still flagged as observed
    int someNumber = (int)(rand()*nObsNodesFromEv/RAND_MAX);
    int *someOfNodes = new int[someNumber];
    intVector residuaryNums = intVector(myNumAllObsNodes,
	myNumAllObsNodes+nObsNodesFromEv);
    num=0;
    for(i=0; i<someNumber;i++)
    {
	num = (int)(rand()%(nObsNodes-i));
	someOfNodes[i] = residuaryNums[num];
	residuaryNums.erase(residuaryNums.begin()+num);
    }
    residuaryNums.clear();
    pMyEvid->ToggleNodeState(someNumber, someOfNodes);
    const int *pObsNodesAfterToggle = pMyEvid->GetObsNodesFlags();
    for (i=0; i<nObsNodesFromEv; i++)
    {
	//Test the ToggleNode method...
	if(pObsNodesAfterToggle[i])
	{
	    for(j=0; j<someNumber;j++)
	    {
		if(myNumAllObsNodes[i]==someOfNodes[j])
		{
		    ret=TRS_FAIL;
		    break;
		}
	    }
	}
    }

    delete grModel;
    delete pMyEvid;
    delete []modelNodeType;
    delete []NodeAssociat;
    delete []someOfNodes;
    // trsGuardcAlloc-ed buffer: check the guard bytes before freeing
    int obsNodes_memory_flag = trsGuardCheck( obsNodes );
    if( obsNodes_memory_flag)
    {
	return trsResult( TRS_FAIL, "Dirty memory");
    }
    trsGuardFree( obsNodes );
    return trsResult( ret, ret == TRS_OK ? "No errors" : "Bad test on Values");
}
コード例 #28
0
ファイル: pnlSoftMaxCPD.hpp プロジェクト: JacobCWard/PyPNL
    void InitLearnData();
    
    void CopyLearnDataToDistrib();
    
    virtual CFactor* Clone() const;

	void BuildCurrentEvidenceMatrix(float ***full_evid, 
			float ***evid,intVector family,int numEv);

	virtual	void CreateAllNecessaryMatrices( int typeOfMatrices );
    
    virtual CFactor* CloneWithSharedMatrices();
    
    void AllocDistribution(const floatVector& weightsIn,
        const floatVector& offsetIn,
        const intVector& parentCombination = intVector());
    
    virtual CPotential* ConvertWithEvidenceToTabularPotential(
        const CEvidence* pEvidence,
        int flagSumOnMixtureNode = 1) const;
    
    virtual CPotential *ConvertToTabularPotential(const CEvidence *pEvidence) const;
    
    /*   CPotential*  ConvertWithEvidenceToGaussianPotential(
    const CEvidence* pEvidence,
    int flagSumOnMixtureNode ) const;
    */
    CPotential* ConvertWithEvidenceToGaussianPotential(const CEvidence* pEvidence,
        floatVector MeanContParents, 
        C2DNumericDenseMatrix<float>* CovContParents,
        const int *parentIndices = NULL,