Ejemplo n.º 1
0
CGraph* CreateGraphWithPyramidSpecific(int& num_nodes, int num_indep_nodes, 
                                       int num_layers)
{
    // Builds a pyramid-shaped DAG: num_indep_nodes root nodes; each next
    // layer holds (2 * previous + 1) nodes, with neighbouring parents
    // sharing one child. num_nodes is an out-parameter set to the total.
    PNL_CHECK_LEFT_BORDER( num_indep_nodes, 1 );
    PNL_CHECK_LEFT_BORDER( num_layers, 1 );

    CGraph *pGraph = CGraph::Create(0, NULL, NULL, NULL);
    PNL_CHECK_IF_MEMORY_ALLOCATED( pGraph );

    srand((unsigned int)time(NULL));

    // Total node count over all layers.
    num_nodes = num_indep_nodes;
    int layerWidth = num_indep_nodes;
    for (int lvl = 1; lvl < num_layers; ++lvl)
    {
        layerWidth = layerWidth * 2 + 1;
        num_nodes += layerWidth;
    }

    pGraph->AddNodes(num_nodes);

    int firstParent = 0;
    int lastParent = num_indep_nodes - 1;

    for (int layer = 0; layer < num_layers - 1; ++layer)
    {
        const int firstOfNext = lastParent + 1;
        const int lastOfNext =
            firstOfNext + 2 * (lastParent - firstParent + 1);

        int parentIdx = 0;
        for (int p = firstParent; p <= lastParent; ++p, ++parentIdx)
        {
            // Each parent links to three consecutive children; adjacent
            // parents overlap on one child (hence the stride of 2).
            const int base = lastParent + parentIdx * 2;
            pGraph->AddEdge(p, base + 1, 1);
            pGraph->AddEdge(p, base + 2, 1);
            pGraph->AddEdge(p, base + 3, 1);
        }
        firstParent = firstOfNext;
        lastParent = lastOfNext;
    }

    return pGraph;
}
void CSamplingInfEngine::Continue( int dt )
{
    // Extends an already-started sampling run by dt additional steps.
    PNL_CHECK_LEFT_BORDER( dt, 0);

    const int tStart = GetMaxTime();
    // Sampling must have been run before continuation (max time >= 0).
    PNL_CHECK_LEFT_BORDER( tStart, 0);

    const int tEnd = tStart + dt;
    SetMaxTime( tEnd );
    Sampling( tStart, tEnd );
}
Ejemplo n.º 3
0
CGraph* CreateGraphWithRegularGridSpecific(int& num_nodes, int width, 
    int height, int num_layers)
{
    // Builds num_layers stacked (width x height) grids. Within a layer every
    // node is linked to its right and bottom neighbours; across layers every
    // node is linked to its counterpart in the previous layer.
    PNL_CHECK_LEFT_BORDER( width, 2 );
    PNL_CHECK_LEFT_BORDER( height, 2 );
    PNL_CHECK_LEFT_BORDER( num_layers, 1 );

    CGraph *pGraph = CGraph::Create(0, NULL, NULL, NULL);
    PNL_CHECK_IF_MEMORY_ALLOCATED( pGraph );

    srand((unsigned int)time(NULL));

    const int nodesPerLayer = width * height;
    num_nodes = nodesPerLayer * num_layers;
    pGraph->AddNodes(num_nodes);

    for (int layer = 0; layer < num_layers; ++layer)
    {
        const int base = layer * nodesPerLayer;

        // Top row: horizontal edges.
        for (int col = 1; col < width; ++col)
            pGraph->AddEdge(base + col - 1, base + col, 1);

        // Left column: vertical edges.
        for (int row = 1; row < height; ++row)
            pGraph->AddEdge(base + (row - 1) * width,
                            base + row * width, 1);

        // Remaining cells: one vertical and one horizontal edge each.
        for (int col = 1; col < width; ++col)
            for (int row = 1; row < height; ++row)
            {
                pGraph->AddEdge(base + (row - 1) * width + col,
                                base + row * width + col, 1);
                pGraph->AddEdge(base + row * width + col - 1,
                                base + row * width + col, 1);
            }

        // Inter-layer edges from the previous layer's matching cell.
        if (layer)
        {
            for (int col = 0; col < width; ++col)
                for (int row = 0; row < height; ++row)
                    pGraph->AddEdge(base - nodesPerLayer + row * width + col,
                                    base + row * width + col, 1);
        }
    }

    return pGraph;
}
void CGibbsSamplingInfEngine::SetQueries(intVecVector &queryes)
{
  // Validates and stores the query node sets for Gibbs sampling.
  // Each query must be non-empty, within the model's node range, and must
  // not contain duplicated nodes; throws CAlgorithmicException otherwise.
  int nQueryes = queryes.size();
  PNL_CHECK_LEFT_BORDER( nQueryes, 1 );
  
  int i;
  for( i = 0; i < m_queryFactors.size(); i++ )
  {
    delete m_queryFactors[i];
  }
  // BUG FIX: the vector previously kept the freed pointers, so a later
  // SetQueries call (or any other walk of m_queryFactors) would delete
  // them a second time; drop the dangling entries explicitly.
  m_queryFactors.clear();
  
  intVector tmp;
  for( i = 0; i < nQueryes; i++ )
  {
    PNL_CHECK_RANGES( queryes[i].size(), 1, m_pGraphicalModel->GetNumberOfNodes() );
    // Detect duplicates: sort a copy, drop adjacent equal values, and
    // compare sizes — a shrink means the query listed some node twice.
    tmp = queryes[i];
    std::sort( tmp.begin(), tmp.end() );
    intVector::iterator it = std::unique( tmp.begin(), tmp.end() );
    tmp.erase( it, tmp.end() );
    if( tmp.size() != queryes[i].size() )
    {
      PNL_THROW(CAlgorithmicException, "equal nodes in qurey");
    }
    tmp.clear();
  }
  m_queryes = queryes;    
}
Ejemplo n.º 5
0
int CMNet::GetFactors( int numberOfNodes, const int *nodes,
		               pFactorVector *params ) const
{
    // Finds all factors set on domains that contain "nodes" as a subset.
    // Returns 1 and fills *params on success, 0 if no such factor exists.

    // bad-args check
    PNL_CHECK_LEFT_BORDER( numberOfNodes, 1 );
    PNL_CHECK_IS_NULL_POINTER(nodes);
    PNL_CHECK_IS_NULL_POINTER(params);
    // bad-args check end

    params->clear();

    int       numOfClqs;
    const int *clqsNums;

    // Seed the candidate set with the cliques of the first node; every
    // further node can only narrow it down.
    GetClqsNumsForNode( *nodes, &numOfClqs, &clqsNums );

    assert( numOfClqs > 0 );

    intVector factsNums( clqsNums, clqsNums + numOfClqs );

    const int *ndsIt   = nodes + 1,
              *nds_end = nodes + numberOfNodes;

    for( ; ndsIt != nds_end; ++ndsIt )
    {
        GetClqsNumsForNode( *ndsIt, &numOfClqs, &clqsNums );

        // Keep only the candidate cliques that also contain *ndsIt.
        // BUG FIX: the original iterated backwards down to
        // factsNums.begin() - 1 (undefined behavior) and kept using an
        // iterator invalidated by erase(); an index-based backward loop
        // avoids both problems with identical results.
        int t;
        for( t = int(factsNums.size()) - 1; t >= 0; --t )
        {
            if( std::find( clqsNums, clqsNums + numOfClqs, factsNums[t] )
                == clqsNums + numOfClqs )
            {
                factsNums.erase( factsNums.begin() + t );
            }
        }

        if( factsNums.empty() )
        {
            return 0;
        }
    }

    intVector::const_iterator factsNumsIt   = factsNums.begin(),
	factsNums_end = factsNums.end();

    for( ; factsNumsIt != factsNums_end; ++factsNumsIt )
    {
	params->push_back(GetFactor(*factsNumsIt));
    }

    return 1;
}
Ejemplo n.º 6
0
CSoftMaxCPD* CSoftMaxCPD::Create(const int *domain, int nNodes,
    CModelDomain* pMD)
{
    // Factory for a softmax CPD. The domain must contain at least one
    // continuous variable; otherwise CInconsistentType is thrown.
    PNL_CHECK_IS_NULL_POINTER(domain);
    PNL_CHECK_IS_NULL_POINTER(pMD);
    PNL_CHECK_LEFT_BORDER(nNodes, 1);

    // Count the continuous (non-discrete) variables in the domain.
    int numContinuous = 0;
    for (int i = 0; i < nNodes; ++i)
    {
        if (!pMD->GetVariableType(domain[i])->IsDiscrete())
        {
            ++numContinuous;
        }
    }
    if (numContinuous == 0)
    {
        PNL_THROW(CInconsistentType,
            "SoftMax node does not have continuous parents");          
    }

    CSoftMaxCPD *pNewParam = new CSoftMaxCPD(domain, nNodes, pMD);
    PNL_CHECK_IF_MEMORY_ALLOCATED(pNewParam);

    return pNewParam;
}
PNL_USING
PNL_BEGIN

/////////////////////////////////////////////////////////////////////////////
//float C2DNumericDenseMatrix

C2DNumericDenseMatrix<float> *C2DNumericDenseMatrix<float>::Create( const int *lineSizes,
                                                                    const float *data, int Clamp )
{
    // Factory for a 2D dense float matrix with the given dimensions and
    // initial data; Clamp > 0 creates a clamped (read-only) matrix.
    PNL_CHECK_IS_NULL_POINTER( lineSizes );
    PNL_CHECK_LEFT_BORDER( Clamp, 0 );
    PNL_CHECK_IS_NULL_POINTER( data );

    // Both dimensions must be non-negative.
    int i = 0;
    for( i = 0; i < 2; i++ )
    {
        if( lineSizes[i] < 0 )
        {
            PNL_THROW( COutOfRange, "range is negative" )
        }
    }
    // FIX: removed a duplicated null check on `data` (already checked above)
    // and added the allocation check used by every other factory function.
    C2DNumericDenseMatrix<float> *pxMatrix = 
                new C2DNumericDenseMatrix<float>( 2, lineSizes, data, (Clamp>0));
    PNL_CHECK_IF_MEMORY_ALLOCATED( pxMatrix );
    return pxMatrix;
}
Ejemplo n.º 8
0
CGaussianCPD* CGaussianCPD::Create( const int *domain, int nNodes,
		                   CModelDomain* pMD )
{
    // Factory: validates the arguments, then builds a Gaussian CPD over
    // the given domain in the given model domain.
    PNL_CHECK_IS_NULL_POINTER( domain );
    PNL_CHECK_IS_NULL_POINTER( pMD );
    PNL_CHECK_LEFT_BORDER( nNodes, 1 );

    CGaussianCPD *pCPD = new CGaussianCPD( domain, nNodes, pMD );
    PNL_CHECK_IF_MEMORY_ALLOCATED( pCPD );
    return pCPD;
}
Ejemplo n.º 9
0
PNL_USING

CFactors* CFactors::Create(int numberOfFactors)
{
    // Factory for a factor collection sized for numberOfFactors entries.
    PNL_CHECK_LEFT_BORDER( numberOfFactors, 1 );

    CFactors *pResult = new CFactors(numberOfFactors);
    PNL_CHECK_IF_MEMORY_ALLOCATED(pResult);
    return pResult;
}
Ejemplo n.º 10
0
CMNet* CMNet::Create( int numberOfNodes, int numberOfNodeTypes,
		     const CNodeType *nodeTypes, const int *nodeAssociation,
		     int numberOfCliques, const int *cliqueSizes,
		     const int **cliques )
{
    // Factory: validates every argument, then builds the Markov network
    // from node types, node associations, and the clique structure.
    PNL_CHECK_LEFT_BORDER( numberOfNodes, 1 );
    PNL_CHECK_RANGES( numberOfNodeTypes, 1, numberOfNodes );
    PNL_CHECK_IS_NULL_POINTER(nodeTypes);
    PNL_CHECK_IS_NULL_POINTER(nodeAssociation);
    PNL_CHECK_LEFT_BORDER( numberOfCliques, 1 );
    PNL_CHECK_IS_NULL_POINTER(cliqueSizes);
    PNL_CHECK_IS_NULL_POINTER(cliques);

    CMNet *pResult = new CMNet( numberOfNodes, numberOfNodeTypes,
        nodeTypes, nodeAssociation, numberOfCliques, cliqueSizes, cliques );
    PNL_CHECK_IF_MEMORY_ALLOCATED(pResult);

    return pResult;
}
Ejemplo n.º 11
0
// Creates a Gaussian CPD whose distribution is the unit function over the
// given domain (a distribution that is identically 1, used as a neutral
// element in message passing / multiplication).
CGaussianCPD*
CGaussianCPD::CreateUnitFunctionCPD(const int *domain, int nNodes, CModelDomain* pMD)
{
    PNL_CHECK_IS_NULL_POINTER( domain );
    PNL_CHECK_IS_NULL_POINTER( pMD );
    PNL_CHECK_LEFT_BORDER( nNodes, 1 );

    // Build a regular Gaussian CPD first, then swap its distribution
    // function for a unit-function one.
    CGaussianCPD* resCPD = new CGaussianCPD( domain, nNodes, pMD );
    intVector dom = intVector( domain, domain + nNodes );
    pConstNodeTypeVector ntVec;
    pMD->GetVariableTypes( dom, &ntVec );
    CGaussianDistribFun* UniData =
	CGaussianDistribFun::CreateUnitFunctionDistribution( nNodes,
	&ntVec.front(), 0, 0);
    // Replace the distribution created by the constructor with the unit
    // function; the CPD takes ownership of the new distribution object.
    delete (resCPD->m_CorrespDistribFun);
    resCPD->m_CorrespDistribFun = UniData;
    return resCPD;
}
Ejemplo n.º 12
0
PNL_USING

CMNet* CMNet::Create( int numberOfCliques, const int *cliqueSizes,
		     const int **cliques, CModelDomain* pMD )
{
    // Factory: builds a Markov network over an existing model domain
    // from the given clique structure.
    PNL_CHECK_LEFT_BORDER( numberOfCliques, 1 );
    PNL_CHECK_IS_NULL_POINTER(cliqueSizes);
    PNL_CHECK_IS_NULL_POINTER(cliques);
    PNL_CHECK_IS_NULL_POINTER( pMD );

    CMNet *pResult = new CMNet( numberOfCliques, cliqueSizes, cliques, pMD );
    PNL_CHECK_IF_MEMORY_ALLOCATED(pResult);

    return pResult;
}
Ejemplo n.º 13
0
CGraph* CreateCompleteGraph(int num_nodes)
{
    // Builds a complete directed graph: one edge (from -> to) for every
    // pair with from < to.
    PNL_CHECK_LEFT_BORDER( num_nodes, 1 );

    CGraph *pGraph = CGraph::Create(0, NULL, NULL, NULL);
    PNL_CHECK_IF_MEMORY_ALLOCATED( pGraph );

    srand((unsigned int)time(NULL));

    pGraph->AddNodes(num_nodes);

    for (int to = 1; to < num_nodes; ++to)
        for (int from = 0; from < to; ++from)
            pGraph->AddEdge(from, to, 1);

    return pGraph;
}
Ejemplo n.º 14
0
void CMlLearningEngine::Learn()
{
    // Takes the information from m_pEvidences and learns the factors of
    // the graphical model by maximum likelihood (with or without priors).
    float logLikTmp = 0;
    if(!m_pGrModel)
    {
        PNL_THROW( CNULLPointer, "no graphical model")
    }
    CStaticGraphicalModel *grmodel = this->GetStaticModel();
    CFactor *parameter = NULL;
    int numberOfDomains = grmodel -> GetNumberOfFactors();
    
    for( int domainNodes = 0; domainNodes < numberOfDomains; domainNodes++ )
    {
        // BUG FIX: the original assigned the factor to an undeclared name
        // ("factor") and then called ProcessingStatisticalData through the
        // never-assigned, NULL "parameter" pointer. Use one variable for
        // both the statistics update and the likelihood computation.
        parameter = grmodel->GetFactor( domainNodes );
        parameter->UpdateStatisticsML( &m_Vector_pEvidences.front(), 
            m_Vector_pEvidences.size() );
        PNL_CHECK_LEFT_BORDER(m_numberOfAllEvidences, 1);
        logLikTmp += parameter->ProcessingStatisticalData(m_numberOfAllEvidences);
    }
    switch( grmodel -> GetModelType() )
    {
    case mtBNet:
        {
            break;
        }
    case mtMRF2:
    case mtMNet:
        {
            // Markov nets: potentials are learnt separately and the
            // log-likelihood is recomputed from them.
            logLikTmp = _LearnPotentials();
            break;
        }
    default:
        {
            PNL_THROW(CBadConst, "model type" )
                break;
        }
    }
    m_critValue.push_back(logLikTmp);
}
void CBayesLearningEngine::AppendData( int dim, 
                                       const CEvidence* const* pEvidencesIn )
{
  	//create vector consists of points to evidences
    PNL_CHECK_LEFT_BORDER(dim, 1);
    int i = 0;
    for( i = 0; i < dim; i++ )
    {
        if (!pEvidencesIn[i])
        {
            PNL_THROW(CNULLPointer,"evidence")
        }
        if( IsInfNeed(pEvidencesIn[i]) )
        {
            PNL_THROW(CNotImplemented,"all nodes should be observed")
        }
        m_Vector_pEvidences.push_back( pEvidencesIn[i] );
    }
    m_numberOfAllEvidences = m_Vector_pEvidences.size();
}
Ejemplo n.º 16
0
//generate uniform integer distribution in the range [left,right]
int pnlRand(int left, int right)
{
    // Per-thread RNG state index (OpenMP parallel builds).
    int myid = PAR_OMP_NUM_CURR_THREAD;
    
    PNL_CHECK_LEFT_BORDER( right, left );
    g_RNG[myid].m_cxRandState.disttype = CX_RAND_UNI;
    //set range
    cxRandSetRange( &g_RNG[myid].m_cxRandState, left, right, -1 );

    //create matrix wrapper for 1 floating point value
    float val = 0;
    CxMat mat = cxMat( 1, 1, CX_32FC1, &val );

    //generate value
    cxRand( &g_RNG[myid].m_cxRandState, &mat );
    double x1;
   
    // Split the generated float into integral (x1) and fractional (ip)
    // parts — note: despite its name, `ip` holds the FRACTIONAL part.
    double ip = modf(val, &x1);
    // Round to the nearest integer, moving away from zero when the
    // fractional part exceeds 0.5 in magnitude.
    return (int)(x1 >= 0 ? ( ip > 0.5 ? ++x1 : x1 ) : ( ip > -0.5 ? --x1 : x1 )); 
    
}
Ejemplo n.º 17
0
void CMNet::GetFactors( int numberOfNodes, const int *nodes,
					    int *numberOfFactors, CFactor ***params ) const
{
    // C-style wrapper around the vector-based GetFactors overload:
    // exposes the matching factors as an output count plus pointer array.
	// bad-args check
    PNL_CHECK_LEFT_BORDER( numberOfNodes, 1 );
    PNL_CHECK_IS_NULL_POINTER(nodes);
    PNL_CHECK_IS_NULL_POINTER(numberOfFactors);
    PNL_CHECK_IS_NULL_POINTER(params);
	// bad-args check end

    if( !GetFactors( numberOfNodes, nodes, &m_paramsForNodes ) )
    {
        // No factor contains all the requested nodes.
        *numberOfFactors = 0;
        *params = NULL;
        return;
    }

    // NOTE: *params points into the member cache m_paramsForNodes, which
    // stays valid only until the next GetFactors call.
    *numberOfFactors = m_paramsForNodes.size();
    *params = &m_paramsForNodes.front();
}
Ejemplo n.º 18
0
CGraph* CreateRandomGraphWithToyQMRSpecific(int num_nodes, 
    int num_indep_nodes, int max_size_family)
{
    // Builds a random two-level (toy-QMR-like) DAG: the first
    // num_indep_nodes nodes are parentless; every remaining node draws a
    // random set of distinct parents from the independent nodes only.
    PNL_CHECK_LEFT_BORDER( num_nodes, 10 );
    PNL_CHECK_RANGES( num_indep_nodes, 1, num_nodes-1 );
    PNL_CHECK_RANGES( max_size_family, 2, num_nodes );
    
    int i, j;
    
    CGraph *pGraph = CGraph::Create(0, NULL, NULL, NULL);
    PNL_CHECK_IF_MEMORY_ALLOCATED( pGraph );
    
    srand((unsigned int)time(NULL));

    pGraph->AddNodes(num_nodes);
    
    int num_parents;
    int ind_parent;
    intVector prev_nodes(0);
    for ( i = num_indep_nodes; i < num_nodes; i++)
    {
        // Candidate parents: only the independent (top-level) nodes.
        prev_nodes.resize(0);
        for ( j = 0; j < num_indep_nodes; j++) 
            prev_nodes.push_back(j);

        // Family size (excluding the child itself) in [1, max_size_family-1].
        num_parents = rand() % (max_size_family - 1);
        num_parents += 1;
        num_parents = (num_parents > i) ? i : num_parents;
        // BUG FIX: parents are drawn without replacement from only
        // num_indep_nodes candidates; asking for more would empty the
        // pool and evaluate "rand() % 0" (undefined behavior).
        if (num_parents > num_indep_nodes)
        {
            num_parents = num_indep_nodes;
        }
    
        for ( j = 0; j < num_parents; j++)
        {
            // Pick a random remaining candidate and remove it from the pool.
            ind_parent = rand() % prev_nodes.size();
            pGraph->AddEdge(prev_nodes[ind_parent], i, 1);
            prev_nodes.erase(prev_nodes.begin() + ind_parent);
        }
    }

    return pGraph;
}
Ejemplo n.º 19
0
// Builds a random DAG suitable for an influence-diagram (ID) net:
// the first num_indep_nodes nodes have no parents; every later node picks
// a random set of distinct parents among all earlier nodes. A second pass
// then rewires/links childless "leaf" nodes so that sibling leaves with a
// single shared parent are chained together.
CGraph* CreateRandomAndSpecificForIDNetGraph(int num_nodes,
  int num_indep_nodes, int max_size_family)
{
  PNL_CHECK_LEFT_BORDER(num_nodes, 10);
  PNL_CHECK_RANGES(num_indep_nodes, 1, num_nodes-1);
  PNL_CHECK_RANGES(max_size_family, 2, num_nodes);
  
  int i, j, k;
  
  CGraph *pGraph = CGraph::Create(0, NULL, NULL, NULL);
  PNL_CHECK_IF_MEMORY_ALLOCATED(pGraph);
  
  srand((unsigned int)time(NULL));
  
  pGraph->AddNodes(num_nodes);
  
  int num_parents;
  int ind_parent;
  intVector prev_nodes(0);
  for (i = num_indep_nodes; i < num_nodes; i++)
  {
    // Candidate parents: every node with a smaller index (keeps the
    // graph acyclic and topologically sorted).
    prev_nodes.resize(0);
    for (j = 0; j < i; j++)
      prev_nodes.push_back(j);
    
    // Random family size in [1, max_size_family-1], capped by the number
    // of available candidates (i).
    num_parents = rand() % (max_size_family - 1);
    num_parents += 1;
    num_parents = (num_parents > i) ? i : num_parents;
    
    for (j = 0; j < num_parents; j++)
    {
      // Draw a parent without replacement from the candidate pool.
      ind_parent = rand() % prev_nodes.size();
      pGraph->AddEdge(prev_nodes[ind_parent], i, 1);
      prev_nodes.erase(prev_nodes.begin() + ind_parent);
    }
  }
  
  // Second pass: for each childless node i, look at its parents' other
  // children; any sibling that is also childless and has exactly one
  // parent gets connected to i (replacing the sibling's parent edge when
  // i precedes it, or adding an edge from the sibling otherwise).
  intVector parents(0);
  intVector childs(0);
  for (i = 0; i < num_nodes; i++)
  {
    if (pGraph->GetNumberOfChildren(i) == 0)
    {
      pGraph->GetParents(i, &parents);
      for (j = 0; j < parents.size(); j++)
      {
        pGraph->GetChildren(parents[j], &childs);
        for (k = 0; k < childs.size(); k++)
          if ((childs[k] != i) && 
            (pGraph->GetNumberOfChildren(childs[k]) == 0) &&
            (pGraph->GetNumberOfParents(childs[k]) == 1))
          {
            if (i < childs[k])
            {
              // Keep edges pointing from lower to higher index.
              pGraph->RemoveEdge(parents[j], childs[k]);
              pGraph->AddEdge(i, childs[k], 1);
            }
            else
            {
              pGraph->AddEdge(childs[k], i, 1);
            }
          }
      }
    }
  }
  
  return pGraph;
}
// Computes the marginal JPD (or MPE, when maximizing) over the query nodes
// from the current beliefs. Single-node queries read the node's belief
// directly; multi-node queries require the nodes to span exactly one
// factor's family. Results go to m_pQueryJPD / m_pEvidenceMPE.
void CSpecPearlInfEngine::MarginalNodes( const int* query, int querySize,
                                     int notExpandJPD )
{
    if( notExpandJPD == 1 )
    {
        PNL_THROW( CInconsistentType, "pearl inference work with expanded distributions only" );
    }

    PNL_CHECK_LEFT_BORDER(querySize, 1);
    
    // Drop results from any previous query.
    if( m_pQueryJPD )
    {
        delete m_pQueryJPD;
    }

    if( m_pEvidenceMPE )
    {
        delete m_pEvidenceMPE;
    }
    
    if( querySize == 1 )
    {
        if( m_bMaximize )
        {
            //compute MPE
            m_pEvidenceMPE = m_beliefs[m_curState][query[0]]->GetMPE();
        }
        else
        {
            // get marginal for one node - cretae parameter on existing data - m_beliefs[query[0]];
            m_pQueryJPD = m_beliefs[m_curState][query[0]]->GetNormalized();
        }
    }
    else
    {
        // Multi-node query: find the single factor whose domain covers
        // all the query nodes.
        int numParams;
        CFactor ** params; 
        m_pGraphicalModel->GetFactors( querySize, query, &numParams ,&params );
        if ( !numParams )
        {
            PNL_THROW( CBadArg, "only members of one family can be in query instead of one node" ) 
        }
        if( numParams != 1 )
        {
            PNL_THROW( CBadArg, "add more nodes to specify which of jpd you want to know")
        }
        int i;
        //get informatiom from parametr on these nodes to crate new parameter
        //with updated Data
        CPotential* allPot;
        if( m_modelType == mtMRF2 )
        {
            //just multiply and marginalize
            allPot = static_cast<CPotential*>(params[0]->Clone());
        }
        else
        {
            //m_modelType == mtBNet
            //need to convert to potential withiut evidence and multiply
            //and marginalize after that
            allPot = static_cast<CCPD*>(params[0])->ConvertToPotential();
        }
        //get neighbors of last node in domain (child for CPD) 
        //to compute JPD for his family
        int domSize;
        const int* dom;
        params[0]->GetDomain(&domSize, &dom);
        //start multiply to add information after inference
        // Fold every domain node's current belief into the potential.
        for( i = 0; i < domSize; i++ )
        {
            (*allPot) *= (*m_beliefs[m_curState][dom[i]]) ;
        }
        m_pQueryJPD = allPot->Marginalize( query, querySize, m_bMaximize );
        //fixme - can replace by normalize in self
        m_pQueryJPD->Normalize();
        if( m_bMaximize )
        {
            //compute MPE
            // The JPD was only an intermediate for MPE extraction.
            m_pEvidenceMPE = m_pQueryJPD->GetMPE();
            delete m_pQueryJPD;
            m_pQueryJPD = NULL;
        }
    }
}
Ejemplo n.º 21
0
CBNet* CreateRandomBayessian(CGraph* pGraph, int max_num_states)
{
    // Builds a random discrete Bayesian network on top of pGraph: each node
    // gets a random number of states in [1, max_num_states] and a random,
    // properly normalized tabular CPD. pGraph must be a connected,
    // topologically sorted DAG; ownership of pGraph passes to the net.
    PNL_CHECK_LEFT_BORDER( max_num_states, 1 );
    PNL_CHECK_IF_MEMORY_ALLOCATED( pGraph );

    if( !pGraph->IsDAG() )
    {
        PNL_THROW( CInconsistentType, " the graph should be a DAG " );
    }
    if( !pGraph->IsTopologicallySorted() )
    {
        PNL_THROW( CInconsistentType, 
            " the graph should be sorted topologically " );
    }
    if (pGraph->NumberOfConnectivityComponents() > 1)
    {
        PNL_THROW( CInconsistentType, " the graph should be linked " );
    }

    int i, j, k;

    int num_nodes = pGraph->GetNumberOfNodes();
    CNodeType *nodeTypes = new CNodeType [num_nodes];
    int num_states;
    
    // One node type per node, each with a random number of states.
    for ( i = 0; i < num_nodes; i++ )
    {
        num_states = GetRandomNumberOfStates(max_num_states);
        nodeTypes[i].SetType(1, num_states, nsChance);
    }

    int *nodeAssociation = new int[num_nodes];
    for ( i = 0; i < num_nodes; i++ )
    {
        nodeAssociation[i] = i;
    }

    CBNet *pBNet = CBNet::Create( num_nodes, num_nodes, nodeTypes,
                                     nodeAssociation, pGraph );
    
    CModelDomain* pMD = pBNet->GetModelDomain();
    
    CFactor **myParams = new CFactor*[num_nodes];
    int *nodeNumbers = new int[num_nodes];
    int **domains = new int*[num_nodes];

    // Domain of node i = its parents followed by the node itself.
    intVector parents(0);
    for ( i = 0; i < num_nodes; i++)
    {
        nodeNumbers[i] = pGraph->GetNumberOfParents(i) + 1;
        domains[i] = new int[nodeNumbers[i]];
        pGraph->GetParents(i, &parents);
        
        for ( j = 0; j < parents.size(); j++ )
            domains[i][j] = parents[j];
        domains[i][nodeNumbers[i]-1] = i;
    }

    pBNet->AllocFactors();

    for( i = 0; i < num_nodes; i++ )
    {
        myParams[i] = CTabularCPD::Create( domains[i], 
            nodeNumbers[i], pMD);
    }

    float **data = new float*[num_nodes];
    int size_data;
    int num_states_node;
    int num_blocks;
    intVector size_nodes(0);
    float belief, sum_beliefs;

    // Fill each CPD table with random conditional distributions: for every
    // parent configuration (block), entries are positive and sum to 1.
    for ( i = 0; i < num_nodes; i++ )
    {
        size_data = 1;
        size_nodes.resize(0);
        for ( j = 0; j < nodeNumbers[i]; j++ )
        {
            size_nodes.push_back(pBNet->GetNodeType(
                domains[i][j])->GetNodeSize());
            size_data *= size_nodes[j];
        }
        num_states_node = size_nodes[size_nodes.size() - 1];
        num_blocks = size_data / num_states_node;
        
        data[i] = new float[size_data];

        for ( j = 0; j < num_blocks; j++ )
        {
            sum_beliefs = 0.0;
            for ( k = 0; k < num_states_node - 1; k++ )
            {
                belief = GetBelief(1.0 - sum_beliefs);
                data[i][j * num_states_node + k] = belief;
                sum_beliefs += belief;
            }
            // The last state takes the remaining probability mass.
            belief = 1.0 - sum_beliefs;
            data[i][j * num_states_node + num_states_node - 1] = belief;
        }
    }

    for( i = 0; i < num_nodes; i++ )
    {
        myParams[i]->AllocMatrix(data[i], matTable);
        pBNet->AttachFactor(myParams[i]);
    }    

    // LEAK FIX: the original released only nodeTypes and nodeAssociation.
    // Attached factors are owned by the network, and AllocMatrix takes the
    // table values by copy (assumed from PNL's const-data matrix factories
    // — confirm if this code is ported), so every scratch buffer can be
    // freed here.
    for ( i = 0; i < num_nodes; i++ )
    {
        delete [] domains[i];
        delete [] data[i];
    }
    delete [] domains;
    delete [] data;
    delete [] myParams;
    delete [] nodeNumbers;

    delete [] nodeTypes;
    delete [] nodeAssociation;

    return pBNet;
}
Ejemplo n.º 22
0
// Runs EM (or plain ML when all evidence is complete) over the model's
// factors using the evidences accumulated so far. SoftMax factors get a
// dedicated gradient-based maximum-likelihood step driven by a dense
// "full evidence" matrix built from the observations (and, for hidden
// nodes, from inferred posteriors). Appends the final log-likelihood(s)
// to m_critValue and marks all evidences as processed.
void CEMLearningEngine::Learn()
{
    CStaticGraphicalModel *pGrModel =  this->GetStaticModel();
    PNL_CHECK_IS_NULL_POINTER(pGrModel);
    // There must be at least one not-yet-processed evidence.
    PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
    
    // Reuse a previously created inference engine; build a junction-tree
    // engine only when some evidence is incomplete (inference needed).
    CInfEngine *pInfEng = NULL;
    if (m_pInfEngine)
    {
        pInfEng = m_pInfEngine;
    }
    else
    {
        if (!m_bAllObserved)
        {
            pInfEng = CJtreeInfEngine::Create(pGrModel);
            m_pInfEngine = pInfEng;
        }
    }
    
    float loglik = 0.0f;
    
    int nFactors = pGrModel->GetNumberOfFactors();
    const CEvidence *pEv;
    CFactor *pFactor;
    
    int iteration = 0;
    int ev;

    // SoftMax factors need the dense evidence matrix ("cast") machinery.
    bool IsCastNeed = false;
    int i;
    for( i = 0; i < nFactors; i++ )
    {
        pFactor = pGrModel->GetFactor(i);
        EDistributionType dt = pFactor->GetDistributionType();
        if ( dt == dtSoftMax ) IsCastNeed = true;
    }

    float ** full_evid = NULL;
    if (IsCastNeed)
    {
        BuildFullEvidenceMatrix(&full_evid);
    }

    
    if (IsAllObserved())
    {
        // Complete data: a single ML pass, no EM iterations required.
        int i;
        float **evid = NULL;
        EDistributionType dt;
        CFactor *factor = NULL;
        for (i = 0; i < nFactors; i++)
        {
            factor = pGrModel->GetFactor(i);
            dt = factor->GetDistributionType();
            if (dt != dtSoftMax)
            {
                factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()], 
                    GetNumEv() - GetNumberProcEv());
            }
            else
            {
                // SoftMax factor: slice the family's rows out of the full
                // evidence matrix and fit by iterative maximum likelihood.
                intVector family;
				family.resize(0);
                pGrModel->GetGraph()->GetParents(i, &family);
                family.push_back(i);
                CSoftMaxCPD* SoftMaxFactor = static_cast<CSoftMaxCPD*>(factor);
                SoftMaxFactor->BuildCurrentEvidenceMatrix(&full_evid, 
					&evid,family,m_Vector_pEvidences.size());
				SoftMaxFactor->InitLearnData();
                SoftMaxFactor->SetMaximizingMethod(m_MaximizingMethod);
                SoftMaxFactor->MaximumLikelihood(evid, m_Vector_pEvidences.size(),
                    0.00001f, 0.01f);
                SoftMaxFactor->CopyLearnDataToDistrib();
                // Release the per-family evidence slice.
                for (int k = 0; k < factor->GetDomainSize(); k++)
                {
                    delete [] evid[k];
                }
                delete [] evid;
            }
        }
        m_critValue.push_back(UpdateModel());
    }
    else
    {
        // Incomplete data: full EM loop (E-step via inference, M-step via
        // the factors' statistics updates), repeated until convergence or
        // the iteration limit.
        bool bContinue;
        const CPotential * pot;
        
/*        bool IsCastNeed = false;
        int i;
        for( i = 0; i < nFactors; i++ )
        {
            pFactor = pGrModel->GetFactor(i);
            EDistributionType dt = pFactor->GetDistributionType();
            if ( dt == dtSoftMax ) IsCastNeed = true;
        }

        float ** full_evid;
        if (IsCastNeed)
        {
            BuildFullEvidenceMatrix(full_evid);
        }*/
        
        do
        {
            ClearStatisticData();
            iteration++;
            // E-step: accumulate expected statistics over all evidences.
            for( ev = GetNumberProcEv(); ev < GetNumEv() ; ev++ )
            {
                bool bInfIsNeed = !GetObsFlags(ev)->empty(); 
                pEv = m_Vector_pEvidences[ev];
                if( bInfIsNeed )
                {
                    pInfEng->EnterEvidence(pEv, 0, 0);
                }
                int i;
                for( i = 0; i < nFactors; i++ )
                {
                    pFactor = pGrModel->GetFactor(i);
                    int nnodes;
                    const int * domain;
                    pFactor->GetDomain( &nnodes, &domain );
                    if( bInfIsNeed && !IsDomainObserved(nnodes, domain, ev ) )
                    {
                        // Hidden nodes in the domain: use the inferred
                        // posterior over the family.
                        pInfEng->MarginalNodes( domain, nnodes, 1 );
                        pot = pInfEng->GetQueryJPD(); 
                        if ( (!(m_Vector_pEvidences[ev])->IsNodeObserved(i)) && (IsCastNeed) )
                        {
                            // Fill the hidden node's column of the full
                            // evidence matrix from the posterior.
                            Cast(pot, i, ev, &full_evid);
                        }
                        EDistributionType dt;
                        dt = pFactor->GetDistributionType();
                        if ( !(dt == dtSoftMax) )
                            pFactor->UpdateStatisticsEM( /*pInfEng->GetQueryJPD */ pot, pEv );
                    }
                    else
                    {
                        // Fully observed domain: plain ML statistics.
                        if ((pFactor->GetDistributionType()) != dtSoftMax)
                            pFactor->UpdateStatisticsML( &pEv, 1 );
                    }
                }
            }
            
            int i;
/*
            printf ("\n My Full Evidence Matrix");
            for (i=0; i<nFactors; i++)
            {
                for (j=0; j<GetNumEv(); j++)
                {
                    printf ("%f   ", full_evid[i][j]);
                }
                printf("\n");
            } 
*/            
            // M-step for SoftMax factors: fit from the (partly inferred)
            // full evidence matrix, as in the all-observed branch.
            float **evid = NULL;
            EDistributionType dt;
            CFactor *factor = NULL;
            // int i;
            for (i = 0; i < nFactors; i++)
            {
                factor = pGrModel->GetFactor(i);
                dt = factor->GetDistributionType();
                if (dt == dtSoftMax)
                {
					intVector family;
				    family.resize(0);
                    pGrModel->GetGraph()->GetParents(i, &family);
                    family.push_back(i);
                    CSoftMaxCPD* SoftMaxFactor = static_cast<CSoftMaxCPD*>(factor);
					SoftMaxFactor->BuildCurrentEvidenceMatrix(&full_evid, 
						&evid,family,m_Vector_pEvidences.size());
                    SoftMaxFactor->InitLearnData();
                    SoftMaxFactor->SetMaximizingMethod(m_MaximizingMethod);
                    //        SoftMaxFactor->MaximumLikelihood(evid, m_numberOfLastEvidences, 
                    SoftMaxFactor->MaximumLikelihood(evid, m_Vector_pEvidences.size(),
                        0.00001f, 0.01f);
                    SoftMaxFactor->CopyLearnDataToDistrib();
                    for (int k = 0; k < factor->GetDomainSize(); k++)
                    {
                        delete [] evid[k];
                    }
                    delete [] evid;
                }
            }
                        
            // Commit the accumulated statistics and get the log-likelihood.
            loglik = UpdateModel();
            
            // Convergence test: relative log-likelihood change vs. the
            // configured precision, bounded by the max iteration count.
            if( GetMaxIterEM() != 1)
            {
                bool flag = iteration == 1 ? true : 
                (fabs(2*(m_critValue.back()-loglik)/(m_critValue.back() + loglik)) > GetPrecisionEM() );
                
                bContinue = GetMaxIterEM() > iteration && flag;
            }
            else
            {
                bContinue = false;
            }
            m_critValue.push_back(loglik);
            
        }while(bContinue);
    }
    SetNumProcEv( GetNumEv() );
   
    // Release the dense evidence matrix (one row per node).
    if (IsCastNeed)
    {
        int NumOfNodes = pGrModel->GetGraph()->GetNumberOfNodes();
        for (i=0; i<NumOfNodes; i++)
        {
            delete [] full_evid[i];
        }
        delete [] full_evid;
    }

}
Ejemplo n.º 23
0
void CEMLearningEngine::LearnExtraCPDs(int nMaxFamily, pCPDVector* additionalCPDs, floatVector* additionalLLs)
{

    CStaticGraphicalModel *pGrModel =  this->GetStaticModel();
    PNL_CHECK_IS_NULL_POINTER(pGrModel);
    PNL_CHECK_LEFT_BORDER(GetNumEv(), 1);
    
    int numberOfFactors = pGrModel->GetNumberOfFactors();
    int numberOfAddFactors = additionalCPDs->size();
    
    additionalLLs->resize(numberOfAddFactors);
    additionalLLs->clear();
    
    m_vFamilyLogLik.resize(numberOfFactors);
    float	loglik = 0.0f, ll;
    int		i, ev;
    int iteration = 0;
    const CEvidence* pEv;
    
    CFactor *factor = NULL;
    int nnodes;
    const int * domain;
    
    bool bInfIsNeed;
    CInfEngine *pInfEng = m_pInfEngine;
    
    if (IsAllObserved())
    {
        for (i = 0; i < numberOfFactors; i++)
        {
            factor = pGrModel->GetFactor(i);
            factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()], 
                GetNumEv() - GetNumberProcEv());
        }
        
        for( ev = 0; ev < GetNumEv() ; ev++)
        {
            pEv = m_Vector_pEvidences[ev];
            for( i = 0; i < numberOfAddFactors; i++ )
            {
                factor = static_cast<CFactor*>((*additionalCPDs)[i]);
                factor->UpdateStatisticsML( &pEv, 1 );
            }
        }
        
        switch (pGrModel->GetModelType())
        {
        case mtBNet:
            {
                for( i = 0; i<numberOfFactors; i++ )
                {
                    factor = pGrModel->GetFactor(i);
                    ll = factor->ProcessingStatisticalData( GetNumEv());
                    m_vFamilyLogLik[i] = ll;
                    loglik += ll;
                }
                
                for( i = 0; i < numberOfAddFactors; i++ )
                {
                    factor = static_cast<CFactor*>((*additionalCPDs)[i]);
                    ll = factor->ProcessingStatisticalData( GetNumEv());
                    (*additionalLLs)[i] = ll;
                }
                break;
            }
        case mtMRF2:
        case mtMNet:
            {	
                break;
            }
        default:
            {
                PNL_THROW(CBadConst, "model type" )
                    break;
            }
        }
        m_critValue.push_back(loglik);    
        
    }
    else
    {
Ejemplo n.º 24
0
// Creates a random influence diagram (ID) network.
//
// The function builds a random DAG, classifies its nodes (leaves become
// value nodes, a random subset of internal nodes becomes decision nodes,
// the rest are chance nodes) and fills in random tabular CPDs, start
// policies for the decision nodes and utilities for the value nodes.
//
// num_nodes                     - total number of nodes in the net
// num_indep_nodes               - number of independent (root) nodes
// max_size_family               - maximum family size (parents + node)
// num_decision_nodes            - desired number of decision nodes
//                                 (clipped to the number of candidates)
// max_num_states_chance_nodes   - upper bound on chance-node cardinality
// max_num_states_decision_nodes - upper bound on decision-node cardinality
// min_utility, max_utility      - utility range for value nodes
// is_uniform_start_policy       - if true, decision CPDs are uniform
//
// Returns a newly created CIDNet; ownership passes to the caller.
CIDNet* CreateRandomIDNet(int num_nodes, int num_indep_nodes,
  int max_size_family, int num_decision_nodes, int max_num_states_chance_nodes,
  int max_num_states_decision_nodes, int min_utility, int max_utility,
  bool is_uniform_start_policy)
{
  PNL_CHECK_RANGES(num_decision_nodes, 1, num_nodes-1);
  PNL_CHECK_LEFT_BORDER(max_num_states_chance_nodes, 1);
  PNL_CHECK_LEFT_BORDER(max_num_states_decision_nodes, 1);
  PNL_CHECK_LEFT_BORDER(max_utility, min_utility);
  
  CGraph* pGraph = 
    CreateRandomAndSpecificForIDNetGraph(num_nodes, num_indep_nodes,
    max_size_family);
  
  // an influence diagram must be a linked, topologically sorted DAG
  if (!pGraph->IsDAG())
  {
    PNL_THROW(CInconsistentType, " the graph should be a DAG ");
  }
  
  if (!pGraph->IsTopologicallySorted())
  {
    PNL_THROW(CInconsistentType, 
      " the graph should be sorted topologically ");
  }
  if (pGraph->NumberOfConnectivityComponents() > 1)
  {
    PNL_THROW(CInconsistentType, " the graph should be linked ");
  }
  
  int i, j, k;
  
  CNodeType *nodeTypes = new CNodeType [num_nodes];
  
  // leaves (no children) become value nodes; every other node is a
  // candidate for a decision node and defaults to a chance node
  intVector nonValueNodes(0);
  intVector posibleDecisionNodes(0);
  nonValueNodes.resize(0);
  posibleDecisionNodes.resize(0);
  for (i = 0; i < num_nodes; i++)
  {
    if (pGraph->GetNumberOfChildren(i) == 0)
    {
      nodeTypes[i].SetType(1, 1, nsValue);
    }
    else
    {
      nonValueNodes.push_back(i);
      posibleDecisionNodes.push_back(i);
    }
  }
  int ind_decision_node;
  int num_states;
  int index;
  int node;
  intVector neighbors(0);
  neighborTypeVector neigh_types(0);

  // pick decision nodes at random; a chosen node's neighbors are removed
  // from the candidate set, so no two decision nodes end up adjacent
  num_decision_nodes = (num_decision_nodes > posibleDecisionNodes.size()) ? 
    posibleDecisionNodes.size() : num_decision_nodes;
  for (i = 0; (i < num_decision_nodes) && (posibleDecisionNodes.size()>0); i++)
  {
    ind_decision_node = rand() % posibleDecisionNodes.size();
    node = posibleDecisionNodes[ind_decision_node];
    num_states = GetRandomNumberOfStates(max_num_states_decision_nodes);
    nodeTypes[node].SetType(1, num_states, nsDecision);
    
    index = -1;
    for (j = 0; j < nonValueNodes.size(); j++)
    {
      if (nonValueNodes[j] == node)
      {
        index = j;
        break;
      }
    }
    if (index != -1)
      nonValueNodes.erase(nonValueNodes.begin() + index);
      
    posibleDecisionNodes.erase(posibleDecisionNodes.begin() + 
      ind_decision_node);
    pGraph->GetNeighbors(node, &neighbors, &neigh_types);
    for (j = 0; j < neighbors.size(); j++)
    {
      index = -1;
      for (k = 0; k < posibleDecisionNodes.size(); k++)
      {
        if (neighbors[j] == posibleDecisionNodes[k])
        {
          index = k;
          break;
        }
      }
      if (index != -1)
        posibleDecisionNodes.erase(posibleDecisionNodes.begin() + index);
    }
  }
  // the remaining internal nodes are chance nodes with random cardinality
  for (i = 0; i < nonValueNodes.size(); i++)
  {
    num_states = GetRandomNumberOfStates(max_num_states_chance_nodes);
    nodeTypes[nonValueNodes[i]].SetType(1, num_states, nsChance);
  }
  
  int *nodeAssociation = new int[num_nodes];
  for (i = 0; i < num_nodes; i++)
  {
    nodeAssociation[i] = i;
  }
  
  CIDNet *pIDNet = CIDNet::Create(num_nodes, num_nodes, nodeTypes,
    nodeAssociation, pGraph);
  pGraph = pIDNet->GetGraph();
  CModelDomain* pMD = pIDNet->GetModelDomain();
  
  CFactor **myParams = new CFactor*[num_nodes];
  int *nodeNumbers = new int[num_nodes];
  int **domains = new int*[num_nodes];
  
  // a node's domain is its parents followed by the node itself
  intVector parents(0);
  for (i = 0; i < num_nodes; i++)
  {
    nodeNumbers[i] = pGraph->GetNumberOfParents(i) + 1;
    domains[i] = new int[nodeNumbers[i]];
    pGraph->GetParents(i, &parents);
    
    for (j = 0; j < parents.size(); j++)
    {
      domains[i][j] = parents[j];
    }
    domains[i][nodeNumbers[i]-1] = i;
  }
  
  pIDNet->AllocFactors();
  
  for (i = 0; i < num_nodes; i++)
  {
    myParams[i] = CTabularCPD::Create(domains[i], nodeNumbers[i], pMD);
  }
  
  float **data = new float*[num_nodes];
  int size_data;
  int num_states_node;
  int num_blocks;
  intVector size_nodes(0);
  float belief, sum_beliefs;
  
  for (i = 0; i < num_nodes; i++)
  {
    // table size = product of domain cardinalities; each "block" is one
    // parent configuration holding num_states_node entries
    size_data = 1;
    size_nodes.resize(0);
    for (j = 0; j < nodeNumbers[i]; j++)
    {
      size_nodes.push_back(pIDNet->GetNodeType(domains[i][j])->GetNodeSize());
      size_data *= size_nodes[j];
    }
    num_states_node = size_nodes[size_nodes.size() - 1];
    num_blocks = size_data / num_states_node;
    
    data[i] = new float[size_data];
    switch (pIDNet->GetNodeType(i)->GetNodeState())
    {
      case nsChance:
      {
        // random distribution per block; last entry closes the sum to 1
        for (j = 0; j < num_blocks; j++)
        {
          sum_beliefs = 0.0;
          for (k = 0; k < num_states_node - 1; k++)
          {
            belief = GetBelief(1.0f - sum_beliefs);
            data[i][j * num_states_node + k] = belief;
            sum_beliefs += belief;
          }
          belief = 1.0f - sum_beliefs;
          data[i][j * num_states_node + num_states_node - 1] = belief;
        }
        break;
      }
      case nsDecision:
      {
        // start policy: uniform or random, depending on the flag
        if (is_uniform_start_policy)
        {
          belief = 1.0f / float(num_states_node);
          for (j = 0; j < num_blocks; j++)
          {
            sum_beliefs = 0.0;
            for (k = 0; k < num_states_node - 1; k++)
            {
              data[i][j * num_states_node + k] = belief;
              sum_beliefs += belief;
            }
            data[i][j * num_states_node + num_states_node - 1] = 
              1.0f - sum_beliefs;
          }
        }
        else
        {
          for (j = 0; j < num_blocks; j++)
          {
            sum_beliefs = 0.0;
            for (k = 0; k < num_states_node - 1; k++)
            {
              belief = GetBelief(1.0f - sum_beliefs);
              data[i][j * num_states_node + k] = belief;
              sum_beliefs += belief;
            }
            belief = 1.0f - sum_beliefs;
            data[i][j * num_states_node + num_states_node - 1] = belief;
          }
        }
        break;
      }
      case nsValue:
      {
        // one random utility per parent configuration
        for (j = 0; j < num_blocks; j++)
        {
          data[i][j] = float(GetUtility(min_utility, max_utility));
        }
        break;
      }
    }
  }

  for (i = 0; i < num_nodes; i++)
  {
    myParams[i]->AllocMatrix(data[i], matTable);
    pIDNet->AttachFactor(myParams[i]);
  }

  delete [] nodeTypes;
  delete [] nodeAssociation;

  // CTabularCPD::Create and AllocMatrix copy their input buffers and the
  // attached factors are owned by the net, so the scratch arrays can be
  // released here (they were previously leaked)
  for (i = 0; i < num_nodes; i++)
  {
    delete [] domains[i];
    delete [] data[i];
  }
  delete [] domains;
  delete [] data;
  delete [] myParams;
  delete [] nodeNumbers;

  return pIDNet;
}
Ejemplo n.º 25
0
void CParEMLearningEngine::LearnOMP()
{
    CStaticGraphicalModel *pGrModel =  this->GetStaticModel();
    PNL_CHECK_IS_NULL_POINTER(pGrModel);
    PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);

    //omp_set_num_threads(2);
    int numberOfThreads = omp_get_num_procs();
    //CParPearlInfEngine **pCurrentInfEng = new CParPearlInfEngine*[numberOfThreads];
    CJtreeInfEngine **pCurrentInfEng = new CJtreeInfEngine*[numberOfThreads];
    for (int i = 0; i < numberOfThreads; i++)
        pCurrentInfEng[i] = NULL;
    CFactor *parameter1 = NULL;

    int exit = 0;
    int numberOfParameters = pGrModel->GetNumberOfParameters();
    int domainNodes;
    //int itsML = 0;

    // !!!
    float loglik = -FLT_MAX;
    float loglikOld = -FLT_MAX;
    float epsilon = GetPrecisionEM();
    float stopExpression = epsilon + 1.0f;
    int iteration = 0;

    int ev;

    // to create additional factors
    CFactor **ppAllFactors = new CFactor*[numberOfParameters*numberOfThreads];
    bool *was_updated = new bool[numberOfParameters*numberOfThreads];
    int factor;

#pragma omp parallel for private(factor) default(shared)
    for (factor = 0; factor < numberOfParameters; factor++)
    {
        ppAllFactors[factor] = pGrModel->GetFactor(factor);
        ppAllFactors[factor]->GetDistribFun()->ClearStatisticalData();
        was_updated[factor] = false;
        for (int proc = 1; proc < numberOfThreads; proc++)
        {
            ppAllFactors[factor + proc * numberOfParameters] =
                ppAllFactors[factor]->Clone();
            ppAllFactors[factor + proc * numberOfParameters]->GetDistribFun()->
                ClearStatisticalData();
            was_updated[factor + proc * numberOfParameters]= false;
        };
    };

    int* itsML = new int[numberOfThreads];  
    for (int delta = 0; delta < numberOfThreads; delta++)
    {
        itsML[delta] = 0;
    };

    int start_ev, end_ev;
    do
    {
        iteration++;

        start_ev = GetNumberProcEv();
        end_ev = GetNumEv();

#pragma omp parallel for schedule(dynamic) private(ev)
        for (ev = start_ev; ev < end_ev ; ev++)
        {  
            CFactor *parameter = NULL;
            int DomainNodes_new; 
            int bMaximize = 0;
            int bSumOnMixtureNode = 0;
            int infIsNeed = 0;
            int currentEvidNumber = ev; // !!!

            const CEvidence* pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];

            infIsNeed = !GetObsFlags(ev)->empty(); // !!!

            int Num_thread = omp_get_thread_num();

            if (infIsNeed)
            {
                if (!pCurrentInfEng[Num_thread])
                {
                    pCurrentInfEng[Num_thread] = CJtreeInfEngine::Create(
                        (const CStaticGraphicalModel *)pGrModel);
                }
                pCurrentInfEng[Num_thread]->EnterEvidence(pCurrentEvid, bMaximize,
                    bSumOnMixtureNode);
            }
            for (DomainNodes_new = 0; DomainNodes_new < numberOfParameters; 
            DomainNodes_new++)
            {
                parameter = ppAllFactors[DomainNodes_new + 
                    Num_thread * numberOfParameters];
                if (infIsNeed)
                {
                    int DomainSize;
                    const int *domain;
                    parameter->GetDomain(&DomainSize, &domain);
                    if (IsDomainObserved(DomainSize, domain, currentEvidNumber))
                    {
                        const CEvidence *pEvidences[] = { pCurrentEvid };
                        parameter->UpdateStatisticsML(pEvidences, 1);
                        was_updated[DomainNodes_new+Num_thread*numberOfParameters]= true;
                    }
                    else
                    {
                        pCurrentInfEng[Num_thread]->MarginalNodes(domain, DomainSize, 1);
                        const CPotential * pMargPot = 
                            pCurrentInfEng[Num_thread]->GetQueryJPD();
                        parameter ->UpdateStatisticsEM(pMargPot, pCurrentEvid);
                        was_updated[DomainNodes_new+Num_thread*numberOfParameters]= true;
                    }
                }
                else
                {
                    const CEvidence *pEvidences[] = { pCurrentEvid };
                    parameter->UpdateStatisticsML(pEvidences, 1); 
                    was_updated[DomainNodes_new+Num_thread*numberOfParameters]= true;
                }  
            }
            itsML[Num_thread] = itsML[Num_thread] || !infIsNeed;
        }  // end of parallel for

        for (int delta = 1; delta < numberOfThreads; delta++)
        {
            itsML[0] = itsML[0] || itsML[delta];
        };

        //to join factors
#pragma omp parallel for private(factor) default(shared)
        for (factor = 0; factor < numberOfParameters; factor++)
        {
            for (int proc = 1; proc < numberOfThreads; proc++)
            {
                if (was_updated[factor+proc*numberOfParameters])
                {
                    ppAllFactors[factor]->UpdateStatisticsML(ppAllFactors[factor + 
                        proc*numberOfParameters]);
                    ppAllFactors[factor+proc*numberOfParameters]->GetDistribFun()->
                        ClearStatisticalData();
                };
                was_updated[factor+proc*numberOfParameters] = false;
            };
        };

        switch (pGrModel->GetModelType())
        {
        case mtBNet:
            {
                loglikOld = loglik;
                loglik = 0.0f;
                for (domainNodes = 0; domainNodes < numberOfParameters; domainNodes++)
                {
                    parameter1 = pGrModel->GetFactor(domainNodes);
                    loglik += parameter1->ProcessingStatisticalData(
                        m_numberOfAllEvidences);
                }
                break;
            }
        case mtMRF2:
        case mtMNet:
            {
                loglikOld = loglik;
                loglik = _LearnPotentials();
                break;
            }
        default:
            {
                PNL_THROW(CBadConst, "model type")
                    break;
            }
        }
        stopExpression = float(fabs(2 * (loglikOld - loglik) / 
            (loglikOld + loglik)));

        exit = ((stopExpression > epsilon) && (iteration <= GetMaxIterEM())) && !itsML[0];

        if (exit)
        {
            ClearStatisticData();
        }

        m_critValue.push_back(loglik);

        for (int j = 0; j < numberOfThreads; j++)
        {
            delete pCurrentInfEng[j];
            pCurrentInfEng[j] = NULL;
        }
    } while (exit);

    delete [] pCurrentInfEng;

    //”даление дополнительных факторов
    for (factor = numberOfParameters; factor < numberOfParameters * numberOfThreads;
    factor++)
    {
        delete ppAllFactors[factor];
    };

    delete[] ppAllFactors;
    delete[] was_updated;

    if (iteration > GetMaxIterEM())
    {
        PNL_THROW(CNotConverged, "maximum number of iterations")
    }

    SetNumProcEv( GetNumEv() );
}
Ejemplo n.º 26
0
void CBKInfEngine::
MarginalNodes( const int *query, int querySize, int slice, int notExpandJPD )
{
    /////////////////////////////////////////////////////////////////////////
    if( GetProcedureType() == ptFiltering )
    {
	PNL_CHECK_LEFT_BORDER(m_CRingpEv.size(), 1);
    }
    else
    {
	PNL_CHECK_LEFT_BORDER(m_CRingpEv.size() , m_CRingJtreeInf.size());
    }
   
    /////////////////////////////////////////////////////////////////////////
    
    if( GetEvidenceMPE() )
    {
	delete GetEvidenceMPE();
	SetEvidenceMPE(NULL);
    }
    if( GetQueryPot() )
    {
	delete GetQueryPot();
	SetQueryPot(NULL);
    }
    
   
    SetQueryNodes(querySize, query);
    intVector queryVec;
    queryVec.assign(query, query + querySize);
    
    intVector finalQuery;

    switch( m_ProcedureType )
    {
    case ptFiltering:
	{
	    FindFinalQuery( queryVec, m_CurrentTime  - 1, &finalQuery);
	    m_QuerryJTree  =  m_CRingJtreeInf[m_CurrentTime  - 1];
	    m_QuerryJTree->MarginalNodes( &finalQuery.front(), querySize, notExpandJPD );
	    break;
	}
    case ptFixLagSmoothing:
	{
	    FindFinalQuery( queryVec, m_CurrentTime - m_Lag - 1, &finalQuery);
	    m_QuerryJTree = m_CRingJtreeInf[m_CurrentTime - m_Lag - 1];
	    m_QuerryJTree->MarginalNodes( &finalQuery.front(), querySize, notExpandJPD );
	    break;
	}
    case ptSmoothing:
    case ptViterbi:
	{
	    PNL_CHECK_RANGES(slice, 0, m_Lag);
	    
	    FindFinalQuery( queryVec, slice, &finalQuery);
	    m_QuerryJTree = m_CRingJtreeInf[slice];
	    m_QuerryJTree->MarginalNodes( &finalQuery.front(), querySize, notExpandJPD );
	    break;
	}
    }
}
Ejemplo n.º 27
0
// Parallel (MPI) EM learning for models with continuous (Gaussian) factors.
//
// Every MPI rank runs the E-step over its share of the evidences; the
// accumulated mean (stMatMu) and covariance (stMatSigma) sufficient
// statistics are then summed across all ranks with MPI_Allreduce and
// written back into each rank's matrices before the M-step (UpdateModel).
// Iterates until the relative log-likelihood change drops below
// GetPrecisionEM() or the iteration cap is reached.
void CParEMLearningEngine::LearnContMPI()
{
    CStaticGraphicalModel *pGrModel =  this->GetStaticModel();
    PNL_CHECK_IS_NULL_POINTER(pGrModel);
    PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);
    
    CInfEngine *pInfEng = CJtreeInfEngine::Create(pGrModel);
    
    float loglik = 0.0f;
    int domainNodes;
    CFactor *parameter = NULL;
    int numberOfParameters = pGrModel->GetNumberOfParameters();
    
    int nFactors = pGrModel->GetNumberOfFactors();
    const CEvidence *pEv;
    CFactor *pFactor;
    
    int iteration = 0;
    int numSelfEvidences, NumberOfProcesses, MyRank;
    int start_mpi, finish_mpi;
    
    MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);
    
    if (IsAllObserved())
    {
        // fully observed data: plain ML statistics, no inference needed
        int i;
        CFactor *factor = NULL;
        for (i = 0; i < nFactors; i++)
        {
            factor = pGrModel->GetFactor(i);
            factor->UpdateStatisticsML(&m_Vector_pEvidences[GetNumberProcEv()], 
               GetNumEv() - GetNumberProcEv());
        }
        m_critValue.push_back(UpdateModel());
    }
    else
    {
        bool bContinue;
        const CPotential * pot;
        
        do
        {
            ClearStatisticData();
            iteration++;

            // split the evidences between the ranks; the last rank takes
            // the remainder
            numSelfEvidences = (GetNumEv() - GetNumberProcEv()) / NumberOfProcesses;
            start_mpi = GetNumberProcEv() + numSelfEvidences * MyRank; 
            if (MyRank < NumberOfProcesses - 1)
                finish_mpi = start_mpi + numSelfEvidences; 
            else
                finish_mpi = GetNumEv();            

            for(int ev = start_mpi; ev < finish_mpi; ev++)
            {
                // inference is needed only when some nodes are hidden
                bool bInfIsNeed = !GetObsFlags(ev)->empty(); 
                pEv = m_Vector_pEvidences[ev];
                
                if( bInfIsNeed )
                {
                    pInfEng->EnterEvidence(pEv,      0, 0);
                }
                int i;
                
                for( i = 0; i < nFactors; i++ )
                {
                    pFactor = pGrModel->GetFactor(i);
                    int nnodes;
                    const int * domain;
                    pFactor->GetDomain( &nnodes, &domain );
                    if( bInfIsNeed && !IsDomainObserved(nnodes, domain, ev ) )
                    {
                        // E-step: expected statistics from the marginal
                        pInfEng->MarginalNodes( domain, nnodes, 1 );
                        pot = pInfEng->GetQueryJPD(); 
                        
                        pFactor->UpdateStatisticsEM( pot, pEv );
                    }
                    else
                    {
                        pFactor->UpdateStatisticsML( &pEv, 1 );
                    }
                }
            }
            
            // sum the per-rank mean/covariance statistics over all ranks
            // and write the result back into every rank's matrices
            for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
            {   
                parameter = pGrModel->GetFactor(domainNodes);
                
                C2DNumericDenseMatrix<float> *matMeanForSending;
                C2DNumericDenseMatrix<float> *matCovForSending;
                int dataLengthM,dataLengthC;
                
                const float *pMeanDataForSending;
                const float *pCovDataForSending;
                
                matMeanForSending = static_cast<C2DNumericDenseMatrix<float>*>
                    ((parameter->GetDistribFun())->GetStatisticalMatrix(stMatMu));               
                
                matMeanForSending->GetRawData(&dataLengthM, &pMeanDataForSending);
                
                matCovForSending = static_cast<C2DNumericDenseMatrix<float>*>
                    ((parameter->GetDistribFun())->GetStatisticalMatrix(stMatSigma));               
                
                matCovForSending->GetRawData(&dataLengthC, &pCovDataForSending);
                
                float *pMeanDataRecv = new float[dataLengthM];
                float *pCovDataRecv = new float[dataLengthC];
                
                MPI_Allreduce((void*)pMeanDataForSending, pMeanDataRecv, dataLengthM, MPI_FLOAT, MPI_SUM,
                    MPI_COMM_WORLD);
                MPI_Allreduce((void*)pCovDataForSending, pCovDataRecv, dataLengthC, MPI_FLOAT, MPI_SUM,
                    MPI_COMM_WORLD);
                
                // deliberately write the reduced sums back through the raw
                // pointers into the matrices' own storage
                memcpy((void*)pMeanDataForSending,pMeanDataRecv,dataLengthM*sizeof(float));
                
                memcpy((void*)pCovDataForSending,pCovDataRecv,dataLengthC*sizeof(float));

                // the receive buffers were previously leaked on every pass
                delete [] pMeanDataRecv;
                delete [] pCovDataRecv;
            }                        

            loglik = UpdateModel();
            
            if( GetMaxIterEM() != 1)
            {
                // relative change of the log-likelihood vs. the precision
                bool flag = iteration == 1 ? true : 
                (fabs(2*(m_critValue.back()-loglik)/(m_critValue.back() + loglik)) > GetPrecisionEM() );
                
                bContinue = GetMaxIterEM() > iteration && flag;
            }
            else
            {
                bContinue = false;
            }
            m_critValue.push_back(loglik);
            
        }while(bContinue);
    }
    delete pInfEng; // was leaked before
    SetNumProcEv( GetNumEv() );
}
Ejemplo n.º 28
0
// Configures the engine for one of the BK inference procedures and
// allocates the ring of per-slice junction-tree engines it needs.
//
// procedureType - ptFiltering, ptFixLagSmoothing, ptSmoothing or ptViterbi
// lag           - smoothing lag / window length; its meaning and valid
//                 range depend on the procedure (see the cases below)
//
// Throws CInconsistentType for any other procedure type.
void CBKInfEngine::
DefineProcedure( EProcedureTypes procedureType, int lag )
{
    /////////////////////////////////////////////////////////////////////////
    // Selection procedure (smoothing, filtering, ...
    /////////////////////////////////////////////////////////////////////////
    
    // discard any state left over from a previous procedure
    DestroyData();
    
    switch( procedureType )
    {
    case ptFiltering:
        {
            // filtering takes no lag (PNL_CHECK_FOR_NON_ZERO rejects it —
            // NOTE(review): confirm macro polarity against pnlException.hpp);
            // only two slices are kept: the prior slice and the 1.5-slice
            PNL_CHECK_FOR_NON_ZERO( lag );
            m_Lag = 0;
            m_CRingJtreeInf.resize( 2 );
            m_CRingJtreeInf[0]= CJtreeInfEngine::Copy(m_pPriorSliceJtreeInf);
            m_CRingJtreeInf[1]= CJtreeInfEngine::Copy(m_p1_5SliceJtreeInf) ;
            m_ProcedureType = ptFiltering;
            break;
        }
        
    case ptFixLagSmoothing:
        {
            // fixed-lag smoothing keeps a window of lag+1 junction trees:
            // slot 0 is the prior slice, slots 1..lag are 1.5-slice copies,
            // plus 'lag' distributions on the separators between them
            PNL_CHECK_LEFT_BORDER( lag, 0 );
            m_Lag = lag;
            m_CRingJtreeInf.resize( m_Lag+1 );
            m_CRingJtreeInf[0] = CJtreeInfEngine::Copy(m_pPriorSliceJtreeInf);
            
            int t;
            for( t = 1; t < lag + 1; t++ )
            {
                m_CRingJtreeInf[t] = CJtreeInfEngine::Copy(m_p1_5SliceJtreeInf);
            }
            
            m_CRingDistrOnSep.resize(lag);
            m_ProcedureType = procedureType;
            break;
        }
        
    case ptSmoothing:
    case ptViterbi:
        {
            // (fixed-interval) smoothing / Viterbi over 'lag' slices:
            // trees for slices 0..lag-1 and lag-1 separator distributions
            PNL_CHECK_LEFT_BORDER( lag, 1 );
            m_Lag = lag;
            
            m_CRingJtreeInf.resize(lag);
            m_CRingJtreeInf[0] = CJtreeInfEngine::Copy(m_pPriorSliceJtreeInf);
            
            int t;
            for( t = 1; t < lag; t++ )
            {
                m_CRingJtreeInf[t] = CJtreeInfEngine::Copy(m_p1_5SliceJtreeInf);
            }
            m_CRingDistrOnSep.resize(lag-1);
            
            m_ProcedureType = procedureType;
            break;
        }
        
        
    default:
        {
            PNL_THROW( CInconsistentType, "incorrect type of inference procedure" );
        }
    }
    
    // each separator slot holds one distribution per cluster
    int i;
    for( i = 0; i < m_CRingDistrOnSep.size(); i++ )
    {
        m_CRingDistrOnSep[i].resize(GetNumOfClusters());
        
    }
    // initialize iterator for the jtree inferences sequence
    m_JTreeInfIter = m_CRingJtreeInf.begin();
    m_CDistrOnSepIter = m_CRingDistrOnSep.begin();
    m_CurrentTime = 0;
    
}
Ejemplo n.º 29
0
void CParEMLearningEngine::Learn()
{
    CStaticGraphicalModel *pGrModel =  this->GetStaticModel();
    PNL_CHECK_IS_NULL_POINTER(pGrModel);
    PNL_CHECK_LEFT_BORDER(GetNumEv() - GetNumberProcEv() , 1);

    CJtreeInfEngine *pCurrentInfEng = NULL;

    CFactor *parameter = NULL;
    int exit = 0;
    int numberOfParameters = pGrModel->GetNumberOfParameters();
    int domainNodes;
    int infIsNeed = 0;
    int itsML = 0;

    // !!!
    float loglik = -FLT_MAX;
    float loglikOld = -FLT_MAX;
    float epsilon = GetPrecisionEM();
    float stopExpression = epsilon + 1.0f;
    int iteration = 0;
    int currentEvidNumber;
    int bMaximize = 0;
    int bSumOnMixtureNode = 0;
    const CEvidence* pCurrentEvid;
    int start_mpi, finish_mpi;
    int NumberOfProcesses, MyRank;
    int numSelfEvidences;
    
    MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcesses);
    MPI_Comm_rank(MPI_COMM_WORLD, &MyRank);

    int d = 0;
    do
    {
        iteration++;

        numSelfEvidences = (GetNumEv() - GetNumberProcEv()) / NumberOfProcesses;
        start_mpi = GetNumberProcEv() + numSelfEvidences * MyRank; // !!!
        if (MyRank < NumberOfProcesses - 1)
            finish_mpi = start_mpi + numSelfEvidences; // !!!
        else
            finish_mpi = GetNumEv(); // !!!        

        for(int ev = start_mpi; ev < finish_mpi; ev++)
        {
            infIsNeed = 0;
            currentEvidNumber = ev; // !!!

            pCurrentEvid = m_Vector_pEvidences[currentEvidNumber];
            if( !pCurrentEvid)
            {
                PNL_THROW(CNULLPointer, "evidence")
            }

            infIsNeed = !GetObsFlags(ev)->empty(); // !!!

            if(infIsNeed)
            {
                // create inference engine
                if(!pCurrentInfEng)
                {
                    pCurrentInfEng = CJtreeInfEngine::Create(pGrModel);
                }
                pCurrentInfEng->EnterEvidence(pCurrentEvid, bMaximize,
                    bSumOnMixtureNode);
            }

            for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++)
            {
                parameter = pGrModel->GetFactor(domainNodes);
                if(infIsNeed)
                {
                    int DomainSize;
                    const int *domain;
                    parameter->GetDomain(&DomainSize, &domain);
                    if (IsDomainObserved(DomainSize, domain, currentEvidNumber))
                    {
                        const CEvidence *pEvidences[] = { pCurrentEvid };
                        parameter->UpdateStatisticsML(pEvidences, 1);
                    }
                    else
                    {
                        pCurrentInfEng->MarginalNodes(domain, DomainSize, 1);
                        const CPotential * pMargPot = pCurrentInfEng->GetQueryJPD();
                        parameter ->UpdateStatisticsEM(pMargPot, pCurrentEvid);
                    }
                }
                else
                {
                    const CEvidence *pEvidences[] = { pCurrentEvid };
                    parameter->UpdateStatisticsML(pEvidences, 1);
                }
            }
            itsML = itsML || !infIsNeed;
        }

        for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++ )
        {
            parameter = pGrModel->GetFactor(domainNodes);
            
            CNumericDenseMatrix<float> *matForSending;
            int matDim;
            const int *pMatRanges;
            int dataLength;
            const float *pDataForSending;

            matForSending = static_cast<CNumericDenseMatrix<float>*>
                ((parameter->GetDistribFun())->GetStatisticalMatrix(stMatTable));

            matForSending->GetRanges(&matDim, &pMatRanges);
            matForSending->GetRawData(&dataLength, &pDataForSending);
            float *pDataRecv = new float[dataLength];
            float *pDataRecv_copy = new float[dataLength];
            MPI_Status status;

            MPI_Allreduce((void*)pDataForSending, pDataRecv, dataLength, MPI_FLOAT, MPI_SUM,
                MPI_COMM_WORLD);

            CNumericDenseMatrix<float> *RecvMatrix =
                static_cast<CNumericDenseMatrix<float>*>
                (parameter->GetDistribFun()->GetStatisticalMatrix(stMatTable));
            int dataLength_new;
            float *pData_new;
            RecvMatrix->GetRawData(&dataLength_new, (const float**)(&pData_new));
            for(int t=0;t<dataLength_new;t++)
                pData_new[t]=pDataRecv[t];
        }
        switch (pGrModel->GetModelType())
        {
        case mtBNet:
            {
                loglikOld = loglik;
                loglik = 0.0f;
                for(domainNodes = 0; domainNodes < numberOfParameters; domainNodes++)
                {
                    parameter = pGrModel->GetFactor(domainNodes);
                    loglik += parameter->ProcessingStatisticalData(m_numberOfAllEvidences);
                }
                break;
            }
        case mtMRF2:
        case mtMNet:
            {
                loglikOld = loglik;
                loglik = _LearnPotentials();
                break;
            }
        default:
            {
                PNL_THROW(CBadConst, "model type")
                    break;
            }
        }

        stopExpression = 
            float(fabs(2 * (loglikOld - loglik) / (loglikOld + loglik)));
        exit = ((stopExpression > epsilon) && (iteration <= GetMaxIterEM())) && !itsML;
        if(exit)
        {
            ClearStatisticData();
        }

        delete pCurrentInfEng;
        pCurrentInfEng = NULL;
    }while(exit);

    if(iteration > GetMaxIterEM())
    {
        PNL_THROW(CNotConverged, "maximum number of iterations")
    }

    SetNumProcEv( GetNumEv() );
}
Ejemplo n.º 30
0
// Runs the backward (smoothing) pass over the current fixed-lag window
// and then restores the engine's forward state (junction trees, separator
// distributions, iterators and current time) so that filtering forward
// can continue from where it left off.
void CBKInfEngine::BackwardFixLag()
{
    //////////////////////////////////////////////////////////////////////////
    // Backward step for fixed-lag smoothing procedure
    //////////////////////////////////////////////////////////////////////////
    PNL_CHECK_LEFT_BORDER(m_CurrentTime, m_Lag);
    
    if( m_Lag )
    {
	// remember the forward state to restore after the backward sweep
	int currentTimeTmp = GetTime();
	CRing<CJtreeInfEngine *>::iterator tmpInfIter = m_JTreeInfIter;
	CRing< distrPVector >::iterator tmpDistrIter =  m_CDistrOnSepIter;
	
	// two-slot ring of saved (pre-backward) junction-tree copies
	CRing<CJtreeInfEngine*> ringEng;
	ringEng.assign( 2 , NULL ); 
	CRing<CJtreeInfEngine *>::iterator ringEngIter = ringEng.begin();
	
	// save a copy of the current slice's tree before it is modified
	*ringEngIter = CJtreeInfEngine::Copy(*m_JTreeInfIter);
	ringEngIter++;
	
	// backward step on the last slice of the window
	BackwardT();
	
	// scratch copies of the separator distributions that Backward()
	// will overwrite, one per cluster
	distrPVector tmpDistr(GetNumOfClusters(), (CDistribFun* const)NULL);
	int i;
	for( i = 0; i < GetLag(); i++ )
	{
	    if( i < GetLag() - 1 )
	    {
		// save the previous slice's tree and separator distributions
		// before Backward() touches them
		*ringEngIter = CJtreeInfEngine::Copy(*(m_JTreeInfIter-1));
		int j;
		for( j = 0; j < GetNumOfClusters(); j++ )
		{
		    tmpDistr[j] = (*m_CDistrOnSepIter)[j]->Clone();
		}
	    }
	    Backward();
	    
	    // replace the already-smoothed tree of the following slice with
	    // the saved pre-backward copy, restoring the forward state
	    //CJtreeInfEngine::Release(&(*(m_JTreeInfIter + 1)));
            delete (*(m_JTreeInfIter + 1));
	    
	    ringEngIter++;
	    
	    *(m_JTreeInfIter + 1) = *ringEngIter;
	    
	    if( i < GetLag() - 1 )
	    { 
		// likewise restore the saved separator distributions
		int j;
		for( j = 0; j < GetNumOfClusters(); j++ )
		{
		    delete (*(m_CDistrOnSepIter+1))[j];
		    (*(m_CDistrOnSepIter+1))[j]=tmpDistr[j];
		}
	    }
	} 
	
	// put the iterators and the clock back where the forward pass was
	m_CDistrOnSepIter = tmpDistrIter;
	m_JTreeInfIter = tmpInfIter;
	m_CurrentTime = currentTimeTmp;
	
    }
    else
    {
	// no lag: a single backward step on the current slice suffices
	BackwardT();
    }
    
}