Example #1
0
//	enforce epicardium properties on boundary nodes (boundary between
//	epicardium and endocardium mesh)
void AnisotropyGenerator::eliminateSpecificSurface(InputFileData* InputData,
                                                   AnisotropyCondition& condition,
                                                   intVector& surfElems,
                                                   int surfID,
                                                   std::ofstream& logFile) {

  using namespace std;

  vector<ConditionParticle>& ptcls = condition.getParticles();
  vector<ConditionElement>& elems = condition.getElements();

  bool notSame = false;

  // Check whether the collected boundary elements span more than one surface.
  for(int j = 1;j < surfElems.size();j++) {

    if(elems[surfElems[j - 1]].getSurfaceID()
      != elems[surfElems[j]].getSurfaceID()) {
      notSame = true;
      break;
    }
  }

  // If so, keep only the elements that lie on the requested surface.
  if(notSame) {

    intVector dummyVec;
    for(int k = 0;k < surfElems.size();k++) {
      if(elems[surfElems[k]].getSurfaceID() == surfID) {
        dummyVec.push_back(surfElems[k]);
      }
    }
    surfElems = dummyVec;
  }

}
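// A minimal usage sketch (hypothetical setup: a generator `gen`, an
// InputFileData* `inputData`, a populated AnisotropyCondition `condition`
// and an open log stream; `collectBoundaryElements` is a stand-in for
// however the candidate elements are gathered). After the call, surfElems
// only keeps elements whose surface ID equals 2 whenever the original list
// mixed several surfaces.
intVector surfElems = collectBoundaryElements();   // hypothetical helper
gen.eliminateSpecificSurface(inputData, condition, surfElems, 2, logFile);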
Example #2
0
void CMNet::CreateTabularPotential( const intVector& domain,
        const floatVector& data )
{
    AllocFactor( domain.size(), &domain.front() );

    pFactorVector factors;
    int numFactors = GetFactors( domain.size(), &domain.front(), &factors );
    if( numFactors != 1 )
    {
        PNL_THROW( CInconsistentSize,
            "domain must be the same as corresponding domain size got from graph" );
    }
    factors[0]->AllocMatrix( &data.front(), matTable );
}
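// A minimal usage sketch (hedged): assuming an already-constructed CMNet*
// pMNet whose nodes 0 and 1 are binary, the data vector supplies one float
// per joint configuration of the domain.
void exampleCreateTabularPotential( CMNet* pMNet )   // hypothetical helper
{
    intVector domain;
    domain.push_back(0);
    domain.push_back(1);
    floatVector data;
    data.push_back(0.7f); data.push_back(0.3f);
    data.push_back(0.2f); data.push_back(0.8f);
    // Allocates the factor over {0, 1} and attaches the 2x2 table.
    pMNet->CreateTabularPotential( domain, data );
}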
Example #3
0
void CBKInfEngine::
Get1_5Clusters(int nnodesPerSlice, intVector& interfNds, intVecVector& clusters, 
	       intVecVector* clusters1_5Sl) const
{
    
    int nInterfNds = interfNds.size();
    
    // Duplicate the per-slice clusters: the first copy will be remapped to
    // interface-list positions, the second copy to the nodes of the next slice.
    clusters1_5Sl->assign( clusters.begin(), clusters.end() );
    clusters1_5Sl->insert( clusters1_5Sl->end(), clusters.begin(), clusters.end() );
    
    int nClusters = clusters.size();
    int i,j;
    for( i = 0; i < nClusters; i++ )
    {
	int nnodesPerClust = clusters[i].size();
	
	for( j = 0; j < nnodesPerClust; j++ )
	{
	    intVector::iterator loc = std::find( interfNds.begin(),
			    interfNds.end(), clusters[i][j] );
	    
	    // First copy: position of the node within the interface-node list;
	    // second copy: slice-local node id shifted past the interface block.
	    (*clusters1_5Sl)[i][j] = loc - interfNds.begin();
	    (*clusters1_5Sl)[i + nClusters][j] += nInterfNds;
	}
	
    }
    
}
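// Worked example (hedged): with interfNds = {2, 5} and a single per-slice
// cluster {2, 5}, the first copy is remapped to positions within the
// interface list, giving {0, 1}, while the second copy keeps the slice-local
// node ids shifted past the interface block: {2+2, 5+2} = {4, 7}.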
Example #4
0
void CSamplingInfEngine::
GetObsDimsWithVls(intVector &domain, int nonObsNode, const CEvidence* pEv, 
		  intVector *dims, intVector *vls) const
{
    int nnodes = domain.size();
    dims->resize(nnodes - 1);
    vls->resize(nnodes - 1);
    
    // Raw pointers into the vectors; dims/vls receive an entry for every
    // node in the domain except the node being resampled.
    int* it = &domain.front();
    int* itDims = &dims->front();
    int* itVls = &vls->front();
    int i;
    for( i = 0; i < nnodes; i++, it++ )
    {		
	if( *it != nonObsNode )
	{
	    *itDims = i;
	    *itVls = pEv->GetValueBySerialNumber(*it)->GetInt();// only valid if all nodes are tabular!
	    
	    itDims++;
	    itVls++;
	}
    }
    
}
Example #5
0
//-------------------------------------------------------------------------------
void CSoftMaxCPD::BuildCurrentEvidenceMatrix(float ***full_evid, float ***evid,
                                             intVector family, int numEv)
{
  int i, j;
  // One row per family member; the caller owns *evid and must free it.
  *evid = new float* [family.size()];
  for (i = 0; i < family.size(); i++)
  {
    (*evid)[i] = new float [numEv];
  }

  // Copy the evidence rows that belong to the family members.
  for (i = 0; i < numEv; i++)
  {
    for (j = 0; j < family.size(); j++)
    {
      (*evid)[j][i] = (*full_evid)[family[j]][i];
    }
  }
}
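// A matching cleanup sketch (assumption: evid was filled by
// BuildCurrentEvidenceMatrix for a family of nRows nodes); each row must be
// released before the row array itself, as ComputeFamilyScore does below.
void freeEvidenceMatrix(float **evid, int nRows)   // hypothetical helper
{
  for (int k = 0; k < nRows; k++)
    delete [] evid[k];
  delete [] evid;
}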
Example #6
0
avtSpeciesMetaData::avtSpeciesMetaData(const std::string &n,
    const std::string &meshn, const std::string &matn,
    int nummat, const intVector &ns, const std::vector<stringVector> &sn)
    : AttributeSubject(avtSpeciesMetaData::TypeMapFormatString)
{
    // Initialize all.
    *this = avtSpeciesMetaData();

    // Override members
    name         = n;
    originalName = name;
    meshName     = meshn;
    materialName = matn;
    numMaterials = nummat;

    ClearSpecies();
    for (size_t i=0; i<ns.size(); i++)
        AddSpecies(avtMatSpeciesMetaData(ns[i], sn[i]));
    validVariable = true;
}
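// A minimal construction sketch (hypothetical values): two materials, the
// first carrying two species and the second one species; ns gives the
// species count per material and sn the corresponding names.
intVector ns;
ns.push_back(2);
ns.push_back(1);
std::vector<stringVector> sn(2);
sn[0].push_back("H"); sn[0].push_back("He");
sn[1].push_back("H2O");
avtSpeciesMetaData md("species", "mesh", "materials", 2, ns, sn);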
Example #7
0
float CMlStaticStructLearn::ScoreFamily(intVector vFamily)
{
    int nParents = vFamily.size() - 1;
    PNL_CHECK_RANGES(nParents, 0, m_nMaxFanIn);
    // Cache key: parent node ids shifted by +1 so unused fan-in slots stay 0.
    intVector indexes(m_nMaxFanIn,0);
    int i;
    for(i=0; i<nParents; i++)
    {
        indexes[i] = vFamily[i]+1;
    }
    int node = vFamily[nParents];
    float score;
    float defval = m_pNodeScoreCache[node]->GetDefaultValue();
    score = m_pNodeScoreCache[node]->GetElementByIndexes(&indexes.front());
    // A cached score equal to the default value means this family has not
    // been scored yet; compute and cache it.
    if(score == defval)
    {
        score = ComputeFamilyScore(vFamily);
        m_pNodeScoreCache[node]->SetElementByIndexes(score, &indexes.front());
    }
    return score;
}
Example #8
0
avtDataObject_p
IceTNetworkManager::Render(
    bool checkThreshold, intVector networkIds,
    bool getZBuffer, int annotMode, int windowID,
    bool leftEye)
{
    int t0 = visitTimer->StartTimer();
    DataNetwork *origWorkingNet = workingNet;
    avtDataObject_p retval;

    EngineVisWinInfo &viswinInfo = viswinMap[windowID];
    viswinInfo.markedForDeletion = false;
    VisWindow *viswin = viswinInfo.viswin;
    std::vector<avtPlot_p>& imageBasedPlots = viswinInfo.imageBasedPlots;

    renderings = 0;

    TRY
    {
        this->StartTimer();

        RenderSetup(windowID, networkIds, getZBuffer,
            annotMode, leftEye, checkThreshold);

        bool plotDoingTransparencyOutsideTransparencyActor = false;
        for(size_t i = 0 ; i < networkIds.size() ; i++)
        {
            workingNet = NULL;
            UseNetwork(networkIds[i]);
            if(this->workingNet->GetPlot()->ManagesOwnTransparency())
            {
                plotDoingTransparencyOutsideTransparencyActor = true;
            }
        }
        workingNet = NULL;

        // We can't easily figure out a compositing order, which IceT requires
        // in order to properly composite transparent geometry.  Thus if there
        // is some transparency, fallback to our parent implementation.
        avtTransparencyActor* trans = viswin->GetTransparencyActor();
        bool transparenciesExist = trans->TransparenciesExist()
                           ||  plotDoingTransparencyOutsideTransparencyActor;
        if (transparenciesExist)
        {
            debug2 << "Encountered transparency: falling back to old "
                      "SR / compositing routines." << std::endl;

            retval = NetworkManager::RenderInternal();
        }
        else
        {
            bool needZB = !imageBasedPlots.empty() ||
                          renderState.shadowMap  ||
                          renderState.depthCues;

            // Confusingly, we need to set the input to be *opposite* of what VisIt
            // wants.  This is due to (IMHO) poor naming in the IceT case; on the
            // input side:
            //     ICET_DEPTH_BUFFER_BIT set:     do Z-testing
            //     ICET_DEPTH_BUFFER_BIT not set: do Z-based compositing.
            // On the output side:
            //     ICET_DEPTH_BUFFER_BIT set:     readback of Z buffer is allowed
            //     ICET_DEPTH_BUFFER_BIT not set: readback of Z does not work.
            // In VisIt's case, we calculated a `need Z buffer' predicate based
            // around the idea that we need the Z buffer to do Z-compositing.
            // However, IceT \emph{always} needs the Z buffer internally -- the
            // flag only differentiates between `compositing' methodologies
            // (painter-style or `over' operator) on input.
            GLenum inputs = ICET_COLOR_BUFFER_BIT;
            GLenum outputs = ICET_COLOR_BUFFER_BIT;
            // Scratch all that, I guess.  That might be the correct way to go
            // about things in the long run, but IceT only gives us back half an
            // image if we don't set the depth buffer bit.  The compositing is a
            // bit wrong, but there's not much else we can do.
            // Consider removing the `hack' if a workaround is found.
            if (/*hack*/true/*hack*/) // || !this->MemoMultipass(viswin))
            {
                inputs |= ICET_DEPTH_BUFFER_BIT;
            }
            if(needZB)
            {
                outputs |= ICET_DEPTH_BUFFER_BIT;
            }
            ICET(icetInputOutputBuffers(inputs, outputs));

            // If there is a backdrop image, we need to tell IceT so that it can
            // composite correctly.
            if(viswin->GetBackgroundMode() != AnnotationAttributes::Solid)
            {
                ICET(icetEnable(ICET_CORRECT_COLORED_BACKGROUND));
            }
            else
            {
                ICET(icetDisable(ICET_CORRECT_COLORED_BACKGROUND));
            }

            if (renderState.renderOnViewer)
            {
                RenderCleanup();
                avtDataObject_p dobj = NULL;
                CATCH_RETURN2(1, dobj);
            }

            debug5 << "Rendering " << viswin->GetNumPrimitives()
                   << " primitives." << endl;

            int width, height, width_start, height_start;
            // This basically gets the width and the height.
            // The distinction is for 2D rendering, where we only want the
            // width and the height of the viewport.
            viswin->GetCaptureRegion(width_start, height_start, width, height,
                                     renderState.viewportedMode);

            this->TileLayout(width, height);

            CallInitializeProgressCallback(this->RenderingStages());

            // IceT mode is different from the standard network manager; we don't
            // need to create any compositor or anything: it's all done under the
            // hood.
            // Whether or not to do multipass rendering (opaque first, translucent
            // second) is all handled in the callback; from our perspective, we
            // just say draw, read back the image, and post-process it.

            // IceT sometimes omits large parts of Curve plots when using the
            // REDUCE strategy. Use a different compositing strategy for Curve
            // plots to avoid the problem.
            if(viswin->GetWindowMode() == WINMODE_CURVE)
                ICET(icetStrategy(ICET_STRATEGY_VTREE));
            else
                ICET(icetStrategy(ICET_STRATEGY_REDUCE));

            ICET(icetDrawFunc(render));
            ICET(icetDrawFrame());

            // Now that we're done rendering, we need to post process the image.
            debug3 << "IceTNM: Starting readback." << std::endl;
            avtImage_p img = this->Readback(viswin, needZB);

            // Now it's essentially back to the same behavior as our parent:
            //  shadows
            //  depth cueing
            //  post processing

            if (renderState.shadowMap)
                this->RenderShadows(img);

            if (renderState.depthCues)
                this->RenderDepthCues(img);

            // If the engine is doing more than just 3D annotations,
            // post-process the composited image.
            RenderPostProcess(img);

            CopyTo(retval, img);
        }

        RenderCleanup();
    }
    CATCHALL
    {
        RenderCleanup();
        RETHROW;
    }
    ENDTRY

    workingNet = origWorkingNet;
    visitTimer->StopTimer(t0, "Ice-T Render");
    return retval;
}
Example #9
0
void CGibbsSamplingInfEngine::
MarginalNodes( const intVector& queryNdsIn, int notExpandJPD  )
{
  MarginalNodes( &queryNdsIn.front(), queryNdsIn.size(), notExpandJPD );
}
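// NB: &queryNdsIn.front() on an empty vector is undefined behaviour, so this
// wrapper (and the similar ones below) assumes a non-empty query set.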
Example #10
0
void CExInfEngine< INF_ENGINE, MODEL, FLAV, FALLBACK_ENGINE1, FALLBACK_ENGINE2 >::MarginalNodes( intVector const &queryNds, int notExpandJPD )
{
    MarginalNodes(&queryNds.front(), queryNds.size(), notExpandJPD );
}
Example #11
0
CFactor::CFactor( EDistributionType dt,
                  EFactorType pt,
                  const int *domain, int nNodes, CModelDomain* pMD,
                  const intVector& obsIndices )
                  : m_Domain( domain, domain + nNodes )
{	
    /*fill enum fields:*/
    m_DistributionType = dt;
    m_FactorType = pt;
    m_pMD = pMD;
    m_factNumInHeap = m_pMD->AttachFactor(this);
    int i;
    pConstNodeTypeVector nt;
    intVector dom = intVector( domain, domain+nNodes );
    pMD->GetVariableTypes( dom, &nt );
    m_obsPositions.assign( obsIndices.begin(), obsIndices.end() );
    int numObsNodesHere = obsIndices.size();
    switch (dt)
    {
    case dtScalar:
        {
            if( pt == ftCPD )
            {
                PNL_THROW( CInvalidOperation, "scalar is only potential - to multiply" );
            }
            //if there are observed nodes - get corresponding node types
            if( numObsNodesHere )
            {
                if ( numObsNodesHere != nNodes )
                {
                    PNL_THROW( CInconsistentType,
                        "all nodes in scalar distribution must be observed" )
                }
                //find the observed nodes in the domain and switch them to their observed node types
                for( i = 0; i < numObsNodesHere; i++ )
                {
                    nt[obsIndices[i]] = nt[obsIndices[i]]->IsDiscrete() ? pMD->GetObsTabVarType():
                        pMD->GetObsGauVarType();
                }
            }
            m_CorrespDistribFun = CScalarDistribFun::Create(nNodes, &nt.front());
            break;
        }
    case dtTree:
        {
            if( pt != ftCPD )
            {
                PNL_THROW( CInvalidOperation, "Tree is only CPD" );
            }
            m_CorrespDistribFun = CTreeDistribFun::Create(nNodes, &nt.front());
            break;
        }
    case dtTabular:
        {
            
            if(( pt == ftPotential )&&( numObsNodesHere ))
            {
                //find the observed nodes in the domain and switch them to their observed node types
                for( i = 0; i < numObsNodesHere; i++ )
                {
                    //change node type for this node
                    nt[obsIndices[i]] = nt[obsIndices[i]]->IsDiscrete() ? pMD->GetObsTabVarType():
                    pMD->GetObsGauVarType();
                }
            }
            //check that all node types correspond to the Tabular distribution
            for( i = 0; i < nNodes; i++ )
            {
                if( !nt[i]->IsDiscrete() && (nt[i]->GetNodeSize() != 0) )
                {
                    PNL_THROW( CInconsistentType,
                        "node types must correspond to Tabular type" );
                }
            }
            m_CorrespDistribFun = CTabularDistribFun::Create( nNodes,
                &nt.front(), NULL );
            break;
        }
    case dtGaussian:
        {
            switch (pt)
            {
            case ftPotential:
                {
                    //find the observed nodes in the domain and switch them to their observed node types
                    for( i = 0; i < numObsNodesHere; i++ )
                    {
                        //change node type for this node
                        nt[obsIndices[i]] = nt[obsIndices[i]]->IsDiscrete() ?
                                pMD->GetObsTabVarType():pMD->GetObsGauVarType();
                    }
                    for( i = 0; i < nNodes; i++ )
                    {
                        if( nt[i]->IsDiscrete() && (nt[i]->GetNodeSize() != 1))
                        {
                            PNL_THROW( CInvalidOperation,
                                "Gaussian potential must be of Gaussian nodes only" )
                        }
                    }
                    m_CorrespDistribFun = 
                        CGaussianDistribFun::CreateInMomentForm( 1, nNodes,
                        &nt.front(), NULL, NULL, NULL  );
                    break;
                }
            case ftCPD:
                {
                    //check whether there are both Continuous & Discrete nodes
                    int noDiscrete = 1;
                    for( int i = 0; i < nNodes; i++ )
                    {
                        if( nt[i]->IsDiscrete() )
                        {
                            noDiscrete = 0;
                            break;
                        }
                    }
                    if( noDiscrete )
                    {
                        m_CorrespDistribFun = 
                            CGaussianDistribFun::CreateInMomentForm( 0, nNodes,
                            &nt.front(), NULL, NULL, NULL );
                        break;
                    }
                    else
                    {
                        m_CorrespDistribFun = 
                            CCondGaussianDistribFun::Create( 0, nNodes, &nt.front() );
                        break;
                    }
                }
            default:
                {
                    PNL_THROW( CBadConst,
                        "no such type in EFactorType" );
                    break;
                }
            }
            break;
        }
    case dtMixGaussian:
        {
            switch(pt)
            {
            case ftCPD:
                {
                    //check whether there is a discrete node - the mixture node
                    int noDiscrete = 1;
                    for( int i = 0; i < nNodes; i++ )
                    {
                        if( nt[i]->IsDiscrete() )
                        {
                            noDiscrete = 0;
                            break;
                        }
                    }
                    if( !noDiscrete  )
                    {
                        m_CorrespDistribFun = CCondGaussianDistribFun::Create( 0,
                            nNodes, &nt.front() );
                    }
                    else
                    {
                        PNL_THROW( CInconsistentType,
                            "mixture Gaussian CPD must have a discrete mixture node" );
                    }
                    break;
                }
            default:
                {
                    PNL_THROW( CNotImplemented, "mixture gaussian potential" );  
                }
            }
            break;
        }
    case dtSoftMax:
        {
            switch (pt)
            {
            case ftPotential:
                {
                    PNL_THROW( CNotImplemented, "only CPD yet" );
                    break;
                }
            case ftCPD:
                {
                    //check whether there are both Continuous & Discrete nodes
                    int noDiscrete = 1;
                    for( int i = 0; i < nNodes-1; i++ )
                    {
                        if( nt[i]->IsDiscrete() )
                        {
                            noDiscrete = 0;
                            break;
                        }
                    }
                    if( noDiscrete )
                    {
                        m_CorrespDistribFun = 
                            CSoftMaxDistribFun::Create( nNodes,
                            &nt.front(), NULL, NULL );
                        break;
                    }
                    else
                    {
                        m_CorrespDistribFun = 
                            CCondSoftMaxDistribFun::Create( nNodes, &nt.front() );
                        break;
                    }
/*
                  //can check if there are both Continuous & Discrete nodes
                        m_CorrespDistribFun = 
                            CSoftMaxDistribFun::CreateUnitFunctionDistribution( nNodes, &nt.front() );
                        break;
*/
                }
            default:
                {
                    PNL_THROW( CBadConst,
                        "no such type in EFactorType" );
                    break;
                }
            }
            break;
        }
    default:
        {
            PNL_THROW ( CBadConst,
                "no such distribution type in EDistributionType");
        }
    }
}
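// Summary sketch (hedged): the distribution function built above is selected
// by the (EDistributionType, EFactorType) pair; e.g. (dtGaussian, ftCPD) over
// purely continuous nodes yields a moment-form CGaussianDistribFun, while a
// mixed discrete/continuous domain yields a CCondGaussianDistribFun.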
Example #12
0
float CMlStaticStructLearn::ComputeFamilyScore(intVector vFamily)
{
    int nFamily = vFamily.size();
    CCPD* iCPD = this->CreateRandomCPD(nFamily, &vFamily.front(), m_pGrModel);
    CTabularDistribFun *pDistribFun;
    int ncases = m_Vector_pEvidences.size();
    const CEvidence * pEv;
    float score;
    float pred = 0;
    EDistributionType NodeType;
    switch (m_ScoreMethod)
    {
    case MaxLh :
        if ( !((iCPD->GetDistribFun()->GetDistributionType() == dtSoftMax)
                || (iCPD->GetDistribFun()->GetDistributionType() == dtCondSoftMax)))
        {
            iCPD->UpdateStatisticsML( &m_Vector_pEvidences.front(), ncases );
            score = iCPD->ProcessingStatisticalData(ncases);
        }
        else
        {
            float **evid = NULL;
            float **full_evid = NULL;
            BuildFullEvidenceMatrix(&full_evid);
            CSoftMaxCPD* SoftMaxFactor = static_cast<CSoftMaxCPD*>(iCPD);
            SoftMaxFactor->BuildCurrentEvidenceMatrix(&full_evid, &evid,
                    vFamily,m_Vector_pEvidences.size());
            SoftMaxFactor->InitLearnData();
            SoftMaxFactor->SetMaximizingMethod(mmGradient);
            SoftMaxFactor->MaximumLikelihood(evid, m_Vector_pEvidences.size(),
                                             0.00001f, 0.01f);
            SoftMaxFactor->CopyLearnDataToDistrib();
            if (SoftMaxFactor->GetDistribFun()->GetDistributionType() == dtSoftMax)
            {
                score = ((CSoftMaxDistribFun*)SoftMaxFactor->GetDistribFun())->CalculateLikelihood(evid,ncases);
            }
            else
            {
                score = ((CCondSoftMaxDistribFun*)SoftMaxFactor->GetDistribFun())->CalculateLikelihood(evid,ncases);
            }
            for (int k = 0; k < SoftMaxFactor->GetDomainSize(); k++)
            {
                delete [] evid[k];
            }
            delete [] evid;
            int i;
            intVector obsNodes;
            (m_Vector_pEvidences[0])->GetAllObsNodes(&obsNodes);
            for (i=0; i<obsNodes.size(); i++)
            {
                delete [] full_evid[i];
            }
            delete [] full_evid;
        }
        break;
    case PreAs :
        int i;
        NodeType = iCPD->GetDistributionType();
        switch (NodeType)
        {
        case dtTabular :
            for(i = 0; i < ncases; i++)
            {

                pConstEvidenceVector tempEv(0);
                tempEv.push_back(m_Vector_pEvidences[i]);
                iCPD->UpdateStatisticsML(&tempEv.front(), tempEv.size());
                iCPD->ProcessingStatisticalData(tempEv.size());
                pred += log(((CTabularCPD*)iCPD)->GetMatrixValue(m_Vector_pEvidences[i]));
            }
            break;
        case dtGaussian :
            for(i = 0; i < ncases; i += 1 )
            {

                pConstEvidenceVector tempEv(0);
                tempEv.push_back(m_Vector_pEvidences[i]);

                iCPD->UpdateStatisticsML(&tempEv.front(), tempEv.size());
                float tmp = 0;
                if (i != 0)
                {
                    tmp =iCPD->ProcessingStatisticalData(1);
                    pred +=tmp;

                }

            }
            break;
        case dtSoftMax:
            PNL_THROW(CNotImplemented,
                      "This type score method has not been implemented yet");
            break;
        default:
            PNL_THROW(CNotImplemented,
                      "This type score method has not been implemented yet");
            break;
        }

        score = pred;
        break;
    case MarLh :
    {
        // check that the potential is discrete (tabular)
        if (iCPD->GetDistributionType() != dtTabular)
        {
            PNL_THROW(CNotImplemented,
                      "This type of score method has been implemented only for discrete nets");
        }

        int DomainSize;
        const int * domain;
        switch(m_priorType)
        {
        case Dirichlet:
            iCPD->GetDomain(&DomainSize, &domain);

            pDistribFun = static_cast<CTabularDistribFun *>(iCPD->GetDistribFun());

            pDistribFun->InitPseudoCounts();

            for (i=0; i<ncases; i++)
            {
                pEv = m_Vector_pEvidences[i];
                const CEvidence *pEvidences[] = { pEv };
                pDistribFun->BayesUpdateFactor(pEvidences, 1, domain);
            }
            score = pDistribFun->CalculateBayesianScore();
            break;
        case K2:
            iCPD->GetDomain(&DomainSize, &domain);

            pDistribFun = static_cast<CTabularDistribFun *>(iCPD->GetDistribFun());

            pDistribFun->InitPseudoCounts(m_K2alfa);

            for (i=0; i<ncases; i++)
            {
                pEv = m_Vector_pEvidences[i];
                const CEvidence *pEvidences[] = { pEv };
                pDistribFun->BayesUpdateFactor(pEvidences, 1, domain);
            }
            score = pDistribFun->CalculateBayesianScore();
            break;
        case BDeu:
            iCPD->GetDomain(&DomainSize, &domain);

            pDistribFun = static_cast<CTabularDistribFun *>(iCPD->GetDistribFun());

            pDistribFun->InitPseudoCounts();

            for (i=0; i<ncases; i++)
            {
                pEv = m_Vector_pEvidences[i];
                const CEvidence *pEvidences[] = { pEv };
                pDistribFun->BayesUpdateFactor(pEvidences, 1, domain);
            }
            score = pDistribFun->CalculateBayesianScore() / iCPD->GetNumberOfFreeParameters();
            break;
        default:
            PNL_THROW(CNotImplemented,
                      "This type of prior has not been implemented yet");
            break;
        }


        break;
    }
    default :
        PNL_THROW(CNotImplemented,
                  "This type score method has not been implemented yet");
        break;
    }


    int dim = iCPD->GetNumberOfFreeParameters();
    switch (m_ScoreType)
    {
    case BIC :
        score -= 0.5f * float(dim) * float(log(float(ncases)));
        break;
    case AIC :
        score -= 0.5f * float(dim);
        break;
    case WithoutFine:
        break;
    case VAR :
        PNL_THROW(CNotImplemented,
                  "This type score function has not been implemented yet");
        break;
    default:
        PNL_THROW(CNotImplemented,
                  "This type score function has not been implemented yet");
        break;
    }

    delete iCPD;
    return score;
}
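// Note: with m_ScoreType == BIC the fitted score is reduced by
// 0.5 * dim * ln(ncases), the standard BIC complexity penalty; AIC subtracts
// 0.5 * dim instead (a 0.5-scaled variant of the usual AIC term).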
Example #13
0
int CGraphicalModel::GetFactors( const intVector& subdomainIn,
		                    pFactorVector *paramsOut ) const
{
	return GetFactors( subdomainIn.size(), &subdomainIn.front(),
		paramsOut );
};
Example #14
0
void CGraphicalModel::AllocFactor( const intVector& domainIn)
{
	AllocFactor( domainIn.size(), &domainIn.front() );
};
Example #15
0
//! Coordinate generator
//! Points are defined along the columns of coords and their dimensional
//! coordinates along the rows
void Interpolation::MLSUnitTest_meshMultiDim(double& length, double& nodal_dist,
		int& ndim, dbMatrix& coords, int& numCoords, intVector SBM_dim,
		dbVector SBM_coeff, ofstream& logFile) {

	// Define the coordinate list in a specific dimension
	dbVector coordList;
	for (int i = 0; i * nodal_dist <= length; i++)
		coordList.push_back(i * nodal_dist);

	printVector(coordList, "coordList", logFile);

	dbMatrix x;
	x.push_back(coordList);
	if (ndim > 1) {
		for (int i = 1; i < ndim; i++) {
			int dimLookup = i + 1;
			int position = findIntVecPos(dimLookup, 0, SBM_dim.size(), SBM_dim);
			logFile << "Position: " << position << endl;

			dbMatrix xBlock = x;
			int xBlockSize = xBlock[0].size();
			x.clear();
			x.resize(i + 1);

			for (int j = 0; j < coordList.size(); j++) {

				// Select a particular coordinate
				double coordSelect = coordList[j];

				// Find if dimension needs to be factored
				dbMatrix x_add = xBlock;
				if (position != -1) {
					logFile << "Dim factored" << endl;

					x_add.push_back(
							dbVector(xBlockSize,
									SBM_coeff[position] * coordSelect));

					//dbMatrix x_add = [SBM_coeff*coordSelect*ones(1,xBlockSize);xBlock];
				} else {
					// Repeat the previous block for each selected coordinate
					//x_add = [coordSelect*ones(1,xBlockSize);xBlock];
					x_add.push_back(dbVector(xBlockSize, coordSelect));
				}

				printMatrix(x_add, "x_add", logFile);

				//x = [x x_add];
				for (int k = 0; k < x_add.size(); k++) {
					for (int l = 0; l < x_add[k].size(); l++) {
						x[k].push_back(x_add[k][l]);
					}
				}

				printMatrix(x, "x", logFile);

			}
		}
	}

	// Transposing the coordinate matrix
	coords.resize(x[0].size());
	for (int i = 0; i < coords.size(); i++) {
		coords[i].resize(ndim);
		for (int j = 0; j < ndim; j++)
			coords[i][j] = x[j][i];
	}

	numCoords = coords.size();
}
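// A minimal usage sketch (hypothetical values; assumes an Interpolation
// instance `interp` and an open std::ofstream `logFile`): a 2D grid over
// [0, 1] with spacing 0.5 yields 3 x 3 = 9 points.
double length = 1.0, nodalDist = 0.5;
int ndim = 2, numCoords = 0;
dbMatrix coords;
intVector sbmDim;    // empty: no dimension is scaled
dbVector sbmCoeff;
interp.MLSUnitTest_meshMultiDim(length, nodalDist, ndim, coords, numCoords,
		sbmDim, sbmCoeff, logFile);
// Afterwards numCoords == 9 and coords[k] holds the (x, y) of point k.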
Example #16
0
//! Calculate the Weight function
dbVector Interpolation::weightCalc(dbVector& iPoint, intVector& neighbours,
		dbMatrix& coords, dbVector& radiusVec, InputFileData* InputData,
		ofstream& logFile) {

	dbVector weightVec(neighbours.size(), 1);

	int choice = InputData->getValue("MLSWeightFunc");

	switch (choice) {
	// *************************************************************************
	// Cubic spline
	case 0:
		for (int i = 0; i < neighbours.size(); i++) {
			weightVec[i] = cubicSplineWgtCalc(iPoint, coords[neighbours[i]],
					radiusVec, logFile);
		}
		break;

	// *************************************************************************
	// Gaussian
	case 1:
		for (int i = 0; i < neighbours.size(); i++) {
			weightVec[i] = gaussianWgtCalc(iPoint, coords[neighbours[i]],
					radiusVec, logFile);
		}
		break;

	// *************************************************************************
	// Regularised
	case 2: {
		double weightSum = 0;
		for (int i = 0; i < neighbours.size(); i++) {
			weightVec[i] = regularizedWgtCalc(iPoint, coords[neighbours[i]],
					radiusVec, logFile);
			weightSum += weightVec[i];
		}

		for (int i = 0; i < neighbours.size(); i++) {
			weightVec[i] = weightVec[i] / weightSum;
		}

		break;
	}

	// *************************************************************************
	// Regularised modified
	case 3: {
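		// Build the weight as a product of per-dimension regularised
		// weights, each normalised over the neighbour set.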

		int ndim = iPoint.size();

		for (int d = 0; d < ndim; d++) {

			dbVector weightDimVec(weightVec.size(), 0);
			double weightSum = 0;

			for (int i = 0; i < neighbours.size(); i++) {
				weightDimVec[i] = regularizedWgtCalc_mod(iPoint,
						coords[neighbours[i]], radiusVec, d, logFile);
				weightSum += weightDimVec[i];
			}

			for (int i = 0; i < neighbours.size(); i++) {
				weightVec[i] *= weightDimVec[i] / weightSum;
			}

		}

		break;
	}
	default:
		logFile << "ERROR: In Interpolation::weightCalc, MLSWeightFunc = "
				<< choice << "doesn't exist" << endl;
		cout << "ERROR: In Interpolation::weightCalc, MLSWeightFunc = "
				<< choice << "doesn't exist" << endl;
		MPI_Abort(MPI_COMM_WORLD, 1);
	}

	return weightVec;

}
Example #17
0
void CNodeValues::ToggleNodeStateBySerialNumber( const intVector& numsOfNds )
{
    int numNds = numsOfNds.size();
    const int* pNumsOfNds = &numsOfNds.front();
    ToggleNodeStateBySerialNumber( numNds, pNumsOfNds );
}
Example #18
0
void
DatabaseCorrelation::AddDatabase(const std::string &database, int nStates,
    const doubleVector &times, const intVector &cycles)
{
    // If the database is already in the correlation, we could remove it and
    // add it again in case its length changed (e.g. when time states are
    // added to a file), but for now we simply return.
    if(UsesDatabase(database))
        return;

    //
    // Add the times and cycles for the new database to the correlation so
    // we can access them later and perhaps use them to correlate.
    //
    for(int i = 0; i < nStates; ++i)
    {
        double t = ((i < times.size()) ? times[i] : 0.);
        databaseTimes.push_back(t);
        int c = ((i < cycles.size()) ? cycles[i] : 0);
        databaseCycles.push_back(c);
    }

    if(method == IndexForIndexCorrelation)
    {
        if(numStates >= nStates)
        {
            //
            // The number of states in the correlation is at least as large
            // as the number of states in the database, so we can append the
            // database's states to the indices, repeating the last state.
            //
            for(int i = 0; i < numStates; ++i)
            {
                int state = (i < nStates) ? i : (nStates - 1);
                indices.push_back(state);
            }
        } 
        else
        {
            //
            // The number of states for the current database is larger
            // than the number of states in the correlation. The correlation
            // must be lengthened.
            //
            indices.clear();
            for(size_t i = 0; i < databaseNames.size(); ++i)
            {
                for(int j = 0; j < nStates; ++j)
                {
                    int state = (j < databaseNStates[i]) ? j :
                        (databaseNStates[i]-1);
                    indices.push_back(state);
                }
            }
            // Add the new database to the correlation.
            for(int i = 0; i < nStates; ++i)
                indices.push_back(i);

            numStates = nStates;
        }

        databaseNames.push_back(database);
        databaseNStates.push_back(nStates);
    }
    else if(method == StretchedIndexCorrelation)
    {
        databaseNames.push_back(database);
        databaseNStates.push_back(nStates);

        indices.clear();
        int maxStates = (numStates > nStates) ? numStates : nStates;
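        // e.g. stretching a 3-state database to maxStates == 5 produces the
        // repeated indices 0, 1, 1, 2, 2 (nearest-state rounding).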
        for(size_t i = 0; i < databaseNames.size(); ++i)
        {
            for(int j = 0; j < maxStates; ++j)
            {
                float t = (maxStates > 1) ? float(j) / float(maxStates - 1) : 0.f;
                int state = int(t * (databaseNStates[i] - 1) + 0.5);
                indices.push_back(state);
            }
        }

        numStates = maxStates;
    }
    else if(method == UserDefinedCorrelation)
    {
        if(numStates > nStates)
        {
            //
            // The database being added has fewer states so we need to
            // repeat the last states.
            //
            
            // We'll have to pass in the user-defined indices and append them to the indices vector 
        }
        else
        {
            
        }
    }
    else if(method == TimeCorrelation)
    {
        databaseNames.push_back(database);
        databaseNStates.push_back(nStates);

        // Align times for all databases on the same time axis so we can count
        // the number of distinct times and make that the new number of states.
        std::map<double, intVector> timeAlignmentMap;
        int index = 0;         
        for(size_t i = 0; i < databaseNames.size(); ++i)
            for(int j = 0; j < databaseNStates[i]; ++j, ++index)
                timeAlignmentMap[databaseTimes[index]].push_back(i);

        //
        // Set the condensed times vector
        //
        condensedTimes.clear();
        for(std::map<double,intVector>::const_iterator p = timeAlignmentMap.begin();
            p != timeAlignmentMap.end(); ++p)
        {
            condensedTimes.push_back(p->first);
        }
        
        // Now there is a map from each time found in any database to the
        // list of databases that contain that time.
        indices.clear();
        for(size_t i = 0; i < databaseNames.size(); ++i)
        {
            int state = 0;
            std::map<double, intVector>::const_iterator pos = timeAlignmentMap.begin();
            for(; pos != timeAlignmentMap.end(); ++pos)
            {
                // Look to see if the current database is in the list of databases
                // for the current time. If so, we need to increment the state after
                // we use it.
                intVector::const_iterator dbIndex =
                    std::find(pos->second.begin(), pos->second.end(), i);
                indices.push_back(state);
                if(dbIndex != pos->second.end() && state < databaseNStates[i] - 1)
                    ++state;
            }
        }

        numStates = timeAlignmentMap.size();
    }
    else if(method == CycleCorrelation)
    {
        databaseNames.push_back(database);
        databaseNStates.push_back(nStates);

        // Align cycles for all databases on the same cycle axis so we can count
        // the number of distinct cycles and make that the new number of states.
        std::map<int, intVector> cycleAlignmentMap;
        int index = 0;         
        for(size_t i = 0; i < databaseNames.size(); ++i)
            for(int j = 0; j < databaseNStates[i]; ++j, ++index)
                cycleAlignmentMap[databaseCycles[index]].push_back(i);

        //
        // Set the condensed cycles vector
        //
        condensedCycles.clear();
        for(std::map<int,intVector>::const_iterator p = cycleAlignmentMap.begin();
            p != cycleAlignmentMap.end(); ++p)
        {
            condensedCycles.push_back(p->first);
        }

        // Now there is a map from each cycle found in any database to the
        // list of databases that contain that cycle.
        indices.clear();
        for(size_t i = 0; i < databaseNames.size(); ++i)
        {
            int state = 0;
            std::map<int, intVector>::const_iterator pos = cycleAlignmentMap.begin();
            for(; pos != cycleAlignmentMap.end(); ++pos)
            {
                // Look to see if the current database is in the list of databases
                // for the current cycle. If so, we need to increment the state after
                // we use it.
                intVector::const_iterator dbIndex =
                    std::find(pos->second.begin(), pos->second.end(), i);
                indices.push_back(state);
                if(dbIndex != pos->second.end() && state < databaseNStates[i] - 1)
                    ++state;
            }
        }

        numStates = cycleAlignmentMap.size();
    }
}