/**
 * Constructor with the list of segments and the position of the first ref.
 *
 * Builds the alignment graph for one SegmentsGroup: one dimension per
 * hypothesis and per reference, a topologically ordered token vector per
 * dimension, the first/last token lists used as graph entry/exit points,
 * the Levenshtein cost matrix (compressed or plain array), and a cache of
 * previous-index lists.
 *
 * @param _segmentsGroup          Group of hyp/ref segments to align (read, not owned).
 * @param _pSpeakerMatch          Speaker matching table (stored, not owned).
 * @param _typeCost               Cost model selector; 2 enables the time-based model.
 * @param _costTrans              Transition cost.
 * @param _costIns                Insertion cost.
 * @param _costOpt                Optionally-deletable cost.
 * @param _costCorrectNonSpeaker  Cost for correct word / wrong speaker.
 * @param _costAdaptive           Adaptive cost parameter.
 * @param _optRef                 Use optionally tokens on the reference side.
 * @param _optHyp                 Use optionally tokens on the hypothesis side.
 * @param _bCompressedArray       Use the compressed Levenshtein matrix implementation.
 */
Graph::Graph(SegmentsGroup* _segmentsGroup, SpeakerMatch* _pSpeakerMatch, const int& _typeCost, const int& _costTrans, const int& _costIns, const int& _costOpt, const int& _costCorrectNonSpeaker, const int& _costAdaptive, const bool& _optRef, const bool& _optHyp, const bool& _bCompressedArray)
	: m_pSpeakerMatch(_pSpeakerMatch), m_typeCostModel(_typeCost), m_CostTransition(_costTrans), m_CostInsertion(_costIns), m_CostOptionally(_costOpt), m_CostCorrectNonSpeaker(_costCorrectNonSpeaker), m_CostAdaptive(_costAdaptive), m_useOptForRef(_optRef), m_useOptForHyp(_optHyp), m_bCompressedArray(_bCompressedArray)
{
	// "align.genericmethod" == "true" means the generic (hyp/ref-agnostic) method:
	// m_HypRefStatus is true only when the property is NOT "true".
	m_HypRefStatus = (string("true").compare(Properties::GetProperty("align.genericmethod")) != 0);
	//m_NbThreads = static_cast<size_t>(atoi(Properties::GetProperty("threads.number").c_str()));

	if(m_typeCostModel == 2)
	{
		// Calculate the safe divider: the smallest of the base costs.
		m_TimeBasedSafeDivider = min(min(min(m_CostTransition, m_CostInsertion), m_CostOptionally), m_CostCorrectNonSpeaker);
	}

	Token* curToken;
	size_t i, k, sizevector;

	// One dimension per hypothesis plus one per reference.
	SetDimension(_segmentsGroup->GetNumberOfReferences()+_segmentsGroup->GetNumberOfHypothesis());

	// Hypotheses occupy dimensions [0, NumberOfHypothesis); references follow.
	if(m_HypRefStatus)
		SetIndexRef(_segmentsGroup->GetNumberOfHypothesis());
	else
		SetIndexRef(0);

	m_TabDimensionDeep = new size_t[GetDimension()];
	m_TabVecHypRef = new vector<Token*>[GetDimension()];
	m_TabMapTokenIndex = new map<Token*, size_t>[GetDimension()];
	m_TabFirstTokens = new list<Token*>[GetDimension()];
	m_TabLastTokens = new list<Token*>[GetDimension()];

	// -1 acts as "not yet set" sentinel for the minimum over all tokens.
	int minTimeSafeDividerToken = -1;

	// Planning each Segment and look for the last and first token
	for(i=0; i<GetDimension(); ++i)
	{
		vector<Segment*> temp_segs;

		if (i < _segmentsGroup->GetNumberOfHypothesis())
		{
			m_TabVecHypRef[i] = _segmentsGroup->ToTopologicalOrderedStructHyp(i);
			temp_segs = _segmentsGroup->GetHypothesis(i);
		}
		else
		{
			m_TabVecHypRef[i] = _segmentsGroup->ToTopologicalOrderedStructRef(i-_segmentsGroup->GetNumberOfHypothesis());
			temp_segs = _segmentsGroup->GetReference(i-_segmentsGroup->GetNumberOfHypothesis());
		}

		sizevector = m_TabVecHypRef[i].size();
		SetDimensionDeep(i, sizevector);

		for(k=0; k<sizevector; ++k)
		{
			curToken = m_TabVecHypRef[i][k];

			if(curToken != NULL)
			{
				if(m_typeCostModel == 2)
				{
					// Track the smallest per-token safe divider across all tokens.
					int TimeSafeDividerToken = curToken->TimeSafeDivider();

					if( (minTimeSafeDividerToken == -1) || (TimeSafeDividerToken < minTimeSafeDividerToken) )
						minTimeSafeDividerToken = TimeSafeDividerToken;
				}

				// Remember the topological index of this token in its dimension.
				m_TabMapTokenIndex[i][curToken] = k;

				// Guard: with no segments, size()-1 would wrap around (size_t)
				// and temp_segs[prcs] would be an out-of-bounds access.
				if(!temp_segs.empty())
				{
					// Skip leading empty segments, then test for a first token.
					size_t prcs = 0;

					while( (prcs < (temp_segs.size()-1)) && (temp_segs[prcs]->isEmpty()) )
						++prcs;

					if(temp_segs[prcs]->isFirstToken(curToken))
						m_TabFirstTokens[i].push_front(curToken);

					// Skip trailing empty segments, then test for a last token.
					prcs = temp_segs.size()-1;

					while( (prcs > 0) && (temp_segs[prcs]->isEmpty()) )
						--prcs;

					if(temp_segs[prcs]->isLastToken(curToken))
						m_TabLastTokens[i].push_front(curToken);
				}
			}
		}
	}

	SetGraphOptimization();

	m_MaxDurationSegmentGroup = _segmentsGroup->GetTotalDuration();

	// Choose the Levenshtein cost-matrix implementation.
	if(m_bCompressedArray)
	{
		LOG_DEBUG(logger, "Levenshtein Matrix Compression: ON");
		m_MapCost = new CompressedLevenshteinMatrix(GetDimension(), m_TabDimensionDeep);
	}
	else
	{
		LOG_DEBUG(logger, "Levenshtein Matrix Compression: OFF");
		m_MapCost = new ArrayLevenshteinMatrix(GetDimension(), m_TabDimensionDeep);
	}

	// Creating cache container (lazily filled: entries start as NULL).
	m_TabCacheDimPreviousIndex = new list<size_t>** [GetDimension()];

	for(i=0; i<GetDimension(); ++i)
	{
		m_TabCacheDimPreviousIndex[i] = new list<size_t>* [m_TabDimensionDeep[i]];

		for(k=0; k<m_TabDimensionDeep[i]; ++k)
			m_TabCacheDimPreviousIndex[i][k] = NULL;
	}

	if(m_typeCostModel == 2)
	{
		// Final safe divider = min base cost * min per-token divider.
		m_TimeBasedSafeDivider *= minTimeSafeDividerToken;
		char buffer [BUFFER_SIZE];
		// snprintf bounds the write to BUFFER_SIZE (sprintf had no bound).
		snprintf(buffer, BUFFER_SIZE, "Use Safe divider (%d)!", m_TimeBasedSafeDivider);
		LOG_DEBUG(logger, buffer);
	}
}