Example #1
////////////////////////////////////////////////////////////////////////////////
// 'processFeatureVector' reads a feature vector and modifies the classifier's
// data members accordingly
////////////////////////////////////////////////////////////////////////////////
void
MaxEnt::processFeatureVector(const FeatureVector& features) {
//  Class=2; // unclear whether this assignment is still needed
  en.f.resize(C);
  en.fs.resize(C);
  en.Ny.resize(C);
  size_t c = 0; // class
  en.Ny[c] = 0.0;        // strength of class (first num after @)
  // for each feature
  for (FeatureVector::const_iterator feat = features.begin();
       feat != features.end(); ++feat)
  {
    // these loops may have to be reversed: the classes may need to be looped
    // over first, with the features inside, instead of classes within the
    // feature loop
    StringXML feature;
    double value = feat->getValue();
    // for each class
    for (unsigned int i = 0; i < C; i++) {
      stringstream ss;
      ss << "cat" << i << "_" << feat->getFeature();
      feature = ss.str();
      if (value != 0) {
        // set values
        if (f2s.count(feature) != 0) { // check whether the feature exists
          s2f[f2s[feature]].second += (en.Ny[i] != 0);
          en.f[i].push_back(make_pair(f2s[feature], value));
          en.fs[i] += value;
          F = max(F, en.fs[i]);
        }
      }
    }
  }
}
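// A minimal, self-contained sketch of the per-class feature expansion used
// above: every input feature "f" with value v is duplicated once per class as
// "cat<i>_f" and, if that name is registered in a name->index map, appended to
// that class's sparse feature list.  All names here (expandFeature,
// featureIndex, classFeatures) are hypothetical and not part of the code above.
#include <map>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

typedef std::vector<std::pair<std::size_t, double> > SparseVector;

void expandFeature(const std::string& name, double value, unsigned int numClasses,
                   const std::map<std::string, std::size_t>& featureIndex,
                   std::vector<SparseVector>& classFeatures)
{
  if (value == 0.0) return;                       // zero-valued features carry no information
  if (classFeatures.size() < numClasses) classFeatures.resize(numClasses);
  for (unsigned int i = 0; i < numClasses; ++i) {
    std::ostringstream ss;
    ss << "cat" << i << "_" << name;              // class-conditioned feature name
    std::map<std::string, std::size_t>::const_iterator it = featureIndex.find(ss.str());
    if (it != featureIndex.end())                 // keep only features seen during training
      classFeatures[i].push_back(std::make_pair(it->second, value));
  }
}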
Example #2
  inline
  Tp1 dot_product(const FeatureVector<Tp1, Alloc1>& x, const WeightVector<Tp, Alloc>& w, const FeatureVector<Tp2, Alloc2>& y)
  {
    typedef FeatureVector<Tp1, Alloc1> feature_vector1_type;
    typedef WeightVector<Tp, Alloc>    weight_vector_type;
    typedef FeatureVector<Tp2, Alloc2> feature_vector2_type;
    
    if (x.empty() || y.empty()) return Tp1();
    
    if (static_cast<const void*>(&x) == static_cast<const void*>(&y))
      return details::__inner_product(x.begin(), x.end(), w, Tp1());
    
    if (x.size() < y.size())
      return dot_product(x.begin(), x.end(), w, y, Tp1());
    else
      return dot_product(x, w, y.begin(), y.end(), Tp1());
  }
Example #3
 inline
 Tp1 dot_product(const FeatureVector<Tp1, Alloc1>& x, const FeatureVector<Tp2, Alloc2>& y)
 {
   typedef FeatureVector<Tp1, Alloc1> feature_vector1_type;
   typedef FeatureVector<Tp2, Alloc2> feature_vector2_type;
   
   if (x.empty() || y.empty()) return Tp1();
   
   // if the same address, we are identical!
   if (static_cast<const void*>(&x) == static_cast<const void*>(&y))
     return details::__inner_product(x.begin(), x.end(), Tp1());
   
   if (x.size() < y.size())
     return dot_product(x.begin(), x.end(), y, Tp1());
   else
     return dot_product(x, y.begin(), y.end(), Tp1());
 }
Example #4
  inline
  Tp dot_product(const FeatureVector<Tp1, Alloc1>& x, Iterator first, Iterator last, Tp __dot)
  {
    typedef FeatureVector<Tp1, Alloc1> feature_vector1_type;
    
    for (/**/; first != last; ++ first) {
      typename feature_vector1_type::const_iterator iter = x.find(first->first);
      
      if (iter != x.end())
	__dot += iter->second * first->second;
    }
    return __dot;
  }
Example #5
  inline
  Tp dot_product(Iterator first, Iterator last, const FeatureVector<Tp2, Alloc2>& y, Tp __dot)
  {
    typedef FeatureVector<Tp2, Alloc2> feature_vector2_type;
    
    for (/**/; first != last; ++ first) {
      typename feature_vector2_type::const_iterator iter = y.find(first->first);
      
      if (iter != y.end())
	__dot += first->second * iter->second;
    }
    
    return __dot;
  }
Example #6
 inline
 Tp1 dot_product(const WeightVector<Tp1, Alloc1>& x, const FeatureVector<Tp2, Alloc2>& y)
 {
   return dot_product(x, y.begin(), y.end(), Tp1());
 }
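// A stand-alone sketch of the sparse dot product that the overloads above
// implement for FeatureVector: iterate the smaller container and probe the
// larger one, so the cost depends on min(|x|, |y|) rather than on both sizes.
// std::map<std::string, double> stands in for the real FeatureVector type
// here, and sparse_dot is a hypothetical name.
#include <map>
#include <string>

double sparse_dot(const std::map<std::string, double>& x,
                  const std::map<std::string, double>& y)
{
  if (x.empty() || y.empty()) return 0.0;

  const std::map<std::string, double>& small = (x.size() < y.size() ? x : y);
  const std::map<std::string, double>& large = (x.size() < y.size() ? y : x);

  double dot = 0.0;
  for (std::map<std::string, double>::const_iterator it = small.begin(); it != small.end(); ++it) {
    std::map<std::string, double>::const_iterator found = large.find(it->first);
    if (found != large.end())
      dot += it->second * found->second;          // accumulate only shared keys
  }
  return dot;
}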
Example #7
void ClassifySvmSharedCommand::processSharedAndDesignData(vector<SharedRAbundVector*> lookup) {
    try {
        OutputFilter outputFilter(verbosity);

        LabeledObservationVector labeledObservationVector;
        FeatureVector featureVector;
        readSharedRAbundVectors(lookup, designMap, labeledObservationVector, featureVector);

        // optionally remove features with low standard deviation
        if ( stdthreshold > 0.0 ) {
            FeatureVector removedFeatureVector = applyStdThreshold(stdthreshold, labeledObservationVector, featureVector);
            if (removedFeatureVector.size() > 0) {
                std::cout << removedFeatureVector.size() << " OTUs were below the stdthreshold of " << stdthreshold << " and were removed" << std::endl;
                if ( outputFilter.debug() ) {
                    std::cout << "the following OTUs were below the standard deviation threshold of " << stdthreshold << std::endl;
                    for (FeatureVector::iterator i = removedFeatureVector.begin(); i != removedFeatureVector.end(); i++) {
                        std::cout << "  " << i->getFeatureLabel() << std::endl;
                    }
                }
            }
        }

        // transform the data: scale to [0,1] or standardize to zero mean and unit variance
        if ( transformName == "zeroone") {
            std::cout << "transforming data to lie within range [0,1]" << std::endl;
            transformZeroOne(labeledObservationVector);
        }
        else {
            std::cout << "transforming data to have zero mean and unit variance" << std::endl;
            transformZeroMeanUnitVariance(labeledObservationVector);
        }

        SvmDataset svmDataset(labeledObservationVector, featureVector);

        OneVsOneMultiClassSvmTrainer trainer(svmDataset, evaluationFoldCount, trainingFoldCount, *this, outputFilter);

        if ( mode == "rfe" ) {
            SvmRfe svmRfe;
            ParameterRange& linearKernelConstantRange = kernelParameterRangeMap["linear"]["constant"];
            ParameterRange& linearKernelSmoCRange = kernelParameterRangeMap["linear"]["smoc"];
            RankedFeatureList rankedFeatureList = svmRfe.getOrderedFeatureList(svmDataset, trainer, linearKernelConstantRange, linearKernelSmoCRange);

            map<string, string> variables;
            variables["[filename]"] = outputDir + m->getRootName(m->getSimpleName(sharedfile));
            variables["[distance]"] = lookup[0]->getLabel();
            string filename = getOutputFileName("summary", variables);
            outputNames.push_back(filename);
            outputTypes["summary"].push_back(filename);
            m->mothurOutEndLine();

            std::ofstream outputFile(filename.c_str());

            int n = 0;
            int rfeRoundCount = rankedFeatureList.front().getRank();
            std::cout << "ordered features:" << std::endl;
            std::cout << setw(5)  << "index"
                      << setw(12) << "OTU"
                      << setw(5)  << "rank"
                      << std::endl;
            outputFile << setw(5)  << "index"
                       << setw(12) << "OTU"
                       << setw(5)  << "rank"
                       << std::endl;
            for (RankedFeatureList::iterator i = rankedFeatureList.begin(); i != rankedFeatureList.end(); i++) {
                n++;
                int rank = rfeRoundCount - i->getRank() + 1;
                outputFile << setw(5)  << n
                           << setw(12) << i->getFeature().getFeatureLabel()
                           << setw(5)  << rank
                           << std::endl;
                if ( n <= 20 ) {
                    std::cout << setw(5) << n
                              << setw(12) << i->getFeature().getFeatureLabel()
                              << setw(5) << rank
                              << std::endl;
                }
            }
            outputFile.close();
        }
        else {
            MultiClassSVM* mcsvm = trainer.train(kernelParameterRangeMap);

            map<string, string> variables;
            variables["[filename]"] = outputDir + m->getRootName(m->getSimpleName(sharedfile));
            variables["[distance]"] = lookup[0]->getLabel();
            string filename = getOutputFileName("summary", variables);
            outputNames.push_back(filename);
            outputTypes["summary"].push_back(filename);
            m->mothurOutEndLine();

            std::ofstream outputFile(filename.c_str());

            printPerformanceSummary(mcsvm, std::cout);
            printPerformanceSummary(mcsvm, outputFile);

            outputFile << "actual  predicted" << std::endl;
            for ( LabeledObservationVector::const_iterator i = labeledObservationVector.begin(); i != labeledObservationVector.end(); i++ ) {
                Label actualLabel = i->getLabel();
                outputFile << i->getDatasetIndex() << " " << actualLabel << " ";
                try {
                    Label predictedLabel = mcsvm->classify(*(i->getObservation()));
                    outputFile << predictedLabel << std::endl;
                }
                catch ( MultiClassSvmClassificationTie& e ) {
                    outputFile << "tie" << std::endl;
                    std::cout << "classification tie for observation " << i->getDatasetIndex() << " with label " << actualLabel << std::endl;
                }

            }
            outputFile.close();
            delete mcsvm;
        }

    }
    catch (exception& e) {
        m->errorOut(e, "ClassifySvmSharedCommand", "processSharedAndDesignData");
        exit(1);
    }
}
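// The applyStdThreshold call above is not shown in this section; the sketch
// below only illustrates the idea described there -- flag features whose
// standard deviation across all observations falls below a threshold.  The
// plain row-vector representation and the name lowVarianceFeatures are
// hypothetical stand-ins for mothur's own types.
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<std::size_t> lowVarianceFeatures(const std::vector<std::vector<double> >& rows,
                                             double stdThreshold)
{
  std::vector<std::size_t> removed;
  if (rows.empty()) return removed;

  const std::size_t featureCount = rows[0].size();
  for (std::size_t j = 0; j < featureCount; ++j) {
    double mean = 0.0;
    for (std::size_t i = 0; i < rows.size(); ++i) mean += rows[i][j];
    mean /= rows.size();

    double variance = 0.0;
    for (std::size_t i = 0; i < rows.size(); ++i) {
      const double d = rows[i][j] - mean;
      variance += d * d;
    }
    variance /= rows.size();

    if (std::sqrt(variance) < stdThreshold)   // below the threshold: flag for removal
      removed.push_back(j);
  }
  return removed;
}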
Example #8
bool CButtonMapXml::Serialize(const FeatureVector& features, TiXmlElement* pElement)
{
  if (pElement == NULL)
    return false;

  for (FeatureVector::const_iterator it = features.begin(); it != features.end(); ++it)
  {
    const ADDON::JoystickFeature& feature = *it;

    if (!IsValid(feature))
      continue;

    TiXmlElement featureElement(BUTTONMAP_XML_ELEM_FEATURE);
    TiXmlNode* featureNode = pElement->InsertEndChild(featureElement);
    if (featureNode == NULL)
      return false;

    TiXmlElement* featureElem = featureNode->ToElement();
    if (featureElem == NULL)
      return false;

    featureElem->SetAttribute(BUTTONMAP_XML_ATTR_FEATURE_NAME, feature.Name());

    switch (feature.Type())
    {
      case JOYSTICK_FEATURE_TYPE_SCALAR:
      {
        SerializePrimitive(featureElem, feature.Primitive());

        break;
      }
      case JOYSTICK_FEATURE_TYPE_ANALOG_STICK:
      {
        if (!SerializePrimitiveTag(featureElem, feature.Up(), BUTTONMAP_XML_ELEM_UP))
          return false;

        if (!SerializePrimitiveTag(featureElem, feature.Down(), BUTTONMAP_XML_ELEM_DOWN))
          return false;

        if (!SerializePrimitiveTag(featureElem, feature.Right(), BUTTONMAP_XML_ELEM_RIGHT))
          return false;

        if (!SerializePrimitiveTag(featureElem, feature.Left(), BUTTONMAP_XML_ELEM_LEFT))
          return false;

        break;
      }
      case JOYSTICK_FEATURE_TYPE_ACCELEROMETER:
      {
        if (!SerializePrimitiveTag(featureElem, feature.PositiveX(), BUTTONMAP_XML_ELEM_POSITIVE_X))
          return false;

        if (!SerializePrimitiveTag(featureElem, feature.PositiveY(), BUTTONMAP_XML_ELEM_POSITIVE_Y))
          return false;

        if (!SerializePrimitiveTag(featureElem, feature.PositiveZ(), BUTTONMAP_XML_ELEM_POSITIVE_Z))
          return false;

        break;
      }
      default:
        break;
    }
  }

  return true;
}
Example #9
void FaceTracking::track( FeatureVector &faces, double affWeightPos, double affWeightSize, double affThresh, 
			double cohWeightDir, double cohWeightVel, double cohWeightSize, double cohThresh, double tolDir, double tolVel, double tolSize, 
			double discardThresh, double damping, double cohRise, 
			const _2Real::Vec2 &minSizeFace,
			double time )
{
	std::set<_2Real::Space2D*> remainingDetections;
	for( FeatureVector::iterator itF = faces.begin(); itF != faces.end(); ++itF )
		remainingDetections.insert( &( *itF ) );

	if( m_trackingInfoList.size() )
	{
		typedef std::multimap<double, std::pair<TrackingInfo*, _2Real::Space2D*> > PriorityMap;	//trajectory/detection candidate pairs, ordered by their match value (affinity or coherence)

		PriorityMap priorityMapAffinity;
		PriorityMap priorityMapCoherence;

		//STEP 1: create map of trajectory/detectiondata pairs
		for( FeatureVector::iterator itF = faces.begin(); itF != faces.end(); ++itF )
		{
			for( TrackingInfoList::iterator itT = m_trackingInfoList.begin(); itT != m_trackingInfoList.end(); ++itT )
			{
				if( itT->getFaceTrajectory().canCalcAffinity() )
					priorityMapAffinity.insert( std::make_pair( itT->getFaceTrajectory().calcAffinity( *itF, affWeightPos, affWeightSize, 1.0f, time ),
							std::make_pair( &( *itT ), &( *itF ) ) ) );

				if( itT->getFaceTrajectory().canCalcCoherence() )
					priorityMapCoherence.insert( std::make_pair( itT->getFaceTrajectory().calcCoherence( *itF, cohWeightDir, cohWeightVel, cohWeightSize, time, tolDir, tolVel, tolSize ),
							std::make_pair( &( *itT ), &( *itF ) ) ) );
			}
		}

		std::set<TrackingInfo*> remainingTrajectories;
		for( TrackingInfoList::iterator itT = m_trackingInfoList.begin(); itT != m_trackingInfoList.end(); ++itT )
			remainingTrajectories.insert( &( *itT ) );

		//STEP 2: get best face for each trajectory by coherence value (thresholded)
		for( PriorityMap::const_iterator it = priorityMapCoherence.begin(); it != priorityMapCoherence.end(); ++it )
		{
			if( it->first > cohThresh )
				break;		//all remaining pairs have an even higher coherence value

			std::set<TrackingInfo*>::const_iterator itT = remainingTrajectories.find( it->second.first );
			if( itT == remainingTrajectories.end() )
				continue;	//trajectory already found its best-fitting face

			std::set<_2Real::Space2D*>::const_iterator itF = remainingDetections.find( it->second.second );
			if( itF == remainingDetections.end() )
				continue;	//face already used for another trajectory

			( *itT )->add( **itF, it->first, time );

			remainingTrajectories.erase( itT );
			remainingDetections.erase( itF );
		}

		//STEP 3: remaining short trajectories (size < 2) are filled with best faces by affinity value (thresholded)
		for( PriorityMap::const_iterator it = priorityMapAffinity.begin(); it != priorityMapAffinity.end(); ++it )
		{
			if( it->first > affThresh )
				break;		//all remaining pairs have an even higher affinity value

			if( it->second.first->getFaceTrajectory().canCalcCoherence() )
				continue;

			std::set<TrackingInfo*>::const_iterator itT = remainingTrajectories.find( it->second.first );
			if( itT == remainingTrajectories.end() )
				continue;	//trajectory already found its best-fitting face

			std::set<_2Real::Space2D*>::const_iterator itF = remainingDetections.find( it->second.second );
			if( itF == remainingDetections.end() )
				continue;	//face already used for another trajectory

			( *itT )->add( **itF, 0.0, time );

			remainingTrajectories.erase( itT );
			remainingDetections.erase( itF );
		}

		//STEP 4: remaining trajectories are extrapolated (implement Kalman, finally?) if they hold enough entries; otherwise extrapolation does not make sense
		for( std::set<TrackingInfo*>::const_iterator it = remainingTrajectories.begin(); it != remainingTrajectories.end(); ++it )
			( *it )->extrapolate( time, damping, cohRise );

		//STEP 5: discard all trajectories with too high coherence average
		TrackingInfoList::iterator itT = m_trackingInfoList.begin();
		while( itT != m_trackingInfoList.end() )
		{
			if( itT->getFaceTrajectory().getAvrgCoherence() > discardThresh )
			{
				remainingTrajectories.erase( &( *itT ) );
				m_availableUserIDs.push_back( itT->getUserID() );

				itT = m_trackingInfoList.erase( itT );
			}
			else
				itT++;
		}

		for( std::set<TrackingInfo*>::iterator it = remainingTrajectories.begin(); it != remainingTrajectories.end(); ++it )
			( *it )->prune();

		//STEP 6: discard all trajectories which ran out of entries
		itT = m_trackingInfoList.begin();
		while( itT != m_trackingInfoList.end() )
		{
			if( !itT->getEntryCount() )
			{
				m_availableUserIDs.push_back( itT->getUserID() );

				itT = m_trackingInfoList.erase( itT );
			}
			else
				itT++;
		}
	}

	//STEP 7: build new trajectories out of remaining faces (extrapolation not possible due to low number of entries)
	for( std::set<_2Real::Space2D*>::const_iterator it = remainingDetections.begin(); it != remainingDetections.end(); ++it )
	{
		if( m_availableUserIDs.empty() )
			break;	//no free user IDs left, cannot start another trajectory

		m_trackingInfoList.push_back( TrackingInfo( m_availableUserIDs.back(), minSizeFace ) );
		m_availableUserIDs.pop_back();

		m_trackingInfoList.back().add( **it, 0.0, time );
	}
}
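// A compact, self-contained sketch of the greedy matching used in STEP 2 and
// STEP 3 above: candidate (trajectory, detection) pairs sit in a multimap
// keyed by their cost, are walked in increasing order, and a pair is accepted
// only while its cost stays under the threshold and both sides are still
// unmatched.  Plain ints stand in for TrackingInfo* and _2Real::Space2D*
// here, and greedyMatch is a hypothetical name.
#include <map>
#include <set>
#include <utility>
#include <vector>

std::vector<std::pair<int, int> > greedyMatch(
    const std::multimap<double, std::pair<int, int> >& candidates, double threshold)
{
  std::set<int> freeTrajectories, freeDetections;
  for (std::multimap<double, std::pair<int, int> >::const_iterator it = candidates.begin();
       it != candidates.end(); ++it) {
    freeTrajectories.insert(it->second.first);
    freeDetections.insert(it->second.second);
  }

  std::vector<std::pair<int, int> > matches;
  for (std::multimap<double, std::pair<int, int> >::const_iterator it = candidates.begin();
       it != candidates.end(); ++it) {
    if (it->first > threshold) break;                            // remaining costs only get worse
    if (freeTrajectories.erase(it->second.first) == 0) continue; // trajectory already matched
    if (freeDetections.erase(it->second.second) == 0) {          // detection already matched
      freeTrajectories.insert(it->second.first);                 // give the trajectory back
      continue;
    }
    matches.push_back(it->second);
  }
  return matches;
}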
Example #10
void TrackingInfo::addFromFeatureCandidates( const FeatureVector &eyesCandidates, const FeatureVector &noseCandidates, const FeatureVector &mouthCandidates, double time, double posAffWeight, double sizeAffWeight )
{
	typedef std::multimap<double, const _2Real::Space2D*> PriorityMap;

	if( eyesCandidates.size() )
	{
		PriorityMap mapLeft;
		PriorityMap mapRight;

		for( FeatureVector::const_iterator it = eyesCandidates.begin(); it != eyesCandidates.end(); ++it )
		{
			_2Real::Vec2 center( ( it->getP0() + it->getP1() ) * 0.5f );
			//assign the candidate to the left or right eye map depending on which side of the region its center lies
			if( center[0] > 0.5f )
			{
				if( m_eyeLeftTrajectory.canCalcAffinity() )
					mapLeft.insert( std::make_pair( m_eyeLeftTrajectory.calcAffinity( *it, posAffWeight, sizeAffWeight, 1.0, time ), &( *it ) ) );
				else
					mapLeft.insert( std::make_pair( 0.0, &( *it ) ) );
			}
			else
			{
				if( m_eyeRightTrajectory.canCalcAffinity() )
					mapRight.insert( std::make_pair( m_eyeRightTrajectory.calcAffinity( *it, posAffWeight, sizeAffWeight, 1.0, time ), &( *it ) ) );
				else
					mapRight.insert( std::make_pair( 0.0, &( *it ) ) );
			}
		}

		if( mapLeft.size() )
			m_eyeLeftTrajectory.add( *( mapLeft.begin()->second ), 0.0, time );
		else
		{
			//NOTE: there is no measure (yet?) to decide whether or not it would make sense to still extrapolate face feature, so this is not done at all,
			// instead trajectories are pruned so entries don't last forever. (actually *duplicating* last entry would make more sense in context of
			// face feature regions)
			m_eyeLeftTrajectory.prune();
		}

		if( mapRight.size() )
			m_eyeRightTrajectory.add( *( mapRight.begin()->second ), 0.0, time );
		else
		{
			//NOTE: there is no measure (yet?) to decide whether or not it would make sense to still extrapolate face feature, so this is not done at all,
			// instead trajectories are pruned so entries don't last forever. (actually *duplicating* last entry would make more sense in context of
			// face feature regions)
			m_eyeRightTrajectory.prune();
		}
	}
	else
	{
		//NOTE: there is no measure (yet?) to decide whether or not it would make sense to still extrapolate face feature, so this is not done at all,
		// instead trajectories are pruned so entries don't last forever. (actually *duplicating* last entry would make more sense in context of
		// face feature regions)
		m_eyeLeftTrajectory.prune();
		m_eyeRightTrajectory.prune();
	}

	if( noseCandidates.size() )
	{
		double minAffinity = 0.0;
		const _2Real::Space2D *bestCandidate = NULL;

		if( !m_noseTrajectory.canCalcAffinity() )
			bestCandidate = &( noseCandidates.front() ); //not enough regions in trajectory history in order to judge, just take first in candidate list
		else
		{
			for( FeatureVector::const_iterator it = noseCandidates.begin(); it != noseCandidates.end(); ++it )
			{
				if( !bestCandidate )
				{
					bestCandidate = &( *it );
					minAffinity = m_noseTrajectory.calcAffinity( *it, posAffWeight, sizeAffWeight, 1.0, time );
				}
				else
				{
					double affinity = m_noseTrajectory.calcAffinity( *it, posAffWeight, sizeAffWeight, 1.0, time );
					if( affinity < minAffinity )
					{
						bestCandidate = &( *it );
						minAffinity = affinity;
					}
				}
			}
		}

		m_noseTrajectory.add( *bestCandidate, 0.0, time );
	}
	else
	{
		//NOTE: there is no measure (yet?) to decide whether or not it would make sense to still extrapolate face feature, so this is not done at all,
		// instead trajectories are pruned so entries don't last forever. (actually *duplicating* last entry would make more sense in context of
		// face feature regions)
		m_noseTrajectory.prune();
	}

	if( mouthCandidates.size() )
	{
		double minAffinity = 0.0;
		const _2Real::Space2D *bestCandidate = NULL;

		if( !m_mouthTrajectory.canCalcAffinity() )
			bestCandidate = &( mouthCandidates.front() ); //not enough regions in trajectory history in order to judge, just take first in candidate list
		else
		{
			for( FeatureVector::const_iterator it = mouthCandidates.begin(); it != mouthCandidates.end(); ++it )
			{
				if( !bestCandidate )
				{
					bestCandidate = &( *it );
					minAffinity = m_mouthTrajectory.calcAffinity( *it, posAffWeight, sizeAffWeight, 1.0, time );
				}
				else
				{
					double affinity = m_mouthTrajectory.calcAffinity( *it, posAffWeight, sizeAffWeight, 1.0, time );
					if( affinity < minAffinity )
					{
						bestCandidate = &( *it );
						minAffinity = affinity;
					}
				}
			}
		}

		m_mouthTrajectory.add( *bestCandidate, 0.0, time );
	}
	else
	{
		//NOTE: there is no measure (yet?) to decide whether or not it would make sense to still extrapolate face feature, so this is not done at all,
		// instead trajectories are pruned so entries don't last forever. (actually *duplicating* last entry would make more sense in context of
		// face feature regions)
		m_mouthTrajectory.prune();
	}
}