Example #1
BaseMatInstance * MaterialManager::createMeshDebugMatInstance(const ColorF &meshColor)
{
   String  meshDebugStr = String::ToString( "Torque_MeshDebug_%d", meshColor.getRGBAPack() );

   Material *debugMat;
   if (!Sim::findObject(meshDebugStr,debugMat))
   {
      debugMat = allocateAndRegister( meshDebugStr );

      debugMat->mDiffuse[0] = meshColor;
      debugMat->mEmissive[0] = true;
   }

   BaseMatInstance   *debugMatInstance = NULL;

   if( debugMat != NULL )
   {
      debugMatInstance = debugMat->createMatInstance();

      GFXStateBlockDesc desc;
      desc.setCullMode(GFXCullNone);
      desc.fillMode = GFXFillWireframe;
      debugMatInstance->addStateBlockDesc(desc);

      // Disable fog and other stuff.
      FeatureSet debugFeatures;
      debugFeatures.addFeature( MFT_DiffuseColor );
      debugMatInstance->init( debugFeatures, getGFXVertexFormat<GFXVertexPCN>() );
   }

   return debugMatInstance;
}
// Perform simple feature matching.  This just uses the SSD
// distance between two feature vectors, and matches a feature in the
// first image with the closest feature in the second image.  It can
// match multiple features in the first image to the same feature in
// the second image.
void ssdMatchFeatures(const FeatureSet &f1, const FeatureSet &f2, vector<FeatureMatch> &matches, double &totalScore) {
    int m = f1.size();
    int n = f2.size();

    matches.resize(m);
    totalScore = 0;

    double d;
    double dBest;
    int idBest;

    for (int i=0; i<m; i++) {
        dBest = 1e100;
        idBest = 0;

        for (int j=0; j<n; j++) {
            d = distanceSSD(f1[i].data, f2[j].data);

            if (d < dBest) {
                dBest = d;
                idBest = f2[j].id;
            }
        }

        matches[i].id1 = f1[i].id;
        matches[i].id2 = idBest;
        matches[i].score = dBest;
        totalScore += matches[i].score;
    }
}
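
The distanceSSD helper used above is not included in this listing. A minimal sketch, assuming the descriptors are plain vector<double> values of equal length (as the f.data usage in the other examples suggests); the signature is an assumption, not the original implementation:

// Hypothetical sketch: sum of squared differences between two descriptor vectors.
// Assumes v1 and v2 have the same length.
double distanceSSD(const vector<double> &v1, const vector<double> &v2) {
    double sum = 0.0;
    for (size_t k = 0; k < v1.size(); k++) {
        double diff = v1[k] - v2[k];
        sum += diff * diff;
    }
    return sum;
}
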
Example #3
	void RowLookupTable::buildIndex( const FeatureSet & fset )
	{
		size_t n = fset.size();
		if( !n )
			return;
		_maxY = ( int )fset[ n - 1 ].pt.y;
		
		_rowIndex.resize( _maxY + 1, Row() );

		int cy = ( int )fset[ 0 ].pt.y;
		_minY = cy;
		_rowIndex[ cy ].start = 0;

		FeatureSet::CmpYi cmp;
		while( cy < _maxY ){
			int prevStart = _rowIndex[ cy ].start;
			// calculate the upper bound for the previous y coord
			int start = std::upper_bound( &fset[ prevStart ],
										  &fset[ n - 1 ],
										  fset[ prevStart ], cmp ) - &fset[ 0 ];

			if( start == prevStart ) {
				break;
			}
			_rowIndex[ cy ].len = start - prevStart;

			cy = ( int ) fset[ start ].pt.y;
			_rowIndex[ cy ].start = start;
		}
		_rowIndex[ cy ].len = fset.size() - _rowIndex[ cy ].start;
	}
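
For illustration only, a hypothetical accessor (not part of the class as excerpted) could use the per-row start/len pairs built above to answer "which features lie on scanline y"; the rowRange name and the assumption that a default-constructed Row has len == 0 are mine:

	// Hypothetical sketch: report the [start, start + len) feature index range for row y,
	// using the _rowIndex/_minY/_maxY members filled by buildIndex above.
	bool RowLookupTable::rowRange( int y, int & start, int & len ) const
	{
		if( y < _minY || y > _maxY )
			return false;
		start = _rowIndex[ y ].start;
		len   = _rowIndex[ y ].len;
		return len > 0; // assumes a default-constructed Row has len == 0
	}
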
Example #4
// Perform ratio feature matching. This just uses the ratio of the SSD distance of the 
// two best matches and matches a feature in the first image with the closest feature 
// in the second image. It can match multiple features in the first image to the same 
// feature in the second image.
void ratioMatchFeatures(const FeatureSet &f1, const FeatureSet &f2, vector<FeatureMatch> &matches) 
{
    int m = f1.size();
    int n = f2.size();

    matches.resize(m);

    double d;
    double dBest, dBest2;
    int idBest;

    for (int i=0; i<m; i++) {
        dBest = 1e100;
        dBest2 = 1e100;
        idBest = 0;
        for (int j=0; j<n; j++) {
            d = distanceSSD(f1[i].data, f2[j].data);
            if (d < dBest) {
                dBest2 = dBest;
                dBest = d;
                idBest = f2[j].id;
            }
            else if (d < dBest2) {
                dBest2 = d;
            }
        }

        matches[i].id1 = f1[i].id;
        matches[i].id2 = idBest;
        matches[i].distance = dBest/dBest2;
    }
}
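
Because matches[i].distance stores the best-to-second-best ratio, callers would typically keep only matches below some cutoff (a Lowe-style ratio test). A small sketch under that assumption; the helper name and the 0.8 default are illustrative, not part of the example above:

// Hypothetical post-filter: keep matches whose best/second-best SSD ratio is below maxRatio.
void filterByRatio(const vector<FeatureMatch> &matches, vector<FeatureMatch> &good, double maxRatio = 0.8) {
    good.clear();
    for (size_t i = 0; i < matches.size(); i++) {
        if (matches[i].distance < maxRatio)   // smaller ratio = less ambiguous match
            good.push_back(matches[i]);
    }
}
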
Example #5
File: skyBox.cpp Project: nev7n/Torque3D
void SkyBox::_initMaterial()
{
   if ( mMatInstance )
      SAFE_DELETE( mMatInstance );

   if ( mMaterial )
      mMatInstance = mMaterial->createMatInstance();
   else
      mMatInstance = MATMGR->createMatInstance( "WarningMaterial" );

   // We want to disable culling and z write.
   GFXStateBlockDesc desc;
   desc.setCullMode( GFXCullNone );
   desc.setBlend( true );
   desc.setZReadWrite( true, false );
   mMatInstance->addStateBlockDesc( desc );

   // Also disable lighting on the skybox material by default.
   FeatureSet features = MATMGR->getDefaultFeatures();
   features.removeFeature( MFT_RTLighting );
   features.removeFeature( MFT_Visibility );
   features.addFeature(MFT_SkyBox);

   // Now initialize the material.
   mMatInstance->init(features, getGFXVertexFormat<GFXVertexPNT>());
}
// Compute Simple descriptors.
void ComputeSimpleDescriptors(CFloatImage &image, FeatureSet &features)
{
    //Create grayscale image used for Harris detection
    CFloatImage grayImage=ConvertToGray(image);

    vector<Feature>::iterator i = features.begin();
    while (i != features.end()) {
        Feature &f = *i;
        // these fields should already be set in the computeFeatures function
        int x = f.x;
        int y = f.y;

        // now grab the 5x5 window surrounding the feature and store it in the feature's data
        for (int row=(y-2); row<=(y+2); row++)
        {
            for (int col=(x-2); col<=(x+2); col++)
            {
                // if the pixel is out of bounds, assume it is black
                if (row<0 || row>=grayImage.Shape().height || col<0 || col>=grayImage.Shape().width)
                {
                    f.data.push_back(0.0);
                }
                else
                {
                    f.data.push_back(grayImage.Pixel(col,row,0));
                }
            }
        }
        printf("feature num %d\n", i->id);
        i++;
    }
}
Example #7
void ImposterCaptureMaterialHook::init( BaseMatInstance *inMat )
{
   // We cannot capture impostors on custom materials
   // as we don't know how to get just diffuse and just
   // normals rendering.
   if ( dynamic_cast<CustomMaterial*>( inMat->getMaterial() ) )
      return;

   // Tweak the feature data to include just what we need.
   FeatureSet features;
   features.addFeature( MFT_VertTransform );
   features.addFeature( MFT_DiffuseMap );
   features.addFeature( MFT_OverlayMap );
   features.addFeature( MFT_DetailMap );
   features.addFeature( MFT_DiffuseColor );
   features.addFeature( MFT_AlphaTest );
   features.addFeature( MFT_IsTranslucent );

   const String &matName = inMat->getMaterial()->getName();

   mDiffuseMatInst = MATMGR->createMatInstance( matName );
   mDiffuseMatInst->getFeaturesDelegate().bind( &ImposterCaptureMaterialHook::_overrideFeatures );
   mDiffuseMatInst->init( features, inMat->getVertexFormat() );
   
   features.addFeature( MFT_IsDXTnm );
   features.addFeature( MFT_NormalMap );
   features.addFeature( MFT_NormalsOut );
   mNormalsMatInst = MATMGR->createMatInstance( matName );
   mNormalsMatInst->getFeaturesDelegate().bind( &ImposterCaptureMaterialHook::_overrideFeatures );
   mNormalsMatInst->init( features, inMat->getVertexFormat() );
}
void ShadowMaterialHook::_overrideFeatures(  ProcessedMaterial *mat,
                                             U32 stageNum,
                                             MaterialFeatureData &fd, 
                                             const FeatureSet &features )
{
   if ( stageNum != 0 )
   {
      fd.features.clear();
      return;
   }

   // Disable the base texture if we don't 
   // have alpha test enabled.
   if ( !fd.features[ MFT_AlphaTest ] )
   {
      fd.features.removeFeature( MFT_TexAnim );
      fd.features.removeFeature( MFT_DiffuseMap );
   }

   // HACK: Need to figure out how to enable these 
   // suckers without this override call!

   fd.features.setFeature( MFT_ParaboloidVertTransform, 
      features.hasFeature( MFT_ParaboloidVertTransform ) );
   fd.features.setFeature( MFT_IsSinglePassParaboloid, 
      features.hasFeature( MFT_IsSinglePassParaboloid ) );
      
   // The paraboloid transform outputs linear depth, so
   // it needs to use the plain depth out feature.
   if ( fd.features.hasFeature( MFT_ParaboloidVertTransform ) ) 
      fd.features.addFeature( MFT_DepthOut );      
   else
      fd.features.addFeature( MFT_EyeSpaceDepthOut );
}
Example #9
AbstractClient::AbstractClient(QObject *parent)
    : QObject(parent), nextCmdId(0), status(StatusDisconnected)
{
    qRegisterMetaType<QVariant>("QVariant");
    qRegisterMetaType<CommandContainer>("CommandContainer");
    qRegisterMetaType<Response>("Response");
    qRegisterMetaType<Response::ResponseCode>("Response::ResponseCode");
    qRegisterMetaType<ClientStatus>("ClientStatus");
    qRegisterMetaType<RoomEvent>("RoomEvent");
    qRegisterMetaType<GameEventContainer>("GameEventContainer");
    qRegisterMetaType<Event_ServerIdentification>("Event_ServerIdentification");
    qRegisterMetaType<Event_ConnectionClosed>("Event_ConnectionClosed");
    qRegisterMetaType<Event_ServerShutdown>("Event_ServerShutdown");
    qRegisterMetaType<Event_AddToList>("Event_AddToList");
    qRegisterMetaType<Event_RemoveFromList>("Event_RemoveFromList");
    qRegisterMetaType<Event_UserJoined>("Event_UserJoined");
    qRegisterMetaType<Event_UserLeft>("Event_UserLeft");
    qRegisterMetaType<Event_ServerMessage>("Event_ServerMessage");
    qRegisterMetaType<Event_ListRooms>("Event_ListRooms");
    qRegisterMetaType<Event_GameJoined>("Event_GameJoined");
    qRegisterMetaType<Event_UserMessage>("Event_UserMessage");
    qRegisterMetaType<Event_NotifyUser>("Event_NotifyUser");
    qRegisterMetaType<ServerInfo_User>("ServerInfo_User");
    qRegisterMetaType<QList<ServerInfo_User> >("QList<ServerInfo_User>");
    qRegisterMetaType<Event_ReplayAdded>("Event_ReplayAdded");
    qRegisterMetaType<QList<QString> >("missingFeatures");

    FeatureSet features;
    features.initalizeFeatureList(clientFeatures);
    
    connect(this, SIGNAL(sigQueuePendingCommand(PendingCommand *)), this, SLOT(queuePendingCommand(PendingCommand *)));
}
Example #10
int MetalabelFeature::ExtractFeature(const vector<TokenCitation*>& tokenVector, CitationSet& citationSet, UniGramFeature& uniGrams, BiGramFeature& biGrams, JournalSet& journalSet, FeatureSet& allFeatures, int printLog)
{
	int rtn = 0;
	allFeatures.mFeatures.clear();
	allFeatures.mMaxIndex = uniGrams.mDictionary.rbegin()->first + 1;
	allFeatures.mFeatures.resize(tokenVector.size());

	FeatureSet biFeatures;
	biFeatures.mMaxIndex = biGrams.mDictionary.rbegin()->first + 1;
	biFeatures.mFeatures.resize(tokenVector.size());

	FeatureSet jourFeatures;
	jourFeatures.mMaxIndex = journalSet.mJournals.rbegin()->first + 1;
	jourFeatures.mFeatures.resize(tokenVector.size());

	int numThreads = omp_get_num_procs();
	if (printLog != SILENT)
		clog << "CPU number: " << numThreads << endl;

	omp_set_num_threads(numThreads);
	if (printLog != SILENT)
		clog << "Start Parallel Extract Features" << endl;
#pragma omp parallel for schedule(dynamic) 
	for (int i = 0; i < tokenVector.size(); i++)
	{
		uniGrams.Extract(*tokenVector[i], allFeatures.mFeatures[i]);
	}

#pragma omp parallel for schedule(dynamic) 
	for (int i = 0; i < tokenVector.size(); i++)
	{
		biGrams.Extract(*tokenVector[i], biFeatures.mFeatures[i]);
	}

#pragma omp parallel for schedule(dynamic)
	for (int i = 0; i < tokenVector.size(); ++i)
	{
		Journal* ptrJournal = NULL;
		ptrJournal = journalSet.SearchJournalTitle(citationSet[tokenVector[i]->mPmid]->mJournalTitle);
		if (ptrJournal != NULL)
		{
			jourFeatures.mFeatures[i][ptrJournal->mJournalId] = 1.0;
		}
		else
		{
			cerr << "Error: can't find \"" << citationSet[tokenVector[i]->mPmid]->mJournalTitle << " in pmid " << tokenVector[i]->mPmid << endl;
		}
	}

	rtn = allFeatures.Merge(biFeatures);
	CHECK_RTN(rtn);
	rtn = allFeatures.Merge(jourFeatures);
	CHECK_RTN(rtn);
	rtn = allFeatures.Normalize();
	CHECK_RTN(rtn);
	return 0;
}
Example #11
bool IWizardFactory::isAvailable(const QString &platformName) const
{
    FeatureSet availableFeatures = pluginFeatures();

    foreach (const Core::IFeatureProvider *featureManager, s_providerList)
        availableFeatures |= featureManager->availableFeatures(platformName);

    return availableFeatures.contains(requiredFeatures());
}
std::vector<float> 
SupportVectorMachine::predict(const FeatureSet& fset) const
{
	std::vector<float> preds(fset.size());
	for(int i = 0; i < fset.size(); i++) {
		preds[i] = predict(fset[i]);
	}

	return preds;
}
Example #13
bool IWizard::isAvailable(const QString &platformName) const
{
    FeatureSet availableFeatures;

    const QList<Core::IFeatureProvider*> featureManagers = ExtensionSystem::PluginManager::getObjects<Core::IFeatureProvider>();

    foreach (const Core::IFeatureProvider *featureManager, featureManagers)
        availableFeatures |= featureManager->availableFeatures(platformName);

    return availableFeatures.contains(requiredFeatures());
}
Example #14
/**
 * Reads the recorded gesture data from the files and brings it into a format ready for training.
 */
vector<FeatureSet> GestureManagement::readData() {
	// read all takes for each gesture
	vector<FeatureSet> sets;
	sets.clear();
	for (int i=0; i<=NUMBER_OF_ROGGEN_SENSORS; ++i) {
		FeatureSet s;
		s.clear();
		sets.push_back(s);
	}
	for (int f=0; f<config->getStrings("classification names").size(); ++f) {
		if (config->getInt("verbosity") > 1) {
			cout << "Training classification name number " << f << endl;
		}
		// cout << "Gesture: " << f << endl;
		vector<float> varianceSums; // compute sums for every sensor within a gesture
		varianceSums.clear();
		for (int i=0; i<NUMBER_OF_ROGGEN_SENSORS; i++) {
			varianceSums.push_back(0); // initial value
		}
		int sizeSum = 0;
		// for (int i=1; i<=NUMBER_OF_TAKES; i++) {  // for each of the 3 takes
		for (int i=1; i<=config->getInt("number of takes"); ++i) {
			// cout << "Take " << i << endl;
			string fname = XML_SUBDIRECTORY+config->getStrings("classification names").at(f)+Helper::toString(i)+".xml";
			// cout << "Filename: " << fname << endl;
			if (!Helper::fileExists(fname.c_str())) {
				cerr << fname << " not yet recorded!" << endl;
				cerr << "No training possible" << endl;
				sets.clear();
				return sets;
			} else {
				vector<vector<int> > rd = convert(XmlFileHandling::readIntData(fname));
				for (int sensor=0; sensor<NUMBER_OF_ROGGEN_SENSORS; ++sensor) { // for every sensor
					// cout << "Sensor " << sensor << endl;
					buffers.at(sensor)->clear();
					buffers.at(sensor)->setUnlimited();
					buffers.at(sensor)->put(RoggenBuffer::filter(0,config->getInts("IDs").at(sensor),rd));
					maxGestureLength = Helper::getMax(maxGestureLength,buffers.at(sensor)->size());
					FeatureSample fs;
					fs.out = f;
					fs.in.clear();
					fs.in = extractFeatures(sensor);
					sets.at(sensor).push_back(fs);
					int size = buffers.at(sensor)->size();
					sizeSum += size;
				} // for every sensor
			} // if fileexists
		} // for every take
		sizeSum /= config->getInt("number of takes") * NUMBER_OF_ROGGEN_SENSORS; // compute average size of one gesture
		sizes.push_back(sizeSum); // store sizes (averages)
	}	// for every gesture
	return sets;
}
Example #15
void ProcessedFFMaterial::_determineFeatures(   U32 stageNum, 
                                                FixedFuncFeatureData& featData, 
                                                const FeatureSet &features )
{
   if ( mStages[stageNum].getTex( MFT_DiffuseMap ) )
      featData.features[FixedFuncFeatureData::DiffuseMap] = true;

   if ( features.hasFeature( MFT_LightMap ) )
      featData.features[FixedFuncFeatureData::LightMap] = true;
   if ( features.hasFeature( MFT_ToneMap )) 
      featData.features[FixedFuncFeatureData::ToneMap] = true;
}
Example #16
int MatchFeaturesByDistRatio(const FeatureSet &f1, const FeatureSet &f2, vector<FeatureMatch> &matches)
{
	//cout<<"ratio between the first and the second best match: "<<Match_threshold_for_ratio<<endl;
	int count = 0;

	int m = f1.size();
	int n = f2.size();

	matches.clear();

	double d;
	double dBest[2];
	int idBest;

	FeatureMatch feamatch;
	for (int i=0; i<m; i++) {
		dBest[0] = 1e100;
		dBest[1] = 1e100 + 1;
		idBest = 0;

		for (int j=0; j<n; j++) {
			d = distanceEuclidean(f1[i].data, f2[j].data);

			if (d < dBest[0]) {
				dBest[1] = dBest[0];
				dBest[0] = d;
				idBest = f2[j].id;
			}
			else if (d < dBest[1])
			{
				dBest[1] = d;
			}
		}
		if (sqrt(dBest[0] / dBest[1]) < Match_threshold_for_ratio)
		{
			feamatch.id = idBest;
			matches.push_back(feamatch);
			count++;
		}
		else
		{
			feamatch.id = -1;
			matches.push_back(feamatch);
		}
	}

	return count;
}
Example #17
//Add a training sample
void LearningInterface::addTrainingSample(FeatureSet &features, double label)
{
    sample_type samp;
//    samp(0) = (double)features.bandwidth();
//    samp(1) = (double)features.contentType();
    for(int i = 0; i < FEATURE_SET_NUM_FEATURES; i++)
        samp(i) = features.getFeatureByIndex((FeatureSetIndexType) i);

    m_training_set.push_back(samp);
    if(label == 1.0)
        m_labels.push_back(+1);
    else
        m_labels.push_back(-1);

    m_num_training_samples++;

    //Test to see if user falls into only one class
    if(m_one_class_only)
    {
        //This is the first sample
        if(m_single_class_val == -2)
        {
            //Set this as the single class value
            m_single_class_val = (int) label;
        }
        //See if this is the other class
        else if((int)label != m_single_class_val)
        {
            //Lower flags
            m_one_class_only = false;
        }
    }
}
/******************* TO DO *********************
 * countInliers:
 *	INPUT:
 *		f1, f2: source feature sets
 *		matches: correspondences between f1 and f2
 *		m: motion model
 *		f: focal length
 *		width: image width
 *		height: image height
 *		M: transformation matrix
 *		RANSACthresh: RANSAC distance threshold
 *		inliers: inlier feature IDs
 *	OUTPUT:
 *		transform the features in f1 by M
 *
 *		count the number of features in f1 for which the transformed
 *		feature is within Euclidean distance RANSACthresh of its match
 *		in f2
 *
 *		store these features IDs in inliers
 *
 *		this method should be similar to evaluateMatch from project 1,
 *		except you are comparing each distance to a threshold instead
 *		of averaging them
 */
int countInliers(const FeatureSet &f1, const FeatureSet &f2,
				 const vector<FeatureMatch> &matches, MotionModel m,
				 float f, int width, int height,
				 CTransform3x3 M, double RANSACthresh, vector<int> &inliers)
{
	inliers.clear();
	int count = 0;

	for (unsigned int i=0; i<f1.size(); i++) {
		// BEGIN TODO
		// determine if the ith feature in f1, when transformed by M,
		// is within RANSACthresh of its match in f2 (if one exists)
		//
		// if so, increment count and append i to inliers
		if (matches[i].id < 0)
			continue;

		CVector3 p1, p2;
		p1[0] = f1[i].x - width / 2.0;
		p1[1] = f1[i].y - height / 2.0;
		p1[2] = f;
		p2 = M * p1;

		double xNew = p2[0] + width / 2.0;
		double yNew = p2[1] + height / 2.0;
		int f2pos = matches[i].id - 1;
		double dist = sqrt(pow(xNew - f2[f2pos].x, 2) + pow(yNew - f2[f2pos].y, 2));
		if (dist < RANSACthresh)
		{
			inliers.push_back(i);
			count++;
		}
		// END TODO
	}

	return count;
}
Example #19
// Compute silly example features.  This doesn't do anything
// meaningful, but may be useful as an example.
void dummyComputeFeatures(CFloatImage &image, FeatureSet &features) {
    CShape sh = image.Shape();
    Feature f;

    for (int y=0; y<sh.height; y++) {
        for (int x=0; x<sh.width; x++) {
            double r = image.Pixel(x,y,0);
            double g = image.Pixel(x,y,1);
            double b = image.Pixel(x,y,2);

            if ((int)(255*(r+g+b)+0.5) % 100 == 1) {
                // If the pixel satisfies this meaningless criterion,
                // make it a feature.

                f.type = 1;
                f.id += 1;
                f.x = x;
                f.y = y;

                f.data.resize(1);
                f.data[0] = r + g + b;

                features.push_back(f);
            }
        }
    }
}
void ComputeHarrisFeatures(CFloatImage &image, FeatureSet &features)
{
    //Create grayscale image used for Harris detection
    CFloatImage grayImage=ConvertToGray(image);

    //Create image to store Harris values
    CFloatImage harrisImage(image.Shape().width,image.Shape().height,1);

    //Create image to store local maximum harris values as 1, other pixels 0
    CByteImage harrisMaxImage(image.Shape().width,image.Shape().height,1);

    //compute Harris values puts harris values at each pixel position in harrisImage. 
    //You'll need to implement this function.
    computeHarrisValues(grayImage, harrisImage);
        
    // Threshold the harris image and compute local maxima.  You'll need to implement this function.
    computeLocalMaxima(harrisImage,harrisMaxImage);

    // Prints out the harris image for debugging purposes
    CByteImage tmp(harrisImage.Shape());
    convertToByteImage(harrisImage, tmp);
    WriteFile(tmp, "harris.tga");

    // TO DO--------------------------------------------------------------------
    //Loop through feature points in harrisMaxImage and fill in information needed for 
    //descriptor computation for each point above a threshold. We fill in id, type, 
    //x, y, and angle.
    CFloatImage A(grayImage.Shape());
    CFloatImage B(grayImage.Shape());
    CFloatImage C(grayImage.Shape());

    CFloatImage partialX(grayImage.Shape());
    CFloatImage partialY(grayImage.Shape());

    GetHarrisComponents(grayImage, A, B, C, &partialX, &partialY);
    int featureCount = 0;
    for (int y=0;y<harrisMaxImage.Shape().height;y++) {
        for (int x=0;x<harrisMaxImage.Shape().width;x++) {

            // Skip over non-maxima
            if (harrisMaxImage.Pixel(x, y, 0) == 0)
                continue;

            //TO DO---------------------------------------------------------------------
            // Fill in feature with descriptor data here. 
            Feature f;
            f.type = 2;
            f.id = featureCount++;
            f.x = x;
            f.y = y;
            f.angleRadians = GetCanonicalOrientation(x, y, A, B, C, partialX, partialY);
                //atan(partialY.Pixel(x, y, 0)/partialX.Pixel(x, y, 0));
            // Add the feature to the list of features
            features.push_back(f);
        }
    }
}
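
computeLocalMaxima is called but left unimplemented in the example above. One possible sketch, assuming CByteImage::Pixel returns an assignable reference (matching how Pixel is read elsewhere in these snippets) and using an arbitrary threshold:

void computeLocalMaxima(CFloatImage &srcImage, CByteImage &destImage)
{
    const float threshold = 0.01f; // assumed value; tune for the image content
    int w = srcImage.Shape().width;
    int h = srcImage.Shape().height;

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            float v = srcImage.Pixel(x, y, 0);
            bool isMax = (v > threshold);

            // a pixel is kept only if it strictly exceeds every in-bounds 3x3 neighbor
            for (int dy = -1; dy <= 1 && isMax; dy++) {
                for (int dx = -1; dx <= 1; dx++) {
                    if (dx == 0 && dy == 0)
                        continue;
                    int nx = x + dx, ny = y + dy;
                    if (nx < 0 || ny < 0 || nx >= w || ny >= h)
                        continue;
                    if (srcImage.Pixel(nx, ny, 0) >= v) {
                        isMax = false;
                        break;
                    }
                }
            }
            destImage.Pixel(x, y, 0) = isMax ? 1 : 0;
        }
    }
}
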
Example #21
int MetalabelFeature::ExtractFeature(const vector<TokenCitation*>& tokenVector, UniGramFeature& uniGrams, BiGramFeature& biGrams, feature_node** &featureSpace, int printLog)
{
	int numThreads = omp_get_num_procs();
	if (printLog != SILENT)
		clog << "CPU number: " << numThreads << endl;
	omp_set_num_threads(numThreads);
	if (printLog != SILENT)
		clog << "Extract unigram & bigram" << endl;

	if (printLog != SILENT)
		clog << "Make Feature table" << endl;
	int featureNum = (int)tokenVector.size();
	featureSpace = NULL;
	featureSpace = Malloc(feature_node*, featureNum);
	memset(featureSpace, 0, sizeof(feature_node*)* featureNum);

	int uniMaxIndex = uniGrams.mDictionary.rbegin()->first + 1;
	int biMaxIndex = biGrams.mDictionary.rbegin()->first + 1;

	if (printLog != SILENT)
		clog << "Extract features parallel" << endl;

#pragma omp parallel for schedule(dynamic)
	for (int i = 0; i < (int)tokenVector.size(); i++)
	{
		FeatureSet tabAllFeatures;
		tabAllFeatures.mMaxIndex = uniMaxIndex;
		tabAllFeatures.mFeatures.resize(1);

		FeatureSet tabBiFeatures;
		tabBiFeatures.mMaxIndex = biMaxIndex;
		tabBiFeatures.mFeatures.resize(1);

		uniGrams.Extract(*tokenVector[i], tabAllFeatures.mFeatures[0]);
		biGrams.Extract(*tokenVector[i], tabBiFeatures.mFeatures[0]);

		tabAllFeatures.Merge(tabBiFeatures);
		tabAllFeatures.Normalize();

		featureSpace[i] = NULL;
		LinearMachine::TransFeatures(featureSpace[i], tabAllFeatures.mFeatures[0]);
	}
	return 0;
}
Example #22
void ImposterCaptureMaterialHook::_overrideFeatures(  ProcessedMaterial *mat,
                                                      U32 stageNum,
                                                      MaterialFeatureData &fd, 
                                                      const FeatureSet &features )
{
   if ( features.hasFeature( MFT_NormalsOut) )
      fd.features.addFeature( MFT_NormalsOut );

   fd.features.addFeature( MFT_ForwardShading );
}
Example #23
void DatabaseSubsystem::getAvailableFeatures(Session& session,
    const map<int, int>& clObjMap, FeatureSet& featureSet)
{
    int ddType;
    string featureName;
    double featureParam1, featureParam2, featureParam3;

    featureSet.clear();

    ostringstream clObjIDs;
    for (map<int, int>::const_iterator itr = clObjMap.begin();
         itr != clObjMap.end(); ++itr)
    {
        if (itr != clObjMap.begin()) {
            clObjIDs << ",";
        }
        clObjIDs << itr->first;
    }

    Statement stmt = (session <<
        "SELECT DISTINCT "
        "  data_descriptor.type, data_feature.feature_name, "
        "  data_feature.feature_param1, data_feature.feature_param2, "
        "  data_feature.feature_param3  "
        "FROM data_feature INNER JOIN data_descriptor "
        "ON (data_feature.descr_id = data_descriptor.descr_id) "
        "WHERE data_descriptor.descr_id IN ("
        "  SELECT descr_id FROM classification_object_data WHERE object_id IN ("
        << clObjIDs.str()
        << "))",
        range(0, 1),
        into(ddType), into(featureName), into(featureParam1), 
        into(featureParam2), into(featureParam3));

    while (!stmt.done()) {
        if (stmt.execute() == 1) {
            featureSet.add(FeatureDescriptor(featureName,
                (DataDescriptor::Type) ddType, featureParam1, featureParam2, 
                featureParam3));
        }
    }
}
Example #24
void CFeatureDrawer::DrawFadeFeaturesSet(FeatureSet& fadeFeatures, int modelType)
{
	for (FeatureSet::iterator fi = fadeFeatures.begin(); fi != fadeFeatures.end(); ) {
		const float cols[] = {1.0f, 1.0f, 1.0f, fi->second};

		if (modelType != MODELTYPE_3DO) {
			glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, cols);
		}

		// hack, sorting objects by distance would look better
		glAlphaFunc(GL_GREATER, fi->second / 2.0f);
		glColor4fv(cols);

		if (!DrawFeatureNow(fi->first, fi->second)) {
			fi = set_erase(fadeFeatures, fi);
		} else {
			++fi;
		}
	}
}
vector<FeatureMatch> matchFeatures(const FeatureSet &f1, const FeatureSet &f2) {
    vector<FeatureMatch> matches;
    float threshold = 500;

    for (int i = 0; i < f1.size(); ++i) {
        int nearest = -1;
        float nearestDis = threshold;
        for (int j = 0; j < f2.size(); ++j) {
            float dis = featureDist(f1[i], f2[j]);
            if (dis < nearestDis) {
                nearestDis = dis;
                nearest = j;
            }
        }
        FeatureMatch newMatch;
        newMatch.id = nearest + 1;
        newMatch.score = nearestDis;
        matches.push_back(newMatch);
    }
    return matches;
}
FeatureSet FeatureSet::clone() const
{
  FeatureSet other;
  for (FeatureSet::const_iterator i = begin(); i != end(); ++i) {
    FeatureVector cv = i->clone();
    // check that all data is the same:
    if (cv.size() != i->size())
      throw std::runtime_error("ssssssssss");
    for (unsigned int idx=0; idx<cv.size(); ++idx) {
      if (cv[idx] != (*i)[idx]) {
        cout << "(" << cv[idx] << "!=" << (*i)[idx] << ")" << flush;
        throw std::runtime_error("??????????");
      }
    }
    if (cv.getTag<TagTrueClassLabel>().label != i->getTag<TagTrueClassLabel>().label) {
      cout << "(" << cv.getTag<TagTrueClassLabel>().label << "!=" << i->getTag<TagTrueClassLabel>().label << ")" << flush;
      throw std::runtime_error("!!!!!!!!!!!");
    }
    other.push_back(cv);
  }
  return other;
}
Example #27
void
FeatureExtractor::operator()(const ImageDatabase& db, FeatureSet& featureSet) const
{
	int n = db.getSize();

	featureSet.resize(n);
	for(int i = 0; i < n; i++) {
		CByteImage img;
		ReadFile(img, db.getFilename(i).c_str());

		featureSet[i] = (*this)(img);
	}
}
Example #28
bool outputAndCheck(const DataSet& dataSet, const FeatureSet& featureSet)
{
    int i = 0;
    bool ok = true;
    for (DataSet::const_iterator dItr = dataSet.begin(); dItr != dataSet.end();
        ++dItr, ++i)
    {
        cout << "DataPoint #" << i << " has " << dItr->components.size()
             << " features: ";
        for (DataPoint::ComponentMap::const_iterator cItr = 
            dItr->components.begin(); cItr != dItr->components.end(); ++cItr)
        {
            cout << cItr->first.toString() << " ";
            if (!featureSet.has(cItr->first)) {
                ok = false;
            }
        }
        cout << endl;
        if (dItr->components.size() != featureSet.size())
            ok = false;
    }
    return ok;
}
Example #29
void FeatureSet::filter( const FeatureSet &features )
{
   PROFILE_SCOPE( FeatureSet_Filter );

   for ( U32 i=0; i < mFeatures.size(); )
   {
      if ( !features.hasFeature( *mFeatures[i].type ) )
         mFeatures.erase_fast( i );
      else
         i++;
   }

   mDescription.clear();
}
Example #30
//Make a prediction using the given feature set
double LearningInterface::predict(FeatureSet &features)
{
    //Get the sample
    sample_type sample;
//    sample(0) = (double)features.bandwidth();
//    sample(1) = (double)features.contentType();
    for(int i = 0; i < FEATURE_SET_NUM_FEATURES; i++)
        sample(i) = features.getFeatureByIndex((FeatureSetIndexType) i);

    //If the user only has one label ever applied then the cross validation will fail;
    //we can safely predict that this will be their label
    return m_one_class_only ? m_single_class_val : m_decision_function(sample);

}