Example #1
// **********************************************************
//			ClearLabelFrames()
// **********************************************************
void CMapView::ClearLabelFrames()
{
	// clear frames for regular labels
	for (int i = 0; i < (int)_activeLayers.size(); i++)
	{
		Layer * l = _allLayers[_activeLayers[i]];
		if (l != NULL)
		{
			if (l->IsShapefile())
			{
				IShapefile * sf = NULL;
				if (l->QueryShapefile(&sf))
				{
					((CShapefile*)sf)->ClearChartFrames();
					sf->Release();
				}
			}

			// labels
			ILabels* LabelsClass = l->get_Labels();
			if (LabelsClass == NULL) continue;

			CLabels* coLabels = static_cast<CLabels*>(LabelsClass);
			coLabels->ClearLabelFrames();
			LabelsClass->Release(); LabelsClass = NULL;
		}
	}
}
Example #2
// **********************************************************
//			ClearDrawingLabelFrames()
// **********************************************************
void CMapView::ClearDrawingLabelFrames()
{
	// clear frames for drawing labels
	for (size_t j = 0; j < _activeDrawLists.size(); j++)
	{
		bool isSkip = false;
		for (size_t i = 0; i < _drawingLayerInvisilbe.size(); i++)
		{
			if (_drawingLayerInvisilbe[i] == j)
			{
				isSkip = true;	// skip if this layer is set invisible
				break;
			}
		}
		if (isSkip)
			continue;

		DrawList * dlist = _allDrawLists[_activeDrawLists[j]];
		if (IS_VALID_PTR(dlist))
		{
			CLabels* coLabels = static_cast<CLabels*>(dlist->m_labels);
			coLabels->ClearLabelFrames();
		}
	}
}
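The inner loop above is only a membership test against the invisible-layer list. For reference, a minimal sketch of the same check written with std::find (plain C++ with hypothetical names, not part of the CMapView class):

#include <algorithm>
#include <vector>

// Returns true if layerIndex appears in the list of invisible drawing layers.
static bool IsDrawingLayerInvisible(const std::vector<size_t>& invisibleLayers, size_t layerIndex)
{
	return std::find(invisibleLayers.begin(), invisibleLayers.end(), layerIndex)
		!= invisibleLayers.end();
}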
Example #3
SGVector<float64_t> CBaggingMachine::apply_get_outputs(CFeatures* data)
{
	ASSERT(data != NULL);
	REQUIRE(m_combination_rule != NULL, "Combination rule is not set!");
	ASSERT(m_num_bags == m_bags->get_num_elements());
  	
	SGMatrix<float64_t> output(data->get_num_vectors(), m_num_bags);
	output.zero();

	#pragma omp parallel for num_threads(parallel->get_num_threads())
	for (int32_t i = 0; i < m_num_bags; ++i)
	{
		CMachine* m = dynamic_cast<CMachine*>(m_bags->get_element(i));
		CLabels* l = m->apply(data);
		SGVector<float64_t> lv = l->get_values();
		float64_t* bag_results = output.get_column_vector(i);
		memcpy(bag_results, lv.vector, lv.vlen*sizeof(float64_t));

		SG_UNREF(l);
		SG_UNREF(m);
	}

	SGVector<float64_t> combined = m_combination_rule->combine(output);

	return combined;
}
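In the loop above, each OpenMP iteration writes one full column of the column-major output matrix (get_column_vector(i) returns a pointer to column i), so the threads never touch overlapping memory. A standalone sketch of that pattern in plain C++/OpenMP, not the shogun API:

#include <cstring>
#include <vector>

// Fill each column of a column-major rows x cols matrix in parallel.
// Safe because iteration c only writes the range [c*rows, (c+1)*rows).
void fill_columns(double* mat, int rows, int cols)
{
	#pragma omp parallel for
	for (int c = 0; c < cols; ++c)
	{
		std::vector<double> result(rows, (double)c);	// stand-in for one bag's predictions
		double* column = mat + (size_t)c * rows;	// column c starts at offset c*rows
		std::memcpy(column, result.data(), rows * sizeof(double));
	}
}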
Example #4
File: KNN.cpp  Project: AsherBond/shogun
CLabels* CKNN::classify_NN()
{
	ASSERT(distance);
	ASSERT(num_classes>0);

	int32_t num_lab = distance->get_num_vec_rhs();
	ASSERT(num_lab);

	CLabels* output = new CLabels(num_lab);
	float64_t* distances = new float64_t[num_train_labels];

	ASSERT(distances);
	SG_INFO("%d test examples\n", num_lab);
	CSignal::clear_cancel();

	// for each test example
	for (int32_t i=0; i<num_lab && (!CSignal::cancel_computations()); i++)
	{
		SG_PROGRESS(i,0,num_lab);

		// get distances from the i-th test example to train examples 0..num_train_labels-1
		distances_lhs(distances,0,num_train_labels-1,i);
		int32_t j;

		// assume the 0th train example is the nearest to the i-th test example
		int32_t out_idx = 0;
		float64_t min_dist = distances[0];

		// searching for nearest neighbor by comparing distances
		for (j=0; j<num_train_labels; j++)
		{
			if (distances[j]<min_dist)
			{
				min_dist = distances[j];
				out_idx = j;
			}
		}

		// label the i-th test example with the label of its nearest neighbor (index out_idx)
		output->set_label(i,train_labels[out_idx]+min_label);
	}

	delete [] distances;
	return output;
}
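A minimal caller-side sketch for the method above; the helper name is hypothetical, and it assumes a CDistance that was already initialized with the train features on the left-hand side and the test features on the right-hand side (exact signatures differ between shogun versions):

// k is fixed to 1 because classify_NN() only looks at the single nearest neighbor.
CLabels* run_1nn(CDistance* dist, CLabels* train_labels)
{
	CKNN* knn = new CKNN(1, dist, train_labels);
	SG_REF(knn);
	knn->train();					// stores the training labels and the label range
	CLabels* predicted = knn->classify_NN();	// the 1-NN path shown above
	SG_UNREF(knn);
	return predicted;				// caller releases it with SG_UNREF when done
}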
Example #5
CLabels* CGaussianNaiveBayes::apply()
{
	// init number of vectors
	int32_t num_vectors = m_features->get_num_vectors();

	// init result labels
	CLabels* result = new CLabels(num_vectors);

	// classify each example of data
	SG_PROGRESS(0, 0, num_vectors);
	for (int i = 0; i < num_vectors; i++)
	{
		result->set_label(i,apply(i));
		SG_PROGRESS(i + 1, 0, num_vectors);
	}
	SG_DONE();
	return result;
}
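A minimal caller-side sketch for CGaussianNaiveBayes, assuming the constructor that takes the training features and labels directly (exact signatures vary between shogun versions); it classifies the training data itself, much as main() below does with the multiclass machine:

// Hypothetical usage; error handling omitted.
CGaussianNaiveBayes* gnb = new CGaussianNaiveBayes(features, labels);
SG_REF(gnb);
gnb->train();				// estimates per-class Gaussian parameters
CLabels* predicted = gnb->apply();	// runs the per-example loop shown above
// ... use predicted ...
SG_UNREF(predicted);
SG_UNREF(gnb);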
Example #6
int main(int argc, char** argv)
{
    int32_t num_vectors = 0;
    int32_t num_feats   = 2;

    init_shogun_with_defaults();

    // Prepare to read a file for the training data
    char fname_feats[]  = "../data/fm_train_real.dat";
    char fname_labels[] = "../data/label_train_multiclass.dat";
    CStreamingAsciiFile* ffeats_train  = new CStreamingAsciiFile(fname_feats);
    CStreamingAsciiFile* flabels_train = new CStreamingAsciiFile(fname_labels);
    SG_REF(ffeats_train);
    SG_REF(flabels_train);

    CStreamingDenseFeatures< float64_t >* stream_features =
        new CStreamingDenseFeatures< float64_t >(ffeats_train, false, 1024);

    CStreamingDenseFeatures< float64_t >* stream_labels =
        new CStreamingDenseFeatures< float64_t >(flabels_train, true, 1024);

    SG_REF(stream_features);
    SG_REF(stream_labels);

    // Create a matrix with enough space to read all the feature vectors
    SGMatrix< float64_t > mat = SGMatrix< float64_t >(num_feats, 1000);

    // Read the values from the file and store them in mat
    SGVector< float64_t > vec;
    stream_features->start_parser();
    while ( stream_features->get_next_example() )
    {
        vec = stream_features->get_vector();

        for ( int32_t i = 0 ; i < num_feats ; ++i )
            mat[num_vectors*num_feats + i] = vec[i];

        num_vectors++;
        stream_features->release_example();
    }
    stream_features->end_parser();

    // Create features with the useful values from mat
    CDenseFeatures< float64_t >* features = new CDenseFeatures< float64_t >(mat.matrix, num_feats, num_vectors);

    CLabels* labels = new CLabels(num_vectors);
    SG_REF(features);
    SG_REF(labels);

    // Read the labels from the file
    int32_t idx = 0;
    stream_labels->start_parser();
    while ( stream_labels->get_next_example() )
    {
        labels->set_int_label( idx++, (int32_t)stream_labels->get_label() );
        stream_labels->release_example();
    }
    stream_labels->end_parser();

    // Create liblinear svm classifier with L2-regularized L2-loss
    CLibLinear* svm = new CLibLinear(L2R_L2LOSS_SVC);
    SG_REF(svm);

    // Add some configuration to the svm
    svm->set_epsilon(EPSILON);
    svm->set_bias_enabled(true);

    // Create a multiclass svm classifier built from several copies of the previous one
    CLinearMulticlassMachine* mc_svm = new CLinearMulticlassMachine(
        new CECOCStrategy(new CECOCRandomDenseEncoder(), new CECOCHDDecoder()), (CDotFeatures*) features, svm, labels);
    SG_REF(mc_svm);

    // Train the multiclass machine using the data passed in the constructor
    mc_svm->train();

    // Classify the training examples and show the results
    CLabels* output = mc_svm->apply();

    SGVector< int32_t > out_labels = output->get_int_labels();
    CMath::display_vector(out_labels.vector, out_labels.vlen);

    // Free resources
    SG_UNREF(mc_svm);
    SG_UNREF(svm);
    SG_UNREF(output);
    SG_UNREF(features);
    SG_UNREF(labels);
    //SG_UNREF(ffeats_train);
    //SG_UNREF(flabels_train);
    SG_UNREF(stream_features);
    SG_UNREF(stream_labels);
    exit_shogun();

    return 0;
}
Example #7
float64_t CBaggingMachine::get_oob_error(CEvaluation* eval) const
{
	REQUIRE(m_combination_rule != NULL, "Combination rule is not set!");
	REQUIRE(m_bags->get_num_elements() > 0, "BaggingMachine is not trained!");

	SGMatrix<float64_t> output(m_features->get_num_vectors(), m_bags->get_num_elements());
	if (m_labels->get_label_type() == LT_REGRESSION)
		output.zero();
	else
		output.set_const(NAN);

	/* TODO: add parallel support for applying the OOB machines;
	   only possible once add_subset is thread-safe
	#pragma omp parallel for num_threads(parallel->get_num_threads())
	*/
	for (index_t i = 0; i < m_bags->get_num_elements(); i++)
	{
		CMachine* m = dynamic_cast<CMachine*>(m_bags->get_element(i));
		CDynamicArray<index_t>* current_oob 
			= dynamic_cast<CDynamicArray<index_t>*>(m_oob_indices->get_element(i));

		SGVector<index_t> oob(current_oob->get_array(), current_oob->get_num_elements(), false);
		oob.display_vector();
		m_features->add_subset(oob);

		CLabels* l = m->apply(m_features);
		SGVector<float64_t> lv = l->get_values();

		// write this bag's predictions at its out-of-bag indices; in-bag positions keep their NAN marker
		for (index_t j = 0; j < oob.vlen; j++)
			output(oob[j], i) = lv[j];

		m_features->remove_subset();
		SG_UNREF(current_oob);
		SG_UNREF(m);
		SG_UNREF(l);
	}
	output.display_matrix();

	DynArray<index_t> idx;
	for (index_t i = 0; i < m_features->get_num_vectors(); i++)
	{
		if (m_all_oob_idx[i])
			idx.push_back(i);
	}

	SGVector<float64_t> combined = m_combination_rule->combine(output);
	CLabels* predicted = NULL;
	switch (m_labels->get_label_type())
	{
		case LT_BINARY:
			predicted = new CBinaryLabels(combined);
			break;

		case LT_MULTICLASS:
			predicted = new CMulticlassLabels(combined);
			break;

		case LT_REGRESSION:
			predicted = new CRegressionLabels(combined);
			break;

		default:
			SG_ERROR("Unsupported label type\n");
	}
	
	m_labels->add_subset(SGVector<index_t>(idx.get_array(), idx.get_num_elements(), false));
	float64_t res = eval->evaluate(predicted, m_labels);
	m_labels->remove_subset();

	return res;
}
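For non-regression labels the matrix above keeps NAN at in-bag positions and only the out-of-bag entries are filled, so the combination step presumably has to skip those markers. A standalone sketch of a NAN-skipping mean rule in plain C++ (not the shogun combination rule API):

#include <cmath>
#include <vector>

// Combine per-bag predictions: columns[b][r] is bag b's prediction for example r,
// or NAN if example r was in-bag for that bag. Rows with no OOB prediction stay NAN.
std::vector<double> combine_mean_ignore_nan(const std::vector<std::vector<double> >& columns, size_t num_rows)
{
	std::vector<double> combined(num_rows, NAN);
	for (size_t r = 0; r < num_rows; ++r)
	{
		double sum = 0.0;
		size_t count = 0;
		for (size_t b = 0; b < columns.size(); ++b)
		{
			if (!std::isnan(columns[b][r]))
			{
				sum += columns[b][r];
				++count;
			}
		}
		if (count > 0)
			combined[r] = sum / count;
	}
	return combined;
}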