void CSerialComputationEngine::submit_job(CIndependentJob* job)
{
	SG_DEBUG("Entering. The job is being computed!\n");

	REQUIRE(job, "Job to be computed is NULL\n");
	job->compute();

	SG_DEBUG("The job is computed. Leaving!\n");
}
void CKLInference::update()
{
	SG_DEBUG("entering\n");

	CInference::update();
	update_init();
	update_alpha();
	update_chol();
	m_gradient_update=false;
	update_parameter_hash();

	SG_DEBUG("leaving\n");
}
float64_t CRandomKitchenSinksDotFeatures::dense_dot(
	int32_t vec_idx1, const float64_t* vec2, int32_t vec2_len)
{
	SG_DEBUG("entering dense_dot()\n");
	ASSERT(vec2_len == get_dim_feature_space());

	float64_t dot_product = 0;
	for (index_t i=0; i<num_samples; i++)
	{
		float64_t tmp_dot = dot(vec_idx1, i);
		tmp_dot = post_dot(tmp_dot, i);
		dot_product += tmp_dot * vec2[i];
	}
	SG_DEBUG("Leaving dense_dot()\n");
	return dot_product;
}
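A minimal usage sketch for the dot-product path above; rks_feats is assumed to be an already constructed CRandomKitchenSinksDotFeatures* and the constant weight vector is a placeholder for illustration, so only dense_dot() and get_dim_feature_space() are taken from the snippet.

// hypothetical usage, illustration only
int32_t dim = rks_feats->get_dim_feature_space();
SGVector<float64_t> w(dim);
w.set_const(1.0);

// dot product between feature vector 0 (mapped to the feature space) and w
float64_t d = rks_feats->dense_dot(0, w.vector, w.vlen);
SG_SPRINT("dense_dot(0, w) = %f\n", d);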
template<class ST> bool CDenseFeatures<ST>::apply_preprocessor(bool force_preprocessing)
{
	if (m_subset_stack->has_subsets())
		SG_ERROR("A subset is set, cannot call apply_preproc\n")

	SG_DEBUG("force: %d\n", force_preprocessing)

	if (feature_matrix.matrix && get_num_preprocessors())
	{
		for (int32_t i = 0; i < get_num_preprocessors(); i++)
		{
			if ((!is_preprocessed(i) || force_preprocessing))
			{
				set_preprocessed(i);
				CDensePreprocessor<ST>* p =
						(CDensePreprocessor<ST>*) get_preprocessor(i);
				SG_INFO("preprocessing using preproc %s\n", p->get_name())

				if (p->apply_to_feature_matrix(this).matrix == NULL)
				{
					SG_UNREF(p);
					return false;
				}
				SG_UNREF(p);

			}
		}

		return true;
	}

	// neither a feature matrix nor preprocessors are set, so nothing was applied
	return false;
}
template<class ST> CFeatures* CDenseFeatures<ST>::create_merged_copy(
		CFeatures* other)
{
	SG_DEBUG("entering %s::create_merged_copy()\n", get_name());
	if (get_feature_type()!=other->get_feature_type() ||
			get_feature_class()!=other->get_feature_class() ||
			strcmp(get_name(), other->get_name()))
	{
		SG_ERROR("%s::create_merged_copy(): Features are of different type!\n",
				get_name());
	}

	CDenseFeatures<ST>* casted=dynamic_cast<CDenseFeatures<ST>* >(other);

	if (!casted)
	{
		SG_ERROR("%s::create_merged_copy(): Could not cast object of %s to "
				"same type as %s\n",get_name(), other->get_name(), get_name());
	}

	if (num_features!=casted->num_features)
	{
		SG_ERROR("%s::create_merged_copy(): Provided feature object has "
				"different dimension than this one\n");
	}

	/* create new feature matrix and copy both instances data into it */
	SGMatrix<ST> data(num_features, num_vectors+casted->get_num_vectors());

	/* copy data of this instance */
	SG_DEBUG("copying matrix of this instance\n");
	memcpy(data.matrix, feature_matrix.matrix,
			num_features*num_vectors*sizeof(ST));

	/* copy data of provided instance */
	SG_DEBUG("copying matrix of provided instance\n");
	memcpy(&data.matrix[num_vectors*num_features],
			casted->feature_matrix.matrix,
			casted->num_features*casted->num_vectors*sizeof(ST));

	/* create new instance and return */
	CDenseFeatures<ST>* result=new CDenseFeatures<ST>(data);

	SG_DEBUG("leaving %s::create_merged_copy()\n", get_name());
	return result;
}
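A hedged sketch of how the merged copy above might be used; the matrix contents are made up, and only the CDenseFeatures(SGMatrix) constructor, create_merged_copy() and get_num_vectors() seen in this listing are relied upon.

// illustration only: merge two small 2-dimensional feature sets
SGMatrix<float64_t> a(2, 3);   // 2 features x 3 vectors
SGMatrix<float64_t> b(2, 2);   // 2 features x 2 vectors
a.set_const(1.0);
b.set_const(2.0);

CDenseFeatures<float64_t>* feats_a=new CDenseFeatures<float64_t>(a);
CDenseFeatures<float64_t>* feats_b=new CDenseFeatures<float64_t>(b);

// the merged copy holds 3+2=5 vectors; feats_a and feats_b stay untouched
CFeatures* merged=feats_a->create_merged_copy(feats_b);
SG_SPRINT("merged vectors: %d\n", merged->get_num_vectors());

SG_UNREF(merged);
SG_UNREF(feats_a);
SG_UNREF(feats_b);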
void CRandomKitchenSinksDotFeatures::add_to_dense_vec(float64_t alpha,
	int32_t vec_idx1, float64_t* vec2, int32_t vec2_len, bool abs_val)
{
	SG_DEBUG("Entering add_to_dense()\n");
	ASSERT(vec2_len == get_dim_feature_space());

	for (index_t i=0; i<num_samples; i++)
	{
		float64_t tmp_dot = dot(vec_idx1, i);
		tmp_dot = post_dot(tmp_dot, i);
		if (abs_val)
			vec2[i] += CMath::abs(alpha * tmp_dot);
		else
			vec2[i] += alpha * tmp_dot;
	}
	SG_DEBUG("Leaving add_to_dense()\n");
}
void CRationalApproximationCGMJob::compute()
{
    SG_DEBUG("Entering\n");

    REQUIRE(m_aggregator, "Job result aggregator is not set!\n");
    REQUIRE(m_operator, "Operator is not set!\n");
    REQUIRE(m_vector.vector, "Vector is not set!\n");
    REQUIRE(m_shifts.vector, "Shifts are not set!\n");
    REQUIRE(m_weights.vector, "Weights are not set!\n");
    REQUIRE(m_operator->get_dimension()==m_vector.vlen,
            "Dimension mismatch! %d vs %d\n", m_operator->get_dimension(), m_vector.vlen);
    REQUIRE(m_shifts.vlen==m_weights.vlen,
            "Number of shifts and weights are not equal!\n");

    // solve the linear system with the sample vector
    SGVector<complex128_t> vec=m_linear_solver->solve_shifted_weighted(
                                   m_operator, m_vector, m_shifts, m_weights);

    // Take negative (see CRationalApproximation for the formula)
    Map<VectorXcd> v(vec.vector, vec.vlen);
    v=-v;

    // take out the imaginary part of the result before
    // applying linear operator
    SGVector<float64_t> agg=m_operator->apply(vec.get_imag());

    // perform dot product
    Map<VectorXd> map_agg(agg.vector, agg.vlen);
    Map<VectorXd> map_vector(m_vector.vector, m_vector.vlen);
    float64_t result=map_vector.dot(map_agg);

    result*=m_const_multiplier;

    // form the final result into a scalar result and submit to the aggregator
    CScalarResult<float64_t>* final_result=new CScalarResult<float64_t>(result);
    SG_REF(final_result);

    m_aggregator->submit_result(final_result);

    SG_UNREF(final_result);

    SG_DEBUG("Leaving\n");
}
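Read directly off compute() above, the scalar each job submits to the aggregator is (the symbols below are only names for the members used in the code, not notation from the library documentation):

\[
\mathrm{result} \;=\; c \, s^{\top} A \,\operatorname{Im}(-x),
\qquad
x \;=\; \mathtt{solve\_shifted\_weighted}(A, s, \sigma, w) \;=\; \sum_{l} w_l\, x_l ,
\]

where s is m_vector, A is m_operator, the sigma_l and w_l are m_shifts and m_weights, c is m_const_multiplier, and x_l are the solutions of the individual shifted systems (see the solve_shifted_weighted listing further below).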
template <class ST>
CFeatures* CFeatureSelection<ST>::apply(CFeatures* features)
{
	SG_DEBUG("Entering!\n");

	// remove previously computed feature subsets
	m_subset->remove_all_subsets();

	// sanity checks
	REQUIRE(features, "Features cannot be NULL!\n");
	REQUIRE(features->get_num_vectors()>0,
			"Number of feature vectors has to be positive!\n");
	REQUIRE(m_target_dim>0, "Target dimension (%d) has to be positive! Set "
			"a higher number via set_target_dim().\n", m_target_dim);

	index_t num_features=get_num_features(features);
	REQUIRE(num_features>0, "Invalid number of features (%d)! Most likely "
			"feature selection cannot be performed for %s!\n",
			num_features, features->get_name());
	REQUIRE(num_features>m_target_dim,
			"Number of original features (dimensions of the feature vectors) "
			"(%d) has to be greater that the target dimension (%d)!\n",
			num_features, m_target_dim);

	// this method makes a deep copy of the feature object and performs
	// feature selection on it. This is already SG_REF'ed because of the
	// implementation of clone()
	CFeatures* feats_copy=(CFeatures*)features->clone();

	CFeatures* result=NULL;
	switch (m_algorithm)
	{
		case BACKWARD_ELIMINATION:
			result=apply_backward_elimination(feats_copy);
			break;
		default:
			SG_ERROR("Specified algorithm not yet supported!\n");
			result=features;
			break;
	}

	SG_DEBUG("Leaving!\n");
	return result;
}
SGVector<float64_t> CKernelMeanMatching::compute_weights()
{
	int32_t i,j;
	ASSERT(m_kernel)
	ASSERT(m_training_indices.vlen)
	ASSERT(m_test_indices.vlen)

	int32_t n_tr = m_training_indices.vlen;
	int32_t n_te = m_test_indices.vlen;

	SGVector<float64_t> weights(n_tr);
	weights.zero();

	kmm_K = SG_MALLOC(float64_t, n_tr*n_tr);
	kmm_K_ld = n_tr;
	float64_t* diag_K = SG_MALLOC(float64_t, n_tr);
	for (i=0; i<n_tr; i++)
	{
		float64_t d = m_kernel->kernel(m_training_indices[i], m_training_indices[i]);
		diag_K[i] = d;
		kmm_K[i*n_tr+i] = d;
		for (j=i+1; j<n_tr; j++)
		{
			d = m_kernel->kernel(m_training_indices[i],m_training_indices[j]);
			kmm_K[i*n_tr+j] = d;
			kmm_K[j*n_tr+i] = d;
		}
	}
	float64_t* kappa = SG_MALLOC(float64_t, n_tr);
	for (i=0; i<n_tr; i++)
	{
		float64_t avg = 0.0;
		for (j=0; j<n_te; j++)
			avg+= m_kernel->kernel(m_training_indices[i],m_test_indices[j]);

		avg *= float64_t(n_tr)/n_te;
		kappa[i] = -avg;
	}
	float64_t* a = SG_MALLOC(float64_t, n_tr);
	for (i=0; i<n_tr; i++) a[i] = 1.0;
	float64_t* LB = SG_MALLOC(float64_t, n_tr);
	float64_t* UB = SG_MALLOC(float64_t, n_tr);
	float64_t B = 2.0;
	for (i=0; i<n_tr; i++)
	{
		LB[i] = 0.0;
		UB[i] = B;
	}
	for (i=0; i<n_tr; i++)
		weights[i] = 1.0/float64_t(n_tr);

	libqp_state_T result =
		libqp_gsmo_solver(&kmm_get_col,diag_K,kappa,a,1.0,LB,UB,weights,n_tr,1000,1e-9,NULL);

	SG_DEBUG("libqp exitflag=%d, %d iterations passed, primal objective=%f\n",
	         result.exitflag,result.nIter,result.QP);

	SG_FREE(kappa);
	SG_FREE(a);
	SG_FREE(LB);
	SG_FREE(UB);
	SG_FREE(diag_K);
	SG_FREE(kmm_K);

	return weights;
}
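Assuming the standard libqp GSMO problem form (minimize (1/2) x^T H x + f^T x subject to a^T x = b and box constraints), the libqp_gsmo_solver call above solves, with the quantities built in compute_weights(),

\[
\min_{\beta}\; \tfrac{1}{2}\,\beta^{\top} K \beta + \kappa^{\top}\beta
\qquad \text{s.t.}\qquad \sum_i \beta_i = 1,\;\; 0 \le \beta_i \le 2,
\]

where K_{ij} = k(x_i^{tr}, x_j^{tr}) is kmm_K and kappa_i = -(n_tr/n_te) \sum_j k(x_i^{tr}, x_j^{te}) is kappa.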
void CProbingSampler::precompute()
{
	SG_DEBUG("Entering\n");

	// if already precomputed, nothing to do
	if (m_is_precomputed)
	{
		SG_DEBUG("Coloring vector already computed! Exiting!\n");
		return;
	}

	// do coloring things here and save the coloring vector
	SparsityStructure* sp_str=m_matrix_operator->get_sparsity_structure(m_power);

	GraphColoringInterface* Color
		=new GraphColoringInterface(SRC_MEM_ADOLC, sp_str->m_ptr, sp_str->m_num_rows);

	std::string str_ordering;
	switch(m_ordering)
	{
	case NATURAL:
		str_ordering="NATURAL";
		break;
	case LARGEST_FIRST:
		str_ordering="LARGEST_FIRST";
		break;
	case DYNAMIC_LARGEST_FIRST:
		str_ordering="DYNAMIC_LARGEST_FIRST";
		break;
	case DISTANCE_TWO_LARGEST_FIRST:
		str_ordering="DISTANCE_TWO_LARGEST_FIRST";
		break;
	case SMALLEST_LAST:
		str_ordering="SMALLEST_LAST";
		break;
	case DISTANCE_TWO_SMALLEST_LAST:
		str_ordering="DISTANCE_TWO_SMALLEST_LAST";
		break;
	case INCIDENCE_DEGREE:
		str_ordering="INCIDENCE_DEGREE";
		break;
	case DISTANCE_TWO_INCIDENCE_DEGREE:
		str_ordering="DISTANCE_TWO_INCIDENCE_DEGREE";
		break;
	case RANDOM:
		str_ordering="RANDOM";
		break;
	}

	std::string str_coloring;
	switch(m_coloring)
	{
	case DISTANCE_ONE:
		str_coloring="DISTANCE_ONE";
		break;
	case ACYCLIC:
		str_coloring="ACYCLIC";
		break;
	case ACYCLIC_FOR_INDIRECT_RECOVERY:
		str_coloring="ACYCLIC_FOR_INDIRECT_RECOVERY";
		break;
	case STAR:
		str_coloring="STAR";
		break;
	case RESTRICTED_STAR:
		str_coloring="RESTRICTED_STAR";
		break;
	case DISTANCE_TWO:
		str_coloring="DISTANCE_TWO";
		break;
	}

	Color->Coloring(str_ordering, str_coloring);

	std::vector<int32_t> vi_VertexColors;
	Color->GetVertexColors(vi_VertexColors);

	REQUIRE(vi_VertexColors.size()==static_cast<uint32_t>(m_dimension),
		"dimension mismatch, %d vs %d!\n", vi_VertexColors.size(), m_dimension);

	m_coloring_vector=SGVector<int32_t>(vi_VertexColors.size());

	for (std::vector<int32_t>::iterator it=vi_VertexColors.begin();
		it!=vi_VertexColors.end(); it++)
	{
		index_t i=static_cast<index_t>(std::distance(vi_VertexColors.begin(), it));
		m_coloring_vector[i]=*it;
	}

	Map<VectorXi> colors(m_coloring_vector.vector, m_coloring_vector.vlen);
	m_num_samples=colors.maxCoeff()+1;
	SG_DEBUG("Using %d samples (aka colours) for probing trace sampler\n",
			m_num_samples);

	delete sp_str;
	delete Color;

	// set the precomputed flag true
	m_is_precomputed=true;

	SG_DEBUG("Leaving\n");
}
CJobResultAggregator* CLogRationalApproximationIndividual::submit_jobs(
	SGVector<float64_t> sample)
{
	SG_DEBUG("OperatorFunction::submit_jobs(): Entering..\n");
	REQUIRE(sample.vector, "Sample is not initialized!\n");
	REQUIRE(m_linear_operator, "Operator is not initialized!\n");
	REQUIRE(m_computation_engine, "Computation engine is NULL\n");

	// create the aggregator with sample, and the multiplier
	CIndividualJobResultAggregator* agg=new CIndividualJobResultAggregator(
		m_linear_operator, sample, m_constant_multiplier);
	// we don't want the aggregator to be destroyed when the job is unref-ed
	SG_REF(agg);

	// this enum saves us from repeated typechecking for all jobs
	enum typeID {DENSE=1, SPARSE, UNKNOWN} operator_type=UNKNOWN;

	// create a complex copy of the matrix linear operator
	CMatrixOperator<complex128_t>* complex_op=NULL;
	if (typeid(*m_linear_operator)==typeid(CDenseMatrixOperator<float64_t>))
	{
		operator_type=DENSE;

		CDenseMatrixOperator<float64_t>* op
			=dynamic_cast<CDenseMatrixOperator<float64_t>*>(m_linear_operator);

		REQUIRE(op->get_matrix_operator().matrix, "Matrix is not initialized!\n");

		// create complex dense matrix operator
		complex_op=static_cast<CDenseMatrixOperator<complex128_t>*>(*op);
	}
	else if (typeid(*m_linear_operator)==typeid(CSparseMatrixOperator<float64_t>))
	{
		operator_type=SPARSE;

		CSparseMatrixOperator<float64_t>* op
			=dynamic_cast<CSparseMatrixOperator<float64_t>*>(m_linear_operator);

		REQUIRE(op->get_matrix_operator().sparse_matrix, "Matrix is not initialized!\n");

		// create complex sparse matrix operator
		complex_op=static_cast<CSparseMatrixOperator<complex128_t>*>(*op);
	}
	else
	{
		// something weird happened
		SG_ERROR("OperatorFunction::submit_jobs(): Unknown MatrixOperator given!\n");
	}

	// create num_shifts number of jobs for current sample vector
	for (index_t i=0; i<m_num_shifts; ++i)
	{
		// create a deep copy of the operator
		CMatrixOperator<complex128_t>* shifted_op=NULL;

		switch(operator_type)
		{
		case DENSE:
			shifted_op=new CDenseMatrixOperator<complex128_t>
				(*dynamic_cast<CDenseMatrixOperator<complex128_t>*>(complex_op));
			break;
		case SPARSE:
			shifted_op=new CSparseMatrixOperator<complex128_t>
				(*dynamic_cast<CSparseMatrixOperator<complex128_t>*>(complex_op));
			break;
		default:
			break;
		}

		REQUIRE(shifted_op, "OperatorFunction::submit_jobs():"
			"MatrixOperator typeinfo was not detected!\n");

		// move the shift inside the operator
		// (see CRationalApproximation)
		SGVector<complex128_t> diag=shifted_op->get_diagonal();
		for (index_t j=0; j<diag.vlen; ++j)
			diag[j]-=m_shifts[i];
		shifted_op->set_diagonal(diag);

		// create a job and submit to the engine
		CRationalApproximationIndividualJob* job
			=new CRationalApproximationIndividualJob(agg, m_linear_solver,
				shifted_op, sample, m_weights[i]);
		SG_REF(job);

		m_computation_engine->submit_job(job);

		// we can safely unref the job here, computation engine takes it from here
		SG_UNREF(job);
	}

	SG_UNREF(complex_op);

	SG_DEBUG("OperatorFunction::submit_jobs(): Leaving..\n");
	return agg;
}
template <class ST>
void CStreamingHashedSparseFeatures<ST>::set_vector_reader()
{
	SG_DEBUG("called inside set_vector_reader\n");
	parser.set_read_vector(&CStreamingFile::get_sparse_vector);
}
template <class ST>
CFeatures* CFeatureSelection<ST>::apply_backward_elimination(CFeatures* features)
{
	SG_DEBUG("Entering!\n");

	// precompute whenever appropriate for performing the rest of the tasks
	precompute();

	// NULL check for features is handled in get_num_features
	index_t num_features=get_num_features(features);
	SG_DEBUG("Initial number of features %d!\n", num_features);

	// the main loop
	while (num_features>m_target_dim)
	{
		// tune the measurement parameters whenever necessary based on current
		// features
		adapt_params(features);

		// compute the measures for each of the current dimensions
		SGVector<float64_t> measures(num_features);
		for (index_t i=0; i<num_features; ++i)
			measures[i]=compute_measures(features, i);

		if (io->get_loglevel()==MSG_DEBUG || io->get_loglevel()==MSG_GCDEBUG)
			measures.display_vector("measures");

		// rank the measures
		SGVector<index_t> argsorted=CMath::argsort(measures);

		if (io->get_loglevel()==MSG_DEBUG || io->get_loglevel()==MSG_GCDEBUG)
			argsorted.display_vector("argsorted");

		// make sure that we don't end up with fewer feats than the target dim
		index_t to_remove;
		if (m_policy==N_SMALLEST || m_policy==N_LARGEST)
			to_remove=m_num_remove;
		else
			to_remove=num_features*m_num_remove*0.01;

		index_t can_remove=num_features-m_target_dim;

		// if policy is to remove N feats corresponding to smallest/largest
		// measures, we just replace N with can_remove. if policy is to remove
		// N% feats, then we change the policy temporarily and remove a fixed
		// can_remove number of feats instead
		index_t orig_remove=m_num_remove;
		EFeatureRemovalPolicy orig_policy=m_policy;

		if (to_remove>can_remove)
		{
			m_num_remove=can_remove;
			SG_DEBUG("Can only remove %d features in this iteration!\n",
					can_remove);

			if (m_policy==PERCENTILE_SMALLEST)
				m_policy=N_SMALLEST;
			else if (m_policy==PERCENTILE_LARGEST)
				m_policy=N_LARGEST;
		}

		// remove appropriate number of features based on the measures and the
		// removal policy. this internally updates the subset for selected
		// features as well
		features=remove_feats(features, argsorted);

		// restore original removal policy and numbers if necessary for the
		// sake of consistency
		if (to_remove>can_remove)
		{
			m_policy=orig_policy;
			m_num_remove=orig_remove;
		}

		// update the number of features
		num_features=get_num_features(features);
		SG_DEBUG("Current number of features %d!\n", num_features);
	}

	// sanity check
	ASSERT(m_subset->get_size()==m_target_dim);

	SG_DEBUG("Leaving!\n");
	return features;
}
void CSerialComputationEngine::wait_for_all()
{
	SG_DEBUG("All jobs are computed!\n");
}
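A hedged sketch of the submit/wait contract these two methods implement; create_my_job() is a made-up placeholder rather than Shogun API, and only submit_job(), wait_for_all(), compute() and the SG_REF/SG_UNREF calls come from this listing.

// hypothetical driver code, illustration only
CSerialComputationEngine* engine=new CSerialComputationEngine();
SG_REF(engine);

CIndependentJob* job=create_my_job();   // placeholder factory, not a Shogun call
SG_REF(job);

// with the serial engine, submit_job() runs job->compute() synchronously
engine->submit_job(job);
SG_UNREF(job);

// a no-op here, but keeps the calling code engine-agnostic
engine->wait_for_all();
SG_UNREF(engine);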
SGMatrix<float64_t> CLogDetEstimator::sample_without_averaging(
	index_t num_estimates)
{
	SG_DEBUG("Entering...\n")

	REQUIRE(m_operator_log, "Operator function is NULL\n");
	// call the precompute of operator function to compute all prerequisites
	m_operator_log->precompute();

	REQUIRE(m_trace_sampler, "Trace sampler is NULL\n");
	// call the precompute of the sampler
	m_trace_sampler->precompute();

	// for storing the aggregators that submit_jobs return
	CDynamicObjectArray aggregators;
	index_t num_trace_samples=m_trace_sampler->get_num_samples();

	for (index_t i=0; i<num_estimates; ++i)
	{
		for (index_t j=0; j<num_trace_samples; ++j)
		{
			// get the trace sampler vector
			SGVector<float64_t> s=m_trace_sampler->sample(j);
			// create jobs with the sample vector and store the aggregator
			CJobResultAggregator* agg=m_operator_log->submit_jobs(s);
			aggregators.append_element(agg);
			SG_UNREF(agg);
		}
	}

	REQUIRE(m_computation_engine, "Computation engine is NULL\n");
	// wait for all the jobs to be completed
	m_computation_engine->wait_for_all();

	// the samples matrix which stores the estimates without averaging
	// dimension: number of trace samples x number of log-det estimates
	SGMatrix<float64_t> samples(num_trace_samples, num_estimates);

	// use the aggregators to find the final result
	int32_t num_aggregates=aggregators.get_num_elements();
	for (int32_t i=0; i<num_aggregates; ++i)
	{
		CJobResultAggregator* agg=dynamic_cast<CJobResultAggregator*>
			(aggregators.get_element(i));
		if (!agg)
			SG_ERROR("Element is not CJobResultAggregator type!\n");

		// call finalize on all the aggregators
		agg->finalize();
		CScalarResult<float64_t>* r=dynamic_cast<CScalarResult<float64_t>*>
			(agg->get_final_result());
		if (!r)
			SG_ERROR("Result is not CScalarResult type!\n");

		// it's important that we don't just unref the result here
		index_t idx_row=i%num_trace_samples;
		index_t idx_col=i/num_trace_samples;
		samples(idx_row, idx_col)=r->get_result();
		SG_UNREF(agg);
	}

	// clear all aggregators
	aggregators.clear_array();

	SG_DEBUG("Leaving\n")
	return samples;
}
SGVector<float64_t> CConjugateGradientSolver::solve(
	CLinearOperator<float64_t>* A, SGVector<float64_t> b)
{
	SG_DEBUG("CConjugateGradientSolve::solve(): Entering..\n");

	// sanity check
	REQUIRE(A, "Operator is NULL!\n");
	REQUIRE(A->get_dimension()==b.vlen, "Dimension mismatch!\n");

	// the final solution vector, initial guess is 0
	SGVector<float64_t> result(b.vlen);
	result.set_const(0.0);

	// the rest of the part hinges on eigen3 for computing norms
	Map<VectorXd> x(result.vector, result.vlen);
	Map<VectorXd> b_map(b.vector, b.vlen);

	// direction vector
	SGVector<float64_t> p_(result.vlen);
	Map<VectorXd> p(p_.vector, p_.vlen);

	// residual r_i=b-Ax_i, here x_0=[0], so r_0=b
	VectorXd r=b_map;

	// initial direction is same as residual
	p=r;

	// the iterator for this iterative solver
	IterativeSolverIterator<float64_t> it(b_map, m_max_iteration_limit,
		m_relative_tolerence, m_absolute_tolerence);

	// CG iteration begins
	float64_t r_norm2=r.dot(r);

	// start the timer
	CTime time;
	time.start();

	// set the residuals to zero
	if (m_store_residuals)
		m_residuals.set_const(0.0);

	for (it.begin(r); !it.end(r); ++it)
	{
		SG_DEBUG("CG iteration %d, residual norm %f\n",
			it.get_iter_info().iteration_count,
			it.get_iter_info().residual_norm);

		if (m_store_residuals)
		{
			m_residuals[it.get_iter_info().iteration_count]
				=it.get_iter_info().residual_norm;
		}

		// apply linear operator to the direction vector
		SGVector<float64_t> Ap_=A->apply(p_);
		Map<VectorXd> Ap(Ap_.vector, Ap_.vlen);

		// compute p^{T}Ap, if zero, failure
		float64_t p_dot_Ap=p.dot(Ap);
		if (p_dot_Ap==0.0)
			break;

		// compute the alpha parameter of CG
		float64_t alpha=r_norm2/p_dot_Ap;

		// update the solution vector and residual
		// x_{i}=x_{i-1}+\alpha_{i}p
		x+=alpha*p;

		// r_{i}=r_{i-1}-\alpha_{i}Ap
		r-=alpha*Ap;

		// compute new ||r||_{2}, if zero, converged
		float64_t r_norm2_i=r.dot(r);
		if (r_norm2_i==0.0)
			break;

		// compute the beta parameter of CG
		float64_t beta=r_norm2_i/r_norm2;

		// update direction, and ||r||_{2}
		r_norm2=r_norm2_i;
		p=r+beta*p;
	}

	float64_t elapsed=time.cur_time_diff();

	if (!it.succeeded(r))
		SG_WARNING("Did not converge!\n");

	SG_INFO("Iteration took %ld times, residual norm=%.20lf, time elapsed=%lf\n",
		it.get_iter_info().iteration_count, it.get_iter_info().residual_norm, elapsed);

	SG_DEBUG("CConjugateGradientSolve::solve(): Leaving..\n");
	return result;
}
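For reference, the loop above implements the standard conjugate gradient recurrences, written with the same symbols as the in-code comments:

\[
\alpha_i=\frac{r_{i-1}^{\top}r_{i-1}}{p_i^{\top}Ap_i},\qquad
x_i=x_{i-1}+\alpha_i p_i,\qquad
r_i=r_{i-1}-\alpha_i Ap_i,\qquad
\beta_i=\frac{r_i^{\top}r_i}{r_{i-1}^{\top}r_{i-1}},\qquad
p_{i+1}=r_i+\beta_i p_i .
\]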
SGVector<float64_t> CLogDetEstimator::sample(index_t num_estimates)
{
	SG_DEBUG("Entering\n");
	SG_INFO("Computing %d log-det estimates\n", num_estimates);

	REQUIRE(m_operator_log, "Operator function is NULL\n");
	// call the precompute of operator function to compute the prerequisites
	m_operator_log->precompute();

	REQUIRE(m_trace_sampler, "Trace sampler is NULL\n");
	// call the precompute of the sampler
	m_trace_sampler->precompute();

	REQUIRE(m_operator_log->get_operator()->get_dimension()
		==m_trace_sampler->get_dimension(),
		"Mismatch in dimensions of the operator and trace-sampler, %d vs %d!\n",
		m_operator_log->get_operator()->get_dimension(),
		m_trace_sampler->get_dimension());

	// for storing the aggregators that submit_jobs return
	CDynamicObjectArray* aggregators=new CDynamicObjectArray();
	index_t num_trace_samples=m_trace_sampler->get_num_samples();

	for (index_t i=0; i<num_estimates; ++i)
	{
		for (index_t j=0; j<num_trace_samples; ++j)
		{
			SG_INFO("Computing log-determinant trace sample %d/%d\n", j,
					num_trace_samples);

			SG_DEBUG("Creating job for estimate %d, trace sample %d/%d\n", i, j,
					num_trace_samples);
			// get the trace sampler vector
			SGVector<float64_t> s=m_trace_sampler->sample(j);
			// create jobs with the sample vector and store the aggregator
			CJobResultAggregator* agg=m_operator_log->submit_jobs(s);
			aggregators->append_element(agg);
			SG_UNREF(agg);
		}
	}

	REQUIRE(m_computation_engine, "Computation engine is NULL\n");

	// wait for all the jobs to be completed
	SG_INFO("Waiting for jobs to finish\n");
	m_computation_engine->wait_for_all();
	SG_INFO("All jobs finished, aggregating results\n");

	// the samples vector which stores the estimates with averaging
	SGVector<float64_t> samples(num_estimates);
	samples.zero();

	// use the aggregators to find the final result
	// use the same order as job submission to combine results
	int32_t num_aggregates=aggregators->get_num_elements();
	index_t idx_row=0;
	index_t idx_col=0;
	for (int32_t i=0; i<num_aggregates; ++i)
	{
		// this cast is safe due to above way of building the array
		CJobResultAggregator* agg=dynamic_cast<CJobResultAggregator*>
			(aggregators->get_element(i));
		ASSERT(agg);

		// call finalize on all the aggregators, cast is safe again
		agg->finalize();
		CScalarResult<float64_t>* r=dynamic_cast<CScalarResult<float64_t>*>
			(agg->get_final_result());
		ASSERT(r);

		// iterate through indices, group results in the same way as jobs
		samples[idx_col]+=r->get_result();
		idx_row++;
		if (idx_row>=num_trace_samples)
		{
			idx_row=0;
			idx_col++;
		}

		SG_UNREF(agg);
	}

	// clear all aggregators
	SG_UNREF(aggregators)

	SG_INFO("Finished computing %d log-det estimates\n", num_estimates);

	SG_DEBUG("Leaving\n");
	return samples;
}
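Putting sample() together: each estimate submits one job per trace vector s_j and the aggregated scalars are summed per estimate, so each returned entry has the form below (hedged, since the per-job scalar depends on the operator function in use; for the rational-approximation jobs it approximates s_j^T log(A) s_j):

\[
\mathrm{samples}[i] \;\approx\; \sum_{j=1}^{J} s_j^{\top} \log(A)\, s_j \;\approx\; \operatorname{tr}(\log A) \;=\; \log\det A ,
\]

with J = num_trace_samples, and the approximation quality governed by the trace sampler and the rational approximation of the log.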
SGVector<complex128_t> CCGMShiftedFamilySolver::solve_shifted_weighted(
	CLinearOperator<SGVector<float64_t>, SGVector<float64_t> >* A, SGVector<float64_t> b,
	SGVector<complex128_t> shifts, SGVector<complex128_t> weights)
{
	SG_DEBUG("Entering\n");

	// sanity check
	REQUIRE(A, "Operator is NULL!\n");
	REQUIRE(A->get_dimension()==b.vlen, "Dimension mismatch! [%d vs %d]\n",
		A->get_dimension(), b.vlen);
	REQUIRE(shifts.vector,"Shifts are not initialized!\n");
	REQUIRE(weights.vector,"Weights are not initialized!\n");
	REQUIRE(shifts.vlen==weights.vlen, "Number of shifts and number of "
		"weights are not equal! [%d vs %d]\n", shifts.vlen, weights.vlen);

	// the solution matrix, one column per shift, initial guess 0 for all
	MatrixXcd x_sh=MatrixXcd::Zero(b.vlen, shifts.vlen);
	MatrixXcd p_sh=MatrixXcd::Zero(b.vlen, shifts.vlen);

	// non-shifted direction
	SGVector<float64_t> p_(b.vlen);

	// the rest of the part hinges on eigen3 for computing norms
	Map<VectorXd> b_map(b.vector, b.vlen);
	Map<VectorXd> p(p_.vector, p_.vlen);

	// residual r_i=b-Ax_i, here x_0=[0], so r_0=b
	VectorXd r=b_map;

	// initial direction is same as residual
	p=r;
	p_sh=r.replicate(1, shifts.vlen).cast<complex128_t>();

	// non shifted initializers
	float64_t r_norm2=r.dot(r);
	float64_t beta_old=1.0;
	float64_t alpha=1.0;

	// shifted quantities
	SGVector<complex128_t> alpha_sh(shifts.vlen);
	SGVector<complex128_t> beta_sh(shifts.vlen);
	SGVector<complex128_t> zeta_sh_old(shifts.vlen);
	SGVector<complex128_t> zeta_sh_cur(shifts.vlen);
	SGVector<complex128_t> zeta_sh_new(shifts.vlen);

	// shifted initializers
	zeta_sh_old.set_const(1.0);
	zeta_sh_cur.set_const(1.0);

	// the iterator for this iterative solver
	IterativeSolverIterator<float64_t> it(r, m_max_iteration_limit,
		m_relative_tolerence, m_absolute_tolerence);

	// start the timer
	CTime time;
	time.start();

	// set the residuals to zero
	if (m_store_residuals)
		m_residuals.set_const(0.0);

	// CG iteration begins
	for (it.begin(r); !it.end(r); ++it)
	{

		SG_DEBUG("CG iteration %d, residual norm %f\n",
				it.get_iter_info().iteration_count,
				it.get_iter_info().residual_norm);

		if (m_store_residuals)
		{
			m_residuals[it.get_iter_info().iteration_count]
				=it.get_iter_info().residual_norm;
		}

		// apply linear operator to the direction vector
		SGVector<float64_t> Ap_=A->apply(p_);
		Map<VectorXd> Ap(Ap_.vector, Ap_.vlen);

		// compute p^{T}Ap, if zero, failure
		float64_t p_dot_Ap=p.dot(Ap);
		if (p_dot_Ap==0.0)
			break;

		// compute the beta parameter of CG_M
		float64_t beta=-r_norm2/p_dot_Ap;

		// compute the zeta-shifted parameter of CG_M
		compute_zeta_sh_new(zeta_sh_old, zeta_sh_cur, shifts, beta_old, beta,
			alpha, zeta_sh_new);

		// compute beta-shifted parameter of CG_M
		compute_beta_sh(zeta_sh_new, zeta_sh_cur, beta, beta_sh);

		// update the solution vector and residual
		for (index_t i=0; i<shifts.vlen; ++i)
			x_sh.col(i)-=beta_sh[i]*p_sh.col(i);

		// r_{i}=r_{i-1}+\beta_{i}Ap
		r+=beta*Ap;

		// compute new ||r||_{2}, if zero, converged
		float64_t r_norm2_i=r.dot(r);
		if (r_norm2_i==0.0)
			break;

		// compute the alpha parameter of CG_M
		alpha=r_norm2_i/r_norm2;

		// update ||r||_{2}
		r_norm2=r_norm2_i;

		// update direction
		p=r+alpha*p;

		compute_alpha_sh(zeta_sh_new, zeta_sh_cur, beta_sh, beta, alpha, alpha_sh);

		for (index_t i=0; i<shifts.vlen; ++i)
		{
			p_sh.col(i)*=alpha_sh[i];
			p_sh.col(i)+=zeta_sh_new[i]*r;
		}

		// update parameters
		for (index_t i=0; i<shifts.vlen; ++i)
		{
			zeta_sh_old[i]=zeta_sh_cur[i];
			zeta_sh_cur[i]=zeta_sh_new[i];
		}
		beta_old=beta;
	}

	float64_t elapsed=time.cur_time_diff();

	if (!it.succeeded(r))
		SG_WARNING("Did not converge!\n");

	SG_INFO("Iteration took %d times, residual norm=%.20lf, time elapsed=%f\n",
		it.get_iter_info().iteration_count, it.get_iter_info().residual_norm, elapsed);

	// compute the final result vector multiplied by weights
	SGVector<complex128_t> result(b.vlen);
	result.set_const(0.0);
	Map<VectorXcd> x(result.vector, result.vlen);

	for (index_t i=0; i<x_sh.cols(); ++i)
		x+=x_sh.col(i)*weights[i];

	SG_DEBUG("Leaving\n");
	return result;
}
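The whole shifted family shares the single Krylov sequence built from A and b (the shifts enter only through the zeta/beta/alpha recurrences above), and the final loop combines the per-shift solutions, so the returned vector is

\[
x \;=\; \sum_{l} w_l\, x^{(\sigma_l)} ,
\]

where x^{(\sigma_l)} denotes the solution of the l-th shifted system.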
void CRInterface::get_char_string_list(TString<char>*& strings, int32_t& num_str, int32_t& max_string_len)
{
	SEXP strs=get_arg_increment();

	if (strs == R_NilValue || TYPEOF(strs) != STRSXP)
		SG_ERROR("Expected String List as argument %d\n", m_rhs_counter);

	SG_DEBUG("nrows=%d ncols=%d Rf_length=%d\n", nrows(strs), ncols(strs), Rf_length(strs));

	if (nrows(strs) && ncols(strs)!=1)
	{
		num_str = ncols(strs);
		max_string_len = nrows(strs);

		strings=new TString<char>[num_str];
		ASSERT(strings);

		for (int32_t i=0; i<num_str; i++)
		{
			char* dst=new char[max_string_len+1];
			for (int32_t j=0; j<max_string_len; j++)
			{
				SEXPREC* s= STRING_ELT(strs,i*max_string_len+j);
				if (LENGTH(s)!=1)
					SG_ERROR("LENGTH(s)=%d != 1, nrows(strs)=%d ncols(strs)=%d\n", LENGTH(s), nrows(strs), ncols(strs));
				dst[j]=CHAR(s)[0];
			}
			strings[i].string=dst;
			strings[i].string[max_string_len]='\0';
			strings[i].length=max_string_len;
		}
	}
	else
	{
		max_string_len=0;
		num_str=Rf_length(strs);
		strings=new TString<char>[num_str];
		ASSERT(strings);

		for (int32_t i=0; i<num_str; i++)
		{
			SEXPREC* s= STRING_ELT(strs,i);
			char* c= (char*) CHAR(s);
			int32_t len=LENGTH(s);

			if (len && c)
			{
				char* dst=new char[len+1];
				strings[i].string=(char*) memcpy(dst, c, len*sizeof(char));
				strings[i].string[len]='\0';
				strings[i].length=len;
				max_string_len=CMath::max(max_string_len, len);
			}
			else
			{
				SG_WARNING( "string with index %d has zero length\n", i+1);
				strings[i].string=0;
				strings[i].length=0;
			}
		}
	}
}