Example #1
template<class ST> SGSparseVector<ST> CSparseFeatures<ST>::get_sparse_feature_vector(int32_t num)
{
	REQUIRE(num>=0 && num<get_num_vectors(),
		"get_sparse_feature_vector(num=%d): num exceeds [0;%d]\n",
		num, get_num_vectors()-1);
	index_t real_num=m_subset_stack->subset_idx_conversion(num);

	if (sparse_feature_matrix.sparse_matrix)
	{
		return sparse_feature_matrix[real_num];
	}
	else
	{
		SGSparseVector<ST> result;
		if (feature_cache)
		{
			result.features=feature_cache->lock_entry(num);

			if (result.features)
				return result;
			else
			{
				result.features=feature_cache->set_entry(num);
			}
		}

		//if (!result.features)
		//	result.do_free=true;

		result.features=compute_sparse_feature_vector(num,
			result.num_feat_entries, result.features);


		if (get_num_preprocessors())
		{
			int32_t tmp_len=result.num_feat_entries;
			SGSparseVectorEntry<ST>* tmp_feat_before=result.features;
			SGSparseVectorEntry<ST>* tmp_feat_after = NULL;

			for (int32_t i=0; i<get_num_preprocessors(); i++)
			{
				/* preprocessor application is currently disabled for sparse features */
				//tmp_feat_after=((CSparsePreprocessor<ST>*) get_preproc(i))->apply_to_feature_vector(tmp_feat_before, tmp_len);

				if (i!=0)	// delete feature vector, except for the first one, i.e., feat
					SG_FREE(tmp_feat_before);
				tmp_feat_before=tmp_feat_after;
			}

			if (tmp_feat_after)
			{
				memcpy(result.features, tmp_feat_after,
						sizeof(SGSparseVectorEntry<ST>)*tmp_len);

				SG_FREE(tmp_feat_after);
				result.num_feat_entries=tmp_len;
			}
			SG_DEBUG("len: %d len2: %d\n", result.num_feat_entries, get_num_features())
		}
		return result;
	}
}
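A minimal caller sketch for the accessor above, assuming the Shogun 3.x-era CSparseFeatures API shown here and that free_sparse_feature_vector() is the matching release call for cached or temporary vectors:
/* Sketch: sum all nonzero entries of a CSparseFeatures<float64_t> object.
 * Assumes free_sparse_feature_vector(num) releases any cache lock or
 * temporary buffer created by get_sparse_feature_vector(num). */
float64_t sum_all_entries(CSparseFeatures<float64_t>* features)
{
	float64_t total=0.0;
	for (int32_t num=0; num<features->get_num_vectors(); num++)
	{
		SGSparseVector<float64_t> vec=features->get_sparse_feature_vector(num);

		for (int32_t k=0; k<vec.num_feat_entries; k++)
			total+=vec.features[k].entry;

		features->free_sparse_feature_vector(num);
	}
	return total;
}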
int main(int argc, char** argv)
{
	char buffer[1500];
	int print_orig = 0;

	if ( argc > 1 )
	{
		print_orig = 1;
	}

	while ( scanf("%s", buffer) != EOF )
	{
		/* buffer holds an s6 string */
		sparsegraph g;
		SG_INIT(g);

		int num_loops;
		stringtosparsegraph(buffer, &g, &num_loops);

		int nv = g.nv;
		int m = (nv + WORDSIZE - 1) / WORDSIZE;
		nauty_check(WORDSIZE, m, nv, NAUTYVERSIONID);

		DYNALLSTAT(int, lab, lab_n);
		DYNALLSTAT(int, ptn, ptn_n);
		DYNALLSTAT(int, orbits, orbits_n);

		DYNALLOC1(int, lab, lab_n, nv, "malloc");
		DYNALLOC1(int, ptn, ptn_n, nv, "malloc");
		DYNALLOC1(int, orbits, orbits_n, nv, "malloc");

		static DEFAULTOPTIONS_SPARSEGRAPH(options);

		options.defaultptn = TRUE; /* Don't need colors */
		options.getcanon = TRUE; /* gets labels */
		options.digraph = TRUE;


		statsblk stats; /* we'll use this at the end */
		DYNALLSTAT(setword, workspace, worksize);
		DYNALLOC1(setword, workspace, worksize, 50 * m, "malloc");

		sparsegraph canon_g;
		SG_INIT(canon_g);

		/* call nauty */
		nauty((graph*) &g, lab, ptn, NULL, orbits, &options, &stats, workspace,
				50 * m, m, g.nv, (graph*) &canon_g);

		sortlists_sg(&canon_g);

		char* canon_str = sgtos6(&canon_g);
		int canon_len = strlen(canon_str);

		if ( print_orig == 0 )
		{
			printf("%s", canon_str);
		}
		else
		{
			canon_str[canon_len-1] = 0;
			printf("%s", canon_str);
			printf("\t%s\n", buffer);
		}

		/* free workspace */
		DYNFREE(workspace, worksize);
		DYNFREE(lab,lab_n);
		DYNFREE(ptn,ptn_n);
		DYNFREE(orbits,orbits_n);

		SG_FREE(canon_g);
		SG_FREE(g);
	}

	return 0;
}
void printModifiedGraphBarrier(sparsegraph* g)
{
	/* need to make a better graph! */
	sparsegraph sg;
	SG_INIT(sg);

	sg.w = 0;
	sg.wlen = 0;

	sg.v = (int*) malloc(g->nv * sizeof(int));
	sg.d = (int*) malloc(g->nv * sizeof(int));
	sg.e = (int*) malloc(g->nde * sizeof(int));

	int* rev_verts = (int*) malloc(g->nv * sizeof(int));
	int* down_verts = (int*) malloc(g->nv * sizeof(int));

	int vindex = 0;
	int eindex = 0;
	int indirect_edge_deletes = 0;

	for ( int i = 0; i < g->nv; i++ )
	{
		/* if vertex is not deleted */
		if ( g->v[i] >= 0 )
		{
			//			printf("[%d] : %d \t", vindex, i);
			rev_verts[vindex] = i;
			down_verts[i] = vindex;
			vindex++;
		}
	}

	vindex = 0;

	for ( int i = 0; i < g->nv; i++ )
	{
		/* if vertex is not deleted */
		if ( g->v[i] >= 0 )
		{
			sg.v[vindex] = eindex;
			sg.d[vindex] = 0;

			for ( int j = 0; j < g->d[i]; j++ )
			{
				/* if edge is not deleted AND other vertex is not deleted */
				int edge_val = g->e[g->v[i] + j];

				if ( edge_val >= 0 && g->v[edge_val] >= 0 )
				{
					sg.e[eindex] = down_verts[edge_val];
					sg.d[vindex] = sg.d[vindex] + 1;
					eindex++;
				}
				else
				{
					/* the vertex on the other end was deleted */
					indirect_edge_deletes++;
				}
			}

			vindex++;
		}
	}

	sg.vlen = vindex;
	sg.dlen = vindex;
	sg.elen = eindex;
	sg.nv = vindex;
	sg.nde = eindex;

	free(down_verts);
	free(rev_verts);

	printf("%s", sgtos6(&sg));

	SG_FREE(sg);
}
Example #4
template<class T> CRegressionLabels* SGSparseMatrix<T>::load_svmlight_file(char* fname,
		bool do_sort_features)
{
	CRegressionLabels* lab=NULL;

	size_t blocksize=1024*1024;
	size_t required_blocksize=blocksize;
	uint8_t* dummy=SG_MALLOC(uint8_t, blocksize);
	FILE* f=fopen(fname, "ro");

	if (f)
	{
		free_data();

		SG_SINFO("counting line numbers in file %s\n", fname)
		size_t sz=blocksize;
		size_t block_offs=0;
		size_t old_block_offs=0;
		fseek(f, 0, SEEK_END);
		size_t fsize=ftell(f);
		rewind(f);

		while (sz == blocksize)
		{
			sz=fread(dummy, sizeof(uint8_t), blocksize, f);
			for (size_t i=0; i<sz; i++)
			{
				block_offs++;
				if (dummy[i]=='\n' || (i==sz-1 && sz<blocksize))
				{
					num_vectors++;
					required_blocksize=CMath::max(required_blocksize, block_offs-old_block_offs+1);
					old_block_offs=block_offs;
				}
			}
			SG_SPROGRESS(block_offs, 0, fsize, 1, "COUNTING:\t")
		}

		SG_SINFO("found %d feature vectors\n", num_vectors)
		SG_FREE(dummy);
		blocksize=required_blocksize;
		dummy = SG_MALLOC(uint8_t, blocksize+1); //allow setting of '\0' at EOL

		lab=new CRegressionLabels(num_vectors);
		sparse_matrix=SG_MALLOC(SGSparseVector<T>, num_vectors);
		rewind(f);
		sz=blocksize;
		int32_t lines=0;
		while (sz == blocksize)
		{
			sz=fread(dummy, sizeof(uint8_t), blocksize, f);

			size_t old_sz=0;
			for (size_t i=0; i<sz; i++)
			{
				if (i==sz-1 && dummy[i]!='\n' && sz==blocksize)
				{
					size_t len=i-old_sz+1;
					uint8_t* data=&dummy[old_sz];

					for (size_t j=0; j<len; j++)
						dummy[j]=data[j];

					sz=fread(dummy+len, sizeof(uint8_t), blocksize-len, f);
					i=0;
					old_sz=0;
					sz+=len;
				}

				if (dummy[i]=='\n' || (i==sz-1 && sz<blocksize))
				{

					size_t len=i-old_sz;
					uint8_t* data=&dummy[old_sz];

					int32_t dims=0;
					for (size_t j=0; j<len; j++)
					{
						if (data[j]==':')
							dims++;
					}

					if (dims<=0)
					{
						SG_SERROR("Error in line %d - number of"
								" dimensions is %d line is %d characters"
								" long\n line_content:'%.*s'\n", lines,
								dims, len, len, (const char*) data);
					}

					SGSparseVectorEntry<T>* feat=SG_MALLOC(SGSparseVectorEntry<T>, dims);
					size_t j=0;
					for (; j<len; j++)
					{
						if (data[j]==' ')
						{
							data[j]='\0';

							lab->set_label(lines, atof((const char*) data));
							break;
						}
					}

					int32_t d=0;
					j++;
					uint8_t* start=&data[j];
					for (; j<len; j++)
					{
						if (data[j]==':')
						{
							data[j]='\0';

							feat[d].feat_index=(int32_t) atoi((const char*) start)-1;
							num_features=CMath::max(num_features, feat[d].feat_index+1);

							j++;
							start=&data[j];
							for (; j<len; j++)
							{
								if (data[j]==' ' || data[j]=='\n')
								{
									data[j]='\0';
									feat[d].entry=(T) atof((const char*) start);
									d++;
									break;
								}
							}

							if (j==len)
							{
								data[j]='\0';
								feat[dims-1].entry=(T) atof((const char*) start);
							}

							j++;
							start=&data[j];
						}
					}

					sparse_matrix[lines].num_feat_entries=dims;
					sparse_matrix[lines].features=feat;

					old_sz=i+1;
					lines++;
					SG_SPROGRESS(lines, 0, num_vectors, 1, "LOADING:\t")
				}
			}
		}
		SG_SINFO("file successfully read\n")
		fclose(f);
	}
	SG_FREE(dummy);

	if (do_sort_features)
		sort_features();

	return lab;
}
SGVector<float64_t> CKernelMeanMatching::compute_weights()
{
	int32_t i,j;
	ASSERT(m_kernel)
	ASSERT(m_training_indices.vlen)
	ASSERT(m_test_indices.vlen)

	int32_t n_tr = m_training_indices.vlen;
	int32_t n_te = m_test_indices.vlen;

	SGVector<float64_t> weights(n_tr);
	weights.zero();

	kmm_K = SG_MALLOC(float64_t, n_tr*n_tr);
	kmm_K_ld = n_tr;
	float64_t* diag_K = SG_MALLOC(float64_t, n_tr);
	for (i=0; i<n_tr; i++)
	{
		float64_t d = m_kernel->kernel(m_training_indices[i], m_training_indices[i]);
		diag_K[i] = d;
		kmm_K[i*n_tr+i] = d;
		for (j=i+1; j<n_tr; j++)
		{
			d = m_kernel->kernel(m_training_indices[i],m_training_indices[j]);
			kmm_K[i*n_tr+j] = d;
			kmm_K[j*n_tr+i] = d;
		}
	}
	float64_t* kappa = SG_MALLOC(float64_t, n_tr);
	for (i=0; i<n_tr; i++)
	{
		float64_t avg = 0.0;
		for (j=0; j<n_te; j++)
			avg+= m_kernel->kernel(m_training_indices[i],m_test_indices[j]);

		avg *= float64_t(n_tr)/n_te;
		kappa[i] = -avg;
	}
	float64_t* a = SG_MALLOC(float64_t, n_tr);
	for (i=0; i<n_tr; i++) a[i] = 1.0;
	float64_t* LB = SG_MALLOC(float64_t, n_tr);
	float64_t* UB = SG_MALLOC(float64_t, n_tr);
	float64_t B = 2.0;
	for (i=0; i<n_tr; i++)
	{
		LB[i] = 0.0;
		UB[i] = B;
	}
	for (i=0; i<n_tr; i++)
		weights[i] = 1.0/float64_t(n_tr);

	libqp_state_T result =
		libqp_gsmo_solver(&kmm_get_col,diag_K,kappa,a,1.0,LB,UB,weights,n_tr,1000,1e-9,NULL);

	SG_DEBUG("libqp exitflag=%d, %d iterations passed, primal objective=%f\n",
	         result.exitflag,result.nIter,result.QP);

	SG_FREE(kappa);
	SG_FREE(a);
	SG_FREE(LB);
	SG_FREE(UB);
	SG_FREE(diag_K);
	SG_FREE(kmm_K);

	return weights;
}
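The QP above is handed to libqp's generalized SMO solver, which fetches kernel-matrix columns on demand through the kmm_get_col callback. That callback is not shown in this listing; a plausible sketch, assuming it simply indexes the kmm_K buffer that compute_weights() fills via file-scope pointers, is:
/* Assumed file-scope storage written by compute_weights() above. */
static float64_t* kmm_K = NULL;  /* dense n_tr x n_tr kernel matrix */
static int32_t kmm_K_ld = 0;     /* leading dimension, set to n_tr */

/* Column getter in the form libqp expects: return a pointer to column j. */
static const float64_t* kmm_get_col(uint32_t j)
{
	return kmm_K + (int64_t)j*kmm_K_ld;
}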
Example #6
void SGSparseMatrix<T>::free_data()
{
	SG_FREE(sparse_matrix);
	num_vectors = 0;
	num_features = 0;
}
Example #7
static void*
runit(void * threadarg)          /* Main routine for one thread */
{
    DYNALLSTAT(int,lab,lab_sz);
    DYNALLSTAT(int,ptn,ptn_sz);
    DYNALLSTAT(int,orbits,orbits_sz);
    DEFAULTOPTIONS_SPARSEGRAPH(options);
    statsblk stats;
    sparsegraph sg;   /* Declare sparse graph structure */

    int n,m,i;

    n = ((params*)threadarg)->n;
    options.writeautoms = ((params*)threadarg)->writeautoms;

 /* Initialise sparse graph structure. */

    SG_INIT(sg);

    m = SETWORDSNEEDED(n);
    nauty_check(WORDSIZE,m,n,NAUTYVERSIONID);

    DYNALLOC1(int,lab,lab_sz,n,"malloc");
    DYNALLOC1(int,ptn,ptn_sz,n,"malloc");
    DYNALLOC1(int,orbits,orbits_sz,n,"malloc");

 /* SG_ALLOC makes sure that the v,d,e fields of a sparse graph
    structure point to arrays that are large enough.  This only
    works if the structure has been initialised. */

    SG_ALLOC(sg,n,2*n,"malloc");

    sg.nv = n;              /* Number of vertices */
    sg.nde = 2*n;           /* Number of directed edges */

    for (i = 0; i < n; ++i)
    {
        sg.v[i] = 2*i;
        sg.d[i] = 2;
        sg.e[2*i] = (i+n-1)%n;      /* edge i->i-1 */
        sg.e[2*i+1] = (i+n+1)%n;    /* edge i->i+1 */
    }

    if (options.writeautoms)
        printf("Generators for Aut(C[%d]):\n",n);
    sparsenauty(&sg,lab,ptn,orbits,&options,&stats,NULL);

    if (options.writeautoms)
    {
        printf("Automorphism group size = ");
        writegroupsize(stdout,stats.grpsize1,stats.grpsize2);
        printf("\n");
    }
    if (stats.numorbits != 1 || stats.grpsize1 != 2*n)
        fprintf(stderr,">E group error\n");

 /* If we are using multiple threads, we need to free all the dynamic
    memory we have allocated.  We don't have to do this after each 
    call to nauty, just once before the thread finishes. */

    SG_FREE(sg);
    DYNFREE(lab,lab_sz);
    DYNFREE(ptn,ptn_sz);
    DYNFREE(orbits,orbits_sz);
    nauty_freedyn();
    nautil_freedyn();
    nausparse_freedyn();  /* Use naugraph_freedyn() instead if
                            dense format is being used. */

    return NULL;
}
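runit() above is written as a pthread entry point; a minimal driver, assuming a params struct carrying exactly the n and writeautoms fields that runit() reads, could start a few worker threads like this:
#include <pthread.h>
#include "nausparse.h"

/* Assumed parameter struct with the fields runit() reads. */
typedef struct
{
    int n;                /* number of vertices of the cycle C[n] */
    boolean writeautoms;  /* whether the thread prints generators */
} params;

#define NTHREADS 4

int main(void)
{
    pthread_t thread[NTHREADS];
    params par[NTHREADS];
    int t;

    for (t = 0; t < NTHREADS; ++t)
    {
        par[t].n = 1000 + t;         /* each thread canonicalises a different cycle */
        par[t].writeautoms = FALSE;  /* keep the sketch quiet */
        pthread_create(&thread[t], NULL, runit, &par[t]);
    }

    for (t = 0; t < NTHREADS; ++t)
        pthread_join(thread[t], NULL);

    return 0;
}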
/**
 * Destructor
 */
SaturationGraph::~SaturationGraph()
{
	printf("T SUM TIME_IN_ORBITS %lf\n", this->time_in_orbits);
	printf("T SUM TIME_IN_COMPACT %lf\n", this->time_in_compact);
	printf("T SUM TIME_IN_REGEN %lf\n", this->time_in_regenerate);
	printf("T SUM TIME_IN_FEASIBLE %lf\n", this->time_in_feasible);
	printf("T SUM TIME_IN_STABILIZE %lf\n", this->time_in_stabilized);

	int Nchoose2 = (this->N * (this->N - 1)) / 2;

	if ( this->adjmat != 0 )
	{
		free(this->adjmat);
		this->adjmat = 0;
	}

	if ( this->completemult != 0 )
	{
		free(this->completemult);
		this->completemult = 0;
	}

	if ( this->completions != 0 )
	{
		for ( int i = 0; i < Nchoose2; i++ )
		{
			if ( this->completions[i] != 0 )
			{
				/* The Augmentation objects COPY these completions */
				free(this->completions[i]);
				this->completions[i] = 0;
			}
		}

		free(this->completions);
		this->completions = 0;
	}

	if ( this->zeroDegrees != 0 )
	{
		free(this->zeroDegrees);
		this->zeroDegrees = 0;
	}

	if ( this->oneDegrees != 0 )
	{
		free(this->oneDegrees);
		this->oneDegrees = 0;
	}

	while ( this->augmentations.size() > 0 )
	{
		Augmentation* augment = this->augmentations.top();
		this->augmentations.pop();

		delete augment;
		augment = 0;
	}

	if ( this->g != 0 )
	{
		SG_FREE((*(this->g)));
		free(this->g);
		this->g = 0;
	}

	if ( this->small_g != 0 )
	{
		SG_FREE((*(this->small_g)));
		free(this->small_g);
		this->small_g = 0;
	}

	if ( this->openedges != 0 )
	{
		delete this->openedges;
	}
}
/**
 * compactTheGraph() Compact the graph g into small_g.
 */
void SaturationGraph::compactTheGraph()
{
	this->regenerateOpenEdges();

	if ( this->small_g != 0 )
	{
		/* we need to free small_g */
		SG_FREE((*(this->small_g)));
		free(this->small_g);
		this->small_g = 0;
	}

	clock_t start_c = clock();
	this->small_g = (sparsegraph*) malloc(sizeof(sparsegraph));

	SG_INIT((*(this->small_g)));

	/* small_g has two levels: one for each type (0/1) of edge */
	int sn = 2 * this->n;
	this->small_g->nv = sn;

	this->small_g->vlen = sn;
	this->small_g->dlen = sn;

	this->small_g->v = (size_t*) malloc(sn * sizeof(size_t));
	this->small_g->d = (int*) malloc(sn * sizeof(int));

	int vindex = 0;
	for ( int i = 0; i < this->n; i++ )
	{
		this->small_g->v[i] = vindex;
		this->small_g->d[i] = 1 + this->zeroDegrees[i];
		vindex += 1 + this->zeroDegrees[i];
	}
	for ( int i = 0; i < this->n; i++ )
	{
		this->small_g->v[this->n + i] = vindex;
		this->small_g->d[this->n + i] = 1 + this->oneDegrees[i];
		vindex += 1 + this->oneDegrees[i];
	}

	int sde = vindex;
	this->small_g->nde = sde;
	this->small_g->elen = sde;
	this->small_g->e = (int*) malloc(sde * sizeof(int));

	for ( int i = 0; i < this->n; i++ )
	{
		vindex = this->small_g->v[i];

		/* the cross-bar */
		this->small_g->e[vindex] = i + this->n;

		int ve = 1;
		for ( int j = 0; j < this->n; j++ )
		{
			if ( j != i )
			{
				int index = this->indexOf(i, j);

				/* 0-type edges in this layer */
				if ( this->adjmat[index] == 0 )
				{
					this->small_g->e[vindex + ve] = j;
					ve++;
				}
			}
		}
	}
	for ( int i = 0; i < this->n; i++ )
	{
		vindex = this->small_g->v[this->n + i];

		/* the cross-bar */
		this->small_g->e[vindex] = i;

		int ve = 1;
		for ( int j = 0; j < this->n; j++ )
		{
			if ( j != i )
			{
				int index = this->indexOf(i, j);

				/* 1-type edges in this layer */
				if ( this->adjmat[index] == 1 )
				{
					this->small_g->e[vindex + ve] = this->n + j;
					ve++;
				}
			}
		}
	}

	this->g_updated = false;

	clock_t end_c = clock();
	(this->time_in_compact) = this->time_in_compact + (double) (end_c - start_c) / (double) CLOCKS_PER_SEC;
}
Example #10
template<class ST> ST* CDenseFeatures<ST>::get_feature_vector(int32_t num, int32_t& len, bool& dofree)
{
	/* index conversion for subset, only for array access */
	int32_t real_num=m_subset_stack->subset_idx_conversion(num);

	len = num_features;

	if (feature_matrix.matrix)
	{
		dofree = false;
		return &feature_matrix.matrix[real_num * int64_t(num_features)];
	}

	ST* feat = NULL;
	dofree = false;

	if (feature_cache)
	{
		feat = feature_cache->lock_entry(real_num);

		if (feat)
			return feat;
		else
			feat = feature_cache->set_entry(real_num);
	}

	if (!feat)
		dofree = true;
	feat = compute_feature_vector(num, len, feat);

	if (get_num_preprocessors())
	{
		int32_t tmp_len = len;
		ST* tmp_feat_before = feat;
		ST* tmp_feat_after = NULL;

		for (int32_t i = 0; i < get_num_preprocessors(); i++)
		{
			CDensePreprocessor<ST>* p =
					(CDensePreprocessor<ST>*) get_preprocessor(i);
			// temporary hack
			SGVector<ST> applied = p->apply_to_feature_vector(
					SGVector<ST>(tmp_feat_before, tmp_len));
			tmp_feat_after = applied.vector;
			SG_UNREF(p);

			if (i != 0) // delete feature vector, except for the first one, i.e., feat
				SG_FREE(tmp_feat_before);
			tmp_feat_before = tmp_feat_after;
		}

		// note: tmp_feat_after should be checked as it is used by memcpy
		if (tmp_feat_after)
		{
			memcpy(feat, tmp_feat_after, sizeof(ST) * tmp_len);
			SG_FREE(tmp_feat_after);

			len = tmp_len;
		}
	}
	return feat;
}
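A short caller sketch mirroring the sparse case, assuming free_feature_vector() from the same CDenseFeatures class releases the buffer or cache lock obtained above:
/* Sketch: L1 norm of one feature vector of a CDenseFeatures<float64_t> object. */
float64_t l1_norm(CDenseFeatures<float64_t>* features, int32_t num)
{
	int32_t len=0;
	bool dofree=false;
	float64_t* vec=features->get_feature_vector(num, len, dofree);

	float64_t norm=0.0;
	for (int32_t k=0; k<len; k++)
		norm+=CMath::abs(vec[k]);

	/* unlocks the cache entry or frees the temporary buffer if dofree is set */
	features->free_feature_vector(vec, num, dofree);
	return norm;
}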