Example #1
void
matrix_srt(struct matrix *mm, const struct srt *srt) {
	if (!srt) {
		return;
	}
	scale_mat(mm->m, srt->scalex, srt->scaley);
	rot_mat(mm->m, srt->rot);
	mm->m[4] += srt->offx;
	mm->m[5] += srt->offy;
}
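Example #1 composes scale, rotation, and translation into a 2x3 affine matrix stored as six values, with m[4]/m[5] holding the translation. The scale_mat it calls is not shown; the sketch below is a hypothetical reconstruction under that layout (the int sx/sy parameters in Examples #7 and #10 suggest the real library scales in fixed point, which this sketch ignores for clarity):

/* Hypothetical sketch, not the library's actual code: scale the linear
 * part of a 2x3 affine matrix stored as {a, b, c, d, tx, ty}. */
static void
scale_mat(float *m, float sx, float sy) {
	m[0] *= sx;
	m[1] *= sx;
	m[2] *= sy;
	m[3] *= sy;
	/* m[4], m[5] (the translation) are left untouched; the caller in
	 * Example #1 adds the srt offset afterwards. */
}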
Example #2
void Tetro::draw(sf::RenderTarget& target, sf::RenderStates states) const {
	sf::VertexArray vertices(sf::Quads, 4*4);
	for (int i = 0; i < 4; i++) {
		sf::Vertex* quad = &vertices[4*i];
		sf::Transform t = scale_mat(CELL_WIDTH_HEIGHT);
		_blocks[i].drawVertices(quad, t, this->getColor());
	}
	const float final_x = CELL_WIDTH_HEIGHT * this->_col;
	const float final_y = CELL_WIDTH_HEIGHT * this->_row;
	states.transform.translate(final_x, final_y);
	target.draw(vertices, states);
}
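Here scale_mat returns an sf::Transform rather than mutating an array. It is not an SFML function; a plausible one-line helper, assuming a uniform scale, could be:

// Hypothetical helper assumed by Example #2: a uniform scale built
// with sf::Transform::scale (a real SFML member function).
static sf::Transform scale_mat(float factor) {
	sf::Transform t;          // default-constructed: identity
	t.scale(factor, factor);  // uniform scale in x and y
	return t;
}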
Example #3
void	do_scene_transform(t_app *app, t_scene *scn)
{
	(void)app;
	scn->mat = scale_mat(scn->scale);
	scn->mat = muli_mat4x4(rot_y_mat(scn->rot.y), scn->mat);
	scn->mat = muli_mat4x4(rot_x_mat(scn->rot.x), scn->mat);
	scn->mat = muli_mat4x4(rot_z_mat(scn->rot.z), scn->mat);
	scn->mat = muli_mat4x4(translate_mat(scn->pos), scn->mat);
}
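muli_mat4x4(A, B) evidently puts the newer transform A on the left, so with a column-vector convention (v' = M * v) the chain above applies scale first, then the Y, X, Z rotations, then the translation. A minimal sketch of the scale constructor, assuming t_matrix4x4 wraps a float m[4][4] and scn->scale is a t_vec3 (identity_mat4x4 is a hypothetical helper):

t_matrix4x4	scale_mat(t_vec3 scale)
{
	t_matrix4x4	m;

	m = identity_mat4x4();
	m.m[0][0] = scale.x;
	m.m[1][1] = scale.y;
	m.m[2][2] = scale.z;
	return (m);
}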
Example #4
void Lightning::fractalize_ball(LightningNode *lightningRoot)
{
	if (head == NULL || lightningRoot->depth == 0) {
		return;
	}
	Matrix transform = Matrix();
	Vector translateToTail;
	float scale;
	LightningNode *tmp;
	scale = pow(0.7, (maxFracDepth - lightningRoot->depth + 1));
	for (int i = 0; i < lightningRoot->numBranches; i++) {
		lightningRoot->branches[i] = getRootCopy();
		tmp = lightningRoot->branches[i];
		if (i == 0) {
			tailPoint = topPoint;
			headPoint = botPoint;
		} else if (i == 1) {
			tailPoint = botPoint;
			headPoint = topPoint;
		} else if (i == 2) {
			tailPoint = lPoint;
			headPoint = rPoint;
		} else if (i == 3) {
			tailPoint = rPoint;
			headPoint = lPoint;
		} else if (i == 4) {
			tailPoint = fPoint;
			headPoint = bPoint;
		} else if (i == 5) {
			tailPoint = bPoint;
			headPoint = fPoint;
		} else {
			exit(1);
		}
		transform = lightningRoot->transform * scale_mat(Vector(scale, scale, scale));
		translateToTail = (lightningRoot->transform * tailPoint) - (transform * (tmp->transform * headPoint));
		transform = trans_mat(translateToTail) * transform;
		applyToPath(tmp, transform, lightningRoot->depth - 1);
		// recurse through every node of the newly attached branch copy
		while (tmp != NULL) {
			fractalize(tmp);
			tmp = tmp->next;
		}
	}
}
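The translateToTail computation is the glue between segments: it measures the gap between where the parent's tail point lands and where the scaled branch's head point lands, then prepends a translation closing that gap. A hypothetical helper distilling the step, using the Matrix/Vector/Point/trans_mat names the example already relies on:

// Align a branch so its head lands exactly on the parent's tail.
Matrix glueToParent(const Matrix &parent, const Matrix &scaleRot,
                    const Matrix &branchOwn,
                    const Point &headPoint, const Point &tailPoint)
{
	Matrix t = parent * scaleRot;
	Vector gap = (parent * tailPoint) - (t * (branchOwn * headPoint));
	// After this, (result * branchOwn) * headPoint == parent * tailPoint.
	return trans_mat(gap) * t;
}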
Example #5
void		do_transform(t_app *app, t_obj *obj, t_matrix4x4 mat)
{
	unsigned int	i;

	i = 0;
	obj->mat = scale_mat(obj->scale);
	obj->mat = muli_mat4x4(rot_y_mat(obj->rot.y), obj->mat);
	obj->mat = muli_mat4x4(rot_x_mat(obj->rot.x), obj->mat);
	obj->mat = muli_mat4x4(rot_z_mat(obj->rot.z), obj->mat);
	obj->mat = muli_mat4x4(translate_mat(obj->pos), obj->mat);
	obj->mat = muli_mat4x4(mat, obj->mat);
	obj->mat = muli_mat4x4(cam_mat(app), obj->mat);
	while (i < obj->nbr_vecs)
	{
		obj->vecs[i] = muli_mat4x4_vec4(obj->mat, obj->vecs_orig[i]);
		i++;
	}
}
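Example #5 bakes the object, parent, and camera matrices into obj->mat once, so each vertex costs a single matrix-vector multiply. A sketch of that multiply, assuming t_matrix4x4 holds a row-major float m[4][4] and t_vec4 has x/y/z/w members:

t_vec4	muli_mat4x4_vec4(t_matrix4x4 m, t_vec4 v)
{
	t_vec4	r;

	r.x = m.m[0][0] * v.x + m.m[0][1] * v.y + m.m[0][2] * v.z + m.m[0][3] * v.w;
	r.y = m.m[1][0] * v.x + m.m[1][1] * v.y + m.m[1][2] * v.z + m.m[1][3] * v.w;
	r.z = m.m[2][0] * v.x + m.m[2][1] * v.y + m.m[2][2] * v.z + m.m[2][3] * v.w;
	r.w = m.m[3][0] * v.x + m.m[3][1] * v.y + m.m[3][2] * v.z + m.m[3][3] * v.w;
	return (r);
}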
Example #6
void Lightning::fractalize(LightningNode *lightningRoot)
{
	if (head == NULL || lightningRoot->depth == 0) {
		return;
	}
	if (fracType == 1) {
		fractalize_ball(lightningRoot);
		return;
	}
	Matrix transform = Matrix();
	Vector translateToTail;
	double rotX;
	double rotY;
	double rotZ;
	float scale;
	LightningNode *tmp;
	scale = pow(FRACSCALE, (maxFracDepth - lightningRoot->depth + 1));
	for (int i = 0; i < lightningRoot->numBranches; i++) {
		lightningRoot->branches[i] = getRootCopy();
		tmp = lightningRoot->branches[i];
		srand(time(NULL) * (i + 5));
		rotX = ((double)(rand() % 60) - 30.0) * PI / 180.0;
		srand(time(NULL) * i);
		rotY = ((double)(rand() % 360)) * PI / 180.0;
		srand(time(NULL) + i);
		rotZ = ((double)(rand() % 60) - 30.0) * PI / 180.0;
		transform = rotY_mat(rotY) * (rotZ_mat(rotZ) * rotX_mat(rotX)) * scale_mat(Vector(scale, scale, scale));
		transform = lightningRoot->transform * transform;
		translateToTail = (lightningRoot->transform * tailPoint) - (transform * (tmp->transform * headPoint));
		transform = trans_mat(translateToTail) * transform;
		applyToPath(tmp, transform, lightningRoot->depth - 1);
		// recurse through every node of the newly attached branch copy
		while (tmp != NULL) {
			fractalize(tmp);
			tmp = tmp->next;
		}
	}
}
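Examples #6 and #9 reseed rand() with time(NULL)-derived values before every draw. Since time(NULL) changes only once per second, iterations within the same second can repeat seeds, and i == 0 yields srand(0). A sketch of the same three angles with C++11 <random>, seeding a single engine once:

#include <cmath>
#include <random>

// One engine, seeded once; no reseeding per draw.
std::mt19937 rng{std::random_device{}()};
std::uniform_real_distribution<double> swing(-30.0, 30.0);    // degrees
std::uniform_real_distribution<double> full_turn(0.0, 360.0);

const double PI = 3.14159265358979323846;
double rotX = swing(rng) * PI / 180.0;
double rotY = full_turn(rng) * PI / 180.0;
double rotZ = swing(rng) * PI / 180.0;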
Example #7
void
matrix_scale(struct matrix *m, int sx, int sy) {
	scale_mat(m->m, sx, sy);
}
Example #8
FlattenedNode *Lightning::lightning_recursive_flatten(FlattenedNode *graphList, Matrix transform, Matrix *rTrans, SceneNode *child, Point *eyePoint) 
{
	Matrix rec_transform = Matrix();
	Matrix tmp_trans = Matrix();
	FlattenedNode* tmp_flattened;
	if (rTrans == NULL) {
		rec_transform = transform * rec_transform;
	} else {
		rec_transform = *rTrans;
	}
	if (child == NULL) {
		return graphList;
	}
	for (int i = 0; i < child->transformations.size(); i++) {
		switch (child->transformations[i]->type)
		{
			case TRANSFORMATION_TRANSLATE:
				{
					tmp_trans = tmp_trans * trans_mat(child->transformations[i]->translate);
					break;
				}
			case TRANSFORMATION_SCALE:
				{
					tmp_trans = tmp_trans * scale_mat(child->transformations[i]->scale);
					break;
				}
			case TRANSFORMATION_ROTATE:
				{
					tmp_trans = tmp_trans * rot_mat(child->transformations[i]->rotate, child->transformations[i]->angle);
					break;
				}
			case TRANSFORMATION_MATRIX:
				{
					tmp_trans = tmp_trans * child->transformations[i]->matrix;
					break;
				}
			default:
				break;
		}
	}
	rec_transform = rec_transform * tmp_trans;
	for (int i = 0; i < child->primitives.size(); i++) {
		tmp_flattened = new FlattenedNode();
		tmp_flattened->primitive = child->primitives[i];
		if (fracType == 1) {
			tmp_flattened->transform = scale_mat(Vector(0.35, 0.35, 0.35)) * rec_transform;
		} else {
			tmp_flattened->transform = scale_mat(Vector(STOCKSCALE, STOCKSCALE, STOCKSCALE)) * rec_transform;
		}
		tmp_flattened->invTransform = invert(tmp_flattened->transform);
		tmp_flattened->objEyeP = tmp_flattened->invTransform * *eyePoint;
		SceneMaterial *currentMaterial = &(tmp_flattened->primitive->material);
		if (currentMaterial->textureMap != NULL && currentMaterial->textureMap->isUsed) {
			currentMaterial->openedTMap = new ppm(currentMaterial->textureMap->filename);
		}
		tmp_flattened->next = graphList;
		graphList = tmp_flattened;
	}
	for (int i = 0; i < child->children.size(); i++) {
		graphList = lightning_recursive_flatten(graphList, transform, &rec_transform, child->children[i], eyePoint);
	}
	return graphList;
}
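Example #8 folds each node's transformation list onto the inherited rec_transform by right-multiplication, so with column vectors the root's transform applies last. The per-node switch loop could be condensed into a helper like the hypothetical sketch below (auto stands in for the unnamed transformation element type):

Matrix localMatrix(SceneNode *node)
{
	Matrix local = Matrix();  // identity, as in the example above
	for (size_t i = 0; i < node->transformations.size(); i++) {
		auto *t = node->transformations[i];
		switch (t->type) {
		case TRANSFORMATION_TRANSLATE: local = local * trans_mat(t->translate); break;
		case TRANSFORMATION_SCALE:     local = local * scale_mat(t->scale); break;
		case TRANSFORMATION_ROTATE:    local = local * rot_mat(t->rotate, t->angle); break;
		case TRANSFORMATION_MATRIX:    local = local * t->matrix; break;
		default: break;
		}
	}
	return local;  // caller composes: rec_transform = rec_transform * local
}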
Example #9
void Lightning::generate(unsigned length)
{
	if (fracType == 1) {
		generate_ball();
		return;
	}
	double rotX;
	double rotY;
	double rotZ;
	float scale;
	int branchingTest;
	unsigned totalBolts = 0;
	Point minPoint, maxPoint;
	Point tmpPoint;
	Vector translate;
	LightningNode *tmp;
	LightningNode *prev;
	if (head != NULL || length == 0) {
		refresh();
	}
	if (length == 0) {
		return;
	}
	head = new LightningNode;
	tmp = head;
	prev = head;
	for (unsigned i = 0; i < length; i++) {
		tmp->depth = maxFracDepth;
		//Random rotation of the bolt
		srand(time(NULL)*i);
		rotX = ((double)(rand() % 60) - 30.0) * PI / 180.0;
		srand(time(NULL)*(i+i));
		rotY = ((double)(rand() % 360)) * PI / 180.0;
		srand(time(NULL)*(i * i));
		rotZ = ((double)(rand() % 60) - 30.0) * PI / 180.0;
		tmp->transform = rotY_mat(rotY) * (rotZ_mat(rotZ) * rotX_mat(rotX));
		
		//Random scaling of the bolt
		scale = 1.0;//((double)(rand() % 3) / 10.0) + 0.8;
		tmp->transform = scale_mat(Vector(scale, scale, scale)) * tmp->transform;

		//Inheriting previous matrices
		if (tmp == head) {
			minPoint = tmp->transform * headPoint;
			maxPoint = minPoint;
			tmpPoint = tmp->transform * tailPoint;
			if (tmpPoint[0] > maxPoint[0]) {
				maxPoint[0] = tmpPoint[0];
			} else if (tmpPoint[0] < minPoint[0]) {
				minPoint[0] = tmpPoint[0];
			} 
			if (tmpPoint[1] > maxPoint[1]) {
				maxPoint[1] = tmpPoint[1];
			} else if (tmpPoint[1] < minPoint[1]) {
				minPoint[1] = tmpPoint[1];
			} 
			if (tmpPoint[2] > maxPoint[2]) {
				maxPoint[2] = tmpPoint[2];
			} else if (tmpPoint[2] < minPoint[2]) {
				minPoint[2] = tmpPoint[2];
			}
		} else {
			tmp->transform = prev->transform * tmp->transform;
			tmpPoint = tmp->transform * headPoint;
			translate = (prev->transform * tailPoint) - (tmpPoint);
			tmp->transform = trans_mat(translate) * tmp->transform;
			if (tmpPoint[0] > maxPoint[0]) {
				maxPoint[0] = tmpPoint[0];
			} else if (tmpPoint[0] < minPoint[0]) {
				minPoint[0] = tmpPoint[0];
			} 
			if (tmpPoint[1] > maxPoint[1]) {
				maxPoint[1] = tmpPoint[1];
			} else if (tmpPoint[1] < minPoint[1]) {
				minPoint[1] = tmpPoint[1];
			} 
			if (tmpPoint[2] > maxPoint[2]) {
				maxPoint[2] = tmpPoint[2];
			} else if (tmpPoint[2] < minPoint[2]) {
				minPoint[2] = tmpPoint[2];
			}
			tmpPoint = tmp->transform * tailPoint;
			if (tmpPoint[0] > maxPoint[0]) {
				maxPoint[0] = tmpPoint[0];
			} else if (tmpPoint[0] < minPoint[0]) {
				minPoint[0] = tmpPoint[0];
			} 
			if (tmpPoint[1] > maxPoint[1]) {
				maxPoint[1] = tmpPoint[1];
			} else if (tmpPoint[1] < minPoint[1]) {
				minPoint[1] = tmpPoint[1];
			}
			if (tmpPoint[2] > maxPoint[2]) {
				maxPoint[2] = tmpPoint[2];
			} else if (tmpPoint[2] < minPoint[2]) {
				minPoint[2] = tmpPoint[2];
			}
		}

		//Give it branches
		srand((time(NULL) * 24678 * i) + (i *2));
		branchingTest = (rand()) % 200;
		if (branchingTest >= THRESHOLD3) {
			tmp->numBranches = 3;
			tmp->branches = new LightningNode*[3];
			tmp->branches[0] = NULL;
			tmp->branches[1] = NULL;
			tmp->branches[2] = NULL;
			branchesPerLine += 3;
		} else if (branchingTest >= THRESHOLD2) {
			tmp->numBranches = 2;
			tmp->branches = new LightningNode*[2];
			tmp->branches[0] = NULL;
			tmp->branches[1] = NULL;
			branchesPerLine += 2;
		} else if (branchingTest >= THRESHOLD1) {
			tmp->numBranches = 1;
			tmp->branches = new LightningNode*[1];
			tmp->branches[0] = NULL;
			branchesPerLine++;
		} else {
			tmp->numBranches = 0;
			tmp->branches = NULL;
		}

		if (i < (length - 1)) {
			tmp->next = new LightningNode;
			prev = tmp;
			tmp = tmp->next;
			tmp->next = NULL;
		}
	}
	std::cout<<"Total bolts rendered: ";
	for (int i = 0; i <= maxFracDepth; i++) {
		totalBolts += length * pow(branchesPerLine, i);
	}
	std::cout<<totalBolts<<std::endl;
	centerAtOrigin(maxPoint, minPoint);
	//TODO: Fractal compatibility
	tmp = head;
	while (tmp != NULL) {
		fractalize(tmp);
		tmp = tmp->next;
	}
}
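The three nine-branch min/max blocks in generate() repeat the same bounding-box update. A small hypothetical helper would reduce each block to one call, e.g. updateBounds(minPoint, maxPoint, tmp->transform * headPoint) (assuming Point exposes a const operator[]):

// Grow the axis-aligned bounds to cover point p.
static void updateBounds(Point &minPoint, Point &maxPoint, const Point &p)
{
	for (int axis = 0; axis < 3; axis++) {
		if (p[axis] > maxPoint[axis])
			maxPoint[axis] = p[axis];
		else if (p[axis] < minPoint[axis])
			minPoint[axis] = p[axis];
	}
}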
Example #10
void rtgui_matrix_scale(struct rtgui_matrix *m, int sx, int sy)
{
	scale_mat(m->m, sx, sy);
}
Example #11
int main(int argc, char* argv[])
{
    TEST_PARAS myparas = parse_test_paras(argc, argv, testfile, embeddingfile, trainfile);
    printf("Predicting...\n");
    if(!myparas.allow_self_transition)
	printf("Do not allow self-transtion.\n");

    if (!myparas.underflow_correction)
        printf("Underflow correction disabled\n");

    int new_test_song_exp = (myparas.train_test_hash_file[0] != '\0');

    if(myparas.tagfile[0] == '\0' && new_test_song_exp)
    {
	printf("Have to support with a tag file if you want to test on unseen songs.\n");
	exit(1);
    }


    int d;
    int m; 
    int l;
    int i; 
    int j;
    int s;
    int fr;
    int to;
    double* bias_terms = 0;
    double** X = read_matrix_file(embeddingfile, &l, &d, &bias_terms);
    double** realX;
    PDATA pd = read_playlists_data(testfile);
    //int k = pd.num_songs;
    int k;
    double llhood = 0.0;
    double uniform_llhood = 0.0;
    double realn = 0.0;
    double not_realn= 0.0;
    int* train_test_hash;
    int k_train;
    int k_test;


    TDATA td;

    if(!new_test_song_exp)
    {
	k = pd.num_songs;
	if(myparas.tagfile[0] != '\0')
	{
	    td = read_tag_data(myparas.tagfile);
	    m = td.num_tags;
	    myparas.num_points = l / (k + m); 
	    realX = zerosarray(k * myparas.num_points, d);
	    calculate_realX(X, realX, td, k, m, d, myparas.num_points);
	    free_tag_data(td);

	    if(myparas.tag_ebd_filename[0] != '\0')
		write_embedding_to_file(X + k * myparas.num_points, m * myparas.num_points, d, myparas.tag_ebd_filename, 0);
	}
	else
	{
	    myparas.num_points = l / k;
	    realX = zerosarray(k * myparas.num_points, d);
	    Array2Dcopy(X, realX, l, d);
	}
	Array2Dfree(X, l, d);
    }
    else
    {
	printf("Prediction on unseen songs.\n");
	td = read_tag_data(myparas.tagfile);
	m = td.num_tags;
	k = td.num_songs;
	train_test_hash = read_hash(myparas.train_test_hash_file, &k_train);
	k_test = k - k_train;
	printf("Number of new songs %d.\n", k_test);
	myparas.num_points = l / (k_train + m); 
	realX = zerosarray(k * myparas.num_points, d);
	calculate_realX_with_hash(X, realX, td, k, m, d, myparas.num_points, k_train, train_test_hash);
	free_tag_data(td);
	Array2Dfree(X, l, d);
    }

    if(myparas.song_ebd_filename[0] != '\0')
	write_embedding_to_file(realX, k * myparas.num_points, d, myparas.song_ebd_filename, 0);
    if(myparas.bias_ebd_filename[0] != '\0')
    {
	FILE* fp = fopen(myparas.bias_ebd_filename, "w");
    
	for( i = 0; i < k ;i++)
	{
	    fprintf(fp, "%f", bias_terms[i]);
	    if ( i != k - 1)
		fputc('\n', fp);
	}

	fclose(fp);
    }

    double** square_dist;
    if(myparas.square_dist_filename[0] != '\0')
	square_dist = zerosarray(k, k);


    int n = 0;
    for(i = 0; i < pd.num_playlists; i ++)
	if(pd.playlists_length[i] > 0)
	    n += pd.playlists_length[i] - 1;
    printf("Altogether %d transitions.\n", n);fflush(stdout);

    PHASH* tcount;
    PHASH* tcount_train;
    double** tcount_full;
    double** tcount_full_train;

    if(myparas.use_hash_TTable)
        tcount = create_empty_hash(2 * n);
    else
        tcount_full = zerosarray(k, k);
    HELEM temp_elem;
    TPAIR temp_pair;
    int idx;
    double temp_val;
    for(i = 0; i < pd.num_playlists; i ++)
    {
	if(pd.playlists_length[i] > myparas.range)
	{
	    for(j = 0; j < pd.playlists_length[i] - myparas.range; j++)
	    {
                temp_pair.fr = pd.playlists[i][j];
                temp_pair.to = pd.playlists[i][j + myparas.range];
                //printf("(%d, %d)\n", temp_pair.fr, temp_pair.to);
                if(temp_pair.fr >= 0 && temp_pair.to >= 0)
                {
                    if(myparas.use_hash_TTable)
                    {
                        idx = exist_in_hash(tcount, temp_pair);
                        if(idx < 0)
                        {
                            temp_elem.key = temp_pair;
                            temp_elem.val = 1.0;
                            add_entry(tcount, temp_elem);
                        }
                        else
                            update_with(tcount, idx, 1.0);
                    }
                    else
                        tcount_full[temp_pair.fr][temp_pair.to] += 1.0;
                }
	    }
	}
    }

    TRANSITIONTABLE ttable;
    TRANSITIONTABLE BFStable;


    //Need to use the training file
    if(myparas.output_distr)
    {
	PDATA pd_train = read_playlists_data(trainfile);
        if(myparas.use_hash_TTable)
            tcount_train = create_empty_hash(2 * n);
        else
            tcount_full_train = zerosarray(k, k);
        for(i = 0; i < pd_train.num_playlists; i ++)
        {
            if(pd_train.playlists_length[i] > 1)
            {
                for(j = 0; j < pd_train.playlists_length[i] - 1; j++)
                {
                    temp_pair.fr = pd_train.playlists[i][j];
                    temp_pair.to = pd_train.playlists[i][j + 1];
                    if(myparas.use_hash_TTable)
                    {
                        idx = exist_in_hash(tcount_train, temp_pair);
                        if(idx < 0)
                        {
                            temp_elem.key = temp_pair;
                            temp_elem.val = 1.0;
                            add_entry(tcount_train, temp_elem);
                        }
                        else
                            update_with(tcount_train, idx, 1.0);
                    }
                    else
                        tcount_full_train[temp_pair.fr][temp_pair.to] += 1.0;
                }
            }
        }
    }

    FILE* song_distr_file;
    FILE* trans_distr_file;
    double* song_sep_ll;

    if(myparas.output_distr)
    {
        printf("Output likelihood distribution file turned on.\n");
        song_distr_file = fopen(songdistrfile, "w");
        trans_distr_file = fopen(transdistrfile, "w");
        song_sep_ll = (double*)calloc(k, sizeof(double));
    }

    int* test_ids_for_new_songs;
    if(new_test_song_exp)
	test_ids_for_new_songs = get_test_ids(k, k_train, train_test_hash);


    for(fr = 0; fr < k; fr++)
    {
	int collection_size;
	int* collection_idx;
	if(myparas.fast_collection)
	{
	    collection_size = (BFStable.parray)[fr].length;
	    if (collection_size == 0)
		continue;

	    collection_idx = (int*)malloc(collection_size * sizeof(int));
	    LINKEDELEM* tempp = (BFStable.parray)[fr].head;
	    for(i = 0; i < collection_size; i++)
	    {
		collection_idx[i] = tempp -> idx; 
		tempp = tempp -> pnext;
	    }
	}
	else if(new_test_song_exp)
	{
	    collection_size = k_test;
	    collection_idx = (int*)malloc(collection_size * sizeof(int));
	    int_list_copy(test_ids_for_new_songs, collection_idx, k_test);
	}
	else
	    collection_size = k;

	double** delta = zerosarray(collection_size, d);
	double* p = (double*)calloc(collection_size, sizeof(double));
	double** tempkd = zerosarray(collection_size, d);
        double* tempk = (double*)calloc(collection_size, sizeof(double));
    double** mid_delta = 0;
    double* mid_p = 0;
    double** mid_tempkd = 0;

    // I get a seg fault when these get freed. Don't understand.
    if (myparas.num_points == 3) {
	    mid_delta = zerosarray(collection_size, d);
        mid_p = (double*)calloc(collection_size, sizeof(double));
	    mid_tempkd = zerosarray(collection_size, d);
    }

	for(j = 0; j < collection_size; j++)
	{
	    for(i = 0; i < d; i++)
	    {
		if(myparas.fast_collection || new_test_song_exp)
		    delta[j][i] = realX[fr][i] - realX[(myparas.num_points - 1) * k + collection_idx[j]][i];
		else
		    delta[j][i] = realX[fr][i] - realX[(myparas.num_points - 1) * k + j][i];
	    }
        if(myparas.num_points == 3) {
            /* fill the midpoint deltas in their own loop over i: the
               loop above left i == d, so reusing it here would index
               out of bounds */
            for(i = 0; i < d; i++)
            {
                if(myparas.fast_collection || new_test_song_exp)
                    mid_delta[j][i] = realX[k + fr][i] - realX[k + collection_idx[j]][i];
                else
                    mid_delta[j][i] = realX[k + fr][i] - realX[k + j][i];
            }
        }
	}

	mat_mult(delta, delta, tempkd, collection_size, d);
	scale_mat(tempkd, collection_size, d, -1.0);
	sum_along_direct(tempkd, p, collection_size, d, 1);

	if(myparas.square_dist_filename[0] != '\0')
	    /* note: p has collection_size entries, so this assumes the
	       full collection is in use (collection_size == k) */
	    for(i = 0; i < k; i++)
		square_dist[fr][i] = -p[i];

    if (bias_terms != 0)
        add_vec(p, bias_terms, collection_size, 1.0);

    if (myparas.num_points == 3) {
        // Just use the mid_deltas (midpoint differences): square them,
        // then sum and add to the p vector directly, then the midpoint
        // probability is incorporated
    	mat_mult(mid_delta, mid_delta, mid_tempkd, collection_size, d);
    	scale_mat(mid_tempkd, collection_size, d, -1.0);
    	sum_along_direct(mid_tempkd, mid_p, collection_size, d, 1);
        add_vec(p, mid_p, collection_size, 1.0); 
    }

    if (myparas.underflow_correction == 1) {
        double max_val = p[0];
        for(i = 0; i < collection_size; i++)
            max_val = p[i] > max_val? p[i] : max_val;
        vec_scalar_sum(p, -max_val, collection_size);
    }

    Veccopy(p, tempk, collection_size);
    exp_on_vec(tempk, collection_size);

	//exp_on_vec(p, collection_size);

    // underflow checking:

//    for (i = 0; i < collection_size; i++)
//        if (p[i] < 0.000001)
//            p[i] = 0.000001;

	double temp_sum;
	if(myparas.allow_self_transition)
	    temp_sum = sum_vec(tempk, collection_size);
	else
	{
	    temp_sum = 0.0;
	    for(i = 0; i < collection_size; i++)
		/* collection_idx is only populated for restricted collections */
		if(!myparas.fast_collection && !new_test_song_exp)
		    temp_sum += (i != fr)? tempk[i] : 0.0;
		else
		    temp_sum += (collection_idx[i] != fr)? tempk[i] : 0.0;
	}
        vec_scalar_sum(p, -log(temp_sum), collection_size);
	//scale_vec(p, collection_size, 1.0 / temp_sum);

	//printf("done...\n");
	for(to = 0; to < k; to++)
	{
	    if(myparas.allow_self_transition || (!myparas.allow_self_transition && fr != to))
	    {
		temp_pair.fr = fr;
		temp_pair.to = to;
		//printf("(%d, %d)\n", fr, to);
                if(myparas.use_hash_TTable)
                    idx = exist_in_hash(tcount, temp_pair); 
                else
                    idx = tcount_full[fr][to] > 0.0? 1 : -1;
                //printf("%d\n", idx);fflush(stdout);
                int idx_train;
                //printf("done...\n");fflush(stdout);
                
                if(myparas.output_distr)
                {
                    if(myparas.use_hash_TTable)
                        idx_train = exist_in_hash(tcount_train, temp_pair);
                    else
                        idx_train = tcount_full_train[fr][to] > 0.0? 1 : -1;
                }



		if(idx >= 0)
		{
		    if(myparas.fast_collection || new_test_song_exp)
		    {
			s = -1;
			for(i = 0; i < collection_size; i++)
			{
			    if(collection_idx[i] == to)
			    {
				s = i;
				break;
			    }
			}
		    }
		    else
			s = to;

		    //printf("%d\n", idx);fflush(stdout);
                    if(myparas.use_hash_TTable)
                        temp_val = retrieve_value_with_idx(tcount, idx);
                    else
                        temp_val = tcount_full[fr][to];

		    if(s < 0)
			not_realn += temp_val;
		    else
		    {
			//printf("s = %d\n", s);
			llhood += temp_val * p[s];
			if(new_test_song_exp)
			    uniform_llhood += temp_val * log(1.0 / (double) k_test);
			realn += temp_val;

                        if(myparas.output_distr)
                        {
                            //double temp_val_train =  idx_train >= 0? retrieve_value_with_idx(tcount_train, idx_train): 0.0;
                            double temp_val_train;
                            if(idx_train < 0)
                                temp_val_train = 0.0;
                            else
                                temp_val_train = myparas.use_hash_TTable ? retrieve_value_with_idx(tcount_train, idx_train) : tcount_full_train[fr][to];

                            song_sep_ll[fr] += temp_val * p[s];
                            song_sep_ll[to] += temp_val * p[s];
                            fprintf(trans_distr_file, "%d %d %f\n", (int)temp_val_train, (int)temp_val, temp_val * p[s]);
                        }
		    }
		}
	    }
	}




	Array2Dfree(delta, collection_size, d);
	free(p);
	Array2Dfree(tempkd, collection_size, d);
        free(tempk);
    if (myparas.num_points == 3) {
    	Array2Dfree(mid_delta, collection_size, d);
	    free(mid_p);
    	Array2Dfree(mid_tempkd, collection_size, d);
    }
	if(myparas.fast_collection || new_test_song_exp)
	    free(collection_idx);
    }

    if(myparas.output_distr)
    {
	printf("Writing song distr.\n");
	for(i = 0; i < k; i++)
	    fprintf(song_distr_file, "%d %f\n", (int)(pd.id_counts[i]), song_sep_ll[i]);
	fclose(song_distr_file);
	fclose(trans_distr_file);
	free(song_sep_ll);
    }

    llhood /= realn;
    printf("Avg log-likelihood on test: %f\n", llhood);
    if(myparas.fast_collection)
	printf("Ratio of transitions that do not appear in the training set: %f\n", not_realn / (realn + not_realn));
    if(new_test_song_exp)
    {
	uniform_llhood /= realn;
	printf("Avg log-likelihood for uniform baseline: %f\n", uniform_llhood);
    }

    if(myparas.use_hash_TTable)
        free_hash(tcount);
    else
        Array2Dfree(tcount_full, k, k);
    free_playlists_data(pd);
    if(myparas.output_distr)
    {
        if(myparas.use_hash_TTable)
            free_hash(tcount_train);
        else
            Array2Dfree(tcount_full_train, k, k);
    }
    Array2Dfree(realX, k * myparas.num_points, d);

    if(new_test_song_exp)
    {
	free(train_test_hash);
	free(test_ids_for_new_songs);
    }

    if(myparas.square_dist_filename[0] != '\0')
    {
	write_embedding_to_file(square_dist, k, k, myparas.square_dist_filename, 0); 
	Array2Dfree(square_dist, k, k);
    }
}
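In Example #11, scale_mat is a dense-matrix routine rather than a transform constructor: mat_mult squares delta element-wise, scale_mat(tempkd, collection_size, d, -1.0) negates it, and sum_along_direct row-sums it, leaving p[j] = -||delta[j]||^2. A sketch of that scaling step, under the double** conventions used above:

/* Hypothetical sketch matching the calls above: multiply every entry
 * of an r-by-c matrix by scalar s (here -1.0). */
void scale_mat(double **mat, int r, int c, double s)
{
    int i;
    int j;

    for(i = 0; i < r; i++)
        for(j = 0; j < c; j++)
            mat[i][j] *= s;
}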
Example #12
  void
  pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::recognize ()
  {

    models_.reset (new std::vector<ModelT>);
    transforms_.reset (new std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> >);

    PointInTPtr processed (new pcl::PointCloud<PointInT>);
    PointInTPtr in (new pcl::PointCloud<PointInT>);

    std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > signatures;
    std::vector < Eigen::Vector3d > centroids;

    if (indices_.size ())
      pcl::copyPointCloud (*input_, indices_, *in);
    else
      in = input_;

    {
      pcl::ScopeTime t ("Estimate feature");
      micvfh_estimator_->estimate (in, processed, signatures, centroids);
    }

    std::vector<index_score> indices_scores;
    descriptor_distances_.clear ();

    if (signatures.size () > 0)
    {

      {
        pcl::ScopeTime t_matching ("Matching and roll...");

        if (use_single_categories_ && (categories_to_be_searched_.size () > 0))
        {

          //perform search of the different signatures in the categories_to_be_searched_
          for (size_t c = 0; c < categories_to_be_searched_.size (); c++)
          {
            std::cout << "Using category:" << categories_to_be_searched_[c] << std::endl;
            for (size_t idx = 0; idx < signatures.size (); idx++)
            {
              /*double* hist = signatures[idx].points[0].histogram;
               std::vector<double> std_hist (hist, hist + getHistogramLength (dummy));
               flann_model histogram ("", std_hist);
               flann::Matrix<int> indices;
               flann::Matrix<double> distances;

               std::map<std::string, int>::iterator it;
               it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);

               assert (it != category_to_vectors_indices_.end ());
               nearestKSearch (single_categories_index_[it->second], histogram, nmodels_, indices, distances);*/

              double* hist = signatures[idx].points[0].histogram;
              int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(double);
              std::vector<double> std_hist (hist, hist + size_feat);
              //ModelT empty;

              flann_model histogram;
              histogram.descr = std_hist;
              flann::Matrix<int> indices;
              flann::Matrix<double> distances;

              std::map<std::string, int>::iterator it;
              it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);
              assert (it != category_to_vectors_indices_.end ());

              nearestKSearch (single_categories_index_[it->second], histogram, NN_, indices, distances);
              //gather NN-search results
              double score = 0;
              for (size_t i = 0; i < NN_; ++i)
              {
                score = distances[0][i];
                index_score is;
                is.idx_models_ = single_categories_pointers_to_models_[it->second]->at (indices[0][i]);
                is.idx_input_ = static_cast<int> (idx);
                is.score_ = score;
                indices_scores.push_back (is);
              }
            }

            //we cannot add more than nmodels per category, so sort here and remove offending ones...
            std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);
            indices_scores.resize ((c + 1) * NN_);
          }
        }
        else
        {
          for (size_t idx = 0; idx < signatures.size (); idx++)
          {

            double* hist = signatures[idx].points[0].histogram;
            int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(double);
            std::vector<double> std_hist (hist, hist + size_feat);
            //ModelT empty;

            flann_model histogram;
            histogram.descr = std_hist;

            flann::Matrix<int> indices;
            flann::Matrix<double> distances;
            nearestKSearch (flann_index_, histogram, NN_, indices, distances);

            //gather NN-search results
            double score = 0;
            for (int i = 0; i < NN_; ++i)
            {
              score = distances[0][i];
              index_score is;
              is.idx_models_ = indices[0][i];
              is.idx_input_ = static_cast<int> (idx);
              is.score_ = score;
              indices_scores.push_back (is);

              //ModelT m = flann_models_[indices[0][i]].model;
            }
          }
        }

        std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);

        /*
         * There might be duplicated candidates, in those cases it makes sense to take
         * the closer one in descriptor space
         */

        typename std::map<flann_model, bool> found;
        typename std::map<flann_model, bool>::iterator it_map;
        for (size_t i = 0; i < indices_scores.size (); i++)
        {
          flann_model m = flann_models_[indices_scores[i].idx_models_];
          it_map = found.find (m);
          if (it_map == found.end ())
          {
            indices_scores[found.size ()] = indices_scores[i];
            found[m] = true;
          }
        }
        indices_scores.resize (found.size ());

        int num_n = std::min (NN_, static_cast<int> (indices_scores.size ()));

        /*
         * Filter some hypothesis regarding to their distance to the first neighbour
         */

        /*std::vector<index_score> indices_scores_filtered;
         indices_scores_filtered.resize (num_n);
         indices_scores_filtered[0] = indices_scores[0];

         double best_score = indices_scores[0].score_;
         int kept = 1;
         for (int i = 1; i < num_n; ++i)
         {
         //std::cout << best_score << indices_scores[i].score_ << (best_score / indices_scores[i].score_) << std::endl;
         if ((best_score / indices_scores[i].score_) > 0.65)
         {
         indices_scores_filtered[i] = indices_scores[i];
         kept++;
         }

         //best_score = indices_scores[i].score_;
         }

         indices_scores_filtered.resize (kept);
         //std::cout << indices_scores_filtered.size () << " § " << num_n << std::endl;

         indices_scores = indices_scores_filtered;
         num_n = indices_scores.size ();*/
        std::cout << "Number of object hypotheses... " << num_n << std::endl;

        std::vector<bool> valid_trans;
        std::vector < Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> > transformations;

        micvfh_estimator_->getValidTransformsVec (valid_trans);
        micvfh_estimator_->getTransformsVec (transformations);

        for (int i = 0; i < num_n; ++i)
        {
          ModelT m = flann_models_[indices_scores[i].idx_models_].model;
          int view_id = flann_models_[indices_scores[i].idx_models_].view_id;
          int desc_id = flann_models_[indices_scores[i].idx_models_].descriptor_id;

          int idx_input = indices_scores[i].idx_input_;

          std::cout << m.class_ << "/" << m.id_ << " ==> " << indices_scores[i].score_ << std::endl;

          Eigen::Matrix4d roll_view_pose;
          bool roll_pose_found = getRollPose (m, view_id, desc_id, roll_view_pose);

          if (roll_pose_found && valid_trans[idx_input])
          {
            Eigen::Matrix4d transposed = roll_view_pose.transpose ();

            //std::cout << transposed << std::endl;

            PointInTPtr view;
            getView (m, view_id, view);

            Eigen::Matrix4d model_view_pose;
            getPose (m, view_id, model_view_pose);

            Eigen::Matrix4d scale_mat;
            scale_mat.setIdentity (4, 4);

            if (compute_scale_)
            {
              //compute scale using the whole view
              PointInTPtr view_transformed (new pcl::PointCloud<PointInT>);
              Eigen::Matrix4d hom_from_OVC_to_CC;
              hom_from_OVC_to_CC = transformations[idx_input].inverse () * transposed;
              pcl::transformPointCloud (*view, *view_transformed, hom_from_OVC_to_CC);

              Eigen::Vector3d input_centroid = centroids[indices_scores[i].idx_input_];
              Eigen::Vector3d view_centroid;
              getCentroid (m, view_id, desc_id, view_centroid);

              Eigen::Vector4d cmatch4f (view_centroid[0], view_centroid[1], view_centroid[2], 0);
              Eigen::Vector4d cinput4f (input_centroid[0], input_centroid[1], input_centroid[2], 0);

              Eigen::Vector4d max_pt_input;
              pcl::getMaxDistance (*processed, cinput4f, max_pt_input);
              max_pt_input[3] = 0;
              double max_dist_input = (cinput4f - max_pt_input).norm ();

              //compute max dist for transformed model_view
              pcl::getMaxDistance (*view, cmatch4f, max_pt_input);
              max_pt_input[3] = 0;
              double max_dist_view = (cmatch4f - max_pt_input).norm ();

              cmatch4f = hom_from_OVC_to_CC * cmatch4f;
              std::cout << max_dist_view << " " << max_dist_input << std::endl;

              double scale_factor_view = max_dist_input / max_dist_view;
              std::cout << "Scale factor:" << scale_factor_view << std::endl;

              Eigen::Matrix4d center, center_inv;

              center.setIdentity (4, 4);
              center (0, 3) = -cinput4f[0];
              center (1, 3) = -cinput4f[1];
              center (2, 3) = -cinput4f[2];

              center_inv.setIdentity (4, 4);
              center_inv (0, 3) = cinput4f[0];
              center_inv (1, 3) = cinput4f[1];
              center_inv (2, 3) = cinput4f[2];

              scale_mat (0, 0) = scale_factor_view;
              scale_mat (1, 1) = scale_factor_view;
              scale_mat (2, 2) = scale_factor_view;

              scale_mat = center_inv * scale_mat * center;
            }

            Eigen::Matrix4d hom_from_OC_to_CC;
            hom_from_OC_to_CC = scale_mat * transformations[idx_input].inverse () * transposed * model_view_pose;

            models_->push_back (m);
            transforms_->push_back (hom_from_OC_to_CC);
            descriptor_distances_.push_back (static_cast<double> (indices_scores[i].score_));
          }
          else
          {
            PCL_WARN("The roll pose was not found, should use CRH here... \n");
          }
        }
      }

      std::cout << "Number of object hypotheses:" << models_->size () << std::endl;

      /**
       * POSE REFINEMENT
       **/

      if (ICP_iterations_ > 0)
      {
        pcl::ScopeTime t ("Pose refinement");

        //Prepare scene and model clouds for the pose refinement step
        double VOXEL_SIZE_ICP_ = 0.005;
        PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());

        {
          pcl::ScopeTime t ("Voxelize stuff...");
          pcl::VoxelGrid<PointInT> voxel_grid_icp;
          voxel_grid_icp.setInputCloud (processed);
          voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
          voxel_grid_icp.filter (*cloud_voxelized_icp);
          source_->voxelizeAllModels (VOXEL_SIZE_ICP_);
        }

#pragma omp parallel for num_threads(omp_get_num_procs())
        for (int i = 0; i < static_cast<int> (models_->size ()); i++)
        {

          ConstPointInTPtr model_cloud;
          PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);

          if (compute_scale_)
          {
            model_cloud = models_->at (i).getAssembled (-1);
            PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
            pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
            pcl::VoxelGrid<PointInT> voxel_grid_icp;
            voxel_grid_icp.setInputCloud (model_aligned_m);
            voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
            voxel_grid_icp.filter (*model_aligned);
          }
          else
          {
            model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
            pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
          }

          pcl::IterativeClosestPoint<PointInT, PointInT> reg;
          reg.setInputCloud (model_aligned); //model
          reg.setInputTarget (cloud_voxelized_icp); //scene
          reg.setMaximumIterations (ICP_iterations_);
          reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 3.);
          reg.setTransformationEpsilon (1e-6);

          typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
          reg.align (*output_);

          Eigen::Matrix4d icp_trans = reg.getFinalTransformation ();
          transforms_->at (i) = icp_trans * transforms_->at (i);
        }
      }

      /**
       * HYPOTHESES VERIFICATION
       **/

      if (hv_algorithm_)
      {

        pcl::ScopeTime t ("HYPOTHESES VERIFICATION");

        std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
        aligned_models.resize (models_->size ());

        for (size_t i = 0; i < models_->size (); i++)
        {
          ConstPointInTPtr model_cloud;
          PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);

          if (compute_scale_)
          {
            model_cloud = models_->at (i).getAssembled (-1);
            PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
            pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
            pcl::VoxelGrid<PointInT> voxel_grid_icp;
            voxel_grid_icp.setInputCloud (model_aligned_m);
            voxel_grid_icp.setLeafSize (0.005, 0.005, 0.005);
            voxel_grid_icp.filter (*model_aligned);
          }
          else
          {
            model_cloud = models_->at (i).getAssembled (0.005);
            pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
          }

          //ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.005);
          //PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
          //pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
          aligned_models[i] = model_aligned;
        }

        std::vector<bool> mask_hv;
        hv_algorithm_->setSceneCloud (input_);
        hv_algorithm_->addModels (aligned_models, true);
        hv_algorithm_->verify ();
        hv_algorithm_->getMask (mask_hv);

        boost::shared_ptr < std::vector<ModelT> > models_temp;
        boost::shared_ptr < std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> > > transforms_temp;

        models_temp.reset (new std::vector<ModelT>);
        transforms_temp.reset (new std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> >);

        for (size_t i = 0; i < models_->size (); i++)
        {
          if (!mask_hv[i])
            continue;

          models_temp->push_back (models_->at (i));
          transforms_temp->push_back (transforms_->at (i));
        }

        models_ = models_temp;
        transforms_ = transforms_temp;
      }

    }
  }
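In Example #12, scale_mat is simply an Eigen::Matrix4d, and the conjugation center_inv * scale_mat * center scales about the input centroid instead of the origin. A standalone sketch of that construction:

#include <Eigen/Dense>

// Uniform scale by s about an arbitrary point c: translate c to the
// origin, scale, translate back (as done in Example #12).
Eigen::Matrix4d scaleAbout (const Eigen::Vector3d &c, double s)
{
  Eigen::Matrix4d center = Eigen::Matrix4d::Identity ();
  center.block<3, 1> (0, 3) = -c;
  Eigen::Matrix4d center_inv = Eigen::Matrix4d::Identity ();
  center_inv.block<3, 1> (0, 3) = c;
  Eigen::Matrix4d scale = Eigen::Matrix4d::Identity ();
  scale (0, 0) = scale (1, 1) = scale (2, 2) = s;
  return center_inv * scale * center;
}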