/// Rotate the current selection in place by the Euler angles `ea`.
void selection_rotate(const vec3f& ea) {
    if(selected_point) return;  // points have no orientation to rotate
    if(selected_frame) {
        // conjugate the rotations with translations so the frame rotates
        // about its own origin, around its own x, y and z axes
        auto m = translation_matrix(selected_frame->o) *
                 rotation_matrix(ea.x, selected_frame->x) *
                 rotation_matrix(ea.y, selected_frame->y) *
                 rotation_matrix(ea.z, selected_frame->z) *
                 translation_matrix(-selected_frame->o);
        *selected_frame = transform_frame(m, *selected_frame);
    }
}
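For reference, the product above is the usual conjugation pattern: translate the pivot to the origin, rotate, then translate back. A minimal sketch of the same idea in isolation (hypothetical mat4f/vec3f types, assuming translation_matrix/rotation_matrix helpers like the ones used above):

// Sketch only: build a transform that rotates by angle `a` around axis `ax`
// about the pivot `o` instead of the world origin.
mat4f rotate_about(const vec3f& o, float a, const vec3f& ax) {
    return translation_matrix(o)     // 3. move the pivot back in place
         * rotation_matrix(a, ax)    // 2. rotate about the world origin
         * translation_matrix(-o);   // 1. move the pivot to the origin
}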
Example #2
glm::mat4 MovableObject::matrix() const{
	// Lazily rebuild the cached model matrix when any of its factors changed;
	// the per-factor accessors are assumed to clear their own dirty flags.
	if(translation_matrix_dirty_
			|| rotation_matrix_dirty_
			|| scale_matrix_dirty_)  {
		matrix_ = translation_matrix()*rotation_matrix()*scale_matrix();
	}
	return matrix_;
}
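Since the accessor is const but writes to the cache, matrix_ and the dirty flags must be mutable. A hedged sketch of the assumed surrounding class (member names taken from the snippet, everything else hypothetical):

#include <glm/glm.hpp>

class MovableObject {
public:
	glm::mat4 matrix() const;               // the lazy accessor shown above
private:
	glm::mat4 translation_matrix() const;   // assumed per-factor accessors
	glm::mat4 rotation_matrix() const;
	glm::mat4 scale_matrix() const;

	mutable glm::mat4 matrix_{1.0f};
	mutable bool translation_matrix_dirty_ = true;
	mutable bool rotation_matrix_dirty_ = true;
	mutable bool scale_matrix_dirty_ = true;
};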
Example #3
// compute the frame from an animation
frame3f animate_compute_frame(FrameAnimation* animation, int time) {
    // find the keyframe interval and the interpolation parameter t
    auto interval_t = get_keyframe_details(animation->keytimes, time);
    auto interval   = interval_t.first;
    auto t          = interval_t.second;

    // linearly interpolate the translation and the Euler angles over the interval
    auto trans = translation_matrix(t * animation->translation[interval + 1] +
                                    (1 - t) * animation->translation[interval]);
    auto rot = t * animation->rotation[interval + 1] +
               (1 - t) * animation->rotation[interval];
    // compute the combined xform matrix from the interpolated angles
    auto rot_x = rotation_matrix(rot[0], x3f);
    auto rot_y = rotation_matrix(rot[1], y3f);
    auto rot_z = rotation_matrix(rot[2], z3f);
    // return the transformed rest frame
    return transform_frame(trans * rot_z * rot_y * rot_x, animation->rest_frame);
}
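The helper get_keyframe_details is not shown; here is a hypothetical sketch of what it is assumed to do (locate the bracketing keyframe pair and the normalized parameter t in [0, 1]; keytimes sorted ascending with at least two entries, clamping omitted):

#include <utility>
#include <vector>

std::pair<int, float> get_keyframe_details(const std::vector<int>& keytimes, int time) {
    // advance until keytimes[interval] <= time <= keytimes[interval + 1]
    int interval = 0;
    while (interval + 2 < (int)keytimes.size() && time > keytimes[interval + 1])
        ++interval;
    // normalized position of `time` within the interval
    float t = float(time - keytimes[interval]) /
              float(keytimes[interval + 1] - keytimes[interval]);
    return {interval, t};
}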
Example #4
void viewer::draw() const
{
  glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
  glClearDepth(1.0f);
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  glUseProgram(shader_programs[active_shader_set]);
  glUniformMatrix4fv(translationMatrixUnif[active_shader_set], 1, GL_FALSE, &translation_matrix()[0]);
  glUniformMatrix4fv(rotationXMatrixUnif[active_shader_set], 1, GL_FALSE, &rotation_x_matrix()[0]);
  glUniformMatrix4fv(rotationYMatrixUnif[active_shader_set], 1, GL_FALSE, &rotation_y_matrix()[0]);
  glUniformMatrix4fv(rotationZMatrixUnif[active_shader_set], 1, GL_FALSE, &rotation_z_matrix()[0]);
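  // Note: if these matrix accessors return glm::mat4 by value, passing
  // glm::value_ptr(m) (from <glm/gtc/type_ptr.hpp>) instead of &m[0] is
  // the safer idiom for obtaining the underlying const float* pointer.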

  glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
  glEnableVertexAttribArray(0);
  glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);

  glDrawArrays(GL_TRIANGLES, 0, map.vertices / 3);

  glDisableVertexAttribArray(0); // only attribute 0 was enabled above
  glUseProgram(0);
}
Example #5
//TODO: code here should be abstracted outside the app, modify tests accordingly
int main(int argc, char *argv[]) {

    // Check the number of arguments
    if (argc != 2) {
        std::cout << "********************************" << std::endl;
        std::cout << "Usage of the code: ./traffic-sign-detection imageFileName.extension" << std::endl;
        std::cout << "********************************" << std::endl;

        return -1;
    }

    // Clock for measuring the elapsed time
    std::chrono::time_point<std::chrono::system_clock> start, end;
    start = std::chrono::system_clock::now();

    // Read the input image - convert char* to string
    std::string input_filename(argv[1]);

    // Read the input image
    cv::Mat input_image = cv::imread(input_filename);

    // Check that the image has been opened
    if (!input_image.data) {
        std::cout << "Error to read the image. Check ''cv::imread'' function of OpenCV" << std::endl;
        return -1;
    }
    // Check that the image read is a 3 channels image
    CV_Assert(input_image.channels() == 3);


    /*
     * Convert the image into some specific color spaces
     */

    // Convert the RGB image to the IHLS color space
    cv::Mat ihls_image;
    colorconversion::convert_rgb_to_ihls(input_image, ihls_image);
    // Convert from RGB to logarithmic chromatic red and blue
    std::vector< cv::Mat > log_image;
    colorconversion::rgb_to_log_rb(input_image, log_image);

    /*
     * Segmentation of the image using the previous transformations
     */

    // Segmentation of the IHLS and more precisely of the normalised hue channel
    // ONE PARAMETER TO CONSIDER - COLOR OF THE TRAFFIC SIGN TO DETECT - RED VS BLUE
    int nhs_mode = 0; // nhs_mode == 0 -> red segmentation / nhs_mode == 1 -> blue segmentation
    cv::Mat nhs_image_seg_red;

    segmentation::seg_norm_hue(ihls_image, nhs_image_seg_red, nhs_mode);
    //nhs_mode = 1; // nhs_mode == 0 -> red segmentation / nhs_mode == 1 -> blue segmentation
    //cv::Mat nhs_image_seg_blue;
    cv::Mat nhs_image_seg_blue = nhs_image_seg_red.clone();
    //segmentation::seg_norm_hue(ihls_image, nhs_image_seg_blue, nhs_mode);
    // Segmentation of the log chromatic image
    // TODO - DEFINE THE THRESHOLD FOR THE BLUE TRAFFIC SIGNS. FOR NOW WE SKIP THIS PROCESSING AND LET THE OTHER METHOD TAKE CARE OF BLUE SIGNS.
    cv::Mat log_image_seg;
    segmentation::seg_log_chromatic(log_image, log_image_seg);

    /*
     * Merging and filtering of the previous segmentations
     */

    // Merge the results of previous segmentation using an OR operator
    // Pre-allocation of an image by cloning a previous image
    cv::Mat merge_image_seg_with_red = nhs_image_seg_red.clone();
    cv::Mat merge_image_seg = nhs_image_seg_blue.clone();
    cv::bitwise_or(nhs_image_seg_red, log_image_seg, merge_image_seg_with_red);
    cv::bitwise_or(nhs_image_seg_blue, merge_image_seg_with_red, merge_image_seg);

    // Filter the image using median filtering and morphological operations
    cv::Mat bin_image;
    imageprocessing::filter_image(merge_image_seg, bin_image);


    cv::imwrite("seg.jpg", bin_image);

    /*
     * Extract candidates (i.e., contours) and remove inconsistent candidates
     */

    std::vector< std::vector< cv::Point > > distorted_contours;
    imageprocessing::contours_extraction(bin_image, distorted_contours);

    /*
     * Correct the distortion for each contour
     */

    // Initialise the variables returned by the distortion correction; they
    // describe the transformations applied to correct each contour
    std::vector< cv::Mat > rotation_matrix(distorted_contours.size());
    std::vector< cv::Mat > scaling_matrix(distorted_contours.size());
    std::vector< cv::Mat > translation_matrix(distorted_contours.size());
    for (unsigned int contour_idx = 0; contour_idx < distorted_contours.size(); contour_idx++) {
        rotation_matrix[contour_idx] = cv::Mat::eye(3, 3, CV_32F);
        scaling_matrix[contour_idx] = cv::Mat::eye(3, 3, CV_32F);
        translation_matrix[contour_idx] = cv::Mat::eye(3, 3, CV_32F);
    }

    // Correct the distortion
    std::vector< std::vector< cv::Point2f > > undistorted_contours;
    imageprocessing::correction_distortion(distorted_contours, undistorted_contours, translation_matrix, rotation_matrix, scaling_matrix);

    // Normalise the contours to be inside a unit circle
    std::vector<double> factor_vector(undistorted_contours.size());
    std::vector< std::vector< cv::Point2f > > normalised_contours;
    initopt::normalise_all_contours(undistorted_contours, normalised_contours, factor_vector);

    std::vector< std::vector< cv::Point2f > > detected_signs_2f(normalised_contours.size());
    std::vector< std::vector< cv::Point > > detected_signs(normalised_contours.size());

    // For each contour
    for (unsigned int contour_idx = 0; contour_idx < normalised_contours.size(); contour_idx++) {

        // For each type of traffic sign
        /*
         * sign_type = 0 -> nb_edges = 3;  gielis_sym = 6; radius
         * sign_type = 1 -> nb_edges = 4;  gielis_sym = 4; radius
         * sign_type = 2 -> nb_edges = 12; gielis_sym = 4; radius
         * sign_type = 3 -> nb_edges = 8;  gielis_sym = 8; radius
         * sign_type = 4 -> nb_edges = 3;  gielis_sym = 6; radius / 2
         */

        Timer tmrSgnType("For signType");
        optimisation::ConfigStruct_<double> final_config;
        double best_fit = std::numeric_limits<double>::infinity();
        //int type_sign_to_keep = 0;
        for (int sign_type = 0; sign_type < 5; sign_type++) {
            Timer tmrIteration(" for_signType_iter");

            // Compute the mass center of the contour
            cv::Point2f mass_center = initopt::mass_center_discovery(input_image, translation_matrix[contour_idx],
                                                                     rotation_matrix[contour_idx], scaling_matrix[contour_idx],
                                                                     normalised_contours[contour_idx], factor_vector[contour_idx],
                                                                     sign_type);

            // Find the rotation offset
            double rot_offset = initopt::rotation_offset(normalised_contours[contour_idx]);

            // Declaration of the parameters of the gielis with the default parameters
            optimisation::ConfigStruct_<double> contour_config;
            // Set the number of symmetries
            int gielis_symmetry = 0;
            switch (sign_type) {
            case 0:
                gielis_symmetry = 6;
                break;
            case 1:
                gielis_symmetry = 4;
                break;
            case 2:
                gielis_symmetry = 4;
                break;
            case 3:
                gielis_symmetry = 8;
                break;
            case 4:
                gielis_symmetry = 6;
                break;
            }
            contour_config.p = gielis_symmetry;
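            // Note: the switch above is equivalent to a small lookup table,
            // e.g. (kGielisSymmetry is a hypothetical name for this sketch):
            //   static const int kGielisSymmetry[5] = {6, 4, 4, 8, 6};
            //   contour_config.p = kGielisSymmetry[sign_type];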
            // Set the rotation offset
            contour_config.theta_offset = rot_offset;
            // Set the mass center
            contour_config.x_offset = mass_center.x;
            contour_config.y_offset = mass_center.y;

            Timer tmrOpt("\t for_signType_gielisOptimization");
            // Go for the optimisation
            Eigen::Vector4d mean_err(0,0,0,0), std_err(0,0,0,0);
            optimisation::gielis_optimisation(normalised_contours[contour_idx], contour_config, mean_err, std_err);

            mean_err = mean_err.cwiseAbs();
            double err_fit = mean_err.sum();

            if (err_fit < best_fit) {
                best_fit = err_fit;
                final_config = contour_config;
                //type_sign_to_keep = sign_type;
            }
        }

        Timer tmr2("Reconstruct contour");

        // Reconstruct the contour
        std::cout << "Contour #" << contour_idx << ":\n" << final_config << std::endl;
        std::vector< cv::Point2f > gielis_contour;
        int nb_points = 1000;
        optimisation::gielis_reconstruction(final_config, gielis_contour, nb_points);
        std::vector< cv::Point2f > denormalised_gielis_contour;
        initopt::denormalise_contour(gielis_contour, denormalised_gielis_contour, factor_vector[contour_idx]);
        std::vector< cv::Point2f > distorted_gielis_contour;
        imageprocessing::inverse_transformation_contour(denormalised_gielis_contour, distorted_gielis_contour,
                                                        translation_matrix[contour_idx], rotation_matrix[contour_idx],
                                                        scaling_matrix[contour_idx]);

        // Transform to cv::Point to show the results
        std::vector< cv::Point > distorted_gielis_contour_int(distorted_gielis_contour.size());
        for (unsigned int i = 0; i < distorted_gielis_contour.size(); i++) {
            distorted_gielis_contour_int[i].x = (int) std::round(distorted_gielis_contour[i].x);
            distorted_gielis_contour_int[i].y = (int) std::round(distorted_gielis_contour[i].y);
        }

        detected_signs_2f[contour_idx] = distorted_gielis_contour;
        detected_signs[contour_idx] = distorted_gielis_contour_int;

    }

    end = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_seconds = end-start;
    std::time_t end_time = std::chrono::system_clock::to_time_t(end);

    std::cout << "Finished computation at " << std::ctime(&end_time)
              << "Elapsed time: " << elapsed_seconds.count()*1000 << " ms\n";


    cv::Mat output_image = input_image.clone();
    cv::Scalar color(0,255,0);
    cv::drawContours(output_image, detected_signs, -1, color, 2, 8);

    cv::namedWindow("Window", CV_WINDOW_AUTOSIZE);
    cv::imshow("Window", output_image);
    cv::waitKey(0);

    return 0;
}
Example #6
bool GLRenderer::Initialize()
{
	// get OpenGL version info
	int major, minor;
	sscanf((char*)glGetString(GL_VERSION), "%d.%d", &major, &minor);
	gl_version = major * 10 + minor;

	sscanf((char*)glGetString(GL_SHADING_LANGUAGE_VERSION), "%d.%d", &major, &minor);
	glsl_version = major * 100 + minor;
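	// Note: on GL 3.0+ the version can also be queried directly as integers,
	// avoiding the string parse above, e.g.:
	//   glGetIntegerv(GL_MAJOR_VERSION, &major);
	//   glGetIntegerv(GL_MINOR_VERSION, &minor);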

	// get OpenGL metrics
	glGetIntegerv(GL_MAX_TEXTURE_SIZE, &gl_max_texture_size);
	glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &gl_max_combined_texture_image_units);

	if(ogl_ext_EXT_texture_filter_anisotropic)
	{
		glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &gl_max_texture_max_anisotropy_ext);
	}
	else
	{
		LOG_INFO("GL extension EXT_texture_filter_anisotropic is not available.");
	}

	// gen fbo and screen buffers
	glGenFramebuffers(1, &fbo);
	glGenTextures(1, &colorBuffer);
	glGenTextures(1, &depthBuffer);

	// initialize opengl state
	glEnable(GL_CULL_FACE);
	glCullFace(GL_BACK);
	glDepthFunc(GL_LEQUAL);

	// texture initialization
	GLTexture::Initialize();

	blankTexture.Load("blank.png");
	scaleTexture.Load("tile4.png");

	// uniform buffers
	objectUniformBuffer = GLUniformBuffer::Create(sizeof(ObjectBlock));
	materialUniformBuffer = GLUniformBuffer::Create(sizeof(MaterialBlock));

	// shader setup
	defaultShader.Load("default_vertex.vert", "default_frag.frag");
	alphaTestShader.Load("default_vertex.vert", "alpha_test.frag");

	defaultShader.Bind();
	defaultShader.AddAttribute("position");
	defaultShader.AddAttribute("textureCoordinate");

	defaultShader.BindUniformBuffer("MaterialBlock", materialUniformBuffer, BINDING_MATERIAL);
	defaultShader.BindUniformBuffer("ObjectBlock", objectUniformBuffer, BINDING_OBJECT);
	defaultShader.SetUniformInt("texture", 0);

	alphaTestShader.Bind();
	alphaTestShader.AddAttribute("position");
	alphaTestShader.AddAttribute("textureCoordinate");

	alphaTestShader.BindUniformBuffer("MaterialBlock", materialUniformBuffer, BINDING_MATERIAL);
	alphaTestShader.BindUniformBuffer("ObjectBlock", objectUniformBuffer, BINDING_OBJECT);
	alphaTestShader.SetUniformInt("texture", 0);

	// model initialization
	wonk.LoadAsMesh("Fiona.obj");

	for(int j = 0; j < NUM_MODELS; j++)
	{
		for(int i = 0; i < NUM_SUBMESHES; i++)
		{
			Handle meshHandle = renderQueue.Claim();
			mershHandles[j*NUM_SUBMESHES+i] = meshHandle;

			GLMesh* mesh = renderQueue.Get(meshHandle);
			mesh->phase = wonk.materials[i].phase;
			mesh->vertexArray = wonk.vertexArray;
			mesh->viewportLayer = LAYER_WORLD;
			mesh->material = i + 1;

			mesh->textureID = wonk.materials[i].texture;
			mesh->model = translation_matrix(j * 8, 0, 0) * rotation_matrix(j * 15, UNIT_Y);

			mesh->startIndex = wonk.materials[i].startIndex;
			GLuint endIndex = (i == NUM_SUBMESHES - 1) ? wonk.numIndices : wonk.materials[i + 1].startIndex;
			mesh->numIndices = endIndex - wonk.materials[i].startIndex;
		}
	}

	for(int i = 0; i < NUM_MODELS * NUM_SUBMESHES; i++)
	{
		boundingBoxes[i].center = vec3((i / 2) * 8, 0.0f, 0.0f); // i / 2 assumes NUM_SUBMESHES == 2
		boundingBoxes[i].extents = vec3(2, 2, 2);
		boundingBoxes[i].axes[0] = UNIT_X;
		boundingBoxes[i].axes[1] = UNIT_Y;
		boundingBoxes[i].axes[2] = UNIT_Z;
	}

	// terrain initialization
	Terrain::Initialize();

	// camera init
	cameraData.projection = MAT_I;
	cameraData.isOrtho = false;

	screenViewProjection = MAT_I;

	GLPrimitives::Initialize();
	GUI::Initialize();

	return true;
}
Example #7
    void TestGeneralConvolution3DWithHomogeneousUblas()
    {
        TrianglesMeshReader<3,3> mesh_reader("mesh/test/data/cube_136_elements");
        TetrahedralMesh<3,3> mesh;
        mesh.ConstructFromMeshReader(mesh_reader);

        TS_ASSERT_DELTA(mesh.GetVolume(), 1.0, 1e-6);
        TS_ASSERT_DELTA(mesh.GetSurfaceArea(), 6.0, 1e-6);

        // Change coordinates

        c_matrix<double, 4, 4> x_rotation_matrix = identity_matrix<double>(4);
        c_matrix<double, 4, 4> y_rotation_matrix = identity_matrix<double>(4);
        c_matrix<double, 4, 4> z_rotation_matrix = identity_matrix<double>(4);
        c_matrix<double, 4, 4> translation_matrix = identity_matrix<double>(4);

        double theta = 0.7;
        double phi = 0.3;
        double psi = 1.4;

        x_rotation_matrix(1,1) = cos(theta);
        x_rotation_matrix(1,2) = sin(theta);
        x_rotation_matrix(2,1) = -sin(theta);
        x_rotation_matrix(2,2) = cos(theta);

        y_rotation_matrix(0,0) = cos(phi);
        y_rotation_matrix(0,2) = -sin(phi);
        y_rotation_matrix(2,0) = sin(phi);
        y_rotation_matrix(2,2) = cos(phi);

        z_rotation_matrix(0,0) = cos(psi);
        z_rotation_matrix(0,1) = sin(psi);
        z_rotation_matrix(1,0) = -sin(psi);
        z_rotation_matrix(1,1) = cos(psi);

        translation_matrix(0,3) = 2.3;
        translation_matrix(1,3) = 3.1;
        translation_matrix(2,3) = 1.7;
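        // translation_matrix now has the homogeneous block form
        //   [ I  t ]
        //   [ 0  1 ]
        // with t = (2.3, 3.1, 1.7), so prod(translation_matrix, p) adds t to
        // the first three components of a point p whose p[3] is 1.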

        /*
        Note: because we are using column vectors this transformation:
        RotX(theta) . RotY(phi) . RotZ(psi) . Trans(...)
        is actually applied right-to-left: the translation happens first,
        followed by the rotations about z, y and x in turn.
        See test below.
        */
        c_matrix<double, 4, 4> transformation_matrix = prod (x_rotation_matrix, y_rotation_matrix);
        transformation_matrix = prod (transformation_matrix, z_rotation_matrix);
        transformation_matrix = prod (transformation_matrix, translation_matrix);

        for (unsigned i=0; i<mesh.GetNumNodes(); i++)
        {
            Node<3>* p_node = mesh.GetNode(i);
            ChastePoint<3> point = p_node->GetPoint();

            c_vector<double, 4> point_location;

            point_location[0] = point[0];
            point_location[1] = point[1];
            point_location[2] = point[2];
            point_location[3] = 1.0;

            c_vector<double, 4> new_point_location = prod(transformation_matrix, point_location);

            TS_ASSERT_EQUALS(new_point_location[3], 1.0);

            point.SetCoordinate(0,new_point_location[0]);
            point.SetCoordinate(1,new_point_location[1]);
            point.SetCoordinate(2,new_point_location[2]);
            p_node->SetPoint(point);
        }
        mesh.RefreshMesh();

        TS_ASSERT_DELTA(mesh.GetVolume(), 1.0, 1e-6);
        TS_ASSERT_DELTA(mesh.GetSurfaceArea(), 6.0, 1e-6);

        ChastePoint<3> corner_after = mesh.GetNode(6)->GetPoint();
        TS_ASSERT_DELTA(corner_after[0], 3.59782,  5e-5);
        TS_ASSERT_DELTA(corner_after[1], 0.583418, 5e-5);
        TS_ASSERT_DELTA(corner_after[2], 4.65889,  5e-5);

        // Write to file
        TrianglesMeshWriter<3,3> mesh_writer("","TransformedMesh");
        mesh_writer.WriteFilesUsingMesh(mesh);

        /*
         * Now try:
         *   tetview /tmp/chaste/testoutput/TransformedMesh
         */
    }
Example #8
// Left-multiply m in place by the translation T(i, j, k): m <- T * m.
void translate(double i, double j, double k, matrix m) {
	matrix temp = mat_mat_multiply(translation_matrix(i, j, k), m);
	memcpy(m, temp, 16 * sizeof(double)); // copy the product back into m
	free(temp); // release the heap-allocated product
	// note: if translation_matrix() also heap-allocates its result, that
	// intermediate matrix leaks here and would need to be freed as well
}
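A minimal usage sketch under the same calling convention (identity_matrix is a hypothetical helper returning a freshly allocated 4x4 identity; matrix is assumed to point at 16 doubles, as the memcpy above implies):

// Hypothetical usage: compose translations into m in place, then release it.
matrix m = identity_matrix();   // hypothetical: new heap-allocated identity
translate(1.0, 2.0, 3.0, m);    // m <- T(1,2,3) * m
translate(0.0, -1.0, 0.0, m);   // m <- T(0,-1,0) * m
free(m);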