Example #1
0
/*
 * Entry point: apply a simple motion-blur filter to an image file.
 */
int main(int argc, char** argv) {
	// Require an input and an output path on the command line
	if (argc < 3) {
		std::cout << "USAGE: " << argv[0] << " <input image> <output image>\n";
		return -1;
	}

	// Source and destination file names
	const std::string src_path(argv[1]);
	const std::string dst_path(argv[2]);

	// Open the input file and load the image into memory
	NICE::ImageFile source_file(src_path);
	NICE::Image image;
	source_file.reader(&image);

	// Destination image with identical dimensions
	NICE::Image result(image.width(), image.height());

	// Build a diagonal convolution kernel producing a motion-blur effect
	NICE::Matrix kernel(10, 10);
	kernel.set(0);
	kernel.addIdentity(0.1);

	// Apply the convolution
	NICE::Filter::filter(image, kernel, result);

	// Write the filtered image back to disk
	NICE::ImageFile dest_image(dst_path);
	dest_image.writer(&result);
	return 0;
}
Example #2
0
// Resize `image` to the target dimensions `dim` using the interpolation
// selected by `resize_quality` (0 = resampling, 1 = linear, else spline).
// NOTE(review): ownership of the freshly allocated pixel data appears to
// pass to the returned view per this library's convention — TODO confirm.
Image* resize(T& image, const Dim& dim, int resize_quality) {
    // Allocate backing pixel storage at the target size, keeping the
    // source image's origin, and wrap it in a view to return.
    typename T::data_type* data = new typename T::data_type
    (dim, image.origin());
    ImageView<typename T::data_type>* view =
        new ImageView<typename T::data_type>(*data);
    /*
      Images with nrows or ncols == 1 cannot be scaled by VIGRA.
      This is a hack that just returns an image with the same
      color as the upper-left pixel
    */
    if (image.nrows() <= 1 || image.ncols() <= 1 ||
            view->nrows() <= 1 || view->ncols() <= 1) {
        std::fill(view->vec_begin(), view->vec_end(), image.get(Point(0, 0)));
        return view;
    }
    if (resize_quality == 0) {
        // for straight scaling, resampleImage must be used in VIGRA
        double xfactor = (double)view->ncols()/image.ncols();
        double yfactor = (double)view->nrows()/image.nrows();
        // this is implemented incorrectly in VIGRA:
        //resizeImageNoInterpolation(src_image_range(image), dest_image_range(*view));
        // the following works however:
        // requires extension of VIGRA (see basicgeometry.hxx)
        // that are not yet merged into VIGRA 1.6.0
        resampleImage(src_image_range(image), dest_image(*view), xfactor, yfactor);
    } else if (resize_quality == 1) {
        // quality 1: bilinear interpolation
        resizeImageLinearInterpolation(src_image_range(image), dest_image_range(*view));
    } else {
        // any other quality value: spline interpolation (highest quality)
        resizeImageSplineInterpolation(src_image_range(image), dest_image_range(*view));
    }
    // carry image metadata (resolution etc.) over to the result
    image_copy_attributes(image, *view);
    return view;
}
Example #3
0
  // Compute a crack-edge image with VIGRA's difference-of-exponential
  // operator.  The result is allocated at twice the source dimensions
  // (crack-edge representation).  Optional post-processing: remove edges
  // shorter than min_edge_length, close gaps, and beautify the edge image.
  //
  // Throws std::runtime_error if scale or gradient_threshold is negative.
  // If VIGRA throws, the partially built result is released before the
  // exception is re-thrown; otherwise the caller owns the returned view
  // and its backing data.
  typename ImageFactory<T>::view_type* difference_of_exponential_crack_edge_image(const T& src, double scale, double gradient_threshold, unsigned int min_edge_length, unsigned int close_gaps, unsigned int beautify) {
    // 0 is accepted by this check, so the requirement is ">= 0"
    // (message fixed to match the condition and the canny variant)
    if ((scale < 0) || (gradient_threshold < 0))
      throw std::runtime_error("The scale and gradient threshold must be >= 0");

    // Crack-edge images are twice as large in each dimension
    typename ImageFactory<T>::data_type* dest_data =
      new typename ImageFactory<T>::data_type(Dim(src.ncols() * 2, src.nrows() * 2), src.origin());

    typename ImageFactory<T>::view_type* dest =
      new typename ImageFactory<T>::view_type(*dest_data);

    try {
      vigra::differenceOfExponentialCrackEdgeImage(src_image_range(src), dest_image(*dest), scale, gradient_threshold, NumericTraits<typename T::value_type>::one());
    
      if (min_edge_length > 0)
        vigra::removeShortEdges(dest_image_range(*dest), min_edge_length, NumericTraits<typename T::value_type>::one());
    
      if (close_gaps)
        vigra::closeGapsInCrackEdgeImage(dest_image_range(*dest), NumericTraits<typename T::value_type>::one());
    
      if (beautify)
        vigra::beautifyCrackEdgeImage(dest_image_range(*dest), NumericTraits<typename T::value_type>::one(), NumericTraits<typename T::value_type>::zero());
    } catch (const std::exception&) {
      // catch by const reference (was by value: copies and slices the
      // exception object); free the partial result and re-throw
      delete dest;
      delete dest_data;
      throw;
    }
    return dest;
  }
	void convert_to_polar_data_transformer::transform(
		const void * data,
		void * data_transformed,
		neuron_data_type::input_type type,
		const layer_configuration_specific& original_config,
		unsigned int sample_id)
	{
		if (type != neuron_data_type::type_byte)
			throw neural_network_exception("convert_to_polar_data_transformer is implemented for data stored as bytes only");

		if (original_config.dimension_sizes.size() != 2)
			throw neural_network_exception((boost::format("convert_to_polar_data_transformer is processing 2D data only, data is passed with number of dimensions %1%") % original_config.dimension_sizes.size()).str());

		if (original_config.dimension_sizes != input_window_sizes)
			throw neural_network_exception("convert_to_polar_data_transformer: input window size mismatch between creation and actual transform");

		unsigned int original_neuron_count_per_feature_map = original_config.get_neuron_count_per_feature_map();
		unsigned int transformed_neuron_count_per_feature_map = get_transformed_configuration(original_config).get_neuron_count_per_feature_map();
		for(unsigned int feature_map_id = 0; feature_map_id < original_config.feature_map_count; ++feature_map_id)
		{
			cv::Mat1b original_image(static_cast<int>(original_config.dimension_sizes[1]), static_cast<int>(original_config.dimension_sizes[0]), const_cast<unsigned char *>(static_cast<const unsigned char *>(data)) + (original_neuron_count_per_feature_map * feature_map_id));
			cv::Mat1b dest_image(static_cast<int>(output_window_sizes[1]), static_cast<int>(output_window_sizes[0]), static_cast<unsigned char *>(data_transformed) + (transformed_neuron_count_per_feature_map * feature_map_id));

			// Should try INTER_CUBIC and INTER_LANCZOS4 as well
			cv::remap(original_image, dest_image, map_x, map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT, border_value);
		}
	}
Example #5
0
/*
 * Entry point: convert a grayscale image to a pseudocolor image.
 */
int main(int argc, char** argv) {
    // Require an input and an output path on the command line
    if (argc < 3) {
        std::cout << "USAGE: " << argv[0] << " <input image> <output image>\n";
        return -1;
    }

    // Source and destination file names
    const std::string src_path(argv[1]);
    const std::string dst_path(argv[2]);

    // Load the source image into memory
    NICE::ImageFile source_file(src_path);
    NICE::Image image;
    source_file.reader(&image);

    // Map gray values to pseudocolors
    NICE::ColorImage pseudo_image(image.width(), image.height());
    NICE::imageToPseudoColor(image, pseudo_image);

    // Save the colorized result
    NICE::ImageFile dest_image(dst_path);
    dest_image.writer(&pseudo_image);
    return 0;
}
Example #6
0
static bool ExportAllVisibleLayersAsImage(const char* filename, sTileset& tileset, std::vector<sLayer> layers)
{
  if (!(layers.size() >= 1))
    return false;
  
  int tile_width  = tileset.GetTileWidth();
  int tile_height = tileset.GetTileHeight();
  int dest_image_width = 0;
  int dest_image_height = 0;
  unsigned int i;

  // find the size of the image we're going to create
  for (i = 0; i < layers.size(); i++) {
    if (layers[i].GetWidth() * tile_width > dest_image_width) {
      dest_image_width = layers[i].GetWidth() * tile_width;
    }
    if (layers[i].GetHeight() * tile_height > dest_image_height) {
      dest_image_height = layers[i].GetHeight() * tile_height;
    }
  }
  if (dest_image_width <= 0 || dest_image_height <= 0)
    return false;
  // create destination/output image
  CImage32 dest_image(dest_image_width, dest_image_height);
  if (layers.size() > 0) { // start from the bottom and work our way to the top ;)
    if (!LayerToImage(&dest_image, layers[0], tileset)) {
      return false;
    }
  }
  else // nothing to export
    return false;
  // we already have the first layer, now we do the rest
  for (i = 1; i < layers.size(); i++) {
    int image_width  = layers[i].GetWidth()  * tile_width;
    int image_height = layers[i].GetHeight() * tile_height;
    CImage32 src_image(image_width, image_height);
    
    if ( !LayerToImage(&src_image, layers[i], tileset) ) {
      return false;
    }
    else {
      // blend (dest_image, src_image)
      BlendImage(dest_image_width, dest_image_height, image_width, image_height, dest_image.GetPixels(), src_image.GetPixels());
    }
  }
 
  return dest_image.Save(filename);
}
Example #7
0
  // Compute a Canny edge image with VIGRA at the given smoothing scale and
  // gradient threshold.  Edge pixels are marked with the value-type's one().
  //
  // Throws std::runtime_error if scale or gradient_threshold is negative.
  // If VIGRA throws, the freshly allocated result is released before the
  // exception is re-thrown; otherwise the caller owns the returned view
  // and its backing data.
  typename ImageFactory<T>::view_type* canny_edge_image(const T& src, double scale, double gradient_threshold) {
    if ((scale < 0) || (gradient_threshold < 0))
      throw std::runtime_error("The scale and gradient threshold must be >= 0");

    typename ImageFactory<T>::data_type* dest_data =
      new typename ImageFactory<T>::data_type(src.size(), src.origin());

    typename ImageFactory<T>::view_type* dest =
      new typename ImageFactory<T>::view_type(*dest_data, src);

    try {
      vigra::cannyEdgeImage(src_image_range(src), dest_image(*dest), scale, gradient_threshold, NumericTraits<typename T::value_type>::one());
    } catch (const std::exception&) {
      // catch by const reference (was by value: copies and slices the
      // exception object); free the partial result and re-throw
      delete dest;
      delete dest_data;
      throw;
    }
    return dest;
  }
Example #8
0
  // Compute an edge image with VIGRA's difference-of-exponential operator;
  // optionally remove edges shorter than min_edge_length afterwards.
  //
  // Throws std::runtime_error if scale or gradient_threshold is negative.
  // If VIGRA throws, the freshly allocated result is released before the
  // exception is re-thrown; otherwise the caller owns the returned view
  // and its backing data.
  typename ImageFactory<T>::view_type* difference_of_exponential_edge_image(const T& src, double scale, double gradient_threshold, unsigned int min_edge_length) {
    // 0 is accepted by this check, so the requirement is ">= 0"
    // (message fixed to match the condition and the canny variant)
    if ((scale < 0) || (gradient_threshold < 0))
      throw std::runtime_error("The scale and gradient_threshold must be >= 0");

    typename ImageFactory<T>::data_type* dest_data =
      new typename ImageFactory<T>::data_type(src.size(), src.origin());

    typename ImageFactory<T>::view_type* dest =
      new typename ImageFactory<T>::view_type(*dest_data);

    try {
      vigra::differenceOfExponentialEdgeImage(src_image_range(src), dest_image(*dest), scale, gradient_threshold);
    
      if (min_edge_length > 0)
        vigra::removeShortEdges(dest_image_range(*dest), min_edge_length, NumericTraits<typename T::value_type>::one());
    } catch (const std::exception&) {
      // catch by const reference (was by value: copies and slices the
      // exception object); free the partial result and re-throw
      delete dest;
      delete dest_data;
      throw;
    }
    return dest;
  }
	void extract_2d_data_transformer::transform(
		const void * input_data,
		void * output_data,
		neuron_data_type::input_type type,
		const layer_configuration_specific& original_config)
	{
		if (type != neuron_data_type::type_byte)
			throw neural_network_exception("extract_2d_data_transformer is implemented for data stored as bytes only");

		if (original_config.dimension_sizes.size() != 2)
			throw neural_network_exception((boost::format("extract_2d_data_transformer is processing 2d data only, data is passed with number of dimensions %1%") % original_config.dimension_sizes.size()).str());

		if (original_config.feature_map_count != 1)
			throw neural_network_exception("extract_2d_data_transformer is implemented for 1 feature map data only");

		cv::Mat1b original_image(static_cast<int>(original_config.dimension_sizes[1]), static_cast<int>(original_config.dimension_sizes[0]), const_cast<unsigned char *>(static_cast<const unsigned char *>(input_data)));
		int window_top_left_x = (original_config.dimension_sizes[0] - input_window_width) / 2;
		int window_bottom_right_x = window_top_left_x + input_window_width;
		int window_top_left_y = (original_config.dimension_sizes[1] - input_window_height) / 2;
		int window_bottom_right_y = window_top_left_y + input_window_height;
		cv::Mat1b cropped_image = original_image.rowRange(window_top_left_y, window_bottom_right_y).colRange(window_top_left_x, window_bottom_right_x);
		cv::Mat1b dest_image(static_cast<int>(output_window_height), static_cast<int>(output_window_width), static_cast<unsigned char *>(output_data));
		cv::resize(cropped_image, dest_image, dest_image.size());
	}
Example #10
0
// Rotate `src` by `angle` degrees using spline interpolation of the given
// `order` (1..3); pixels outside the source are filled with `bgcolor`.
// The caller owns the returned view and its backing data.
//
// Throws std::range_error if order is outside 1..3.  Because VIGRA's
// rotateImage requires source and destination of equal size, angles near
// 90/270 are handled by a manual 90-degree pre-rotation, and the image is
// padded so the rotated content fits.
typename ImageFactory<T>::view_type* rotate(const T &src, double angle, typename T::value_type bgcolor, int order)
{
    if (order < 1 || order > 3) {
        throw std::range_error("Order must be between 1 and 3");
    }
    // degenerate 1x1 images cannot be rotated; return a plain copy
    if (src.nrows()<2 && src.ncols()<2)
        return simple_image_copy(src);

    // Adjust angle to a positive double between 0-360
    while(angle<0.0) angle+=360;
    while(angle>=360.0) angle-=360;

    // some angle ranges flip width and height
    // as VIGRA requires source and destination to be of the same
    // size, it cannot handle a reduce in one image dimension.
    // Hence we must rotate by 90 degrees, if necessary
    bool rot90done = false;
    // NOTE(review): C-style cast drops const from src; the non-rot90 path
    // only reads through this pointer, but a const-correct view would be safer
    typename ImageFactory<T>::view_type* prep4vigra = (typename ImageFactory<T>::view_type*) &src;
    if ((45 < angle && angle < 135) ||
            (225 < angle && angle < 315)) {
        // transpose-like manual 90-degree rotation into a fresh image
        typename ImageFactory<T>::data_type* prep4vigra_data =
            new typename ImageFactory<T>::data_type(Size(src.height(),src.width()));
        prep4vigra = new typename ImageFactory<T>::view_type(*prep4vigra_data);
        size_t ymax = src.nrows() - 1;
        for (size_t y = 0; y < src.nrows(); ++y) {
            for (size_t x = 0; x < src.ncols(); ++x) {
                prep4vigra->set(Point(ymax-y,x), src.get(Point(x,y)));
            }
        }
        rot90done = true;
        // recompute rotation angle, because partial rotation already done
        angle -= 90.0;
        if (angle < 0.0) angle +=360;
    }

    double rad = (angle / 180.0) * M_PI;

    // new width/height depending on angle
    size_t new_width, new_height;
    if ((0 <= angle && angle <= 90) ||
            (180 <= angle && angle <= 270)) {
        new_width = size_t(0.5+abs(cos(rad) * (double)prep4vigra->width() +
                                   sin(rad) * (double)prep4vigra->height()));
        new_height = size_t(0.5+abs(sin(rad) * (double)prep4vigra->width() +
                                    cos(rad) * (double)prep4vigra->height()));
    } else {
        new_width = size_t(0.5+abs(cos(rad) * (double)prep4vigra->width() -
                                   sin(rad) * (double)prep4vigra->height()));
        new_height = size_t(0.5+abs(sin(rad) * (double)prep4vigra->width() -
                                    cos(rad) * (double)prep4vigra->height()));
    }
    // pad so the rotated content fits inside the (same-sized) destination
    size_t pad_width = 0;
    if (new_width > prep4vigra->width())
        pad_width = (new_width - prep4vigra->width()) / 2 + 2;
    size_t pad_height = 0;
    if (new_height > prep4vigra->height())
        pad_height = (new_height - prep4vigra->height()) / 2 + 2;

    typename ImageFactory<T>::view_type* tmp =
        pad_image(*prep4vigra, pad_height, pad_width, pad_height, pad_width, bgcolor);

    typename ImageFactory<T>::data_type* dest_data =
        new typename ImageFactory<T>::data_type(tmp->size());
    typename ImageFactory<T>::view_type* dest =
        new typename ImageFactory<T>::view_type(*dest_data);

    try {
        fill(*dest, bgcolor);

        // spline order is a compile-time template parameter, so dispatch
        if (order == 1) {
            vigra::SplineImageView<1, typename T::value_type>
            spline(src_image_range(*tmp));
            vigra::rotateImage(spline, dest_image(*dest), -angle);
        } else if (order == 2) {
            vigra::SplineImageView<2, typename T::value_type>
            spline(src_image_range(*tmp));
            vigra::rotateImage(spline, dest_image(*dest), -angle);
        } else if (order == 3) {
            vigra::SplineImageView<3, typename T::value_type>
            spline(src_image_range(*tmp));
            vigra::rotateImage(spline, dest_image(*dest), -angle);
        }
    } catch (const std::exception&) {
        // catch by const reference (was by value: copies and slices the
        // exception object); release all intermediates and re-throw
        delete tmp->data();
        delete tmp;
        delete dest;
        delete dest_data;
        if (rot90done) {
            delete prep4vigra->data();
            delete prep4vigra;
        }
        throw;
    }

    // release the temporaries created for the pre-rotation and padding
    if (rot90done) {
        delete prep4vigra->data();
        delete prep4vigra;
    }
    delete tmp->data();
    delete tmp;

    return dest;
}
	// Extract a centered window from each feature map of the input.
	// Two modes:
	//  * input_window_sizes == output_window_sizes: a pure N-dimensional
	//    center crop, copied row by row with memcpy.
	//  * otherwise: 2D only — center-crop to input_window_sizes, then
	//    resize the crop to output_window_sizes with OpenCV.
	// sample_id is unused here; it is part of the transformer interface.
	void extract_data_transformer::transform(
		const float * data,
		float * data_transformed,
		const layer_configuration_specific& original_config,
		unsigned int sample_id)
	{
		if (input_window_sizes == output_window_sizes)
		{
			const std::vector<unsigned int>& dimension_sizes = original_config.dimension_sizes;

			if (dimension_sizes.size() != input_window_sizes.size())
				throw neural_network_exception((boost::format("extract_data_transformer is created with %1%-dimensions, data has %2% dimensions") % input_window_sizes.size() % dimension_sizes.size()).str());

			// offset of the centered window along each dimension
			std::vector<unsigned int> src_offset_list;
			for(unsigned int i = 0; i < dimension_sizes.size(); ++i)
			{
				if (dimension_sizes[i] < output_window_sizes[i])
					throw neural_network_exception((boost::format("Dimension %1% of original config has %2% size while minimum is %3%") % i % dimension_sizes[i] % output_window_sizes[i]).str());
				src_offset_list.push_back((dimension_sizes[i] - output_window_sizes[i]) / 2);
			}

			// current output position along each dimension (odometer-style
			// counter; dimension 0 is the contiguous innermost row)
			std::vector<unsigned int> dst_pos_list(dimension_sizes.size(), 0);

			const float * src_begin = data;
			float * dst = data_transformed;

			for(unsigned int feature_map_id = 0; feature_map_id < original_config.feature_map_count; ++feature_map_id)
			{
				while (true)
				{
					// flatten (dst_pos + src_offset) into a linear source
					// offset, from outermost dimension down to dimension 0
					unsigned int offset = dst_pos_list.back() + src_offset_list.back();
					for(int i = static_cast<int>(dimension_sizes.size()) - 2; i >= 0; --i)
						offset = offset * dimension_sizes[i] + dst_pos_list[i] + src_offset_list[i];

					// copy one contiguous row of the window
					memcpy(dst, src_begin + offset, output_window_sizes[0] * sizeof(float));
					dst += output_window_sizes[0];

					// advance the odometer over dimensions 1..N-1;
					// NOTE(review): `int i` vs size() is a signed/unsigned
					// comparison — harmless here but worth cleaning up
					bool inc = false;
					for(int i = 1; i < output_window_sizes.size(); ++i)
					{
						dst_pos_list[i]++;
						if (dst_pos_list[i] < output_window_sizes[i])
						{
							inc = true;
							break;
						}
						else
							dst_pos_list[i] = 0;
					}
					// odometer wrapped completely: this feature map is done
					if (!inc)
						break;
				}

				src_begin += original_config.get_neuron_count_per_feature_map();
			}
		}
		else
		{
			if (original_config.dimension_sizes.size() != 2)
				throw neural_network_exception((boost::format("Resizing extract_data_transformer is processing 2D data only, data is passed with number of dimensions %1%") % original_config.dimension_sizes.size()).str());

			// centered crop window in the source image
			int window_top_left_x = (original_config.dimension_sizes[0] - input_window_sizes[0]) / 2;
			int window_bottom_right_x = window_top_left_x + input_window_sizes[0];
			int window_top_left_y = (original_config.dimension_sizes[1] - input_window_sizes[1]) / 2;
			int window_bottom_right_y = window_top_left_y + input_window_sizes[1];

			unsigned int original_neuron_count_per_feature_map = original_config.get_neuron_count_per_feature_map();
			unsigned int transformed_neuron_count_per_feature_map = get_transformed_configuration(original_config).get_neuron_count_per_feature_map();
			for(unsigned int feature_map_id = 0; feature_map_id < original_config.feature_map_count; ++feature_map_id)
			{
				// wrap buffers in OpenCV headers (no copies), crop, resize
				cv::Mat1f original_image(static_cast<int>(original_config.dimension_sizes[1]), static_cast<int>(original_config.dimension_sizes[0]), const_cast<float *>(data) + (original_neuron_count_per_feature_map * feature_map_id));
				cv::Mat1f cropped_image = original_image.rowRange(window_top_left_y, window_bottom_right_y).colRange(window_top_left_x, window_bottom_right_x);
				cv::Mat1f dest_image(static_cast<int>(output_window_sizes[1]), static_cast<int>(output_window_sizes[0]), data_transformed + (transformed_neuron_count_per_feature_map * feature_map_id));
				cv::resize(cropped_image, dest_image, dest_image.size());
			}
		}
	}
	// Apply a random 2D distortion (rotation, scale, shift, stretch,
	// perspective, axis flips) to every 2D slice of the input.  One set of
	// random parameters is drawn per call (under a lock, since the random
	// generator is shared) and applied identically to all slices.
	void distort_2d_data_transformer::transform(
		const float * data,
		float * data_transformed,
		const layer_configuration_specific& original_config,
		unsigned int sample_id)
	{
		if (original_config.dimension_sizes.size() < 2)
			throw neural_network_exception((boost::format("distort_2d_data_transformer is processing at least 2d data, data is passed with number of dimensions %1%") % original_config.dimension_sizes.size()).str());

		// defaults: each distribution's minimum, i.e. the identity transform
		// when the corresponding apply_* flag is off
		float rotation_angle = rotate_angle_distribution.min();
		float scale = scale_distribution.min();
		float shift_x = shift_x_distribution.min();
		float shift_y = shift_y_distribution.min();
		bool flip_around_x_axis = (flip_around_x_distribution.min() == 1);
		bool flip_around_y_axis = (flip_around_y_distribution.min() == 1);
		float stretch = stretch_distribution.min();
		float stretch_angle = stretch_angle_distribution.min();
		float perspective_reverse_distance = perspective_reverse_distance_distribution.min();
		// max() sentinel means "no perspective distortion"
		float perspective_distance = std::numeric_limits<float>::max();
		float perspective_angle = perspective_angle_distribution.min();

		{
			// the generator is shared between threads; draw all parameters
			// under one lock
			std::lock_guard<std::mutex> lock(gen_stream_mutex);

			if (apply_rotate_angle_distribution)
				rotation_angle = rotate_angle_distribution(generator);
			if (apply_scale_distribution)
				scale = scale_distribution(generator);
			if (apply_shift_x_distribution)
				shift_x = shift_x_distribution(generator);
			if (apply_shift_y_distribution)
				shift_y = shift_y_distribution(generator);
			// flips are only randomized when the distribution is non-degenerate
			if (flip_around_x_distribution.max() > flip_around_x_distribution.min())
				flip_around_x_axis = (flip_around_x_distribution(generator) == 1);
			if (flip_around_y_distribution.max() > flip_around_y_distribution.min())
				flip_around_y_axis = (flip_around_y_distribution(generator) == 1);
			if (apply_stretch_distribution)
				stretch = stretch_distribution(generator);
			stretch_angle = stretch_angle_distribution(generator);
			if (apply_perspective_reverse_distance_distribution)
			{
				// distance is stored as its reciprocal; 0 keeps the sentinel
				perspective_reverse_distance = perspective_reverse_distance_distribution(generator);
				if (perspective_reverse_distance > 0.0F)
					perspective_distance = 1.0F / perspective_reverse_distance;
			}
			perspective_angle = perspective_angle_distribution(generator);
		}

		// treat the data as a stack of 2D images (feature maps and any
		// higher dimensions flattened into image_count)
		unsigned int neuron_count_per_image = original_config.dimension_sizes[0] * original_config.dimension_sizes[1];
		unsigned int image_count = original_config.get_neuron_count() / neuron_count_per_image;
		for(unsigned int image_id = 0; image_id < image_count; ++image_id)
		{
			// OpenCV headers over the raw buffers (no pixel copies)
			cv::Mat1f dest_image(static_cast<int>(original_config.dimension_sizes[1]), static_cast<int>(original_config.dimension_sizes[0]), data_transformed + (image_id * neuron_count_per_image));
			cv::Mat1f image(static_cast<int>(original_config.dimension_sizes[1]), static_cast<int>(original_config.dimension_sizes[0]), const_cast<float *>(data) + (image_id * neuron_count_per_image));

			// skip the expensive warp when all parameters are identity
			if ((rotation_angle != 0.0F) || (scale != 1.0F) || (shift_x != 0.0F) || (shift_y != 0.0F) || (stretch != 1.0F) || (perspective_distance != std::numeric_limits<float>::max()))
			{
				// warp around the image center, then flip in place
				data_transformer_util::stretch_rotate_scale_shift_perspective(
					dest_image,
					image,
					cv::Point2f(static_cast<float>(image.cols) * 0.5F, static_cast<float>(image.rows) * 0.5F),
					rotation_angle,
					scale,
					shift_x,
					shift_y,
					stretch,
					stretch_angle,
					perspective_distance,
					perspective_angle,
					border_value);

				data_transformer_util::flip(
					dest_image,
					flip_around_x_axis,
					flip_around_y_axis);
			}
			else
			{
				// identity warp: flip straight from source to destination
				data_transformer_util::flip(
					dest_image,
					image,
					flip_around_x_axis,
					flip_around_y_axis);
			}
		}
	}