Example #1
/** \brief Hide a file (or any other ifstream) in the loaded picture
 * Stores the stream size in this->hundredPercentValue and the number of processed bytes in this->doneBytes
 * \param &toHideFileStream std::ifstream the file stream whose contents should be hidden in the picture (must be opened in binary mode)
 * \param &password const std::string the password (not used yet)
 * \throw SteganoException::ImgNotLoaded if no container file is loaded
 * \throw SteganoException::Img2Small if the loaded image is too small to hide the given file
 * \throw SteganoException::FileStreamClosed if the specified file stream is not open
 */
void SteganoHide::hideFile(std::ifstream &toHideFileStream, const std::string &password) {
    if(!this->steganoImage.isValid()) {
        throw SteganoException::ImgNotLoaded();
    }

    if(!toHideFileStream.is_open()) {
        throw SteganoException::FileStreamClosed();
    }

    // divide by 2 because in the worst case every byte occupies two pixels
    if(getFileStreamSizeInBytes(toHideFileStream) > (this->xResolution * this->yResolution) / 2) {
        throw SteganoException::Img2Small();
    }

    this->origImageBackup = this->steganoImage;
    this->doneBytes = 0;
    // 2 * pixelAmount because every pixel is normalized first and reset afterwards
    this->hundredPercentValue = getFileStreamSizeInBytes(toHideFileStream) + 2 * this->pixelAmount;

    normalizeImage();
    unsigned int loopCount = 0;
    char nextByte;
    // read byte by byte; checking get() directly avoids hiding a spurious EOF value
    while(toHideFileStream.get(nextByte)) {
        Pixel hidingPixel = calculateHidingPosition(loopCount);

        hideByteAtPixel(nextByte, hidingPixel);

        loopCount++;
        this->doneBytes++;
    }

    drawFinishPixel(calculateHidingPosition(loopCount));
    resetNormalizedImage();
}
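
A minimal usage sketch for hideFile, assuming a hypothetical loadImage() loader and header name (neither appears in the example above); the file stream is opened in binary mode as the documentation requires:

#include <fstream>
#include <iostream>
#include "SteganoHide.hpp"   // assumed header name

int main() {
    SteganoHide stegano;
    stegano.loadImage("container.png");            // hypothetical loader, not shown above

    std::ifstream secretFile("secret.bin", std::ios::binary);
    try {
        stegano.hideFile(secretFile, "password");  // the password is not used yet
    } catch(const SteganoException::Img2Small &) { // assumed to be a catchable exception type
        std::cerr << "container image too small for this file" << std::endl;
    }
    return 0;
}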
Example #2
/** \brief Hide a phrase (string) in the loaded picture
 *
 * \param &phraseToHide const std::string the information that should be hidden in the picture
 * \param &password const std::string a password that is not used right now
 * \throw SteganoException::ImgNotLoaded if no container file is loaded.
 * \throw SteganoException::Img2Small if the loaded image is too small to hide the given phrase
 */
void SteganoHide::hidePhrase(const std::string &phraseToHide, const std::string &password) {
    if(!this->steganoImage.isValid()) {
        throw SteganoException::ImgNotLoaded();
    }

    // divide by 2 because in the worst case every byte occupies two pixels
    if(phraseToHide.size() > (this->pixelAmount) / 2) {
        throw SteganoException::Img2Small();
    }

    this->doneBytes = 0;
    this->hundredPercentValue = phraseToHide.size() + 2 * this->pixelAmount;
    this->origImageBackup = this->steganoImage;

    normalizeImage();

    for(unsigned int i = 0; i < phraseToHide.size(); i++) {
        Pixel hidingPixel = calculateHidingPosition(i);

        hideByteAtPixel(phraseToHide.at(i), hidingPixel);
        this->doneBytes++;
    }

    drawFinishPixel(calculateHidingPosition(phraseToHide.size()));
    resetNormalizedImage();
}
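
hidePhrase can be called the same way; as above, the loader call and the header name are assumptions:

#include <iostream>
#include "SteganoHide.hpp"   // assumed header name

int main() {
    SteganoHide stegano;
    stegano.loadImage("container.png");                    // hypothetical loader, not shown above
    try {
        stegano.hidePhrase("secret message", "password");  // the password is not used yet
    } catch(const SteganoException::Img2Small &) {
        std::cerr << "the picture holds at most pixelAmount / 2 phrase bytes" << std::endl;
    }
    return 0;
}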
Example #3
void NormalizeFilter::filterImage()
{
    if (m_refImage.isNull())
    {
        m_refImage = m_orgImage;
    }

    normalizeImage();
    m_destImage = m_orgImage;
}
Example #4
Mat ImageNormalizer::equalizeImageHistogram(Mat inputImage)
{
	Mat equalizedImage;
	Mat inputImageCopy;
	inputImage.copyTo(inputImageCopy);

	cv::Point min_loc, max_loc;
	double min, max;
	cv::minMaxLoc(inputImage, &min, &max, &min_loc, &max_loc);

	equalizedImage = normalizeImage(inputImageCopy - min);

	return equalizedImage;
}
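
A short usage sketch for equalizeImageHistogram; the header name and the default construction of ImageNormalizer are assumptions, only the method signature comes from the code above:

#include <opencv2/opencv.hpp>
#include "ImageNormalizer.h"   // assumed header name

int main() {
	cv::Mat input = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
	ImageNormalizer normalizer;   // assumed to be default-constructible
	cv::Mat equalized = normalizer.equalizeImageHistogram(input);
	cv::imwrite("equalized.png", equalized);
	return 0;
}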
Example #5
// Read the PNG file using the libpng.  The low-level interface is used here
// because we want to do various transformations (including setting the
// matte background if any, and applying gamma) which can't be done with
// the high-level interface. The scanline also begins at the bottom of
// the image (per SecondLife conventions) instead of at the top, so we
// must assign row-pointers in "reverse" order.
BOOL LLPngWrapper::readPng(U8* src, LLImageRaw* rawImage, ImageInfo *infop)
{
	try
	{
		// Create and initialize the png structures
		mReadPngPtr = png_create_read_struct(PNG_LIBPNG_VER_STRING,
			this, &errorHandler, NULL);
		if (mReadPngPtr == NULL)
		{
			throw "Problem creating png read structure";
		}

		// Allocate/initialize the memory for image information.
		mReadInfoPtr = png_create_info_struct(mReadPngPtr);
		if (mReadInfoPtr == NULL)
		{
			throw "Problem creating png info structure";
		}

		// Set up the input control
		PngDataInfo dataPtr;
		dataPtr.mData = src;
		dataPtr.mOffset = 0;

		png_set_read_fn(mReadPngPtr, &dataPtr, &readDataCallback);
		png_set_sig_bytes(mReadPngPtr, 0);

		// setup low-level read and get header information
		png_read_info(mReadPngPtr, mReadInfoPtr);
		png_get_IHDR(mReadPngPtr, mReadInfoPtr, &mWidth, &mHeight,
			&mBitDepth, &mColorType, &mInterlaceType,
			&mCompressionType, &mFilterMethod);

		// Normalize the image, then get updated image information
		// after transformations have been applied
		normalizeImage();
		updateMetaData();

		// If a raw object is supplied, read the PNG image into its
		// data space
		if (rawImage != NULL)
		{
			rawImage->resize(static_cast<U16>(mWidth),
				static_cast<U16>(mHeight), mChannels);
			U8 *dest = rawImage->getData();
			int offset = mWidth * mChannels;

			// Set up the row pointers and read the image
			mRowPointers = new U8* [mHeight];
			for (U32 i=0; i < mHeight; i++)
			{
				mRowPointers[i] = &dest[(mHeight-i-1)*offset];
			}

			png_read_image(mReadPngPtr, mRowPointers);

			// Finish up, ensures all metadata are updated
			png_read_end(mReadPngPtr, NULL);
		}

		// If an info object is supplied, copy the relevant info
		if (infop != NULL)
		{
			infop->mHeight = static_cast<U16>(mHeight);
			infop->mWidth = static_cast<U16>(mWidth);
			infop->mComponents = mChannels;
		}

		mFinalSize = dataPtr.mOffset;
	}
	catch (png_const_charp msg)
	{
		mErrorMessage = msg;
		releaseResources();
		return (FALSE);
	}

	// Clean up and return
	releaseResources();
	return (TRUE);
}
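
A sketch of calling readPng on an in-memory PNG buffer; constructing LLPngWrapper directly and ImageInfo being nested in the wrapper are assumptions here, while the readPng signature and the info fields come from the code above:

// Usage sketch -- object construction is assumed, only readPng() itself appears above
void decodePng(U8* pngData, LLImageRaw* rawImage)
{
	LLPngWrapper pngWrapper;
	LLPngWrapper::ImageInfo pngInfo;   // assumed to be a nested type

	if (pngWrapper.readPng(pngData, rawImage, &pngInfo))
	{
		// rawImage now holds the decoded pixels, scanlines stored bottom-to-top;
		// pngInfo.mWidth, pngInfo.mHeight and pngInfo.mComponents describe the image
	}
}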
Example #6
void TextDetector::detect(IplImage * input,
		const struct TextDetectionParams &params,
		std::vector<Chain> &chains,
		std::vector<std::pair<Point2d, Point2d> > &compBB,
		std::vector<std::pair<CvPoint, CvPoint> > &chainBB) {
	assert(input->depth == IPL_DEPTH_8U);
	assert(input->nChannels == 3);
	// Convert to grayscale
	IplImage * grayImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
	cvCvtColor(input, grayImage, CV_RGB2GRAY);
	// Create Canny Image
	double threshold_low = 175;
	double threshold_high = 320;
	IplImage * edgeImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
	cvCanny(grayImage, edgeImage, threshold_low, threshold_high, 3);
	cvSaveImage("canny.png", edgeImage);

	// Create gradient X, gradient Y
	IplImage * gaussianImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F,
			1);
	cvConvertScale(grayImage, gaussianImage, 1. / 255., 0);
	cvSmooth(gaussianImage, gaussianImage, CV_GAUSSIAN, 5, 5);
	IplImage * gradientX = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
	IplImage * gradientY = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
	cvSobel(gaussianImage, gradientX, 1, 0, CV_SCHARR);
	cvSobel(gaussianImage, gradientY, 0, 1, CV_SCHARR);
	// smoothtype 3 == CV_MEDIAN in the old C API: a 3-pixel median smooth of each gradient
	cvSmooth(gradientX, gradientX, 3, 3);
	cvSmooth(gradientY, gradientY, 3, 3);
	cvReleaseImage(&gaussianImage);

	// Calculate SWT and return ray vectors
	std::vector<Ray> rays;
	IplImage * SWTImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
	for (int row = 0; row < input->height; row++) {
		float* ptr = (float*) (SWTImage->imageData + row * SWTImage->widthStep);
		for (int col = 0; col < input->width; col++) {
			*ptr++ = -1;
		}
	}
	strokeWidthTransform(edgeImage, gradientX, gradientY, params, SWTImage,
			rays);
	cvSaveImage("SWT_0.png", SWTImage);
	SWTMedianFilter(SWTImage, rays);
	cvSaveImage("SWT_1.png", SWTImage);

	IplImage * output2 = cvCreateImage(cvGetSize(input), IPL_DEPTH_32F, 1);
	normalizeImage(SWTImage, output2);
	cvSaveImage("SWT_2.png", output2);
	IplImage * saveSWT = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
	cvConvertScale(output2, saveSWT, 255, 0);
	cvSaveImage("SWT.png", saveSWT);
	cvReleaseImage(&output2);
	cvReleaseImage(&saveSWT);

	// Calculate legally connected components from SWT and gradient image.
	// return type is a vector of vectors, where each outer vector is a component and
	// the inner vector contains the (y,x) of each pixel in that component.
	std::vector<std::vector<Point2d> > components =
			findLegallyConnectedComponents(SWTImage, rays);

	// Filter the components
	std::vector<std::vector<Point2d> > validComponents;
	std::vector<Point2dFloat> compCenters;
	std::vector<float> compMedians;
	std::vector<Point2d> compDimensions;
	filterComponents(SWTImage, components, validComponents, compCenters,
			compMedians, compDimensions, compBB, params);

	IplImage * output3 = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 3);
	renderComponentsWithBoxes(SWTImage, validComponents, compBB, output3);
	cvSaveImage("components.png", output3);
	cvReleaseImage ( &output3 );

	// Make chains of components
	chains = makeChains(input, validComponents, compCenters, compMedians,
			compDimensions, params);

	IplImage * output = cvCreateImage(cvGetSize(grayImage), IPL_DEPTH_8U, 3);
	renderChainsWithBoxes(SWTImage, validComponents, chains, compBB, chainBB, output);
	cvSaveImage("text-boxes.png", output);


    std::cout << "-------- detect end --------" << std::endl;

	cvReleaseImage(&output);
	cvReleaseImage(&gradientX);
	cvReleaseImage(&gradientY);
	cvReleaseImage(&SWTImage);
	cvReleaseImage(&edgeImage);
	cvReleaseImage(&grayImage);
	return;
}
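
A sketch of driving TextDetector::detect on a 3-channel 8-bit image; the header names and the default construction of TextDetector and TextDetectionParams are assumptions, only the detect signature and the input assertions come from the code above:

#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <vector>
#include "TextDetector.h"   // assumed header name

int main() {
	// load an 8-bit, 3-channel image, as the asserts in detect() require
	IplImage* input = cvLoadImage("scene.jpg", CV_LOAD_IMAGE_COLOR);
	TextDetectionParams params;   // assumed defaults; field values are not shown here
	std::vector<Chain> chains;
	std::vector<std::pair<Point2d, Point2d> > compBB;
	std::vector<std::pair<CvPoint, CvPoint> > chainBB;

	TextDetector detector;        // assumed to be default-constructible
	detector.detect(input, params, chains, compBB, chainBB);
	// chainBB now holds one bounding box per detected text chain (also rendered to text-boxes.png)

	cvReleaseImage(&input);
	return 0;
}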