Exemple #1
0
/*
    Use pDetectROI to select the region of interest (ROI) in which to search for faces.
    If pDetectROI is set to 0 (null), the whole image area is searched.
*/
int __stdcall FaceAlignment(const unsigned char *pbyImageData, int nImgWidth, int nImgHeight, 
    const FaceRect *pFaceRect, FeaPointF *pFeaPoints)
{
    // Locates FEATURE_POINTS_NUMBER facial landmarks inside a face rectangle.
    //
    // pbyImageData : 8-bit single-channel (grayscale) image buffer, row-major,
    //                nImgWidth x nImgHeight; only read.
    // pFaceRect    : face bounding box (inclusive pixel coordinates).
    // pFeaPoints   : output array receiving FEATURE_POINTS_NUMBER points.
    //
    // Returns ERR_NONE on success, ERR_SDKNOINIT if the module was never
    // initialised, ERR_INVALIDCALL on null/non-positive arguments.
    if (!g_isFAInited)
        return ERR_SDKNOINIT;

    if (0 == pbyImageData || 0 >= nImgWidth || 0 >= nImgHeight || 0 == pFaceRect || 0 == pFeaPoints)
        return ERR_INVALIDCALL;

    // Wrap the caller's buffer without copying. The const_cast is safe
    // because the alignment below only reads the pixels.
    cv::Mat imgData(nImgHeight, nImgWidth, CV_8UC1,
                    const_cast<unsigned char *>(pbyImageData));

    // Convert the inclusive rectangle into the regressor's bounding-box form.
    BoundingBox faceBox;
    faceBox.start_x = pFaceRect->left;
    faceBox.start_y = pFaceRect->top;
    faceBox.width = pFaceRect->right - pFaceRect->left + 1;
    faceBox.height = pFaceRect->bottom - pFaceRect->top + 1;
    faceBox.centroid_x = faceBox.start_x + faceBox.width/2.0;
    faceBox.centroid_y = faceBox.start_y + faceBox.height/2.0;

    // Build a typed header sharing the same pixel data (CV_8UC1 matches
    // Mat_<uchar>) instead of the original C-style reference cast.
    cv::Mat_<uchar> grayView(imgData);
    // NOTE(review): 20 was the constant passed by the original code
    // (presumably the regressor's initial-shape/iteration count) -- kept.
    cv::Mat_<double> current_shape = g_FA.Predict(grayView, faceBox, 20);

    // Predicted shape has one row per landmark: x in column 0, y in column 1.
    for (int i = 0; i < FEATURE_POINTS_NUMBER; ++i)
    {
        pFeaPoints[i].x = current_shape(i,0);
        pFeaPoints[i].y = current_shape(i,1);
    }

    return ERR_NONE;
}
JNIEXPORT jintArray JNICALL Java_pris_videotest_JNIClient_detectWithReturn(
		JNIEnv * env, jclass, jintArray pixels, jint width, jint height) {
	// Runs MOG2 background subtraction on one RGBA frame and returns the
	// resized foreground mask as a jintArray (one int per mask pixel).
	jint * cPixels = env->GetIntArrayElements(pixels, 0);

	// Wrap the Java pixel buffer (no copy); CV_8UC4 matches 32-bit pixels.
	cv::Mat imgData(height, width, CV_8UC4, (unsigned char*) cPixels);

	// Build the IplImage header on the stack. The original called
	// cvCreateImage() and then overwrote the returned header with
	// imgData's header, leaking both the header and its pixel buffer
	// on every call (which is why the releases were commented out).
	IplImage frameHeader = imgData.operator _IplImage();
	IplImage *frame = &frameHeader;

	cvSmooth(frame, frame, CV_GAUSSIAN, 3, 0, 0);     ///< Gaussian blur (in place, on the Java buffer)
	cv::Mat m_OriFrameMat = frame;
	resize(m_OriFrameMat, m_ResizeFrameMat,
			cv::Size(m_nVideoResizeW, m_nVideoResizeH), 0, 0, CV_INTER_LINEAR); ///< downscale to m_nVideoResizeW x m_nVideoResizeH
	m_pBGSubMOG2.operator()(m_ResizeFrameMat, foregroundMat, 0.001); ///< MOG2 foreground mask, learning rate 0.001
	m_ResizeFrameMat = foregroundMat;

	const int total = m_nVideoResizeH * m_nVideoResizeW;
	env->ReleaseIntArrayElements(pixels, cPixels, 0);

	// Widen the 8-bit mask to jint directly into the result array. The
	// original staged it in a variable-length stack array, which is
	// non-standard C++ and a stack-overflow risk for large frames.
	jintArray intArray = env->NewIntArray(total);
	jint *out = env->GetIntArrayElements(intArray, 0);
	for (int i = 0; i < total; i++) {
		out[i] = m_ResizeFrameMat.data[i];
	}
	env->ReleaseIntArrayElements(intArray, out, 0);
	return intArray;
}
Exemple #3
0
ID3D11Texture2D* D3D11Context::createTexture2DFromAppData (const char* id)
{
    // Decodes an embedded application image and uploads it as an RGBA
    // texture. Returns nullptr when the resource is missing, cannot be
    // decoded, or texture creation fails.
    int dataSize = 0;
    const char* data = demo_->appDataGet (id, dataSize);

    if (nullptr == data)
        return nullptr;

    juce::Image img = ImageCache::getFromMemory (data, dataSize);

    if (!img.isValid())
        return nullptr;

    int w = img.getWidth();
    int h = img.getHeight();

    // Round odd dimensions up to the next even value before upload.
    if ((w % 2) > 0 || (h % 2) > 0)
    {
        if ((w % 2) > 0) ++w;
        if ((h % 2) > 0) ++h;
        img = img.rescaled (w, h);
    }

    img = img.convertedToFormat (juce::Image::ARGB);

    // Staging buffer, pre-filled with 0xff so any untouched bytes stay opaque.
    size_t rowPitch = (size_t) w * 4;
    HeapBlock<uint8> mem (w * h * 4);
    memset (mem.getData(), 0xff, w * h * 4);

    juce::Image::BitmapData imgData (img, juce::Image::BitmapData::readOnly);

    // Swizzle the source's BGRA byte order into the RGBA layout the texture
    // expects, honouring the source line stride row by row.
    for (int row = 0; row < h; ++row)
    {
        const uint8* srcRow = imgData.data + (ptrdiff_t) row * imgData.lineStride;
        uint8* dstRow = mem.getData() + (size_t) row * rowPitch;

        for (int col = 0; col < w; ++col)
        {
            const uint8* sp = srcRow + (size_t) col * 4;
            uint8* dp = dstRow + (size_t) col * 4;

            dp[0] = sp[2];
            dp[1] = sp[1];
            dp[2] = sp[0];
            dp[3] = sp[3];
        }
    }

    Hold<ID3D11Texture2D> texture;
    if (texture.set (createTexture2D (w, h, 1, DXGI_FORMAT_R8G8B8A8_UNORM, mem.getData(), rowPitch, h * rowPitch)).isNull())
        return nullptr;

    return texture.drop();
}
Exemple #4
0
    //---------------------------------------------------------------------
    // Decodes an encoded image stream (any format stb_image supports) into
    // raw pixel data plus an ImageData description.
    //
    // Returns a (pixel stream, image metadata) pair. Throws
    // ERR_INTERNAL_ERROR when stb_image cannot decode the stream and
    // ERR_ITEM_NOT_FOUND for an unsupported channel count.
    Codec::DecodeResult STBIImageCodec::decode(DataStreamPtr& input) const
    {
        // Buffer stream into memory (TODO: override IO functions instead?)
        MemoryDataStream memStream(input, true);

        int width, height, components;
        // Final argument 0 = keep the file's native number of channels.
        stbi_uc* pixelData = stbi_load_from_memory(memStream.getPtr(),
                static_cast<int>(memStream.size()), &width, &height, &components, 0);

        if (!pixelData)
        {
            OGRE_EXCEPT(Exception::ERR_INTERNAL_ERROR, 
                "Error decoding image: " + String(stbi_failure_reason()),
                "STBIImageCodec::decode");
        }

        SharedPtr<ImageData> imgData(OGRE_NEW ImageData());
        MemoryDataStreamPtr output;

        imgData->depth = 1; // only 2D formats handled by this codec
        imgData->width = width;
        imgData->height = height;
        imgData->num_mipmaps = 0; // no mipmaps in non-DDS 
        imgData->flags = 0;

        // Map the stb_image channel count onto an Ogre pixel format.
        switch( components )
        {
            case 1:
                imgData->format = PF_BYTE_L;
                break;
            case 2:
                imgData->format = PF_BYTE_LA;
                break;
            case 3:
                imgData->format = PF_BYTE_RGB;
                break;
            case 4:
                imgData->format = PF_BYTE_RGBA;
                break;
            default:
                // Free the decoded buffer before throwing -- no RAII owner yet.
                stbi_image_free(pixelData);
                OGRE_EXCEPT(Exception::ERR_ITEM_NOT_FOUND,
                            "Unknown or unsupported image format",
                            "STBIImageCodec::decode");
                break;
        }
        
        size_t dstPitch = imgData->width * PixelUtil::getNumElemBytes(imgData->format);
        imgData->size = dstPitch * imgData->height;
        // 'true' transfers ownership of pixelData to the stream.
        // NOTE(review): pixelData was allocated by stb_image (malloc-family),
        // but MemoryDataStream will release it with its own deallocation
        // routine -- this assumes the two allocators are compatible; confirm
        // against the build's OGRE memory configuration.
        output.bind(OGRE_NEW MemoryDataStream(pixelData, imgData->size, true));
        
        DecodeResult ret;
        ret.first = output;
        ret.second = imgData;
        return ret;
    }
Exemple #5
0
int CBitmap::Save(LPCSTR pszFileName) {
	// Writes the in-memory image (m_image.buf, one u32 per pixel) to a BMP
	// file, in the 24- or 32-bit layout selected by m_bmpInfoHeader.biBitCount.
	// Returns 0 on success, -1 if the file cannot be opened for writing.
	int width	= m_image.width;
	int height	= m_image.height;
	int bytesPerPixel = m_bmpInfoHeader.biBitCount / 8;
	// BMP rows are padded to a 4-byte boundary.
	int bytesPerLine = ( (width + (m_bmpInfoHeader.biBitCount % 32 != 0) ) * bytesPerPixel >> 2) << 2;
	int writeSize = bytesPerLine * height;
	
	BitmapFileHeader header = {};
	header.bfType	= 0x4D42;  // 'BM'
	header.bfSize	= sizeof(BitmapFileHeader) + m_bmpInfoHeader.biSize + writeSize;
	header.bfOffBits	= sizeof(BitmapFileHeader) + m_bmpInfoHeader.biSize;
	
	m_bmpInfoHeader.biClrUsed = 0;
	m_bmpInfoHeader.biClrImportant = 0;
	m_bmpInfoHeader.biSizeImage = bytesPerLine*height;
	
	FILE	*fp = fopen(pszFileName, "wb");
	if(!fp) return -1;
	fwrite(&header, sizeof(header), 1, fp);
	fwrite(&m_bmpInfoHeader, m_bmpInfoHeader.biSize, 1, fp);
    
	u32 *pBuf = (u32 *)&m_image.buf[0];
	
	// +4 slack so a trailing partial pixel can never land past the buffer.
	std::vector<BYTE>	imgData(writeSize + 4);
	printf("SaveSize:%d, %d\n", width, height);

	// NOTE(review): the top-to-bottom (REP) and bottom-to-top (REV) branches
	// read and write identical indices, so both orders produce the same
	// bytes; if a vertical flip was intended for biHeight > 0, one branch
	// should mirror the row index -- confirm against callers before changing.
	switch(m_bmpInfoHeader.biBitCount) {
		case 24: {
			// Copy exactly three bytes (B, G, R on little-endian) per pixel.
			// The original stored a whole u32 at a 3-byte stride: an
			// unaligned store that, on rows with no padding, clobbered the
			// first byte of the next row.
			if(m_bmpInfoHeader.biHeight < 0) // Top to bottom
				REP(y, height)	REP(x, width) {
					u32 px = pBuf[width*y+x];
					BYTE *dst = &imgData[bytesPerLine*y + 3*x];
					dst[0] = (BYTE)(px);
					dst[1] = (BYTE)(px >> 8);
					dst[2] = (BYTE)(px >> 16);
				}
			else // Bottom to top
				REV(y, height)	REP(x, width) {
					u32 px = pBuf[width*y+x];
					BYTE *dst = &imgData[bytesPerLine*y + 3*x];
					dst[0] = (BYTE)(px);
					dst[1] = (BYTE)(px >> 8);
					dst[2] = (BYTE)(px >> 16);
				}
		} break;
		
		case 32: {
			if(m_bmpInfoHeader.biHeight < 0) // Top to bottom
				REP(y, height)	REP(x, width)
					*(u32 *)&imgData[(width*y+x)*4] = pBuf[y * width + x];
			else // Bottom to top
				REV(y, height)	REP(x, width)
					*(u32 *)&imgData[(width*y+x)*4] = pBuf[y * width + x];
		} break;
	}
	fwrite(&imgData[0], writeSize, 1, fp);
	fclose(fp);
	// Fixed: the original passed width/height to a format string that has no
	// conversion specifiers.
	printf("Saved!\n");
	
	return 0;
}
JNIEXPORT jboolean JNICALL Java_pris_videotest_JNIClient_detect(JNIEnv * env,
		jclass, jbyteArray pixels, jint width, jint height) {
	// Motion detector: runs MOG2 background subtraction on one grayscale
	// frame and reports whether the foreground-pixel ratio crosses the
	// adaptive threshold m_rFGSegThreshold.
	jbyte * cPixels = env->GetByteArrayElements(pixels, 0);

	// Wrap the Java byte buffer as an 8-bit grayscale image (no copy).
	cv::Mat imgData(height, width, CV_8UC1, (unsigned char*) cPixels);

	// Stack IplImage header sharing imgData's pixels. The original called
	// cvCreateImage() and then overwrote the returned header, leaking both
	// the header and its pixel buffer on every call.
	IplImage frameHeader = imgData.operator _IplImage();
	IplImage *frame = &frameHeader;

	cvSmooth(frame, frame, CV_GAUSSIAN, 3, 0, 0);     ///< Gaussian blur (in place, on the Java buffer)
	cv::Mat m_OriFrameMat = frame;
	resize(m_OriFrameMat, m_ResizeFrameMat,
			cv::Size(m_nVideoResizeW, m_nVideoResizeH), 0, 0, CV_INTER_LINEAR); ///< downscale for cheap analysis
	m_pBGSubMOG2.operator()(m_ResizeFrameMat, foregroundMat, 0.001); ///< MOG2 foreground mask, learning rate 0.001
	m_ResizeFrameMat = foregroundMat;

	// Count non-zero (foreground) pixels of the binary mask. countNonZero is
	// stride-aware, unlike the original's raw data[] scan.
	int k = cv::countNonZero(m_ResizeFrameMat);

	// Release the Java buffer once, before any return path (the original
	// repeated the release before each of its three returns).
	env->ReleaseByteArrayElements(pixels, cPixels, 0);

	double k_ratio = (double) k / (double) (m_nVideoResizeW * m_nVideoResizeH);
	if (k_ratio <= 0.01)
		return false;

	// Re-calibrate the threshold when the ratio drifts far away from it.
	if (k_ratio / m_rFGSegThreshold > 1.5 || k_ratio / m_rFGSegThreshold < 0.79)
		m_rFGSegThreshold = k_ratio;

	// Motion detected for this frame.
	return k_ratio >= m_rFGSegThreshold;
}
Exemple #7
0
    // Reads Exif/IPTC/XMP metadata from a PGF file. PGF stores metadata as a
    // small embedded image in the header's user-data area; this method
    // decodes that embedded image and copies its metadata into this one.
    // Throws Exiv2::Error on I/O failure, wrong file type, or a truncated
    // header.
    void PgfImage::readMetadata()
    {
#ifdef DEBUG
        std::cerr << "Exiv2::PgfImage::readMetadata: Reading PGF file " << io_->path() << "\n";
#endif
        if (io_->open() != 0)
        {
            throw Error(9, io_->path(), strError());
        }
        IoCloser closer(*io_); // closes io_ on scope exit, even when throwing
        // Ensure that this is the correct image type
        if (!isPgfType(*io_, true))
        {
            if (io_->error() || io_->eof()) throw Error(14);
            throw Error(3, "PGF");
        }
        clearMetadata();

        readPgfMagicNumber(*io_);

        uint32_t headerSize = readPgfHeaderSize(*io_);

        readPgfHeaderStructure(*io_, &pixelWidth_, &pixelHeight_);

        // And now, the most interesting part: the user-data byte array where
        // metadata are stored as a small embedded image.

        // Bytes left in the header after the fields read above (the 8
        // accounts for the magic number and header-size field preceding it).
        long size = 8 + headerSize - io_->tell();

#ifdef DEBUG
        std::cout << "Exiv2::PgfImage::readMetadata: Found Image data (" << size << " bytes)\n";
#endif

        if (size < 0) throw Error(20);  // header inconsistency
        if (size == 0) return;          // no embedded metadata image present

        DataBuf imgData(size);
        std::memset(imgData.pData_, 0x0, imgData.size_);
        long bufRead = io_->read(imgData.pData_, imgData.size_);
        if (io_->error()) throw Error(14);
        if (bufRead != imgData.size_) throw Error(20);

        // Decode the embedded image and copy its metadata into this image.
        Image::AutoPtr image = Exiv2::ImageFactory::open(imgData.pData_, imgData.size_);
        image->readMetadata();
        exifData() = image->exifData();
        iptcData() = image->iptcData();
        xmpData()  = image->xmpData();

    } // PgfImage::readMetadata
u_int32_t ImageLoader::loadImage(unsigned i_imgSize, char *p_imgData, Mat &img)
{
    vector<char> imgData(i_imgSize);
    memcpy(imgData.data(), p_imgData, i_imgSize);

    try
    {
        img = imdecode(imgData, CV_LOAD_IMAGE_GRAYSCALE);
    }
    catch (cv::Exception& e) // The decoding of an image can raise an exception.
    {
        const char* err_msg = e.what();
        cout << "Exception caught: " << err_msg << endl;
        return IMAGE_NOT_DECODED;
    }

    if (!img.data)
    {
        cout << "Error reading the image." << std::endl;
        return IMAGE_NOT_DECODED;
    }

    unsigned i_imgWidth = img.cols;
    unsigned i_imgHeight = img.rows;


    if (i_imgWidth > 1000
        || i_imgHeight > 1000)
    {
        cout << "Image too large." << endl;
        return IMAGE_SIZE_TOO_BIG;
    }

#if 1
    if (i_imgWidth < 200
        || i_imgHeight < 200)
    {
        cout << "Image too small." << endl;
        return IMAGE_SIZE_TOO_SMALL;
    }
#endif

    return OK;
}
void GLImagePane::saveScreen(const char* filePath) {
	// Captures the current GL framebuffer, draws a 2px black border, flips
	// it vertically, and writes it to 'filePath' via OpenCV.
	wxGLCanvas::SetCurrent(*m_context);
	wxPaintDC(this); // only to be used in paint events. use wxClientDC to paint outside the paint event
	// NOTE(review): width comes from the parent window, height from this
	// canvas -- presumably the canvas fills the parent horizontally; confirm.
	int w = GetParent()->GetSize().x;
	int h = GetSize().y;
	ImgRGB imgData(w, h);
	// NOTE(review): x = -1 starts one column off-screen and width = w - 1
	// reads one column short of the buffer; with GL's default pack alignment
	// this can also skew row starts. The border lines drawn below appear to
	// mask the resulting edge artifacts -- confirm before changing.
	glReadPixels(-1, 0, w - 1, h, GL_BGR, GL_UNSIGNED_BYTE, imgData.data);
	cv::Mat img(h, w, CV_8UC3, imgData);
	// Draw a 2px black frame along all four edges.
	cv::line(img, cv::Point2i(0, 0), cv::Point2i(0, h - 1), cv::Scalar(0, 0, 0),
			2, CV_AA, 0);
	cv::line(img, cv::Point2i(0, h - 1), cv::Point2i(w - 1, h - 1),
			cv::Scalar(0, 0, 0), 2, CV_AA, 0);
	cv::line(img, cv::Point2i(w - 1, h - 1), cv::Point2i(w - 1, 0),
			cv::Scalar(0, 0, 0), 2, CV_AA, 0);
	cv::line(img, cv::Point2i(w - 1, 0), cv::Point2i(0, 0), cv::Scalar(0, 0, 0),
			2, CV_AA, 0);
	// GL rows are bottom-up; flip in place (cvImg shares img's data) so the
	// saved file is top-down.
	CvMat cvImg = img;
	cvFlip(&cvImg, 0);
	cv::imwrite(filePath, img);
}
Exemple #10
0
// Replacement for the game's "movie slide" playback: shows a full-screen
// still image (letterboxed into the back buffer) with an optional voice-over
// and subtitles, looping until the sound finishes (with a 3-second minimum)
// or a key is pressed. Returns 0 on success, 1 if the image cannot be loaded.
int __cdecl HookedPlayMovieSlide(const char* imageFile, const char* soundFile, const SubtitleLine* subtitles, int flags, int soundtrackId) {
	logger->info("Play Movie Slide {} {} {} {}", imageFile, soundFile, flags, soundtrackId);

	// Load img into memory using TIO
	unique_ptr<vector<uint8_t>> imgData(TioReadBinaryFile(imageFile));

	if (!imgData) {
		logger->error("Unable to load the image file {}", imageFile);
		return 1; // Can't play because we cant load the file
	}

	auto device = graphics->device();
	gfx::ImageFileInfo info;
	// Decode the image bytes into a D3D surface; 'info' receives its size.
	auto surface(gfx::LoadImageToSurface(graphics->device(), *imgData.get(), info));
	
	movieFuncs.MovieIsPlaying = true;

	device->ShowCursor(FALSE);

	// Clear screen with black color and present immediately
	device->Clear(0, nullptr, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, 0, 0, 0);
	device->Present(nullptr, nullptr, nullptr, nullptr);

	SubtitleRenderer subtitleRenderer(subtitles);

	// Scale the image rect to fit inside the back buffer, keeping it centred.
	TigRect bbRect(0, 0, graphics->backBufferDesc().Width, graphics->backBufferDesc().Height);
	TigRect destRect(0, 0, info.width, info.height);
	destRect.FitInto(bbRect);
	RECT fitDestRect = destRect.ToRect();

	Stopwatch sw;

	TigSoundStreamWrapper stream;

	// Optional voice-over; playback continues without it on failure.
	if (soundFile) {
		if (!stream.Play(soundFile, TigSoundType::Voice)) {
			logger->error("Unable to play sound {} during slideshow.", soundFile);
		} else {
			stream.SetVolume(*tigSoundAddresses.movieVolume);
		}
	}

	// Render loop: keep presenting the slide while the sound is playing; if
	// there is no valid sound, hold the slide for at least 3 seconds.
	bool keyPressed = false;
	while (!keyPressed && (!stream.IsValid() || stream.IsPlaying() || sw.GetElapsedMs() < 3000)) {
		D3DLOG(device->Clear(0, nullptr, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, 0, 0, 0));

		D3DLOG(device->BeginScene());
		D3DLOG(device->StretchRect(surface, NULL, graphics->backBuffer(), &fitDestRect, D3DTEXF_LINEAR));
		subtitleRenderer.Render();
		D3DLOG(device->EndScene());
		D3DLOG(device->Present(NULL, NULL, NULL, NULL));

		templeFuncs.ProcessSystemEvents();

		// Drain the message queue, looking for a key-down to end the slide.
		TigMsg msg;
		while (!msgFuncs.Process(&msg)) {
			// Flags 1 seems to disable skip via keyboard. Also seems unused.
			if (!(flags & 1) && msg.type == TigMsgType::KEYSTATECHANGE && LOBYTE(msg.arg2) == 1) {
				// TODO Wait for the key to be unpressed again
				keyPressed = true;
				break;
			}
		}
	}

	movieFuncs.MovieIsPlaying = false;
	device->ShowCursor(TRUE);

	return 0;
}