static bool pasteFIBITMAPToImage(const QPoint &pos, T_Image *dst, FIBITMAP *src)
{
    FREE_IMAGE_TYPE srcType = FreeImage_GetImageType(src);
    QSize srcSize(FreeImage_GetWidth(src), FreeImage_GetHeight(src));
    const uint8_t *srcBits = FreeImage_GetBits(src);
    int srcPitch = FreeImage_GetPitch(src);

    // FreeImage DIBs are stored bottom-up, which the inverted-source paste appears to account for.
    switch (srcType)
    {
        case FIT_BITMAP:
        {
            int bpp = FreeImage_GetBPP(src);

            switch (bpp)
            {
                case 24:
                {
                    auto wrapped = GenericImage<BgrU8>::wrap(srcBits, srcSize, srcPitch);
                    dst->template paste<ImagePasteSourceInverted>(wrapped, pos);
                    break;
                }
                case 32:
                {
                    dst->template paste<ImagePasteSourceInverted>(GenericImage<BgraU8>::wrap(srcBits, srcSize, srcPitch), pos);
                    break;
                }
                default:
                {
                    // Convert any other bit depth to 32 bpp (BGRA byte order) before pasting
                    FIBITMAP *newBitmap = FreeImage_ConvertTo32Bits(src);
                    dst->template paste<ImagePasteSourceInverted>(GenericImage<BgraU8>::wrap(FreeImage_GetBits(newBitmap), srcSize, FreeImage_GetPitch(newBitmap)), pos);
                    FreeImage_Unload(newBitmap);
                    break;
                }
            }
            break;
        }
        case FIT_RGB16:
        {
            dst->template paste<ImagePasteSourceInverted>(GenericImage<RgbaU16>::wrap(srcBits, srcSize, srcPitch), pos);
            break;
        }
        case FIT_RGBA16:
        {
            dst->template paste<ImagePasteSourceInverted>(GenericImage<RgbaU16>::wrap(srcBits, srcSize, srcPitch), pos);
            break;
        }
        default:
            qWarning() << Q_FUNC_INFO << ": Unsupported data type";
            return false;
    }

    return true;
}
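A hypothetical caller sketch for the function above (not from the original source): it loads a file through FreeImage and pastes it at the origin of an existing destination image. The helper name and path are illustrative only; the destination type is whatever T_Image instantiation the surrounding code uses.

template <class T_Image>
static bool loadAndPaste(T_Image *dst, const char *path)
{
    FIBITMAP *bitmap = FreeImage_Load(FreeImage_GetFileType(path), path);
    if (!bitmap)
        return false;

    bool ok = pasteFIBITMAPToImage(QPoint(0, 0), dst, bitmap);
    FreeImage_Unload(bitmap);
    return ok;
}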
bool TestTranspose<T>::process()
{
    NCVStatus ncvStat;
    bool rcode = false;

    NcvSize32u srcSize(this->width, this->height);

    NCVMatrixAlloc<T> d_img(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_img.isMemAllocated(), false);
    NCVMatrixAlloc<T> h_img(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_img.isMemAllocated(), false);

    NCVMatrixAlloc<T> d_dst(*this->allocatorGPU.get(), this->height, this->width);
    ncvAssertReturn(d_dst.isMemAllocated(), false);
    NCVMatrixAlloc<T> h_dst(*this->allocatorCPU.get(), this->height, this->width);
    ncvAssertReturn(h_dst.isMemAllocated(), false);
    NCVMatrixAlloc<T> h_dst_d(*this->allocatorCPU.get(), this->height, this->width);
    ncvAssertReturn(h_dst_d.isMemAllocated(), false);

    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
    NCV_SKIP_COND_BEGIN
    ncvAssertReturn(this->src.fill(h_img), false);
    NCV_SKIP_COND_END

    ncvStat = h_img.copySolid(d_img, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    NCV_SKIP_COND_BEGIN
    if (sizeof(T) == sizeof(Ncv32u))
    {
        ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)d_img.ptr(), d_img.pitch(),
                                          (Ncv32u *)d_dst.ptr(), d_dst.pitch(),
                                          NcvSize32u(this->width, this->height));
    }
    else if (sizeof(T) == sizeof(Ncv64u))
    {
        ncvStat = nppiStTranspose_64u_C1R((Ncv64u *)d_img.ptr(), d_img.pitch(),
                                          (Ncv64u *)d_dst.ptr(), d_dst.pitch(),
                                          NcvSize32u(this->width, this->height));
    }
    else
    {
        ncvAssertPrintReturn(false, "Incorrect transpose test instance", false);
    }
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    NCV_SKIP_COND_END

    ncvStat = d_dst.copySolid(h_dst_d, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    NCV_SKIP_COND_BEGIN
    if (sizeof(T) == sizeof(Ncv32u))
    {
        ncvStat = nppiStTranspose_32u_C1R_host((Ncv32u *)h_img.ptr(), h_img.pitch(),
                                               (Ncv32u *)h_dst.ptr(), h_dst.pitch(),
                                               NcvSize32u(this->width, this->height));
    }
    else if (sizeof(T) == sizeof(Ncv64u))
    {
        ncvStat = nppiStTranspose_64u_C1R_host((Ncv64u *)h_img.ptr(), h_img.pitch(),
                                               (Ncv64u *)h_dst.ptr(), h_dst.pitch(),
                                               NcvSize32u(this->width, this->height));
    }
    else
    {
        ncvAssertPrintReturn(false, "Incorrect transpose test instance", false);
    }
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    NCV_SKIP_COND_END

    // Bit-to-bit check: the host reference result must match the device result exactly
    bool bLoopVirgin = true;

    NCV_SKIP_COND_BEGIN
    //const Ncv64f relEPS = 0.005;
    for (Ncv32u i = 0; bLoopVirgin && i < this->width; i++)
    {
        for (Ncv32u j = 0; bLoopVirgin && j < this->height; j++)
        {
            if (h_dst.ptr()[h_dst.stride() * i + j] != h_dst_d.ptr()[h_dst_d.stride() * i + j])
            {
                bLoopVirgin = false;
            }
        }
    }
    NCV_SKIP_COND_END

    if (bLoopVirgin)
    {
        rcode = true;
    }

    return rcode;
}
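For reference, the property the bit-to-bit check above verifies is plain element transposition. The following stand-alone sketch only illustrates that reference semantics, it is not the NPP_staging implementation, and all names in it are made up.

#include <cstddef>

// Naive reference transpose: src is width x height, row-major, with a stride in elements;
// dst must be height x width with its own stride. Element (r, c) of dst equals element (c, r) of src.
template <typename T>
void referenceTranspose(const T *src, std::size_t srcStride,
                        T *dst, std::size_t dstStride,
                        std::size_t width, std::size_t height)
{
    for (std::size_t y = 0; y < height; ++y)
        for (std::size_t x = 0; x < width; ++x)
            dst[x * dstStride + y] = src[y * srcStride + x];
}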
bool TestResize<T>::process()
{
    NCVStatus ncvStat;
    bool rcode = false;

    Ncv32s smallWidth = this->width / this->scaleFactor;
    Ncv32s smallHeight = this->height / this->scaleFactor;
    if (smallWidth == 0 || smallHeight == 0)
    {
        return true;
    }

    NcvSize32u srcSize(this->width, this->height);

    NCVMatrixAlloc<T> d_img(*this->allocatorGPU.get(), this->width, this->height);
    ncvAssertReturn(d_img.isMemAllocated(), false);
    NCVMatrixAlloc<T> h_img(*this->allocatorCPU.get(), this->width, this->height);
    ncvAssertReturn(h_img.isMemAllocated(), false);

    NCVMatrixAlloc<T> d_small(*this->allocatorGPU.get(), smallWidth, smallHeight);
    ncvAssertReturn(d_small.isMemAllocated(), false);
    NCVMatrixAlloc<T> h_small(*this->allocatorCPU.get(), smallWidth, smallHeight);
    ncvAssertReturn(h_small.isMemAllocated(), false);
    NCVMatrixAlloc<T> h_small_d(*this->allocatorCPU.get(), smallWidth, smallHeight);
    ncvAssertReturn(h_small_d.isMemAllocated(), false);

    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
    NCV_SKIP_COND_BEGIN
    ncvAssertReturn(this->src.fill(h_img), false);
    NCV_SKIP_COND_END

    ncvStat = h_img.copySolid(d_img, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    NCV_SKIP_COND_BEGIN
    if (sizeof(T) == sizeof(Ncv32u))
    {
        ncvStat = nppiStDecimate_32u_C1R((Ncv32u *)d_img.ptr(), d_img.pitch(),
                                         (Ncv32u *)d_small.ptr(), d_small.pitch(),
                                         srcSize, this->scaleFactor, this->bTextureCache);
    }
    else if (sizeof(T) == sizeof(Ncv64u))
    {
        ncvStat = nppiStDecimate_64u_C1R((Ncv64u *)d_img.ptr(), d_img.pitch(),
                                         (Ncv64u *)d_small.ptr(), d_small.pitch(),
                                         srcSize, this->scaleFactor, this->bTextureCache);
    }
    else
    {
        ncvAssertPrintReturn(false, "Incorrect downsample test instance", false);
    }
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    NCV_SKIP_COND_END

    ncvStat = d_small.copySolid(h_small_d, 0);
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);

    NCV_SKIP_COND_BEGIN
    if (sizeof(T) == sizeof(Ncv32u))
    {
        ncvStat = nppiStDecimate_32u_C1R_host((Ncv32u *)h_img.ptr(), h_img.pitch(),
                                              (Ncv32u *)h_small.ptr(), h_small.pitch(),
                                              srcSize, this->scaleFactor);
    }
    else if (sizeof(T) == sizeof(Ncv64u))
    {
        ncvStat = nppiStDecimate_64u_C1R_host((Ncv64u *)h_img.ptr(), h_img.pitch(),
                                              (Ncv64u *)h_small.ptr(), h_small.pitch(),
                                              srcSize, this->scaleFactor);
    }
    else
    {
        ncvAssertPrintReturn(false, "Incorrect downsample test instance", false);
    }
    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
    NCV_SKIP_COND_END

    // Bit-to-bit check: the host reference result must match the device result exactly
    bool bLoopVirgin = true;

    NCV_SKIP_COND_BEGIN
    //const Ncv64f relEPS = 0.005;
    for (Ncv32u i = 0; bLoopVirgin && i < h_small.height(); i++)
    {
        for (Ncv32u j = 0; bLoopVirgin && j < h_small.width(); j++)
        {
            if (h_small.ptr()[h_small.stride() * i + j] != h_small_d.ptr()[h_small_d.stride() * i + j])
            {
                bLoopVirgin = false;
            }
        }
    }
    NCV_SKIP_COND_END

    if (bLoopVirgin)
    {
        rcode = true;
    }

    return rcode;
}
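The decimation exercised above is assumed here to be nearest-neighbor subsampling by scaleFactor, i.e. dst(i, j) = src(i * scaleFactor, j * scaleFactor); the sketch below only illustrates that assumption and is not the NPP_staging implementation.

#include <cstddef>

// Illustrative nearest-neighbor decimation (an assumption about the tested semantics, not library code):
// keeps every scaleFactor-th sample in both dimensions. Strides are in elements.
template <typename T>
void naiveDecimate(const T *src, std::size_t srcStride,
                   T *dst, std::size_t dstStride,
                   std::size_t dstWidth, std::size_t dstHeight, std::size_t scaleFactor)
{
    for (std::size_t i = 0; i < dstHeight; ++i)
        for (std::size_t j = 0; j < dstWidth; ++j)
            dst[i * dstStride + j] = src[(i * scaleFactor) * srcStride + (j * scaleFactor)];
}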
gl::Error Framebuffer9::blitImpl(const gl::Rectangle &sourceArea,
                                 const gl::Rectangle &destArea,
                                 const gl::Rectangle *scissor,
                                 bool blitRenderTarget,
                                 bool blitDepth,
                                 bool blitStencil,
                                 GLenum filter,
                                 const gl::Framebuffer *sourceFramebuffer)
{
    ASSERT(filter == GL_NEAREST);

    IDirect3DDevice9 *device = mRenderer->getDevice();
    ASSERT(device);

    mRenderer->endScene();

    if (blitRenderTarget)
    {
        const gl::FramebufferAttachment *readBuffer = sourceFramebuffer->getColorbuffer(0);
        ASSERT(readBuffer);

        RenderTarget9 *readRenderTarget = nullptr;
        gl::Error error = readBuffer->getRenderTarget(&readRenderTarget);
        if (error.isError())
        {
            return error;
        }
        ASSERT(readRenderTarget);

        const gl::FramebufferAttachment *drawBuffer = mState.getColorAttachment(0);
        ASSERT(drawBuffer);

        RenderTarget9 *drawRenderTarget = nullptr;
        error = drawBuffer->getRenderTarget(&drawRenderTarget);
        if (error.isError())
        {
            return error;
        }
        ASSERT(drawRenderTarget);

        // The getSurface calls do an AddRef so save them until after no errors are possible
        IDirect3DSurface9 *readSurface = readRenderTarget->getSurface();
        ASSERT(readSurface);

        IDirect3DSurface9 *drawSurface = drawRenderTarget->getSurface();
        ASSERT(drawSurface);

        gl::Extents srcSize(readRenderTarget->getWidth(), readRenderTarget->getHeight(), 1);
        gl::Extents dstSize(drawRenderTarget->getWidth(), drawRenderTarget->getHeight(), 1);

        RECT srcRect;
        srcRect.left   = sourceArea.x;
        srcRect.right  = sourceArea.x + sourceArea.width;
        srcRect.top    = sourceArea.y;
        srcRect.bottom = sourceArea.y + sourceArea.height;

        RECT dstRect;
        dstRect.left   = destArea.x;
        dstRect.right  = destArea.x + destArea.width;
        dstRect.top    = destArea.y;
        dstRect.bottom = destArea.y + destArea.height;

        // Clip the rectangles to the scissor rectangle
        if (scissor)
        {
            if (dstRect.left < scissor->x)
            {
                srcRect.left += (scissor->x - dstRect.left);
                dstRect.left = scissor->x;
            }
            if (dstRect.top < scissor->y)
            {
                srcRect.top += (scissor->y - dstRect.top);
                dstRect.top = scissor->y;
            }
            if (dstRect.right > scissor->x + scissor->width)
            {
                srcRect.right -= (dstRect.right - (scissor->x + scissor->width));
                dstRect.right = scissor->x + scissor->width;
            }
            if (dstRect.bottom > scissor->y + scissor->height)
            {
                srcRect.bottom -= (dstRect.bottom - (scissor->y + scissor->height));
                dstRect.bottom = scissor->y + scissor->height;
            }
        }

        // Clip the rectangles to the destination size
        if (dstRect.left < 0)
        {
            srcRect.left += -dstRect.left;
            dstRect.left = 0;
        }
        if (dstRect.right > dstSize.width)
        {
            srcRect.right -= (dstRect.right - dstSize.width);
            dstRect.right = dstSize.width;
        }
        if (dstRect.top < 0)
        {
            srcRect.top += -dstRect.top;
            dstRect.top = 0;
        }
        if (dstRect.bottom > dstSize.height)
        {
            srcRect.bottom -= (dstRect.bottom - dstSize.height);
            dstRect.bottom = dstSize.height;
        }

        // Clip the rectangles to the source size
        if (srcRect.left < 0)
        {
            dstRect.left += -srcRect.left;
            srcRect.left = 0;
        }
        if (srcRect.right > srcSize.width)
        {
            dstRect.right -= (srcRect.right - srcSize.width);
            srcRect.right = srcSize.width;
        }
        if (srcRect.top < 0)
        {
            dstRect.top += -srcRect.top;
            srcRect.top = 0;
        }
        if (srcRect.bottom > srcSize.height)
        {
            dstRect.bottom -= (srcRect.bottom - srcSize.height);
            srcRect.bottom = srcSize.height;
        }

        HRESULT result = device->StretchRect(readSurface, &srcRect, drawSurface, &dstRect, D3DTEXF_NONE);

        SafeRelease(readSurface);
        SafeRelease(drawSurface);

        if (FAILED(result))
        {
            return gl::Error(GL_OUT_OF_MEMORY, "Internal blit failed, StretchRect returned 0x%X.", result);
        }
    }

    if (blitDepth || blitStencil)
    {
        const gl::FramebufferAttachment *readBuffer = sourceFramebuffer->getDepthOrStencilbuffer();
        ASSERT(readBuffer);

        RenderTarget9 *readDepthStencil = nullptr;
        gl::Error error = readBuffer->getRenderTarget(&readDepthStencil);
        if (error.isError())
        {
            return error;
        }
        ASSERT(readDepthStencil);

        const gl::FramebufferAttachment *drawBuffer = mState.getDepthOrStencilAttachment();
        ASSERT(drawBuffer);

        RenderTarget9 *drawDepthStencil = nullptr;
        error = drawBuffer->getRenderTarget(&drawDepthStencil);
        if (error.isError())
        {
            return error;
        }
        ASSERT(drawDepthStencil);

        // The getSurface calls do an AddRef so save them until after no errors are possible
        IDirect3DSurface9 *readSurface = readDepthStencil->getSurface();
        ASSERT(readSurface);

        IDirect3DSurface9 *drawSurface = drawDepthStencil->getSurface();
        ASSERT(drawSurface);

        HRESULT result = device->StretchRect(readSurface, nullptr, drawSurface, nullptr, D3DTEXF_NONE);

        SafeRelease(readSurface);
        SafeRelease(drawSurface);

        if (FAILED(result))
        {
            return gl::Error(GL_OUT_OF_MEMORY, "Internal blit failed, StretchRect returned 0x%X.", result);
        }
    }

    return gl::Error(GL_NO_ERROR);
}
FRect GPUFilter::getRelDestRect() const
{
    glm::vec2 srcSize(m_SrcSize);
    return FRect(m_DestRect.tl.x / srcSize.x, m_DestRect.tl.y / srcSize.y,
                 m_DestRect.br.x / srcSize.x, m_DestRect.br.y / srcSize.y);
}
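As a quick sanity check of the normalization above, a hypothetical example: with m_SrcSize == (800, 600) and m_DestRect spanning (200, 150) to (600, 450), the relative rect is (0.25, 0.25, 0.75, 0.75). The stand-alone sketch below reproduces that arithmetic with glm; the values are made up and this is not libavg API.

#include <glm/vec2.hpp>
#include <cstdio>

int main()
{
    glm::vec2 srcSize(800.0f, 600.0f);
    glm::vec2 tl(200.0f, 150.0f), br(600.0f, 450.0f);
    // Each corner is divided component-wise by the source size.
    std::printf("rel rect: %.2f %.2f %.2f %.2f\n",
                tl.x / srcSize.x, tl.y / srcSize.y,
                br.x / srcSize.x, br.y / srcSize.y);   // prints 0.25 0.25 0.75 0.75
    return 0;
}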