// Copies one source fragment (clipped to the range [pos, endPos)) from the
// source document into the destination document at insertPos, translating
// format indexes into the destination's format collection on the way.
// Returns the number of characters consumed from the source.
// objectIndex: destination object index to attach to the fragment's char
// format, or -1 for none.
int QTextCopyHelper::appendFragment(int pos, int endPos, int objectIndex)
{
    QTextDocumentPrivate::FragmentIterator fragIt = src->find(pos);
    const QTextFragmentData * const frag = fragIt.value();

    // An object index may only be supplied for a single-character fragment
    // whose char format actually refers to an object.
    Q_ASSERT(objectIndex == -1
             || (frag->size_array[0] == 1 && src->formatCollection()->format(frag->format).objectIndex() != -1));

    // Pick the char format: either the caller-forced one, or the source
    // fragment's format converted into the destination collection.
    int charFormatIndex;
    if (forceCharFormat)
        charFormatIndex = primaryCharFormatIndex;
    else
        charFormatIndex = convertFormatIndex(frag->format, objectIndex);

    // pos may land in the middle of the fragment; copy only from there, and
    // never past endPos.
    const int inFragmentOffset = qMax(0, pos - fragIt.position());
    int charsToCopy = qMin(int(frag->size_array[0] - inFragmentOffset), endPos - pos);

    QTextBlock nextBlock = src->blocksFind(pos + 1);

    int blockIdx = -2;
    if (nextBlock.position() == pos + 1) {
        // The character at pos ends a block; remember the (converted) format
        // of the block that starts right after it.
        blockIdx = convertFormatIndex(nextBlock.blockFormat());
    } else if (pos == 0 && insertPos == 0) {
        // Copying from the very start into an empty destination: carry the
        // first source block's block/char format over to the destination's
        // initial block.
        dst->setBlockFormat(dst->blocksBegin(), dst->blocksBegin(), convertFormat(src->blocksBegin().blockFormat()).toBlockFormat());
        dst->setCharFormat(-1, 1, convertFormat(src->blocksBegin().charFormat()).toCharFormat());
    }

    QString txtToInsert(originalText.constData() + frag->stringPosition + inFragmentOffset, charsToCopy);

    // A lone paragraph-separator / frame-boundary character becomes a block
    // boundary in the destination rather than plain text.
    if (txtToInsert.length() == 1
        && (txtToInsert.at(0) == QChar::ParagraphSeparator
            || txtToInsert.at(0) == QTextBeginningOfFrame
            || txtToInsert.at(0) == QTextEndOfFrame
           )
       ) {
        dst->insertBlock(txtToInsert.at(0), insertPos, blockIdx, charFormatIndex);
        ++insertPos;
    } else {
        if (nextBlock.textList()) {
            QTextBlock dstBlock = dst->blocksFind(insertPos);
            if (!dstBlock.textList()) {
                // insert a new text block with the block and char format from the
                // source block to make sure that the following text fragments
                // end up in a list as they should
                int listBlockFormatIndex = convertFormatIndex(nextBlock.blockFormat());
                int listCharFormatIndex = convertFormatIndex(nextBlock.charFormat());
                dst->insertBlock(insertPos, listBlockFormatIndex, listCharFormatIndex);
                ++insertPos;
            }
        }
        dst->insert(insertPos, txtToInsert, charFormatIndex);

        // Preserve the source block's user state (e.g. syntax-highlight
        // state) on the destination block, if it has one.
        const int userState = nextBlock.userState();
        if (userState != -1)
            dst->blocksFind(insertPos).setUserState(userState);

        insertPos += txtToInsert.length();
    }

    return charsToCopy;
}
// Captures the current screen contents via the composer service and fills
// in the caller's frame descriptor. Returns 0 on success, or the android
// status code from captureScreen() on failure.
virtual int consumePendingFrame(Minicap::Frame* frame) {
    // Drop any previously held heap before requesting a new capture.
    mHeap = NULL;

    uint32_t width;
    uint32_t height;
    android::PixelFormat format;

    android::status_t err = mComposer->captureScreen(mDisplayId, &mHeap,
        &width, &height, &format, mDesiredWidth, mDesiredHeight, 0, -1UL);
    if (err != android::NO_ERROR) {
        MCERROR("ComposerService::captureScreen() failed %s", error_name(err));
        return err;
    }

    // The capture lives in mHeap; expose it through the frame descriptor.
    frame->data = mHeap->getBase();
    frame->size = mHeap->getSize();
    frame->width = width;
    frame->height = height;
    // Rows are tightly packed in the captured heap, so stride == width.
    frame->stride = width;
    frame->format = convertFormat(format);
    frame->bpp = android::bytesPerPixel(format);

    return 0;
}
// Reads back the pixels of a DirectDraw surface into a new image::Image.
// Returns NULL if the surface cannot be locked; an unsupported pixel format
// yields NULL as well (with a warning), but the surface is still unlocked.
static image::Image *
getSurfaceImage(IDirect3DDevice7 *pDevice, IDirectDrawSurface7 *pSurface)
{
    DDSURFACEDESC2 Desc;
    ZeroMemory(&Desc, sizeof Desc);
    Desc.dwSize = sizeof Desc;

    // Lock read-only to get a CPU pointer (lpSurface) plus pitch/size info.
    HRESULT hr = pSurface->Lock(NULL, &Desc,
                                DDLOCK_WAIT | DDLOCK_READONLY | DDLOCK_SURFACEMEMORYPTR | DDLOCK_NOSYSLOCK,
                                NULL);
    if (FAILED(hr)) {
        std::cerr << "warning: IDirectDrawSurface7::Lock failed\n";
        return NULL;
    }

    image::Image *image = NULL;

    D3DFORMAT Format = convertFormat(Desc.ddpfPixelFormat);
    if (Format == D3DFMT_UNKNOWN) {
        std::cerr << "warning: unsupported DDPIXELFORMAT\n";
    } else {
        image = ConvertImage(Format, Desc.lpSurface, Desc.lPitch, Desc.dwWidth, Desc.dwHeight);
    }

    // Always release the lock, even when conversion was skipped.
    pSurface->Unlock(NULL);

    return image;
}
bool TextureImpl_DirectX9::Create(const iSize &size, TEXTURE_COLOR_FORMAT format, uint32 usage, uint32 mipLevels) { if (_texture) _texture->Release(); _texture = NULL; if (LPDIRECT3DDEVICE9 device = DrawManager::GetDeviceHandle()) { _format = convertFormat(format); _usage = convertUsage(usage); if (D3DXCreateTexture(device, size.width, size.height, 0, _usage, _format, D3DPOOL_DEFAULT, &_texture)==S_OK) { _size = size; // MakeDepthSurface if(usage & TEXTURE_USAGE_RENDERTARGET) { if(device->CreateDepthStencilSurface(size.width, size.height, D3DFMT_D24S8, D3DMULTISAMPLE_NONE, 0, TRUE, &_depthSurface, NULL) != S_OK) { _texture->Release(); _texture = NULL; return false; } } return true; } else { _format = D3DFMT_UNKNOWN; } } return false; }
// Locks the next buffer from the consumer and exposes it through the
// caller's frame descriptor. Returns 0 on success; -EINTR is passed through
// silently (interrupted wait), other errors are logged and returned.
virtual int consumePendingFrame(Minicap::Frame* frame) {
    android::status_t err = mConsumer->lockNextBuffer(&mBuffer);
    if (err != android::NO_ERROR) {
        // -EINTR is an expected interruption, not worth a log line.
        if (err != -EINTR) {
            MCERROR("Unable to lock next buffer %s (%d)", error_name(err), err);
        }
        return err;
    }

    frame->data = mBuffer.data;
    frame->format = convertFormat(mBuffer.format);
    frame->width = mBuffer.width;
    frame->height = mBuffer.height;
    frame->stride = mBuffer.stride;
    frame->bpp = android::bytesPerPixel(mBuffer.format);
    // Total size derives from the (possibly padded) stride, not the width.
    frame->size = mBuffer.stride * mBuffer.height * frame->bpp;

    // Remember that mBuffer is locked so it can be released later.
    mHaveBuffer = true;

    return 0;
}
// Loads this appearance's label settings from the [szIniGroup] section of
// the given INI file. `prefix` is currently unused here.
void Appearance::load(char *szIni, char *szIniGroup, char *prefix)
{
    // Label format string; convertFormat() post-processes it in place.
    GetPrivateProfileString(szIniGroup, LABEL_FORMAT, "", labelFormat, sizeof(labelFormat), szIni);
    convertFormat(labelFormat, sizeof(labelFormat));

    // Raw label selector string.
    GetPrivateProfileString(szIniGroup, LABEL_SELECTOR, "", labelSelector, sizeof(labelSelector), szIni);

    // Meter index for the label; -1 when the key is absent.
    const int meterIndex = GetPrivateProfileInt(szIniGroup, LABEL_METER, -1, szIni);
    setLabelMeterIndex(meterIndex);
}
// Parses a raw .astc file into a single CompressedImageData sub-image.
// Returns the newly allocated pixel-data block (caller takes ownership) and
// fills the dataSize/format/sRGB out-parameters. Throws love::Exception on
// any validation or allocation failure.
uint8 *ASTCHandler::parse(filesystem::FileData *filedata, std::vector<CompressedImageData::SubImage> &images, size_t &dataSize, CompressedImageData::Format &format, bool &sRGB)
{
	if (!canParse(filedata))
		throw love::Exception("Could not decode compressed data (not an .astc file?)");

	ASTCHeader header = *(const ASTCHeader *) filedata->getData();

	// Map the block dimensions from the header to a known ASTC format enum.
	CompressedImageData::Format cformat = convertFormat(header.blockdimX, header.blockdimY, header.blockdimZ);

	if (cformat == CompressedImageData::FORMAT_UNKNOWN)
		throw love::Exception("Could not parse .astc file: unsupported ASTC format %dx%dx%d.", header.blockdimX, header.blockdimY, header.blockdimZ);

	// Image dimensions are stored as 24-bit little-endian integers.
	uint32 sizeX = header.sizeX[0] + (header.sizeX[1] << 8) + (header.sizeX[2] << 16);
	uint32 sizeY = header.sizeY[0] + (header.sizeY[1] << 8) + (header.sizeY[2] << 16);
	uint32 sizeZ = header.sizeZ[0] + (header.sizeZ[1] << 8) + (header.sizeZ[2] << 16);

	// Number of compression blocks per axis, rounding up.
	uint32 blocksX = (sizeX + header.blockdimX - 1) / header.blockdimX;
	uint32 blocksY = (sizeY + header.blockdimY - 1) / header.blockdimY;
	uint32 blocksZ = (sizeZ + header.blockdimZ - 1) / header.blockdimZ;

	// Every ASTC block occupies 16 bytes regardless of its dimensions.
	size_t totalsize = blocksX * blocksY * blocksZ * 16;

	// Make sure the file actually contains the data the header promises.
	if (totalsize + sizeof(header) > filedata->getSize())
		throw love::Exception("Could not parse .astc file: file is too small.");

	uint8 *data = nullptr;

	try
	{
		data = new uint8[totalsize];
	}
	catch (std::bad_alloc &)
	{
		throw love::Exception("Out of memory.");
	}

	// .astc files only store a single mipmap level.
	memcpy(data, (uint8 *) filedata->getData() + sizeof(ASTCHeader), totalsize);

	CompressedImageData::SubImage mip;
	mip.width = sizeX;
	mip.height = sizeY;
	mip.size = totalsize;
	mip.data = data;
	images.push_back(mip);

	dataSize = totalsize;
	format = cformat;
	// The .astc container carries no sRGB flag here, so report linear.
	sRGB = false;

	return data;
}
// Records the requested pixel format, its Ogre equivalent, and the number
// of bytes per pixel (0 for any format not in the known set).
void OgreTexture::setFormat(PixelFormat _format)
{
    mOriginalFormat = _format;
    mPixelFormat = convertFormat(_format);

    // Bytes per pixel for the formats this texture knows how to handle.
    int bytesPerPixel = 0;
    if (_format == PixelFormat::L8)
        bytesPerPixel = 1;
    else if (_format == PixelFormat::L8A8)
        bytesPerPixel = 2;
    else if (_format == PixelFormat::R8G8B8)
        bytesPerPixel = 3;
    else if (_format == PixelFormat::R8G8B8A8)
        bytesPerPixel = 4;

    mNumElemBytes = bytesPerPixel;
}
// Parses a PKM (ETC) file into a single CompressedImageData sub-image.
// Returns the newly allocated texture-data block (caller takes ownership)
// and fills the dataSize/format/sRGB out-parameters. Throws love::Exception
// on validation or allocation failure.
uint8 *PKMHandler::parse(filesystem::FileData *filedata, std::vector<CompressedImageData::SubImage> &images, size_t &dataSize, CompressedImageData::Format &format, bool &sRGB)
{
	if (!canParse(filedata))
		throw love::Exception("Could not decode compressed data (not a PKM file?)");

	PKMHeader header = *(const PKMHeader *) filedata->getData();

	// PKM header fields are stored big-endian; convert to host order.
	header.textureFormatBig = swap16big(header.textureFormatBig);
	header.extendedWidthBig = swap16big(header.extendedWidthBig);
	header.extendedHeightBig = swap16big(header.extendedHeightBig);
	header.widthBig = swap16big(header.widthBig);
	header.heightBig = swap16big(header.heightBig);

	CompressedImageData::Format cformat = convertFormat(header.textureFormatBig);

	if (cformat == CompressedImageData::FORMAT_UNKNOWN)
		throw love::Exception("Could not parse PKM file: unsupported texture format.");

	// The rest of the file after the header is all texture data.
	size_t totalsize = filedata->getSize() - sizeof(PKMHeader);

	uint8 *data = nullptr;

	try
	{
		data = new uint8[totalsize];
	}
	catch (std::bad_alloc &)
	{
		throw love::Exception("Out of memory.");
	}

	// PKM files only store a single mipmap level.
	memcpy(data, (uint8 *) filedata->getData() + sizeof(PKMHeader), totalsize);

	CompressedImageData::SubImage mip;

	// TODO: verify whether glCompressedTexImage works properly with the unpadded
	// width and height values (extended == padded.)
	mip.width = header.widthBig;
	mip.height = header.heightBig;
	mip.size = totalsize;
	mip.data = data;
	images.push_back(mip);

	dataSize = totalsize;
	format = cformat;
	// PKM/ETC1 data here is treated as linear; no sRGB flag in the header.
	sRGB = false;

	return data;
}
//============================================================================== VkPipelineVertexInputStateCreateInfo* PipelineImpl::initVertexStage( const VertexStateInfo& vertex, VkPipelineVertexInputStateCreateInfo& ci) { if(vertex.m_bindingCount == 0 && vertex.m_attributeCount == 0) { // Early out return nullptr; } // First the bindings ci.vertexBindingDescriptionCount = vertex.m_bindingCount; for(U i = 0; i < ci.vertexBindingDescriptionCount; ++i) { VkVertexInputBindingDescription& vkBinding = const_cast<VkVertexInputBindingDescription&>( ci.pVertexBindingDescriptions[i]); vkBinding.binding = i; vkBinding.stride = vertex.m_bindings[i].m_stride; switch(vertex.m_bindings[i].m_stepRate) { case VertexStepRate::VERTEX: vkBinding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; break; case VertexStepRate::INSTANCE: vkBinding.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE; break; default: ANKI_ASSERT(0); } } // Then the attributes ci.vertexAttributeDescriptionCount = vertex.m_attributeCount; for(U i = 0; i < ci.vertexAttributeDescriptionCount; ++i) { VkVertexInputAttributeDescription& vkAttrib = const_cast<VkVertexInputAttributeDescription&>( ci.pVertexAttributeDescriptions[i]); vkAttrib.location = 0; vkAttrib.binding = vertex.m_attributes[i].m_binding; vkAttrib.format = convertFormat(vertex.m_attributes[i].m_format); vkAttrib.offset = vertex.m_attributes[i].m_offset; } return &ci; }
// Returns the cached depth-stencil target for the given surface address,
// creating (and caching) a backing texture plus target on first use.
gfx::DepthStencilTarget* PGRAPH::getDepthStencilTarget(U32 address) {
    auto it = depthStencilTargets.find(address);
    if (it != depthStencilTargets.end()) {
        return it->second;
    }

    // Generate a texture to hold the depth buffer
    gfx::TextureDesc desc = {};
    desc.mipmapLevels = 1;
    desc.width = surface.width;
    desc.height = surface.height;
    desc.format = convertFormat(surface.depthFormat);
    desc.flags = gfx::TEXTURE_FLAG_DEPTHSTENCIL_TARGET;

    auto* texture = graphics->createTexture(desc);
    textures[address] = texture;

    // Wrap the texture in a depth-stencil target and cache it for next time.
    auto* target = graphics->createDepthStencilTarget(texture);
    depthStencilTargets[address] = target;
    return target;
}
// Returns the cached color target for the given surface address, creating
// (and caching) a backing texture plus target on first use.
gfx::ColorTarget* PGRAPH::getColorTarget(U32 address) {
    auto it = colorTargets.find(address);
    if (it != colorTargets.end()) {
        return it->second;
    }

    // Generate a texture to hold the color buffer
    gfx::TextureDesc desc = {};
    desc.mipmapLevels = 1;
    desc.width = surface.width;
    desc.height = surface.height;
    desc.format = convertFormat(surface.colorFormat);
    desc.flags = gfx::TEXTURE_FLAG_COLOR_TARGET;

    auto* texture = graphics->createTexture(desc);
    textures[address] = texture;

    // Wrap the texture in a color target and cache it for next time.
    auto* target = graphics->createColorTarget(texture);
    colorTargets[address] = target;
    return target;
}
// Parses a DDS file (all mipmap levels) into CompressedImageData sub-images.
// Returns one contiguous allocation holding every level back-to-back (caller
// takes ownership) and fills dataSize/format/sRGB. Throws love::Exception on
// any failure, freeing the partially built allocation first.
uint8 *DDSHandler::parse(filesystem::FileData *filedata, std::vector<CompressedImageData::SubImage> &images, size_t &dataSize, CompressedImageData::Format &format, bool &sRGB)
{
	if (!dds::isDDS(filedata->getData(), filedata->getSize()))
		throw love::Exception("Could not decode compressed data (not a DDS file?)");

	CompressedImageData::Format texformat = CompressedImageData::FORMAT_UNKNOWN;
	bool isSRGB = false;

	uint8 *data = nullptr;
	dataSize = 0;

	images.clear();

	try
	{
		// Attempt to parse the dds file.
		dds::Parser parser(filedata->getData(), filedata->getSize());

		// convertFormat also reports whether the DDS format is an sRGB one.
		texformat = convertFormat(parser.getFormat(), isSRGB);

		if (texformat == CompressedImageData::FORMAT_UNKNOWN)
			throw love::Exception("Could not parse compressed data: Unsupported format.");

		if (parser.getMipmapCount() == 0)
			throw love::Exception("Could not parse compressed data: No readable texture data.");

		// Calculate the size of the block of memory we're returning.
		for (size_t i = 0; i < parser.getMipmapCount(); i++)
		{
			const dds::Image *img = parser.getImageData(i);
			dataSize += img->dataSize;
		}

		data = new uint8[dataSize];

		size_t dataOffset = 0;

		// Copy the parsed mipmap levels from the FileData to our CompressedImageData.
		for (size_t i = 0; i < parser.getMipmapCount(); i++)
		{
			// Fetch the data for this mipmap level.
			const dds::Image *img = parser.getImageData(i);

			CompressedImageData::SubImage mip;
			mip.width = img->width;
			mip.height = img->height;
			mip.size = img->dataSize;

			// Copy the mipmap image from the FileData to our block of memory.
			memcpy(data + dataOffset, img->data, mip.size);

			// Each sub-image points into the shared allocation, not its own.
			mip.data = data + dataOffset;

			dataOffset += mip.size;

			images.push_back(mip);
		}
	}
	catch (std::exception &e)
	{
		// Clean up the shared allocation before re-throwing as love::Exception.
		delete[] data;
		images.clear();
		throw love::Exception("%s", e.what());
	}

	format = texformat;
	sRGB = isSRGB;

	return data;
}
/*
 * @~English
 * @brief Load a GL texture object from a ktxStream.
 *
 * This function will unpack compressed GL_ETC1_RGB8_OES and GL_ETC2_* format
 * textures in software when the format is not supported by the GL context,
 * provided the library has been compiled with SUPPORT_SOFTWARE_ETC_UNPACK
 * defined as 1.
 *
 * It will also convert textures with legacy formats to their modern equivalents
 * when the format is not supported by the GL context, provided that the library
 * has been compiled with SUPPORT_LEGACY_FORMAT_CONVERSION defined as 1.
 *
 * @param [in] stream        pointer to the ktxStream from which to load.
 * @param [in,out] pTexture  name of the GL texture to load. If NULL or if
 *                           <tt>*pTexture == 0</tt> the function will generate
 *                           a texture name. The function binds either the
 *                           generated name or the name given in @p *pTexture
 *                           to the texture target returned in @p *pTarget,
 *                           before loading the texture data. If @p pTexture
 *                           is not NULL and a name was generated, the generated
 *                           name will be returned in *pTexture.
 * @param [out] pTarget      @p *pTarget is set to the texture target used. The
 *                           target is chosen based on the file contents.
 * @param [out] pDimensions  If @p pDimensions is not NULL, the width, height and
 *                           depth of the texture's base level are returned in the
 *                           fields of the KTX_dimensions structure to which it points.
 * @param [out] pIsMipmapped
 *                           If @p pIsMipmapped is not NULL, @p *pIsMipmapped is set
 *                           to GL_TRUE if the KTX texture is mipmapped, GL_FALSE
 *                           otherwise.
 * @param [out] pGlerror     @p *pGlerror is set to the value returned by
 *                           glGetError when this function returns the error
 *                           KTX_GL_ERROR. glerror can be NULL.
 * @param [in,out] pKvdLen   If not NULL, @p *pKvdLen is set to the number of bytes
 *                           of key-value data pointed at by @p *ppKvd. Must not be
 *                           NULL, if @p ppKvd is not NULL.
 * @param [in,out] ppKvd     If not NULL, @p *ppKvd is set to the point to a block of
 *                           memory containing key-value data read from the file.
 *                           The application is responsible for freeing the memory.
 *
 * @return KTX_SUCCESS on success, other KTX_* enum values on error.
 *
 * @exception KTX_INVALID_VALUE @p target is @c NULL or the size of a mip
 *                              level is greater than the size of the
 *                              preceding level.
 * @exception KTX_INVALID_OPERATION @p ppKvd is not NULL but pKvdLen is NULL.
 * @exception KTX_UNEXPECTED_END_OF_FILE the file does not contain the
 *                                       expected amount of data.
 * @exception KTX_OUT_OF_MEMORY Sufficient memory could not be allocated to store
 *                              the requested key-value data.
 * @exception KTX_GL_ERROR      A GL error was raised by glBindTexture,
 *                              glGenTextures or gl*TexImage*. The GL error
 *                              will be returned in @p *glerror, if glerror
 *                              is not @c NULL.
 */
static KTX_error_code
ktxLoadTextureS(struct ktxStream* stream, GLuint* pTexture, GLenum* pTarget,
                KTX_dimensions* pDimensions, GLboolean* pIsMipmapped,
                GLenum* pGlerror,
                unsigned int* pKvdLen, unsigned char** ppKvd)
{
    GLint previousUnpackAlignment;
    KTX_header header;
    KTX_texinfo texinfo;
    void* data = NULL;
    khronos_uint32_t dataSize = 0;
    GLuint texname;
    int texnameUser;
    khronos_uint32_t faceLodSize;
    khronos_uint32_t faceLodSizeRounded;
    khronos_uint32_t level;
    khronos_uint32_t face;
    GLenum glFormat, glInternalFormat;
    KTX_error_code errorCode = KTX_SUCCESS;
    GLenum errorTmp;

    if (pGlerror)
        *pGlerror = GL_NO_ERROR;

    if (ppKvd) {
        *ppKvd = NULL;
    }

    if (!stream || !stream->read || !stream->skip) {
        return KTX_INVALID_VALUE;
    }

    if (!pTarget) {
        return KTX_INVALID_VALUE;
    }

    if (!stream->read(&header, KTX_HEADER_SIZE, stream->src)) {
        return KTX_UNEXPECTED_END_OF_FILE;
    }

    errorCode = _ktxCheckHeader(&header, &texinfo);
    if (errorCode != KTX_SUCCESS) {
        return errorCode;
    }

    if (ppKvd) {
        /* Hand the key/value metadata block back to the caller. */
        if (pKvdLen == NULL)
            return KTX_INVALID_OPERATION;
        *pKvdLen = header.bytesOfKeyValueData;
        if (*pKvdLen) {
            *ppKvd = (unsigned char*)malloc(*pKvdLen);
            if (*ppKvd == NULL)
                return KTX_OUT_OF_MEMORY;
            if (!stream->read(*ppKvd, *pKvdLen, stream->src)) {
                free(*ppKvd);
                *ppKvd = NULL;
                return KTX_UNEXPECTED_END_OF_FILE;
            }
        }
    } else {
        /* skip key/value metadata */
        if (!stream->skip((long)header.bytesOfKeyValueData, stream->src)) {
            return KTX_UNEXPECTED_END_OF_FILE;
        }
    }

    if (contextProfile == 0)
        discoverContextCapabilities();

    /* KTX files require an unpack alignment of 4 */
    glGetIntegerv(GL_UNPACK_ALIGNMENT, &previousUnpackAlignment);
    if (previousUnpackAlignment != KTX_GL_UNPACK_ALIGNMENT) {
        glPixelStorei(GL_UNPACK_ALIGNMENT, KTX_GL_UNPACK_ALIGNMENT);
    }

    texnameUser = pTexture && *pTexture;
    if (texnameUser) {
        texname = *pTexture;
    } else {
        glGenTextures(1, &texname);
    }
    glBindTexture(texinfo.glTarget, texname);

    /* Prefer glGenerateMipmaps over GL_GENERATE_MIPMAP */
    if (texinfo.generateMipmaps && (glGenerateMipmap == NULL)) {
        glTexParameteri(texinfo.glTarget, GL_GENERATE_MIPMAP, GL_TRUE);
    }

    /* Cube maps are uploaded one face at a time, starting at +X. */
    if (texinfo.glTarget == GL_TEXTURE_CUBE_MAP) {
        texinfo.glTarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X;
    }

    glInternalFormat = header.glInternalFormat;
    glFormat = header.glFormat;
    if (!texinfo.compressed) {
#if SUPPORT_LEGACY_FORMAT_CONVERSION
        /* If sized legacy formats are supported there is no need to convert.
         * If only unsized formats are supported, there is no point in converting
         * as the modern formats aren't supported either. */
        if (sizedFormats == _NON_LEGACY_FORMATS && supportsSwizzle) {
            convertFormat(texinfo.glTarget, &glFormat, &glInternalFormat);
            errorTmp = glGetError();
        } else if (sizedFormats == _NO_SIZED_FORMATS)
            glInternalFormat = header.glBaseInternalFormat;
#else
        /* When no sized formats are supported, or legacy sized formats are not
         * supported, must change internal format. */
        if (sizedFormats == _NO_SIZED_FORMATS
            || (!(sizedFormats & _LEGACY_FORMATS) &&
                (header.glBaseInternalFormat == GL_ALPHA
                 || header.glBaseInternalFormat == GL_LUMINANCE
                 || header.glBaseInternalFormat == GL_LUMINANCE_ALPHA
                 || header.glBaseInternalFormat == GL_INTENSITY))) {
            glInternalFormat = header.glBaseInternalFormat;
        }
#endif
    }

    for (level = 0; level < header.numberOfMipmapLevels; ++level) {
        GLsizei pixelWidth = MAX(1, header.pixelWidth >> level);
        GLsizei pixelHeight = MAX(1, header.pixelHeight >> level);
        GLsizei pixelDepth = MAX(1, header.pixelDepth >> level);

        if (!stream->read(&faceLodSize, sizeof(khronos_uint32_t), stream->src)) {
            errorCode = KTX_UNEXPECTED_END_OF_FILE;
            goto cleanup;
        }
        if (header.endianness == KTX_ENDIAN_REF_REV) {
            _ktxSwapEndian32(&faceLodSize, 1);
        }
        /* Levels are padded to 4-byte boundaries in the file. */
        faceLodSizeRounded = (faceLodSize + 3) & ~(khronos_uint32_t)3;
        if (!data) {
            /* allocate memory sufficient for the first level */
            data = malloc(faceLodSizeRounded);
            if (!data) {
                errorCode = KTX_OUT_OF_MEMORY;
                goto cleanup;
            }
            dataSize = faceLodSizeRounded;
        } else if (dataSize < faceLodSizeRounded) {
            /* subsequent levels cannot be larger than the first level */
            errorCode = KTX_INVALID_VALUE;
            goto cleanup;
        }

        for (face = 0; face < header.numberOfFaces; ++face) {
            if (!stream->read(data, faceLodSizeRounded, stream->src)) {
                errorCode = KTX_UNEXPECTED_END_OF_FILE;
                goto cleanup;
            }

            /* Perform endianness conversion on texture data */
            if (header.endianness == KTX_ENDIAN_REF_REV && header.glTypeSize == 2) {
                _ktxSwapEndian16((khronos_uint16_t*)data, faceLodSize / 2);
            } else if (header.endianness == KTX_ENDIAN_REF_REV && header.glTypeSize == 4) {
                _ktxSwapEndian32((khronos_uint32_t*)data, faceLodSize / 4);
            }

            if (texinfo.textureDimensions == 1) {
                if (texinfo.compressed) {
                    glCompressedTexImage1D(texinfo.glTarget + face, level,
                        glInternalFormat, pixelWidth, 0,
                        faceLodSize, data);
                } else {
                    glTexImage1D(texinfo.glTarget + face, level,
                        glInternalFormat, pixelWidth, 0,
                        glFormat, header.glType, data);
                }
            } else if (texinfo.textureDimensions == 2) {
                if (header.numberOfArrayElements) {
                    pixelHeight = header.numberOfArrayElements;
                }
                if (texinfo.compressed) {
                    /* It is simpler to just attempt to load the format, rather than divine which
                     * formats are supported by the implementation. In the event of an error,
                     * software unpacking can be attempted. */
                    glCompressedTexImage2D(texinfo.glTarget + face, level,
                        glInternalFormat, pixelWidth, pixelHeight, 0,
                        faceLodSize, data);
                } else {
                    glTexImage2D(texinfo.glTarget + face, level,
                        glInternalFormat, pixelWidth, pixelHeight, 0,
                        glFormat, header.glType, data);
                }
            } else if (texinfo.textureDimensions == 3) {
                if (header.numberOfArrayElements) {
                    pixelDepth = header.numberOfArrayElements;
                }
                if (texinfo.compressed) {
                    glCompressedTexImage3D(texinfo.glTarget + face, level,
                        glInternalFormat, pixelWidth, pixelHeight, pixelDepth, 0,
                        faceLodSize, data);
                } else {
                    glTexImage3D(texinfo.glTarget + face, level,
                        glInternalFormat, pixelWidth, pixelHeight, pixelDepth, 0,
                        glFormat, header.glType, data);
                }
            }

            errorTmp = glGetError();
#if SUPPORT_SOFTWARE_ETC_UNPACK
            /* Renderion is returning INVALID_VALUE. Oops!! */
            if ((errorTmp == GL_INVALID_ENUM || errorTmp == GL_INVALID_VALUE)
                && texinfo.compressed
                && texinfo.textureDimensions == 2
                && (glInternalFormat == GL_ETC1_RGB8_OES
                    || (glInternalFormat >= GL_COMPRESSED_R11_EAC
                        && glInternalFormat <= GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC))) {
                GLubyte* unpacked;
                GLenum format, internalFormat, type;

                errorCode = _ktxUnpackETC((GLubyte*)data, glInternalFormat,
                                          pixelWidth, pixelHeight,
                                          &unpacked, &format, &internalFormat, &type,
                                          R16Formats, supportsSRGB);
                if (errorCode != KTX_SUCCESS) {
                    goto cleanup;
                }
                /* BUGFIX: was `!sizedFormats & _NON_LEGACY_FORMATS`, which
                 * parses as `(!sizedFormats) & _NON_LEGACY_FORMATS` and is
                 * almost never true. The intent is: when the context does not
                 * support modern sized formats, fall back to the unsized
                 * internal formats. */
                if (!(sizedFormats & _NON_LEGACY_FORMATS)) {
                    if (internalFormat == GL_RGB8)
                        internalFormat = GL_RGB;
                    else if (internalFormat == GL_RGBA8)
                        internalFormat = GL_RGBA;
                }
                glTexImage2D(texinfo.glTarget + face, level,
                             internalFormat, pixelWidth, pixelHeight, 0,
                             format, type, unpacked);

                free(unpacked);

                errorTmp = glGetError();
            }
#endif
            if (errorTmp != GL_NO_ERROR) {
                if (pGlerror)
                    *pGlerror = errorTmp;
                errorCode = KTX_GL_ERROR;
                goto cleanup;
            }
        }
    }

cleanup:
    free(data);

    /* restore previous GL state */
    if (previousUnpackAlignment != KTX_GL_UNPACK_ALIGNMENT) {
        glPixelStorei(GL_UNPACK_ALIGNMENT, previousUnpackAlignment);
    }

    if (errorCode == KTX_SUCCESS) {
        if (texinfo.generateMipmaps && glGenerateMipmap) {
            glGenerateMipmap(texinfo.glTarget);
        }
        *pTarget = texinfo.glTarget;
        if (pTexture) {
            *pTexture = texname;
        }
        if (pDimensions) {
            pDimensions->width = header.pixelWidth;
            pDimensions->height = header.pixelHeight;
            pDimensions->depth = header.pixelDepth;
        }
        if (pIsMipmapped) {
            if (texinfo.generateMipmaps || header.numberOfMipmapLevels > 1)
                *pIsMipmapped = GL_TRUE;
            else
                *pIsMipmapped = GL_FALSE;
        }
    } else {
        /* Failure: release any key/value block and any texture we generated. */
        if (ppKvd && *ppKvd) {
            free(*ppKvd);
            *ppKvd = NULL;
        }
        if (!texnameUser) {
            glDeleteTextures(1, &texname);
        }
    }
    return errorCode;
}
// Creates and configures the MMAL camera component: sets the sensor
// configuration, the ROI crop, the video-port encoding/resolution, allocates
// the video buffer pool and enables the port and the component. Returns the
// component on success, 0 on any failure (destroying the component first).
MMAL_COMPONENT_T *Private_Impl::create_camera_component ( RASPIVID_STATE *state ) {
    MMAL_COMPONENT_T *camera = 0;
    MMAL_ES_FORMAT_T *format;
    MMAL_PORT_T *video_port = NULL;
    // MMAL_PORT_T *preview_port = NULL;
    // MMAL_PORT_T *still_port = NULL;
    MMAL_STATUS_T status;
    /* Create the component */
    status = mmal_component_create ( MMAL_COMPONENT_DEFAULT_CAMERA, &camera );

    if ( status != MMAL_SUCCESS ) {
        cerr << ( "Failed to create camera component" );
        return 0;
    }

    if ( !camera->output_num ) {
        cerr << ( "Camera doesn't have output ports" );
        mmal_component_destroy ( camera );
        return 0;
    }

    video_port = camera->output[MMAL_CAMERA_VIDEO_PORT];
    // preview_port=camera->output[MMAL_CAMERA_PREVIEW_PORT];
    // still_port=camera->output[MMAL_CAMERA_CAPTURE_PORT];

    // set up the camera configuration
    MMAL_PARAMETER_CAMERA_CONFIG_T cam_config;
    cam_config.hdr.id = MMAL_PARAMETER_CAMERA_CONFIG;
    cam_config.hdr.size = sizeof ( cam_config );
    cam_config.max_stills_w = state->width;
    cam_config.max_stills_h = state->height;
    cam_config.stills_yuv422 = 0;
    cam_config.one_shot_stills = 0;
    cam_config.max_preview_video_w = state->width;
    cam_config.max_preview_video_h = state->height;
    cam_config.num_preview_video_frames = 3;
    cam_config.stills_capture_circular_buffer_height = 0;
    cam_config.fast_preview_resume = 0;
    cam_config.use_stc_timestamp = MMAL_PARAM_TIMESTAMP_MODE_RAW_STC;
    mmal_port_parameter_set ( camera->control, &cam_config.hdr );

    /**
     * Set the ROI of the sensor to use for captures/preview
     * @param camera Pointer to camera component
     * @param rect Normalised coordinates of ROI rectangle
     */
    MMAL_PARAMETER_INPUT_CROP_T crop = {{MMAL_PARAMETER_INPUT_CROP, sizeof(MMAL_PARAMETER_INPUT_CROP_T)}};
    double x = state->roi.x;
    double y = state->roi.y;
    double w = state->roi.w;
    double h = state->roi.h;
    // Clamp the ROI so it never extends past the normalised [0,1] range.
    if (x + w > 1.0) w = 1 - x;
    if (y + h > 1.0) h = 1 - y;
    // MMAL expects crop coordinates in 16.16 fixed point (0..65536).
    crop.rect.x = (65536 * x);
    crop.rect.y = (65536 * y);
    crop.rect.width = (65536 * w);
    crop.rect.height = (65536 * h);
    mmal_port_parameter_set(camera->control, &crop.hdr);

    // Set the encode format on the video port
    format = video_port->format;
    // NOTE(review): `State` here is the member capture state, while `state`
    // is the parameter — both are in play; confirm they are kept in sync.
    format->encoding_variant = convertFormat ( State.captureFtm );
    format->encoding = convertFormat ( State.captureFtm );
    // The GPU requires width/height aligned to 32/16; crop back to the
    // requested size below.
    format->es->video.width = VCOS_ALIGN_UP(state->width, 32);
    format->es->video.height = VCOS_ALIGN_UP(state->height, 16);
    format->es->video.crop.x = 0;
    format->es->video.crop.y = 0;
    format->es->video.crop.width = state->width;
    format->es->video.crop.height = state->height;
    format->es->video.frame_rate.num = state->framerate;
    format->es->video.frame_rate.den = VIDEO_FRAME_RATE_DEN;

    status = mmal_port_format_commit ( video_port );
    if ( status ) {
        cerr << ( "camera video format couldn't be set" );
        mmal_component_destroy ( camera );
        return 0;
    }

    // Ensure there are enough buffers to avoid dropping frames
    if ( video_port->buffer_num < VIDEO_OUTPUT_BUFFERS_NUM )
        video_port->buffer_num = VIDEO_OUTPUT_BUFFERS_NUM;
    video_port->buffer_size = video_port->buffer_size_recommended;
    //video_port->buffer_num = video_port->buffer_num_recommended;

    //PR : create pool of message on video port
    MMAL_POOL_T *pool;
    pool = mmal_port_pool_create ( video_port, video_port->buffer_num, video_port->buffer_size );
    if ( !pool ) {
        cerr << ( "Failed to create buffer header pool for video output port" );
    }
    state->video_pool = pool;

    // PR : plug the callback to the video port
    status = mmal_port_enable ( video_port, video_buffer_callback );
    if ( status ) {
        cerr << ( "camera video callback2 error" );
        mmal_component_destroy ( camera );
        return 0;
    }

    /* Enable component */
    status = mmal_component_enable ( camera );
    if ( status ) {
        cerr << ( "camera component couldn't be enabled" );
        mmal_component_destroy ( camera );
        return 0;
    }

    state->camera_component = camera;//this needs to be before set_all_parameters
    return camera;
}
// Builds a complete graphics pipeline (root signature + PSO) from the
// backend-agnostic description. Returns the new pipeline, or nullptr on
// failure (all intermediate resources are released on every path).
Pipeline* Direct3D12Backend::createPipeline(const PipelineDesc& desc) {
    HRESULT hr;
    auto* pipeline = new Direct3D12Pipeline();

    // Root signature parameters
    CD3DX12_DESCRIPTOR_RANGE ranges[2];
    ranges[0].Init(D3D12_DESCRIPTOR_RANGE_TYPE_CBV, desc.numCBVs, 0);
    ranges[1].Init(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, desc.numSRVs, 0);

    std::vector<CD3DX12_ROOT_PARAMETER> parameters;
    if (desc.numCBVs) {
        CD3DX12_ROOT_PARAMETER parameter;
        parameter.InitAsDescriptorTable(1, &ranges[0], D3D12_SHADER_VISIBILITY_ALL);
        parameters.push_back(parameter);
    }
    if (desc.numSRVs) {
        CD3DX12_ROOT_PARAMETER parameter;
        parameter.InitAsDescriptorTable(1, &ranges[1], D3D12_SHADER_VISIBILITY_PIXEL);
        parameters.push_back(parameter);
    }

    // Static samplers, one per descriptor in the description.
    std::vector<D3D12_STATIC_SAMPLER_DESC> d3dSamplers(desc.samplers.size());
    for (Size i = 0; i < desc.samplers.size(); i++) {
        const auto& sampler = desc.samplers[i];
        d3dSamplers[i].Filter = convertFilter(sampler.filter);
        d3dSamplers[i].AddressU = convertTextureAddressMode(sampler.addressU);
        d3dSamplers[i].AddressV = convertTextureAddressMode(sampler.addressV);
        d3dSamplers[i].AddressW = convertTextureAddressMode(sampler.addressW);
        d3dSamplers[i].MipLODBias = 0;
        d3dSamplers[i].MaxAnisotropy = 0;
        d3dSamplers[i].ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
        d3dSamplers[i].BorderColor = D3D12_STATIC_BORDER_COLOR_TRANSPARENT_BLACK;
        d3dSamplers[i].MinLOD = 0.0f;
        d3dSamplers[i].MaxLOD = D3D12_FLOAT32_MAX;
        d3dSamplers[i].ShaderRegister = i;
        d3dSamplers[i].RegisterSpace = 0;
        d3dSamplers[i].ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL;
    }

    // Root signature
    D3D12_ROOT_SIGNATURE_DESC rootSignatureDesc = {};
    rootSignatureDesc.NumParameters = parameters.size();
    rootSignatureDesc.pParameters = parameters.data();
    rootSignatureDesc.NumStaticSamplers = d3dSamplers.size();
    rootSignatureDesc.pStaticSamplers = d3dSamplers.data();
    rootSignatureDesc.Flags =
        D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT |
        D3D12_ROOT_SIGNATURE_FLAG_DENY_DOMAIN_SHADER_ROOT_ACCESS |
        D3D12_ROOT_SIGNATURE_FLAG_DENY_GEOMETRY_SHADER_ROOT_ACCESS |
        D3D12_ROOT_SIGNATURE_FLAG_DENY_HULL_SHADER_ROOT_ACCESS;

    ID3DBlob* signature = nullptr;
    ID3DBlob* error = nullptr;
    hr = _D3D12SerializeRootSignature(&rootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
    if (FAILED(hr)) {
        // BUGFIX: guard the error-blob dereference (it may be null) and do
        // not leak the pipeline object or the blobs on failure.
        LPVOID errorString = error ? error->GetBufferPointer() : nullptr;
        logger.error(LOG_GRAPHICS, "Direct3D12Backend::createPipeline: D3D12SerializeRootSignature failed (0x%X): %s", hr, errorString);
        if (error) {
            error->Release();
        }
        delete pipeline;
        return nullptr;
    }

    hr = device->CreateRootSignature(0, signature->GetBufferPointer(), signature->GetBufferSize(), IID_PPV_ARGS(&pipeline->rootSignature));
    // BUGFIX: the serialized blobs were never released (COM reference leak).
    signature->Release();
    if (error) {
        error->Release();
    }
    if (FAILED(hr)) {
        logger.error(LOG_GRAPHICS, "Direct3D12Backend::createPipeline: CreateRootSignature failed (0x%X)", hr);
        delete pipeline;
        return nullptr;
    }

    D3D12_GRAPHICS_PIPELINE_STATE_DESC d3dDesc = {};
    d3dDesc.NodeMask = 1;
    d3dDesc.SampleMask = UINT_MAX;
    d3dDesc.pRootSignature = pipeline->rootSignature;
    d3dDesc.NumRenderTargets = 1;
    d3dDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
    d3dDesc.DSVFormat = convertFormat(desc.formatDSV);
    d3dDesc.SampleDesc.Count = 1;

    // Shaders
    if (desc.vs) {
        auto* d3dShader = static_cast<Direct3D12Shader*>(desc.vs);
        d3dDesc.VS.pShaderBytecode = d3dShader->bytecodeData;
        d3dDesc.VS.BytecodeLength = d3dShader->bytecodeSize;
    }
    if (desc.hs) {
        auto* d3dShader = static_cast<Direct3D12Shader*>(desc.hs);
        d3dDesc.HS.pShaderBytecode = d3dShader->bytecodeData;
        d3dDesc.HS.BytecodeLength = d3dShader->bytecodeSize;
    }
    if (desc.ds) {
        auto* d3dShader = static_cast<Direct3D12Shader*>(desc.ds);
        d3dDesc.DS.pShaderBytecode = d3dShader->bytecodeData;
        d3dDesc.DS.BytecodeLength = d3dShader->bytecodeSize;
    }
    if (desc.gs) {
        auto* d3dShader = static_cast<Direct3D12Shader*>(desc.gs);
        d3dDesc.GS.pShaderBytecode = d3dShader->bytecodeData;
        d3dDesc.GS.BytecodeLength = d3dShader->bytecodeSize;
    }
    if (desc.ps) {
        auto* d3dShader = static_cast<Direct3D12Shader*>(desc.ps);
        d3dDesc.PS.pShaderBytecode = d3dShader->bytecodeData;
        d3dDesc.PS.BytecodeLength = d3dShader->bytecodeSize;
    }

    // IA state
    std::vector<D3D12_INPUT_ELEMENT_DESC> d3dInputElements;
    for (const auto& element : desc.iaState.inputLayout) {
        DXGI_FORMAT format = convertFormat(element.format);
        D3D12_INPUT_CLASSIFICATION inputClassification = convertInputClassification(element.inputClassification);
        d3dInputElements.emplace_back(D3D12_INPUT_ELEMENT_DESC{
            "INPUT", element.semanticIndex, format, element.inputSlot, element.offset, inputClassification, element.instanceStepRate
        });
    }
    d3dDesc.InputLayout.NumElements = d3dInputElements.size();
    d3dDesc.InputLayout.pInputElementDescs = d3dInputElements.data();
    d3dDesc.PrimitiveTopologyType = convertPrimitiveTopologyType(desc.iaState.topology);

    // RS state
    d3dDesc.RasterizerState.FillMode = convertFillMode(desc.rsState.fillMode);
    d3dDesc.RasterizerState.CullMode = convertCullMode(desc.rsState.cullMode);
    d3dDesc.RasterizerState.FrontCounterClockwise = desc.rsState.frontCounterClockwise;
    d3dDesc.RasterizerState.DepthClipEnable = TRUE; // TODO

    // Depth-stencil state (front and back faces configured independently).
    d3dDesc.DepthStencilState.DepthEnable = desc.rsState.depthEnable;
    d3dDesc.DepthStencilState.DepthWriteMask = convertDepthWriteMask(desc.rsState.depthWriteMask);
    d3dDesc.DepthStencilState.DepthFunc = convertComparisonFunc(desc.rsState.depthFunc);
    d3dDesc.DepthStencilState.StencilEnable = desc.rsState.stencilEnable;
    d3dDesc.DepthStencilState.StencilReadMask = desc.rsState.stencilReadMask;
    d3dDesc.DepthStencilState.StencilWriteMask = desc.rsState.stencilWriteMask;
    d3dDesc.DepthStencilState.FrontFace.StencilFailOp = convertStencilOp(desc.rsState.frontFace.stencilOpFail);
    d3dDesc.DepthStencilState.FrontFace.StencilDepthFailOp = convertStencilOp(desc.rsState.frontFace.stencilOpZFail);
    d3dDesc.DepthStencilState.FrontFace.StencilPassOp = convertStencilOp(desc.rsState.frontFace.stencilOpPass);
    d3dDesc.DepthStencilState.FrontFace.StencilFunc = convertComparisonFunc(desc.rsState.frontFace.stencilFunc);
    d3dDesc.DepthStencilState.BackFace.StencilFailOp = convertStencilOp(desc.rsState.backFace.stencilOpFail);
    d3dDesc.DepthStencilState.BackFace.StencilDepthFailOp = convertStencilOp(desc.rsState.backFace.stencilOpZFail);
    d3dDesc.DepthStencilState.BackFace.StencilPassOp = convertStencilOp(desc.rsState.backFace.stencilOpPass);
    d3dDesc.DepthStencilState.BackFace.StencilFunc = convertComparisonFunc(desc.rsState.backFace.stencilFunc);

    // CB state
    d3dDesc.BlendState.AlphaToCoverageEnable = desc.cbState.enableAlphaToCoverage;
    d3dDesc.BlendState.IndependentBlendEnable = desc.cbState.enableIndependentBlend;
    // With independent blend, all 8 render-target slots matter; otherwise
    // only slot 0 is read by D3D12.
    UINT sizeColorTargetBlendArray = d3dDesc.BlendState.IndependentBlendEnable ? 8 : 1;
    for (UINT i = 0; i < sizeColorTargetBlendArray; i++) {
        const auto& source = desc.cbState.colorTarget[i];
        auto& d3dTarget = d3dDesc.BlendState.RenderTarget[i];
        d3dTarget.BlendEnable = source.enableBlend;
        d3dTarget.LogicOpEnable = source.enableLogicOp;
        d3dTarget.SrcBlend = convertBlend(source.srcBlend);
        d3dTarget.DestBlend = convertBlend(source.destBlend);
        d3dTarget.BlendOp = convertBlendOp(source.blendOp);
        d3dTarget.SrcBlendAlpha = convertBlend(source.srcBlendAlpha);
        d3dTarget.DestBlendAlpha = convertBlend(source.destBlendAlpha);
        d3dTarget.BlendOpAlpha = convertBlendOp(source.blendOpAlpha);
        d3dTarget.LogicOp = convertLogicOp(source.logicOp);
        d3dTarget.RenderTargetWriteMask = convertColorWriteMask(source.colorWriteMask);
    }

    hr = device->CreateGraphicsPipelineState(&d3dDesc, IID_PPV_ARGS(&pipeline->state));
    if (FAILED(hr)) {
        logger.error(LOG_GRAPHICS, "Direct3D12Backend::createPipeline: CreateGraphicsPipelineState failed (0x%X)", hr);
        // BUGFIX: was leaking the pipeline object here. NOTE(review): assumes
        // ~Direct3D12Pipeline releases the root signature it holds — confirm.
        delete pipeline;
        return nullptr;
    }
    return pipeline;
}
// Loads the contents of lpD3DTexture (another Direct3DTexture wrapper) into
// this texture. On first use it creates a D3D11 2D texture plus shader
// resource view from the source surface's pixel data and mipmap chain,
// converting the source pixels to 32-bit BGRA; on later calls it reuses the
// view cached on the source texture.
// Returns D3D_OK on success, DDERR_INVALIDPARAMS for a null source, or
// D3DERR_TEXTURE_LOAD_FAILED if texture/view creation fails.
HRESULT Direct3DTexture::Load(LPDIRECT3DTEXTURE lpD3DTexture)
{
#if LOGGER
	std::ostringstream str;
	str << this << " " << __FUNCTION__;
	str << " " << lpD3DTexture;
	LogText(str.str());
#endif

	if (lpD3DTexture == nullptr)
	{
#if LOGGER
		str.str("\tDDERR_INVALIDPARAMS");
		LogText(str.str());
#endif
		return DDERR_INVALIDPARAMS;
	}

	// The legacy interface pointer is actually our own wrapper type.
	Direct3DTexture* d3dTexture = (Direct3DTexture*)lpD3DTexture;
	TextureSurface* surface = d3dTexture->_surface;

	// debug texture with name
	// if (surface->_buffer[0] == 0 && surface->_buffer[1] != 0)
	// {
	//#if LOGGER
	//	str.str("");
	//	str << "\ttex " << (const char*)(surface->_buffer + 1);
	//	LogText(str.str());
	//#endif
	// }

	// Fast path: the source texture was already uploaded — share its view.
	if (d3dTexture->_textureView)
	{
#if LOGGER
		str.str("\tretrieve existing texture");
		LogText(str.str());
#endif
		// NOTE(review): '*&' bypasses the smart pointer's operator& overload to
		// store the raw pointer without triggering an automatic Release/AddRef;
		// the reference is then taken manually below. Assumes _textureView is a
		// ComPtr-like wrapper — confirm before refactoring.
		*&this->_textureView = d3dTexture->_textureView.Get();
		this->_textureView->AddRef();
		return D3D_OK;
	}

#if LOGGER
	str.str("\tcreate new texture");
	LogText(str.str());
#endif

#if LOGGER
	str.str("");
	str << "\t" << surface->_pixelFormat.dwRGBBitCount;
	str << " " << (void*)surface->_pixelFormat.dwRBitMask;
	str << " " << (void*)surface->_pixelFormat.dwGBitMask;
	str << " " << (void*)surface->_pixelFormat.dwBBitMask;
	str << " " << (void*)surface->_pixelFormat.dwRGBAlphaBitMask;
	LogText(str.str());
#endif

	// Source bytes per pixel: only 32-bit and 16-bit surfaces are handled.
	DWORD bpp = surface->_pixelFormat.dwRGBBitCount == 32 ? 4 : 2;

	// Deduce the source pixel layout; for 16-bit surfaces the alpha bit mask
	// distinguishes 4444 / 1555 / 565.
	int format = DXGI_FORMAT_UNKNOWN;

	if (bpp == 4)
	{
		format = DXGI_FORMAT_B8G8R8A8_UNORM;
	}
	else
	{
		DWORD alpha = surface->_pixelFormat.dwRGBAlphaBitMask;

		if (alpha == 0x0000F000)
		{
			format = DXGI_FORMAT_B4G4R4A4_UNORM;
		}
		else if (alpha == 0x00008000)
		{
			format = DXGI_FORMAT_B5G5R5A1_UNORM;
		}
		else
		{
			format = DXGI_FORMAT_B5G6R5_UNORM;
		}
	}

	// The GPU texture itself is always 32-bit BGRA; convertFormat() below
	// presumably expands each mip level to that layout (pitch is width * 4).
	D3D11_TEXTURE2D_DESC textureDesc;
	textureDesc.Width = surface->_width;
	textureDesc.Height = surface->_height;
	textureDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
	textureDesc.Usage = D3D11_USAGE_IMMUTABLE;
	textureDesc.CPUAccessFlags = 0;
	textureDesc.MiscFlags = 0;
	textureDesc.MipLevels = surface->_mipmapCount;
	textureDesc.ArraySize = 1;
	textureDesc.SampleDesc.Count = 1;
	textureDesc.SampleDesc.Quality = 0;
	textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;

	// One subresource (and one converted pixel buffer) per mip level.
	// assumes _mipmapCount >= 1 and the _mipmap chain has that many links — TODO confirm
	D3D11_SUBRESOURCE_DATA* textureData = new D3D11_SUBRESOURCE_DATA[textureDesc.MipLevels];
	char** buffers = new char*[textureDesc.MipLevels];

	buffers[0] = convertFormat(surface->_buffer, surface->_width, surface->_height, format);
	textureData[0].pSysMem = buffers[0];
	textureData[0].SysMemPitch = surface->_width * 4;
	textureData[0].SysMemSlicePitch = 0;

	MipmapSurface* mipmap = surface->_mipmap;

	for (DWORD i = 1; i < textureDesc.MipLevels; i++)
	{
		buffers[i] = convertFormat(mipmap->_buffer, mipmap->_width, mipmap->_height, format);
		textureData[i].pSysMem = buffers[i];
		textureData[i].SysMemPitch = mipmap->_width * 4;
		textureData[i].SysMemSlicePitch = 0;
		mipmap = mipmap->_mipmap;
	}

	ComPtr<ID3D11Texture2D> texture;
	HRESULT hr = this->_deviceResources->_d3dDevice->CreateTexture2D(&textureDesc, textureData, &texture);

	// The converted buffers are only needed for the upload above.
	for (DWORD i = 0; i < textureDesc.MipLevels; i++)
	{
		delete[] buffers[i];
	}

	delete[] buffers;
	delete[] textureData;

	if (FAILED(hr))
	{
		// Show the error dialog at most once per process; the log still
		// records every failure.
		static bool messageShown = false;

		if (!messageShown)
		{
			MessageBox(nullptr, _com_error(hr).ErrorMessage(), __FUNCTION__, MB_ICONERROR);
		}

		messageShown = true;

#if LOGGER
		str.str("\tD3DERR_TEXTURE_LOAD_FAILED");
		LogText(str.str());
#endif
		return D3DERR_TEXTURE_LOAD_FAILED;
	}

	D3D11_SHADER_RESOURCE_VIEW_DESC textureViewDesc;
	memset(&textureViewDesc, 0, sizeof(D3D11_SHADER_RESOURCE_VIEW_DESC));
	textureViewDesc.Format = textureDesc.Format;
	textureViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
	textureViewDesc.Texture2D.MipLevels = textureDesc.MipLevels;
	textureViewDesc.Texture2D.MostDetailedMip = 0;

	// The view is stored on the SOURCE texture, so subsequent Load() calls for
	// it take the fast path above.
	if (FAILED(this->_deviceResources->_d3dDevice->CreateShaderResourceView(texture, &textureViewDesc, &d3dTexture->_textureView)))
	{
#if LOGGER
		str.str("\tD3DERR_TEXTURE_LOAD_FAILED");
		LogText(str.str());
#endif
		return D3DERR_TEXTURE_LOAD_FAILED;
	}

	// Share the freshly created view with this texture (manual raw-pointer
	// store plus explicit AddRef, as in the fast path).
	*&this->_textureView = d3dTexture->_textureView.Get();
	this->_textureView->AddRef();
	return D3D_OK;
}
// Prepares the graphics backend for an RSX draw call of the given primitive
// type: binds the render surface, viewport/scissor state, a cached graphics
// pipeline keyed by the current register state and shader programs, the VPE
// constant/transform buffers, and one descriptor per texture slot.
void PGRAPH::Begin(Primitive primitive) {
    // Set surface
    setSurface();

    // Set viewport/scissor state
    gfx::Viewport viewportRect = { viewport.x, viewport.y, viewport.width, viewport.height, 0.0f, 1.0f };
    gfx::Rectangle scissorRect = { scissor.x, scissor.y, scissor.width, scissor.height };
    cmdBuffer->cmdSetViewports(1, &viewportRect);
    cmdBuffer->cmdSetScissors(1, &scissorRect);

    // Hash the vertex/fragment programs and the fixed-function register block
    // to key the pipeline cache.
    auto vpData = &vpe.data[vpe.start];
    auto vpHash = HashVertexProgram(vpData);
    auto fpData = memory->ptr<rsx_fp_instruction_t>((fp_location ? rsx->get_ea(0x0) : 0xC0000000) + fp_offset);
    auto fpHash = HashFragmentProgram(fpData);
    auto pipelineHash = hashStruct(pipeline) ^ vpHash ^ fpHash;

    if (cachePipeline.find(pipelineHash) == cachePipeline.end()) {
        const auto& p = pipeline;

        // Decompile and compile shader programs the first time they are seen.
        if (cacheVP.find(vpHash) == cacheVP.end()) {
            auto vp = std::make_unique<RSXVertexProgram>();
            vp->decompile(vpData);
            vp->compile(graphics.get());
            cacheVP[vpHash] = std::move(vp);
        }
        if (cacheFP.find(fpHash) == cacheFP.end()) {
            auto fp = std::make_unique<RSXFragmentProgram>();
            fp->decompile(fpData);
            fp->compile(graphics.get());
            cacheFP[fpHash] = std::move(fp);
        }

        gfx::PipelineDesc pipelineDesc = {};
        pipelineDesc.formatDSV = convertFormat(surface.depthFormat);
        pipelineDesc.numCBVs = 2;
        pipelineDesc.numSRVs = RSX_MAX_TEXTURES;
        pipelineDesc.vs = cacheVP[vpHash]->shader;
        pipelineDesc.ps = cacheFP[fpHash]->shader;

        // Rasterizer / depth-stencil state
        pipelineDesc.rsState.fillMode = gfx::FILL_MODE_SOLID;
        pipelineDesc.rsState.cullMode = p.cull_face_enable ? convertCullMode(p.cull_mode) : gfx::CULL_MODE_NONE;
        pipelineDesc.rsState.frontCounterClockwise = convertFrontFace(p.front_face);
        pipelineDesc.rsState.depthEnable = p.depth_test_enable;
        pipelineDesc.rsState.depthWriteMask = p.depth_mask ? gfx::DEPTH_WRITE_MASK_ALL : gfx::DEPTH_WRITE_MASK_ZERO;
        pipelineDesc.rsState.depthFunc = convertCompareFunc(p.depth_func);
        pipelineDesc.rsState.stencilEnable = p.stencil_test_enable;
        pipelineDesc.rsState.stencilReadMask = p.stencil_func_mask;
        pipelineDesc.rsState.stencilWriteMask = p.stencil_mask;
        pipelineDesc.rsState.frontFace.stencilOpFail = convertStencilOp(p.stencil_op_fail);
        pipelineDesc.rsState.frontFace.stencilOpZFail = convertStencilOp(p.stencil_op_zfail);
        pipelineDesc.rsState.frontFace.stencilOpPass = convertStencilOp(p.stencil_op_zpass);
        pipelineDesc.rsState.frontFace.stencilFunc = convertCompareFunc(p.stencil_func);
        // BUGFIX: these two branches were swapped. With two-sided stencil
        // testing ENABLED, the back face uses its own BACK_STENCIL_* registers;
        // when DISABLED, the front-face stencil state applies to both faces
        // (GL-style two-sided stencil semantics).
        if (p.two_sided_stencil_test_enable) {
            pipelineDesc.rsState.backFace.stencilOpFail = convertStencilOp(p.back_stencil_op_fail);
            pipelineDesc.rsState.backFace.stencilOpZFail = convertStencilOp(p.back_stencil_op_zfail);
            pipelineDesc.rsState.backFace.stencilOpPass = convertStencilOp(p.back_stencil_op_zpass);
            pipelineDesc.rsState.backFace.stencilFunc = convertCompareFunc(p.back_stencil_func);
        } else {
            pipelineDesc.rsState.backFace.stencilOpFail = convertStencilOp(p.stencil_op_fail);
            pipelineDesc.rsState.backFace.stencilOpZFail = convertStencilOp(p.stencil_op_zfail);
            pipelineDesc.rsState.backFace.stencilOpPass = convertStencilOp(p.stencil_op_zpass);
            pipelineDesc.rsState.backFace.stencilFunc = convertCompareFunc(p.stencil_func);
        }

        // Color-blend state (render target 0 only)
        pipelineDesc.cbState.colorTarget[0].enableBlend = p.blend_enable;
        pipelineDesc.cbState.colorTarget[0].enableLogicOp = p.logic_op_enable;
        pipelineDesc.cbState.colorTarget[0].blendOp = convertBlendOp(p.blend_equation_rgb);
        pipelineDesc.cbState.colorTarget[0].blendOpAlpha = convertBlendOp(p.blend_equation_alpha);
        pipelineDesc.cbState.colorTarget[0].srcBlend = convertBlend(p.blend_sfactor_rgb);
        pipelineDesc.cbState.colorTarget[0].destBlend = convertBlend(p.blend_dfactor_rgb);
        pipelineDesc.cbState.colorTarget[0].srcBlendAlpha = convertBlend(p.blend_sfactor_alpha);
        pipelineDesc.cbState.colorTarget[0].destBlendAlpha = convertBlend(p.blend_dfactor_alpha);
        pipelineDesc.cbState.colorTarget[0].colorWriteMask = convertColorMask(p.color_mask);
        pipelineDesc.cbState.colorTarget[0].logicOp = convertLogicOp(p.logic_op);

        // Input assembler: one input element per enabled vertex attribute.
        pipelineDesc.iaState.topology = convertPrimitiveTopology(primitive);
        for (U32 index = 0; index < RSX_MAX_VERTEX_INPUTS; index++) {
            const auto& attr = vpe.attr[index];
            if (!attr.size) {
                continue;
            }
            gfx::Format format = convertVertexFormat(attr.type, attr.size);
            U32 stride = attr.stride;
            pipelineDesc.iaState.inputLayout.push_back({
                index, format, index, 0, stride, 0, gfx::INPUT_CLASSIFICATION_PER_VERTEX, 0 }
            );
        }

        // Fixed linear/mirror samplers for every texture slot.
        // NOTE(review): these should probably be derived from the RSX texture
        // sampler registers — confirm.
        for (U32 i = 0; i < RSX_MAX_TEXTURES; i++) {
            gfx::Sampler sampler = {};
            sampler.filter = gfx::FILTER_MIN_MAG_MIP_LINEAR;
            sampler.addressU = gfx::TEXTURE_ADDRESS_MIRROR;
            sampler.addressV = gfx::TEXTURE_ADDRESS_MIRROR;
            sampler.addressW = gfx::TEXTURE_ADDRESS_MIRROR;
            pipelineDesc.samplers.push_back(sampler);
        }

        cachePipeline[pipelineHash] = std::unique_ptr<gfx::Pipeline>(graphics->createPipeline(pipelineDesc));
    }

    heapResources->reset();
    heapResources->pushVertexBuffer(vpeConstantMemory);
    heapResources->pushVertexBuffer(vtxTransform);

    // Upload VPE constants if necessary
    void* constantsPtr = vpeConstantMemory->map();
    memcpy(constantsPtr, &vpe.constant, sizeof(vpe.constant));
    vpeConstantMemory->unmap();

    // Upload vertex transform matrix if necessary
    if (vertex_transform_dirty) {
        V128* transformPtr = reinterpret_cast<V128*>(vtxTransform->map());
        memset(transformPtr, 0, 4 * sizeof(V128));
        // Map the RSX viewport scale/offset into clip space.
        F32 half_cliph = surface.width / 2.0f;
        F32 half_clipv = surface.height / 2.0f;
        transformPtr[0].f32[0] = (viewport_scale.f32[0] / half_cliph);
        transformPtr[1].f32[1] = (viewport_scale.f32[1] / half_clipv);
        transformPtr[2].f32[2] = (viewport_scale.f32[2]);
        transformPtr[0].f32[3] = (viewport_offset.f32[0] - half_cliph) / half_cliph;
        transformPtr[1].f32[3] = (viewport_offset.f32[1] - half_clipv) / half_clipv;
        transformPtr[2].f32[3] = (viewport_offset.f32[2]);
        transformPtr[3].f32[3] = 1.0f;
        vtxTransform->unmap();
    }

    // Set textures
    for (U32 i = 0; i < RSX_MAX_TEXTURES; i++) {
        const auto& tex = texture[i];
        // Dummy texture: disabled slots get a 2x2 placeholder whose swizzle
        // forces every channel to zero, so the descriptor table stays dense.
        // NOTE(review): created on every Begin() and apparently never released
        // — possible leak; confirm createTexture ownership semantics.
        if (!tex.enable) {
            gfx::TextureDesc texDesc = {};
            texDesc.width = 2;
            texDesc.height = 2;
            texDesc.format = gfx::FORMAT_R8G8B8A8_UNORM;
            texDesc.mipmapLevels = 1;
            texDesc.swizzle = TEXTURE_SWIZZLE_ENCODE(
                gfx::TEXTURE_SWIZZLE_VALUE_0,
                gfx::TEXTURE_SWIZZLE_VALUE_0,
                gfx::TEXTURE_SWIZZLE_VALUE_0,
                gfx::TEXTURE_SWIZZLE_VALUE_0
            );
            gfx::Texture* texDescriptor = graphics->createTexture(texDesc);
            heapResources->pushTexture(texDescriptor);
        }
        // Upload real texture
        else {
            auto texFormat = static_cast<TextureFormat>(tex.format & ~RSX_TEXTURE_LN & ~RSX_TEXTURE_UN);
            gfx::TextureDesc texDesc = {};
            texDesc.data = memory->ptr<Byte>((tex.location ? rsx->get_ea(0x0) : 0xC0000000) + tex.offset);
            texDesc.size = tex.width * tex.height;
            texDesc.width = tex.width;
            texDesc.height = tex.height;
            texDesc.format = convertTextureFormat(texFormat);
            texDesc.mipmapLevels = tex.mipmap;
            texDesc.swizzle = convertTextureSwizzle(texFormat);

            // Scale the pixel count by the format's bytes-per-pixel.
            switch (texFormat) {
            case RSX_TEXTURE_B8:       texDesc.size *= 1; break;
            case RSX_TEXTURE_A1R5G5B5: texDesc.size *= 2; break;
            case RSX_TEXTURE_A4R4G4B4: texDesc.size *= 2; break;
            case RSX_TEXTURE_R5G6B5:   texDesc.size *= 2; break;
            case RSX_TEXTURE_A8R8G8B8: texDesc.size *= 4; break;
            default:
                assert_always("Unimplemented");
            }
            gfx::Texture* texDescriptor = graphics->createTexture(texDesc);
            heapResources->pushTexture(texDescriptor);
        }
    }

    cmdBuffer->cmdBindPipeline(cachePipeline[pipelineHash].get());
    cmdBuffer->cmdSetHeaps({ heapResources });
    cmdBuffer->cmdSetDescriptor(0, heapResources, 0);
    cmdBuffer->cmdSetDescriptor(1, heapResources, 2);
    cmdBuffer->cmdSetPrimitiveTopology(convertPrimitiveTopology(primitive));
}