void DIBPixelData::setRGBABitmapAlpha(HDC hdc, const IntRect& dstRect, unsigned char level)
{
    HBITMAP bitmap = static_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
    DIBPixelData pixelData(bitmap);
    ASSERT(pixelData.bitsPerPixel() == 32);

    IntRect drawRect(dstRect);
    XFORM trans;
    GetWorldTransform(hdc, &trans);
    IntSize transformedPosition(trans.eDx, trans.eDy);
    drawRect.move(transformedPosition);

    int pixelDataWidth = pixelData.size().width();
    int pixelDataHeight = pixelData.size().height();
    IntRect bitmapRect(0, 0, pixelDataWidth, pixelDataHeight);
    drawRect.intersect(bitmapRect);
    if (drawRect.isEmpty())
        return;

    RGBQUAD* bytes = reinterpret_cast<RGBQUAD*>(pixelData.buffer());
    bytes += drawRect.y() * pixelDataWidth;

    size_t width = drawRect.width();
    size_t height = drawRect.height();
    int x = drawRect.x();
    for (size_t i = 0; i < height; i++) {
        RGBQUAD* p = bytes + x;
        for (size_t j = 0; j < width; j++) {
            p->rgbReserved = level;
            p++;
        }
        bytes += pixelDataWidth;
    }
}
Pixel R8Image::pixel(size_t row, size_t column) const
{
    int idx = index(row, column);
    const Byte *data = pixelData();
    return Pixel(data[idx], 255, 255);
}

void R8Image::setPixel(size_t row, size_t column, const Pixel& pixel)
{
    int idx = index(row, column);
    Byte *data = pixelData();
    data[idx] = pixel.r;
}
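Both accessors rely on an index() helper that is not shown in this snippet. A minimal sketch, assuming row-major single-channel storage; the m_width member name is an assumption, not part of the original class:

inline int R8Image::index(size_t row, size_t column) const
{
    // Hypothetical helper: row-major addressing into a width * height byte buffer.
    return static_cast<int>(row * m_width + column);
}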
// FIXME: Is it possible to merge getWindowsContext and createWindowsBitmap into a single API
// suitable for all clients?
void GraphicsContext::releaseWindowsContext(HDC hdc, const IntRect& dstRect, bool supportAlphaBlend, bool mayCreateBitmap)
{
    bool createdBitmap = mayCreateBitmap && (!m_data->m_hdc || isInTransparencyLayer());
    if (!createdBitmap) {
        m_data->restore();
        return;
    }

    if (dstRect.isEmpty())
        return;

    OwnPtr<HBITMAP> bitmap = adoptPtr(static_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP)));

    DIBPixelData pixelData(bitmap.get());
    ASSERT(pixelData.bitsPerPixel() == 32);

    CGContextRef bitmapContext = CGBitmapContextCreate(pixelData.buffer(), pixelData.size().width(), pixelData.size().height(), 8,
        pixelData.bytesPerRow(), deviceRGBColorSpaceRef(),
        kCGBitmapByteOrder32Little | (supportAlphaBlend ? kCGImageAlphaPremultipliedFirst : kCGImageAlphaNoneSkipFirst));

    CGImageRef image = CGBitmapContextCreateImage(bitmapContext);
    CGContextDrawImage(m_data->m_cgContext.get(), dstRect, image);

    // Delete all our junk.
    CGImageRelease(image);
    CGContextRelease(bitmapContext);
    ::DeleteDC(hdc);
}
void GraphicsContext::releaseWindowsContext(HDC hdc, const IntRect& dstRect, bool supportAlphaBlend, bool mayCreateBitmap)
{
    bool createdBitmap = mayCreateBitmap && (!m_data->m_hdc || isInTransparencyLayer());
    if (!hdc || !createdBitmap) {
        m_data->restore();
        return;
    }

    if (dstRect.isEmpty())
        return;

    OwnPtr<HBITMAP> bitmap = adoptPtr(static_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP)));

    DIBPixelData pixelData(bitmap.get());
    ASSERT(pixelData.bitsPerPixel() == 32);

    // If this context does not support alpha blending, then it may have
    // been drawn with GDI functions which always set the alpha channel
    // to zero. We need to manually set the bitmap to be fully opaque.
    unsigned char* bytes = reinterpret_cast<unsigned char*>(pixelData.buffer());
    if (!supportAlphaBlend)
        setRGBABitmapAlpha(bytes, pixelData.size().height() * pixelData.bytesPerRow(), 255);

    drawBitmapToContext(m_data, platformContext()->cr(), pixelData, IntSize(dstRect.x(), dstRect.height() + dstRect.y()));

    ::DeleteDC(hdc);
}
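Both overloads above are the release half of a paired API. A minimal usage sketch, assuming the usual getWindowsContext counterpart is called with the same flags (the caller and flag values here are illustrative, not taken from the original source):

// Hypothetical caller: obtain an HDC for GDI drawing, then hand it back so the
// result is composited into the GraphicsContext. The flags must match on both calls.
HDC hdc = graphicsContext.getWindowsContext(dstRect, /* supportAlphaBlend */ false, /* mayCreateBitmap */ true);
if (hdc) {
    // ... draw into hdc with GDI calls ...
    graphicsContext.releaseWindowsContext(hdc, dstRect, /* supportAlphaBlend */ false, /* mayCreateBitmap */ true);
}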
static CGContextRef CGContextWithHDC(HDC hdc, bool hasAlpha)
{
    HBITMAP bitmap = static_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
    DIBPixelData pixelData(bitmap);

    // FIXME: We can get here because we asked for a bitmap that is too big
    // when we have a tiled layer and we're compositing. In that case
    // bmBitsPixel will be 0. This seems to be benign, so for now we will
    // exit gracefully and look at it later:
    //   https://bugs.webkit.org/show_bug.cgi?id=52041
    // ASSERT(bitmapBits.bitsPerPixel() == 32);
    if (pixelData.bitsPerPixel() != 32)
        return 0;

    CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Little | (hasAlpha ? kCGImageAlphaPremultipliedFirst : kCGImageAlphaNoneSkipFirst);
    CGContextRef context = CGBitmapContextCreate(pixelData.buffer(), pixelData.size().width(), pixelData.size().height(), 8,
        pixelData.bytesPerRow(), deviceRGBColorSpaceRef(), bitmapInfo);

    // Flip coords
    CGContextTranslateCTM(context, 0, pixelData.size().height());
    CGContextScaleCTM(context, 1, -1);

    // Put the HDC in advanced mode so it will honor affine transforms.
    SetGraphicsMode(hdc, GM_ADVANCED);

    return context;
}
template <typename DATATYPE>
void mitk::SegmentationInterpolationController::ScanChangedSlice( itk::Image<DATATYPE, 2>*, const SetChangedSliceOptions& options )
{
    DATATYPE* pixelData( (DATATYPE*)options.pixelData );

    unsigned int timeStep( options.timeStep );
    unsigned int sliceDimension( options.sliceDimension );
    unsigned int sliceIndex( options.sliceIndex );

    if ( sliceDimension > 2 ) return;
    if ( sliceIndex >= m_SegmentationCountInSlice[timeStep][sliceDimension].size() ) return;

    unsigned int dim0( options.dim0 );
    unsigned int dim1( options.dim1 );

    int numberOfPixels(0); // number of pixels in this slice that are not 0

    unsigned int dim0max = m_SegmentationCountInSlice[timeStep][dim0].size();
    unsigned int dim1max = m_SegmentationCountInSlice[timeStep][dim1].size();

    // scan the slice from two directions
    // and set the flags for the two dimensions of the slice
    for (unsigned int v = 0; v < dim1max; ++v)
    {
        for (unsigned int u = 0; u < dim0max; ++u)
        {
            DATATYPE value = *(pixelData + u + v * dim0max);
            // just for debugging. This must always be true, otherwise some counting is going wrong
            assert ( (signed) m_SegmentationCountInSlice[timeStep][dim0][u] + (signed)value >= 0 );
            assert ( (signed) m_SegmentationCountInSlice[timeStep][dim1][v] + (signed)value >= 0 );

            m_SegmentationCountInSlice[timeStep][dim0][u] = static_cast<unsigned int>( m_SegmentationCountInSlice[timeStep][dim0][u] + value );
            m_SegmentationCountInSlice[timeStep][dim1][v] = static_cast<unsigned int>( m_SegmentationCountInSlice[timeStep][dim1][v] + value );
            numberOfPixels += static_cast<int>( value );
        }
    }

    // flag for the dimension of the slice itself
    assert ( (signed) m_SegmentationCountInSlice[timeStep][sliceDimension][sliceIndex] + numberOfPixels >= 0 );
    m_SegmentationCountInSlice[timeStep][sliceDimension][sliceIndex] += numberOfPixels;

    //MITK_INFO << "scan t=" << timeStep << " from (0,0) to (" << dim0max << "," << dim1max << ") (" << pixelData << "-" << pixelData+dim0max*dim1max-1 << ") in slice " << sliceIndex << " found " << numberOfPixels << " pixels" << std::endl;
}
R8Image::~R8Image()
{
    delete [] pixelData();
}
inline ImageData load_image(std::string fileName)
{
    int w, h, comp;
    stbi_uc *temp = stbi_load(fileName.c_str(), &w, &h, &comp, 0);
    if (nullptr == temp)
        throw std::runtime_error("Unable to load file: " + fileName);
    std::shared_ptr<stbi_uc> data(temp, [](stbi_uc *d) { stbi_image_free(d); });
    temp = nullptr;

    ImageData i;
    i.width = w;
    i.height = h;
    i.bitsPerPixel = comp * 8;
    i.bytesPerPixel = comp;
    i.size = i.width * i.height * i.bytesPerPixel;

    // The image needs to be flipped vertically because stb_image stores rows
    // top-to-bottom, while OpenGL expects the first row to be the bottom of the texture.
    if (i.bytesPerPixel == 4) {
        i.format = GL_RGBA8;
        std::vector<RGBA> pixelData((RGBA*)data.get(), ((RGBA*)data.get() + (w * h)));
        std::vector<RGBA> flippedData(w * h);
        std::vector<RGBA>::iterator iter = flippedData.begin();
        for (int yPos = (h - 1) * w; yPos >= 0; yPos -= w) {
            for (int xPos = 0; xPos < w; ++xPos, ++iter) {
                *iter = pixelData[yPos + xPos];
            }
        }
        i.pixels.resize(i.size);
        memcpy(i.pixels.data(), flippedData.data(), i.size);
    }
    else if (i.bytesPerPixel == 3) {
        i.format = GL_RGB8;
        std::vector<RGB> pixelData((RGB*)data.get(), ((RGB*)data.get() + (w * h)));
        std::vector<RGB> flippedData(w * h);
        std::vector<RGB>::iterator iter = flippedData.begin();
        for (int yPos = (h - 1) * w; yPos >= 0; yPos -= w) {
            for (int xPos = 0; xPos < w; ++xPos, ++iter) {
                *iter = pixelData[yPos + xPos];
            }
        }
        i.pixels.resize(i.size);
        memcpy(i.pixels.data(), flippedData.data(), i.size);
    }
    else {
        // not a true color format
        throw std::runtime_error("Unsupported bytes per pixel");
    }
    return i;
}
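A minimal usage sketch for load_image, assuming a current OpenGL context and that ImageData::pixels is the byte buffer filled above; the file path and the GL_RGBA/GL_RGB mapping from bytesPerPixel are assumptions for illustration:

// Hypothetical caller: upload the flipped pixels as a 2D texture.
ImageData img = load_image("textures/example.png"); // illustrative path
GLuint tex = 0;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // 3-byte rows are not always 4-byte aligned
GLenum srcFormat = (img.bytesPerPixel == 4) ? GL_RGBA : GL_RGB;
glTexImage2D(GL_TEXTURE_2D, 0, img.format, img.width, img.height, 0,
             srcFormat, GL_UNSIGNED_BYTE, img.pixels.data());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);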
TextureAtlasGeneratorResult TextureAtlasGenerator::Generate(
    const std::vector<TextureAtlasGeneratorSource>& sources,
    int width,
    int height)
{
    POMDOG_ASSERT(width > 0);
    POMDOG_ASSERT(height > 0);

    TextureAtlasGeneratorResult result;
    auto & regions = result.Atlas.regions;
    std::unordered_map<std::shared_ptr<Image>, int> indices;

    for (auto & source : sources) {
        POMDOG_ASSERT(!source.Name.empty());
        TextureAtlasRegion region;
        region.Name = source.Name;
        regions.push_back(region);

        indices.emplace(source.Image, regions.size() - 1);
        POMDOG_ASSERT(regions.at(indices[source.Image]).Name == source.Name);
    }

    result.HasError = false;

    auto root = std::make_shared<TexturePackNode>(Rectangle{0, 0, width, height});

    for (auto & source : sources) {
        auto & image = source.Image;
        auto clipBounds = Clip(image);
        auto node = Insert(root, clipBounds.Width, clipBounds.Height);
        POMDOG_ASSERT(node);
        if (!node) {
            // TODO: error handling
            result.HasError = true;
#if defined(DEBUG)
            std::printf("Cannot pack the texture '%s'", regions[indices[image]].Name.c_str());
#endif
            break;
        }
        node->image = image;
        node->clipBounds = clipBounds;
    }

    std::vector<Color> pixelData(width * height);
    std::fill(pixelData.begin(), pixelData.end(), Color{0, 0, 0, 0});

    Traverse(root, [&](const TexturePackNode& node) {
        const auto image = node.image;
        auto & region = regions[indices[image]].Region;
        region.Subrect.X = static_cast<int>(node.rect.X);
        region.Subrect.Y = static_cast<int>(node.rect.Y);
        region.Subrect.Width = node.clipBounds.Width;
        region.Subrect.Height = node.clipBounds.Height;

        static_assert(std::is_same<decltype(node.clipBounds.X), std::int32_t>::value, "");
        static_assert(std::is_same<decltype(node.clipBounds.Y), std::int32_t>::value, "");
        static_assert(std::is_same<decltype(region.XOffset), std::int16_t>::value, "");
        static_assert(std::is_same<decltype(region.YOffset), std::int16_t>::value, "");
        static_assert(std::is_same<decltype(image->GetWidth()), int>::value, "");
        static_assert(std::is_same<decltype(image->GetHeight()), int>::value, "");
        static_assert(std::is_same<decltype(region.Width), std::int16_t>::value, "");
        static_assert(std::is_same<decltype(region.Height), std::int16_t>::value, "");
        POMDOG_ASSERT(node.clipBounds.X <= std::numeric_limits<std::int16_t>::max());
        POMDOG_ASSERT(node.clipBounds.Y <= std::numeric_limits<std::int16_t>::max());
        POMDOG_ASSERT(image->GetWidth() <= std::numeric_limits<std::int16_t>::max());
        POMDOG_ASSERT(image->GetHeight() <= std::numeric_limits<std::int16_t>::max());

        region.XOffset = static_cast<std::int16_t>(node.clipBounds.X);
        region.YOffset = static_cast<std::int16_t>(node.clipBounds.Y);
        region.Width = static_cast<std::int16_t>(image->GetWidth());
        region.Height = static_cast<std::int16_t>(image->GetHeight());

        const auto start = region.Subrect.X + region.Subrect.Y * width;
        for (int y = 0; y < region.Subrect.Height; ++y) {
            const auto offset = region.XOffset + ((region.YOffset + y) * image->GetWidth());
            static_assert(sizeof(decltype(*image->GetData())) == sizeof(Color), "");
            std::memcpy(
                pixelData.data() + start + (y * width),
                image->GetData() + offset,
                sizeof(Color) * region.Subrect.Width);
        }
    });

    result.Image = std::make_shared<Image>(width, height);
    result.Image->SetData(std::move(pixelData));

    return result;
}
bool TestRenderer::LoadTexture(const std::wstring& filename, ID3D11ShaderResourceView** srv)
{
    FileHandle texFile(CreateFile(filename.c_str(), GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr));
    if (!texFile.IsValid())
    {
        LogError(L"Failed to open texture.");
        return false;
    }

    DWORD bytesRead{};
    uint32_t fileSize = GetFileSize(texFile.Get(), nullptr);

    TextureHeader texHeader{};
    if (!ReadFile(texFile.Get(), &texHeader, sizeof(texHeader), &bytesRead, nullptr))
    {
        LogError(L"Failed to read texture.");
        return false;
    }

    if (texHeader.Signature != TextureHeader::ExpectedSignature)
    {
        LogError(L"Invalid texture file.");
        return false;
    }

    uint32_t pixelDataSize = fileSize - sizeof(TextureHeader);
    std::unique_ptr<uint8_t[]> pixelData(new uint8_t[pixelDataSize]);
    if (!ReadFile(texFile.Get(), pixelData.get(), pixelDataSize, &bytesRead, nullptr))
    {
        LogError(L"Failed to read texture data.");
        return false;
    }

    D3D11_TEXTURE2D_DESC td{};
    td.ArraySize = texHeader.ArrayCount;
    td.Format = texHeader.Format;
#if USE_SRGB
    if (td.Format == DXGI_FORMAT_R8G8B8A8_UNORM)
    {
        td.Format = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
    }
    else if (td.Format == DXGI_FORMAT_B8G8R8A8_UNORM)
    {
        td.Format = DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
    }
#endif
    td.Width = texHeader.Width;
    td.Height = texHeader.Height;
    td.MipLevels = texHeader.MipLevels;
    td.BindFlags = D3D11_BIND_SHADER_RESOURCE;
    td.SampleDesc.Count = 1;
    td.Usage = D3D11_USAGE_DEFAULT;

    D3D11_SUBRESOURCE_DATA init[20] {};
    uint32_t bpp = (uint32_t)BitsPerPixel(td.Format) / 8;

    ComPtr<ID3D11Texture2D> texture;
    HRESULT hr = S_OK;

    // Only try to use mips if width & height are the same size
    if (td.Width == td.Height && td.MipLevels > 1)
    {
        uint32_t width = td.Width;
        uint32_t height = td.Height;
        uint8_t* pPixels = pixelData.get();
        for (int m = 0; m < (int)td.MipLevels; ++m)
        {
            init[m].pSysMem = pPixels;
            init[m].SysMemPitch = width * bpp;
            init[m].SysMemSlicePitch = width * height * bpp;
            width = max(width >> 1, 1);
            height = max(height >> 1, 1);
            pPixels += init[m].SysMemSlicePitch;
        }
        hr = Device->CreateTexture2D(&td, init, &texture);
    }
//--------------------------------------------------------------
void ofApp::keyPressed(int key){

    infoText = "";
    bool buildTexture = false;

    switch ( key) {
        case 'r':
            record = true;
            break;
        case '1':
            trainingClassLabel = 1;
            break;
        case '2':
            trainingClassLabel = 2;
            break;
        case '3':
            trainingClassLabel = 3;
            break;
        case 't':
            if( pipeline.train( trainingData ) ){
                infoText = "Pipeline Trained";
                buildTexture = true;
            }else infoText = "WARNING: Failed to train pipeline";
            break;
        case 's':
            if( trainingData.save("TrainingData.grt") ){
                infoText = "Training data saved to file";
            }else infoText = "WARNING: Failed to save training data to file";
            break;
        case 'l':
            if( trainingData.load("TrainingData.grt") ){
                infoText = "Training data loaded from file";
            }else infoText = "WARNING: Failed to load training data from file";
            break;
        case 'c':
            trainingData.clear();
            infoText = "Training data cleared";
            break;
        default:
            break;
    }

    if( buildTexture ){
        const unsigned int rows = TEXTURE_RESOLUTION;
        const unsigned int cols = TEXTURE_RESOLUTION;
        const unsigned int size = rows*cols*4;
        vector<float> pixelData( size );
        ofFloatPixels pixels;

        unsigned int index = 0;
        unsigned int classLabel = 0;
        VectorFloat featureVector(2);
        VectorFloat likelihoods;
        float r,g,b,a;
        float maximumLikelihood;
        for(unsigned int j=0; j<cols; j++){
            for(unsigned int i=0; i<rows; i++){
                featureVector[0] = i/double(rows);
                featureVector[1] = j/double(cols);
                if( pipeline.predict( featureVector ) ){
                    classLabel = pipeline.getPredictedClassLabel();
                    maximumLikelihood = pipeline.getMaximumLikelihood();
                    likelihoods = pipeline.getClassLikelihoods();
                    switch( classLabel ){
                        case 1: r = 1.0; g = 0.0; b = 0.0; a = maximumLikelihood; break;
                        case 2: r = 0.0; g = 1.0; b = 0.0; a = maximumLikelihood; break;
                        case 3: r = 0.0; g = 0.0; b = 1.0; a = maximumLikelihood; break;
                        default: r = 0; g = 0; b = 0; a = 1; break;
                    }
                    pixelData[ index++ ] = r;
                    pixelData[ index++ ] = g;
                    pixelData[ index++ ] = b;
                    pixelData[ index++ ] = a;
                }
            }
        }
        pixels.setFromExternalPixels(&pixelData[0],rows,cols,OF_PIXELS_RGBA);
        if(!texture.isAllocated()){
            texture.allocate( pixels, false );
            texture.setRGToRGBASwizzles(true);
        }
        texture.loadData( pixels );
        texture.setTextureMinMagFilter( GL_LINEAR, GL_LINEAR );
    }

}
SaveResult write_bmp(const FilePath& filePath, const Bitmap& bmp, BitmapQuality quality)
{
  BinaryWriter out(filePath);
  if (!out.good()){
    return SaveResult::SaveFailed(error_open_file_write(filePath));
  }

  const auto bitsPerPixel = bits_per_pixel(quality);
  const IntSize size(bmp.GetSize());
  const int rowStride = bmp_row_stride(bitsPerPixel, size.w);

  switch(quality){
    // Note: No default, to ensure warning if unhandled enum value
    case BitmapQuality::COLOR_8BIT:
    {
      const auto pixelData = quantized(bmp, Dithering::ON);
      PaletteColors paletteColors(pixelData.palette.size());
      write_struct(out, create_bitmap_file_header(paletteColors, rowStride, size.h));
      write_struct(out, create_bitmap_info_header_8bipp(bmp.GetSize(), default_DPI(), PaletteColors(pixelData.palette.size()), false));
      write_8bipp_BI_RGB(out, pixelData);
      return SaveResult::SaveSuccessful();
    }

    case BitmapQuality::GRAY_8BIT:
    {
      MappedColors pixelData(desaturate_AlphaMap(bmp), grayscale_color_table());
      PaletteColors paletteColors(pixelData.palette.size());
      write_struct(out, create_bitmap_file_header(paletteColors, rowStride, size.h));
      write_struct(out, create_bitmap_info_header_8bipp(bmp.GetSize(), default_DPI(), PaletteColors(pixelData.palette.size()), false));
      write_8bipp_BI_RGB(out, pixelData);
      return SaveResult::SaveSuccessful();
    }

    case BitmapQuality::COLOR_24BIT:
    {
      write_struct(out, create_bitmap_file_header(PaletteColors(0), rowStride, size.h));
      write_struct(out, create_bitmap_info_header_24bipp(bmp.GetSize(), default_DPI(), false));
      write_24bipp_BI_RGB(out, bmp);
      return SaveResult::SaveSuccessful();
    }
  }

  assert(false);
  return SaveResult::SaveFailed(utf8_string("Internal error in save_bitmap"));
}
/**
 * Retrieve the statistics based on the box size
 * and point on the cube.
 *
 * @param p
 */
void StatisticsTool::getStatistics(QPoint p) {
  MdiCubeViewport *cvp = cubeViewport();
  if(cvp == NULL) return;

  double sample, line;
  cvp->viewportToCube(p.x(), p.y(), sample, line);

  // If we are outside of the cube, do nothing
  if((sample < 0.5) || (line < 0.5) ||
     (sample > cvp->cubeSamples() + 0.5) || (line > cvp->cubeLines() + 0.5)) {
    return;
  }

  int isamp = (int)(sample + 0.5);
  int iline = (int)(line + 0.5);

  Statistics stats;
  Brick *brick = new Brick(1, 1, 1, cvp->cube()->pixelType());

  QVector<QVector<double> > pixelData(p_boxLines, QVector<double>(p_boxSamps, Null));

  double lineDiff = p_boxLines / 2.0;
  double sampDiff = p_boxSamps / 2.0;

  p_ulSamp = isamp - (int)floor(sampDiff);
  p_ulLine = iline - (int)floor(lineDiff);

  int x, y;
  y = p_ulLine;

  for(int i = 0; i < p_boxLines; i++) {
    x = p_ulSamp;
    if(y < 1 || y > cvp->cubeLines()) {
      y++;
      continue;
    }

    for(int j = 0; j < p_boxSamps; j++) {
      if(x < 1 || x > cvp->cubeSamples()) {
        x++;
        continue;
      }

      brick->SetBasePosition(x, y, cvp->grayBand());
      cvp->cube()->read(*brick);
      stats.AddData(brick->at(0));
      pixelData[i][j] = brick->at(0);
      x++;
    }
    y++;
  }

  // Release the brick once the box has been read.
  delete brick;

  p_visualDisplay->setPixelData(pixelData, p_ulSamp, p_ulLine);

  if (stats.ValidPixels()) {
    p_minLabel->setText(QString("Minimum: %1").arg(stats.Minimum()));
    p_maxLabel->setText(QString("Maximum: %1").arg(stats.Maximum()));
    p_avgLabel->setText(QString("Average: %1").arg(stats.Average()));
    p_stdevLabel->setText(QString("Standard Dev: %1").arg(stats.StandardDeviation(), 0, 'f', 6));
  }
  else {
    p_minLabel->setText(QString("Minimum: n/a"));
    p_maxLabel->setText(QString("Maximum: n/a"));
    p_avgLabel->setText(QString("Average: n/a"));
    p_stdevLabel->setText(QString("Standard Dev: n/a"));
  }

  p_set = true;

  resizeScrollbars();
}