Example #1
static void write_png_data( png_structp png_ptr,
                            png_bytep   data,
                            png_size_t  length )
{
   chunk_t *ch = get_png_chunk(png_ptr);
   if( 0 == ch )
      ch = newChunk( ch, 0 );
   while( length )
   {
      if( ch->len_ == ch->max_ )
      {
         ch = newChunk(ch, ch->total_);
      }
      unsigned left = ch->max_ - ch->len_ ;
      if( left > length )
         left = length ;
      memcpy( ch->data_+ch->len_, data, left );
      data += left ;
      length -= left ;
      ch->len_ += left ;
      ch->total_ += left ;
   }
   
   // Re-register the (possibly re-allocated) tail chunk as libpng's I/O pointer.
   png_init_io(png_ptr,(FILE *)ch);
}
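The callback accumulates PNG output in a linked list of heap chunks, but chunk_t, get_png_chunk, and newChunk are not shown on this page. A minimal sketch of what they might look like, inferred from the call sites; the field layout, the next_ link, and the 4096-byte capacity are all assumptions:

#include <stdlib.h>
#include <png.h>

typedef struct chunk_t {
   struct chunk_t *next_;   /* next chunk in the list (assumed) */
   unsigned        len_;    /* bytes used in data_ */
   unsigned        max_;    /* capacity of data_ */
   unsigned long   total_;  /* running byte total across the list */
   unsigned char   data_[4096];
} chunk_t;

/* Recover the chunk list tail previously stored via png_init_io(). */
static chunk_t *get_png_chunk( png_structp png_ptr )
{
   return (chunk_t *)png_get_io_ptr( png_ptr );
}

/* Allocate a fresh chunk, link it after prev (if any), and carry the
 * running byte total forward (error handling omitted). */
static chunk_t *newChunk( chunk_t *prev, unsigned long total )
{
   chunk_t *ch = (chunk_t *)malloc( sizeof(chunk_t) );
   ch->next_  = 0;
   ch->len_   = 0;
   ch->max_   = sizeof(ch->data_);
   ch->total_ = total;
   if( prev )
      prev->next_ = ch;
   return ch;
}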
Example #2
bool PaintChunker::incrementDisplayItemIndex(const DisplayItem& item) {
  DCHECK(RuntimeEnabledFeatures::slimmingPaintV2Enabled());

  ItemBehavior behavior;
  Optional<PaintChunk::Id> newChunkId;
  if (DisplayItem::isForeignLayerType(item.getType())) {
    behavior = RequiresSeparateChunk;
    // Use null chunkId if we are skipping cache, so that the chunk will not
    // match any old chunk and will be treated as brand new.
    if (!item.skippedCache())
      newChunkId.emplace(item.getId());

    // Clear m_currentChunkId so that any display items after the foreign layer
    // without a new chunk id are treated as having no id, preventing the next
    // chunk from reusing the id of the chunk before the foreign layer chunk.
    m_currentChunkId = WTF::nullopt;
  } else {
    behavior = DefaultBehavior;
    if (!item.skippedCache() && m_currentChunkId)
      newChunkId.emplace(*m_currentChunkId);
  }

  if (m_chunks.isEmpty()) {
    PaintChunk newChunk(0, 1, newChunkId ? &*newChunkId : nullptr,
                        m_currentProperties);
    m_chunks.append(newChunk);
    m_chunkBehavior.append(behavior);
    return true;
  }

  auto& lastChunk = m_chunks.last();
  bool canContinueChunk = m_currentProperties == lastChunk.properties &&
                          behavior != RequiresSeparateChunk &&
                          m_chunkBehavior.last() != RequiresSeparateChunk;
  if (canContinueChunk) {
    lastChunk.endIndex++;
    return false;
  }

  PaintChunk newChunk(lastChunk.endIndex, lastChunk.endIndex + 1,
                      newChunkId ? &*newChunkId : nullptr, m_currentProperties);
  m_chunks.append(newChunk);
  m_chunkBehavior.append(behavior);
  return true;
}
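Note the asymmetry between the two branches: a foreign layer always forces a chunk of its own (RequiresSeparateChunk), while a cache-skipping item deliberately gets a null id, so the resulting chunk can never match a chunk from the previous paint and is treated as brand new.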
Example #3
void ChunkManager::updateChunks(const Point& point, bool visible /* = true */ ){
	if (!visible){
		return;
	}

	hideDistantChunk(point);

	if (contains(point)) return;

	newChunk(point);
}
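None of the helpers called here (contains, newChunk, hideDistantChunk) appear on this page. A minimal sketch of a backing store they could operate on, assuming chunks are keyed by their grid Point in an ordered map; Point, Chunk, and every member below are hypothetical:

#include <map>

struct Point
{
	int x, y;
	bool operator<(const Point& o) const { return x < o.x || (x == o.x && y < o.y); }
};

struct Chunk { /* tile or voxel payload */ };

class ChunkManager
{
public:
	void updateChunks(const Point& point, bool visible = true);

private:
	bool contains(const Point& p) const { return m_chunks.count(p) != 0; }
	void newChunk(const Point& p) { m_chunks.emplace(p, Chunk{}); }   // create a chunk at p
	void hideDistantChunk(const Point& p);                            // evict chunks far from p
	std::map<Point, Chunk> m_chunks;
};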
Example #4
void VlrCompressor::done()
{
    // Close and clear the point encoder.
    m_encoder->done();
    m_encoder.reset();

    newChunk();

    // Save our current position.  Go to the location where we need
    // to write the chunk table offset at the beginning of the point data.

    uint64_t chunkTablePos = (uint64_t) m_stream.m_buf.size();
    // We need to add the offset given at construction time, since we did
    // not use this stream to write the header and VLRs. Keep chunkTablePos
    // un-swapped for indexing into m_buf below; only the value written to
    // the file is converted to little-endian.
    uint64_t trueChunkTablePos = htole64(chunkTablePos + m_offsetToData);

    // Equivalent of stream.seekp(m_chunkInfoPos); stream << chunkTablePos
    memcpy(&m_stream.m_buf[m_chunkInfoPos], (char*) &trueChunkTablePos, sizeof(uint64_t));

    // Move to the start of the chunk table.
    // Which in our case is the end of the m_stream vector

    // Write the chunk table header in two steps
    // 1. Push bytes into the stream
    // 2. memcpy the data into the pushed bytes
    unsigned char skip[2 * sizeof(uint32_t)] = {0};
    m_stream.putBytes(skip, sizeof(skip));
    uint32_t version = htole32(0);
    uint32_t chunkTableSize = htole32((uint32_t) m_chunkTable.size());

    memcpy(&m_stream.m_buf[chunkTablePos], &version, sizeof(uint32_t));
    memcpy(&m_stream.m_buf[chunkTablePos + sizeof(uint32_t)], &chunkTableSize, sizeof(uint32_t));

    // Encode and write the chunk table.
    // OutputStream outputStream(m_stream);
    TypedLazPerfBuf<uint8_t> outputStream(m_stream);
    Encoder encoder(outputStream);
    laszip::compressors::integer compressor(32, 2);
    compressor.init();

    uint32_t predictor = 0;
    for (uint32_t chunkSize : m_chunkTable)
    {
        chunkSize = htole32(chunkSize);
        compressor.compress(encoder, predictor, chunkSize, 1);
        predictor = chunkSize;
    }
    encoder.done();
}
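done() is the counterpart of compress() in Example #9 below: it patches the eight-byte placeholder reserved there with the absolute chunk-table position, then appends a version word, the chunk count, and the chunk sizes themselves, each delta-coded against the previous size through the integer compressor's predictor argument.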
Example #5
void PaintChunker::incrementDisplayItemIndex(ItemBehavior behavior)
{
    ASSERT(RuntimeEnabledFeatures::slimmingPaintV2Enabled());

    if (m_chunks.isEmpty()) {
        PaintChunk newChunk(0, 1, m_currentProperties);
        m_chunks.append(newChunk);
        m_chunkBehavior.append(behavior);
        return;
    }

    auto& lastChunk = m_chunks.last();
    bool canContinueChunk = m_currentProperties == lastChunk.properties
        && behavior != RequiresSeparateChunk
        && m_chunkBehavior.last() != RequiresSeparateChunk;
    if (canContinueChunk) {
        lastChunk.endIndex++;
        return;
    }

    PaintChunk newChunk(lastChunk.endIndex, lastChunk.endIndex + 1, m_currentProperties);
    m_chunks.append(newChunk);
    m_chunkBehavior.append(behavior);
}
Example #6
void* ContextMemoryManager::newData(size_t size) {
  // Use next available free location in current chunk
  void* res = (void*)d_nextFree;
  d_nextFree += size;
  // Check if the request is too big for the chunk
  if(d_nextFree > d_endChunk) {
    newChunk();
    res = (void*)d_nextFree;
    d_nextFree += size;
    AlwaysAssert(d_nextFree <= d_endChunk,
                 "Request is bigger than memory chunk size");
  }
  Debug("context") << "ContextMemoryManager::newData(" << size
                   << ") returning " << res << " at level "
                   << d_chunkList.size() << std::endl;
  return res;
}
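newData is a classic bump-pointer allocator; the newChunk() it falls back to is not shown. A rough sketch of the state it implies, with an assumed fixed chunk size and field types (real CVC4 internals may differ):

#include <cstdlib>
#include <vector>

// Minimal stand-in for the allocator state used by newData() above.
struct ContextMemoryManagerSketch {
  static const size_t chunkSizeBytes = 16384;  // assumed chunk size
  std::vector<char*> d_chunkList;              // all live chunks; size() is the "level"
  char* d_nextFree = nullptr;                  // bump pointer within the current chunk
  char* d_endChunk = nullptr;                  // one past the end of the current chunk

  // Allocate a fresh chunk and point the bump pointer at its start.
  void newChunk() {
    char* chunk = static_cast<char*>(std::malloc(chunkSizeBytes));
    d_chunkList.push_back(chunk);
    d_nextFree = chunk;
    d_endChunk = chunk + chunkSizeBytes;
  }
};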
Example #7
    TileMap::TileMap(uint32_t gridWidth, uint32_t gridHeight, uint32_t tileWidth, uint32_t tileHeight, float textureTileWidth, float textureTileHeight, uint32_t chunkWidth, uint32_t chunkHeight)
    {

        FEA_ASSERT(gridWidth > 0 && gridHeight > 0, "The size of the tile grid cannot be zero or below in any dimension! " + std::to_string(gridWidth) + " " + std::to_string(gridHeight) + " provided.");
        FEA_ASSERT(tileWidth > 0 && tileHeight > 0, "The size of the tiles cannot be zero or below in any dimension! " + std::to_string(tileWidth) + " " + std::to_string(tileHeight) + " provided.");
        FEA_ASSERT(textureTileWidth > 0.0f && textureTileHeight > 0.0f, "The size of the tiles in the texture cannot be zero or below in any dimension! " + std::to_string(textureTileWidth) + " " + std::to_string(textureTileHeight) + " provided.");
        FEA_ASSERT(chunkWidth > 0 && chunkHeight > 0, "The size of the tile chunks cannot be zero or below in any dimension! " + std::to_string(chunkWidth) + " " + std::to_string(chunkHeight) + " provided.");

        uint32_t chunkGridWidth = (gridWidth + chunkWidth - 1) / chunkWidth;
        uint32_t chunkGridHeight = (gridHeight + chunkHeight - 1) / chunkHeight;

        mChunkGridSize = glm::uvec2(chunkGridWidth, chunkGridHeight);
        mChunkSize = glm::uvec2(chunkWidth, chunkHeight);
        mGridSize = glm::uvec2(gridWidth, gridHeight);
        mTextureTileSize = glm::vec2(textureTileWidth, textureTileHeight);
        mTileSize = glm::uvec2(tileWidth, tileHeight);

        bool unevenX = gridWidth % chunkWidth != 0;
        bool unevenY = gridHeight % chunkHeight != 0;
        glm::uvec2 edgeSize(gridWidth % chunkWidth, gridHeight % chunkHeight);

        uint32_t newChunkHeight = chunkHeight;

        for(uint32_t y = 0; y < chunkGridHeight; y++)
        {
            if(y == chunkGridHeight - 1 && unevenY)
                newChunkHeight = edgeSize.y;

            uint32_t newChunkWidth = chunkWidth;
            for(uint32_t x = 0; x < chunkGridWidth; x++)
            {
                if(x == chunkGridWidth - 1 && unevenX)
                    newChunkWidth = edgeSize.x;

                TileChunk newChunk(newChunkWidth, newChunkHeight, tileWidth, tileHeight);
                glm::vec2 chunkOrigin = glm::vec2(mPosition.x + (float)(x * chunkWidth * tileWidth), mPosition.y + (float)(y * chunkHeight * tileHeight));
                newChunk.setOriginalOrigin(-chunkOrigin);

                mChunks.push_back(newChunk);
            }
        }
    }
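The expression (gridWidth + chunkWidth - 1) / chunkWidth is integer ceiling division, so a partial chunk at the edge still gets allocated: a grid 100 tiles wide with 32-tile chunks yields (100 + 31) / 32 = 4 chunk columns, the last of which is only 100 % 32 = 4 tiles wide. The unevenX/unevenY checks shrink those last-row and last-column chunks via edgeSize.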
Example #8
void* ContextMemoryManager::newData(size_t size) {
  // Use next available free location in current chunk
  void* res = (void*)d_nextFree;
  d_nextFree += size;
  // Check if the request is too big for the chunk
  if(d_nextFree > d_endChunk) {
    newChunk();
    res = (void*)d_nextFree;
    d_nextFree += size;
    AlwaysAssert(d_nextFree <= d_endChunk,
                 "Request is bigger than memory chunk size");
  }
  Debug("context") << "ContextMemoryManager::newData(" << size
                   << ") returning " << res << " at level "
                   << d_chunkList.size() << std::endl;

#ifdef CVC4_VALGRIND
  VALGRIND_MEMPOOL_ALLOC(this, static_cast<char*>(res), size);
  d_allocations.back().push_back(static_cast<char*>(res));
#endif /* CVC4_VALGRIND */

  return res;
}
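This variant of Example #6 adds Valgrind integration: VALGRIND_MEMPOOL_ALLOC registers each sub-allocation with Memcheck's memory-pool API, so Valgrind can track the validity of individual newData() results inside the reused chunks. d_allocations presumably records them per level so they can be released with the matching mempool-free call when the context level is popped.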
Example #9
void VlrCompressor::compress(const char *inbuf)
{
    // First time through.
    if (!m_encoder || !m_compressor)
    {
        // Get the position, which is 0 since we
        // are just starting to write
        m_chunkInfoPos = m_stream.m_buf.size();

        // Seek over the chunk info offset value
        unsigned char skip[sizeof(uint64_t)] = {0};
        m_stream.putBytes(skip, sizeof(skip));
        m_chunkOffset = m_chunkInfoPos + sizeof(uint64_t);

        resetCompressor();
    }
    else if (m_chunkPointsWritten == m_chunksize)
    {
        resetCompressor();
        newChunk();
    }
    m_compressor->compress(inbuf);
    m_chunkPointsWritten++;
}
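On the first call this reserves eight zero bytes at m_chunkInfoPos as a placeholder for the chunk-table offset; Example #4's done() is what later overwrites that placeholder. After that, the compressor is reset and a new chunk is started each time m_chunkPointsWritten reaches m_chunksize (the counter is presumably reset inside newChunk()).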
Example #10
void chunkArchive::closeStream()
{
	newChunk(EOC);		// Last Chunk
	closeChunk();
}
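A different flavor of newChunk: here it takes a chunk-type tag, and writing a final EOC ("end of chunks") chunk before closing presumably gives the reader an explicit end-of-stream marker.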