Example #1
struct cbfs_file *cbfs_find_next_entry(struct cbfs_image *image,
				       struct cbfs_file *entry)
{
	uint32_t addr = cbfs_get_entry_addr(image, entry);
	int align = image->header->align;
	assert(entry && cbfs_is_valid_entry(image, entry));
	addr += ntohl(entry->offset) + ntohl(entry->len);
	addr = align_up(addr, align);
	return (struct cbfs_file *)(image->buffer.data + addr);
}
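Every example on this page leans on an align_up helper. Its exact definition varies from project to project (byte alignment vs. bit count, macro vs. function, argument order), so each snippet should be read with that project's own definition in mind. As a rough reference only, a minimal sketch of the common power-of-two form:

#include <stdint.h>

/* Minimal sketch, not taken from any of the quoted projects: round value
   up to the next multiple of align, where align must be a power of two. */
static inline uintptr_t align_up(uintptr_t value, uintptr_t align)
{
    return (value + align - 1) & ~(align - 1);
}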
Example #2
/*!
 * \cond DOXYGEN_PRIVATE
 * Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
 */
static size_t adjust_request_size(size_t size, size_t align)
{
    size_t adjust = 0;
    if (size && size < block_size_max)
    {
        const size_t aligned = align_up(size, align);
        adjust = tlsf_max(aligned, block_size_min);
    }
    return adjust;
}
Example #3
void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
    }
    return addr;
}
Example #4
uint32_t
GX2CalcFetchShaderSizeEx(uint32_t attribs,
                         GX2FetchShaderType type,
                         GX2TessellationMode mode)
{
   auto fetch = GX2FSCalcNumFetchInsts(attribs, type);
   auto aluBytes = sizeof(latte::AluInst) * GX2FSCalcNumAluInsts(type, mode);
   auto cfBytes = sizeof(latte::ControlFlowInst) * GX2FSCalcNumCFInsts(fetch, type);

   return static_cast<uint32_t>(sizeof(latte::VertexFetchInst) * fetch + align_up(cfBytes + aluBytes, 16));
}
Example #5
/// @summary Allocates memory using malloc with alignment.
/// @param size_in_bytes The amount of memory being requested.
/// @param alignment The desired power-of-two alignment of the returned address.
/// @param actual On return, stores the number of bytes actually allocated.
/// @return A pointer to a memory block whose address is an even integer multiple
/// of alignment, and whose size is at least size_in_bytes, or NULL.
static void* allocate_aligned(size_t size_in_bytes, size_t alignment, size_t &actual)
{
    size_t   total_size = size_in_bytes + sizeof(uintptr_t); // allocate enough extra to store the base address
    size_t   alloc_size = align_up(total_size, alignment);   // allocate enough extra to properly align
    uint8_t *mem_ptr    = (uint8_t*) malloc(alloc_size);     // allocate the raw memory block
    uint8_t *aln_ptr    = align_to(mem_ptr, alignment);      // calculate the aligned address
    uint8_t *base       = aln_ptr - sizeof(uintptr_t);       // where to store the address returned by malloc
    *(uintptr_t*)base   = (uintptr_t) mem_ptr;               // store the address returned by malloc
    actual = alloc_size;
    return aln_ptr;
}
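The doc comment above describes the usual trick of stashing the pointer returned by malloc in the sizeof(uintptr_t) bytes immediately preceding the aligned address. A hypothetical matching release routine (an assumption, not part of the original snippet) would read that stored pointer back and hand it to free:

/* Hypothetical counterpart to allocate_aligned above (assumed, not shown in
   the original source): recover the base pointer stored just before the
   aligned address and release the whole block. */
static void free_aligned(void *aln_ptr)
{
    if (aln_ptr == NULL)
        return;
    uint8_t *base = (uint8_t*) aln_ptr - sizeof(uintptr_t);
    free((void*) *(uintptr_t*) base);
}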
Example #6
void allocate_executable_memory(memory_block_data * self       //in
                                , intptr_t          size_bytes //in
                                , intptr_t          alignment  //in
                                , char **           out_begin  //out
                                , char **           out_end    //out
                                )
{
    executable_memory_block* emb = static_cast<executable_memory_block*>(self);
    // some preconditions
    assert( executable_memory_block_type == executable_memory_block_type);
    assert( (size_t)size_bytes <= emb->m_chunk_size );
#ifdef ENABLE_LOGGING
    std::cout << "allocating " << size_bytes 
              << " of executable memory with alignment " << alignment 
              << std::endl;
#endif //ENABLE_LOGGING
    
    if ((size_t)size_bytes > emb->m_chunk_size)
    {
        std::stringstream ss;
        ss << "Memory allocation request of " << size_bytes 
           << " is too large for this executable_memory_block"
              " with chunk size" << emb->m_chunk_size;
        throw std::runtime_error(ss.str());
    }
    
    if (emb->m_allocated_chunks.empty())
        emb->add_chunk();
    
    void* current_chunk = emb->m_allocated_chunks.back();
    void* begin = reinterpret_cast<void*>(align_up(reinterpret_cast<size_t>(emb->m_pivot), alignment));
    void* end   = ptr_offset(begin, size_bytes);
    if (ptr_offset(current_chunk, emb->m_chunk_size) < ptr_offset(emb->m_pivot, size_bytes))
    {
        emb->add_chunk();
        begin = emb->m_allocated_chunks.back();
        end   = ptr_offset(begin, size_bytes);
    }

    emb->m_pivot = end;
    assert(ptr_in_range(begin
                        , emb->m_allocated_chunks.back()
                        , ptr_offset(emb->m_allocated_chunks.back()
                                     , emb->m_chunk_size)));
    assert(ptr_in_range(end
                        , emb->m_allocated_chunks.back()
                        , ptr_offset(emb->m_allocated_chunks.back()
                        , emb->m_chunk_size)));
    assert(((int8_t*)end - (int8_t*)begin) == size_bytes);
    assert(emb->m_pivot == end);
    *out_begin = static_cast<char*>(begin);
    *out_end = static_cast<char*>(end);
    
}
Example #7
void
padCommandBuffer(pm4::Buffer *buffer)
{
   // Display list is meant to be padded to 32 bytes
   auto alignedSize = align_up(buffer->curSize, 32 / 4);

   decaf_check(alignedSize <= buffer->maxSize);

   while (buffer->curSize < alignedSize) {
      buffer->buffer[buffer->curSize++] = byte_swap(0xBEEF2929);
   }
}
Example #8
cudaError_t GPUAllocator::slabAllocate(void** dev_ptr, size_t size)
{
    if (current_size_ + size >= total_size_)
	return cudaErrorMemoryAllocation;

    *dev_ptr = current_ptr_;
    size_t aligned_size = align_up(size, ALIGNMENT);
    current_ptr_ = (uint8_t*)current_ptr_ + aligned_size;
    current_size_ += aligned_size;

    return cudaSuccess;
}
Example #9
int dynarray::ensure_capacity(size_t min_size) {
    min_size  = align_up(min_size, MM_PAGE_SIZE);
    int err = 0;
    if(size() < min_size) {
	size_t next_size = std::max(min_size, 2*size());
	err = resize(next_size);
	
	// did we reach a limit?
	if(err == EFBIG) 
	    err = resize(min_size);
    }
    return err;
}
Example #10
int FlashSimBlockDevice::init()
{
    int ret = _bd->init();
    if (ret) {
        return ret;
    }
    _blank_buf_size = align_up(min_blank_buf_size, _bd->get_program_size());
    if (!_blank_buf) {
        _blank_buf = new uint8_t[_blank_buf_size];
        MBED_ASSERT(_blank_buf);
    }
    return BD_ERROR_OK;
}
Example #11
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is used to attempt to keep keying material out of swap. Note
        // that it does not provide this as a guarantee, but, in practice, memory
        // that has been VirtualLock'd almost never gets written to the pagefile
        // except in rare circumstances where memory is extremely low.
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
Example #12
File: ir.c Project: ST3ALth/redream
struct ir_local *ir_alloc_local(struct ir *ir, enum ir_type type) {
  // align local to natural size
  int type_size = ir_type_size(type);
  ir->locals_size = align_up(ir->locals_size, type_size);

  struct ir_local *l = ir_calloc(ir, sizeof(struct ir_local));
  l->type = type;
  l->offset = ir_alloc_i32(ir, ir->locals_size);
  list_add(&ir->locals, &l->it);

  ir->locals_size += type_size;

  return l;
}
Example #13
static void swizzle_32bpp_ref(uint32_t *dest, uint32_t dx, uint32_t dy, uint32_t dw, uint32_t dh,
                              const uint32_t *src, uint32_t sw, uint32_t sh, uint32_t spitch)
{
    uintptr_t tile_y_stride = swizzle_x_tile(align_up(dw, swizzle_outermost_tile_w()));

    for (uint32_t y = 0; y < sh; y++) {
        uintptr_t dest_y_offs = ((dy + y) / swizzle_outermost_tile_h()) * tile_y_stride + swizzle_y(dy + y);
        uint32_t *dest_y = dest + dest_y_offs;

        for (uint32_t x = 0; x < sw; x++)
            dest_y[swizzle_x_tile(dx + x)] = src[x];

        src += spitch;
    }
}
Example #14
static void* libc_alloc(WasmAllocator* allocator, size_t size, size_t align) {
  assert(is_power_of_two(align));
  if (align < sizeof(void*))
    align = sizeof(void*);

  void* p = malloc(size + sizeof(MemInfo) + align - 1);
  if (!p)
    return NULL;

  void* aligned = align_up(p + sizeof(MemInfo), align);
  MemInfo* mem_info = (MemInfo*)aligned - 1;
  assert(is_aligned(mem_info, sizeof(void*)));
  mem_info->real_pointer = p;
  mem_info->size = size;
  return aligned;
}
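The MemInfo header written just before the returned pointer exists so the allocation can later be released. The free side is not shown in this snippet; a sketch of what it would presumably look like, assuming the same MemInfo layout:

/* Assumed counterpart to libc_alloc: the MemInfo header sits directly before
   the aligned pointer, so the block malloc originally returned can be
   recovered from mem_info->real_pointer and freed. */
static void libc_free(WasmAllocator* allocator, void* p) {
  if (!p)
    return;
  MemInfo* mem_info = (MemInfo*)p - 1;
  free(mem_info->real_pointer);
}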
Example #15
size_t contract(Heap *heap, size_t new_size)
{
	// Sanity check.
	ASSERT(heap->start_addr + new_size < heap->end_addr);

	new_size = align_up(new_size);
	if (new_size < HEAP_MIN_SIZE)
		new_size = HEAP_MIN_SIZE;

	size_t old_size = heap->end_addr - heap->start_addr;
	for (size_t i = old_size - PAGE_SIZE; i > new_size; i -= PAGE_SIZE)
		free_frame(get_page(heap->start_addr + i, 0, kernel_dir));

	heap->end_addr = heap->start_addr + new_size;
	return new_size;
}
Example #16
/**
 * prepare_finalized_boot_args
 *
 * Prepare the final boot-args.
 */
boot_args *prepare_finalized_boot_args(void)
{
    boot_args *bootArgs;

    gBootArgs.topOfKernelData = align_up(kernel_region.pos, 0x100000);

    printf("gBootArgs.commandLine = [%s]\n", gBootArgs.commandLine);

    /* Allocate kernel memory for this. */
    bootArgs =
        (boot_args *) memory_region_reserve(&kernel_region,
                                            sizeof(boot_args), 1024);
    bcopy(&gBootArgs, bootArgs, sizeof(boot_args));

    return bootArgs;
}
Example #17
static int
ctf_save_align_write (struct trace_write_handler *handler,
		      const gdb_byte *buf,
		      size_t size, size_t align_size)
{
  long offset
    = (align_up (handler->content_size, align_size)
       - handler->content_size);

  if (ctf_save_fseek (handler, offset, SEEK_CUR))
    return -1;

  if (ctf_save_write (handler, buf, size))
    return -1;

  return 0;
}
Example #18
int BufferParams::get_passes_size()
{
  int size = 0;

  for (size_t i = 0; i < passes.size(); i++)
    size += passes[i].components;

  if (denoising_data_pass) {
    size += DENOISING_PASS_SIZE_BASE;
    if (denoising_clean_pass)
      size += DENOISING_PASS_SIZE_CLEAN;
    if (denoising_prefiltered_pass)
      size += DENOISING_PASS_SIZE_PREFILTERED;
  }

  return align_up(size, 4);
}
Example #19
MEMHeapHandle
MEMCreateExpHeapEx(virt_ptr<void> base,
                   uint32_t size,
                   uint32_t flags)
{
   decaf_check(base);

   auto heapData = virt_cast<uint8_t *>(base);
   auto alignedStart = align_up(heapData, 4);
   auto alignedEnd = align_down(heapData + size, 4);

   if (alignedEnd < alignedStart || alignedEnd - alignedStart < 0x6C) {
      // Not enough room for the header
      return nullptr;
   }

   // Get our heap header
   auto heap = virt_cast<MEMExpHeap *>(alignedStart);

   // Register Heap
   internal::registerHeap(virt_addrof(heap->header),
                          MEMHeapTag::ExpandedHeap,
                          alignedStart + sizeof(MEMExpHeap),
                          alignedEnd,
                          static_cast<MEMHeapFlags>(flags));

   // Create an initial block of the data
   auto dataStart = alignedStart + sizeof(MEMExpHeap);
   auto firstBlock = virt_cast<MEMExpHeapBlock *>(dataStart);

   firstBlock->attribs = MEMExpHeapBlockAttribs::get(0);
   firstBlock->blockSize = static_cast<uint32_t>((alignedEnd - dataStart) - sizeof(MEMExpHeapBlock));
   firstBlock->next = nullptr;
   firstBlock->prev = nullptr;
   firstBlock->tag = FreeTag;

   heap->freeList.head = firstBlock;
   heap->freeList.tail = firstBlock;
   heap->usedList.head = nullptr;
   heap->usedList.tail = nullptr;

   heap->groupId = uint16_t { 0 };
   heap->attribs = MEMExpHeapAttribs::get(0);

   return virt_cast<MEMHeapHeader *>(heap);
}
Example #20
void expand(Heap *heap, size_t new_size)
{
	// Sanity check
	ASSERT(heap->start_addr + new_size > heap->end_addr);

	new_size = align_up(new_size);

	// We don't want to expand over the max address
	ASSERT(heap->start_addr + new_size <= heap->max_addr);

	size_t old_size = heap->end_addr - heap->start_addr;

	for (size_t i = old_size; i < new_size; i += PAGE_SIZE)
		alloc_frame(get_page(heap->start_addr + i, 1, kernel_dir),
				heap->supervisor, heap->readonly);

	heap->end_addr = heap->start_addr + new_size;
}
Example #21
static char*
par_alloc_for_promotion_slow_path (int age, size_t objsize)
{
	char *p;
	size_t allocated_size;
	size_t aligned_objsize = (size_t)align_up (objsize, SGEN_TO_SPACE_GRANULE_BITS);

	mono_mutex_lock (&par_alloc_buffer_refill_mutex);

restart:
	p = age_alloc_buffers [age].next;
	if (G_LIKELY (p + objsize <= age_alloc_buffers [age].end)) {
		if (SGEN_CAS_PTR ((void*)&age_alloc_buffers [age].next, p + objsize, p) != p)
			goto restart;
	} else {
		/* Reclaim remaining space - if we OOMd the nursery nothing to see here. */
		char *end = age_alloc_buffers [age].end;
		if (end) {
			do {
				p = age_alloc_buffers [age].next;
			} while (SGEN_CAS_PTR ((void*)&age_alloc_buffers [age].next, end, p) != p);
				sgen_clear_range (p, end);
		}

		/* By setting end to NULL we make sure no other thread can advance while we're updating.*/
		age_alloc_buffers [age].end = NULL;
		STORE_STORE_FENCE;

		p = sgen_fragment_allocator_par_range_alloc (
			&collector_allocator,
			MAX (aligned_objsize, AGE_ALLOC_BUFFER_DESIRED_SIZE),
			MAX (aligned_objsize, AGE_ALLOC_BUFFER_MIN_SIZE),
			&allocated_size);
		if (p) {
			set_age_in_range (p, p + allocated_size, age);
			age_alloc_buffers [age].next = p + objsize;
			STORE_STORE_FENCE; /* Next must arrive before the new value for next. */
			age_alloc_buffers [age].end = p + allocated_size;
		}
	}

	mono_mutex_unlock (&par_alloc_buffer_refill_mutex);
	return p;
}
Example #22
File: sink.c Project: Phuehvk/upb
void *upb_pipeline_alloc(upb_pipeline *p, size_t bytes) {
  void *mem = align_up(p->bump_top);
  if (!mem || mem > p->bump_limit || p->bump_limit - mem < bytes) {
    size_t size = regionsize(UPB_MAX(BLOCK_SIZE, bytes));
    struct region *r;
    if (!p->realloc || !(r = p->realloc(p->ud, NULL, size))) {
      return NULL;
    }
    r->prev = p->region_head;
    p->region_head = r;
    p->bump_limit = (char*)r + size;
    mem = &r->data[0];
    assert(p->bump_limit > mem);
    assert(p->bump_limit - mem >= bytes);
  }
  p->bump_top = mem + bytes;
  p->last_alloc = mem;
  return mem;
}
Example #23
static void
prepare_to_space (char *to_space_bitmap, int space_bitmap_size)
{
	SgenFragment **previous, *frag;

	memset (to_space_bitmap, 0, space_bitmap_size);
	memset (age_alloc_buffers, 0, sizeof (age_alloc_buffers));

	previous = &collector_allocator.alloc_head;

	for (frag = *previous; frag; frag = *previous) {
		char *start = align_up (frag->fragment_next, SGEN_TO_SPACE_GRANULE_BITS);
		char *end = align_down (frag->fragment_end, SGEN_TO_SPACE_GRANULE_BITS);

		/* Fragment is too small to be usable. */
		if ((end - start) < SGEN_MAX_NURSERY_WASTE) {
			sgen_clear_range (frag->fragment_next, frag->fragment_end);
			frag->fragment_next = frag->fragment_end = frag->fragment_start;
			*previous = frag->next;
			continue;
		}

		/*
		We need to insert 3 phony objects so the fragments build step can correctly
		walk the nursery.
		*/

		/* Clean the fragment range. */
		sgen_clear_range (start, end);
		/* We need a phony object in between the original fragment start and the effective one. */
		if (start != frag->fragment_next)
			sgen_clear_range (frag->fragment_next, start);
		/* We need a phony object in between the new fragment end and the original fragment end. */
		if (end != frag->fragment_end)
			sgen_clear_range (end, frag->fragment_end);

		frag->fragment_start = frag->fragment_next = start;
		frag->fragment_end = end;
		mark_bits_in_range (to_space_bitmap, start, end);
		previous = &frag->next;
	}
}
Example #24
static char*
alloc_for_promotion_slow_path (int age, size_t objsize)
{
	char *p;
	size_t allocated_size;
	size_t aligned_objsize = (size_t)align_up (objsize, SGEN_TO_SPACE_GRANULE_BITS);

	p = sgen_fragment_allocator_serial_range_alloc (
		&collector_allocator,
		MAX (aligned_objsize, AGE_ALLOC_BUFFER_DESIRED_SIZE),
		MAX (aligned_objsize, AGE_ALLOC_BUFFER_MIN_SIZE),
		&allocated_size);
	if (p) {
		set_age_in_range (p, p + allocated_size, age);
		sgen_clear_range (age_alloc_buffers [age].next, age_alloc_buffers [age].end);
		age_alloc_buffers [age].next = p + objsize;
		age_alloc_buffers [age].end = p + allocated_size;
	}
	return p;
}
Example #25
void *Arena::alloc(size_t size) {
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks
    if (size == 0) return nullptr;

    // Pick a large enough free-chunk
    auto it =
        std::find_if(chunks_free.begin(), chunks_free.end(),
                     [=](const std::map<char *, size_t>::value_type &chunk) {
                         return chunk.second >= size;
                     });
    if (it == chunks_free.end()) return nullptr;

    // Create the used-chunk, taking its space from the end of the free-chunk
    auto alloced =
        chunks_used.emplace(it->first + it->second - size, size).first;
    if (!(it->second -= size)) chunks_free.erase(it);
    return reinterpret_cast<void *>(alloced->first);
}
Example #26
File: vm.c Project: carriercomm/ix
/**
 * vm_map_to_user - makes kernel memory available to the user
 * @kern_addr: a pointer to kernel memory (must be aligned)
 * @nr: the number of pages
 * @size: the size of each page
 * @perm: the permission of the new mapping
 *
 * NOTE: vm_lock must be held.
 *
 * Returns an address in the IOMAP region if successful, otherwise NULL.
 */
void *vm_map_to_user(void *kern_addr, int nr, int size, int perm)
{
	int ret;
	virtaddr_t va;

	perm |= VM_PERM_U;

	spin_lock(&vm_lock);
	va = align_up(vm_iomapk_pos, size);

	ret = __vm_map_phys((physaddr_t) kern_addr, va, nr, size, perm);
	if (ret) {
		spin_unlock(&vm_lock);
		return NULL;
	}

	vm_iomapk_pos = va + size * nr;
	spin_unlock(&vm_lock);

	return (void *) va;
}
Example #27
int dynarray::resize(size_t new_size) {
    // round up to the nearest page boundary
    new_size = align_up(new_size, MM_PAGE_SIZE);

    // validate
    if(_size > new_size)
	return EINVAL;

    static int const PROTS = PROT_READ | PROT_WRITE;
    static int const FLAGS = MAP_FIXED | MAP_ANON | MAP_PRIVATE;

    // remap the new range as RW. Don't mess w/ the existing region!!
    void* result = mmap(_base+_size, new_size-_size, PROTS, FLAGS, -1, 0);
    if (result == MAP_FAILED) {
        fprintf(stderr, "mmap failed: %s", strerror(errno));
        abort();
    }
    if(result == MAP_FAILED)
	return errno;

    _size = new_size;
    return 0;
}
Example #28
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}
Example #29
void PosixLockedPageAllocator::FreeLocked(void *addr, size_t len) {
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munlock(addr, len);
    munmap(addr, len);
}
Example #30
uint32_t
MEMResizeForMBlockExpHeap(MEMHeapHandle handle,
                          virt_ptr<void> ptr,
                          uint32_t size)
{
   auto heap = virt_cast<MEMExpHeap *>(handle);
   internal::HeapLock lock { virt_addrof(heap->header) };
   size = align_up(size, 4);

   auto block = getUsedMemBlock(ptr);

   if (size < block->blockSize) {
      auto releasedSpace = block->blockSize - size;

      if (releasedSpace > sizeof(MEMExpHeapBlock) + 0x4) {
         auto releasedMemEnd = getBlockMemEnd(block);
         auto releasedMemStart = releasedMemEnd - releasedSpace;

         block->blockSize -= releasedSpace;
         releaseMemory(heap, releasedMemStart, releasedMemEnd);
      }
   } else if (size > block->blockSize) {
      auto blockMemEnd = getBlockMemEnd(block);
      auto freeBlock = virt_ptr<MEMExpHeapBlock> { nullptr };

      for (auto i = heap->freeList.head; i; i = i->next) {
         auto freeBlockMemStart = getBlockMemStart(i);

         if (freeBlockMemStart == blockMemEnd) {
            freeBlock = i;
            break;
         }

         // Free list is sorted, so we only need to search a little bit
         if (freeBlockMemStart > blockMemEnd) {
            break;
         }
      }

      if (!freeBlock) {
         return 0;
      }

      // Grab the data we need from the free block
      auto freeBlockMemStart = getBlockMemStart(freeBlock);
      auto freeBlockMemEnd = getBlockMemEnd(freeBlock);
      auto freeMemSize = static_cast<uint32_t>(freeBlockMemEnd - freeBlockMemStart);

      // Drop the free block from the list of free regions
      removeBlock(virt_addrof(heap->freeList), freeBlock);

      // Adjust the sizing of the free area and the block
      auto newAllocSize = (size - block->blockSize);
      freeMemSize -= newAllocSize;
      block->blockSize = size;

      if (heap->header.flags & MEMHeapFlags::ZeroAllocated) {
         memset(freeBlockMemStart, 0, newAllocSize);
      } else if(heap->header.flags & MEMHeapFlags::DebugMode) {
         auto fillVal = MEMGetFillValForHeap(MEMHeapFillType::Allocated);
         memset(freeBlockMemStart, fillVal, newAllocSize);
      }

      // If we have enough room to create a new free block, lets release
      //  the memory back to the heap.  Otherwise we just tack the remainder
      //  onto the end of the block we resized.
      if (freeMemSize >= sizeof(MEMExpHeapBlock) + 0x4) {
         releaseMemory(heap, freeBlockMemEnd - freeMemSize, freeBlockMemEnd);
      } else {
         block->blockSize += freeMemSize;
      }
   }

   return block->blockSize;
}