Example 1
void ivfc_process(ivfc_context* ctx, u32 actions)
{
	ivfc_fseek(ctx, ctx->offset);
	ivfc_fread(ctx, &ctx->header, 1, sizeof(ivfc_header));

	if (getle32(ctx->header.magic) != MAGIC_IVFC)
	{
		fprintf(stdout, "Error, IVFC segment corrupted\n");
		return;
	}

	/* id 0x10000: a three-level IVFC hash tree. */
	if (getle32(ctx->header.id) == 0x10000)
	{
		ctx->levelcount = 3;

		ctx->level[2].hashblocksize = 1 << getle32(ctx->header.level3.blocksize);
		ctx->level[1].hashblocksize = 1 << getle32(ctx->header.level2.blocksize);
		ctx->level[0].hashblocksize = 1 << getle32(ctx->header.level1.blocksize);

		ctx->bodyoffset = align64(IVFC_HEADER_SIZE + getle32(ctx->header.masterhashsize), ctx->level[2].hashblocksize);
		ctx->bodysize = getle64(ctx->header.level3.hashdatasize);
		
		ctx->level[2].dataoffset = ctx->bodyoffset;
		ctx->level[2].datasize = align64(ctx->bodysize, ctx->level[2].hashblocksize);

		ctx->level[0].dataoffset = ctx->level[2].dataoffset + ctx->level[2].datasize;
		ctx->level[0].datasize = align64(getle64(ctx->header.level1.hashdatasize), ctx->level[0].hashblocksize);

		ctx->level[1].dataoffset = ctx->level[0].dataoffset + ctx->level[0].datasize;
		ctx->level[1].datasize = align64(getle64(ctx->header.level2.hashdatasize), ctx->level[1].hashblocksize);

		/* Each level's hashes live in the previous level's data region;
		 * the level 1 hashes (the master hashes) sit right after the header. */
		ctx->level[0].hashoffset = IVFC_HEADER_SIZE;
		ctx->level[1].hashoffset = ctx->level[0].dataoffset;
		ctx->level[2].hashoffset = ctx->level[1].dataoffset;
	}

	if (actions & VerifyFlag)
		ivfc_verify(ctx, actions);

	if (actions & InfoFlag)
		ivfc_print(ctx);		

}
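None of the examples in this collection defines align64 itself. In the two-argument form used here and throughout the amdgpu snippets below (Examples 4 through 7, 9, and 10), it reads as "round a 64-bit value up to a multiple of a power-of-two alignment". A minimal sketch under that assumption, not taken from any of the sources:

#include <stdint.h>

/* Round value up to the next multiple of alignment.
 * Assumes alignment is a nonzero power of two, which holds for the hash
 * block sizes and GPU page sizes passed by the callers in these examples. */
static inline uint64_t align64(uint64_t value, uint64_t alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

The single-argument calls in Examples 2, 3, and 8 refer to unrelated helpers that merely share the name, so this sketch does not apply to them.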
Example 2
static inline void sd_write_aligned_blob(std::ostringstream &buf, void *b, int b_size,
				 const char *name)
{
	sd_write_name(buf, name);
	/* The pad calculation MUST come after the name is written. The +5 skips
	 * the 1-byte SD_BLOB tag and the 4-byte size field emitted below, so the
	 * padding lands just before the blob data and leaves it aligned. */
	std::streamoff pos = buf.tellp() + (std::streamoff) 5;
	size_t pad = align64(pos) - pos;
	sd_write8(buf, SD_BLOB);
	sd_write32(buf, b_size + pad);
	buf.write(zeros, pad);
	buf.write((const char *) b, b_size);
}
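In this snippet align64 takes a single offset, and the surrounding pad logic suggests it rounds a stream position up to an 8-byte (64-bit) boundary so that the blob data starts aligned after the tag and size fields. A sketch of that assumed single-argument form, again not copied from the source:

#include <stddef.h>

/* Round x up to the next multiple of 8 bytes (64 bits); assumed shape of the
 * helper used by sd_write_aligned_blob above. */
static inline size_t align64(size_t x)
{
    return (x + 7) & ~(size_t) 7;
}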
Example 3
static ucell *aligncell(ucell *v)
{
  if (pc_cellsize==2)
    return align16(v);
  if (pc_cellsize==4)
    return align32(v);
  #if PAWN_CELL_SIZE>=64
    if (pc_cellsize==8)
      return align64(v);
  #endif
  assert(0);
  return NULL;  /* not reached for a valid pc_cellsize; avoids falling off a non-void function */
}
Example 4
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
					    void *pointer, uint64_t size)
{
    struct amdgpu_winsys *ws = amdgpu_winsys(rws);
    amdgpu_bo_handle buf_handle;
    struct amdgpu_winsys_bo *bo;
    uint64_t va;
    amdgpu_va_handle va_handle;

    bo = CALLOC_STRUCT(amdgpu_winsys_bo);
    if (!bo)
        return NULL;

    if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
        goto error;

    if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                              size, 1 << 12, 0, &va, &va_handle, 0))
        goto error_va_alloc;

    if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
        goto error_va_map;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->bo = buf_handle;
    bo->base.alignment = 0;
    bo->base.size = size;
    bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
    bo->ws = ws;
    bo->user_ptr = pointer;
    bo->va = va;
    bo->va_handle = va_handle;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

    ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

    amdgpu_add_buffer_to_global_list(bo);

    return (struct pb_buffer*)bo;

error_va_map:
    amdgpu_va_range_free(va_handle);

error_va_alloc:
    amdgpu_bo_free(buf_handle);

error:
    FREE(bo);
    return NULL;
}
Example 5
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
   FREE(bo);
}
Example 6
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   bo = (struct amdgpu_winsys_bo*)
        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage);
   if (bo)
      return &bo->base;

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
      if (!bo)
         return NULL;
   }

   bo->use_reusable_pool = true;
   return &bo->base;
}
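The usage value built above packs the buffer domain into bits 0 to 2 and the creation flags above bit 2, so the cache manager never treats buffers with different domains or flags as interchangeable. As a worked example, assuming the usual radeon_winsys values RADEON_DOMAIN_GTT = 2 and RADEON_DOMAIN_VRAM = 4: a GTT-only buffer gets bit 0 (2 >> 1 = 1), a VRAM-only buffer gets bit 1 (4 >> 1 = 2), the combined VRAM_GTT case is forced onto bit 2 (1 << 2), and a flags value of 1 then adds bit 4 (1 << (1 + 3)), giving usage = 0x12 for a VRAM buffer with that flag.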
Example 7
static int compute_level(struct amdgpu_winsys *ws,
                         struct radeon_surf *surf, bool is_stencil,
                         unsigned level, unsigned type, bool compressed,
                         ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
                         ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
                         ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
                         ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut)
{
   struct radeon_surf_level *surf_level;
   ADDR_E_RETURNCODE ret;

   AddrSurfInfoIn->mipLevel = level;
   AddrSurfInfoIn->width = u_minify(surf->npix_x, level);
   AddrSurfInfoIn->height = u_minify(surf->npix_y, level);

   if (type == RADEON_SURF_TYPE_3D)
      AddrSurfInfoIn->numSlices = u_minify(surf->npix_z, level);
   else if (type == RADEON_SURF_TYPE_CUBEMAP)
      AddrSurfInfoIn->numSlices = 6;
   else
      AddrSurfInfoIn->numSlices = surf->array_size;

   if (level > 0) {
      /* Set the base level pitch. This is needed for calculation
       * of non-zero levels. */
      if (is_stencil)
         AddrSurfInfoIn->basePitch = surf->stencil_level[0].nblk_x;
      else
         AddrSurfInfoIn->basePitch = surf->level[0].nblk_x;

      /* Convert blocks to pixels for compressed formats. */
      if (compressed)
         AddrSurfInfoIn->basePitch *= surf->blk_w;
   }

   ret = AddrComputeSurfaceInfo(ws->addrlib,
                                AddrSurfInfoIn,
                                AddrSurfInfoOut);
   if (ret != ADDR_OK) {
      return ret;
   }

   surf_level = is_stencil ? &surf->stencil_level[level] : &surf->level[level];
   surf_level->offset = align64(surf->bo_size, AddrSurfInfoOut->baseAlign);
   surf_level->slice_size = AddrSurfInfoOut->sliceSize;
   surf_level->pitch_bytes = AddrSurfInfoOut->pitch * (is_stencil ? 1 : surf->bpe);
   surf_level->npix_x = u_minify(surf->npix_x, level);
   surf_level->npix_y = u_minify(surf->npix_y, level);
   surf_level->npix_z = u_minify(surf->npix_z, level);
   surf_level->nblk_x = AddrSurfInfoOut->pitch;
   surf_level->nblk_y = AddrSurfInfoOut->height;
   if (type == RADEON_SURF_TYPE_3D)
      surf_level->nblk_z = AddrSurfInfoOut->depth;
   else
      surf_level->nblk_z = 1;

   switch (AddrSurfInfoOut->tileMode) {
   case ADDR_TM_LINEAR_ALIGNED:
      surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
      break;
   case ADDR_TM_1D_TILED_THIN1:
      surf_level->mode = RADEON_SURF_MODE_1D;
      break;
   case ADDR_TM_2D_TILED_THIN1:
      surf_level->mode = RADEON_SURF_MODE_2D;
      break;
   default:
      assert(0);
   }

   if (is_stencil)
      surf->stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
   else
      surf->tiling_index[level] = AddrSurfInfoOut->tileIndex;

   surf->bo_size = surf_level->offset + AddrSurfInfoOut->surfSize;

   /* Clear DCC fields at the beginning. */
   surf_level->dcc_offset = 0;
   surf_level->dcc_enabled = false;

   /* The previous level's flag tells us if we can use DCC for this level. */
   if (AddrSurfInfoIn->flags.dccCompatible &&
       (level == 0 || AddrDccOut->subLvlCompressible)) {
      AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
      AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
      AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
      AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
      AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;

      ret = AddrComputeDccInfo(ws->addrlib,
                               AddrDccIn,
                               AddrDccOut);

      if (ret == ADDR_OK) {
         surf_level->dcc_offset = surf->dcc_size;
         surf_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
         surf_level->dcc_enabled = true;
         surf->dcc_size = surf_level->dcc_offset + AddrDccOut->dccRamSize;
         surf->dcc_alignment = MAX2(surf->dcc_alignment, AddrDccOut->dccRamBaseAlign);
      }
   }

   return 0;
}
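The per-level layout in compute_level is the same running round-up pattern as in the other examples: each mip level starts at the current surf->bo_size aligned up to that level's baseAlign, and bo_size then advances past the level. With made-up numbers (not from the source): if surf->bo_size is 100000 and AddrSurfInfoOut->baseAlign is 65536, the level lands at align64(100000, 65536) = 131072, and bo_size becomes 131072 plus that level's surfSize. The dcc_offset / dcc_size pair accumulates in the same running fashion, just without the per-level round-up.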
Example 8
static inline u64 dispatch_t162(t162 a163, t40 a164)
{
    return align64(a164);
}
Example 9
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      /* Unknown handle type: free the bo allocated above before bailing out. */
      goto error;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;


   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}
Example 10
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->info.gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}