Example #1
/**
 * Called via glGetBufferSubDataARB()
 */
static void
radeonGetBufferSubData(GLcontext * ctx,
                       GLenum target,
                       GLintptrARB offset,
                       GLsizeiptrARB size,
                       GLvoid * data,
                       struct gl_buffer_object *obj)
{
    struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);

    radeon_bo_map(radeon_obj->bo, GL_FALSE);

    memcpy(data, radeon_obj->bo->ptr + offset, size);

    radeon_bo_unmap(radeon_obj->bo);
}
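Every example on this page follows the same basic discipline: map the buffer object, touch bo->ptr, unmap. A minimal stand-alone sketch of the read-back variant, assuming only the libdrm_radeon calls used above (copy_from_bo and its parameters are hypothetical names, not driver code):

#include <string.h>
#include "radeon_bo.h"   /* libdrm_radeon: struct radeon_bo, radeon_bo_map()/unmap() */

/* Hypothetical helper: read "size" bytes out of a BO starting at "offset". */
static void
copy_from_bo(struct radeon_bo *bo, size_t offset, size_t size, void *out)
{
    if (radeon_bo_map(bo, 0) != 0)                 /* 0 = map for reading */
        return;
    memcpy(out, (char *)bo->ptr + offset, size);   /* bo->ptr is only valid while mapped */
    radeon_bo_unmap(bo);
}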
Example #2
static void radeonQueryGetResult(struct gl_context *ctx, struct gl_query_object *q)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = (struct radeon_query_object *)q;
        uint32_t *result;
	int i;

	radeon_print(RADEON_STATE, RADEON_VERBOSE,
			"%s: query id %d, result %d\n",
			__FUNCTION__, query->Base.Id, (int) query->Base.Result);

	radeon_bo_map(query->bo, GL_FALSE);
        result = query->bo->ptr;

	query->Base.Result = 0;
	if (IS_R600_CLASS(radeon->radeonScreen)) {
		/* ZPASS EVENT writes alternating qwords
		 * At query start we set the start offset to 0 and
		 * hw writes zpass start counts to qwords 0, 2, 4, 6.
		 * At query end we set the start offset to 8 and
		 * hw writes zpass end counts to qwords 1, 3, 5, 7.
		 * then we subtract. The MSB is the valid bit.
		 */
		for (i = 0; i < 32; i += 4) {
			uint64_t start = (uint64_t)LE32_TO_CPU(result[i]) |
					 (uint64_t)LE32_TO_CPU(result[i + 1]) << 32;
			uint64_t end = (uint64_t)LE32_TO_CPU(result[i + 2]) |
				       (uint64_t)LE32_TO_CPU(result[i + 3]) << 32;
			if ((start & 0x8000000000000000) && (end & 0x8000000000000000)) {
				uint64_t query_count = end - start;
				query->Base.Result += query_count;

			}
			radeon_print(RADEON_STATE, RADEON_TRACE,
				     "%d start: %" PRIu64 ", end: %" PRIu64 " %" PRIu64 "\n", i, start, end, end - start);
		}
	} else {
		for (i = 0; i < query->curr_offset/sizeof(uint32_t); ++i) {
			query->Base.Result += LE32_TO_CPU(result[i]);
			radeon_print(RADEON_STATE, RADEON_TRACE, "result[%d] = %d\n", i, LE32_TO_CPU(result[i]));
		}
	}

	radeon_bo_unmap(query->bo);
}
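The R600 branch above rebuilds each 64-bit counter from two little-endian dwords (via LE32_TO_CPU) before testing the hardware valid bit, which is bit 63. A small sketch of that step in isolation, with hypothetical names and the dwords assumed to be already in CPU byte order:

#include <stdint.h>

/* Hypothetical helper: combine the low/high dwords of one ZPASS counter and
 * report whether its MSB (the hardware "valid" bit) is set. */
static int zpass_counter_valid(uint32_t lo, uint32_t hi, uint64_t *value)
{
    *value = (uint64_t)lo | ((uint64_t)hi << 32);
    return (*value & 0x8000000000000000ULL) != 0;
}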
Example #3
compute_shader::compute_shader(r800_state* state, const std::vector<char>& binary)
{
  const cs_image_header* header = (const cs_image_header*)&binary[0];

  assert(binary.size()%4 == 0);
  assert(header->magic == 0x42424242);

  uint32_t sum = 0;

  for (int i = 8; i < binary.size(); i++)
  {
    sum = sum + binary[i];
  }

  assert(header->chksum == sum);

  lds_alloc = header->lds_alloc;
  num_gprs = header->num_gprs;
  temp_gprs = header->temp_gprs;
  global_gprs = header->global_gprs;
  stack_size = header->stack_size;
  thread_num = header->thread_num;
  dyn_gpr_limit = header->dyn_gpr_limit;

  int shader_start = sizeof(cs_image_header);

  alloc_size = binary.size() - shader_start;

  if (alloc_size % 16)
  {
    alloc_size += (16 - alloc_size % 16);
  }

  binary_code_bo = state->bo_open(0, alloc_size, 0, RADEON_GEM_DOMAIN_VRAM, 0);

  assert(binary_code_bo != NULL);

  int map_result = radeon_bo_map(binary_code_bo, 1);
  assert(map_result == 0);
  (void)map_result; /* keep the mapping even when asserts are compiled out */

  memcpy(binary_code_bo->ptr, &binary[0] + shader_start, binary.size() - shader_start);

  radeon_bo_unmap(binary_code_bo);
}
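The constructor above validates the image by summing every byte from offset 8 onward (presumably skipping the magic and checksum words). A stand-alone sketch of that check with a hypothetical name; it mirrors the loop in Example #3, including its use of plain char, so the result matches the same chksum field. From the constructor it could be called as cs_image_checksum(&binary[0], binary.size()).

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper mirroring the checksum loop in Example #3. */
static uint32_t cs_image_checksum(const char *binary, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 8; i < len; i++)
        sum += binary[i];   /* plain char, as in the original loop */
    return sum;
}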
Example #4
/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
radeonBufferSubData(struct gl_context * ctx,
                    GLintptrARB offset,
                    GLsizeiptrARB size,
                    const GLvoid * data,
                    struct gl_buffer_object *obj)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);

    if (radeon_bo_is_referenced_by_cs(radeon_obj->bo, radeon->cmdbuf.cs)) {
        radeon_firevertices(radeon);
    }

    radeon_bo_map(radeon_obj->bo, GL_TRUE);

    memcpy(radeon_obj->bo->ptr + offset, data, size);

    radeon_bo_unmap(radeon_obj->bo);
}
Example #5
static void evergreenAlignDataToDword(GLcontext *ctx, 
                                 const struct gl_client_array *input, 
                                 int count, 
                                 struct StreamDesc *attr)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer) 
    {
        ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i) 
        {
            memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo) 
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }

    attr->stride = dst_stride;
}
Example #6
static void radeonQueryGetResult(struct gl_context *ctx, struct gl_query_object *q)
{
	struct radeon_query_object *query = (struct radeon_query_object *)q;
        uint32_t *result;
	int i;

	radeon_print(RADEON_STATE, RADEON_VERBOSE,
			"%s: query id %d, result %d\n",
			__FUNCTION__, query->Base.Id, (int) query->Base.Result);

	radeon_bo_map(query->bo, GL_FALSE);
        result = query->bo->ptr;

	query->Base.Result = 0;
	for (i = 0; i < query->curr_offset/sizeof(uint32_t); ++i) {
		query->Base.Result += LE32_TO_CPU(result[i]);
		radeon_print(RADEON_STATE, RADEON_TRACE, "result[%d] = %d\n", i, LE32_TO_CPU(result[i]));
	}

	radeon_bo_unmap(query->bo);
}
Example #7
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
radeonBufferData(struct gl_context * ctx,
                 GLenum target,
                 GLsizeiptrARB size,
                 const GLvoid * data,
                 GLenum usage,
                 struct gl_buffer_object *obj)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    struct radeon_buffer_object *radeon_obj = get_radeon_buffer_object(obj);

    radeon_obj->Base.Size = size;
    radeon_obj->Base.Usage = usage;

    if (radeon_obj->bo != NULL) {
        radeon_bo_unref(radeon_obj->bo);
        radeon_obj->bo = NULL;
    }

    if (size != 0) {
        radeon_obj->bo = radeon_bo_open(radeon->radeonScreen->bom,
                                        0,
                                        size,
                                        32,
                                        RADEON_GEM_DOMAIN_GTT,
                                        0);

        if (!radeon_obj->bo)
            return GL_FALSE;

        if (data != NULL) {
            radeon_bo_map(radeon_obj->bo, GL_TRUE);

            memcpy(radeon_obj->bo->ptr, data, size);

            radeon_bo_unmap(radeon_obj->bo);
        }
    }
    return GL_TRUE;
}
Example #8
void* r600_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_transfer* transfer)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon_bo *bo;
	enum pipe_format format = transfer->resource->format;
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	struct r600_resource_texture *rtex;
	unsigned long offset = 0;
	char *map;
	int r;

	r600_flush(ctx, 0, NULL);
	if (rtransfer->linear_texture) {
		bo = ((struct r600_resource *)rtransfer->linear_texture)->bo;
	} else {
		rtex = (struct r600_resource_texture*)transfer->resource;
		if (rtex->depth) {
			r = r600_texture_from_depth(ctx, rtex, transfer->sr.level);
			if (r) {
				return NULL;
			}
			r600_flush(ctx, 0, NULL);
			bo = rtex->uncompressed;
		} else {
			bo = ((struct r600_resource *)transfer->resource)->bo;
		}
		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);
	}
	if (radeon_bo_map(rscreen->rw, bo)) {
		return NULL;
	}
	radeon_bo_wait(rscreen->rw, bo);

	map = bo->data;
	return map + offset;
}
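The offset computed in Example #8 is expressed in whole format blocks: the box origin is divided by the block dimensions before being scaled by the row stride and block size. A worked instance with illustrative numbers (a 4x4 block-compressed format with 8-byte blocks; none of these values come from the example):

/* Illustrative values only. */
static unsigned long blocked_offset_example(void)
{
    const unsigned blockwidth = 4, blockheight = 4, blocksize = 8;
    const unsigned stride = 512;               /* bytes per row of blocks */
    const unsigned box_x = 16, box_y = 8;      /* box origin in texels */

    return box_y / blockheight * stride        /*  2 * 512 = 1024 */
         + box_x / blockwidth  * blocksize;    /*  4 *   8 =   32 -> 1056 */
}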
Example #9
static void r300AlignDataToDword(GLcontext *ctx, const struct gl_client_array *input, int count, struct vertex_attribute *attr)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	const int dst_stride = (input->StrideB + 3) & ~3;
	const int size = getTypeSize(input->Type) * input->Size * count;
	GLboolean mapped_named_bo = GL_FALSE;

	radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, size, 32);

	radeon_bo_map(attr->bo, 1);

	if (!input->BufferObj->Pointer) {
		ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
		mapped_named_bo = GL_TRUE;
	}

	radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT, "%s. Vertex alignment doesn't match hw requirements.\n", __func__);

	{
		GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
		GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
		int i;

		for (i = 0; i < count; ++i) {
			memcpy(dst_ptr, src_ptr, input->StrideB);
			src_ptr += input->StrideB;
			dst_ptr += dst_stride;
		}
	}

	if (mapped_named_bo) {
		ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
	}

	radeon_bo_unmap(attr->bo);
	attr->stride = dst_stride;
}
Example #10
/**
 * Map a validated texture for reading during software rendering.
 */
void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
{
	radeonTexObj* t = radeon_tex_obj(texObj);
	int face, level;

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p)\n",
			__func__, ctx, texObj);

	if (!radeon_validate_texture_miptree(ctx, texObj)) {
		radeon_error("%s(%p, tex %p) Failed to validate miptree for "
			"sw fallback.\n",
			__func__, ctx, texObj);
		return;
	}

	if (t->image_override && t->bo) {
		radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s(%p, tex %p) Work around for missing miptree in r100.\n",
			__func__, ctx, texObj);

		map_override(ctx, t);
	}

	/* for r100, 3D sw fallbacks don't have a miptree */
	if (!t->mt) {
		radeon_warning("%s(%p, tex %p) No miptree in texture.\n",
			__func__, ctx, texObj);
		return;
	}

	radeon_bo_map(t->mt->bo, GL_FALSE);
	for(face = 0; face < t->mt->faces; ++face) {
		for(level = t->minLod; level <= t->maxLod; ++level)
			teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
	}
}
Example #11
static Bool
RADEONDownloadFromScreenCS(PixmapPtr pSrc, int x, int y, int w,
                           int h, char *dst, int dst_pitch)
{
    RINFO_FROM_SCREEN(pSrc->drawable.pScreen);
    struct radeon_exa_pixmap_priv *driver_priv;
    struct radeon_bo *scratch = NULL;
    struct radeon_bo *copy_src;
    unsigned size;
    uint32_t datatype = 0;
    uint32_t src_domain = 0;
    uint32_t src_pitch_offset;
    unsigned bpp = pSrc->drawable.bitsPerPixel;
    uint32_t scratch_pitch = RADEON_ALIGN(w * bpp / 8, 64);
    uint32_t copy_pitch;
    uint32_t swap = RADEON_HOST_DATA_SWAP_NONE;
    int ret;
    Bool flush = FALSE;
    Bool r;

    if (bpp < 8)
	return FALSE;

    driver_priv = exaGetPixmapDriverPrivate(pSrc);
    if (!driver_priv || !driver_priv->bo)
	return FALSE;

#if X_BYTE_ORDER == X_BIG_ENDIAN
    switch (bpp) {
    case 32:
	swap = RADEON_HOST_DATA_SWAP_32BIT;
	break;
    case 16:
	swap = RADEON_HOST_DATA_SWAP_16BIT;
	break;
    }
#endif

    /* If we know the BO won't end up in VRAM anyway, don't bother with a scratch */
    copy_src = driver_priv->bo;
    copy_pitch = pSrc->devKind;
    if (!(driver_priv->tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) {
	if (radeon_bo_is_referenced_by_cs(driver_priv->bo, info->cs)) {
	    src_domain = radeon_bo_get_src_domain(driver_priv->bo);
	    if ((src_domain & (RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) ==
		(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM))
		src_domain = 0;
	    else /* A write may be scheduled */
		flush = TRUE;
	}

	if (!src_domain)
	    radeon_bo_is_busy(driver_priv->bo, &src_domain);

	if (src_domain & ~(uint32_t)RADEON_GEM_DOMAIN_VRAM)
	    goto copy;
    }
    size = scratch_pitch * h;
    scratch = radeon_bo_open(info->bufmgr, 0, size, 0, RADEON_GEM_DOMAIN_GTT, 0);
    if (scratch == NULL) {
	goto copy;
    }
    radeon_cs_space_reset_bos(info->cs);
    radeon_add_pixmap(info->cs, pSrc, RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
    radeon_cs_space_add_persistent_bo(info->cs, scratch, 0, RADEON_GEM_DOMAIN_GTT);
    ret = radeon_cs_space_check(info->cs);
    if (ret) {
	goto copy;
    }
    RADEONGetDatatypeBpp(pSrc->drawable.bitsPerPixel, &datatype);
    RADEONGetPixmapOffsetPitch(pSrc, &src_pitch_offset);
    RADEON_SWITCH_TO_2D();
    RADEONBlitChunk(pScrn, driver_priv->bo, scratch, datatype, src_pitch_offset,
                    scratch_pitch << 16, x, y, 0, 0, w, h,
                    RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT,
                    RADEON_GEM_DOMAIN_GTT);
    copy_src = scratch;
    copy_pitch = scratch_pitch;
    flush = TRUE;

copy:
    if (flush)
	FLUSH_RING();

    ret = radeon_bo_map(copy_src, 0);
    if (ret) {
	ErrorF("failed to map pixmap: %d\n", ret);
        r = FALSE;
        goto out;
    }
    r = TRUE;
    w *= bpp / 8;
    if (copy_src == driver_priv->bo)
	size = y * copy_pitch + x * bpp / 8;
    else
	size = 0;
    while (h--) {
        RADEONCopySwap((uint8_t*)dst, copy_src->ptr + size, w, swap);
        size += copy_pitch;
        dst += dst_pitch;
    }
    radeon_bo_unmap(copy_src);
out:
    if (scratch)
	radeon_bo_unref(scratch);
    return r;
}
Example #12
static Bool
RADEONUploadToScreenCS(PixmapPtr pDst, int x, int y, int w, int h,
		       char *src, int src_pitch)
{
    ScreenPtr pScreen = pDst->drawable.pScreen;
    RINFO_FROM_SCREEN(pScreen);
    struct radeon_exa_pixmap_priv *driver_priv;
    struct radeon_bo *scratch = NULL;
    struct radeon_bo *copy_dst;
    unsigned char *dst;
    unsigned size;
    uint32_t datatype = 0;
    uint32_t dst_domain;
    uint32_t dst_pitch_offset;
    unsigned bpp = pDst->drawable.bitsPerPixel;
    uint32_t scratch_pitch = RADEON_ALIGN(w * bpp / 8, 64);
    uint32_t copy_pitch;
    uint32_t swap = RADEON_HOST_DATA_SWAP_NONE;
    int ret;
    Bool flush = TRUE;
    Bool r;
    int i;

    if (bpp < 8)
	return FALSE;

    driver_priv = exaGetPixmapDriverPrivate(pDst);
    if (!driver_priv || !driver_priv->bo)
	return FALSE;

#if X_BYTE_ORDER == X_BIG_ENDIAN
    switch (bpp) {
    case 32:
	swap = RADEON_HOST_DATA_SWAP_32BIT;
	break;
    case 16:
	swap = RADEON_HOST_DATA_SWAP_16BIT;
	break;
    }
#endif

    /* If we know the BO won't be busy / in VRAM, don't bother with a scratch */
    copy_dst = driver_priv->bo;
    copy_pitch = pDst->devKind;
    if (!(driver_priv->tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO))) {
	if (!radeon_bo_is_referenced_by_cs(driver_priv->bo, info->cs)) {
	    flush = FALSE;
	    if (!radeon_bo_is_busy(driver_priv->bo, &dst_domain) &&
		!(dst_domain & RADEON_GEM_DOMAIN_VRAM))
		goto copy;
	}
	/* use cpu copy for fast fb access */
	if (info->is_fast_fb)
	    goto copy;
    }

    size = scratch_pitch * h;
    scratch = radeon_bo_open(info->bufmgr, 0, size, 0, RADEON_GEM_DOMAIN_GTT, 0);
    if (scratch == NULL) {
	goto copy;
    }
    radeon_cs_space_reset_bos(info->cs);
    radeon_add_pixmap(info->cs, pDst, 0, RADEON_GEM_DOMAIN_VRAM);
    radeon_cs_space_add_persistent_bo(info->cs, scratch, RADEON_GEM_DOMAIN_GTT, 0);
    ret = radeon_cs_space_check(info->cs);
    if (ret) {
	goto copy;
    }
    copy_dst = scratch;
    copy_pitch = scratch_pitch;
    flush = FALSE;

copy:
    if (flush)
	radeon_cs_flush_indirect(pScrn);

    ret = radeon_bo_map(copy_dst, 0);
    if (ret) {
        r = FALSE;
        goto out;
    }
    r = TRUE;
    size = w * bpp / 8;
    dst = copy_dst->ptr;
    if (copy_dst == driver_priv->bo)
	dst += y * copy_pitch + x * bpp / 8;
    for (i = 0; i < h; i++) {
        RADEONCopySwap(dst + i * copy_pitch, (uint8_t*)src, size, swap);
        src += src_pitch;
    }
    radeon_bo_unmap(copy_dst);

    if (copy_dst == scratch) {
	RADEONGetDatatypeBpp(pDst->drawable.bitsPerPixel, &datatype);
	RADEONGetPixmapOffsetPitch(pDst, &dst_pitch_offset);
	RADEON_SWITCH_TO_2D();
	RADEONBlitChunk(pScrn, scratch, driver_priv->bo, datatype, scratch_pitch << 16,
			dst_pitch_offset, 0, 0, x, y, w, h,
			RADEON_GEM_DOMAIN_GTT, RADEON_GEM_DOMAIN_VRAM);
    }

out:
    if (scratch)
	radeon_bo_unref(scratch);
    return r;
}
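Both EXA paths above pad the scratch pitch with RADEON_ALIGN(w * bpp / 8, 64). A sketch of that rounding, assuming the usual power-of-two align-up idiom rather than quoting the DDX's own macro:

#include <stdint.h>

/* Hypothetical align-up helper: round "value" up to the next multiple of the
 * power-of-two "alignment" (64 for the scratch pitches above).
 * e.g. align_up(100, 64) == 128, align_up(128, 64) == 128 */
static uint32_t align_up(uint32_t value, uint32_t alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}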
Example #13
static void evergreenSetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
	context_t *context = EVERGREEN_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    EVERGREEN_STATECHANGE(context, vtx);

    for(index = 0; index < context->nNumActiveAos; index++) 
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
	   )
        {
            evergreenConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        } 
        else 
        {
            if (input[i]->BufferObj->Name) 
            {
		    context->stream_desc[index].stride = input[i]->StrideB;
		    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
		    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
		    context->stream_desc[index].is_named_bo = GL_TRUE;
            } 
            else 
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0) 
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                } 
                else 
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo, 
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);


                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr, 
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords) 
                {
                case 1:                       
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:                     
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count); 
                    break;
                case 3:                     
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count); 
                    break;
                case 4:                     
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); 
                    break;
                default: 
                    assert(0); 
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if(context->stream_desc[index].is_named_bo) 
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs, 
                                              context->stream_desc[index].bo, 
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs, 
                                        first_elem(&context->radeon.dma.reserved)->bo, 
                                        RADEON_GEM_DOMAIN_GTT, 0);    
}
Example #14
static void r300SetupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);

	if (!mesa_ind_buf) {
		r300->ind_buf.bo = NULL;
		return;
	}
	radeon_print(RADEON_RENDER, RADEON_TRACE, "%s\n", __func__);

#if MESA_BIG_ENDIAN
	if (mesa_ind_buf->type == GL_UNSIGNED_INT) {
#else
	if (mesa_ind_buf->type != GL_UNSIGNED_BYTE) {
#endif
		const GLvoid *src_ptr;
		GLvoid *dst_ptr;
		GLboolean mapped_named_bo = GL_FALSE;

		if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
			ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
			assert(mesa_ind_buf->obj->Pointer != NULL);
			mapped_named_bo = GL_TRUE;
		}

		src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

		const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

		radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);

		radeon_bo_map(r300->ind_buf.bo, 1);
		assert(r300->ind_buf.bo->ptr != NULL);
		dst_ptr = ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);
		memcpy(dst_ptr, src_ptr, size);

		radeon_bo_unmap(r300->ind_buf.bo);
		r300->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
		r300->ind_buf.count = mesa_ind_buf->count;

		if (mapped_named_bo) {
			ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
		}
	} else {
		r300FixupIndexBuffer(ctx, mesa_ind_buf);
	}
}

#define CONVERT( TYPE, MACRO ) do {		\
	GLuint i, j, sz;				\
	sz = input->Size;				\
	if (input->Normalized) {			\
		for (i = 0; i < count; i++) {		\
			const TYPE *in = (TYPE *)src_ptr;		\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = MACRO(*in);		\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	} else {					\
		for (i = 0; i < count; i++) {		\
			const TYPE *in = (TYPE *)src_ptr;		\
			for (j = 0; j < sz; j++) {		\
				*dst_ptr++ = (GLfloat)(*in);		\
				in++;				\
			}					\
			src_ptr += stride;			\
		}						\
	}						\
} while (0)

/**
 * Convert the attribute data to floats.
 * If the attribute uses a named buffer object, replace the bo with a newly allocated one.
 */
static void r300ConvertAttrib(GLcontext *ctx, int count, const struct gl_client_array *input, struct vertex_attribute *attr)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	const GLvoid *src_ptr;
	GLboolean mapped_named_bo = GL_FALSE;
	GLfloat *dst_ptr;
	GLuint stride;

	stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

	/* Convert value for first element only */
	if (input->StrideB == 0)
		count = 1;

	if (input->BufferObj->Name) {
		if (!input->BufferObj->Pointer) {
			ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
			mapped_named_bo = GL_TRUE;
		}

		src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
	} else {
		src_ptr = input->Ptr;
	}

	radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
	radeon_bo_map(attr->bo, 1);
	dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

	radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
			"%s: Converting vertex attributes, attribute data format %x, "
			"stride %d, components %d\n"
			, __FUNCTION__, input->Type
			, stride, input->Size);

	assert(src_ptr != NULL);

	switch (input->Type) {
		case GL_DOUBLE:
			CONVERT(GLdouble, (GLfloat));
			break;
		case GL_UNSIGNED_INT:
			CONVERT(GLuint, UINT_TO_FLOAT);
			break;
		case GL_INT:
			CONVERT(GLint, INT_TO_FLOAT);
			break;
		case GL_UNSIGNED_SHORT:
			CONVERT(GLushort, USHORT_TO_FLOAT);
			break;
		case GL_SHORT:
			CONVERT(GLshort, SHORT_TO_FLOAT);
			break;
		case GL_UNSIGNED_BYTE:
			assert(input->Format != GL_BGRA);
			CONVERT(GLubyte, UBYTE_TO_FLOAT);
			break;
		case GL_BYTE:
			CONVERT(GLbyte, BYTE_TO_FLOAT);
			break;
		default:
			assert(0);
			break;
	}

	radeon_bo_unmap(attr->bo);
	if (mapped_named_bo) {
		ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
	}
}
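The CONVERT() macro in Example #14 walks count vertices and widens each component to float, either through a normalization macro or a plain cast. A stand-alone sketch of what the non-normalized GLshort case boils down to (hypothetical function and plain C types, not Mesa code):

#include <stddef.h>

static void convert_shorts_to_floats(const unsigned char *src, size_t stride,
                                     size_t count, size_t size, float *dst)
{
    size_t i, j;

    for (i = 0; i < count; i++) {
        const short *in = (const short *)src;
        for (j = 0; j < size; j++)
            *dst++ = (float)in[j];   /* the macro's non-normalized branch: a plain cast */
        src += stride;               /* stride is in bytes, as in the driver */
    }
}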
Example #15
static void evergreenFixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

	radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
			     &context->ind_buf.bo_offset, size, 4);

	radeon_bo_map(context->ind_buf.bo, 1);
	assert(context->ind_buf.bo->ptr != NULL);
	out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

	radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

	radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
			     &context->ind_buf.bo_offset, size, 4);

	radeon_bo_map(context->ind_buf.bo, 1);
	assert(context->ind_buf.bo->ptr != NULL);
	out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
	radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
    }
}
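The fixup above repacks 8-bit indices into the 16-bit layout the hardware expects, two indices per 32-bit word; the same loop reappears in the r300 variant further down. A minimal sketch with hypothetical names (little-endian dword layout assumed, as the drivers write it):

#include <stdint.h>
#include <stddef.h>

/* Pack 8-bit indices as 16-bit values, two per dword; an odd trailing index
 * ends up in the low half of the last word.
 * e.g. {3, 7, 9} -> 0x00070003, 0x00000009 */
static void pack_ubyte_indices(const uint8_t *in, size_t count, uint32_t *out)
{
    size_t i;

    for (i = 0; i + 1 < count; i += 2)
        *out++ = (uint32_t)in[i] | ((uint32_t)in[i + 1] << 16);
    if (i < count)
        *out++ = in[i];
}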
Example #16
static void r300AllocDmaRegions(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	struct r300_vertex_buffer *vbuf = &r300->vbuf;
	GLuint stride;
	int ret;
	int i, index;
	radeon_print(RADEON_RENDER, RADEON_VERBOSE,
			"%s: count %d num_attribs %d\n",
			__func__, count, vbuf->num_attribs);

	for (index = 0; index < vbuf->num_attribs; index++) {
		struct radeon_aos *aos = &r300->radeon.tcl.aos[index];
		i = vbuf->attribs[index].element;

		stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

		if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
				getTypeSize(input[i]->Type) != 4 ||
#endif
				stride < 4) {

			r300ConvertAttrib(ctx, count, input[i], &vbuf->attribs[index]);
		} else {
			if (input[i]->BufferObj->Name) {
				if (stride % 4 != 0 || (intptr_t)input[i]->Ptr % 4 != 0) {
					r300AlignDataToDword(ctx, input[i], count, &vbuf->attribs[index]);
					vbuf->attribs[index].is_named_bo = GL_FALSE;
				} else {
					vbuf->attribs[index].stride = input[i]->StrideB;
					vbuf->attribs[index].bo_offset = (intptr_t) input[i]->Ptr;
					vbuf->attribs[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
					vbuf->attribs[index].is_named_bo = GL_TRUE;
				}
			} else {

				int size;
				int local_count = count;
				uint32_t *dst;

				if (input[i]->StrideB == 0) {
					size = getTypeSize(input[i]->Type) * input[i]->Size;
					local_count = 1;
				} else {
					size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
				}

				radeonAllocDmaRegion(&r300->radeon, &vbuf->attribs[index].bo, &vbuf->attribs[index].bo_offset, size, 32);
				radeon_bo_map(vbuf->attribs[index].bo, 1);
				assert(vbuf->attribs[index].bo->ptr != NULL);
				dst = (uint32_t *)ADD_POINTERS(vbuf->attribs[index].bo->ptr, vbuf->attribs[index].bo_offset);
				switch (vbuf->attribs[index].dwords) {
					case 1: radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 2: radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 3: radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 4: radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					default: assert(0); break;
				}
				radeon_bo_unmap(vbuf->attribs[index].bo);

			}
		}

		aos->count = vbuf->attribs[index].stride == 0 ? 1 : count;
		aos->stride = vbuf->attribs[index].stride / sizeof(float);
		aos->components = vbuf->attribs[index].dwords;
		aos->bo = vbuf->attribs[index].bo;
		aos->offset = vbuf->attribs[index].bo_offset;

		if (vbuf->attribs[index].is_named_bo) {
			radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs, r300->vbuf.attribs[index].bo, RADEON_GEM_DOMAIN_GTT, 0);
		}
	}

	r300->radeon.tcl.aos_count = vbuf->num_attribs;
	ret = radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs, first_elem(&r300->radeon.dma.reserved)->bo, RADEON_GEM_DOMAIN_GTT, 0);
	r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, ret);

}
Example #17
static void r300FixupIndexBuffer(GLcontext *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	GLvoid *src_ptr;
	GLuint *out;
	int i;
	GLboolean mapped_named_bo = GL_FALSE;

	if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer) {
		ctx->Driver.MapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, GL_READ_ONLY_ARB, mesa_ind_buf->obj);
		mapped_named_bo = GL_TRUE;
		assert(mesa_ind_buf->obj->Pointer != NULL);
	}
	src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

	radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
			"%s: Fixing index buffer format. type %d\n",
			__func__, mesa_ind_buf->type);

	if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
		GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
		GLubyte *in = (GLubyte *)src_ptr;

		radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo, &r300->ind_buf.bo_offset, size, 4);
		radeon_bo_map(r300->ind_buf.bo, 1);
		assert(r300->ind_buf.bo->ptr != NULL);
		out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

		for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
			*out++ = in[i] | in[i + 1] << 16;
		}

		if (i < mesa_ind_buf->count) {
			*out++ = in[i];
		}
		radeon_bo_unmap(r300->ind_buf.bo);
#if MESA_BIG_ENDIAN
	} else { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
		GLushort *in = (GLushort *)src_ptr;
		GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

		radeonAllocDmaRegion(&r300->radeon, &r300->ind_buf.bo,
				     &r300->ind_buf.bo_offset, size, 4);

		radeon_bo_map(r300->ind_buf.bo, 1);
		assert(r300->ind_buf.bo->ptr != NULL);
		out = (GLuint *)ADD_POINTERS(r300->ind_buf.bo->ptr, r300->ind_buf.bo_offset);

		for (i = 0; i + 1 < mesa_ind_buf->count; i += 2) {
			*out++ = in[i] | in[i + 1] << 16;
		}

		if (i < mesa_ind_buf->count) {
			*out++ = in[i];
		}
		radeon_bo_unmap(r300->ind_buf.bo);
#endif
	}

	r300->ind_buf.is_32bit = GL_FALSE;
	r300->ind_buf.count = mesa_ind_buf->count;

	if (mapped_named_bo) {
		ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER, mesa_ind_buf->obj);
	}
}
Example #18
static GLboolean
do_blit_readpixels(struct gl_context * ctx,
                   GLint x, GLint y, GLsizei width, GLsizei height,
                   GLenum format, GLenum type,
                   const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    const struct radeon_renderbuffer *rrb = radeon_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
    const gl_format dst_format = gl_format_and_type_to_mesa_format(format, type);
    unsigned dst_rowstride, dst_imagesize, aligned_rowstride, flip_y;
    struct radeon_bo *dst_buffer;
    GLint dst_x = 0, dst_y = 0;
    intptr_t dst_offset;

    /* It's not worth it if the number of pixels to copy is really small */
    if (width * height < 100) {
        return GL_FALSE;
    }

    if (dst_format == MESA_FORMAT_NONE ||
        !radeon->vtbl.check_blit(dst_format) || !radeon->vtbl.blit) {
        return GL_FALSE;
    }

    if (ctx->_ImageTransferState || ctx->Color._LogicOpEnabled) {
        return GL_FALSE;
    }

    if (pack->SwapBytes || pack->LsbFirst) {
        return GL_FALSE;
    }

    if (pack->RowLength > 0) {
        dst_rowstride = pack->RowLength;
    } else {
        dst_rowstride = width;
    }

    if (!_mesa_clip_copytexsubimage(ctx, &dst_x, &dst_y, &x, &y, &width, &height)) {
        return GL_TRUE;
    }
    assert(x >= 0 && y >= 0);

    aligned_rowstride = get_texture_image_row_stride(radeon, dst_format, dst_rowstride, 0);
    dst_rowstride *= _mesa_get_format_bytes(dst_format);
    if (_mesa_is_bufferobj(pack->BufferObj) && aligned_rowstride != dst_rowstride)
        return GL_FALSE;
    dst_imagesize = get_texture_image_size(dst_format,
                                           aligned_rowstride,
                                           height, 1, 0);

    if (!_mesa_is_bufferobj(pack->BufferObj))
    {
        dst_buffer = radeon_bo_open(radeon->radeonScreen->bom, 0, dst_imagesize, 1024, RADEON_GEM_DOMAIN_GTT, 0);
        dst_offset = 0;
    }
    else
    {
        dst_buffer = get_radeon_buffer_object(pack->BufferObj)->bo;
        dst_offset = (intptr_t)pixels;
    }

    /* Disable source Y flipping for FBOs */
    flip_y = (ctx->ReadBuffer->Name == 0);
    if (pack->Invert) {
        y = rrb->base.Height - height - y;
        flip_y = !flip_y;
    }

    if (radeon->vtbl.blit(ctx,
                          rrb->bo,
                          rrb->draw_offset,
                          rrb->base.Format,
                          rrb->pitch / rrb->cpp,
                          rrb->base.Width,
                          rrb->base.Height,
                          x,
                          y,
                          dst_buffer,
                          dst_offset,
                          dst_format,
                          aligned_rowstride / _mesa_get_format_bytes(dst_format),
                          width,
                          height,
                          0, /* dst_x */
                          0, /* dst_y */
                          width,
                          height,
                          flip_y))
    {
        if (!_mesa_is_bufferobj(pack->BufferObj))
        {
            radeon_bo_map(dst_buffer, 0);
            copy_rows(pixels, dst_rowstride, dst_buffer->ptr,
                      aligned_rowstride, height, dst_rowstride);
            radeon_bo_unmap(dst_buffer);
            radeon_bo_unref(dst_buffer);
        }

        return GL_TRUE;
    }

    if (!_mesa_is_bufferobj(pack->BufferObj))
        radeon_bo_unref(dst_buffer);

    return GL_FALSE;
}
Example #19
/**
 * Convert the attribute data to floats.
 * If the attribute uses a named buffer object, replace the bo with a newly allocated one.
 */
static void evergreenConvertAttrib(GLcontext *ctx, int count, 
                              const struct gl_client_array *input, 
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? evergreen_getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name) 
    {
        if (!input->BufferObj->Pointer) 
        {
            ctx->Driver.MapBuffer(ctx, GL_ARRAY_BUFFER, GL_READ_ONLY_ARB, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    } 
    else 
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, 
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type) 
    {
        case GL_DOUBLE:
            CONVERT(GLdouble, (GLfloat));
            break;
        case GL_UNSIGNED_INT:
            CONVERT(GLuint, UINT_TO_FLOAT);
            break;
        case GL_INT:
            CONVERT(GLint, INT_TO_FLOAT);
            break;
        case GL_UNSIGNED_SHORT:
            CONVERT(GLushort, USHORT_TO_FLOAT);
            break;
        case GL_SHORT:
            CONVERT(GLshort, SHORT_TO_FLOAT);
            break;
        case GL_UNSIGNED_BYTE:
            assert(input->Format != GL_BGRA);
            CONVERT(GLubyte, UBYTE_TO_FLOAT);
            break;
        case GL_BYTE:
            CONVERT(GLbyte, BYTE_TO_FLOAT);
            break;
        default:
            assert(0);
            break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo) 
    {
        ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER, input->BufferObj);
    }
}
Example #20
static void cs_gem_dump_bof(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct radeon_cs_manager_gem *csm;
    bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
    char tmp[256];
    unsigned i;

    csm = (struct radeon_cs_manager_gem *)cs->csm;
    root = device_id = bcs = blob = array = bo = size = handle = NULL;
    root = bof_object();
    if (root == NULL)
        goto out_err;
    device_id = bof_int32(csm->device_id);
    if (device_id == NULL)
        return;
    if (bof_object_set(root, "device_id", device_id))
        goto out_err;
    bof_decref(device_id);
    device_id = NULL;
    /* dump relocs */
    blob = bof_blob(csg->nrelocs * 16, csg->relocs);
    if (blob == NULL)
        goto out_err;
    if (bof_object_set(root, "reloc", blob))
        goto out_err;
    bof_decref(blob);
    blob = NULL;
    /* dump cs */
    blob = bof_blob(cs->cdw * 4, cs->packets);
    if (blob == NULL)
        goto out_err;
    if (bof_object_set(root, "pm4", blob))
        goto out_err;
    bof_decref(blob);
    blob = NULL;
    /* dump bo */
    array = bof_array();
    if (array == NULL)
        goto out_err;
    for (i = 0; i < csg->base.crelocs; i++) {
        bo = bof_object();
        if (bo == NULL)
            goto out_err;
        size = bof_int32(csg->relocs_bo[i]->size);
        if (size == NULL)
            goto out_err;
        if (bof_object_set(bo, "size", size))
            goto out_err;
        bof_decref(size);
        size = NULL;
        handle = bof_int32(csg->relocs_bo[i]->handle);
        if (handle == NULL)
            goto out_err;
        if (bof_object_set(bo, "handle", handle))
            goto out_err;
        bof_decref(handle);
        handle = NULL;
        radeon_bo_map((struct radeon_bo*)csg->relocs_bo[i], 0);
        blob = bof_blob(csg->relocs_bo[i]->size, csg->relocs_bo[i]->ptr);
        radeon_bo_unmap((struct radeon_bo*)csg->relocs_bo[i]);
        if (blob == NULL)
            goto out_err;
        if (bof_object_set(bo, "data", blob))
            goto out_err;
        bof_decref(blob);
        blob = NULL;
        if (bof_array_append(array, bo))
            goto out_err;
        bof_decref(bo);
        bo = NULL;
    }
    if (bof_object_set(root, "bo", array))
        goto out_err;
    sprintf(tmp, "d-0x%04X-%08d.bof", csm->device_id, csm->nbof++);
    bof_dump_file(root, tmp);
out_err:
    bof_decref(blob);
    bof_decref(array);
    bof_decref(bo);
    bof_decref(size);
    bof_decref(handle);
    bof_decref(device_id);
    bof_decref(root);
}
Example #21
void radeon_ctx_dump_bof(struct radeon_ctx *ctx, const char *file)
{
	bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
	unsigned i;

	root = device_id = bcs = blob = array = bo = size = handle = NULL;
	root = bof_object();
	if (root == NULL)
		goto out_err;
	device_id = bof_int32(ctx->radeon->device);
	if (device_id == NULL)
		return;
	if (bof_object_set(root, "device_id", device_id))
		goto out_err;
	bof_decref(device_id);
	device_id = NULL;
	/* dump relocs */
	blob = bof_blob(ctx->nreloc * 16, ctx->reloc);
	if (blob == NULL)
		goto out_err;
	if (bof_object_set(root, "reloc", blob))
		goto out_err;
	bof_decref(blob);
	blob = NULL;
	/* dump cs */
	blob = bof_blob(ctx->cdwords * 4, ctx->pm4);
	if (blob == NULL)
		goto out_err;
	if (bof_object_set(root, "pm4", blob))
		goto out_err;
	bof_decref(blob);
	blob = NULL;
	/* dump bo */
	array = bof_array();
	if (array == NULL)
		goto out_err;
	for (i = 0; i < ctx->nbo; i++) {
		bo = bof_object();
		if (bo == NULL)
			goto out_err;
		size = bof_int32(ctx->bo[i]->size);
		if (size == NULL)
			goto out_err;
		if (bof_object_set(bo, "size", size))
			goto out_err;
		bof_decref(size);
		size = NULL;
		handle = bof_int32(ctx->bo[i]->handle);
		if (handle == NULL)
			goto out_err;
		if (bof_object_set(bo, "handle", handle))
			goto out_err;
		bof_decref(handle);
		handle = NULL;
		radeon_bo_map(ctx->radeon, ctx->bo[i]);
		blob = bof_blob(ctx->bo[i]->size, ctx->bo[i]->data);
		radeon_bo_unmap(ctx->radeon, ctx->bo[i]);
		if (blob == NULL)
			goto out_err;
		if (bof_object_set(bo, "data", blob))
			goto out_err;
		bof_decref(blob);
		blob = NULL;
		if (bof_array_append(array, bo))
			goto out_err;
		bof_decref(bo);
		bo = NULL;
	}
	if (bof_object_set(root, "bo", array))
		goto out_err;
	bof_dump_file(root, file);
out_err:
	bof_decref(blob);
	bof_decref(array);
	bof_decref(bo);
	bof_decref(size);
	bof_decref(handle);
	bof_decref(device_id);
	bof_decref(root);
}