Example 1
static void r700SendQueryBegin(GLcontext *ctx, struct radeon_state_atom *atom)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = radeon->query.current;
	BATCH_LOCALS(radeon);
	radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s\n", __func__);

	/* clear the buffer; map for writing, since the memset below stores to it */
	radeon_bo_map(query->bo, GL_TRUE);
	memset(query->bo->ptr, 0, 4 * 2 * sizeof(uint64_t)); /* 4 DBs, 2 qwords each */
	radeon_bo_unmap(query->bo);

	radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
				      query->bo,
				      0, RADEON_GEM_DOMAIN_GTT);

	/* emit an EVENT_WRITE / ZPASS_DONE so each DB snapshots its Z-pass
	 * sample counter into the query BO at curr_offset */
	BEGIN_BATCH_NO_AUTOSTATE(4 + 2);
	R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 2));
	R600_OUT_BATCH(ZPASS_DONE);
	R600_OUT_BATCH(query->curr_offset); /* hw writes qwords */
	R600_OUT_BATCH(0x00000000);
	R600_OUT_BATCH_RELOC(VGT_EVENT_INITIATOR, query->bo, 0, 0, RADEON_GEM_DOMAIN_GTT, 0);
	END_BATCH();
	query->emitted_begin = GL_TRUE;
}
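The memset above sizes the query buffer for four depth blocks (DBs), each of which writes a {begin, end} qword pair. For context, a plausible readback sketch; the pair layout, the bit-63 "value written" flag, and the helper name query_sum_zpass are assumptions for illustration, not code from this driver:

static uint64_t query_sum_zpass(const uint64_t *results)
{
	uint64_t sum = 0;
	int db;

	for (db = 0; db < 4; db++) {
		uint64_t begin = results[db * 2];     /* first qword of this DB's pair */
		uint64_t end   = results[db * 2 + 1]; /* second qword of this DB's pair */

		/* assumption: bit 63 flags a value the hardware actually wrote */
		if ((begin >> 63) && (end >> 63))
			sum += end - begin; /* bit 63 cancels in the subtraction */
	}
	return sum;
}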
Example 2
/* Check that both blit BOs fit in the command stream's space accounting:
 * the source is read (third argument = read domains) and the destination
 * is written (fourth argument = write domain). */
static GLboolean validate_buffers(struct r200_context *r200,
                                  struct radeon_bo *src_bo,
                                  struct radeon_bo *dst_bo)
{
    int ret;

    radeon_cs_space_reset_bos(r200->radeon.cmdbuf.cs);

    ret = radeon_cs_space_check_with_bo(r200->radeon.cmdbuf.cs,
                                        src_bo, RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT, 0);
    if (ret)
        return GL_FALSE;

    ret = radeon_cs_space_check_with_bo(r200->radeon.cmdbuf.cs,
                                        dst_bo, 0, RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT);
    if (ret)
        return GL_FALSE;

    return GL_TRUE;
}
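A failed check means the BOs no longer fit alongside what the command stream already references, so callers typically flush and retry once before giving up. A hedged usage sketch: validate_or_flush is a hypothetical wrapper, and radeon_firevertices is assumed to be the driver's usual flush entry point:

static GLboolean validate_or_flush(struct r200_context *r200,
                                   struct radeon_bo *src_bo,
                                   struct radeon_bo *dst_bo)
{
    if (validate_buffers(r200, src_bo, dst_bo))
        return GL_TRUE;

    /* flush pending commands to empty the CS, then try once more */
    radeon_firevertices(&r200->radeon);
    return validate_buffers(r200, src_bo, dst_bo);
}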
Example 3
/**
 * Ensure all enabled and complete textures are uploaded along with any buffers being used.
 */
GLboolean r300ValidateBuffers(GLcontext * ctx)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_renderbuffer *rrb;
	int i;
	int ret;

	radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);

	rrb = radeon_get_colorbuffer(&rmesa->radeon);
	/* color buffer */
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}

	/* depth buffer */
	rrb = radeon_get_depthbuffer(&rmesa->radeon);
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}

	for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
		radeonTexObj *t;

		if (!ctx->Texture.Unit[i]._ReallyEnabled)
			continue;

		if (!r300_validate_texture(ctx, ctx->Texture.Unit[i]._Current)) {
			_mesa_warning(ctx,
				      "failed to validate texture for unit %d.\n",
				      i);
		}
		t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
		if (t->image_override && t->bo)
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
		else if (t->mt && t->mt->bo) /* miptree may be missing if validation failed */
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->mt->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
	}

	ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs,
					    first_elem(&rmesa->radeon.dma.reserved)->bo,
					    RADEON_GEM_DOMAIN_GTT, 0);
	if (ret)
		return GL_FALSE;
	return GL_TRUE;
}
Example 4
void radeonEmitQueryEnd(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	struct radeon_query_object *query = radeon->query.current;

	if (!query)
		return;

	if (query->emitted_begin == GL_FALSE)
		return;

	radeon_print(RADEON_STATE, RADEON_NORMAL,
		     "%s: query id %d, bo %p, offset %d\n",
		     __func__, query->Base.Id, query->bo, query->curr_offset);

	radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
				      query->bo,
				      0, RADEON_GEM_DOMAIN_GTT);

	radeon->vtbl.emit_query_finish(radeon);
}
Example 5
static void evergreenSetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    int ret;
    int i, index;

    EVERGREEN_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT ||
            input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
            )
        {
            evergreenConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        } 
        else 
        {
            if (input[i]->BufferObj->Name)
            {
                context->stream_desc[index].stride = input[i]->StrideB;
                context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                context->stream_desc[index].is_named_bo = GL_TRUE;
            }
            else 
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0) 
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                } 
                else 
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo, 
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords) 
                {
                case 1:                       
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:                     
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count); 
                    break;
                case 3:                     
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count); 
                    break;
                case 4:                     
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); 
                    break;
                default: 
                    assert(0); 
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs, 
                                              context->stream_desc[index].bo, 
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    /* unlike the r300 path in the next example, the space-check result is
     * not acted on here */
    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
    (void) ret;
}
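The radeonEmitVecN calls in the switch above pack vertex data into the freshly mapped DMA region. A minimal sketch of their shared shape, assuming they copy count elements of N dwords each from a strided source into a densely packed destination (emit_vec_sketch is a hypothetical stand-in, not the driver's helpers):

static void emit_vec_sketch(uint32_t *out, const GLvoid *data,
                            int dwords, int stride, int count)
{
    int i, j;

    for (i = 0; i < count; i++) {
        const uint32_t *src = (const uint32_t *)((const char *)data + i * stride);

        for (j = 0; j < dwords; j++)
            *out++ = src[j]; /* destination is tightly packed */
    }
}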
Example 6
static void r300AllocDmaRegions(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	struct r300_vertex_buffer *vbuf = &r300->vbuf;
	GLuint stride;
	int ret;
	int i, index;
	radeon_print(RADEON_RENDER, RADEON_VERBOSE,
			"%s: count %d num_attribs %d\n",
			__func__, count, vbuf->num_attribs);

	for (index = 0; index < vbuf->num_attribs; index++) {
		struct radeon_aos *aos = &r300->radeon.tcl.aos[index];
		i = vbuf->attribs[index].element;

		stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

		if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
				getTypeSize(input[i]->Type) != 4 ||
#endif
				stride < 4) {

			r300ConvertAttrib(ctx, count, input[i], &vbuf->attribs[index]);
		} else {
			if (input[i]->BufferObj->Name) {
				if (stride % 4 != 0 || (intptr_t)input[i]->Ptr % 4 != 0) {
					r300AlignDataToDword(ctx, input[i], count, &vbuf->attribs[index]);
					vbuf->attribs[index].is_named_bo = GL_FALSE;
				} else {
					vbuf->attribs[index].stride = input[i]->StrideB;
					vbuf->attribs[index].bo_offset = (intptr_t) input[i]->Ptr;
					vbuf->attribs[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
					vbuf->attribs[index].is_named_bo = GL_TRUE;
				}
			} else {
				int size;
				int local_count = count;
				uint32_t *dst;

				if (input[i]->StrideB == 0) {
					size = getTypeSize(input[i]->Type) * input[i]->Size;
					local_count = 1;
				} else {
					size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
				}

				radeonAllocDmaRegion(&r300->radeon, &vbuf->attribs[index].bo, &vbuf->attribs[index].bo_offset, size, 32);
				radeon_bo_map(vbuf->attribs[index].bo, 1);
				assert(vbuf->attribs[index].bo->ptr != NULL);
				dst = (uint32_t *)ADD_POINTERS(vbuf->attribs[index].bo->ptr, vbuf->attribs[index].bo_offset);
				switch (vbuf->attribs[index].dwords) {
					case 1: radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 2: radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 3: radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 4: radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					default: assert(0); break;
				}
				radeon_bo_unmap(vbuf->attribs[index].bo);
			}
		}

		aos->count = vbuf->attribs[index].stride == 0 ? 1 : count;
		aos->stride = vbuf->attribs[index].stride / sizeof(float);
		aos->components = vbuf->attribs[index].dwords;
		aos->bo = vbuf->attribs[index].bo;
		aos->offset = vbuf->attribs[index].bo_offset;

		if (vbuf->attribs[index].is_named_bo) {
			radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs,
							  vbuf->attribs[index].bo,
							  RADEON_GEM_DOMAIN_GTT, 0);
		}
	}

	r300->radeon.tcl.aos_count = vbuf->num_attribs;
	ret = radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs,
					    first_elem(&r300->radeon.dma.reserved)->bo,
					    RADEON_GEM_DOMAIN_GTT, 0);
	r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, ret);
}
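Taken together, the examples share one shape: reset the per-CS space accounting, register every buffer object the stream will reference with its read domains or write domain, then run a final check (conventionally against the reserved DMA buffer) and fall back or flush on failure. A distilled sketch of that pattern under the same API assumptions; validate_frame_bos, color_bo, and tex_bo are hypothetical names:

static GLboolean validate_frame_bos(radeonContextPtr radeon,
				    struct radeon_bo *color_bo,
				    struct radeon_bo *tex_bo)
{
	/* start a fresh accounting pass for this command stream */
	radeon_cs_space_reset_bos(radeon->cmdbuf.cs);

	/* render targets are written, so request the VRAM write domain */
	if (color_bo)
		radeon_cs_space_add_persistent_bo(radeon->cmdbuf.cs, color_bo,
						  0, RADEON_GEM_DOMAIN_VRAM);

	/* textures are read and may live in either domain */
	if (tex_bo)
		radeon_cs_space_add_persistent_bo(radeon->cmdbuf.cs, tex_bo,
						  RADEON_GEM_DOMAIN_GTT |
						  RADEON_GEM_DOMAIN_VRAM, 0);

	/* verify everything, including the reserved DMA buffer, still fits */
	return radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
					     first_elem(&radeon->dma.reserved)->bo,
					     RADEON_GEM_DOMAIN_GTT, 0) == 0;
}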