/**
 * Set up the hardware vertex streams (AOS slots) for every active attribute.
 *
 * For each active attribute either:
 *  - converts it on the CPU when the hardware cannot fetch the type directly
 *    (doubles, 32-bit ints, and on big-endian hosts any non-4-byte type),
 *  - references the data in place when it already lives in a named VBO, or
 *  - copies client-memory arrays into a freshly allocated DMA region.
 *
 * \param ctx    GL context.
 * \param input  Per-vertex-attribute client arrays, indexed by element id.
 * \param count  Number of vertices to set up.
 */
static void evergreenSetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    int i, index;

    EVERGREEN_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++) {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        /* Types the fetcher cannot handle natively are converted to floats
         * on the CPU; on big-endian hosts anything that is not 4 bytes wide
         * also needs the conversion path for byte swapping. */
        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
            ) {
            evergreenConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        } else {
            if (input[i]->BufferObj->Name) {
                /* Data already lives in a named buffer object: reference it
                 * in place instead of copying. */
                context->stream_desc[index].stride = input[i]->StrideB;
                context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                context->stream_desc[index].is_named_bo = GL_TRUE;
            } else {
                /* Client-memory array: upload it into a DMA region. */
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0) {
                    /* Zero stride means a single constant value for all
                     * vertices, so only one element is uploaded. */
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                } else {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);
                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);
                dst = (uint32_t *) ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                                context->stream_desc[index].bo_offset);

                /* dwords = number of 32-bit words per element. */
                switch (context->stream_desc[index].dwords) {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }
                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo) {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    /* Result deliberately ignored: unlike r300, evergreen has no software
     * fallback for invalid buffers here. */
    (void) radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                         first_elem(&context->radeon.dma.reserved)->bo,
                                         RADEON_GEM_DOMAIN_GTT, 0);
}
/**
 * Prepare DMA-backed vertex streams for every attribute in the vertex buffer.
 *
 * Attributes whose type, stride, or alignment the r300 fetcher cannot handle
 * are converted/realigned on the CPU; aligned data in named VBOs is referenced
 * in place; client-memory arrays are copied into freshly allocated DMA space.
 * Finally the command-stream space check runs and may trigger a software
 * fallback for invalid buffers.
 *
 * \param ctx    GL context.
 * \param input  Per-attribute client arrays, indexed by element id.
 * \param count  Number of vertices.
 */
static void r300AllocDmaRegions(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    r300ContextPtr r300 = R300_CONTEXT(ctx);
    struct r300_vertex_buffer *vbuf = &r300->vbuf;
    int attr;
    int ret;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: count %d num_attribs %d\n",
                 __func__, count, vbuf->num_attribs);

    for (attr = 0; attr < vbuf->num_attribs; attr++) {
        struct radeon_aos *aos = &r300->radeon.tcl.aos[attr];
        const int src = vbuf->attribs[attr].element;
        /* Zero stride denotes a constant attribute; treat its packed element
         * size as the effective stride for the checks below. */
        const GLuint stride = input[src]->StrideB
            ? input[src]->StrideB
            : getTypeSize(input[src]->Type) * input[src]->Size;
        const int needs_convert =
            input[src]->Type == GL_DOUBLE ||
            input[src]->Type == GL_UNSIGNED_INT ||
            input[src]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
            getTypeSize(input[src]->Type) != 4 ||
#endif
            stride < 4;

        if (needs_convert) {
            /* CPU conversion path for types/strides the hardware can't fetch. */
            r300ConvertAttrib(ctx, count, input[src], &vbuf->attribs[attr]);
        } else if (input[src]->BufferObj->Name) {
            if (stride % 4 != 0 || (intptr_t) input[src]->Ptr % 4 != 0) {
                /* Named VBO but misaligned: repack to dword alignment. */
                r300AlignDataToDword(ctx, input[src], count, &vbuf->attribs[attr]);
                vbuf->attribs[attr].is_named_bo = GL_FALSE;
            } else {
                /* Named VBO, dword-aligned: reference the buffer in place. */
                vbuf->attribs[attr].stride = input[src]->StrideB;
                vbuf->attribs[attr].bo_offset = (intptr_t) input[src]->Ptr;
                vbuf->attribs[attr].bo = get_radeon_buffer_object(input[src]->BufferObj)->bo;
                vbuf->attribs[attr].is_named_bo = GL_TRUE;
            }
        } else {
            /* Client memory: copy into a new DMA region. A zero stride means
             * one shared value, so only a single element is emitted. */
            const int emit_count = input[src]->StrideB ? count : 1;
            const int size = getTypeSize(input[src]->Type) * input[src]->Size * emit_count;
            uint32_t *dst;

            radeonAllocDmaRegion(&r300->radeon, &vbuf->attribs[attr].bo,
                                 &vbuf->attribs[attr].bo_offset, size, 32);
            radeon_bo_map(vbuf->attribs[attr].bo, 1);
            assert(vbuf->attribs[attr].bo->ptr != NULL);
            dst = (uint32_t *) ADD_POINTERS(vbuf->attribs[attr].bo->ptr,
                                            vbuf->attribs[attr].bo_offset);

            /* Dispatch on the number of dwords per element. */
            switch (vbuf->attribs[attr].dwords) {
            case 1:
                radeonEmitVec4(dst, input[src]->Ptr, input[src]->StrideB, emit_count);
                break;
            case 2:
                radeonEmitVec8(dst, input[src]->Ptr, input[src]->StrideB, emit_count);
                break;
            case 3:
                radeonEmitVec12(dst, input[src]->Ptr, input[src]->StrideB, emit_count);
                break;
            case 4:
                radeonEmitVec16(dst, input[src]->Ptr, input[src]->StrideB, emit_count);
                break;
            default:
                assert(0);
                break;
            }
            radeon_bo_unmap(vbuf->attribs[attr].bo);
        }

        aos->count = vbuf->attribs[attr].stride == 0 ? 1 : count;
        aos->stride = vbuf->attribs[attr].stride / sizeof(float);
        aos->components = vbuf->attribs[attr].dwords;
        aos->bo = vbuf->attribs[attr].bo;
        aos->offset = vbuf->attribs[attr].bo_offset;

        if (vbuf->attribs[attr].is_named_bo) {
            radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs,
                                              r300->vbuf.attribs[attr].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    r300->radeon.tcl.aos_count = vbuf->num_attribs;

    ret = radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs,
                                        first_elem(&r300->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
    r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, ret);
}