Example #1
void r300_swtcl_flush(GLcontext *ctx, uint32_t current_offset)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);

	radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);

	r300EmitCacheFlush(rmesa);

	radeonEmitState(&rmesa->radeon);
	r300_emit_scissor(ctx);
	r300EmitVertexAOS(rmesa,
			rmesa->radeon.swtcl.vertex_size,
			first_elem(&rmesa->radeon.dma.reserved)->bo,
			current_offset);

	r300EmitVbufPrim(rmesa,
		   rmesa->radeon.swtcl.hw_primitive,
		   rmesa->radeon.swtcl.numverts);
	r300EmitCacheFlush(rmesa);
	if (rmesa->radeon.swtcl.emit_prediction < rmesa->radeon.cmdbuf.cs->cdw)
		WARN_ONCE("Rendering was %d commands larger than predicted size."
			" We might overflow the command buffer.\n",
			rmesa->radeon.cmdbuf.cs->cdw - rmesa->radeon.swtcl.emit_prediction);
	rmesa->radeon.swtcl.emit_prediction = 0;
	COMMIT_BATCH();
}
Example #2
void r100_swtcl_flush(GLcontext *ctx, uint32_t current_offset)
{
   r100ContextPtr rmesa = R100_CONTEXT(ctx);

   radeonEmitState(&rmesa->radeon);
   radeonEmitVertexAOS( rmesa,
			rmesa->radeon.swtcl.vertex_size,
			first_elem(&rmesa->radeon.dma.reserved)->bo,
			current_offset);

   radeonEmitVbufPrim( rmesa,
		       rmesa->swtcl.vertex_format,
		       rmesa->radeon.swtcl.hw_primitive,
		       rmesa->radeon.swtcl.numverts);
   if (rmesa->radeon.swtcl.emit_prediction < rmesa->radeon.cmdbuf.cs->cdw)
      WARN_ONCE("Rendering was %d commands larger than predicted size."
                " We might overflow the command buffer.\n",
                rmesa->radeon.cmdbuf.cs->cdw - rmesa->radeon.swtcl.emit_prediction);

   rmesa->radeon.swtcl.emit_prediction = 0;
}
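Both flush variants above report when the emitted command count exceeds the driver's prediction via WARN_ONCE. A minimal sketch of such a macro, assuming the usual warn-at-most-once-per-call-site idiom (the real radeon macro may format its output differently):

#include <stdio.h>

/* Warn at most once per call site: each expansion gets its own static
 * flag.  GNU-style ##__VA_ARGS__ tolerates the no-extra-arguments case,
 * matching the GCC toolchain these drivers are built with. */
#define WARN_ONCE(fmt, ...)                                  \
   do {                                                      \
      static int warned;                                     \
      if (!warned) {                                         \
         fprintf(stderr, "WARN_ONCE: " fmt, ##__VA_ARGS__);  \
         warned = 1;                                         \
      }                                                      \
   } while (0)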
Example #3
static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct lp_fragment_shader *shader = fs;
   struct lp_fs_variant_list_item *li;

   assert(fs != llvmpipe->fs);
   (void) llvmpipe;

   /*
    * XXX: we need to flush the context until we have some sort of reference
    * counting in fragment shaders, as they may still be binned.
    * Flushing alone might not be sufficient; we need to wait on it too.
    */

   llvmpipe_finish(pipe, __FUNCTION__);

   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      struct lp_fs_variant_list_item *next = next_elem(li);
      remove_shader_variant(llvmpipe, li->base);
      li = next;
   }

   assert(shader->variants_cached == 0);
   FREE((void *) shader->base.tokens);
   FREE(shader);
}
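The list helpers these snippets share (first_elem, next_elem, at_end, last_elem) behave like Mesa's util/simple_list.h: a circular doubly linked list whose head is a sentinel. A self-contained sketch under that assumption, including the delete-while-iterating idiom from llvmpipe_delete_fs_state above:

struct simple_node {
   struct simple_node *next;
   struct simple_node *prev;
};

/* An empty list is a sentinel that points at itself. */
#define make_empty_list(h)  do { (h)->next = (h); (h)->prev = (h); } while (0)
#define first_elem(h)       ((h)->next)
#define last_elem(h)        ((h)->prev)
#define next_elem(e)        ((e)->next)
#define at_end(h, e)        ((e) == (h))
#define is_empty_list(h)    ((h)->next == (h))

/* Destroy every element: read `next` before destroy() unlinks and frees
 * the current node, exactly as the variant-deletion loops here do. */
static void destroy_all(struct simple_node *head,
                        void (*destroy)(struct simple_node *))
{
   struct simple_node *li = first_elem(head);
   while (!at_end(head, li)) {
      struct simple_node *next = next_elem(li);
      destroy(li);
      li = next;
   }
}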
Example #4
/**
 * Update fragment state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void 
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant = NULL;
   struct lp_fs_variant_list_item *li;

   make_variant_key(lp, shader, &key);

   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      if (memcmp(&li->base->key, &key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      move_to_head(&lp->fs_variants_list, &variant->list_item_global);
   }
   else {
      int64_t t0, t1;
      int64_t dt;
      unsigned i;
      if (lp->nr_fs_variants >= LP_MAX_SHADER_VARIANTS) {
         struct pipe_context *pipe = &lp->pipe;

         /*
          * XXX: we need to flush the context until we have some sort of reference
          * counting in fragment shaders, as they may still be binned.
          * Flushing alone might not be sufficient; we need to wait on it too.
          */
         llvmpipe_finish(pipe, __FUNCTION__);

         for (i = 0; i < LP_MAX_SHADER_VARIANTS / 4; i++) {
            struct lp_fs_variant_list_item *item = last_elem(&lp->fs_variants_list);
            remove_shader_variant(lp, item->base);
         }
      }
      t0 = os_time_get();

      variant = generate_variant(lp, shader, &key);

      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */

      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&lp->fs_variants_list, &variant->list_item_global);
         lp->nr_fs_variants++;
         shader->variants_cached++;
      }
   }

   lp_setup_set_fs_variant(lp->setup, variant);
}
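llvmpipe_update_fs is an instance of the find-or-create cache with LRU eviction that also drives the vertex- and geometry-shader variant lists later on this page: linear search by memcmp over a fixed-size key, move_to_head on a hit, eviction of a quarter of the list when full. A condensed, self-contained sketch of that pattern; the variant type, key size, and compile_variant callback are hypothetical stand-ins for generate_variant()/draw_llvm_create_variant():

#include <stdlib.h>
#include <string.h>

#define MAX_VARIANTS 1024

struct variant {
   struct variant *next, *prev;   /* list links; the head is a sentinel */
   unsigned char key[16];         /* hypothetical fixed-size variant key */
};

#define first_elem(h)   ((h)->next)
#define last_elem(h)    ((h)->prev)
#define at_end(h, e)    ((e) == (h))
#define remove_from_list(e) \
   do { (e)->next->prev = (e)->prev; (e)->prev->next = (e)->next; } while (0)
#define insert_at_head(h, e) \
   do { (e)->next = (h)->next; (e)->prev = (h); \
        (h)->next->prev = (e); (h)->next = (e); } while (0)
#define move_to_head(h, e) \
   do { remove_from_list(e); insert_at_head(h, e); } while (0)

static struct variant *
variant_find_or_create(struct variant *head, unsigned *nr,
                       const unsigned char key[16],
                       struct variant *(*compile_variant)(const unsigned char *))
{
   struct variant *v;

   /* Linear search; on a hit, refresh the LRU position. */
   for (v = first_elem(head); !at_end(head, v); v = v->next) {
      if (memcmp(v->key, key, sizeof v->key) == 0) {
         move_to_head(head, v);
         return v;
      }
   }

   /* Miss: if the cache is full, evict 25% from the LRU tail first. */
   if (*nr >= MAX_VARIANTS) {
      unsigned i;
      for (i = 0; i < MAX_VARIANTS / 4 && !at_end(head, last_elem(head)); i++) {
         struct variant *victim = last_elem(head);
         remove_from_list(victim);
         free(victim);
         --*nr;
      }
   }

   v = compile_variant(key);
   if (v) {
      insert_at_head(head, v);
      ++*nr;
   }
   return v;
}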
Example #5
/**
 * Ensure all enabled and complete textures are uploaded along with any buffers being used.
 */
GLboolean r300ValidateBuffers(GLcontext * ctx)
{
	r300ContextPtr rmesa = R300_CONTEXT(ctx);
	struct radeon_renderbuffer *rrb;
	int i;
	int ret;

	radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);

	/* color buffer */
	rrb = radeon_get_colorbuffer(&rmesa->radeon);
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}

	/* depth buffer */
	rrb = radeon_get_depthbuffer(&rmesa->radeon);
	if (rrb && rrb->bo) {
		radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
						  rrb->bo, 0,
						  RADEON_GEM_DOMAIN_VRAM);
	}
	
	for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
		radeonTexObj *t;

		if (!ctx->Texture.Unit[i]._ReallyEnabled)
			continue;

		if (!r300_validate_texture(ctx, ctx->Texture.Unit[i]._Current)) {
			_mesa_warning(ctx,
				      "failed to validate texture for unit %d.\n",
				      i);
		}
		t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
		if (t->image_override && t->bo)
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
		else if (t->mt && t->mt->bo)  /* guard against a missing miptree */
			radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
							  t->mt->bo,
							  RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
	}

	ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs, first_elem(&rmesa->radeon.dma.reserved)->bo, RADEON_GEM_DOMAIN_GTT, 0);
	if (ret)
		return GL_FALSE;
	return GL_TRUE;
}
Example #6
void draw_delete_geometry_shader(struct draw_context *draw,
                                 struct draw_geometry_shader *dgs)
{
   if (!dgs) {
      return;
   }
#ifdef HAVE_LLVM
   if (draw_get_option_use_llvm()) {
      struct llvm_geometry_shader *shader = llvm_geometry_shader(dgs);
      struct draw_gs_llvm_variant_list_item *li;

      li = first_elem(&shader->variants);
      while (!at_end(&shader->variants, li)) {
         struct draw_gs_llvm_variant_list_item *next = next_elem(li);
         draw_gs_llvm_destroy_variant(li->base);
         li = next;
      }

      assert(shader->variants_cached == 0);

      if (dgs->llvm_prim_lengths) {
         unsigned i;
         for (i = 0; i < dgs->max_out_prims; ++i) {
            align_free(dgs->llvm_prim_lengths[i]);
         }
         FREE(dgs->llvm_prim_lengths);
      }
      align_free(dgs->llvm_emitted_primitives);
      align_free(dgs->llvm_emitted_vertices);
      align_free(dgs->llvm_prim_ids);

      align_free(dgs->gs_input);
   }
#endif

   FREE(dgs->primitive_lengths);
   FREE((void*) dgs->state.tokens);
   FREE(dgs);
}
static void
llvm_middle_end_prepare_gs(struct llvm_middle_end *fpme)
{
   struct draw_context *draw = fpme->draw;
   struct draw_geometry_shader *gs = draw->gs.geometry_shader;
   struct draw_gs_llvm_variant_key *key;
   struct draw_gs_llvm_variant *variant = NULL;
   struct draw_gs_llvm_variant_list_item *li;
   struct llvm_geometry_shader *shader = llvm_geometry_shader(gs);
   char store[DRAW_GS_LLVM_MAX_VARIANT_KEY_SIZE];
   unsigned i;

   key = draw_gs_llvm_make_variant_key(fpme->llvm, store);

   /* Search shader's list of variants for the key */
   li = first_elem(&shader->variants);
   while (!at_end(&shader->variants, li)) {
      if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
         variant = li->base;
         break;
      }
      li = next_elem(li);
   }

   if (variant) {
      /* found the variant, move to head of global list (for LRU) */
      move_to_head(&fpme->llvm->gs_variants_list,
                   &variant->list_item_global);
   }
   else {
      /* Need to create new variant */

      /* First check if we've created too many variants.  If so, free
       * 25% of the LRU to avoid using too much memory.
       */
      if (fpme->llvm->nr_gs_variants >= DRAW_MAX_SHADER_VARIANTS) {
         /*
          * XXX: should we flush here ?
          */
         for (i = 0; i < DRAW_MAX_SHADER_VARIANTS / 4; i++) {
            struct draw_gs_llvm_variant_list_item *item;
            if (is_empty_list(&fpme->llvm->gs_variants_list)) {
               break;
            }
            item = last_elem(&fpme->llvm->gs_variants_list);
            assert(item);
            assert(item->base);
            draw_gs_llvm_destroy_variant(item->base);
         }
      }

      variant = draw_gs_llvm_create_variant(fpme->llvm, gs->info.num_outputs, key);

      if (variant) {
         insert_at_head(&shader->variants, &variant->list_item_local);
         insert_at_head(&fpme->llvm->gs_variants_list,
                        &variant->list_item_global);
         fpme->llvm->nr_gs_variants++;
         shader->variants_cached++;
      }
   }

   gs->current_variant = variant;
}
/**
 * Prepare/validate middle part of the vertex pipeline.
 * NOTE: if you change this function, also look at the non-LLVM
 * function fetch_pipeline_prepare() for similar changes.
 */
static void
llvm_middle_end_prepare( struct draw_pt_middle_end *middle,
                         unsigned in_prim,
                         unsigned opt,
                         unsigned *max_vertices )
{
   struct llvm_middle_end *fpme = llvm_middle_end(middle);
   struct draw_context *draw = fpme->draw;
   struct draw_vertex_shader *vs = draw->vs.vertex_shader;
   struct draw_geometry_shader *gs = draw->gs.geometry_shader;
   const unsigned out_prim = gs ? gs->output_primitive :
      u_assembled_prim(in_prim);
   unsigned point_clip = draw->rasterizer->fill_front == PIPE_POLYGON_MODE_POINT ||
                         out_prim == PIPE_PRIM_POINTS;
   unsigned nr;

   fpme->input_prim = in_prim;
   fpme->opt = opt;

   draw_pt_post_vs_prepare( fpme->post_vs,
                            draw->clip_xy,
                            draw->clip_z,
                            draw->clip_user,
                            point_clip ? draw->guard_band_points_xy :
                                         draw->guard_band_xy,
                            draw->identity_viewport,
                            draw->rasterizer->clip_halfz,
                            (draw->vs.edgeflag_output ? TRUE : FALSE) );

   draw_pt_so_emit_prepare( fpme->so_emit, gs == NULL );

   if (!(opt & PT_PIPELINE)) {
      draw_pt_emit_prepare( fpme->emit,
                            out_prim,
                            max_vertices );

      *max_vertices = MAX2( *max_vertices, 4096 );
   }
   else {
      /* limit max fetches by limiting max_vertices */
      *max_vertices = 4096;
   }

   /* Get the number of float[4] attributes per vertex.
    * Note: this must be done after draw_pt_emit_prepare() since that
    * can effect the vertex size.
    */
   nr = MAX2(vs->info.num_inputs, draw_total_vs_outputs(draw));

   /* Always leave room for the vertex header whether we need it or
    * not.  It's hard to get rid of it in particular because of the
    * viewport code in draw_pt_post_vs.c.
    */
   fpme->vertex_size = sizeof(struct vertex_header) + nr * 4 * sizeof(float);

   /* return even number */
   *max_vertices = *max_vertices & ~1;

   /* Find/create the vertex shader variant */
   {
      struct draw_llvm_variant_key *key;
      struct draw_llvm_variant *variant = NULL;
      struct draw_llvm_variant_list_item *li;
      struct llvm_vertex_shader *shader = llvm_vertex_shader(vs);
      char store[DRAW_LLVM_MAX_VARIANT_KEY_SIZE];
      unsigned i;

      key = draw_llvm_make_variant_key(fpme->llvm, store);

      /* Search shader's list of variants for the key */
      li = first_elem(&shader->variants);
      while (!at_end(&shader->variants, li)) {
         if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
            variant = li->base;
            break;
         }
         li = next_elem(li);
      }

      if (variant) {
         /* found the variant, move to head of global list (for LRU) */
         move_to_head(&fpme->llvm->vs_variants_list,
                      &variant->list_item_global);
      }
      else {
         /* Need to create new variant */

         /* First check if we've created too many variants.  If so, free
          * 25% of the LRU to avoid using too much memory.
          */
         if (fpme->llvm->nr_variants >= DRAW_MAX_SHADER_VARIANTS) {
            /*
             * XXX: should we flush here ?
             */
            for (i = 0; i < DRAW_MAX_SHADER_VARIANTS / 4; i++) {
               struct draw_llvm_variant_list_item *item;
               if (is_empty_list(&fpme->llvm->vs_variants_list)) {
                  break;
               }
               item = last_elem(&fpme->llvm->vs_variants_list);
               assert(item);
               assert(item->base);
               draw_llvm_destroy_variant(item->base);
            }
         }

         variant = draw_llvm_create_variant(fpme->llvm, nr, key);

         if (variant) {
            insert_at_head(&shader->variants, &variant->list_item_local);
            insert_at_head(&fpme->llvm->vs_variants_list,
                           &variant->list_item_global);
            fpme->llvm->nr_variants++;
            shader->variants_cached++;
         }
      }

      fpme->current_variant = variant;
   }

   if (gs) {
      llvm_middle_end_prepare_gs(fpme);
   }
}
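The vertex_size computed above reserves room for a vertex_header followed by nr float[4] attributes. A worked example of that arithmetic with an illustrative header layout (the real struct lives in draw_private.h and differs in detail):

#include <stdio.h>

struct vertex_header {      /* illustrative layout, not the real one */
   unsigned clipmask:8;
   unsigned edgeflag:1;
   unsigned pad:23;
   float clip_pos[4];       /* 4 + 16 = 20 bytes on typical ABIs */
};

int main(void)
{
   unsigned nr = 2;  /* e.g. position plus one color, one float[4] each */
   size_t size = sizeof(struct vertex_header) + nr * 4 * sizeof(float);
   printf("%zu bytes per post-transform vertex\n", size);  /* 20 + 32 = 52 */
   return 0;
}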
static void
llvm_middle_end_prepare( struct draw_pt_middle_end *middle,
                         unsigned in_prim,
                         unsigned opt,
                         unsigned *max_vertices )
{
    struct llvm_middle_end *fpme = (struct llvm_middle_end *)middle;
    struct draw_context *draw = fpme->draw;
    struct llvm_vertex_shader *shader =
        llvm_vertex_shader(draw->vs.vertex_shader);
    char store[DRAW_LLVM_MAX_VARIANT_KEY_SIZE];
    struct draw_llvm_variant_key *key;
    struct draw_llvm_variant *variant = NULL;
    struct draw_llvm_variant_list_item *li;
    unsigned i;
    unsigned instance_id_index = ~0;
    const unsigned out_prim = (draw->gs.geometry_shader ?
                               draw->gs.geometry_shader->output_primitive :
                               in_prim);

    /* Add one to num_outputs because the pipeline occasionally tags on
     * an additional texcoord, eg for AA lines.
     */
    const unsigned nr = MAX2( shader->base.info.num_inputs,
                              shader->base.info.num_outputs + 1 );

    /* Scan for instanceID system value.
     * XXX but we never use instance_id_index?!
     */
    for (i = 0; i < shader->base.info.num_inputs; i++) {
        if (shader->base.info.input_semantic_name[i] == TGSI_SEMANTIC_INSTANCEID) {
            instance_id_index = i;
            break;
        }
    }

    fpme->input_prim = in_prim;
    fpme->opt = opt;

    /* Always leave room for the vertex header whether we need it or
     * not.  It's hard to get rid of it in particular because of the
     * viewport code in draw_pt_post_vs.c.
     */
    fpme->vertex_size = sizeof(struct vertex_header) + nr * 4 * sizeof(float);


    /* XXX: it's not really gl rasterization rules we care about here,
     * but gl vs dx9 clip spaces.
     */
    draw_pt_post_vs_prepare( fpme->post_vs,
                             draw->clip_xy,
                             draw->clip_z,
                             draw->clip_user,
                             draw->identity_viewport,
                             (boolean)draw->rasterizer->gl_rasterization_rules,
                             (draw->vs.edgeflag_output ? TRUE : FALSE) );

    draw_pt_so_emit_prepare( fpme->so_emit );

    if (!(opt & PT_PIPELINE)) {
        draw_pt_emit_prepare( fpme->emit,
                              out_prim,
                              max_vertices );

        *max_vertices = MAX2( *max_vertices, 4096 );
    }
    else {
        /* limit max fetches by limiting max_vertices */
        *max_vertices = 4096;
    }

    /* return even number */
    *max_vertices = *max_vertices & ~1;

    key = draw_llvm_make_variant_key(fpme->llvm, store);

    /* Search shader's list of variants for the key */
    li = first_elem(&shader->variants);
    while (!at_end(&shader->variants, li)) {
        if (memcmp(&li->base->key, key, shader->variant_key_size) == 0) {
            variant = li->base;
            break;
        }
        li = next_elem(li);
    }

    if (variant) {
        /* found the variant, move to head of global list (for LRU) */
        move_to_head(&fpme->llvm->vs_variants_list, &variant->list_item_global);
    }
    else {
        /* Need to create new variant */

        /* First check if we've created too many variants.  If so, free
         * 25% of the LRU to avoid using too much memory.
         */
        if (fpme->llvm->nr_variants >= DRAW_MAX_SHADER_VARIANTS) {
            /*
             * XXX: should we flush here ?
             */
            for (i = 0; i < DRAW_MAX_SHADER_VARIANTS / 4; i++) {
                struct draw_llvm_variant_list_item *item =
                    last_elem(&fpme->llvm->vs_variants_list);
                draw_llvm_destroy_variant(item->base);
            }
        }

        variant = draw_llvm_create_variant(fpme->llvm, nr, key);

        if (variant) {
            insert_at_head(&shader->variants, &variant->list_item_local);
            insert_at_head(&fpme->llvm->vs_variants_list, &variant->list_item_global);
            fpme->llvm->nr_variants++;
            shader->variants_cached++;
        }
    }

    fpme->current_variant = variant;

    /* XXX: we only support one constant buffer */
    fpme->llvm->jit_context.vs_constants =
        draw->pt.user.vs_constants[0];
    fpme->llvm->jit_context.gs_constants =
        draw->pt.user.gs_constants[0];
    fpme->llvm->jit_context.planes =
        (float (*)[12][4]) draw->pt.user.planes[0];
    fpme->llvm->jit_context.viewport =
        (float *) draw->viewport.scale;
}
Example #10
static void evergreenSetupStreams(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = EVERGREEN_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    EVERGREEN_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
           )
        {
            evergreenConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                context->stream_desc[index].stride = input[i]->StrideB;
                context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                context->stream_desc[index].is_named_bo = GL_TRUE;
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0) 
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                } 
                else 
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo, 
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if(context->stream_desc[index].is_named_bo) 
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs, 
                                              context->stream_desc[index].bo, 
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
    (void) ret;  /* unlike the r300 path below, the space-check result is ignored here */
}
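The radeonEmitVec{4,8,12,16} helpers above pack a strided client array into the freshly mapped DMA buffer; the suffix is the element size in bytes. A plausible sketch of the 8-byte variant under that assumption (illustrative, not the exact Mesa implementation):

#include <stdint.h>
#include <string.h>

static void emit_vec8_sketch(uint32_t *dst, const void *src,
                             int stride, int count)
{
   int i;

   if (stride == 8) {
      /* Source is already tightly packed: one bulk copy. */
      memcpy(dst, src, count * 8);
      return;
   }
   for (i = 0; i < count; i++) {
      /* Copy two 32-bit words, then hop to the next strided element. */
      dst[0] = ((const uint32_t *) src)[0];
      dst[1] = ((const uint32_t *) src)[1];
      dst += 2;
      src = (const uint8_t *) src + stride;
   }
}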
Example #11
static void r300AllocDmaRegions(GLcontext *ctx, const struct gl_client_array *input[], int count)
{
	r300ContextPtr r300 = R300_CONTEXT(ctx);
	struct r300_vertex_buffer *vbuf = &r300->vbuf;
	GLuint stride;
	int ret;
	int i, index;
	radeon_print(RADEON_RENDER, RADEON_VERBOSE,
			"%s: count %d num_attribs %d\n",
			__func__, count, vbuf->num_attribs);

	for (index = 0; index < vbuf->num_attribs; index++) {
		struct radeon_aos *aos = &r300->radeon.tcl.aos[index];
		i = vbuf->attribs[index].element;

		stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

		if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT ||
#if MESA_BIG_ENDIAN
				getTypeSize(input[i]->Type) != 4 ||
#endif
				stride < 4) {

			r300ConvertAttrib(ctx, count, input[i], &vbuf->attribs[index]);
		} else {
			if (input[i]->BufferObj->Name) {
				if (stride % 4 != 0 || (intptr_t)input[i]->Ptr % 4 != 0) {
					r300AlignDataToDword(ctx, input[i], count, &vbuf->attribs[index]);
					vbuf->attribs[index].is_named_bo = GL_FALSE;
				} else {
					vbuf->attribs[index].stride = input[i]->StrideB;
					vbuf->attribs[index].bo_offset = (intptr_t) input[i]->Ptr;
					vbuf->attribs[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
					vbuf->attribs[index].is_named_bo = GL_TRUE;
				}
			} else {

				int size;
				int local_count = count;
				uint32_t *dst;

				if (input[i]->StrideB == 0) {
					size = getTypeSize(input[i]->Type) * input[i]->Size;
					local_count = 1;
				} else {
					size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
				}

				radeonAllocDmaRegion(&r300->radeon, &vbuf->attribs[index].bo, &vbuf->attribs[index].bo_offset, size, 32);
				radeon_bo_map(vbuf->attribs[index].bo, 1);
				assert(vbuf->attribs[index].bo->ptr != NULL);
				dst = (uint32_t *)ADD_POINTERS(vbuf->attribs[index].bo->ptr, vbuf->attribs[index].bo_offset);
				switch (vbuf->attribs[index].dwords) {
					case 1: radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 2: radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 3: radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					case 4: radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count); break;
					default: assert(0); break;
				}
				radeon_bo_unmap(vbuf->attribs[index].bo);

			}
		}

		aos->count = vbuf->attribs[index].stride == 0 ? 1 : count;
		aos->stride = vbuf->attribs[index].stride / sizeof(float);
		aos->components = vbuf->attribs[index].dwords;
		aos->bo = vbuf->attribs[index].bo;
		aos->offset = vbuf->attribs[index].bo_offset;

		if (vbuf->attribs[index].is_named_bo) {
			radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs, r300->vbuf.attribs[index].bo, RADEON_GEM_DOMAIN_GTT, 0);
		}
	}

	r300->radeon.tcl.aos_count = vbuf->num_attribs;
	ret = radeon_cs_space_check_with_bo(r300->radeon.cmdbuf.cs, first_elem(&r300->radeon.dma.reserved)->bo, RADEON_GEM_DOMAIN_GTT, 0);
	r300SwitchFallback(ctx, R300_FALLBACK_INVALID_BUFFERS, ret);
}