Example #1
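/* Record the current vertex layout, together with the emit function
 * generated for it, so later state changes that produce the same
 * layout can reuse the generated code instead of rebuilding it.
 */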
void _tnl_register_fastpath( struct tnl_clipspace *vtx,
			     GLboolean match_strides )
{
   struct tnl_clipspace_fastpath *fastpath = CALLOC_STRUCT(tnl_clipspace_fastpath);
   GLuint i;

   if (fastpath == NULL) {
      _mesa_error_no_memory(__func__);
      return;
   }

   fastpath->vertex_size = vtx->vertex_size;
   fastpath->attr_count = vtx->attr_count;
   fastpath->match_strides = match_strides;
   fastpath->func = vtx->emit;
   fastpath->attr = malloc(vtx->attr_count * sizeof(fastpath->attr[0]));

   if (fastpath->attr == NULL) {
      free(fastpath);
      _mesa_error_no_memory(__func__);
      return;
   }

   for (i = 0; i < vtx->attr_count; i++) {
      fastpath->attr[i].format = vtx->attr[i].format;
      fastpath->attr[i].stride = vtx->attr[i].inputstride;
      fastpath->attr[i].size = vtx->attr[i].inputsize;
      fastpath->attr[i].offset = vtx->attr[i].vertoffset;
   }

   fastpath->next = vtx->fastpath;
   vtx->fastpath = fastpath;
}
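The list built above is only useful with a matching lookup: before generating a new emit function, the translation code can walk the registered fastpaths and reuse one whose layout matches. A minimal sketch of such a lookup, using the field names from the snippet (the helper name and the exact matching rules are illustrative, not taken from the code above):

static GLboolean
search_fastpath_emit(struct tnl_clipspace *vtx)
{
   struct tnl_clipspace_fastpath *fp;
   GLuint i;

   for (fp = vtx->fastpath; fp; fp = fp->next) {
      if (fp->attr_count != vtx->attr_count ||
          fp->vertex_size != vtx->vertex_size)
         continue;

      for (i = 0; i < vtx->attr_count; i++) {
         if (fp->attr[i].format != vtx->attr[i].format ||
             fp->attr[i].size != vtx->attr[i].inputsize ||
             fp->attr[i].offset != vtx->attr[i].vertoffset)
            break;
         /* Strides only have to match when the fastpath was
          * registered with match_strides set.
          */
         if (fp->match_strides &&
             fp->attr[i].stride != vtx->attr[i].inputstride)
            break;
      }

      if (i == vtx->attr_count) {
         vtx->emit = fp->func;   /* reuse the cached emit function */
         return GL_TRUE;
      }
   }

   return GL_FALSE;
}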
Example #2
/**
 * The NewBufferObject() driver hook.
 *
 * Allocates a new intel_buffer_object structure and initializes it.
 *
 * There is some duplication between Mesa's buffer objects and our
 * bufmgr buffers.  Both have an integer handle and a hash table to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structure were somehow shared.
 */
static struct gl_buffer_object *
brw_new_buffer_object(struct gl_context * ctx, GLuint name)
{
   struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
   if (!obj) {
      _mesa_error_no_memory(__func__);
      return NULL;   /* don't fall through and dereference a NULL obj */
   }

   _mesa_initialize_buffer_object(ctx, &obj->Base, name);

   obj->buffer = NULL;

   return &obj->Base;
}
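On its own the hook does nothing until the driver installs it. A sketch of the usual wiring, assuming an init helper of this shape (the function name is illustrative; the dd_function_table hook field is the standard Mesa mechanism):

void
brw_init_buffer_object_functions(struct dd_function_table *functions)
{
   /* Core Mesa calls this hook whenever a new buffer object name is
    * used for the first time.
    */
   functions->NewBufferObject = brw_new_buffer_object;
   /* ...the driver installs DeleteBuffer, BufferData, map/unmap
    * hooks, etc. alongside this one... */
}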
Example #3
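/**
 * Common backend for the NV_vdpau_interop surface-registration entry
 * points: validates the target and the texture names, marks the
 * textures immutable, and records the surface in the context's
 * vdpSurfaces set.
 */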
static GLintptr
register_surface(struct gl_context *ctx, GLboolean isOutput,
                 const GLvoid *vdpSurface, GLenum target,
                 GLsizei numTextureNames, const GLuint *textureNames)
{
   struct vdp_surface *surf;
   int i;

   if (!ctx->vdpDevice || !ctx->vdpGetProcAddress || !ctx->vdpSurfaces) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE) {
      _mesa_error(ctx, GL_INVALID_ENUM, "VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   if (target == GL_TEXTURE_RECTANGLE && !ctx->Extensions.NV_texture_rectangle) {
      _mesa_error(ctx, GL_INVALID_ENUM, "VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   surf = CALLOC_STRUCT( vdp_surface );
   if (surf == NULL) {
      _mesa_error_no_memory("VDPAURegisterSurfaceNV");
      return (GLintptr)NULL;
   }

   surf->vdpSurface = vdpSurface;
   surf->target = target;
   surf->access = GL_READ_WRITE;
   surf->state = GL_SURFACE_REGISTERED_NV;
   surf->output = isOutput;
   for (i = 0; i < numTextureNames; ++i) {
      struct gl_texture_object *tex;
      tex = _mesa_lookup_texture(ctx, textureNames[i]);
      if (tex == NULL) {
         free(surf);
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "VDPAURegisterSurfaceNV(texture ID not found)");
         return (GLintptr)NULL;
      }

      _mesa_lock_texture(ctx, tex);

      if (tex->Immutable) {
         _mesa_unlock_texture(ctx, tex);
         free(surf);
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "VDPAURegisterSurfaceNV(texture is immutable)");
         return (GLintptr)NULL;
      }

      if (tex->Target == 0)
         tex->Target = target;
      else if (tex->Target != target) {
         _mesa_unlock_texture(ctx, tex);
         free(surf);
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "VDPAURegisterSurfaceNV(target mismatch)");
         return (GLintptr)NULL;
      }

      /* This will disallow respecifying the storage. */
      tex->Immutable = GL_TRUE;
      _mesa_unlock_texture(ctx, tex);

      _mesa_reference_texobj(&surf->textures[i], tex);
   }

   _mesa_set_add(ctx->vdpSurfaces, surf);

   return (GLintptr)surf;
}
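The GL-visible entry points are then thin wrappers that differ only in the isOutput flag. A sketch of one of them, assuming the usual GET_CURRENT_CONTEXT pattern (the wrapper body is illustrative):

GLintptr GLAPIENTRY
_mesa_VDPAURegisterVideoSurfaceNV(const GLvoid *vdpSurface, GLenum target,
                                  GLsizei numTextureNames,
                                  const GLuint *textureNames)
{
   GET_CURRENT_CONTEXT(ctx);

   /* Video surfaces are inputs, so isOutput is GL_FALSE; the
    * RegisterOutputSurfaceNV twin would pass GL_TRUE instead.
    */
   return register_surface(ctx, GL_FALSE, vdpSurface, target,
                           numTextureNames, textureNames);
}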
Example #4
void
link_uniform_blocks(void *mem_ctx,
                    struct gl_context *ctx,
                    struct gl_shader_program *prog,
                    struct gl_linked_shader *shader,
                    struct gl_uniform_block **ubo_blocks,
                    unsigned *num_ubo_blocks,
                    struct gl_uniform_block **ssbo_blocks,
                    unsigned *num_ssbo_blocks)
{
   /* This hash table will track all of the uniform blocks that have been
    * encountered.  Since blocks with the same block-name must be the same,
    * the hash is organized by block-name.
    */
   struct hash_table *block_hash =
      _mesa_hash_table_create(mem_ctx, _mesa_key_hash_string,
                              _mesa_key_string_equal);

   if (block_hash == NULL) {
      _mesa_error_no_memory(__func__);
      linker_error(prog, "out of memory\n");
      return;
   }

   /* Determine which uniform blocks are active.
    */
   link_uniform_block_active_visitor v(mem_ctx, block_hash, prog);
   visit_list_elements(&v, shader->ir);

   /* Count the number of active uniform blocks.  Count the total number of
    * active slots in those uniform blocks.
    */
   unsigned num_ubo_variables = 0;
   unsigned num_ssbo_variables = 0;
   count_block_size block_size;
   struct hash_entry *entry;

   hash_table_foreach (block_hash, entry) {
      struct link_uniform_block_active *const b =
         (struct link_uniform_block_active *) entry->data;

      assert((b->array != NULL) == b->type->is_array());

      if (b->array != NULL &&
          (b->type->without_array()->interface_packing ==
           GLSL_INTERFACE_PACKING_PACKED)) {
         b->type = resize_block_array(b->type, b->array);
         b->var->type = b->type;
      }

      block_size.num_active_uniforms = 0;
      block_size.process(b->type->without_array(), "");

      if (b->array != NULL) {
         unsigned aoa_size = b->type->arrays_of_arrays_size();
         if (b->is_shader_storage) {
            *num_ssbo_blocks += aoa_size;
            num_ssbo_variables += aoa_size * block_size.num_active_uniforms;
         } else {
            *num_ubo_blocks += aoa_size;
            num_ubo_variables += aoa_size * block_size.num_active_uniforms;
         }
      } else {
         if (b->is_shader_storage) {
            (*num_ssbo_blocks)++;
            num_ssbo_variables += block_size.num_active_uniforms;
         } else {
            (*num_ubo_blocks)++;
            num_ubo_variables += block_size.num_active_uniforms;
         }
      }
   }

   create_buffer_blocks(mem_ctx, ctx, prog, ubo_blocks, *num_ubo_blocks,
                        block_hash, num_ubo_variables, true);
   create_buffer_blocks(mem_ctx, ctx, prog, ssbo_blocks, *num_ssbo_blocks,
                        block_hash, num_ssbo_variables, false);

   _mesa_hash_table_destroy(block_hash, NULL);
}
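Note that the function only ever increments *num_ubo_blocks and *num_ssbo_blocks, so the counters must be zeroed by the caller. A minimal usage sketch (variable names illustrative):

   struct gl_uniform_block *ubo_blocks = NULL;
   struct gl_uniform_block *ssbo_blocks = NULL;
   unsigned num_ubo_blocks = 0;    /* must start at zero: only incremented */
   unsigned num_ssbo_blocks = 0;

   link_uniform_blocks(mem_ctx, ctx, prog, shader,
                       &ubo_blocks, &num_ubo_blocks,
                       &ssbo_blocks, &num_ssbo_blocks);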
Example #5
/* Adjust primitives, indices and vertex definitions so that min_index
 * becomes zero. There are lots of reasons for wanting to do this, e.g.:
 *
 * Software tnl:
 *    - any time min_index != 0, otherwise unused vertices lower than
 *      min_index will be transformed.
 *
 * Hardware tnl:
 *    - if ib != NULL and min_index != 0, otherwise vertices lower than
 *      min_index will be uploaded.  Requires adjusting index values.
 *
 *    - if ib == NULL and min_index != 0, just for convenience so this doesn't
 *      have to be handled within the driver.
 *
 * Hardware tnl with VBO support:
 *    - as above, but only when vertices are not (all?) in VBOs.
 *    - can't save time by trying to upload half a VBO - typically it is
 *      all or nothing.
 */
void vbo_rebase_prims( struct gl_context *ctx,
		       const struct gl_vertex_array *arrays[],
		       const struct _mesa_prim *prim,
		       GLuint nr_prims,
		       const struct _mesa_index_buffer *ib,
		       GLuint min_index,
		       GLuint max_index,
		       vbo_draw_func draw )
{
   struct gl_vertex_array tmp_arrays[VERT_ATTRIB_MAX];
   const struct gl_vertex_array *tmp_array_pointers[VERT_ATTRIB_MAX];

   struct _mesa_index_buffer tmp_ib;
   struct _mesa_prim *tmp_prims = NULL;
   const struct gl_vertex_array **saved_arrays = ctx->Array._DrawArrays;
   void *tmp_indices = NULL;
   GLuint i;

   assert(min_index != 0);

   if (0)
      printf("%s %d..%d\n", __func__, min_index, max_index);


   /* XXX this path is disabled for now.
    * There's rendering corruption in some apps when it's enabled.
    */
   if (0 && ib && ctx->Extensions.ARB_draw_elements_base_vertex) {
      /* If we can just tell the hardware or the TNL to interpret our
       * indices with a different base, do so.
       */
      tmp_prims = malloc(sizeof(*prim) * nr_prims);

      if (tmp_prims == NULL) {
         _mesa_error_no_memory(__func__);
         return;
      }

      for (i = 0; i < nr_prims; i++) {
	 tmp_prims[i] = prim[i];
	 tmp_prims[i].basevertex -= min_index;
      }

      prim = tmp_prims;
   } else if (ib) {
      /* Unfortunately need to adjust each index individually.
       */
      GLboolean map_ib = ib->obj->Name &&
                         !ib->obj->Mappings[MAP_INTERNAL].Pointer;
      void *ptr;

      if (map_ib) 
	 ctx->Driver.MapBufferRange(ctx, 0, ib->obj->Size, GL_MAP_READ_BIT,
				    ib->obj, MAP_INTERNAL);


      ptr = ADD_POINTERS(ib->obj->Mappings[MAP_INTERNAL].Pointer, ib->ptr);

      /* Some users might prefer it if we translated elements to
       * GLuints here.  Others wouldn't...
       */
      switch (ib->type) {
      case GL_UNSIGNED_INT: 
	 tmp_indices = rebase_GLuint( ptr, ib->count, min_index );
	 break;
      case GL_UNSIGNED_SHORT: 
	 tmp_indices = rebase_GLushort( ptr, ib->count, min_index );
	 break;
      case GL_UNSIGNED_BYTE: 
	 tmp_indices = rebase_GLubyte( ptr, ib->count, min_index );
	 break;
      }      

      if (map_ib) 
	 ctx->Driver.UnmapBuffer(ctx, ib->obj, MAP_INTERNAL);

      if (tmp_indices == NULL) {
         return;
      }

      tmp_ib.obj = ctx->Shared->NullBufferObj;
      tmp_ib.ptr = tmp_indices;
      tmp_ib.count = ib->count;
      tmp_ib.type = ib->type;

      ib = &tmp_ib;
   }
   else {
      /* Otherwise the primitives need adjustment.
       */
      tmp_prims = malloc(sizeof(*prim) * nr_prims);

      if (tmp_prims == NULL) {
         _mesa_error_no_memory(__func__);
         return;
      }

      for (i = 0; i < nr_prims; i++) {
	 /* If this fails, it could indicate an application error:
	  */
	 assert(prim[i].start >= min_index);

	 tmp_prims[i] = prim[i];
	 tmp_prims[i].start -= min_index;
      }

      prim = tmp_prims;
   }

   /* Just need to adjust the pointer values on each incoming array.
    * This works for VBO and non-VBO rendering and shouldn't pessimize
    * VBO-based upload schemes.  However this may still not be a fast
    * path for hardware tnl for VBO-based rendering, as most machines
    * will be happier if you just specify a starting vertex value in
    * each primitive.
    *
    * For drivers with hardware tnl, you only want to do this if you
    * are forced to, e.g. non-VBO indexed rendering with start != 0.
    */
   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      tmp_arrays[i] = *arrays[i];
      tmp_arrays[i].Ptr += min_index * tmp_arrays[i].StrideB;
      tmp_array_pointers[i] = &tmp_arrays[i];
   }
   
   /* Re-issue the draw call.
    */
   ctx->Array._DrawArrays = tmp_array_pointers;
   ctx->NewDriverState |= ctx->DriverFlags.NewArray;

   draw( ctx, 
	 prim,
	 nr_prims, 
	 ib, 
	 GL_TRUE,
	 0, 
	 max_index - min_index,
	 NULL, 0, NULL );

   ctx->Array._DrawArrays = saved_arrays;
   ctx->NewDriverState |= ctx->DriverFlags.NewArray;
   
   free(tmp_indices);
   
   free(tmp_prims);
}
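The rebase_GLuint/rebase_GLushort/rebase_GLubyte helpers called in the switch above each copy the index array while subtracting min_index, and they report the out-of-memory error themselves, which is why the caller can simply return when they yield NULL. In Mesa they are stamped out from a small macro; a spelled-out sketch of the GLushort variant:

static void *
rebase_GLushort(const void *ptr, GLuint count, GLuint min_index)
{
   const GLushort *in = (const GLushort *) ptr;
   GLushort *tmp_indices = malloc(count * sizeof(GLushort));
   GLuint i;

   if (tmp_indices == NULL) {
      _mesa_error_no_memory(__func__);   /* caller just checks for NULL */
      return NULL;
   }

   /* Shift every index down so that min_index becomes zero. */
   for (i = 0; i < count; i++)
      tmp_indices[i] = in[i] - min_index;

   return tmp_indices;
}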