Example #1
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
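
The constructor above only wires up a vtable; callers then drive the result entirely through struct pb_manager's function pointers. Below is a minimal, hedged usage sketch: the provider, fence ops, buffer sizes, and pb_desc values are illustrative assumptions, not taken from the original.

/* Hedged sketch: exercise a manager built by fenced_bufmgr_create().
 * "provider" and "ops" stand for whatever the driver already has;
 * the sizes and the pb_desc fields below are assumptions. */
static void fenced_mgr_usage(struct pb_manager *provider,
                             struct pb_fence_ops *ops)
{
   struct pb_manager *mgr;
   struct pb_desc desc = {0};
   struct pb_buffer *buf;

   mgr = fenced_bufmgr_create(provider, ops,
                              16 * 1024 * 1024,  /* max_buffer_size */
                              64 * 1024 * 1024); /* max_cpu_total_size */
   if (!mgr)
      return;

   desc.alignment = 64;
   buf = mgr->create_buffer(mgr, 4096, &desc);
   if (buf)
      pb_reference(&buf, NULL); /* drop our reference */

   mgr->destroy(mgr);
}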
Example #2
/**
 * Register a vCPU with a scheduler
 *
 * Registration alone does not make the vCPU run; call
 * sched_vcpu_attach() afterwards to add it to the runqueue.
 *
 * @param vcpuid ID of the vCPU to register
 * @param pcpu   Index of the physical CPU it is registered on
 * @return 0 on success
 */
int sched_rr_vcpu_register(vcpuid_t vcpuid, uint32_t pcpu)
{
    struct rq_entry_rr *new_entry;

    /* Check if vcpu is already registered */

    /* Allocate a rq_entry_rr */
    new_entry = (struct rq_entry_rr *) malloc(sizeof(struct rq_entry_rr));
    if (new_entry == NULL)
        return -1; /* allocation failed */

    /* Initialize rq_entry_rr instance */
    LIST_INITHEAD(&new_entry->registered_list_head);
    LIST_INITHEAD(&new_entry->head);

    new_entry->vcpuid = vcpuid;

    /* FIXME:(igkang) Hardcoded. should use function parameter's value for tick_reset_val init. */
    new_entry->tick_reset_val = 5;

    new_entry->state = DETACHED;

    /* Add it to the registered vCPUs list */
    LIST_ADDTAIL(&new_entry->registered_list_head, &registered_list_rr[pcpu]);

    return 0;
}
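
As the comment above notes, registration alone does not run the vCPU; sched_vcpu_attach() must follow. A minimal sketch of that two-step bring-up, assuming sched_vcpu_attach() takes the same (vcpuid, pcpu) pair; its exact signature is not shown in the original, only its existence:

/* Hypothetical vCPU bring-up. The sched_vcpu_attach() signature
 * is assumed for illustration; the doc comment above only says it
 * must be called after registration to enqueue the vCPU. */
static int bring_up_vcpu(vcpuid_t vcpuid, uint32_t pcpu)
{
    if (sched_rr_vcpu_register(vcpuid, pcpu) != 0)
        return -1; /* registration failed */

    return sched_vcpu_attach(vcpuid, pcpu); /* now runnable */
}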
Example #3
/**
 * Initialize scheduler-related data for one physical CPU
 *
 * @param pcpu Index of the physical CPU to initialize
 * @return 0 on success
 */
int sched_rr_init(uint32_t pcpu)
{
    /* Check scheduler config */

    /* Allocate memory for system-wide data */

    /* Initialize data */
    current[pcpu] = NULL;

    LIST_INITHEAD(&runqueue_rr[pcpu]);
    LIST_INITHEAD(&registered_list_rr[pcpu]);

    return 0;
}
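
Both runqueue_rr and registered_list_rr are per-pcpu arrays, so this initializer presumably runs once per physical CPU at boot, before any vCPU is registered. A short sketch; NUM_PCPUS is a placeholder, not a name from the original:

/* Hypothetical boot path: set up round-robin data on every pcpu.
 * NUM_PCPUS is an assumed constant for the number of physical CPUs. */
static void sched_rr_init_all(void)
{
    uint32_t pcpu;

    for (pcpu = 0; pcpu < NUM_PCPUS; pcpu++)
        sched_rr_init(pcpu);
}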
Example #4
int si_context_init(struct r600_context *ctx)
{
    int r;

    LIST_INITHEAD(&ctx->active_query_list);

    /* init dirty list */
    LIST_INITHEAD(&ctx->dirty);
    LIST_INITHEAD(&ctx->enable_list);

    ctx->range = calloc(NUM_RANGES, sizeof(struct r600_range));
    if (!ctx->range) {
        r = -ENOMEM;
        goto out_err;
    }

    /* add blocks */
    r = r600_context_add_block(ctx, si_config_reg_list,
                               Elements(si_config_reg_list), PKT3_SET_CONFIG_REG, SI_CONFIG_REG_OFFSET);
    if (r)
        goto out_err;
    r = r600_context_add_block(ctx, si_context_reg_list,
                               Elements(si_context_reg_list), PKT3_SET_CONTEXT_REG, SI_CONTEXT_REG_OFFSET);
    if (r)
        goto out_err;
    r = r600_context_add_block(ctx, si_sh_reg_list,
                               Elements(si_sh_reg_list), PKT3_SET_SH_REG, SI_SH_REG_OFFSET);
    if (r)
        goto out_err;


    /* PS SAMPLER */
    /* VS SAMPLER */

    /* PS SAMPLER BORDER */
    /* VS SAMPLER BORDER */

    /* PS RESOURCES */
    /* VS RESOURCES */

    ctx->cs = ctx->ws->cs_create(ctx->ws);

    r600_init_cs(ctx);
    ctx->max_db = 8;
    return 0;
out_err:
    r600_context_fini(ctx);
    return r;
}
Example #5
/**
 * Create a caching buffer manager
 *
 * @param provider The buffer manager to which cache miss buffer requests
 * should be redirected.
 * @param usecs Unused buffers may be released from the cache after this
 * time
 * @param size_factor Declare buffers that are size_factor times bigger than
 * the requested size as cache hits.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 * buffer allocation requests are redirected to the provider.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache can
 * hold.
 */
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider, 
                        unsigned usecs,
                        float size_factor,
                        unsigned bypass_usage,
                        uint64_t maximum_cache_size)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;
   
   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   mgr->size_factor = size_factor;
   mgr->bypass_usage = bypass_usage;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   mgr->max_cache_size = maximum_cache_size;
   pipe_mutex_init(mgr->mutex);
      
   return &mgr->base;
}
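
A hedged sketch of stacking this cache on top of an existing manager, using the parameters documented above; the provider and all concrete numbers are illustrative assumptions:

/* Wrap "provider" in a cache that keeps idle buffers for 1 second,
 * counts buffers up to 2x the requested size as hits, bypasses the
 * cache for CPU-read allocations (an arbitrary example mask), and
 * caps the cache at 256 MiB. */
static struct pb_manager *
wrap_with_cache(struct pb_manager *provider)
{
   return pb_cache_manager_create(provider,
                                  1000000,            /* usecs */
                                  2.0f,               /* size_factor */
                                  PB_USAGE_CPU_READ,  /* bypass_usage */
                                  256 * 1024 * 1024); /* max cache size */
}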
Example #6
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider, 
                        pb_size underflow_size, pb_size overflow_size) 
{
   struct pb_debug_manager *mgr;

   if(!provider)
      return NULL;
   
   mgr = CALLOC_STRUCT(pb_debug_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_debug_manager_destroy;
   mgr->base.create_buffer = pb_debug_manager_create_buffer;
   mgr->base.flush = pb_debug_manager_flush;
   mgr->provider = provider;
   mgr->underflow_size = underflow_size;
   mgr->overflow_size = overflow_size;
    
   pipe_mutex_init(mgr->mutex);
   LIST_INITHEAD(&mgr->list);

   return &mgr->base;
}
Example #7
int si_context_init(struct r600_context *ctx)
{
	int r;

	LIST_INITHEAD(&ctx->active_query_list);

	ctx->cs = ctx->ws->cs_create(ctx->ws);

	ctx->max_db = 8;
	return 0;
}
Example #8
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   struct virgl_drm_winsys *qdws;
   int ret;
   struct drm_virtgpu_getparam getparam = {0};

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   qdws->num_delayed = 0;
   qdws->usecs = 1000000;
   LIST_INITHEAD(&qdws->delayed);
   (void) mtx_init(&qdws->mutex, mtx_plain);
   (void) mtx_init(&qdws->bo_handles_mutex, mtx_plain);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;

   qdws->base.get_caps = virgl_drm_get_caps;

   uint32_t value;
   getparam.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
   getparam.value = (uint64_t)(uintptr_t)&value;
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam);
   if (ret == 0) {
      if (value == 1)
         qdws->has_capset_query_fix = true;
   }

   return &qdws->base;
}
Example #9
bool r600_common_context_init(struct r600_common_context *rctx,
			      struct r600_common_screen *rscreen)
{
	util_slab_create(&rctx->pool_transfers,
			 sizeof(struct r600_transfer), 64,
			 UTIL_SLAB_SINGLETHREADED);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	if (rscreen->family == CHIP_HAWAII)
		rctx->max_db = 16;
	else if (rscreen->chip_class >= EVERGREEN)
		rctx->max_db = 8;
	else
		rctx->max_db = 4;

	rctx->b.transfer_map = u_transfer_map_vtbl;
	rctx->b.transfer_flush_region = u_default_transfer_flush_region;
	rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	rctx->b.transfer_inline_write = u_default_transfer_inline_write;
        rctx->b.memory_barrier = r600_memory_barrier;
	rctx->b.flush = r600_flush_from_st;

	LIST_INITHEAD(&rctx->texture_buffers);

	r600_init_context_texture_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

	rctx->allocator_so_filled_size = u_suballocator_create(&rctx->b, 4096, 4,
							       0, PIPE_USAGE_DEFAULT, TRUE);
	if (!rctx->allocator_so_filled_size)
		return false;

	rctx->uploader = u_upload_create(&rctx->b, 1024 * 1024, 256,
					PIPE_BIND_INDEX_BUFFER |
					PIPE_BIND_CONSTANT_BUFFER);
	if (!rctx->uploader)
		return false;

	if (rscreen->info.r600_has_dma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->rings.dma.cs = rctx->ws->cs_create(rctx->ws, RING_DMA,
							 r600_flush_dma_ring,
							 rctx, NULL);
		rctx->rings.dma.flush = r600_flush_dma_ring;
	}

	return true;
}
Example #10
/**
 * Initialize a caching buffer manager.
 *
 * @param mgr     The cache buffer manager
 * @param usecs   Unused buffers may be released from the cache after this
 *                time
 * @param size_factor  Declare buffers that are size_factor times bigger than
 *                     the requested size as cache hits.
 * @param bypass_usage  Bitmask. If (requested usage & bypass_usage) != 0,
 *                      buffer allocation requests are rejected.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache can
 *                            hold.
 * @param destroy_buffer  Function that destroys a buffer for good.
 * @param can_reclaim     Whether a buffer can be reclaimed (e.g. is not busy)
 */
void
pb_cache_init(struct pb_cache *mgr, uint usecs, float size_factor,
              unsigned bypass_usage, uint64_t maximum_cache_size,
              void (*destroy_buffer)(struct pb_buffer *buf),
              bool (*can_reclaim)(struct pb_buffer *buf))
{
   LIST_INITHEAD(&mgr->cache);
   pipe_mutex_init(mgr->mutex);
   mgr->cache_size = 0;
   mgr->max_cache_size = maximum_cache_size;
   mgr->usecs = usecs;
   mgr->num_buffers = 0;
   mgr->bypass_usage = bypass_usage;
   mgr->size_factor = size_factor;
   mgr->destroy_buffer = destroy_buffer;
   mgr->can_reclaim = can_reclaim;
}
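
The two callbacks are what keep pb_cache driver-agnostic: it never frees or recycles a buffer on its own, it calls back into the driver. A sketch with hypothetical hooks; my_destroy_buffer() and my_buffer_is_idle() are placeholders, not real functions:

/* Hypothetical driver hooks for pb_cache_init(). */
static void my_destroy_buffer(struct pb_buffer *buf)
{
   /* free the underlying storage for good */
}

static bool my_buffer_is_idle(struct pb_buffer *buf)
{
   return true; /* e.g. check that no fence still protects buf */
}

static struct pb_cache bo_cache;

static void init_bo_cache(void)
{
   pb_cache_init(&bo_cache,
                 1000000,            /* usecs before eviction */
                 2.0f,               /* size_factor */
                 0,                  /* bypass_usage: never bypass */
                 256 * 1024 * 1024,  /* max_cache_size */
                 my_destroy_buffer,
                 my_buffer_is_idle);
}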
Example #11
static OMX_ERRORTYPE h264d_prc_allocate_resources(void *ap_obj, OMX_U32 a_pid)
{
   vid_dec_PrivateType *priv = ap_obj;
   struct pipe_screen *screen;
   vl_csc_matrix csc;

   assert (priv);

   priv->screen = omx_get_screen();
   if (!priv->screen)
      return OMX_ErrorInsufficientResources;

   screen = priv->screen->pscreen;
   priv->pipe = screen->context_create(screen, priv->screen, 0);
   if (!priv->pipe)
      return OMX_ErrorInsufficientResources;

   if (!vl_compositor_init(&priv->compositor, priv->pipe)) {
      priv->pipe->destroy(priv->pipe);
      priv->pipe = NULL;
      return OMX_ErrorInsufficientResources;
   }

   if (!vl_compositor_init_state(&priv->cstate, priv->pipe)) {
      vl_compositor_cleanup(&priv->compositor);
      priv->pipe->destroy(priv->pipe);
      priv->pipe = NULL;
      return OMX_ErrorInsufficientResources;
   }

   vl_csc_get_matrix(VL_CSC_COLOR_STANDARD_BT_601, NULL, true, &csc);
   if (!vl_compositor_set_csc_matrix(&priv->cstate, (const vl_csc_matrix *)&csc, 1.0f, 0.0f)) {
      vl_compositor_cleanup(&priv->compositor);
      priv->pipe->destroy(priv->pipe);
      priv->pipe = NULL;
      return OMX_ErrorInsufficientResources;
   }

   LIST_INITHEAD(&priv->codec_data.h264.dpb_list);

   priv->video_buffer_map = util_hash_table_create(handle_hash, handle_compare);

   return OMX_ErrorNone;
}
Example #12
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   struct virgl_drm_winsys *qdws;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   qdws->num_delayed = 0;
   qdws->usecs = 1000000;
   LIST_INITHEAD(&qdws->delayed);
   pipe_mutex_init(qdws->mutex);
   pipe_mutex_init(qdws->bo_handles_mutex);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;

   qdws->base.get_caps = virgl_drm_get_caps;
   return &qdws->base;

}
Example #13
struct virgl_winsys *
virgl_vtest_winsys_wrap(struct sw_winsys *sws)
{
   struct virgl_vtest_winsys *vtws;

   vtws = CALLOC_STRUCT(virgl_vtest_winsys);
   if (!vtws)
      return NULL;

   virgl_vtest_connect(vtws);
   vtws->sws = sws;

   vtws->usecs = 1000000;
   LIST_INITHEAD(&vtws->delayed);
   pipe_mutex_init(vtws->mutex);

   vtws->base.destroy = virgl_vtest_winsys_destroy;

   vtws->base.transfer_put = virgl_vtest_transfer_put;
   vtws->base.transfer_get = virgl_vtest_transfer_get;

   vtws->base.resource_create = virgl_vtest_winsys_resource_cache_create;
   vtws->base.resource_unref = virgl_vtest_winsys_resource_unref;
   vtws->base.resource_map = virgl_vtest_resource_map;
   vtws->base.resource_wait = virgl_vtest_resource_wait;
   vtws->base.cmd_buf_create = virgl_vtest_cmd_buf_create;
   vtws->base.cmd_buf_destroy = virgl_vtest_cmd_buf_destroy;
   vtws->base.submit_cmd = virgl_vtest_winsys_submit_cmd;

   vtws->base.emit_res = virgl_vtest_emit_res;
   vtws->base.res_is_referenced = virgl_vtest_res_is_ref;
   vtws->base.get_caps = virgl_vtest_get_caps;

   vtws->base.cs_create_fence = virgl_cs_create_fence;
   vtws->base.fence_wait = virgl_fence_wait;
   vtws->base.fence_reference = virgl_fence_reference;

   vtws->base.flush_frontbuffer = virgl_vtest_flush_frontbuffer;

   return &vtws->base;
}
Example #14
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider, 
                        unsigned usecs)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;
   
   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   pipe_mutex_init(mgr->mutex);
      
   return &mgr->base;
}
Example #15
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider, 
                   pb_size numBufs, 
                   pb_size bufSize,
                   const struct pb_desc *desc) 
{
   struct pool_pb_manager *pool;
   struct pool_buffer *pool_buf;
   pb_size i;

   if(!provider)
      return NULL;
   
   pool = CALLOC_STRUCT(pool_pb_manager);
   if (!pool)
      return NULL;

   pool->base.destroy = pool_bufmgr_destroy;
   pool->base.create_buffer = pool_bufmgr_create_buffer;
   pool->base.flush = pool_bufmgr_flush;

   LIST_INITHEAD(&pool->free);

   pool->numTot = numBufs;
   pool->numFree = numBufs;
   pool->bufSize = bufSize;
   pool->bufAlign = desc->alignment; 
   
   pipe_mutex_init(pool->mutex);

   pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc); 
   if (!pool->buffer)
      goto failure;

   pool->map = pb_map(pool->buffer,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE, NULL);
   if(!pool->map)
      goto failure;

   pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
   if (!pool->bufs)
      goto failure;

   pool_buf = pool->bufs;
   for (i = 0; i < numBufs; ++i) {
      pipe_reference_init(&pool_buf->base.reference, 0);
      pool_buf->base.alignment = 0;
      pool_buf->base.usage = 0;
      pool_buf->base.size = bufSize;
      pool_buf->base.vtbl = &pool_buffer_vtbl;
      pool_buf->mgr = pool;
      pool_buf->start = i * bufSize;
      LIST_ADDTAIL(&pool_buf->head, &pool->free);
      pool_buf++;
   }

   return SUPER(pool);
   
failure:
   if(pool->bufs)
      FREE(pool->bufs);
   if(pool->map)
      pb_unmap(pool->buffer);
   if(pool->buffer)
      pb_reference(&pool->buffer, NULL);
   if(pool)
      FREE(pool);
   return NULL;
}
Example #16
static struct pipe_context *r600_create_context(struct pipe_screen *screen,
                                                void *priv, unsigned flags)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	struct radeon_winsys *ws = rscreen->b.ws;

	if (!rctx)
		return NULL;

	rctx->b.b.screen = screen;
	assert(!priv);
	rctx->b.b.priv = NULL; /* for threaded_context_unwrap_sync */
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.set_atom_dirty = (void *)r600_set_atom_dirty;

	if (!r600_common_context_init(&rctx->b, &rscreen->b, flags))
		goto fail;

	rctx->screen = rscreen;
	LIST_INITHEAD(&rctx->texture_buffers);

	r600_init_blit_functions(rctx);

	if (rscreen->b.info.has_hw_decode) {
		rctx->b.b.create_video_codec = r600_uvd_create_decoder;
		rctx->b.b.create_video_buffer = r600_video_buffer_create;
	} else {
		rctx->b.b.create_video_codec = vl_create_decoder;
		rctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	if (getenv("R600_TRACE"))
		rctx->is_debug = true;
	r600_init_common_state_functions(rctx);

	switch (rctx->b.chip_class) {
	case R600:
	case R700:
		r600_init_state_functions(rctx);
		r600_init_atom_start_cs(rctx);
		rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = rctx->b.chip_class == R700 ? r700_create_resolve_blend(rctx)
								      : r600_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 ||
					   rctx->b.family == CHIP_RV620 ||
					   rctx->b.family == CHIP_RS780 ||
					   rctx->b.family == CHIP_RS880 ||
					   rctx->b.family == CHIP_RV710);
		break;
	case EVERGREEN:
	case CAYMAN:
		evergreen_init_state_functions(rctx);
		evergreen_init_atom_start_cs(rctx);
		evergreen_init_atom_start_compute_cs(rctx);
		rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
		rctx->custom_blend_fastclear = evergreen_create_fastclear_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR ||
					   rctx->b.family == CHIP_PALM ||
					   rctx->b.family == CHIP_SUMO ||
					   rctx->b.family == CHIP_SUMO2 ||
					   rctx->b.family == CHIP_CAICOS ||
					   rctx->b.family == CHIP_CAYMAN ||
					   rctx->b.family == CHIP_ARUBA);
		break;
	default:
		R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class);
		goto fail;
	}

	rctx->b.gfx.cs = ws->cs_create(rctx->b.ctx, RING_GFX,
				       r600_context_gfx_flush, rctx);
	rctx->b.gfx.flush = r600_context_gfx_flush;

	rctx->allocator_fetch_shader =
		u_suballocator_create(&rctx->b.b, 64 * 1024,
				      0, PIPE_USAGE_DEFAULT, 0, FALSE);
	if (!rctx->allocator_fetch_shader)
		goto fail;

	rctx->isa = calloc(1, sizeof(struct r600_isa));
	if (!rctx->isa || r600_isa_init(rctx, rctx->isa))
		goto fail;

	if (rscreen->b.debug_flags & DBG_FORCE_DMA)
		rctx->b.b.resource_copy_region = rctx->b.dma_copy;

	rctx->blitter = util_blitter_create(&rctx->b.b);
	if (rctx->blitter == NULL)
		goto fail;
	util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
	rctx->blitter->draw_rectangle = r600_draw_rectangle;

	r600_begin_new_cs(rctx);

	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

	return &rctx->b.b;

fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}
Example #17
static struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	int shader, i;

	if (rctx == NULL)
		return NULL;

	if (!r600_common_context_init(&rctx->b, &rscreen->b))
		goto fail;

	rctx->b.b.screen = screen;
	rctx->b.b.priv = priv;
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.b.flush = r600_flush_from_st;

	/* Easy accessing of screen/winsys. */
	rctx->screen = rscreen;

	si_init_blit_functions(rctx);
	r600_init_query_functions(rctx);
	r600_init_context_resource_functions(rctx);
	si_init_compute_functions(rctx);

	if (rscreen->b.info.has_uvd) {
		rctx->b.b.create_video_codec = radeonsi_uvd_create_decoder;
		rctx->b.b.create_video_buffer = radeonsi_video_buffer_create;
	} else {
		rctx->b.b.create_video_codec = vl_create_decoder;
		rctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, NULL);
	rctx->b.rings.gfx.flush = r600_flush_from_winsys;

	si_init_all_descriptors(rctx);

	/* Initialize cache_flush. */
	rctx->cache_flush = si_atom_cache_flush;
	rctx->atoms.cache_flush = &rctx->cache_flush;

	rctx->atoms.streamout_begin = &rctx->b.streamout.begin_atom;

	switch (rctx->b.chip_class) {
	case SI:
	case CIK:
		si_init_state_functions(rctx);
		LIST_INITHEAD(&rctx->active_nontimer_query_list);
		rctx->max_db = 8;
		si_init_config(rctx);
		break;
	default:
		R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class);
		goto fail;
	}

	rctx->b.ws->cs_set_flush_callback(rctx->b.rings.gfx.cs, r600_flush_from_winsys, rctx);

	util_slab_create(&rctx->pool_transfers,
			 sizeof(struct pipe_transfer), 64,
			 UTIL_SLAB_SINGLETHREADED);

        rctx->uploader = u_upload_create(&rctx->b.b, 1024 * 1024, 256,
                                         PIPE_BIND_INDEX_BUFFER |
                                         PIPE_BIND_CONSTANT_BUFFER);
        if (!rctx->uploader)
		goto fail;

	rctx->blitter = util_blitter_create(&rctx->b.b);
	if (rctx->blitter == NULL)
		goto fail;

	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

	/* these must be last */
	si_begin_new_cs(rctx);
	si_get_backend_mask(rctx);

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
	 * with a NULL buffer). We need to use a dummy buffer instead. */
	if (rctx->b.chip_class == CIK) {
		rctx->null_const_buf.buffer = pipe_buffer_create(screen, PIPE_BIND_CONSTANT_BUFFER,
								 PIPE_USAGE_STATIC, 16);
		rctx->null_const_buf.buffer_size = rctx->null_const_buf.buffer->width0;

		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < NUM_CONST_BUFFERS; i++) {
				rctx->b.b.set_constant_buffer(&rctx->b.b, shader, i,
							      &rctx->null_const_buf);
			}
		}

		/* Clear the NULL constant buffer, because loads should return zeros. */
		rctx->b.clear_buffer(&rctx->b.b, rctx->null_const_buf.buffer, 0,
				     rctx->null_const_buf.buffer->width0, 0);
	}

	return &rctx->b.b;
fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}
Example #18
bool r600_common_context_init(struct r600_common_context *rctx,
			      struct r600_common_screen *rscreen)
{
	util_slab_create(&rctx->pool_transfers,
			 sizeof(struct r600_transfer), 64,
			 UTIL_SLAB_SINGLETHREADED);

	rctx->screen = rscreen;
	rctx->ws = rscreen->ws;
	rctx->family = rscreen->family;
	rctx->chip_class = rscreen->chip_class;

	if (rscreen->chip_class >= CIK)
		rctx->max_db = MAX2(8, rscreen->info.r600_num_backends);
	else if (rscreen->chip_class >= EVERGREEN)
		rctx->max_db = 8;
	else
		rctx->max_db = 4;

	rctx->b.invalidate_resource = r600_invalidate_resource;
	rctx->b.transfer_map = u_transfer_map_vtbl;
	rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
	rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
	rctx->b.transfer_inline_write = u_default_transfer_inline_write;
        rctx->b.memory_barrier = r600_memory_barrier;
	rctx->b.flush = r600_flush_from_st;
	rctx->b.set_debug_callback = r600_set_debug_callback;

	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
		rctx->b.get_device_reset_status = r600_get_reset_status;
		rctx->gpu_reset_counter =
			rctx->ws->query_value(rctx->ws,
					      RADEON_GPU_RESET_COUNTER);
	}

	LIST_INITHEAD(&rctx->texture_buffers);

	r600_init_context_texture_functions(rctx);
	r600_streamout_init(rctx);
	r600_query_init(rctx);
	cayman_init_msaa(&rctx->b);

	rctx->allocator_so_filled_size = u_suballocator_create(&rctx->b, 4096, 4,
							       0, PIPE_USAGE_DEFAULT, TRUE);
	if (!rctx->allocator_so_filled_size)
		return false;

	rctx->uploader = u_upload_create(&rctx->b, 1024 * 1024,
					PIPE_BIND_INDEX_BUFFER |
					PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM);
	if (!rctx->uploader)
		return false;

	rctx->ctx = rctx->ws->ctx_create(rctx->ws);
	if (!rctx->ctx)
		return false;

	if (rscreen->info.r600_has_dma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
		rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
						   r600_flush_dma_ring,
						   rctx, NULL);
		rctx->dma.flush = r600_flush_dma_ring;
	}

	return true;
}
Example #19
struct pipe_screen *
nvfx_screen_create(struct pipe_winsys *ws, struct nouveau_device *dev)
{
    static const unsigned query_sizes[] = {(4096 - 4 * 32) / 32, 3 * 1024 / 32, 2 * 1024 / 32, 1024 / 32};
    struct nvfx_screen *screen = CALLOC_STRUCT(nvfx_screen);
    struct nouveau_channel *chan;
    struct pipe_screen *pscreen;
    unsigned eng3d_class = 0;
    int ret, i;

    if (!screen)
        return NULL;

    pscreen = &screen->base.base;

    ret = nouveau_screen_init(&screen->base, dev);
    if (ret) {
        nvfx_screen_destroy(pscreen);
        return NULL;
    }
    chan = screen->base.channel;
    screen->cur_ctx = NULL;
    chan->user_private = screen;
    chan->flush_notify = nvfx_channel_flush_notify;

    pscreen->winsys = ws;
    pscreen->destroy = nvfx_screen_destroy;
    pscreen->get_param = nvfx_screen_get_param;
    pscreen->get_shader_param = nvfx_screen_get_shader_param;
    pscreen->get_paramf = nvfx_screen_get_paramf;
    pscreen->is_format_supported = nvfx_screen_is_format_supported;
    pscreen->context_create = nvfx_create;

    switch (dev->chipset & 0xf0) {
    case 0x30:
        if (NV30_3D_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
            eng3d_class = NV30_3D;
        else if (NV34_3D_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
            eng3d_class = NV34_3D;
        else if (NV35_3D_CHIPSET_3X_MASK & (1 << (dev->chipset & 0x0f)))
            eng3d_class = NV35_3D;
        break;
    case 0x40:
        if (NV4X_GRCLASS4097_CHIPSETS & (1 << (dev->chipset & 0x0f)))
            eng3d_class = NV40_3D;
        else if (NV4X_GRCLASS4497_CHIPSETS & (1 << (dev->chipset & 0x0f)))
            eng3d_class = NV44_3D;
        screen->is_nv4x = ~0;
        break;
    case 0x60:
        if (NV6X_GRCLASS4497_CHIPSETS & (1 << (dev->chipset & 0x0f)))
            eng3d_class = NV44_3D;
        screen->is_nv4x = ~0;
        break;
    }

    if (!eng3d_class) {
        NOUVEAU_ERR("Unknown nv3x/nv4x chipset: nv%02x\n", dev->chipset);
        nvfx_screen_destroy(pscreen);
        return NULL;
    }

    screen->advertise_npot = !!screen->is_nv4x;
    screen->advertise_blend_equation_separate = !!screen->is_nv4x;
    screen->use_nv4x = screen->is_nv4x;

    if(screen->is_nv4x) {
        if(debug_get_bool_option("NVFX_SIMULATE_NV30", FALSE))
            screen->use_nv4x = 0;
        if(!debug_get_bool_option("NVFX_NPOT", TRUE))
            screen->advertise_npot = 0;
        if(!debug_get_bool_option("NVFX_BLEND_EQ_SEP", TRUE))
            screen->advertise_blend_equation_separate = 0;
    }

    screen->force_swtnl = debug_get_bool_option("NVFX_SWTNL", FALSE);
    screen->trace_draw = debug_get_bool_option("NVFX_TRACE_DRAW", FALSE);

    screen->buffer_allocation_cost = debug_get_num_option("NVFX_BUFFER_ALLOCATION_COST", 16384);
    screen->inline_cost_per_hardware_cost = atof(debug_get_option("NVFX_INLINE_COST_PER_HARDWARE_COST", "1.0"));
    screen->static_reuse_threshold = atof(debug_get_option("NVFX_STATIC_REUSE_THRESHOLD", "2.0"));

    /* We don't advertise these by default because filtering and blending doesn't work as
     * it should, due to several restrictions.
     * The only exception is fp16 on nv40.
     */
    screen->advertise_fp16 = debug_get_bool_option("NVFX_FP16", !!screen->use_nv4x);
    screen->advertise_fp32 = debug_get_bool_option("NVFX_FP32", 0);

    screen->vertex_buffer_reloc_flags = nvfx_screen_get_vertex_buffer_flags(screen);

    /* surely both nv3x and nv44 support index buffers too: find out how and test that */
    if(eng3d_class == NV40_3D)
        screen->index_buffer_reloc_flags = screen->vertex_buffer_reloc_flags;

    if(!screen->force_swtnl && screen->vertex_buffer_reloc_flags == screen->index_buffer_reloc_flags)
        screen->base.vertex_buffer_flags = screen->base.index_buffer_flags = screen->vertex_buffer_reloc_flags;

    nvfx_screen_init_resource_functions(pscreen);

    ret = nouveau_grobj_alloc(chan, 0xbeef3097, eng3d_class, &screen->eng3d);
    if (ret) {
        NOUVEAU_ERR("Error creating 3D object: %d\n", ret);
        nvfx_screen_destroy(pscreen);
        return NULL;
    }

    /* 2D engine setup */
    nvfx_screen_surface_init(pscreen);

    /* Notifier for sync purposes */
    ret = nouveau_notifier_alloc(chan, 0xbeef0301, 1, &screen->sync);
    if (ret) {
        NOUVEAU_ERR("Error creating notifier object: %d\n", ret);
        nvfx_screen_destroy(pscreen);
        return NULL;
    }

    /* Query objects */
    for(i = 0; i < sizeof(query_sizes) / sizeof(query_sizes[0]); ++i)
    {
        ret = nouveau_notifier_alloc(chan, 0xbeef0302, query_sizes[i], &screen->query);
        if(!ret)
            break;
    }

    if (ret) {
        NOUVEAU_ERR("Error initialising query objects: %d\n", ret);
        nvfx_screen_destroy(pscreen);
        return NULL;
    }

    ret = nouveau_resource_init(&screen->query_heap, 0, query_sizes[i]);
    if (ret) {
        NOUVEAU_ERR("Error initialising query object heap: %d\n", ret);
        nvfx_screen_destroy(pscreen);
        return NULL;
    }

    LIST_INITHEAD(&screen->query_list);

    /* Vtxprog resources */
    if (nouveau_resource_init(&screen->vp_exec_heap, 0, screen->use_nv4x ? 512 : 256) ||
            nouveau_resource_init(&screen->vp_data_heap, 0, screen->use_nv4x ? 468 : 256)) {
        nvfx_screen_destroy(pscreen);
        return NULL;
    }

    BIND_RING(chan, screen->eng3d, 7);

    /* Static eng3d initialisation */
    /* note that we just started using the channel, so we must have space in the pushbuffer */
    OUT_RING(chan, RING_3D(NV30_3D_DMA_NOTIFY, 1));
    OUT_RING(chan, screen->sync->handle);
    OUT_RING(chan, RING_3D(NV30_3D_DMA_TEXTURE0, 2));
    OUT_RING(chan, chan->vram->handle);
    OUT_RING(chan, chan->gart->handle);
    OUT_RING(chan, RING_3D(NV30_3D_DMA_COLOR1, 1));
    OUT_RING(chan, chan->vram->handle);
    OUT_RING(chan, RING_3D(NV30_3D_DMA_COLOR0, 2));
    OUT_RING(chan, chan->vram->handle);
    OUT_RING(chan, chan->vram->handle);
    OUT_RING(chan, RING_3D(NV30_3D_DMA_VTXBUF0, 2));
    OUT_RING(chan, chan->vram->handle);
    OUT_RING(chan, chan->gart->handle);

    OUT_RING(chan, RING_3D(NV30_3D_DMA_FENCE, 2));
    OUT_RING(chan, 0);
    OUT_RING(chan, screen->query->handle);

    OUT_RING(chan, RING_3D(NV30_3D_DMA_UNK1AC, 2));
    OUT_RING(chan, chan->vram->handle);
    OUT_RING(chan, chan->vram->handle);

    if(!screen->is_nv4x)
        nv30_screen_init(screen);
    else
        nv40_screen_init(screen);

    return pscreen;
}
Example #20
struct pipe_context *svga_context_create( struct pipe_screen *screen,
					  void *priv )
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_context *svga = NULL;
   enum pipe_error ret;

   svga = CALLOC_STRUCT(svga_context);
   if (svga == NULL)
      goto no_svga;

   svga->pipe.screen = screen;
   svga->pipe.priv = priv;
   svga->pipe.destroy = svga_destroy;
   svga->pipe.clear = svga_clear;

   svga->swc = svgascreen->sws->context_create(svgascreen->sws);
   if(!svga->swc)
      goto no_swc;

   svga_init_resource_functions(svga);
   svga_init_blend_functions(svga);
   svga_init_blit_functions(svga);
   svga_init_depth_stencil_functions(svga);
   svga_init_draw_functions(svga);
   svga_init_flush_functions(svga);
   svga_init_misc_functions(svga);
   svga_init_rasterizer_functions(svga);
   svga_init_sampler_functions(svga);
   svga_init_fs_functions(svga);
   svga_init_vs_functions(svga);
   svga_init_vertex_functions(svga);
   svga_init_constbuffer_functions(svga);
   svga_init_query_functions(svga);
   svga_init_surface_functions(svga);


   /* debug */
   svga->debug.no_swtnl = debug_get_option_no_swtnl();
   svga->debug.force_swtnl = debug_get_option_force_swtnl();
   svga->debug.use_min_mipmap = debug_get_option_use_min_mipmap();
   svga->debug.disable_shader = debug_get_option_disable_shader();
   svga->debug.no_line_width = debug_get_option_no_line_width();
   svga->debug.force_hw_line_stipple = debug_get_option_force_hw_line_stipple();

   svga->fs_bm = util_bitmask_create();
   if (svga->fs_bm == NULL)
      goto no_fs_bm;

   svga->vs_bm = util_bitmask_create();
   if (svga->vs_bm == NULL)
      goto no_vs_bm;

   svga->upload_ib = u_upload_create( &svga->pipe,
                                      32 * 1024,
                                      16,
                                      PIPE_BIND_INDEX_BUFFER );
   if (svga->upload_ib == NULL)
      goto no_upload_ib;

   svga->upload_vb = u_upload_create( &svga->pipe,
                                      128 * 1024,
                                      16,
                                      PIPE_BIND_VERTEX_BUFFER );
   if (svga->upload_vb == NULL)
      goto no_upload_vb;

   svga->hwtnl = svga_hwtnl_create( svga,
                                    svga->upload_ib,
                                    svga->swc );
   if (svga->hwtnl == NULL)
      goto no_hwtnl;

   if (!svga_init_swtnl(svga))
      goto no_swtnl;

   ret = svga_emit_initial_state( svga );
   if (ret != PIPE_OK)
      goto no_state;
   
   /* Avoid shortcircuiting state with initial value of zero.
    */
   memset(&svga->state.hw_clear, 0xcd, sizeof(svga->state.hw_clear));
   memset(&svga->state.hw_clear.framebuffer, 0x0, 
          sizeof(svga->state.hw_clear.framebuffer));

   memset(&svga->state.hw_draw, 0xcd, sizeof(svga->state.hw_draw));
   memset(&svga->state.hw_draw.views, 0x0, sizeof(svga->state.hw_draw.views));
   svga->state.hw_draw.num_views = 0;

   svga->dirty = ~0;

   LIST_INITHEAD(&svga->dirty_buffers);

   return &svga->pipe;

no_state:
   svga_destroy_swtnl(svga);
no_swtnl:
   svga_hwtnl_destroy( svga->hwtnl );
no_hwtnl:
   u_upload_destroy( svga->upload_vb );
no_upload_vb:
   u_upload_destroy( svga->upload_ib );
no_upload_ib:
   util_bitmask_destroy( svga->vs_bm );
no_vs_bm:
   util_bitmask_destroy( svga->fs_bm );
no_fs_bm:
   svga->swc->destroy(svga->swc);
no_swc:
   FREE(svga);
no_svga:
   return NULL;
}
Example #21
struct pipe_context *svga_context_create(struct pipe_screen *screen,
					 void *priv, unsigned flags)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_context *svga = NULL;
   enum pipe_error ret;

   svga = CALLOC_STRUCT(svga_context);
   if (!svga)
      goto cleanup;

   LIST_INITHEAD(&svga->dirty_buffers);

   svga->pipe.screen = screen;
   svga->pipe.priv = priv;
   svga->pipe.destroy = svga_destroy;
   svga->pipe.clear = svga_clear;

   svga->swc = svgascreen->sws->context_create(svgascreen->sws);
   if (!svga->swc)
      goto cleanup;

   svga_init_resource_functions(svga);
   svga_init_blend_functions(svga);
   svga_init_blit_functions(svga);
   svga_init_depth_stencil_functions(svga);
   svga_init_draw_functions(svga);
   svga_init_flush_functions(svga);
   svga_init_misc_functions(svga);
   svga_init_rasterizer_functions(svga);
   svga_init_sampler_functions(svga);
   svga_init_fs_functions(svga);
   svga_init_vs_functions(svga);
   svga_init_gs_functions(svga);
   svga_init_vertex_functions(svga);
   svga_init_constbuffer_functions(svga);
   svga_init_query_functions(svga);
   svga_init_surface_functions(svga);
   svga_init_stream_output_functions(svga);

   /* init misc state */
   svga->curr.sample_mask = ~0;

   /* debug */
   svga->debug.no_swtnl = debug_get_option_no_swtnl();
   svga->debug.force_swtnl = debug_get_option_force_swtnl();
   svga->debug.use_min_mipmap = debug_get_option_use_min_mipmap();
   svga->debug.disable_shader = debug_get_option_disable_shader();
   svga->debug.no_line_width = debug_get_option_no_line_width();
   svga->debug.force_hw_line_stipple = debug_get_option_force_hw_line_stipple();

   if (!(svga->blend_object_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->ds_object_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->input_element_object_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->rast_object_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->sampler_object_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->sampler_view_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->shader_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->surface_view_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->stream_output_id_bm = util_bitmask_create()))
      goto cleanup;

   if (!(svga->query_id_bm = util_bitmask_create()))
      goto cleanup;

   svga->hwtnl = svga_hwtnl_create(svga);
   if (svga->hwtnl == NULL)
      goto cleanup;

   if (!svga_init_swtnl(svga))
      goto cleanup;

   ret = svga_emit_initial_state( svga );
   if (ret != PIPE_OK)
      goto cleanup;

   svga->const0_upload = u_upload_create(&svga->pipe,
                                         CONST0_UPLOAD_DEFAULT_SIZE,
                                         CONST0_UPLOAD_ALIGNMENT,
                                         PIPE_BIND_CONSTANT_BUFFER);
   if (!svga->const0_upload)
      goto cleanup;

   /* Avoid shortcircuiting state with initial value of zero.
    */
   memset(&svga->state.hw_clear, 0xcd, sizeof(svga->state.hw_clear));
   memset(&svga->state.hw_clear.framebuffer, 0x0, 
          sizeof(svga->state.hw_clear.framebuffer));

   memset(&svga->state.hw_draw, 0xcd, sizeof(svga->state.hw_draw));
   memset(&svga->state.hw_draw.views, 0x0, sizeof(svga->state.hw_draw.views));
   memset(&svga->state.hw_draw.num_sampler_views, 0,
      sizeof(svga->state.hw_draw.num_sampler_views));
   svga->state.hw_draw.num_views = 0;

   /* Initialize the shader pointers */
   svga->state.hw_draw.vs = NULL;
   svga->state.hw_draw.gs = NULL;
   svga->state.hw_draw.fs = NULL;
   memset(svga->state.hw_draw.constbuf, 0,
          sizeof(svga->state.hw_draw.constbuf));
   memset(svga->state.hw_draw.default_constbuf_size, 0,
          sizeof(svga->state.hw_draw.default_constbuf_size));
   memset(svga->state.hw_draw.enabled_constbufs, 0,
          sizeof(svga->state.hw_draw.enabled_constbufs));

   /* Create a no-operation blend state which we will bind whenever the
    * requested blend state is impossible (e.g. due to having an integer
    * render target attached).
    *
    * XXX: We will probably actually need 16 of these, one for each possible
    * RGBA color mask (4 bits).  Then, we would bind the one with a color mask
    * matching the blend state it is replacing.
    */
   {
      struct pipe_blend_state noop_tmpl = {0};
      unsigned i;

      for (i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
         // Set the color mask to all-ones.  Later this may change.
         noop_tmpl.rt[i].colormask = PIPE_MASK_RGBA;
      }
      svga->noop_blend = svga->pipe.create_blend_state(&svga->pipe, &noop_tmpl);
   }

   svga->dirty = ~0;

   return &svga->pipe;

cleanup:
   svga_destroy_swtnl(svga);

   if (svga->const0_upload)
      u_upload_destroy(svga->const0_upload);
   if (svga->hwtnl)
      svga_hwtnl_destroy(svga->hwtnl);
   if (svga->swc)
      svga->swc->destroy(svga->swc);
   util_bitmask_destroy(svga->blend_object_id_bm);
   util_bitmask_destroy(svga->ds_object_id_bm);
   util_bitmask_destroy(svga->input_element_object_id_bm);
   util_bitmask_destroy(svga->rast_object_id_bm);
   util_bitmask_destroy(svga->sampler_object_id_bm);
   util_bitmask_destroy(svga->sampler_view_id_bm);
   util_bitmask_destroy(svga->shader_id_bm);
   util_bitmask_destroy(svga->surface_view_id_bm);
   util_bitmask_destroy(svga->stream_output_id_bm);
   util_bitmask_destroy(svga->query_id_bm);
   FREE(svga);
   return NULL;
}