Example No. 1
boolean
stw_init(const struct stw_winsys *stw_winsys)
{
   static struct stw_device stw_dev_storage;
   struct pipe_screen *screen;

   debug_printf("%s\n", __FUNCTION__);
   
   assert(!stw_dev);

   stw_tls_init();

   stw_dev = &stw_dev_storage;
   memset(stw_dev, 0, sizeof(*stw_dev));

#ifdef DEBUG
   stw_dev->memdbg_no = debug_memory_begin();
#endif
   
   stw_dev->stw_winsys = stw_winsys;

#ifdef WIN32_THREADS
   _glthread_INIT_MUTEX(OneTimeLock);
#endif

   screen = stw_winsys->create_screen();
   if(!screen)
      goto error1;

#ifdef DEBUG
   stw_dev->screen = trace_screen_create(screen);
   stw_dev->trace_running = stw_dev->screen != screen ? TRUE : FALSE;
#else
   stw_dev->screen = screen;
#endif
   
   stw_dev->screen->flush_frontbuffer = &stw_flush_frontbuffer;
   
   pipe_mutex_init( stw_dev->ctx_mutex );
   pipe_mutex_init( stw_dev->fb_mutex );

   stw_dev->ctx_table = handle_table_create();
   if (!stw_dev->ctx_table) {
      goto error1;
   }

   stw_pixelformat_init();

   return TRUE;

error1:
   stw_dev = NULL;
   return FALSE;
}
Example No. 2
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }

    ws->fd = fd;

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_timestamp = radeon_query_timestamp;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);

    return &ws->base;

fail:
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}
Example No. 3
/**
 * Create a caching buffer manager
 *
 * @param provider The buffer manager to which cache miss buffer requests
 * should be redirected.
 * @param usecs Unused buffers may be released from the cache after this
 * time
 * @param size_factor Declare buffers that are size_factor times bigger than
 * the requested size as cache hits.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 * buffer allocation requests are redirected to the provider.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache can
 * hold.
 */
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider, 
                        unsigned usecs,
                        float size_factor,
                        unsigned bypass_usage,
                        uint64_t maximum_cache_size)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;
   
   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   mgr->size_factor = size_factor;
   mgr->bypass_usage = bypass_usage;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   mgr->max_cache_size = maximum_cache_size;
   pipe_mutex_init(mgr->mutex);
      
   return &mgr->base;
}
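A hedged usage sketch of this five-parameter variant (kman and every constant below are illustrative assumptions, not taken from this listing): wrap an existing manager so that freed buffers linger in the cache for reuse.

   /* Keep unused buffers for up to 1 second, count buffers up to 2x the
    * requested size as cache hits, never bypass the cache, and cap the
    * total cached size at 256 MiB. */
   struct pb_manager *cman =
      pb_cache_manager_create(kman, 1000000, 2.0f, 0, 256 * 1024 * 1024);
   if (!cman)
      /* kman is still owned by the caller and must be destroyed by it */
      return NULL;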
Example No. 4
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;
   fenced_mgr->max_buffer_size = max_buffer_size;
   fenced_mgr->max_cpu_total_size = max_cpu_total_size;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}
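Pipebuffer managers are designed to stack. A sketch of one possible stacking (provider and ops would come from a winsys; the ordering and the limits are assumptions, not prescribed by this listing): buffers are recycled through the cache, while the fenced layer defers their release until the GPU is done with them.

   struct pb_manager *cached = pb_cache_manager_create(provider, 1000000);
   struct pb_manager *fenced =
      fenced_bufmgr_create(cached, ops,
                           64 * 1024 * 1024,    /* max_buffer_size */
                           512 * 1024 * 1024);  /* max_cpu_total_size */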
Example No. 5
/**
 * Return the name of the symbol at the given address, caching results
 * in a hash table so repeated lookups are cheap.
 */
const char*
debug_symbol_name_cached(const void *addr)
{
   const char* name;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(symbols_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(symbols_mutex);
   if(!symbols_hash)
      symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
   name = util_hash_table_get(symbols_hash, (void*)addr);
   if(!name)
   {
      char buf[1024];
      debug_symbol_name(addr, buf, sizeof(buf));
      name = strdup(buf);

      util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
   }
   pipe_mutex_unlock(symbols_mutex);
   return name;
}
Example No. 6
/**
 * Create a new primitive setup/render stage.
 */
struct setup_context *setup_create_context( struct softpipe_context *softpipe )
{
   struct setup_context *setup = CALLOC_STRUCT(setup_context);
#if SP_NUM_QUAD_THREADS > 1
   uint i;
#endif

   if (!setup)
      return NULL;

   setup->softpipe = softpipe;

   setup->quad.coef = setup->coef;
   setup->quad.posCoef = &setup->posCoef;

#if SP_NUM_QUAD_THREADS > 1
   setup->que.first = 0;
   setup->que.last = 0;
   pipe_mutex_init( setup->que.que_mutex );
   pipe_condvar_init( setup->que.que_notfull_condvar );
   pipe_condvar_init( setup->que.que_notempty_condvar );
   setup->que.jobs_added = 0;
   setup->que.jobs_done = 0;
   pipe_condvar_init( setup->que.que_done_condvar );
   for (i = 0; i < SP_NUM_QUAD_THREADS; i++) {
      setup->threads[i].setup = setup;
      setup->threads[i].id = i;
      setup->threads[i].handle = pipe_thread_create( quad_thread, &setup->threads[i] );
   }
#endif

   return setup;
}
Example No. 7
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider, 
                        pb_size underflow_size, pb_size overflow_size) 
{
   struct pb_debug_manager *mgr;

   if(!provider)
      return NULL;
   
   mgr = CALLOC_STRUCT(pb_debug_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_debug_manager_destroy;
   mgr->base.create_buffer = pb_debug_manager_create_buffer;
   mgr->base.flush = pb_debug_manager_flush;
   mgr->provider = provider;
   mgr->underflow_size = underflow_size;
   mgr->overflow_size = overflow_size;
    
   pipe_mutex_init(mgr->mutex);
   LIST_INITHEAD(&mgr->list);

   return &mgr->base;
}
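A sketch of wrapping an arbitrary provider with this debug manager (the band sizes are illustrative): each allocation gets underflow_size/overflow_size guard bands that the manager fills with a pattern and verifies later to catch out-of-bounds writes.

   struct pb_manager *dbg = pb_debug_manager_create(provider, 4096, 4096);
   if (dbg)
      provider = dbg;   /* route allocations through the checking wrapper */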
Example No. 8
/**
 * Create a new scene object.
 * \param queue  the queue to put newly rendered/emptied scenes into
 */
struct lp_scene *
lp_scene_create( struct pipe_context *pipe )
{
   struct lp_scene *scene = CALLOC_STRUCT(lp_scene);
   if (!scene)
      return NULL;

   scene->pipe = pipe;

   scene->data.head = CALLOC_STRUCT(data_block);
   if (!scene->data.head) {
      FREE(scene);
      return NULL;
   }

   pipe_mutex_init(scene->mutex);

#ifdef DEBUG
   /* Do some scene limit sanity checks here */
   {
      size_t maxBins = TILES_X * TILES_Y;
      size_t maxCommandBytes = sizeof(struct cmd_block) * maxBins;
      size_t maxCommandPlusData = maxCommandBytes + DATA_BLOCK_SIZE;
      /* We'll need at least one command block per bin.  Make sure that's
       * less than the max allowed scene size.
       */
      assert(maxCommandBytes < LP_SCENE_MAX_SIZE);
      /* We'll also need space for at least one other data block */
      assert(maxCommandPlusData <= LP_SCENE_MAX_SIZE);
   }
#endif

   return scene;
}
Example No. 9
struct pipe_screen *
rbug_screen_create(struct pipe_screen *screen)
{
   struct rbug_screen *rb_screen;

   if (!debug_get_option_rbug())
      return screen;

   rb_screen = CALLOC_STRUCT(rbug_screen);
   if (!rb_screen)
      return screen;

   pipe_mutex_init(rb_screen->list_mutex);
   make_empty_list(&rb_screen->contexts);
   make_empty_list(&rb_screen->resources);
   make_empty_list(&rb_screen->surfaces);
   make_empty_list(&rb_screen->transfers);

   rb_screen->base.winsys = NULL;

   rb_screen->base.destroy = rbug_screen_destroy;
   rb_screen->base.get_name = rbug_screen_get_name;
   rb_screen->base.get_vendor = rbug_screen_get_vendor;
   rb_screen->base.get_param = rbug_screen_get_param;
   rb_screen->base.get_shader_param = rbug_screen_get_shader_param;
   rb_screen->base.get_paramf = rbug_screen_get_paramf;
   rb_screen->base.is_format_supported = rbug_screen_is_format_supported;
   rb_screen->base.context_create = rbug_screen_context_create;
   rb_screen->base.resource_create = rbug_screen_resource_create;
   rb_screen->base.resource_from_handle = rbug_screen_resource_from_handle;
   rb_screen->base.resource_get_handle = rbug_screen_resource_get_handle;
   rb_screen->base.resource_destroy = rbug_screen_resource_destroy;
   rb_screen->base.user_buffer_create = rbug_screen_user_buffer_create;
   rb_screen->base.flush_frontbuffer = rbug_screen_flush_frontbuffer;
   rb_screen->base.fence_reference = rbug_screen_fence_reference;
   rb_screen->base.fence_signalled = rbug_screen_fence_signalled;
   rb_screen->base.fence_finish = rbug_screen_fence_finish;

   rb_screen->screen = screen;

   rb_screen->private_context = screen->context_create(screen, NULL);
   if (!rb_screen->private_context)
      goto err_free;

   rb_screen->rbug = rbug_start(rb_screen);

   if (!rb_screen->rbug)
      goto err_context;

   return &rb_screen->base;

err_context:
   rb_screen->private_context->destroy(rb_screen->private_context);
err_free:
   FREE(rb_screen);
   return screen;
}
Example No. 10
/**
 * Return the XMesaDisplay corresponding to 'display', creating and
 * initializing a new one (screen, state tracker manager, mutex) on
 * first use.
 */
static XMesaDisplay
xmesa_init_display( Display *display )
{
   pipe_static_mutex(init_mutex);
   XMesaDisplay xmdpy;
   int i;

   pipe_mutex_lock(init_mutex);

   /* Look for XMesaDisplay which corresponds to 'display' */
   for (i = 0; i < NumDisplays; i++) {
      if (Displays[i].display == display) {
         /* Found it */
         pipe_mutex_unlock(init_mutex);
         return &Displays[i];
      }
   }

   /* Create new XMesaDisplay */

   assert(NumDisplays < MAX_DISPLAYS);
   xmdpy = &Displays[NumDisplays];
   NumDisplays++;

   if (!xmdpy->display && display) {
      xmdpy->display = display;
      xmdpy->screen = driver.create_pipe_screen(display);
      xmdpy->smapi = CALLOC_STRUCT(st_manager);
      if (xmdpy->smapi) {
         xmdpy->smapi->screen = xmdpy->screen;
         xmdpy->smapi->get_param = xmesa_get_param;
      }

      if (xmdpy->screen && xmdpy->smapi) {
         pipe_mutex_init(xmdpy->mutex);
      }
      else {
         if (xmdpy->screen) {
            xmdpy->screen->destroy(xmdpy->screen);
            xmdpy->screen = NULL;
         }
         if (xmdpy->smapi) {
            FREE(xmdpy->smapi);
            xmdpy->smapi = NULL;
         }

         xmdpy->display = NULL;
      }
   }
   if (!xmdpy->display || xmdpy->display != display)
      xmdpy = NULL;

   pipe_mutex_unlock(init_mutex);

   return xmdpy;
}
Example No. 11
void
util_queue_init(struct util_queue *queue,
                void (*execute_job)(void *))
{
   memset(queue, 0, sizeof(*queue));
   queue->execute_job = execute_job;
   pipe_mutex_init(queue->lock);
   pipe_semaphore_init(&queue->has_space, ARRAY_SIZE(queue->jobs));
   pipe_semaphore_init(&queue->queued, 0);
   queue->thread = pipe_thread_create(util_queue_thread_func, queue);
}
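A minimal sketch of driving this early util_queue API; the job type and callback below are hypothetical, and only the util_queue calls come from the source. The single worker thread created above pops each queued pointer and hands it to execute_job.

   struct my_job {                       /* hypothetical job record */
      int input;
      int result;
   };

   static void process_job(void *data)
   {
      struct my_job *job = (struct my_job *)data;
      job->result = job->input * 2;      /* stand-in for expensive work */
   }

   static struct util_queue queue;

   static void start_worker(void)
   {
      util_queue_init(&queue, process_job);
   }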
Example No. 12
static struct virgl_winsys *
virgl_drm_winsys_create(int drmFD)
{
   struct virgl_drm_winsys *qdws;

   qdws = CALLOC_STRUCT(virgl_drm_winsys);
   if (!qdws)
      return NULL;

   qdws->fd = drmFD;
   qdws->num_delayed = 0;
   qdws->usecs = 1000000;
   LIST_INITHEAD(&qdws->delayed);
   pipe_mutex_init(qdws->mutex);
   pipe_mutex_init(qdws->bo_handles_mutex);
   qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
   qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
   qdws->base.destroy = virgl_drm_winsys_destroy;

   qdws->base.transfer_put = virgl_bo_transfer_put;
   qdws->base.transfer_get = virgl_bo_transfer_get;
   qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
   qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
   qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
   qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
   qdws->base.resource_map = virgl_drm_resource_map;
   qdws->base.resource_wait = virgl_drm_resource_wait;
   qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
   qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
   qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
   qdws->base.emit_res = virgl_drm_emit_res;
   qdws->base.res_is_referenced = virgl_drm_res_is_ref;

   qdws->base.cs_create_fence = virgl_cs_create_fence;
   qdws->base.fence_wait = virgl_fence_wait;
   qdws->base.fence_reference = virgl_fence_reference;

   qdws->base.get_caps = virgl_drm_get_caps;
   return &qdws->base;
}
Example No. 13
bool r600_common_screen_init(struct r600_common_screen *rscreen,
                             struct radeon_winsys *ws)
{
    ws->query_info(ws, &rscreen->info);

    rscreen->b.get_name = r600_get_name;
    rscreen->b.get_vendor = r600_get_vendor;
    rscreen->b.get_compute_param = r600_get_compute_param;
    rscreen->b.get_paramf = r600_get_paramf;
    rscreen->b.get_driver_query_info = r600_get_driver_query_info;
    rscreen->b.get_timestamp = r600_get_timestamp;
    rscreen->b.fence_finish = r600_fence_finish;
    rscreen->b.fence_reference = r600_fence_reference;
    rscreen->b.fence_signalled = r600_fence_signalled;
    rscreen->b.resource_destroy = u_resource_destroy_vtbl;

    if (rscreen->info.has_uvd) {
        rscreen->b.get_video_param = rvid_get_video_param;
        rscreen->b.is_video_format_supported = rvid_is_format_supported;
    } else {
        rscreen->b.get_video_param = r600_get_video_param;
        rscreen->b.is_video_format_supported = vl_video_buffer_is_format_supported;
    }

    r600_init_screen_texture_functions(rscreen);

    rscreen->ws = ws;
    rscreen->family = rscreen->info.family;
    rscreen->chip_class = rscreen->info.chip_class;
    rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", common_debug_options, 0);

    if (!r600_init_tiling(rscreen)) {
        return false;
    }
    util_format_s3tc_init();
    pipe_mutex_init(rscreen->aux_context_lock);

    if (rscreen->info.drm_minor >= 28 && (rscreen->debug_flags & DBG_TRACE_CS)) {
        rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->b,
                            PIPE_BIND_CUSTOM,
                            PIPE_USAGE_STAGING,
                            4096);
        if (rscreen->trace_bo) {
            rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
                                 PIPE_TRANSFER_UNSYNCHRONIZED);
        }
    }

    return true;
}
Example No. 14
struct pipe_screen* r300_screen_create(struct radeon_winsys *rws)
{
    struct r300_screen *r300screen = CALLOC_STRUCT(r300_screen);

    if (!r300screen) {
        return NULL;
    }

    rws->query_info(rws, &r300screen->info);

    r300_init_debug(r300screen);
    r300_parse_chipset(r300screen->info.pci_id, &r300screen->caps);

    if (SCREEN_DBG_ON(r300screen, DBG_NO_ZMASK))
        r300screen->caps.zmask_ram = 0;
    if (SCREEN_DBG_ON(r300screen, DBG_NO_HIZ))
        r300screen->caps.hiz_ram = 0;

    if (r300screen->info.drm_minor < 8)
        r300screen->caps.has_us_format = FALSE;

    pipe_mutex_init(r300screen->num_contexts_mutex);

    util_slab_create(&r300screen->pool_buffers,
                     sizeof(struct r300_resource), 64,
                     UTIL_SLAB_SINGLETHREADED);

    r300screen->rws = rws;
    r300screen->screen.winsys = (struct pipe_winsys*)rws;
    r300screen->screen.destroy = r300_destroy_screen;
    r300screen->screen.get_name = r300_get_name;
    r300screen->screen.get_vendor = r300_get_vendor;
    r300screen->screen.get_param = r300_get_param;
    r300screen->screen.get_shader_param = r300_get_shader_param;
    r300screen->screen.get_paramf = r300_get_paramf;
    r300screen->screen.get_video_param = r300_get_video_param;
    r300screen->screen.is_format_supported = r300_is_format_supported;
    r300screen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
    r300screen->screen.context_create = r300_create_context;
    r300screen->screen.fence_reference = r300_fence_reference;
    r300screen->screen.fence_signalled = r300_fence_signalled;
    r300screen->screen.fence_finish = r300_fence_finish;

    r300_init_screen_resource_functions(r300screen);

    util_format_s3tc_init();

    return &r300screen->screen;
}
Example No. 15
GalliumContext::GalliumContext(ulong options)
	:
	fOptions(options),
	fScreen(NULL),
	fCurrentContext(0)
{
	CALLED();

	// Make all contexts a known value
	for (context_id i = 0; i < CONTEXT_MAX; i++)
		fContext[i] = NULL;

	CreateScreen();

	pipe_mutex_init(fMutex);
}
Example No. 16
/**
 * Create a new scene object.
 * \param queue  the queue to put newly rendered/emptied scenes into
 */
struct lp_scene *
lp_scene_create( struct pipe_context *pipe )
{
   struct lp_scene *scene = CALLOC_STRUCT(lp_scene);
   if (!scene)
      return NULL;

   scene->pipe = pipe;

   scene->data.head = CALLOC_STRUCT(data_block);
   if (!scene->data.head) {
      FREE(scene);
      return NULL;
   }

   pipe_mutex_init(scene->mutex);

   return scene;
}
Example No. 17
struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
{
	struct si_screen *sscreen = CALLOC_STRUCT(si_screen);

	if (!sscreen) {
		return NULL;
	}

	/* Set functions first. */
	sscreen->b.b.context_create = si_create_context;
	sscreen->b.b.destroy = si_destroy_screen;
	sscreen->b.b.get_param = si_get_param;
	sscreen->b.b.get_shader_param = si_get_shader_param;
	sscreen->b.b.is_format_supported = si_is_format_supported;
	sscreen->b.b.resource_create = r600_resource_create_common;

	si_init_screen_state_functions(sscreen);

	if (!r600_common_screen_init(&sscreen->b, ws) ||
	    !si_init_gs_info(sscreen) ||
	    !si_init_shader_cache(sscreen)) {
		FREE(sscreen);
		return NULL;
	}

	if (!debug_get_bool_option("RADEON_DISABLE_PERFCOUNTERS", FALSE))
		si_init_perfcounters(sscreen);

	sscreen->b.has_cp_dma = true;
	sscreen->b.has_streamout = true;
	pipe_mutex_init(sscreen->shader_parts_mutex);
	sscreen->use_monolithic_shaders =
		HAVE_LLVM < 0x0308 ||
		(sscreen->b.debug_flags & DBG_MONOLITHIC_SHADERS) != 0;

	if (debug_get_bool_option("RADEON_DUMP_SHADERS", FALSE))
		sscreen->b.debug_flags |= DBG_FS | DBG_VS | DBG_GS | DBG_PS | DBG_CS;

	/* Create the auxiliary context. This must be done last. */
	sscreen->b.aux_context = sscreen->b.b.context_create(&sscreen->b.b, NULL, 0);

	if (sscreen->b.debug_flags & DBG_TEST_DMA)
		r600_test_dma(&sscreen->b);

	return &sscreen->b.b;
}
Example No. 18
/**
 * Initialize a caching buffer manager.
 *
 * @param mgr     The cache buffer manager
 * @param usecs   Unused buffers may be released from the cache after this
 *                time
 * @param size_factor  Declare buffers that are size_factor times bigger than
 *                     the requested size as cache hits.
 * @param bypass_usage  Bitmask. If (requested usage & bypass_usage) != 0,
 *                      buffer allocation requests are rejected.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache can
 *                            hold.
 * @param destroy_buffer  Function that destroys a buffer for good.
 * @param can_reclaim     Whether a buffer can be reclaimed (e.g. is not busy)
 */
void
pb_cache_init(struct pb_cache *mgr, uint usecs, float size_factor,
              unsigned bypass_usage, uint64_t maximum_cache_size,
              void (*destroy_buffer)(struct pb_buffer *buf),
              bool (*can_reclaim)(struct pb_buffer *buf))
{
   LIST_INITHEAD(&mgr->cache);
   pipe_mutex_init(mgr->mutex);
   mgr->cache_size = 0;
   mgr->max_cache_size = maximum_cache_size;
   mgr->usecs = usecs;
   mgr->num_buffers = 0;
   mgr->bypass_usage = bypass_usage;
   mgr->size_factor = size_factor;
   mgr->destroy_buffer = destroy_buffer;
   mgr->can_reclaim = can_reclaim;
}
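A sketch of the two callbacks this initializer expects (names and bodies are assumptions): destroy_buffer is invoked when the cache finally evicts a buffer, and can_reclaim is asked whether a cached buffer is idle before it is handed back out.

   static void driver_destroy_buffer(struct pb_buffer *buf)
   {
      /* Free the allocation for good; the cache has given up on it. */
      FREE(buf);
   }

   static bool driver_can_reclaim(struct pb_buffer *buf)
   {
      /* A real driver would poll a fence here; this sketch treats
       * every cached buffer as idle. */
      return true;
   }

   static void driver_screen_init(struct driver_screen *screen)  /* hypothetical */
   {
      pb_cache_init(&screen->bo_cache, 1000000, 2.0f, 0,
                    256 * 1024 * 1024,
                    driver_destroy_buffer, driver_can_reclaim);
   }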
Example No. 19
struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer, 
                             pb_size size, pb_size align2) 
{
   struct mm_pb_manager *mm;

   if(!buffer)
      return NULL;
   
   mm = CALLOC_STRUCT(mm_pb_manager);
   if (!mm)
      return NULL;

   mm->base.destroy = mm_bufmgr_destroy;
   mm->base.create_buffer = mm_bufmgr_create_buffer;
   mm->base.flush = mm_bufmgr_flush;

   mm->size = size;
   mm->align2 = align2; /* alignment given as log2, as u_mmAllocMem() expects */

   pipe_mutex_init(mm->mutex);

   mm->buffer = buffer; 

   mm->map = pb_map(mm->buffer, 
		    PIPE_BUFFER_USAGE_CPU_READ |
		    PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!mm->map)
      goto failure;

   mm->heap = u_mmInit(0, (int)size); 
   if (!mm->heap)
      goto failure;

   return SUPER(mm);
   
failure:
   if (mm->heap)
      u_mmDestroy(mm->heap);
   if (mm->map)
      pb_unmap(mm->buffer);
   FREE(mm);
   return NULL;
}
Example No. 20
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    return &mgr->base;
}
Example No. 21
struct pipe_screen* r300_screen_create(struct radeon_winsys *rws)
{
    struct r300_screen *r300screen = CALLOC_STRUCT(r300_screen);

    if (!r300screen) {
        return NULL;
    }

    rws->query_info(rws, &r300screen->info);

    r300_init_debug(r300screen);
    r300_parse_chipset(r300screen->info.pci_id, &r300screen->caps);

    if (SCREEN_DBG_ON(r300screen, DBG_NO_ZMASK))
        r300screen->caps.zmask_ram = 0;
    if (SCREEN_DBG_ON(r300screen, DBG_NO_HIZ))
        r300screen->caps.hiz_ram = 0;

    r300screen->rws = rws;
    r300screen->screen.destroy = r300_destroy_screen;
    r300screen->screen.get_name = r300_get_name;
    r300screen->screen.get_vendor = r300_get_vendor;
    r300screen->screen.get_device_vendor = r300_get_device_vendor;
    r300screen->screen.get_param = r300_get_param;
    r300screen->screen.get_shader_param = r300_get_shader_param;
    r300screen->screen.get_paramf = r300_get_paramf;
    r300screen->screen.get_video_param = r300_get_video_param;
    r300screen->screen.is_format_supported = r300_is_format_supported;
    r300screen->screen.is_video_format_supported = vl_video_buffer_is_format_supported;
    r300screen->screen.context_create = r300_create_context;
    r300screen->screen.fence_reference = r300_fence_reference;
    r300screen->screen.fence_finish = r300_fence_finish;

    r300_init_screen_resource_functions(r300screen);

    slab_create_parent(&r300screen->pool_transfers, sizeof(struct pipe_transfer), 64);

    util_format_s3tc_init();
    pipe_mutex_init(r300screen->cmask_mutex);

    return &r300screen->screen;
}
Example No. 22
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
						pb_size size,
						const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args = {0};

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain =
        (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT  ?
         RADEON_GEM_DOMAIN_GTT  : 0) |
        (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ?
         RADEON_GEM_DOMAIN_VRAM : 0);

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = desc->alignment;
    bo->base.base.usage = desc->usage;
    bo->base.base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->size = size;
    pipe_mutex_init(bo->map_mutex);

    return &bo->base;
}
Example No. 23
struct intel_winsys *
intel_winsys_create_for_fd(int fd)
{
   struct intel_winsys *winsys;

   winsys = CALLOC_STRUCT(intel_winsys);
   if (!winsys)
      return NULL;

   winsys->fd = fd;

   winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, BATCH_SZ);
   if (!winsys->bufmgr) {
      debug_error("failed to create GEM buffer manager");
      FREE(winsys);
      return NULL;
   }

   pipe_mutex_init(winsys->mutex);

   if (!probe_winsys(winsys)) {
      drm_intel_bufmgr_destroy(winsys->bufmgr);
      FREE(winsys);
      return NULL;
   }

   /*
    * No need to implicitly set up a fence register for each non-linear reloc
    * entry.  When a fence register is needed for a reloc entry,
    * drm_intel_bo_emit_reloc_fence() will be called explicitly.
    *
    * intel_bo_add_reloc() currently lacks "bool fenced" for this to work.
    * But we never need a fence register on GEN4+ so we do not need to worry
    * about it yet.
    */
   drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);

   drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);

   return winsys;
}
Example No. 24
struct virgl_winsys *
virgl_vtest_winsys_wrap(struct sw_winsys *sws)
{
   struct virgl_vtest_winsys *vtws;

   vtws = CALLOC_STRUCT(virgl_vtest_winsys);
   if (!vtws)
      return NULL;

   virgl_vtest_connect(vtws);
   vtws->sws = sws;

   vtws->usecs = 1000000;
   LIST_INITHEAD(&vtws->delayed);
   pipe_mutex_init(vtws->mutex);

   vtws->base.destroy = virgl_vtest_winsys_destroy;

   vtws->base.transfer_put = virgl_vtest_transfer_put;
   vtws->base.transfer_get = virgl_vtest_transfer_get;

   vtws->base.resource_create = virgl_vtest_winsys_resource_cache_create;
   vtws->base.resource_unref = virgl_vtest_winsys_resource_unref;
   vtws->base.resource_map = virgl_vtest_resource_map;
   vtws->base.resource_wait = virgl_vtest_resource_wait;
   vtws->base.cmd_buf_create = virgl_vtest_cmd_buf_create;
   vtws->base.cmd_buf_destroy = virgl_vtest_cmd_buf_destroy;
   vtws->base.submit_cmd = virgl_vtest_winsys_submit_cmd;

   vtws->base.emit_res = virgl_vtest_emit_res;
   vtws->base.res_is_referenced = virgl_vtest_res_is_ref;
   vtws->base.get_caps = virgl_vtest_get_caps;

   vtws->base.cs_create_fence = virgl_cs_create_fence;
   vtws->base.fence_wait = virgl_fence_wait;
   vtws->base.fence_reference = virgl_fence_reference;

   vtws->base.flush_frontbuffer = virgl_vtest_flush_frontbuffer;

   return &vtws->base;
}
Example No. 25
/**
 * Create a new fence object.
 *
 * The rank will be the number of bins in the scene.  Whenever a rendering
 * thread hits a fence command, it'll increment the fence counter.  When
 * the counter == the rank, the fence is finished.
 *
 * \param rank  the expected finished value of the fence counter.
 */
struct lp_fence *
lp_fence_create(unsigned rank)
{
   static int fence_id;
   struct lp_fence *fence = CALLOC_STRUCT(lp_fence);

   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);

   pipe_mutex_init(fence->mutex);
   pipe_condvar_init(fence->signalled);

   fence->id = fence_id++;
   fence->rank = rank;

   if (LP_DEBUG & DEBUG_FENCE)
      debug_printf("%s %d\n", __FUNCTION__, fence->id);

   return fence;
}
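A sketch of the rank/counter protocol described in the comment above, assuming llvmpipe's companion entry points lp_fence_signal(), lp_fence_wait() and lp_fence_reference() from the same module; the comments mark which thread runs each fragment.

   unsigned num_bins = 16;   /* e.g. one bin per screen tile */

   /* creation: expect one signal per bin */
   struct lp_fence *fence = lp_fence_create(num_bins);

   /* in each rasterizer thread, after it finishes its bin: */
   lp_fence_signal(fence);   /* count++; waiters wake when count == rank */

   /* in the consumer thread: block until every bin has signalled */
   lp_fence_wait(fence);
   lp_fence_reference(&fence, NULL);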
Example No. 26
struct intel_winsys *
intel_winsys_create_for_fd(int fd)
{
   /* so that we can have enough (up to 4094) relocs per bo */
   const int batch_size = sizeof(uint32_t) * 8192;
   struct intel_winsys *winsys;

   winsys = CALLOC_STRUCT(intel_winsys);
   if (!winsys)
      return NULL;

   winsys->fd = fd;

   winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, batch_size);
   if (!winsys->bufmgr) {
      debug_error("failed to create GEM buffer manager");
      FREE(winsys);
      return NULL;
   }

   pipe_mutex_init(winsys->mutex);

   if (!probe_winsys(winsys)) {
      pipe_mutex_destroy(winsys->mutex);
      drm_intel_bufmgr_destroy(winsys->bufmgr);
      FREE(winsys);
      return NULL;
   }

   /*
    * No need to implicitly set up a fence register for each non-linear reloc
    * entry.  INTEL_RELOC_FENCE will be set on reloc entries that need them.
    */
   drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);

   drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);

   return winsys;
}
Example No. 27
/**
 * Return a small integer serial number for the given pointer.
 */
static boolean
debug_serial(void *p, unsigned *pserial)
{
   unsigned serial;
   boolean found = TRUE;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(serials_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(serials_mutex);
   if (!serials_hash)
      serials_hash = util_hash_table_create(hash_ptr, compare_ptr);

   serial = (unsigned) (uintptr_t) util_hash_table_get(serials_hash, p);
   if (!serial) {
      /* time to stop logging... (you'll have a 100 GB logfile at least at
       * this point)  TODO: avoid this
       */
      serial = ++serials_last;
      if (!serial) {
         debug_error("More than 2^32 objects detected, aborting.\n");
         os_abort();
      }

      util_hash_table_set(serials_hash, p, (void *) (uintptr_t) serial);
      found = FALSE;
   }
   pipe_mutex_unlock(serials_mutex);

   *pserial = serial;

   return found;
}
Example No. 28
bool r600_common_screen_init(struct r600_common_screen *rscreen,
			     struct radeon_winsys *ws)
{
	ws->query_info(ws, &rscreen->info);

	rscreen->b.fence_finish = r600_fence_finish;
	rscreen->b.fence_reference = r600_fence_reference;
	rscreen->b.fence_signalled = r600_fence_signalled;

	rscreen->ws = ws;
	rscreen->family = rscreen->info.family;
	rscreen->chip_class = rscreen->info.chip_class;
	rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", common_debug_options, 0);

	if (!r600_init_tiling(rscreen)) {
		return false;
	}

	util_format_s3tc_init();

	pipe_mutex_init(rscreen->aux_context_lock);
	return true;
}
Example No. 29
struct util_ringbuffer *util_ringbuffer_create( unsigned dwords )
{
   struct util_ringbuffer *ring = CALLOC_STRUCT(util_ringbuffer);
   if (ring == NULL)
      return NULL;

   assert(util_is_power_of_two(dwords));
   
   ring->buf = MALLOC( dwords * sizeof(unsigned) );
   if (ring->buf == NULL)
      goto fail;

   ring->mask = dwords - 1;

   pipe_condvar_init(ring->change);
   pipe_mutex_init(ring->mutex);
   return ring;

fail:
   FREE(ring->buf);
   FREE(ring);
   return NULL;
}
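A sketch of moving one packet through the ring, assuming u_ringbuffer's util_packet header (with its dwords length field) and the enqueue/dequeue entry points from the same file; real callers run the two halves on different threads, with dequeue blocking on the condvar initialized above.

   static void ring_demo(void)
   {
      struct util_ringbuffer *ring = util_ringbuffer_create(1024);
      struct util_packet packet = {0};

      if (!ring)
         return;

      packet.dwords = 1;     /* total packet length, in dwords */

      util_ringbuffer_enqueue(ring, &packet);           /* producer side */
      util_ringbuffer_dequeue(ring, &packet, 1, TRUE);  /* consumer side */

      util_ringbuffer_destroy(ring);
   }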
Example No. 30
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs)
{
   struct pb_cache_manager *mgr;

   if(!provider)
      return NULL;
   
   mgr = CALLOC_STRUCT(pb_cache_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_cache_manager_destroy;
   mgr->base.create_buffer = pb_cache_manager_create_buffer;
   mgr->base.flush = pb_cache_manager_flush;
   mgr->provider = provider;
   mgr->usecs = usecs;
   LIST_INITHEAD(&mgr->delayed);
   mgr->numDelayed = 0;
   pipe_mutex_init(mgr->mutex);
      
   return &mgr->base;
}