Example #1
HRESULT WINAPI
NineVolume9_GetPrivateData( struct NineVolume9 *This,
                            REFGUID refguid,
                            void *pData,
                            DWORD *pSizeOfData )
{
    struct pheader *header;

    user_assert(pSizeOfData, E_POINTER);

    header = util_hash_table_get(This->pdata, refguid);
    if (!header) { return D3DERR_NOTFOUND; }

    if (!pData) {
        *pSizeOfData = header->size;
        return D3D_OK;
    }
    if (*pSizeOfData < header->size) {
        return D3DERR_MOREDATA;
    }

    if (header->unknown) { IUnknown_AddRef(*(IUnknown **)header->data); }
    memcpy(pData, header->data, header->size);

    return D3D_OK;
}
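
All of the snippets in this collection revolve around Mesa's gallium auxiliary hash table. For reference, the interface they assume looks roughly like this (paraphrased from the util_hash_table.h of that era; treat the exact signatures as an assumption, not a contract):

/* Paraphrased interface assumed by the examples (sketch, not verbatim). */
struct util_hash_table;

/* Create a table with caller-supplied callbacks; compare is expected to
 * return 0 when the two keys are equal, memcmp-style. */
struct util_hash_table *
util_hash_table_create(unsigned (*hash)(void *key),
                       int (*compare)(void *key1, void *key2));

/* Returns the stored value, or NULL when the key is absent. */
void *
util_hash_table_get(struct util_hash_table *ht, void *key);

/* Inserts or replaces an entry; returns PIPE_OK on success. */
enum pipe_error
util_hash_table_set(struct util_hash_table *ht, void *key, void *value);

void
util_hash_table_remove(struct util_hash_table *ht, void *key);

Note the get-returns-NULL convention: every example below treats a NULL result as "not present", which is why none of them ever stores a NULL value.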
Example #2
const char*
debug_symbol_name_cached(const void *addr)
{
   const char* name;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(symbols_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(symbols_mutex);
   if(!symbols_hash)
      symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
   name = util_hash_table_get(symbols_hash, (void*)addr);
   if(!name)
   {
      char buf[1024];
      debug_symbol_name(addr, buf, sizeof(buf));
      name = strdup(buf);

      util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
   }
   pipe_mutex_unlock(symbols_mutex);
   return name;
}
Example #3
/* TODO with some refactoring we might be able to re-use debug_symbol_name_cached()
 * instead.. otoh if using libunwind I think u_debug_symbol could just be excluded
 * from build?
 */
static const char *
symbol_name_cached(unw_cursor_t *cursor, unw_proc_info_t *pip)
{
   void *addr = (void *)(uintptr_t)pip->start_ip;
   char *name;

   mtx_lock(&symbols_mutex);
   if(!symbols_hash)
      symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
   name = util_hash_table_get(symbols_hash, addr);
   if(!name)
   {
      char procname[256];
      unw_word_t off;
      int ret;

      ret = unw_get_proc_name(cursor, procname, sizeof(procname), &off);
      if (ret && ret != -UNW_ENOMEM) {
         procname[0] = '?';
         procname[1] = 0;
      }

      if (asprintf(&name, "%s%s", procname, ret == -UNW_ENOMEM ? "..." : "") == -1) 
         name = "??";
      util_hash_table_set(symbols_hash, addr, (void*)name);
   }
   mtx_unlock(&symbols_mutex);

   return name;
}
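
Both cached-symbol variants lazily create symbols_hash with pointer-keyed callbacks. A minimal sketch of what hash_ptr/compare_ptr can look like, assuming the memcmp-style compare convention (0 means equal):

#include <stdint.h>

/* Sketch: hash a pointer by truncation, compare by identity. */
static unsigned
hash_ptr(void *p)
{
   return (unsigned) (uintptr_t) p;
}

static int
compare_ptr(void *a, void *b)
{
   return (a == b) ? 0 : 1;
}

Note also that both variants intentionally never free the strdup()/asprintf() strings: the table leaks them so that a returned name stays valid for the lifetime of the process.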
Example #4
/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc:   Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 *         we want to clear
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = FALSE;
      p_atomic_dec(&vsurf->validated);
   }
}
Example #5
static void
vmw_swc_shader_relocation(struct svga_winsys_context *swc,
                          uint32 *shid,
                          uint32 *mobid,
                          uint32 *offset,
                          struct svga_winsys_gb_shader *shader,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_winsys_screen *vws = vswc->vws;
   struct vmw_svga_winsys_shader *vshader;
   struct vmw_ctx_validate_item *ishader;

   if(!shader) {
      *shid = SVGA3D_INVALID_ID;
      return;
   }

   vshader = vmw_svga_winsys_shader(shader);

   if (!vws->base.have_vgpu10) {
      assert(vswc->shader.staged < vswc->shader.reserved);
      ishader = util_hash_table_get(vswc->hash, vshader);

      if (ishader == NULL) {
         ishader = &vswc->shader.items[vswc->shader.used + vswc->shader.staged];
         vmw_svga_winsys_shader_reference(&ishader->vshader, vshader);
         ishader->referenced = FALSE;
         /*
          * Note that a failure here may just fall back to unhashed behavior
          * and potentially cause unnecessary flushing, so ignore the
          * return code.
          */
         (void) util_hash_table_set(vswc->hash, vshader, ishader);
         ++vswc->shader.staged;
      }

      if (!ishader->referenced) {
         ishader->referenced = TRUE;
         p_atomic_inc(&vshader->validated);
      }
   }

   if (shid)
      *shid = vshader->shid;

   if (vshader->buf)
      vmw_swc_mob_relocation(swc, mobid, offset, vshader->buf,
			     0, SVGA_RELOC_READ);
}
Example #6
HRESULT WINAPI
NineVolume9_FreePrivateData( struct NineVolume9 *This,
                             REFGUID refguid )
{
    struct pheader *header;

    DBG("This=%p refguid=%p\n", This, refguid);

    header = util_hash_table_get(This->pdata, refguid);
    if (!header) { return D3DERR_NOTFOUND; }

    ht_guid_delete(NULL, header, NULL);
    util_hash_table_remove(This->pdata, refguid);

    return D3D_OK;
}
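
ht_guid_delete() acts as the per-entry destructor for the nine private-data table, releasing the COM reference the table holds on IUnknown-typed entries before freeing the header. Its body is not shown in this collection; a plausible shape, assuming it matches the (key, value, data) callback signature implied by the call above:

/* Sketch of the per-entry destructor (assumed, not verbatim nine code). */
static enum pipe_error
ht_guid_delete( void *key, void *value, void *data )
{
    struct pheader *header = value;

    if (header->unknown) { IUnknown_Release(*(IUnknown **)header->data); }
    FREE(header);

    return PIPE_OK;
}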
Example #7
static void get_eglimage(vid_dec_PrivateType* priv) {
   OMX_PTR p_eglimage = NULL;
   OMX_NATIVE_WINDOWTYPE * p_egldisplay = NULL;
   const tiz_port_t * p_port = NULL;
   struct pipe_video_buffer templat = {};
   struct pipe_video_buffer *video_buffer = NULL;
   struct pipe_resource * p_res = NULL;
   struct pipe_resource *resources[VL_NUM_COMPONENTS];

   if (OMX_ErrorNone ==
      tiz_krn_claim_eglimage(tiz_get_krn (handleOf (priv)),
                             OMX_VID_DEC_AVC_OUTPUT_PORT_INDEX,
                             priv->p_outhdr_, &p_eglimage)) {
      priv->use_eglimage = true;
      p_port = tiz_krn_get_port(tiz_get_krn (handleOf (priv)),
                                OMX_VID_DEC_AVC_OUTPUT_PORT_INDEX);
      p_egldisplay = p_port->portdef_.format.video.pNativeWindow;

      if (!util_hash_table_get(priv->video_buffer_map, priv->p_outhdr_)) {
        p_res = st_omx_pipe_texture_from_eglimage(p_egldisplay, p_eglimage);

        assert(p_res);

        memset(&templat, 0, sizeof(templat));
        templat.buffer_format = p_res->format;
        templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_NONE;
        templat.width = p_res->width0;
        templat.height = p_res->height0;
        templat.interlaced = 0;

        memset(resources, 0, sizeof(resources));
        pipe_resource_reference(&resources[0], p_res);

        video_buffer = vl_video_buffer_create_ex2(priv->pipe, &templat, resources);

        assert(video_buffer);
        assert(video_buffer->buffer_format == p_res->format);

        util_hash_table_set(priv->video_buffer_map, priv->p_outhdr_, video_buffer);
      }
   } else {
      (void) tiz_krn_release_buffer(tiz_get_krn (handleOf (priv)),
                                    OMX_VID_DEC_AVC_OUTPUT_PORT_INDEX,
                                    priv->p_outhdr_);
      priv->p_outhdr_ = NULL;
   }
}
Example #8
PUBLIC struct pipe_screen *
nouveau_drm_screen_create(int fd)
{
	struct nouveau_drm *drm = NULL;
	struct nouveau_device *dev = NULL;
	struct nouveau_screen *(*init)(struct nouveau_device *);
	struct nouveau_screen *screen = NULL;
	int ret, dupfd;

	pipe_mutex_lock(nouveau_screen_mutex);
	if (!fd_tab) {
		fd_tab = util_hash_table_create(hash_fd, compare_fd);
		if (!fd_tab) {
			pipe_mutex_unlock(nouveau_screen_mutex);
			return NULL;
		}
	}

	screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
	if (screen) {
		screen->refcount++;
		pipe_mutex_unlock(nouveau_screen_mutex);
		return &screen->base;
	}

	/* Since the screen re-use is based on the device node and not the fd,
	 * create a copy of the fd to be owned by the device. Otherwise a
	 * scenario could occur where two screens are created, and the first
	 * one is shut down, along with the fd being closed. The second
	 * (identical) screen would now have a reference to the closed fd. We
	 * avoid this by duplicating the original fd. Note that
	 * nouveau_device_wrap does not close the fd in case of a device
	 * creation error.
	 */
	dupfd = dup(fd);

	ret = nouveau_drm_new(dupfd, &drm);
	if (ret)
		goto err;

	ret = nouveau_device_new(&drm->client, NV_DEVICE,
				 &(struct nv_device_v0) {
					.device = ~0ULL,
				 }, sizeof(struct nv_device_v0), &dev);
Example #9
const char*
debug_symbol_name_cached(const void *addr)
{
   const char* name;
   pipe_mutex_lock(symbols_mutex);
   if(!symbols_hash)
      symbols_hash = util_hash_table_create(hash_ptr, compare_ptr);
   name = util_hash_table_get(symbols_hash, (void*)addr);
   if(!name)
   {
      char buf[1024];
      debug_symbol_name(addr, buf, sizeof(buf));
      name = strdup(buf);

      util_hash_table_set(symbols_hash, (void*)addr, (void*)name);
   }
   pipe_mutex_unlock(symbols_mutex);
   return name;
}
Example #10
static void
dri2_display_invalidate_buffers(struct x11_screen *xscr, Drawable drawable,
                                void *user_data)
{
   struct native_display *ndpy = (struct native_display* ) user_data;
   struct dri2_display *dri2dpy = dri2_display(ndpy);
   struct native_surface *nsurf;
   struct dri2_surface *dri2surf;

   nsurf = (struct native_surface *)
      util_hash_table_get(dri2dpy->surfaces, (void *) drawable);
   if (!nsurf)
      return;

   dri2surf = dri2_surface(nsurf);

   dri2surf->server_stamp++;
   dri2dpy->event_handler->invalid_surface(&dri2dpy->base,
         &dri2surf->base, dri2surf->server_stamp);
}
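
The lookup only succeeds because each surface was registered under its X11 drawable when it was created. The registration side is not shown here; it would be a single call of this hypothetical form, mirroring the lookup key:

/* Hypothetical registration counterpart, keyed by the drawable: */
util_hash_table_set(dri2dpy->surfaces, (void *) drawable, (void *) nsurf);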
Example #11
static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
				uint32 *where,
				struct vmw_svga_winsys_surface *vsurf,
				unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);
   isrf = util_hash_table_get(vswc->hash, vsurf);

   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = FALSE;
      /*
       * Note that a failure here may just fall back to unhashed behavior
       * and potentially cause unnecessary flushing, so ignore the
       * return code.
       */
      (void) util_hash_table_set(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_surfaces >=
            vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   if (where)
      *where = vsurf->sid;
}
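
Examples #5 and #11 share the same lookup-or-stage idiom on vswc->hash. Factored into a hypothetical helper (assumed names, not actual vmwgfx winsys code), the core of the idiom is:

/* Sketch: find an existing validate item for key, or stage a new one.
 * The caller still takes its own surface/shader reference on a miss. */
static struct vmw_ctx_validate_item *
find_or_stage_item(struct util_hash_table *hash, void *key,
                   struct vmw_ctx_validate_item *items,
                   unsigned used, unsigned *staged)
{
   struct vmw_ctx_validate_item *item = util_hash_table_get(hash, key);

   if (item == NULL) {
      item = &items[used + *staged];
      item->referenced = FALSE;
      /* A set failure merely costs an extra flush later, so ignore it. */
      (void) util_hash_table_set(hash, key, item);
      ++(*staged);
   }
   return item;
}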
Example #12
/**
 * Return a small integer serial number for the given pointer.
 */
static boolean
debug_serial(void *p, unsigned *pserial)
{
   unsigned serial;
   boolean found = TRUE;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(serials_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(serials_mutex);
   if (!serials_hash)
      serials_hash = util_hash_table_create(hash_ptr, compare_ptr);

   serial = (unsigned) (uintptr_t) util_hash_table_get(serials_hash, p);
   if (!serial) {
      /* time to stop logging... (you'll have a 100 GB logfile at least at
       * this point)  TODO: avoid this
       */
      serial = ++serials_last;
      if (!serial) {
         debug_error("More than 2^32 objects detected, aborting.\n");
         os_abort();
      }

      util_hash_table_set(serials_hash, p, (void *) (uintptr_t) serial);
      found = FALSE;
   }
   pipe_mutex_unlock(serials_mutex);

   *pserial = serial;

   return found;
}
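
A typical caller uses the boolean result to act only on the first sighting of an object, and the serial as a stable ID in place of a raw pointer. A hypothetical usage sketch:

/* Hypothetical caller: log each object once, with a stable ID. */
unsigned serial;
if (!debug_serial(obj, &serial))
   debug_printf("new object #%u (%p)\n", serial, obj);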
Example #13
struct pipe_screen *
virgl_drm_screen_create(int fd)
{
   struct pipe_screen *pscreen = NULL;

   mtx_lock(&virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);

      vws = virgl_drm_winsys_create(dup_fd);

      pscreen = virgl_create_screen(vws);
      if (pscreen) {
         util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe drivers screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   mtx_unlock(&virgl_screen_mutex);
   return pscreen;
}
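
The winsys_priv trick stashes the pipe driver's original destroy callback so the winsys can interpose screen reference counting without a link-time dependency. The matching wrapper is not shown here; a sketch of what it plausibly does, assuming the winsys records the dup'd fd:

/* Sketch: drop the refcount under the same mutex, remove the fd_tab entry
 * on last unref, then chain to the driver's saved destroy. */
static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   mtx_lock(&virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
   }
   mtx_unlock(&virgl_screen_mutex);

   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}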
Example #14
static boolean
vmw_swc_add_validate_buffer(struct vmw_svga_winsys_context *vswc,
			    struct pb_buffer *pb_buf,
			    unsigned flags)
{
   enum pipe_error ret;
   unsigned translated_flags;

   /*
    * TODO: Update pb_validate to provide a similar functionality
    * (Check buffer already present before adding)
    */
   if (util_hash_table_get(vswc->hash, pb_buf) != pb_buf) {
      translated_flags = vmw_translate_to_pb_flags(flags);
      ret = pb_validate_add_buffer(vswc->validate, pb_buf, translated_flags);
      /* TODO: Update pipebuffer to reserve buffers and not fail here */
      assert(ret == PIPE_OK);
      (void)ret;
      (void)util_hash_table_set(vswc->hash, pb_buf, pb_buf);
      return TRUE;
   }

   return FALSE;
}
Example #15
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
    struct radeon_drm_winsys *ws;

    pipe_mutex_lock(fd_tab_mutex);
    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->reference);
        pipe_mutex_unlock(fd_tab_mutex);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    ws->fd = dup(fd);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;

    ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
                                       MIN2(ws->info.vram_size, ws->info.gart_size));
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(ws->fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->reference, 1);

    /* Set functions. */
    ws->base.unref = radeon_winsys_unref;
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.query_value = radeon_query_value;
    ws->base.read_registers = radeon_read_registers;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);
    radeon_surface_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    ws->ncs = 0;
    pipe_semaphore_init(&ws->cs_queued, 0);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    /* Create the screen at the end. The winsys must be initialized
     * completely.
     *
     * Alternatively, we could create the screen based on "ws->gen"
     * and link all drivers into one binary blob. */
    ws->base.screen = screen_create(&ws->base);
    if (!ws->base.screen) {
        radeon_winsys_destroy(&ws->base);
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);

    /* We must unlock the mutex once the winsys is fully initialized, so that
     * other threads attempting to create the winsys from the same fd will
     * get a fully initialized winsys and not just half-way initialized. */
    pipe_mutex_unlock(fd_tab_mutex);

    return &ws->base;

fail:
    pipe_mutex_unlock(fd_tab_mutex);
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    if (ws->fd >= 0)
        close(ws->fd);

    FREE(ws);
    return NULL;
}
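
Note that the entry is inserted under ws->fd (the dup) while lookups use the caller's fd; this only works because hash_fd/compare_fd key on the underlying device node rather than on the integer value. A sketch of such fstat-based callbacks (assumed, memcmp-style compare):

#include <sys/stat.h>

/* Sketch: two fds hash/compare equal iff they name the same device node. */
static unsigned
hash_fd(void *key)
{
   struct stat st;

   fstat(pointer_to_intptr(key), &st);
   return st.st_dev ^ st.st_ino ^ st.st_rdev;
}

static int
compare_fd(void *key1, void *key2)
{
   struct stat st1, st2;

   fstat(pointer_to_intptr(key1), &st1);
   fstat(pointer_to_intptr(key2), &st2);

   return st1.st_dev != st2.st_dev ||
          st1.st_ino != st2.st_ino ||
          st1.st_rdev != st2.st_rdev;
}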
Example #16
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride,
                                                      unsigned *size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->size = open_arg.size;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = 0;
    bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.base.size = bo->size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (size)
        *size = bo->base.base.size;

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}
Example #17
PUBLIC struct pipe_screen *
nouveau_drm_screen_create(int fd)
{
	struct nouveau_device *dev = NULL;
	struct pipe_screen *(*init)(struct nouveau_device *);
	struct nouveau_screen *screen;
	int ret, dupfd = -1;

	pipe_mutex_lock(nouveau_screen_mutex);
	if (!fd_tab) {
		fd_tab = util_hash_table_create(hash_fd, compare_fd);
		if (!fd_tab)
			goto err;
	}

	screen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
	if (screen) {
		screen->refcount++;
		pipe_mutex_unlock(nouveau_screen_mutex);
		return &screen->base;
	}

	/* Since the screen re-use is based on the device node and not the fd,
	 * create a copy of the fd to be owned by the device. Otherwise a
	 * scenario could occur where two screens are created, and the first
	 * one is shut down, along with the fd being closed. The second
	 * (identical) screen would now have a reference to the closed fd. We
	 * avoid this by duplicating the original fd. Note that
	 * nouveau_device_wrap does not close the fd in case of a device
	 * creation error.
	 */
	dupfd = dup(fd);
	ret = nouveau_device_wrap(dupfd, 1, &dev);
	if (ret)
		goto err;

	switch (dev->chipset & ~0xf) {
	case 0x30:
	case 0x40:
	case 0x60:
		init = nv30_screen_create;
		break;
	case 0x50:
	case 0x80:
	case 0x90:
	case 0xa0:
		init = nv50_screen_create;
		break;
	case 0xc0:
	case 0xd0:
	case 0xe0:
	case 0xf0:
	case 0x100:
		init = nvc0_screen_create;
		break;
	default:
		debug_printf("%s: unknown chipset nv%02x\n", __func__,
			     dev->chipset);
		goto err;
	}

	screen = (struct nouveau_screen*)init(dev);
	if (!screen)
		goto err;

	util_hash_table_set(fd_tab, intptr_to_pointer(fd), screen);
	screen->refcount = 1;
	pipe_mutex_unlock(nouveau_screen_mutex);
	return &screen->base;

err:
	if (dev)
		nouveau_device_del(&dev);
	else if (dupfd >= 0)
		close(dupfd);
	pipe_mutex_unlock(nouveau_screen_mutex);
	return NULL;
}
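
intptr_to_pointer()/pointer_to_intptr() are the shims these fd tables use to smuggle a small integer through the void * key type; paraphrased from Mesa's u_pointer.h (treat as approximate):

#include <stdint.h>

/* Paraphrased integer <-> pointer key shims. */
static inline void *
intptr_to_pointer(intptr_t i)
{
   return (void *) i;
}

static inline intptr_t
pointer_to_intptr(const void *p)
{
   return (intptr_t) p;
}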
Example #18
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws;

    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->base.reference);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }
    ws->fd = fd;
    util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->base.reference, 1);

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    p_atomic_set(&ws->ncs, 0);
    pipe_semaphore_init(&ws->cs_queued, 0);
    pipe_condvar_init(ws->cs_queue_empty);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    return &ws->base;

fail:
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}
Example #19
struct vmw_winsys_screen *
vmw_winsys_create( int fd )
{
   struct vmw_winsys_screen *vws;
   struct stat stat_buf;

   if (dev_hash == NULL) {
      dev_hash = util_hash_table_create(vmw_dev_hash, vmw_dev_compare);
      if (dev_hash == NULL)
         return NULL;
   }

   if (fstat(fd, &stat_buf))
      return NULL;

   vws = util_hash_table_get(dev_hash, &stat_buf.st_rdev);
   if (vws) {
      vws->open_count++;
      return vws;
   }

   vws = CALLOC_STRUCT(vmw_winsys_screen);
   if (!vws)
      goto out_no_vws;

   vws->device = stat_buf.st_rdev;
   vws->open_count = 1;
   vws->ioctl.drm_fd = dup(fd);
   vws->base.have_gb_dma = TRUE;
   vws->base.need_to_rebind_resources = FALSE;

   if (!vmw_ioctl_init(vws))
      goto out_no_ioctl;

   vws->fence_ops = vmw_fence_ops_create(vws);
   if (!vws->fence_ops)
      goto out_no_fence_ops;

   if(!vmw_pools_init(vws))
      goto out_no_pools;

   if (!vmw_winsys_screen_init_svga(vws))
      goto out_no_svga;

   if (util_hash_table_set(dev_hash, &vws->device, vws) != PIPE_OK)
      goto out_no_hash_insert;

   return vws;
out_no_hash_insert:
out_no_svga:
   vmw_pools_cleanup(vws);
out_no_pools:
   vws->fence_ops->destroy(vws->fence_ops);
out_no_fence_ops:
   vmw_ioctl_cleanup(vws);
out_no_ioctl:
   close(vws->ioctl.drm_fd);
   FREE(vws);
out_no_vws:
   return NULL;
}
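
Here the key is a pointer to a dev_t stored inside the screen (&vws->device), so vmw_dev_hash/vmw_dev_compare must dereference the keys rather than compare the pointers themselves; that is also why looking up with a pointer to the stack-local stat_buf.st_rdev works. A sketch under that assumption:

#include <sys/types.h>

/* Sketch: keys are dev_t pointers, compared by pointed-to value. */
static unsigned
vmw_dev_hash(void *key)
{
   return (unsigned) *(dev_t *) key;
}

static int
vmw_dev_compare(void *key1, void *key2)
{
   return *(dev_t *) key1 != *(dev_t *) key2;
}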
Example #20
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct amdgpu_bo *bo = NULL;
	int r;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		uint32_t handle;
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r) {
			return r;
		}

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			amdgpu_close_kms_handle(dev, handle);
			return -errno;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = util_hash_table_get(dev->bo_flink_names,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = util_hash_table_get(dev->bo_handles,
					 (void*)(uintptr_t)shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
		/* Importing a KMS handle is not allowed. */
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EPERM;

	default:
		pthread_mutex_unlock(&dev->bo_table_mutex);
		return -EINVAL;
	}

	if (bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);

		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo) {
		pthread_mutex_unlock(&dev->bo_table_mutex);
		if (type == amdgpu_bo_handle_type_dma_buf_fd) {
			amdgpu_close_kms_handle(dev, shared_handle);
		}
		return -ENOMEM;
	}

	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r) {
			free(bo);
			pthread_mutex_unlock(&dev->bo_table_mutex);
			return r;
		}

		bo->handle = open_arg.handle;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, bo->handle, DRM_CLOEXEC, &dma_fd);
			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle );

			close(dma_fd);

			if (r) {
				free(bo);
				pthread_mutex_unlock(&dev->bo_table_mutex);
				return r;
			}
		}
		bo->flink_name = shared_handle;
		bo->alloc_size = open_arg.size;
		util_hash_table_set(dev->bo_flink_names,
				    (void*)(uintptr_t)bo->flink_name, bo);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo->handle = shared_handle;
		bo->alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
	pthread_mutex_unlock(&dev->bo_table_mutex);

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	return 0;
}
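
A hedged usage example for the import path, pulling in a dma-buf fd received from another process (the surrounding variable names are assumptions):

/* Hypothetical caller: importing the same buffer twice yields the same BO. */
struct amdgpu_bo_import_result result;
int err = amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
                           dmabuf_fd, &result);
if (err == 0) {
   /* result.buf_handle aliases any earlier import of this buffer,
    * with its refcount bumped rather than a new BO created. */
   consume_bo(result.buf_handle, result.alloc_size); /* hypothetical */
}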
Example #21
static struct virgl_hw_res *
virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
                                        struct winsys_handle *whandle)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_open open_arg = {};
   struct drm_virtgpu_resource_info info_arg = {};
   struct virgl_hw_res *res;
   uint32_t handle = whandle->handle;

   if (whandle->offset != 0) {
      fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
              whandle->offset);
      return NULL;
   }

   mtx_lock(&qdws->bo_handles_mutex);

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
      if (res) {
         struct virgl_hw_res *r = NULL;
         virgl_drm_resource_reference(qdws, &r, res);
         goto done;
      }
   }

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      int r;
      r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
      if (r) {
         res = NULL;
         goto done;
      }
   }

   res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
   fprintf(stderr, "resource %p for handle %d, pfd=%d\n", res, handle, whandle->handle);
   if (res) {
      struct virgl_hw_res *r = NULL;
      virgl_drm_resource_reference(qdws, &r, res);
      goto done;
   }

   res = CALLOC_STRUCT(virgl_hw_res);
   if (!res)
      goto done;

   if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      res->bo_handle = handle;
   } else {
      fprintf(stderr, "gem open handle %d\n", handle);
      memset(&open_arg, 0, sizeof(open_arg));
      open_arg.name = whandle->handle;
      if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
         FREE(res);
         res = NULL;
         goto done;
      }
      res->bo_handle = open_arg.handle;
   }
   res->name = handle;

   memset(&info_arg, 0, sizeof(info_arg));
   info_arg.bo_handle = res->bo_handle;

   if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
      /* close */
      FREE(res);
      res = NULL;
      goto done;
   }

   res->res_handle = info_arg.res_handle;

   res->size = info_arg.size;
   res->stride = info_arg.stride;
   pipe_reference_init(&res->reference, 1);
   res->num_cs_references = 0;

   util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);

done:
   mtx_unlock(&qdws->bo_handles_mutex);
   return res;
}