Example #1
static struct pipe_surface *
rbug_context_create_surface(struct pipe_context *_pipe,
                            struct pipe_resource *_resource,
                            const struct pipe_surface *surf_tmpl)
{
   struct rbug_context *rb_pipe = rbug_context(_pipe);
   struct rbug_resource *rb_resource = rbug_resource(_resource);
   struct pipe_context *pipe = rb_pipe->pipe;
   struct pipe_resource *resource = rb_resource->resource;
   struct pipe_surface *result;

   pipe_mutex_lock(rb_pipe->call_mutex);
   result = pipe->create_surface(pipe,
                                 resource,
                                 surf_tmpl);
   pipe_mutex_unlock(rb_pipe->call_mutex);

   if (result)
      return rbug_surface_create(rb_pipe, rb_resource, result);
   return NULL;
}
Example #2
/**
 * Copy image data from a VdpOutputSurface to application memory in the
 * surface's native format.
 */
VdpStatus
vlVdpOutputSurfaceGetBitsNative(VdpOutputSurface surface,
                                VdpRect const *source_rect,
                                void *const *destination_data,
                                uint32_t const *destination_pitches)
{
   vlVdpOutputSurface *vlsurface;
   struct pipe_context *pipe;
   struct pipe_resource *res;
   struct pipe_box box;
   struct pipe_transfer *transfer;
   uint8_t *map;

   vlsurface = vlGetDataHTAB(surface);
   if (!vlsurface)
      return VDP_STATUS_INVALID_HANDLE;

   pipe = vlsurface->device->context;
   if (!pipe)
      return VDP_STATUS_INVALID_HANDLE;

   pipe_mutex_lock(vlsurface->device->mutex);
   vlVdpResolveDelayedRendering(vlsurface->device, NULL, NULL);

   res = vlsurface->sampler_view->texture;
   box = RectToPipeBox(source_rect, res);
   map = pipe->transfer_map(pipe, res, 0, PIPE_TRANSFER_READ, &box, &transfer);
   if (!map) {
      pipe_mutex_unlock(vlsurface->device->mutex);
      return VDP_STATUS_RESOURCES;
   }

   util_copy_rect(*destination_data, res->format, *destination_pitches, 0, 0,
                  box.width, box.height, map, transfer->stride, 0, 0);

   pipe_transfer_unmap(pipe, transfer);
   pipe_mutex_unlock(vlsurface->device->mutex);

   return VDP_STATUS_OK;
}
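A pattern shared by all of these examples is worth spelling out: pipe_mutex_lock() has no scoped/RAII counterpart, so every early return (such as the map failure above) must unlock explicitly before leaving the function. A minimal sketch of that shape; my_context, its mutex field and do_work() are hypothetical names used only for illustration:

static enum pipe_error
locked_operation(struct my_context *ctx)
{
   pipe_mutex_lock(ctx->mutex);

   if (!do_work(ctx)) {
      /* Unlock on the error path too; nothing releases the mutex for us. */
      pipe_mutex_unlock(ctx->mutex);
      return PIPE_ERROR;
   }

   pipe_mutex_unlock(ctx->mutex);
   return PIPE_OK;
}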
Example #3
VAStatus
vlVaDestroyImage(VADriverContextP ctx, VAImageID image)
{
   vlVaDriver *drv;
   VAImage  *vaimage;
   VAStatus status;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   vaimage = handle_table_get(drv->htab, image);
   if (!vaimage) {
      pipe_mutex_unlock(drv->mutex);
      return VA_STATUS_ERROR_INVALID_IMAGE;
   }

   handle_table_remove(VL_VA_DRIVER(ctx)->htab, image);
   pipe_mutex_unlock(drv->mutex);
   /* Destroy the backing buffer before freeing the image; reading
    * vaimage->buf after FREE() would be a use-after-free. */
   status = vlVaDestroyBuffer(ctx, vaimage->buf);
   FREE(vaimage);
   return status;
}
Example #4
static void
pb_cache_manager_flush(struct pb_manager *_mgr)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      _pb_cache_buffer_destroy(buf);
      curr = next; 
      next = curr->next;
   }
   pipe_mutex_unlock(mgr->mutex);
   
   assert(mgr->provider->flush);
   if(mgr->provider->flush)
      mgr->provider->flush(mgr->provider);
}
Example #5
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                       struct virgl_hw_res **dres,
                                       struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         pipe_mutex_lock(qdws->mutex);
         virgl_cache_list_check_free(qdws);

         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         pipe_mutex_unlock(qdws->mutex);
      }
   }
   *dres = sres;
}
Example #6
static void
rbug_clear_render_target(struct pipe_context *_pipe,
                         struct pipe_surface *_dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height)
{
   struct rbug_context *rb_pipe = rbug_context(_pipe);
   struct rbug_surface *rb_surface_dst = rbug_surface(_dst);
   struct pipe_context *pipe = rb_pipe->pipe;
   struct pipe_surface *dst = rb_surface_dst->surface;

   pipe_mutex_lock(rb_pipe->call_mutex);
   pipe->clear_render_target(pipe,
                             dst,
                             color,
                             dstx,
                             dsty,
                             width,
                             height);
   pipe_mutex_unlock(rb_pipe->call_mutex);
}
Example #7
static void *
pb_debug_buffer_map(struct pb_buffer *_buf, 
                    unsigned flags)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   void *map;
   
   pb_debug_buffer_check(buf);

   map = pb_map(buf->buffer, flags);
   if (!map)
      return NULL;

   pipe_mutex_lock(buf->mutex);
   ++buf->map_count;
   debug_backtrace_capture(buf->map_backtrace, 1, PB_DEBUG_MAP_BACKTRACE);
   pipe_mutex_unlock(buf->mutex);
   
   return (uint8_t *)map + buf->underflow_size;
}
Example #8
static boolean
stw_st_framebuffer_flush_front(struct st_context_iface *stctx,
                               struct st_framebuffer_iface *stfb,
                               enum st_attachment_type statt)
{
   struct stw_st_framebuffer *stwfb = stw_st_framebuffer(stfb);
   boolean ret;
   HDC hDC;

   pipe_mutex_lock(stwfb->fb->mutex);

   /* We must not cache HDCs anywhere, as they can be invalidated by the
    * application, or screen resolution changes. */

   hDC = GetDC(stwfb->fb->hWnd);

   ret = stw_st_framebuffer_present_locked(hDC, &stwfb->base, statt);

   ReleaseDC(stwfb->fb->hWnd, hDC);

   return ret;
}
Example #9
VAStatus
vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
{
   vlVaDriver *drv;
   vlVaBuffer *buf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!pbuff)
      return VA_STATUS_ERROR_INVALID_PARAMETER;

   pipe_mutex_lock(drv->mutex);
   buf = handle_table_get(drv->htab, buf_id);
   if (!buf || buf->export_refcount > 0) {
      pipe_mutex_unlock(drv->mutex);
      return VA_STATUS_ERROR_INVALID_BUFFER;
   }

   if (buf->derived_surface.resource) {
      *pbuff = pipe_buffer_map(drv->pipe, buf->derived_surface.resource,
                               PIPE_TRANSFER_WRITE,
                               &buf->derived_surface.transfer);
      pipe_mutex_unlock(drv->mutex);

      if (!buf->derived_surface.transfer || !*pbuff)
         return VA_STATUS_ERROR_INVALID_BUFFER;

   } else {
      pipe_mutex_unlock(drv->mutex);
      *pbuff = buf->data;
   }

   return VA_STATUS_SUCCESS;
}
Example #10
void *
debug_malloc(const char *file, unsigned line, const char *function,
             size_t size) 
{
   struct debug_memory_header *hdr;
   struct debug_memory_footer *ftr;
   
   hdr = os_malloc(sizeof(*hdr) + size + sizeof(*ftr));
   if(!hdr) {
      debug_printf("%s:%u:%s: out of memory when trying to allocate %lu bytes\n",
                   file, line, function,
                   (long unsigned)size);
      return NULL;
   }
 
   hdr->no = last_no++;
   hdr->file = file;
   hdr->line = line;
   hdr->function = function;
   hdr->size = size;
   hdr->magic = DEBUG_MEMORY_MAGIC;
   hdr->tag = 0;
#if DEBUG_FREED_MEMORY
   hdr->freed = FALSE;
#endif

#if DEBUG_MEMORY_STACK
   debug_backtrace_capture(hdr->backtrace, 0, DEBUG_MEMORY_STACK);
#endif

   ftr = footer_from_header(hdr);
   ftr->magic = DEBUG_MEMORY_MAGIC;
   
   pipe_mutex_lock(list_mutex);
   LIST_ADDTAIL(&hdr->head, &list);
   pipe_mutex_unlock(list_mutex);
   
   return data_from_header(hdr);
}
Example #11
/**
 * Empty the cache. Useful when there is not enough memory.
 */
void
pb_cache_release_all_buffers(struct pb_cache *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *buf;
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
      struct list_head *cache = &mgr->buckets[i];

      curr = cache->next;
      next = curr->next;
      while (curr != cache) {
         buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
         destroy_buffer_locked(buf);
         curr = next;
         next = curr->next;
      }
   }
   pipe_mutex_unlock(mgr->mutex);
}
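The doc comment says this is meant for low-memory situations. A minimal sketch of how a driver might use it as an allocation fallback; my_screen, its cache member and try_alloc_buffer() are hypothetical names, not part of the original code:

static struct pb_buffer *
alloc_with_cache_fallback(struct my_screen *screen, unsigned size)
{
   struct pb_buffer *buf = try_alloc_buffer(screen, size);

   if (!buf) {
      /* Drop everything the cache is holding on to and retry once. */
      pb_cache_release_all_buffers(&screen->cache);
      buf = try_alloc_buffer(screen, size);
   }
   return buf;
}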
Example #12
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used)
{
   void *ptr;

   ptr = intel_bo_map(bo, false);
   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
   }

   pipe_mutex_lock(winsys->mutex);

   if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
      if (!winsys->decode) {
         pipe_mutex_unlock(winsys->mutex);
         intel_bo_unmap(bo);
         return;
      }

      /* debug_printf()/debug_error() uses stderr by default */
      drm_intel_decode_set_output_file(winsys->decode, stderr);
   }

   /* in dwords */
   used /= 4;

   drm_intel_decode_set_batch_pointer(winsys->decode,
         ptr, gem_bo(bo)->offset64, used);

   drm_intel_decode(winsys->decode);

   pipe_mutex_unlock(winsys->mutex);

   intel_bo_unmap(bo);
}
Example #13
static void
rbug_set_framebuffer_state(struct pipe_context *_pipe,
                           const struct pipe_framebuffer_state *_state)
{
   struct rbug_context *rb_pipe = rbug_context(_pipe);
   struct pipe_context *pipe = rb_pipe->pipe;
   struct pipe_framebuffer_state unwrapped_state;
   struct pipe_framebuffer_state *state = NULL;
   unsigned i;

   /* must protect curr status */
   pipe_mutex_lock(rb_pipe->call_mutex);

   rb_pipe->curr.nr_cbufs = 0;
   memset(rb_pipe->curr.cbufs, 0, sizeof(rb_pipe->curr.cbufs));
   rb_pipe->curr.zsbuf = NULL;

   /* unwrap the input state */
   if (_state) {
      memcpy(&unwrapped_state, _state, sizeof(unwrapped_state));

      rb_pipe->curr.nr_cbufs = _state->nr_cbufs;
      for(i = 0; i < _state->nr_cbufs; i++) {
         unwrapped_state.cbufs[i] = rbug_surface_unwrap(_state->cbufs[i]);
         if (_state->cbufs[i])
            rb_pipe->curr.cbufs[i] = rbug_resource(_state->cbufs[i]->texture);
      }
      unwrapped_state.zsbuf = rbug_surface_unwrap(_state->zsbuf);
      if (_state->zsbuf)
         rb_pipe->curr.zsbuf = rbug_resource(_state->zsbuf->texture);
      state = &unwrapped_state;
   }

   pipe->set_framebuffer_state(pipe,
                               state);

   pipe_mutex_unlock(rb_pipe->call_mutex);
}
Example #14
VAStatus
vlVaQueryConfigAttributes(VADriverContextP ctx, VAConfigID config_id, VAProfile *profile,
                          VAEntrypoint *entrypoint, VAConfigAttrib *attrib_list, int *num_attribs)
{
   vlVaDriver *drv;
   vlVaConfig *config;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);

   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   pipe_mutex_lock(drv->mutex);
   config = handle_table_get(drv->htab, config_id);
   pipe_mutex_unlock(drv->mutex);

   if (!config)
      return VA_STATUS_ERROR_INVALID_CONFIG;

   *profile = PipeToProfile(config->profile);

   if (config->profile == PIPE_VIDEO_PROFILE_UNKNOWN) {
      *entrypoint = VAEntrypointVideoProc;
      *num_attribs = 0;
      return VA_STATUS_SUCCESS;
   }

   *entrypoint = config->entrypoint;

   *num_attribs = 1;
   attrib_list[0].type = VAConfigAttribRTFormat;
   attrib_list[0].value = config->rt_format;

   return VA_STATUS_SUCCESS;
}
Example #15
/** Rasterize all scene's bins */
static void
lp_setup_rasterize_scene( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);

   scene->num_active_queries = setup->active_binned_queries;
   memcpy(scene->active_queries, setup->active_queries,
          scene->num_active_queries * sizeof(scene->active_queries[0]));

   lp_scene_end_binning(scene);

   lp_fence_reference(&setup->last_fence, scene->fence);

   if (setup->last_fence)
      setup->last_fence->issued = TRUE;

   pipe_mutex_lock(screen->rast_mutex);

   /* FIXME: We enqueue the scene then wait on the rasterizer to finish.
    * This means we never actually run any vertex stuff in parallel to
    * rasterization (not in the same context at least) which is what the
    * multiple scenes per setup is about - when we get a new empty scene
    * any old one is already empty again because we waited here for
    * raster tasks to be finished. Ideally, we shouldn't need to wait here
    * and rely on fences elsewhere when waiting is necessary.
    * Certainly, lp_scene_end_rasterization() would need to be deferred too
    * and there's probably other bits why this doesn't actually work.
    */
   lp_rast_queue_scene(screen->rast, scene);
   lp_rast_finish(screen->rast);
   pipe_mutex_unlock(screen->rast_mutex);

   lp_scene_end_rasterization(setup->scene);
   lp_setup_reset( setup );

   LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
}
Example #16
static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
                            struct pipe_transfer *transfer )
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_context *svga = svga_context(pipe);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (svga_buffer_has_hw_storage(sbuf)) {
      svga_buffer_hw_storage_unmap(svga, sbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */

         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   pipe_mutex_unlock(ss->swc_mutex);
   FREE(transfer);
}
Example #17
struct pipe_screen *
virgl_drm_screen_create(int fd)
{
   struct pipe_screen *pscreen = NULL;

   pipe_mutex_lock(virgl_screen_mutex);
   if (!fd_tab) {
      fd_tab = util_hash_table_create(hash_fd, compare_fd);
      if (!fd_tab)
         goto unlock;
   }

   pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
   if (pscreen) {
      virgl_screen(pscreen)->refcnt++;
   } else {
      struct virgl_winsys *vws;
      int dup_fd = dup(fd);

      vws = virgl_drm_winsys_create(dup_fd);

      pscreen = virgl_create_screen(vws);
      if (pscreen) {
         util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);

         /* Bit of a hack, to avoid circular linkage dependency,
          * ie. pipe driver having to call in to winsys, we
          * override the pipe drivers screen->destroy():
          */
         virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
         pscreen->destroy = virgl_drm_screen_destroy;
      }
   }

unlock:
   pipe_mutex_unlock(virgl_screen_mutex);
   return pscreen;
}
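The comment above explains that the pipe driver's destroy hook is overridden and the original stored in winsys_priv. A sketch of what the matching virgl_drm_screen_destroy has to do to keep fd_tab and the refcount consistent under the same mutex; virgl_screen_get_fd() is an assumed accessor for the fd key, and the type of winsys_priv is taken on faith from the assignment above:

static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   pipe_mutex_lock(virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_screen_get_fd(screen); /* assumed accessor */
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
   }
   pipe_mutex_unlock(virgl_screen_mutex);

   if (destroy) {
      /* Restore and invoke the original destroy saved in winsys_priv. */
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}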
Example #18
/**
 * Return a small integer serial number for the given pointer.
 */
static boolean
debug_serial(void *p, unsigned *pserial)
{
   unsigned serial;
   boolean found = TRUE;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(serials_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(serials_mutex);
   if (!serials_hash)
      serials_hash = util_hash_table_create(hash_ptr, compare_ptr);

   serial = (unsigned) (uintptr_t) util_hash_table_get(serials_hash, p);
   if (!serial) {
      /* time to stop logging... (you'll have a 100 GB logfile at least at
       * this point)  TODO: avoid this
       */
      serial = ++serials_last;
      if (!serial) {
         debug_error("More than 2^32 objects detected, aborting.\n");
         os_abort();
      }

      util_hash_table_set(serials_hash, p, (void *) (uintptr_t) serial);
      found = FALSE;
   }
   pipe_mutex_unlock(serials_mutex);

   *pserial = serial;

   return found;
}
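Since debug_serial() maps an arbitrary pointer to a stable small integer and reports whether the pointer was already known, a typical use is tagging log output so the same object can be followed across many lines. A tiny hypothetical helper; log_object_event() is not part of the original code:

static void
log_object_event(void *obj, const char *event)
{
   unsigned serial;
   boolean known = debug_serial(obj, &serial);

   /* Distinguish the first sighting of an object from later ones. */
   debug_printf("%s: obj #%u%s\n", event, serial, known ? "" : " (new)");
}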
Example #19
/**
 * Destroy a VdpVideoMixer.
 */
VdpStatus
vlVdpVideoMixerDestroy(VdpVideoMixer mixer)
{
   vlVdpVideoMixer *vmixer;

   vmixer = vlGetDataHTAB(mixer);
   if (!vmixer)
      return VDP_STATUS_INVALID_HANDLE;

   pipe_mutex_lock(vmixer->device->mutex);

   vlVdpResolveDelayedRendering(vmixer->device, NULL, NULL);

   vlRemoveDataHTAB(mixer);

   vl_compositor_cleanup_state(&vmixer->cstate);

   if (vmixer->deint.filter) {
      vl_deint_filter_cleanup(vmixer->deint.filter);
      FREE(vmixer->deint.filter);
   }

   if (vmixer->noise_reduction.filter) {
      vl_median_filter_cleanup(vmixer->noise_reduction.filter);
      FREE(vmixer->noise_reduction.filter);
   }

   if (vmixer->sharpness.filter) {
      vl_matrix_filter_cleanup(vmixer->sharpness.filter);
      FREE(vmixer->sharpness.filter);
   }
   pipe_mutex_unlock(vmixer->device->mutex);
   DeviceReference(&vmixer->device, NULL);

   FREE(vmixer);

   return VDP_STATUS_OK;
}
Example #20
/** Rasterize all scene's bins */
static void
lp_setup_rasterize_scene( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);

   lp_scene_end_binning(scene);

   lp_fence_reference(&setup->last_fence, scene->fence);

   if (setup->last_fence)
      setup->last_fence->issued = TRUE;

   pipe_mutex_lock(screen->rast_mutex);
   lp_rast_queue_scene(screen->rast, scene);
   lp_rast_finish(screen->rast);
   pipe_mutex_unlock(screen->rast_mutex);

   lp_scene_end_rasterization(setup->scene);
   lp_setup_reset( setup );

   LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
}
Example #21
VAStatus
vlVaBufferInfo(VADriverContextP ctx, VABufferID buf_id, VABufferType *type,
               unsigned int *size, unsigned int *num_elements)
{
   vlVaDriver *drv;
   vlVaBuffer *buf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   buf = handle_table_get(drv->htab, buf_id);
   pipe_mutex_unlock(drv->mutex);
   if (!buf)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   *type = buf->type;
   *size = buf->size;
   *num_elements = buf->num_elements;

   return VA_STATUS_SUCCESS;
}
Example #22
static void
rbug_set_constant_buffer(struct pipe_context *_pipe,
                         uint shader,
                         uint index,
                         struct pipe_constant_buffer *_cb)
{
   struct rbug_context *rb_pipe = rbug_context(_pipe);
   struct pipe_context *pipe = rb_pipe->pipe;
   struct pipe_constant_buffer cb;

   /* XXX hmm? unwrap the input state */
   if (_cb) {
      cb = *_cb;
      cb.buffer = rbug_resource_unwrap(_cb->buffer);
   }

   pipe_mutex_lock(rb_pipe->call_mutex);
   pipe->set_constant_buffer(pipe,
                             shader,
                             index,
                             _cb ? &cb : NULL);
   pipe_mutex_unlock(rb_pipe->call_mutex);
}
Example #23
VAStatus
vlVaDestroySurfaces(VADriverContextP ctx, VASurfaceID *surface_list, int num_surfaces)
{
   vlVaDriver *drv;
   int i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   for (i = 0; i < num_surfaces; ++i) {
      vlVaSurface *surf = handle_table_get(drv->htab, surface_list[i]);
      if (!surf) {
         /* Invalid handle: unlock before bailing out. */
         pipe_mutex_unlock(drv->mutex);
         return VA_STATUS_ERROR_INVALID_SURFACE;
      }
      if (surf->buffer)
         surf->buffer->destroy(surf->buffer);
      util_dynarray_fini(&surf->subpics);
      FREE(surf);
      handle_table_remove(drv->htab, surface_list[i]);
   }
   pipe_mutex_unlock(drv->mutex);

   return VA_STATUS_SUCCESS;
}
Example #24
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
   FREE(bo);
}
Example #25
static void
pb_debug_manager_dump(struct pb_debug_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_debug_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
      
   curr = mgr->list.next;
   next = curr->next;
   while(curr != &mgr->list) {
      buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);

      debug_printf("buffer = %p\n", (void *) buf);
      debug_printf("    .size = 0x%x\n", buf->base.base.size);
      debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
      
      curr = next; 
      next = curr->next;
   }

   pipe_mutex_unlock(mgr->mutex);
}
Example #26
static void
rbug_set_sampler_views(struct pipe_context *_pipe,
                       unsigned shader,
                       unsigned start,
                       unsigned num,
                       struct pipe_sampler_view **_views)
{
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_sampler_view *unwrapped_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
    struct pipe_sampler_view **views = NULL;
    unsigned i;

    assert(start == 0); /* XXX fix */

    /* must protect curr status */
    pipe_mutex_lock(rb_pipe->call_mutex);

    rb_pipe->curr.num_views[shader] = 0;
    memset(rb_pipe->curr.views[shader], 0, sizeof(rb_pipe->curr.views[shader]));
    memset(rb_pipe->curr.texs[shader], 0, sizeof(rb_pipe->curr.texs[shader]));
    memset(unwrapped_views, 0, sizeof(unwrapped_views));

    if (_views) {
        rb_pipe->curr.num_views[shader] = num;
        for (i = 0; i < num; i++) {
            rb_pipe->curr.views[shader][i] = rbug_sampler_view(_views[i]);
            rb_pipe->curr.texs[shader][i] = rbug_resource(_views[i] ? _views[i]->texture : NULL);
            unwrapped_views[i] = rbug_sampler_view_unwrap(_views[i]);
        }
        views = unwrapped_views;
    }

    pipe->set_sampler_views(pipe, shader, start, num, views);

    pipe_mutex_unlock(rb_pipe->call_mutex);
}
Example #27
/**
 * Return pointer to next bin to be rendered.
 * The lp_scene::curr_x and ::curr_y fields will be advanced.
 * Multiple rendering threads will call this function to get a chunk
 * of work (a bin) to work on.
 */
struct cmd_bin *
lp_scene_bin_iter_next( struct lp_scene *scene )
{
   struct cmd_bin *bin = NULL;

   pipe_mutex_lock(scene->mutex);

   if (scene->curr_x < 0) {
      /* first bin */
      scene->curr_x = 0;
      scene->curr_y = 0;
   }
   else if (!next_bin(scene)) {
      /* no more bins left */
      goto end;
   }

   bin = lp_scene_get_bin(scene, scene->curr_x, scene->curr_y);

end:
   /*printf("return bin %p at %d, %d\n", (void *) bin, *bin_x, *bin_y);*/
   pipe_mutex_unlock(scene->mutex);
   return bin;
}
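The doc comment describes several rasterizer threads pulling bins as work items. A sketch of such a worker loop against the signature shown above; rasterize_bin() stands in for the real per-bin rasterization code:

static void
rasterize_scene_worker(struct lp_scene *scene)
{
   struct cmd_bin *bin;

   /* Each call hands out the next untaken bin; NULL means the scene
    * is exhausted and this thread can stop. */
   while ((bin = lp_scene_bin_iter_next(scene)) != NULL)
      rasterize_bin(scene, bin);
}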
Example #28
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
Example #29
BOOL APIENTRY
DrvShareLists(
    DHGLRC dhglrc1,
    DHGLRC dhglrc2 )
{
    struct stw_context *ctx1;
    struct stw_context *ctx2;
    BOOL ret = FALSE;

    if (!stw_dev)
        return FALSE;

    pipe_mutex_lock( stw_dev->ctx_mutex );

    ctx1 = stw_lookup_context_locked( dhglrc1 );
    ctx2 = stw_lookup_context_locked( dhglrc2 );

    if (ctx1 && ctx2 && ctx2->st->share)
        ret = ctx2->st->share(ctx2->st, ctx1->st);

    pipe_mutex_unlock( stw_dev->ctx_mutex );

    return ret;
}
Example #30
bool
hsp_copy_context(uint64 srcCtxId, uint64 dstCtxId, uint mask)
{
	TRACE("%s(src: %llu dst: %llu mask: %u)\n", __FUNCTION__,
		(unsigned long long)srcCtxId, (unsigned long long)dstCtxId, mask);
	struct hsp_context *src;
	struct hsp_context *dst;
	bool ret = false;

	pipe_mutex_lock(hsp_dev->mutex);

	src = hsp_lookup_context(srcCtxId);
	dst = hsp_lookup_context(dstCtxId);

	if (src && dst) {
		#warning Implement copying context!
		(void)src;
		(void)dst;
		(void)mask;
	}

	pipe_mutex_unlock(hsp_dev->mutex);
	return ret;
}