static boolean
svga_fence_signalled(struct pipe_screen *screen,
                     struct pipe_fence_handle *fence)
{
   struct svga_winsys_screen *sws = svga_screen(screen)->sws;
   return sws->fence_signalled(sws, fence, 0) == 0;
}
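
These fence callbacks are not called directly; they are installed on the Gallium pipe_screen vtable when the screen is created. A minimal sketch of that wiring, assuming the older Gallium interface used throughout these examples (where pipe_screen still carries a fence_signalled hook) and a hypothetical helper name:

/*
 * Sketch only -- assumed wiring, not copied from the source.  In
 * upstream Mesa the equivalent assignments live in svga_screen_create().
 */
static void
svga_screen_init_fence_functions(struct pipe_screen *screen)   /* hypothetical helper */
{
   screen->fence_reference = svga_fence_reference;   /* see Example #14 */
   screen->fence_signalled = svga_fence_signalled;
   screen->fence_finish    = svga_fence_finish;      /* see Examples #8 and #22 */
}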
Example #2
boolean svga_init_swtnl( struct svga_context *svga )
{
   struct svga_screen *screen = svga_screen(svga->pipe.screen);

   svga->swtnl.backend = svga_vbuf_render_create(svga);
   if(!svga->swtnl.backend)
      goto fail;

   /*
    * Create drawing context and plug our rendering stage into it.
    */
   svga->swtnl.draw = draw_create(&svga->pipe);
   if (svga->swtnl.draw == NULL)
      goto fail;


   draw_set_rasterize_stage(svga->swtnl.draw, 
                            draw_vbuf_stage( svga->swtnl.draw, svga->swtnl.backend ));

   draw_set_render(svga->swtnl.draw, svga->swtnl.backend);

   svga->blitter = util_blitter_create(&svga->pipe);
   if (!svga->blitter)
      goto fail;

   /* must be done before installing Draw stages */
   util_blitter_cache_all_shaders(svga->blitter);

   if (!screen->haveLineSmooth)
      draw_install_aaline_stage(svga->swtnl.draw, &svga->pipe);

   /* enable/disable line stipple stage depending on device caps */
   draw_enable_line_stipple(svga->swtnl.draw, !screen->haveLineStipple);

   /* always install AA point stage */
   draw_install_aapoint_stage(svga->swtnl.draw, &svga->pipe);

   /* Set wide line threshold above device limit (so we'll never really use it)
    */
   draw_wide_line_threshold(svga->swtnl.draw,
                            MAX2(screen->maxLineWidth,
                                 screen->maxLineWidthAA));

   if (debug_get_bool_option("SVGA_SWTNL_FSE", FALSE))
      draw_set_driver_clipping(svga->swtnl.draw, TRUE, TRUE, TRUE, FALSE);

   return TRUE;

fail:
   if (svga->blitter)
      util_blitter_destroy(svga->blitter);

   if (svga->swtnl.backend)
      svga->swtnl.backend->destroy( svga->swtnl.backend );

   if (svga->swtnl.draw)
      draw_destroy( svga->swtnl.draw );

   return FALSE;
}
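
For context, svga_init_swtnl() runs once while the context is being created, and whatever it sets up (the vbuf backend, the draw module and the blitter) is released again when the context goes away. A hedged sketch of that pairing, using hypothetical helper names and assuming svga_destroy_swtnl() exists as in upstream Mesa:

/*
 * Sketch only -- assumed call sites, modelled on svga_context_create()
 * and the context destroy path in upstream Mesa.
 */
static boolean
context_init_fallback_paths(struct svga_context *svga)    /* hypothetical */
{
   /* sets up swtnl.backend, swtnl.draw and the blitter, or cleans up on failure */
   return svga_init_swtnl(svga);
}

static void
context_destroy_fallback_paths(struct svga_context *svga) /* hypothetical */
{
   /* tears down what svga_init_swtnl() created */
   svga_destroy_swtnl(svga);
}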
Example #3
static void
svga_buffer_destroy(struct pipe_screen *screen,
                    struct pipe_resource *buf)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf = svga_buffer(buf);

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->dma.pending);

   if (sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if (sbuf->uploaded.buffer)
      pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

   if (sbuf->hwbuf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if (sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   pipe_resource_reference(&sbuf->translated_indices.buffer, NULL);

   ss->hud.total_resource_bytes -= sbuf->size;
   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;

   FREE(sbuf);
}
static void 
svga_buffer_unmap( struct pipe_screen *screen,
                   struct pipe_buffer *buf)
{
   struct svga_screen *ss = svga_screen(screen); 
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   
   pipe_mutex_lock(ss->swc_mutex);
   
   assert(sbuf->map.count);
   if(sbuf->map.count)
      --sbuf->map.count;

   if(sbuf->hwbuf)
      sws->buffer_unmap(sws, sbuf->hwbuf);

   if(sbuf->map.writing) {
      if(!sbuf->map.flush_explicit) {
         /* No mapped range was flushed -- flush the whole buffer */
         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
   
         svga_buffer_add_range(sbuf, 0, sbuf->base.size);
      }
      
      sbuf->map.writing = FALSE;
      sbuf->map.flush_explicit = FALSE;
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
Example #5
/**
 * Check whether we can blit using the intra_surface_copy command.
 */
static bool
can_blit_via_intra_surface_copy(struct svga_context *svga,
                                const struct pipe_blit_info *blit_info)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_texture *dtex, *stex;

   if (!svga_have_vgpu10(svga))
      return false;

   /* src surface cannot be multisample */
   if (blit_info->src.resource->nr_samples > 1)
      return false;

   if (!sws->have_intra_surface_copy)
      return false;

   if (svga->render_condition && blit_info->render_condition_enable)
      return false;

   if (blit_info->src.level != blit_info->dst.level)
      return false;

   if (has_layer_face_index_in_z(blit_info->src.resource->target)){
      if (blit_info->src.box.z != blit_info->dst.box.z)
         return false;
   }

   stex = svga_texture(blit_info->src.resource);
   dtex = svga_texture(blit_info->dst.resource);

   return (stex->handle == dtex->handle);
}
static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_texture *tex = svga_texture(pt);

   ss->texture_timestamp++;

   svga_sampler_view_reference(&tex->cached_view, NULL);

   /*
     DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
   */
   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
   svga_screen_surface_destroy(ss, &tex->key, &tex->handle);

   /* Destroy the backed surface handle if exists */
   if (tex->backed_handle)
      svga_screen_surface_destroy(ss, &tex->backed_key, &tex->backed_handle);

   ss->hud.total_resource_bytes -= tex->size;

   FREE(tex->defined);
   FREE(tex->rendered_to);
   FREE(tex->dirty);
   FREE(tex);

   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;
}
static float
svga_get_paramf(struct pipe_screen *screen, enum pipe_capf param)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   SVGA3dDevCapResult result;

   switch (param) {
   case PIPE_CAPF_MAX_LINE_WIDTH:
      /* fall-through */
   case PIPE_CAPF_MAX_LINE_WIDTH_AA:
      return 7.0;

   case PIPE_CAPF_MAX_POINT_WIDTH:
      /* fall-through */
   case PIPE_CAPF_MAX_POINT_WIDTH_AA:
      return svgascreen->maxPointSize;

   case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
      if(!sws->get_cap(sws, SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY, &result))
         return 4.0f;
      return (float) result.u;

   case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
      return 15.0;
   case PIPE_CAPF_GUARD_BAND_LEFT:
   case PIPE_CAPF_GUARD_BAND_TOP:
   case PIPE_CAPF_GUARD_BAND_RIGHT:
   case PIPE_CAPF_GUARD_BAND_BOTTOM:
      return 0.0;
   }

   debug_printf("Unexpected PIPE_CAPF_ query %u\n", param);
   return 0;
}
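
Callers reach this through the standard pipe_screen::get_paramf hook rather than calling the static function directly. A small, hedged usage sketch (the helper name is made up; MIN2 is the usual Mesa macro from u_math.h):

/*
 * Sketch only -- assumed caller: clamp a requested anisotropy level to
 * whatever the device reports through get_paramf().
 */
static float
clamp_max_anisotropy(struct pipe_screen *screen, float requested)   /* hypothetical */
{
   float max_aniso =
      screen->get_paramf(screen, PIPE_CAPF_MAX_TEXTURE_ANISOTROPY);
   return MIN2(requested, max_aniso);
}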
Example #8
static boolean
svga_fence_finish(struct pipe_screen *screen,
                  struct pipe_context *ctx,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct svga_winsys_screen *sws = svga_screen(screen)->sws;
   boolean retVal;

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_FENCEFINISH);

   if (!timeout) {
      retVal = sws->fence_signalled(sws, fence, 0) == 0;
   }
   else {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "%s fence_ptr %p\n",
               __FUNCTION__, fence);

      retVal = sws->fence_finish(sws, fence, 0) == 0;
   }

   SVGA_STATS_TIME_POP(sws);

   return retVal;
}
Example #9
static void *
svga_create_fs_state(struct pipe_context *pipe,
                     const struct pipe_shader_state *templ)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_fragment_shader *fs;

   fs = CALLOC_STRUCT(svga_fragment_shader);
   if (!fs)
      return NULL;

   fs->base.tokens = tgsi_dup_tokens(templ->tokens);

   /* Collect basic info that we'll need later:
    */
   tgsi_scan_shader(fs->base.tokens, &fs->base.info);

   fs->base.id = svga->debug.shader_id++;
   fs->base.use_sm30 = svgascreen->use_ps30;
   
   if (SVGA_DEBUG & DEBUG_TGSI || 0) {
      debug_printf("%s id: %u, inputs: %u, outputs: %u\n",
                   __FUNCTION__, fs->base.id,
                   fs->base.info.num_inputs, fs->base.info.num_outputs);
   }

   return fs;
}
Example #10
static float
svga_get_paramf(struct pipe_screen *screen, enum pipe_capf param)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   SVGA3dDevCapResult result;

   switch (param) {
   case PIPE_CAPF_MAX_LINE_WIDTH:
      /* fall-through */
   case PIPE_CAPF_MAX_LINE_WIDTH_AA:
      return 7.0;

   case PIPE_CAPF_MAX_POINT_WIDTH:
      /* fall-through */
   case PIPE_CAPF_MAX_POINT_WIDTH_AA:
      /* Keep this to a reasonable size to avoid failures in
       * conform/pntaa.c:
       */
      return SVGA_MAX_POINTSIZE;

   case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
      if(!sws->get_cap(sws, SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY, &result))
         return 4.0;
      return result.u;

   case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
      return 16.0;

   default:
      return 0;
   }
}
/**
 * unmap DMA transfer request
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   if (st->hwbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (st->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   }

   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}
Example #12
static void
svga_texture_transfer_destroy(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
    struct svga_context *svga = svga_context(pipe);
    struct svga_texture *tex = svga_texture(transfer->resource);
    struct svga_screen *ss = svga_screen(pipe->screen);
    struct svga_winsys_screen *sws = ss->sws;
    struct svga_transfer *st = svga_transfer(transfer);

    if (st->base.usage & PIPE_TRANSFER_WRITE) {
        SVGA3dSurfaceDMAFlags flags;

        memset(&flags, 0, sizeof flags);
        if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
            flags.discard = TRUE;
        }
        if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
            flags.unsynchronized = TRUE;
        }

        svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
        ss->texture_timestamp++;
        tex->view_age[transfer->level] = ++(tex->age);
        if (transfer->resource->target == PIPE_TEXTURE_CUBE)
            tex->defined[transfer->box.z][transfer->level] = TRUE;
        else
            tex->defined[0][transfer->level] = TRUE;
    }

    pipe_resource_reference(&st->base.resource, NULL);
    FREE(st->swbuf);
    sws->buffer_destroy(sws, st->hwbuf);
    FREE(st);
}
/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
{
   assert(!sbuf->user);
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen),
                                          sbuf);
      if (ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if (!sbuf->map.count) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}
Example #14
File: svga_screen.c  Project: gqmelo/mesa
static void
svga_fence_reference(struct pipe_screen *screen,
                     struct pipe_fence_handle **ptr,
                     struct pipe_fence_handle *fence)
{
   struct svga_winsys_screen *sws = svga_screen(screen)->sws;
   sws->fence_reference(sws, ptr, fence);
}
Example #15
static int
svga_fence_get_fd(struct pipe_screen *screen,
                  struct pipe_fence_handle *fence)
{
   struct svga_winsys_screen *sws = svga_screen(screen)->sws;

   return sws->fence_get_fd(sws, fence, TRUE);
}
Example #16
static enum pipe_error
emit_framebuffer( struct svga_context *svga,
                  unsigned dirty )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   const struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   boolean reemit = svga->rebind.rendertargets;
   unsigned i;
   enum pipe_error ret;

   /*
    * We need to reemit non-null surface bindings, even when they are not
    * dirty, to ensure that the resources are paged in.
    */

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if (curr->cbufs[i] != hw->cbufs[i] ||
          (reemit && hw->cbufs[i])) {
         if (svga->curr.nr_fbs++ > MAX_RT_PER_BATCH)
            return PIPE_ERROR_OUT_OF_MEMORY;

         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_COLOR0 + i,
                                      curr->cbufs[i]);
         if (ret != PIPE_OK)
            return ret;

         pipe_surface_reference(&hw->cbufs[i], curr->cbufs[i]);
      }
   }

   if (curr->zsbuf != hw->zsbuf ||
       (reemit && hw->zsbuf)) {
      ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_DEPTH, curr->zsbuf);
      if (ret != PIPE_OK)
         return ret;

      if (curr->zsbuf &&
          curr->zsbuf->format == PIPE_FORMAT_S8_UINT_Z24_UNORM) {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL,
                                      curr->zsbuf);
         if (ret != PIPE_OK)
            return ret;
      }
      else {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL, NULL);
         if (ret != PIPE_OK)
            return ret;
      }

      pipe_surface_reference(&hw->zsbuf, curr->zsbuf);
   }

   svga->rebind.rendertargets = FALSE;

   return PIPE_OK;
}
Example #17
void svga_context_flush( struct svga_context *svga, 
                         struct pipe_fence_handle **pfence )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_fence_handle *fence = NULL;
   uint64_t t0;

   svga->curr.nr_fbs = 0;

   /* Ensure that texture dma uploads are processed
    * before submitting commands.
    */
   svga_context_flush_buffers(svga);

   svga->hud.command_buffer_size +=
      svga->swc->get_command_buffer_size(svga->swc);

   /* Flush pending commands to hardware:
    */
   t0 = os_time_get();
   svga->swc->flush(svga->swc, &fence);
   svga->hud.flush_time += (os_time_get() - t0);

   svga->hud.num_flushes++;

   svga_screen_cache_flush(svgascreen, fence);

   /* To force the re-emission of rendertargets and texture sampler bindings on
    * the next command buffer.
    */
   svga->rebind.flags.rendertargets = TRUE;
   svga->rebind.flags.texture_samplers = TRUE;

   if (svga_have_gb_objects(svga)) {

      svga->rebind.flags.constbufs = TRUE;
      svga->rebind.flags.vs = TRUE;
      svga->rebind.flags.fs = TRUE;
      svga->rebind.flags.gs = TRUE;

      if (svga_need_to_rebind_resources(svga)) {
         svga->rebind.flags.query = TRUE;
      }
   }

   if (SVGA_DEBUG & DEBUG_SYNC) {
      if (fence)
         svga->pipe.screen->fence_finish( svga->pipe.screen, fence,
                                          PIPE_TIMEOUT_INFINITE);
   }

   if (pfence)
      svgascreen->sws->fence_reference(svgascreen->sws, pfence, fence);

   svgascreen->sws->fence_reference(svgascreen->sws, &fence, NULL);
}
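
A typical caller pattern around svga_context_flush() is: flush, optionally block on the returned fence, then drop the reference -- the same fence handling that svga_get_query_result() below uses. A hedged sketch with a hypothetical helper name:

/*
 * Sketch only -- assumed caller: submit all pending commands and wait
 * for the GPU to finish them.
 */
static void
svga_flush_and_wait(struct svga_context *svga)   /* hypothetical */
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_fence_handle *fence = NULL;

   svga_context_flush(svga, &fence);         /* submit the command buffer */
   if (fence)
      sws->fence_finish(sws, fence, 0);      /* block until signalled */
   sws->fence_reference(sws, &fence, NULL);  /* release our reference */
}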
Example #18
static enum pipe_error
emit_fb_vgpu9(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   const struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   boolean reemit = svga->rebind.flags.rendertargets;
   unsigned i;
   enum pipe_error ret;

   assert(!svga_have_vgpu10(svga));

   /*
    * We need to reemit non-null surface bindings, even when they are not
    * dirty, to ensure that the resources are paged in.
    */

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if ((curr->cbufs[i] != hw->cbufs[i]) || (reemit && hw->cbufs[i])) {
         if (svga->curr.nr_fbs++ > MAX_RT_PER_BATCH)
            return PIPE_ERROR_OUT_OF_MEMORY;

         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_COLOR0 + i,
                                      curr->cbufs[i]);
         if (ret != PIPE_OK)
            return ret;

         pipe_surface_reference(&hw->cbufs[i], curr->cbufs[i]);
      }
   }

   if ((curr->zsbuf != hw->zsbuf) || (reemit && hw->zsbuf)) {
      ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_DEPTH, curr->zsbuf);
      if (ret != PIPE_OK)
         return ret;

      if (curr->zsbuf &&
          util_format_is_depth_and_stencil(curr->zsbuf->format)) {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL,
                                      curr->zsbuf);
         if (ret != PIPE_OK)
            return ret;
      }
      else {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL, NULL);
         if (ret != PIPE_OK)
            return ret;
      }

      pipe_surface_reference(&hw->zsbuf, curr->zsbuf);
   }

   return PIPE_OK;
}
Example #19
/**
 * All drawing filters down into this function, either directly
 * on the hardware path or after doing software vertex processing.
 */
enum pipe_error
svga_hwtnl_prim(struct svga_hwtnl *hwtnl,
                const SVGA3dPrimitiveRange * range,
                unsigned vcount,
                unsigned min_index,
                unsigned max_index, struct pipe_resource *ib,
                unsigned start_instance, unsigned instance_count)
{
   enum pipe_error ret = PIPE_OK;

   SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLPRIM);

   if (svga_have_vgpu10(hwtnl->svga)) {
      /* draw immediately */
      ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                        start_instance, instance_count);
      if (ret != PIPE_OK) {
         svga_context_flush(hwtnl->svga, NULL);
         ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                           start_instance, instance_count);
         assert(ret == PIPE_OK);
      }
   }
   else {
      /* batch up drawing commands */
#ifdef DEBUG
      check_draw_params(hwtnl, range, min_index, max_index, ib);
      assert(start_instance == 0);
      assert(instance_count <= 1);
#else
      (void) check_draw_params;
#endif

      if (hwtnl->cmd.prim_count + 1 >= QSZ) {
         ret = svga_hwtnl_flush(hwtnl);
         if (ret != PIPE_OK)
            goto done;
      }

      /* min/max indices are relative to bias */
      hwtnl->cmd.min_index[hwtnl->cmd.prim_count] = min_index;
      hwtnl->cmd.max_index[hwtnl->cmd.prim_count] = max_index;

      hwtnl->cmd.prim[hwtnl->cmd.prim_count] = *range;
      hwtnl->cmd.prim[hwtnl->cmd.prim_count].indexBias += hwtnl->index_bias;

      pipe_resource_reference(&hwtnl->cmd.prim_ib[hwtnl->cmd.prim_count], ib);
      hwtnl->cmd.prim_count++;
   }

done:
   SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
   return ret;
}
static void *
svga_buffer_map_range( struct pipe_screen *screen,
                       struct pipe_buffer *buf,
                       unsigned offset, unsigned length,
                       unsigned usage )
{
   struct svga_screen *ss = svga_screen(screen); 
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer( buf );
   void *map;

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */

         debug_printf("%s: failed to allocate %u KB of DMA, splitting DMA transfers\n",
                      __FUNCTION__,
                      (sbuf->base.size + 1023)/1024);

         sbuf->swbuf = align_malloc(sbuf->base.size, sbuf->base.alignment);
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      map = sws->buffer_map(sws, sbuf->hwbuf, usage);
   }
   else {
      map = NULL;
   }

   if(map) {
      pipe_mutex_lock(ss->swc_mutex);

      ++sbuf->map.count;

      if (usage & PIPE_BUFFER_USAGE_CPU_WRITE) {
         assert(sbuf->map.count <= 1);
         sbuf->map.writing = TRUE;
         if (usage & PIPE_BUFFER_USAGE_FLUSH_EXPLICIT)
            sbuf->map.flush_explicit = TRUE;
      }
      
      pipe_mutex_unlock(ss->swc_mutex);
   }
   
   return map;
}
Example #21
/* XXX: Still implementing this as if it was a screen function, but
 * can now modify it to queue transfers on the context.
 */
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
    struct svga_screen *ss = svga_screen(pipe->screen);
    struct svga_winsys_screen *sws = ss->sws;
    struct svga_transfer *st = svga_transfer(transfer);

    if(!st->swbuf)
        sws->buffer_unmap(sws, st->hwbuf);
}
Example #22
static boolean
svga_fence_finish(struct pipe_screen *screen,
                  struct pipe_fence_handle *fence,
                  uint64_t timeout)
{
   struct svga_winsys_screen *sws = svga_screen(screen)->sws;

   SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "%s fence_ptr %p\n",
            __FUNCTION__, fence);

   return sws->fence_finish(sws, fence, 0) == 0;
}
Example #23
static void svga_destroy_query(struct pipe_context *pipe,
                               struct pipe_query *q)
{
   struct svga_screen *svgascreen = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_query *sq = svga_query( q );

   SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
   sws->buffer_destroy(sws, sq->hwbuf);
   sws->fence_reference(sws, &sq->fence, NULL);
   FREE(sq);
}
Example #24
void
svga_mark_surfaces_dirty(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   unsigned i;

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if (svga->curr.framebuffer.cbufs[i])
         svga_mark_surface_dirty(svga->curr.framebuffer.cbufs[i]);
   }
   if (svga->curr.framebuffer.zsbuf)
      svga_mark_surface_dirty(svga->curr.framebuffer.zsbuf);
}
Example #25
File: svga_screen.c  Project: gqmelo/mesa
static int
svga_get_shader_param(struct pipe_screen *screen, unsigned shader,
                      enum pipe_shader_cap param)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   if (sws->have_vgpu10) {
      return vgpu10_get_shader_param(screen, shader, param);
   }
   else {
      return vgpu9_get_shader_param(screen, shader, param);
   }
}
Example #26
/**
 * Emit extra constants needed for point sprite emulation.
 */
static unsigned
svga_get_pt_sprite_constants(struct svga_context *svga, float **dest)
{
   struct svga_screen *screen = svga_screen(svga->pipe.screen);
   float *dst = *dest;

   dst[0] = 1.0 / (svga->curr.viewport.scale[0] * 2);
   dst[1] = 1.0 / (svga->curr.viewport.scale[1] * 2);
   dst[2] = svga->curr.rast->pointsize;
   dst[3] = screen->maxPointSize;
   *dest = *dest + 4;
   return 1;
}
Example #27
File: svga_screen.c  Project: gqmelo/mesa
static void
svga_destroy_screen( struct pipe_screen *screen )
{
   struct svga_screen *svgascreen = svga_screen(screen);
   
   svga_screen_cache_cleanup(svgascreen);

   pipe_mutex_destroy(svgascreen->swc_mutex);
   pipe_mutex_destroy(svgascreen->tex_mutex);

   svgascreen->sws->destroy(svgascreen->sws);
   
   FREE(svgascreen);
}
Example #28
static boolean svga_get_query_result(struct pipe_context *pipe, 
                                     struct pipe_query *q,
                                     boolean wait,
                                     uint64_t *result)
{
   struct svga_context *svga = svga_context( pipe );
   struct svga_screen *svgascreen = svga_screen( pipe->screen );
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_query *sq = svga_query( q );
   SVGA3dQueryState state;
   
   SVGA_DBG(DEBUG_QUERY, "%s wait: %d\n", __FUNCTION__);

   /* The query status won't be updated by the host unless 
    * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause a 
    * synchronous wait on the host */
   if(!sq->fence) {
      enum pipe_error ret;

      ret = SVGA3D_WaitForQuery( svga->swc, sq->type, sq->hwbuf);
      if(ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_WaitForQuery( svga->swc, sq->type, sq->hwbuf);
         assert(ret == PIPE_OK);
      }
   
      svga_context_flush(svga, &sq->fence);
      
      assert(sq->fence);
   }

   state = sq->queryResult->state;
   if(state == SVGA3D_QUERYSTATE_PENDING) {
      if(!wait)
         return FALSE;
   
      sws->fence_finish(sws, sq->fence, 0);
      
      state = sq->queryResult->state;
   }

   assert(state == SVGA3D_QUERYSTATE_SUCCEEDED || 
          state == SVGA3D_QUERYSTATE_FAILED);
   
   *result = (uint64_t)sq->queryResult->result32;

   SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, (unsigned)*result);

   return TRUE;
}
Example #29
static void
svga_tex_surface_destroy(struct pipe_surface *surf)
{
   struct svga_surface *s = svga_surface(surf);
   struct svga_texture *t = svga_texture(surf->texture);
   struct svga_screen *ss = svga_screen(surf->texture->screen);

   if(s->handle != t->handle) {
      SVGA_DBG(DEBUG_DMA, "unref sid %p (tex surface)\n", s->handle);
      svga_screen_surface_destroy(ss, &s->key, &s->handle);
   }

   pipe_resource_reference(&surf->texture, NULL);
   FREE(surf);
}
Example #30
void svga_cleanup_framebuffer(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   unsigned i;

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      pipe_surface_reference(&curr->cbufs[i], NULL);
      pipe_surface_reference(&hw->cbufs[i], NULL);
   }

   pipe_surface_reference(&curr->zsbuf, NULL);
   pipe_surface_reference(&hw->zsbuf, NULL);
}