Example #1
0
static inline unsigned
svga_translate_blend_factor(const struct svga_context *svga, unsigned factor)
{
    /* Note: there is no SVGA3D_BLENDOP_[INV]BLENDFACTORALPHA before VGPU10,
     * so we can't translate PIPE_BLENDFACTOR_[INV_]CONST_ALPHA exactly on
     * older devices (see the fallbacks below).
     */
    switch (factor) {
    case PIPE_BLENDFACTOR_ZERO:
        return SVGA3D_BLENDOP_ZERO;
    case PIPE_BLENDFACTOR_SRC_ALPHA:
        return SVGA3D_BLENDOP_SRCALPHA;
    case PIPE_BLENDFACTOR_ONE:
        return SVGA3D_BLENDOP_ONE;
    case PIPE_BLENDFACTOR_SRC_COLOR:
        return SVGA3D_BLENDOP_SRCCOLOR;
    case PIPE_BLENDFACTOR_INV_SRC_COLOR:
        return SVGA3D_BLENDOP_INVSRCCOLOR;
    case PIPE_BLENDFACTOR_DST_COLOR:
        return SVGA3D_BLENDOP_DESTCOLOR;
    case PIPE_BLENDFACTOR_INV_DST_COLOR:
        return SVGA3D_BLENDOP_INVDESTCOLOR;
    case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
        return SVGA3D_BLENDOP_INVSRCALPHA;
    case PIPE_BLENDFACTOR_DST_ALPHA:
        return SVGA3D_BLENDOP_DESTALPHA;
    case PIPE_BLENDFACTOR_INV_DST_ALPHA:
        return SVGA3D_BLENDOP_INVDESTALPHA;
    case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
        return SVGA3D_BLENDOP_SRCALPHASAT;
    case PIPE_BLENDFACTOR_CONST_COLOR:
        return SVGA3D_BLENDOP_BLENDFACTOR;
    case PIPE_BLENDFACTOR_INV_CONST_COLOR:
        return SVGA3D_BLENDOP_INVBLENDFACTOR;
    case PIPE_BLENDFACTOR_CONST_ALPHA:
        if (svga_have_vgpu10(svga))
            return SVGA3D_BLENDOP_BLENDFACTORALPHA;
        else
            return SVGA3D_BLENDOP_BLENDFACTOR; /* as close as we can get */
    case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
        if (svga_have_vgpu10(svga))
            return SVGA3D_BLENDOP_INVBLENDFACTORALPHA;
        else
            return SVGA3D_BLENDOP_INVBLENDFACTOR; /* as close as we can get */
    case PIPE_BLENDFACTOR_SRC1_COLOR:
        return SVGA3D_BLENDOP_SRC1COLOR;
    case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
        return SVGA3D_BLENDOP_INVSRC1COLOR;
    case PIPE_BLENDFACTOR_SRC1_ALPHA:
        return SVGA3D_BLENDOP_SRC1ALPHA;
    case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
        return SVGA3D_BLENDOP_INVSRC1ALPHA;
    case 0:
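        /* A factor of 0 means "unspecified" (the PIPE_BLENDFACTOR_* values
         * start at 1), so treat it like ONE. */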
        return SVGA3D_BLENDOP_ONE;
    default:
        assert(0);
        return SVGA3D_BLENDOP_ZERO;
    }
}
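
/* A minimal usage sketch (not part of the driver source): translating the
 * RGB blend factors of one gallium render-target blend state with the helper
 * above. The pipe_rt_blend_state fields are real gallium state; the helper
 * name and output parameters are purely illustrative. */
static void
sketch_translate_rt_blend_factors(const struct svga_context *svga,
                                  const struct pipe_rt_blend_state *rt,
                                  unsigned *src_blend, unsigned *dst_blend)
{
   /* each gallium factor maps to one SVGA3D_BLENDOP_* value */
   *src_blend = svga_translate_blend_factor(svga, rt->rgb_src_factor);
   *dst_blend = svga_translate_blend_factor(svga, rt->rgb_dst_factor);
}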
/**
 * \brief Clear render target pipe callback
 *
 * \param pipe[in]  The pipe context
 * \param dst[in]  The surface to clear
 * \param color[in]  Clear color
 * \param dstx[in]  Clear region left
 * \param dsty[in]  Clear region top
 * \param width[in]  Clear region width
 * \param height[in]  Clear region height
 * \param render_condition_enabled[in]  Whether to use conditional rendering
 * to clear (if elsewhere enabled).
 */
static void
svga_clear_render_target(struct pipe_context *pipe,
                         struct pipe_surface *dst,
                         const union pipe_color_union *color,
                         unsigned dstx, unsigned dsty,
                         unsigned width, unsigned height,
                         bool render_condition_enabled)
{
    struct svga_context *svga = svga_context( pipe );

    svga_toggle_render_condition(svga, render_condition_enabled, FALSE);
    if (!svga_have_vgpu10(svga) || dstx != 0 || dsty != 0 ||
        width != dst->width || height != dst->height) {
       svga_blitter_clear_render_target(svga, dst, color, dstx, dsty, width,
                                        height);
    } else {
       enum pipe_error ret;
       
       ret = svga_try_clear_render_target(svga, dst, color);
       if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
          svga_context_flush( svga, NULL );
          ret = svga_try_clear_render_target(svga, dst, color);
       }
       
       assert (ret == PIPE_OK);
    }
    svga_toggle_render_condition(svga, render_condition_enabled, TRUE);
}
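
/* Hookup sketch (assumed init path): the clear callback above is installed on
 * the pipe context during context creation. The function name below is
 * hypothetical; in the driver this happens in its svga_init_*_functions()
 * style setup code. */
static void
sketch_init_clear_callbacks(struct pipe_context *pipe)
{
   pipe->clear_render_target = svga_clear_render_target;
}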
Example #3
0
enum pipe_error
svga_set_stream_output(struct svga_context *svga,
                       struct svga_stream_output *streamout)
{
   enum pipe_error ret = PIPE_OK;
   unsigned id = streamout ? streamout->id : SVGA3D_INVALID_ID;

   if (!svga_have_vgpu10(svga)) {
      return PIPE_OK;
   }

   SVGA_DBG(DEBUG_STREAMOUT, "%s streamout=%p id=%d\n", __FUNCTION__,
            (void *) streamout, id);

   if (svga->current_so != streamout) {
      /* Save current SO state */
      svga->current_so = streamout;

      ret = SVGA3D_vgpu10_SetStreamOutput(svga->swc, id);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_SetStreamOutput(svga->swc, id);
      }
   }

   return ret;
}
Example #4
0
/**
 * Translate the vertex element types to SVGA3dDeclType and check
 * for VS-based vertex attribute adjustments.
 */
static void
translate_vertex_decls(struct svga_context *svga,
                       struct svga_velems_state *velems)
{
   unsigned i;

   assert(!svga_have_vgpu10(svga));

   for (i = 0; i < velems->count; i++) {
      const enum pipe_format f = velems->velem[i].src_format;
      SVGA3dSurfaceFormat svga_format;
      unsigned vf_flags;

      svga_translate_vertex_format_vgpu10(f, &svga_format, &vf_flags);

      velems->decl_type[i] = translate_vertex_format_to_decltype(f);
      if (velems->decl_type[i] == SVGA3D_DECLTYPE_MAX) {
         /* Unsupported format - use software fetch */
         velems->need_swvfetch = TRUE;
      }

      /* Check for VS-based adjustments */
      if (attrib_needs_range_adjustment(f)) {
         velems->adjust_attrib_range |= (1 << i);
      }

      if (vf_flags & VF_W_TO_1) {
         velems->adjust_attrib_w_1 |= (1 << i);
      }
   }
}
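
/* Illustrative only: how a later pass might test the per-attribute bitmasks
 * filled in above. The helper name is hypothetical; the bit layout (one bit
 * per vertex element index) matches the code above. */
static bool
sketch_attrib_needs_vs_fixup(const struct svga_velems_state *velems,
                             unsigned attrib)
{
   return (velems->adjust_attrib_range & (1u << attrib)) != 0 ||
          (velems->adjust_attrib_w_1 & (1u << attrib)) != 0;
}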
Example #5
0
static void *
svga_create_vertex_elements_state(struct pipe_context *pipe,
                                  unsigned count,
                                  const struct pipe_vertex_element *attribs)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_velems_state *velems;

   assert(count <= PIPE_MAX_ATTRIBS);
   velems = (struct svga_velems_state *) MALLOC(sizeof(struct svga_velems_state));
   if (velems) {
      velems->count = count;
      memcpy(velems->velem, attribs, sizeof(*attribs) * count);

      velems->need_swvfetch = FALSE;
      velems->adjust_attrib_range = 0x0;
      velems->attrib_is_pure_int = 0x0;
      velems->adjust_attrib_w_1 = 0x0;
      velems->adjust_attrib_itof = 0x0;
      velems->adjust_attrib_utof = 0x0;
      velems->attrib_is_bgra = 0x0;
      velems->attrib_puint_to_snorm = 0x0;
      velems->attrib_puint_to_uscaled = 0x0;
      velems->attrib_puint_to_sscaled = 0x0;

      if (svga_have_vgpu10(svga)) {
         define_input_element_object(svga, velems);
      }
      else {
         translate_vertex_decls(svga, velems);
      }
   }
   return velems;
}
Example #6
0
/***********************************************************************
 * Scissor state
 */
static enum pipe_error
emit_scissor_rect( struct svga_context *svga,
                   unsigned dirty )
{
   const struct pipe_scissor_state *scissor = &svga->curr.scissor;

   if (svga_have_vgpu10(svga)) {
      SVGASignedRect rect;

      rect.left = scissor->minx;
      rect.top = scissor->miny;
      rect.right = scissor->maxx;
      rect.bottom = scissor->maxy;

      return SVGA3D_vgpu10_SetScissorRects(svga->swc, 1, &rect);
   }
   else {
      SVGA3dRect rect;

      rect.x = scissor->minx;
      rect.y = scissor->miny;
      rect.w = scissor->maxx - scissor->minx; /* + 1 ?? */
      rect.h = scissor->maxy - scissor->miny; /* + 1 ?? */

      return SVGA3D_SetScissorRect(svga->swc, &rect);
   }
}
static enum pipe_error
emit_gs_consts(struct svga_context *svga, unsigned dirty)
{
   const struct svga_shader_variant *variant = svga->state.hw_draw.gs;
   enum pipe_error ret = PIPE_OK;

   /* SVGA_NEW_GS_VARIANT
    */
   if (!variant)
      return PIPE_OK;

   /* SVGA_NEW_GS_CONST_BUFFER
    */
   if (svga_have_vgpu10(svga)) {
      /**
       * If only the rasterizer state has changed and the current geometry
       * shader does not emit wide points, then there is no reason to
       * re-emit the GS constants, so skip it.
       */
      if (dirty == SVGA_NEW_RAST && !variant->key.gs.wide_point)
         return PIPE_OK;

      ret = emit_consts_vgpu10(svga, PIPE_SHADER_GEOMETRY);
   }

   return ret;
}
Example #8
0
/**
 * svga_reemit_gs_bindings - Reemit the geometry shader bindings
 */
enum pipe_error
svga_reemit_gs_bindings(struct svga_context *svga)
{
   enum pipe_error ret;
   struct svga_winsys_gb_shader *gbshader = NULL;
   SVGA3dShaderId shaderId = SVGA3D_INVALID_ID;

   assert(svga->rebind.flags.gs);
   assert(svga_have_gb_objects(svga));

   /* Geometry Shader is only supported in vgpu10 */
   assert(svga_have_vgpu10(svga));

   if (svga->state.hw_draw.gs) {
      gbshader = svga->state.hw_draw.gs->gb_shader;
      shaderId = svga->state.hw_draw.gs->id;
   }

   if (!svga_need_to_rebind_resources(svga)) {
      ret =  svga->swc->resource_rebind(svga->swc, NULL, gbshader,
                                        SVGA_RELOC_READ);
      goto out;
   }

   ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_GS,
                                 gbshader, shaderId);

 out:
   if (ret != PIPE_OK)
      return ret;

   svga->rebind.flags.gs = FALSE;
   return PIPE_OK;
}
Example #9
0
/**
 * Check whether we can blit using the intra_surface_copy command.
 */
static bool
can_blit_via_intra_surface_copy(struct svga_context *svga,
                                const struct pipe_blit_info *blit_info)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct svga_texture *dtex, *stex;

   if (!svga_have_vgpu10(svga))
      return false;

   /* src surface cannot be multisample */
   if (blit_info->src.resource->nr_samples > 1)
      return false;

   if (!sws->have_intra_surface_copy)
      return false;

   if (svga->render_condition && blit_info->render_condition_enable)
      return false;

   if (blit_info->src.level != blit_info->dst.level)
      return false;

   if (has_layer_face_index_in_z(blit_info->src.resource->target)) {
      if (blit_info->src.box.z != blit_info->dst.box.z)
         return false;
   }

   stex = svga_texture(blit_info->src.resource);
   dtex = svga_texture(blit_info->dst.resource);

   return (stex->handle == dtex->handle);
}
Example #10
0
/**
 * Check and emit one shader constant register.
 * \param shader  PIPE_SHADER_FRAGMENT or PIPE_SHADER_VERTEX
 * \param i  which float[4] constant to change
 * \param value  the new float[4] value
 */
static enum pipe_error
emit_const(struct svga_context *svga, unsigned shader, unsigned i,
           const float *value)
{
   enum pipe_error ret = PIPE_OK;

   assert(shader < PIPE_SHADER_TYPES);
   assert(i < SVGA3D_CONSTREG_MAX);
   assert(!svga_have_vgpu10(svga));

   if (memcmp(svga->state.hw_draw.cb[shader][i], value,
              4 * sizeof(float)) != 0) {
      if (SVGA_DEBUG & DEBUG_CONSTS)
         debug_printf("%s %s %u: %f %f %f %f\n",
                      __FUNCTION__,
                      shader == PIPE_SHADER_VERTEX ? "VERT" : "FRAG",
                      i,
                      value[0],
                      value[1],
                      value[2],
                      value[3]);

      ret = SVGA3D_SetShaderConst( svga->swc,
                                   i,
                                   svga_shader_type(shader),
                                   SVGA3D_CONST_TYPE_FLOAT,
                                   value );
      if (ret != PIPE_OK)
         return ret;

      memcpy(svga->state.hw_draw.cb[shader][i], value, 4 * sizeof(float));
   }

   return ret;
}
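
/* Sketch: emitting a contiguous range of float[4] constants by calling
 * emit_const() once per register. The float (*)[4] source layout and the
 * helper name are assumptions for illustration only. */
static enum pipe_error
sketch_emit_const_range(struct svga_context *svga, unsigned shader,
                        unsigned first, unsigned count,
                        const float (*values)[4])
{
   unsigned i;

   for (i = 0; i < count; i++) {
      enum pipe_error ret = emit_const(svga, shader, first + i, values[i]);
      if (ret != PIPE_OK)
         return ret;   /* caller may flush the context and retry */
   }
   return PIPE_OK;
}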
Example #11
0
static void
svga_delete_vertex_elements_state(struct pipe_context *pipe, void *state)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_velems_state *velems = (struct svga_velems_state *) state;

   if (svga_have_vgpu10(svga)) {
      enum pipe_error ret;

      svga_hwtnl_flush_retry(svga);

      ret = SVGA3D_vgpu10_DestroyElementLayout(svga->swc, velems->id);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_DestroyElementLayout(svga->swc, velems->id);
         assert(ret == PIPE_OK);
      }

      if (velems->id == svga->state.hw_draw.layout_id)
         svga->state.hw_draw.layout_id = SVGA3D_INVALID_ID;

      util_bitmask_clear(svga->input_element_object_id_bm, velems->id);
      velems->id = SVGA3D_INVALID_ID;
   }

   FREE(velems);
}
Example #12
0
/**
 * svga_reemit_fs_bindings - Reemit the fragment shader bindings
 */
enum pipe_error
svga_reemit_fs_bindings(struct svga_context *svga)
{
   enum pipe_error ret;

   assert(svga->rebind.flags.fs);
   assert(svga_have_gb_objects(svga));

   if (!svga->state.hw_draw.fs)
      return PIPE_OK;

   if (!svga_need_to_rebind_resources(svga)) {
      ret =  svga->swc->resource_rebind(svga->swc, NULL,
                                        svga->state.hw_draw.fs->gb_shader,
                                        SVGA_RELOC_READ);
      goto out;
   }

   if (svga_have_vgpu10(svga))
      ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_PS,
                                    svga->state.hw_draw.fs->gb_shader,
                                    svga->state.hw_draw.fs->id);
   else
      ret = SVGA3D_SetGBShader(svga->swc, SVGA3D_SHADERTYPE_PS,
                               svga->state.hw_draw.fs->gb_shader);

 out:
   if (ret != PIPE_OK)
      return ret;

   svga->rebind.flags.fs = FALSE;
   return PIPE_OK;
}
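
/* Sketch of how the reemit helpers are driven: at the start of a new command
 * buffer the driver re-emits whatever bindings are flagged in
 * svga->rebind.flags. The dispatcher below is hypothetical; only the flag
 * checks and helper calls mirror the driver code shown here. */
static enum pipe_error
sketch_reemit_flagged_bindings(struct svga_context *svga)
{
   enum pipe_error ret;

   if (svga->rebind.flags.fs) {
      ret = svga_reemit_fs_bindings(svga);
      if (ret != PIPE_OK)
         return ret;
   }
   if (svga->rebind.flags.gs) {
      ret = svga_reemit_gs_bindings(svga);
      if (ret != PIPE_OK)
         return ret;
   }
   return PIPE_OK;
}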
Example #13
0
static struct pipe_stream_output_target *
svga_create_stream_output_target(struct pipe_context *pipe,
                                 struct pipe_resource *buffer,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_stream_output_target *sot;

   SVGA_DBG(DEBUG_STREAMOUT, "%s offset=%d size=%d\n", __FUNCTION__,
            buffer_offset, buffer_size);

   assert(svga_have_vgpu10(svga));
   (void) svga;

   sot = CALLOC_STRUCT(svga_stream_output_target);
   if (!sot)
      return NULL;

   pipe_reference_init(&sot->base.reference, 1);
   pipe_resource_reference(&sot->base.buffer, buffer);
   sot->base.context = pipe;
   sot->base.buffer = buffer;
   sot->base.buffer_offset = buffer_offset;
   sot->base.buffer_size = buffer_size;

   return &sot->base;
}
Example #14
0
static struct svga_shader_variant *
translate_geometry_program(struct svga_context *svga,
                           const struct svga_geometry_shader *gs,
                           const struct svga_compile_key *key)
{
   assert(svga_have_vgpu10(svga));
   return svga_tgsi_vgpu10_translate(svga, &gs->base, key,
                                     PIPE_SHADER_GEOMETRY);
}
Example #15
0
/**
 * Emit any pending drawing commands to the command buffer.
 * When we receive VGPU9 drawing commands we accumulate them and don't
 * immediately emit them into the command buffer.
 * This function needs to be called before we change state that could
 * affect those pending draws.
 */
enum pipe_error
svga_hwtnl_flush(struct svga_hwtnl *hwtnl)
{
   if (!svga_have_vgpu10(hwtnl->svga) && hwtnl->cmd.prim_count) {
      /* we only queue up primitives for VGPU9 */
      return draw_vgpu9(hwtnl);
   }
   return PIPE_OK;
}
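
/* Sketch of the flush-before-state-change pattern the comment above refers
 * to. The driver's own wrapper is svga_hwtnl_flush_retry() (referenced
 * elsewhere in this code); the body below is an illustrative guess at that
 * pattern, not the actual implementation. */
static void
sketch_flush_pending_draws(struct svga_context *svga)
{
   enum pipe_error ret = svga_hwtnl_flush(svga->hwtnl);

   if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
      /* the command buffer was full: flush it and try again */
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_flush(svga->hwtnl);
   }
   assert(ret == PIPE_OK);
   (void) ret;
}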
/**
 * unmap direct map transfer request
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;
      enum pipe_error ret;

      assert(svga_have_gb_objects(svga));

      /* update the affected region */
      SVGA3dBox box = st->box;
      unsigned nlayers;

      switch (tex->b.b.target) {
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_1D_ARRAY:
         nlayers = box.d;
         box.d = 1;
         break;
      default:
         nlayers = 1;
         break;
      }


      if (0)
         debug_printf("%s %d, %d, %d  %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (svga_have_vgpu10(svga)) {
         unsigned i;

         for (i = 0; i < nlayers; i++) {
            ret = update_image_vgpu10(svga, surf, &box,
                                      st->slice + i, transfer->level,
                                      tex->b.b.last_level + 1);
            assert(ret == PIPE_OK);
         }
      } else {
         assert(nlayers == 1);
         ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
         assert(ret == PIPE_OK);
      }
      (void) ret;
   }
}
Example #17
0
static enum pipe_error
emit_rss(struct svga_context *svga, unsigned dirty)
{
   if (svga_have_vgpu10(svga)) {
      return emit_rss_vgpu10(svga, dirty);
   }
   else {
      return emit_rss_vgpu9(svga, dirty);
   }
}
Example #18
0
static enum pipe_error
emit_framebuffer(struct svga_context *svga, unsigned dirty)
{
   if (svga_have_vgpu10(svga)) {
      return emit_fb_vgpu10(svga);
   }
   else {
      return emit_fb_vgpu9(svga);
   }
}
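
/* Sketch: emit functions like the two above are normally referenced from the
 * driver's tracked-state tables (name, dirty-flag mask, update callback).
 * The struct svga_tracked_state layout and the SVGA_NEW_FRAME_BUFFER flag are
 * assumed from the surrounding driver conventions; treat the initializer as
 * illustrative. */
static const struct svga_tracked_state sketch_hw_framebuffer_state = {
   "hw framebuffer state",
   SVGA_NEW_FRAME_BUFFER,
   emit_framebuffer
};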
Example #19
0
static void svga_blit(struct pipe_context *pipe,
                      const struct pipe_blit_info *blit_info)
{
   struct svga_context *svga = svga_context(pipe);
   struct pipe_blit_info info = *blit_info;

   if (!svga_have_vgpu10(svga) &&
       info.src.resource->nr_samples > 1 &&
       info.dst.resource->nr_samples <= 1 &&
       !util_format_is_depth_or_stencil(info.src.resource->format) &&
       !util_format_is_pure_integer(info.src.resource->format)) {
      debug_printf("svga: color resolve unimplemented\n");
      return;
   }

   if (util_try_blit_via_copy_region(pipe, &info)) {
      return; /* done */
   }

   if ((info.mask & PIPE_MASK_S) ||
       !util_blitter_is_blit_supported(svga->blitter, &info)) {
      debug_printf("svga: blit unsupported %s -> %s\n",
                   util_format_short_name(info.src.resource->format),
                   util_format_short_name(info.dst.resource->format));
      return;
   }

   /* XXX turn off occlusion and streamout queries */

   util_blitter_save_vertex_buffer_slot(svga->blitter, svga->curr.vb);
   util_blitter_save_vertex_elements(svga->blitter, (void*)svga->curr.velems);
   util_blitter_save_vertex_shader(svga->blitter, svga->curr.vs);
   util_blitter_save_geometry_shader(svga->blitter, svga->curr.user_gs);
   util_blitter_save_so_targets(svga->blitter, svga->num_so_targets,
                     (struct pipe_stream_output_target**)svga->so_targets);
   util_blitter_save_rasterizer(svga->blitter, (void*)svga->curr.rast);
   util_blitter_save_viewport(svga->blitter, &svga->curr.viewport);
   util_blitter_save_scissor(svga->blitter, &svga->curr.scissor);
   util_blitter_save_fragment_shader(svga->blitter, svga->curr.fs);
   util_blitter_save_blend(svga->blitter, (void*)svga->curr.blend);
   util_blitter_save_depth_stencil_alpha(svga->blitter,
                                         (void*)svga->curr.depth);
   util_blitter_save_stencil_ref(svga->blitter, &svga->curr.stencil_ref);
   util_blitter_save_sample_mask(svga->blitter, svga->curr.sample_mask);
   util_blitter_save_framebuffer(svga->blitter, &svga->curr.framebuffer);
   util_blitter_save_fragment_sampler_states(svga->blitter,
                     svga->curr.num_samplers[PIPE_SHADER_FRAGMENT],
                     (void**)svga->curr.sampler[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_fragment_sampler_views(svga->blitter,
                     svga->curr.num_sampler_views[PIPE_SHADER_FRAGMENT],
                     svga->curr.sampler_views[PIPE_SHADER_FRAGMENT]);
   /*util_blitter_save_render_condition(svga->blitter, svga->render_cond_query,
                                      svga->render_cond_cond, svga->render_cond_mode);*/
   util_blitter_blit(svga->blitter, &info);
}
Example #20
0
/**
 * All drawing filters down into this function, either directly
 * on the hardware path or after doing software vertex processing.
 */
enum pipe_error
svga_hwtnl_prim(struct svga_hwtnl *hwtnl,
                const SVGA3dPrimitiveRange * range,
                unsigned vcount,
                unsigned min_index,
                unsigned max_index, struct pipe_resource *ib,
                unsigned start_instance, unsigned instance_count)
{
   enum pipe_error ret = PIPE_OK;

   SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLPRIM);

   if (svga_have_vgpu10(hwtnl->svga)) {
      /* draw immediately */
      ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                        start_instance, instance_count);
      if (ret != PIPE_OK) {
         svga_context_flush(hwtnl->svga, NULL);
         ret = draw_vgpu10(hwtnl, range, vcount, min_index, max_index, ib,
                           start_instance, instance_count);
         assert(ret == PIPE_OK);
      }
   }
   else {
      /* batch up drawing commands */
#ifdef DEBUG
      check_draw_params(hwtnl, range, min_index, max_index, ib);
      assert(start_instance == 0);
      assert(instance_count <= 1);
#else
      (void) check_draw_params;
#endif

      if (hwtnl->cmd.prim_count + 1 >= QSZ) {
         ret = svga_hwtnl_flush(hwtnl);
         if (ret != PIPE_OK)
            goto done;
      }

      /* min/max indices are relative to bias */
      hwtnl->cmd.min_index[hwtnl->cmd.prim_count] = min_index;
      hwtnl->cmd.max_index[hwtnl->cmd.prim_count] = max_index;

      hwtnl->cmd.prim[hwtnl->cmd.prim_count] = *range;
      hwtnl->cmd.prim[hwtnl->cmd.prim_count].indexBias += hwtnl->index_bias;

      pipe_resource_reference(&hwtnl->cmd.prim_ib[hwtnl->cmd.prim_count], ib);
      hwtnl->cmd.prim_count++;
   }

done:
   SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
   return ret;
}
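
/* Minimal caller sketch: a non-indexed triangle-list draw routed through
 * svga_hwtnl_prim(). The SVGA3dPrimitiveRange field names come from the
 * svga3d headers; the zero/default values here are illustrative, not the
 * driver's exact setup. */
static enum pipe_error
sketch_draw_triangle_list(struct svga_hwtnl *hwtnl, unsigned vcount)
{
   SVGA3dPrimitiveRange range;

   memset(&range, 0, sizeof(range));
   range.primType = SVGA3D_PRIMITIVE_TRIANGLELIST;
   range.primitiveCount = vcount / 3;
   range.indexWidth = 0;     /* non-indexed */
   range.indexBias = 0;

   /* NULL index buffer, start_instance 0, one instance */
   return svga_hwtnl_prim(hwtnl, &range, vcount, 0, vcount - 1, NULL, 0, 1);
}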
Example #21
0
/* Set up any hardware state which will be constant through the life of
 * a context.
 */
enum pipe_error svga_emit_initial_state( struct svga_context *svga )
{
   if (svga_have_vgpu10(svga)) {
      SVGA3dRasterizerStateId id = util_bitmask_add(svga->rast_object_id_bm);
      enum pipe_error ret;

      /* XXX preliminary code */
      ret = SVGA3D_vgpu10_DefineRasterizerState(svga->swc,
                                             id,
                                             SVGA3D_FILLMODE_FILL,
                                             SVGA3D_CULL_NONE,
                                             1, /* frontCounterClockwise */
                                             0, /* depthBias */
                                             0.0f, /* depthBiasClamp */
                                             0.0f, /* slopeScaledDepthBiasClamp */
                                             0, /* depthClampEnable */
                                             0, /* scissorEnable */
                                             0, /* multisampleEnable */
                                             0, /* aalineEnable */
                                             1.0f, /* lineWidth */
                                             0, /* lineStippleEnable */
                                             0, /* lineStippleFactor */
                                             0, /* lineStipplePattern */
                                             0); /* provokingVertexLast */


      assert(ret == PIPE_OK);

      ret = SVGA3D_vgpu10_SetRasterizerState(svga->swc, id);
      return ret;
   }
   else {
      SVGA3dRenderState *rs;
      unsigned count = 0;
      const unsigned COUNT = 2;
      enum pipe_error ret;

      ret = SVGA3D_BeginSetRenderState( svga->swc, &rs, COUNT );
      if (ret != PIPE_OK)
         return ret;

      /* Always use D3D style coordinate space as this is the only one
       * which is implemented on all backends.
       */
      EMIT_RS(rs, count, SVGA3D_RS_COORDINATETYPE,
              SVGA3D_COORDINATE_LEFTHANDED );
      EMIT_RS(rs, count, SVGA3D_RS_FRONTWINDING, SVGA3D_FRONTWINDING_CW );

      assert( COUNT == count );
      SVGA_FIFOCommitAll( svga->swc );

      return PIPE_OK;
   }
}
Example #22
0
static enum pipe_error
emit_fb_vgpu9(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   const struct pipe_framebuffer_state *curr = &svga->curr.framebuffer;
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   boolean reemit = svga->rebind.flags.rendertargets;
   unsigned i;
   enum pipe_error ret;

   assert(!svga_have_vgpu10(svga));

   /*
    * We need to reemit non-null surface bindings, even when they are not
    * dirty, to ensure that the resources are paged in.
    */

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if ((curr->cbufs[i] != hw->cbufs[i]) || (reemit && hw->cbufs[i])) {
         if (svga->curr.nr_fbs++ > MAX_RT_PER_BATCH)
            return PIPE_ERROR_OUT_OF_MEMORY;

         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_COLOR0 + i,
                                      curr->cbufs[i]);
         if (ret != PIPE_OK)
            return ret;

         pipe_surface_reference(&hw->cbufs[i], curr->cbufs[i]);
      }
   }

   if ((curr->zsbuf != hw->zsbuf) || (reemit && hw->zsbuf)) {
      ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_DEPTH, curr->zsbuf);
      if (ret != PIPE_OK)
         return ret;

      if (curr->zsbuf &&
          util_format_is_depth_and_stencil(curr->zsbuf->format)) {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL,
                                      curr->zsbuf);
         if (ret != PIPE_OK)
            return ret;
      }
      else {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL, NULL);
         if (ret != PIPE_OK)
            return ret;
      }

      pipe_surface_reference(&hw->zsbuf, curr->zsbuf);
   }

   return PIPE_OK;
}
示例#23
0
static enum pipe_error
validate_constant_buffers(struct svga_context *svga)
{
   unsigned shader;

   assert(svga_have_vgpu10(svga));

   for (shader = PIPE_SHADER_VERTEX; shader <= PIPE_SHADER_GEOMETRY; shader++) {
      enum pipe_error ret;
      struct svga_buffer *buffer;
      struct svga_winsys_surface *handle;
      unsigned enabled_constbufs;

      /* Rebind the default constant buffer if needed */
      if (svga->rebind.flags.constbufs) {
         buffer = svga_buffer(svga->state.hw_draw.constbuf[shader]);
         if (buffer) {
            ret = svga->swc->resource_rebind(svga->swc,
                                             buffer->handle,
                                             NULL,
                                             SVGA_RELOC_READ);
            if (ret != PIPE_OK)
               return ret;
         }
      }

      /*
       * Reference other bound constant buffers to ensure pending updates are
       * noticed by the device.
       */
      enabled_constbufs = svga->state.hw_draw.enabled_constbufs[shader] & ~1u;
      while (enabled_constbufs) {
         unsigned i = u_bit_scan(&enabled_constbufs);
         buffer = svga_buffer(svga->curr.constbufs[shader][i].buffer);
         if (buffer) {
            handle = svga_buffer_handle(svga, &buffer->b.b);

            if (svga->rebind.flags.constbufs) {
               ret = svga->swc->resource_rebind(svga->swc,
                                                handle,
                                                NULL,
                                                SVGA_RELOC_READ);
               if (ret != PIPE_OK)
                  return ret;
            }
         }
      }
   }
   svga->rebind.flags.constbufs = FALSE;

   return PIPE_OK;
}
Example #24
0
static struct svga_shader_variant *
translate_fragment_program(struct svga_context *svga,
                           const struct svga_fragment_shader *fs,
                           const struct svga_compile_key *key)
{
   if (svga_have_vgpu10(svga)) {
      return svga_tgsi_vgpu10_translate(svga, &fs->base, key,
                                        PIPE_SHADER_FRAGMENT);
   }
   else {
      return svga_tgsi_vgpu9_translate(svga, &fs->base, key,
                                       PIPE_SHADER_FRAGMENT);
   }
}
Example #25
0
static struct svga_shader_variant *
translate_vertex_program(struct svga_context *svga,
                         const struct svga_vertex_shader *vs,
                         const struct svga_compile_key *key)
{
   if (svga_have_vgpu10(svga)) {
      return svga_tgsi_vgpu10_translate(svga, &vs->base, key,
                                        PIPE_SHADER_VERTEX);
   }
   else {
      return svga_tgsi_vgpu9_translate(svga, &vs->base, key,
                                       PIPE_SHADER_VERTEX);
   }
}
Example #26
0
/**
 * Emit any pending drawing commands to the command buffer.
 * When we receive VGPU9 drawing commands we accumulate them and don't
 * immediately emit them into the command buffer.
 * This function needs to be called before we change state that could
 * affect those pending draws.
 */
enum pipe_error
svga_hwtnl_flush(struct svga_hwtnl *hwtnl)
{
   enum pipe_error ret = PIPE_OK;

   SVGA_STATS_TIME_PUSH(svga_sws(hwtnl->svga), SVGA_STATS_TIME_HWTNLFLUSH);

   if (!svga_have_vgpu10(hwtnl->svga) && hwtnl->cmd.prim_count) {
      /* we only queue up primitives for VGPU9 */
      ret = draw_vgpu9(hwtnl);
   }

   SVGA_STATS_TIME_POP(svga_screen(hwtnl->svga->pipe.screen)->sws);
   return ret;
}
Example #27
0
/* update_tgsi_transform provides a hook to transform a shader if needed.
 */
static enum pipe_error
update_tgsi_transform(struct svga_context *svga, unsigned dirty)
{
   struct svga_geometry_shader *gs = svga->curr.user_gs;   /* current gs */
   struct svga_vertex_shader *vs = svga->curr.vs;     /* currently bound vs */
   struct svga_shader *orig_gs;                       /* original gs */
   struct svga_shader *new_gs;                        /* new gs */

   if (!svga_have_vgpu10(svga))
      return PIPE_OK;

   if (svga->curr.reduced_prim == PIPE_PRIM_POINTS) {
      /* If the current prim type is POINTS and the current geometry shader
       * emits wide points, transform the shader to emulate wide points using
       * quads.
       */
      if (gs != NULL && (gs->base.info.writes_psize || gs->wide_point)) {
         orig_gs = gs->base.parent ? gs->base.parent : &gs->base;
         new_gs = emulate_point_sprite(svga, orig_gs, orig_gs->tokens);
      }

      /* If there is no active geometry shader and the current vertex
       * shader emits wide points, then create a new geometry shader to
       * emulate wide points.
       */
      else if (gs == NULL &&
               (svga->curr.rast->pointsize > 1.0 ||
                vs->base.info.writes_psize)) {
         new_gs = add_point_sprite_shader(svga);
      }
      else {
         /* use the user's GS */
         bind_gs_state(svga, svga->curr.user_gs);
      }
   }
   else if (svga->curr.gs != svga->curr.user_gs) {
      /* If the current primitive type is not POINTS, then make sure
       * we don't bind any of the generated geometry shaders
       */
      bind_gs_state(svga, svga->curr.user_gs);
   }
   (void) new_gs;    /* silence the unused var warning */

   return PIPE_OK;
}
Example #28
0
/*
 * Rebind rendertargets.
 *
 * Similar to emit_framebuffer, but without any state checking/update.
 *
 * Called at the beginning of every new command buffer to ensure that
 * non-dirty rendertargets are properly paged-in.
 */
enum pipe_error
svga_reemit_framebuffer_bindings(struct svga_context *svga)
{
   enum pipe_error ret;

   assert(svga->rebind.flags.rendertargets);

   if (svga_have_vgpu10(svga)) {
      ret = emit_fb_vgpu10(svga);
   }
   else {
      ret = svga_reemit_framebuffer_bindings_vgpu9(svga);
   }

   svga->rebind.flags.rendertargets = FALSE;

   return ret;
}
Example #29
0
/**
 * Determine if we need to implement primitive restart with a fallback
 * path which breaks the original primitive into sub-primitives at the
 * restart indices.
 */
static boolean
need_fallback_prim_restart(const struct svga_context *svga,
                           const struct pipe_draw_info *info)
{
   if (info->primitive_restart && info->indexed) {
      if (!svga_have_vgpu10(svga))
         return TRUE;
      else if (!svga->state.sw.need_swtnl) {
         if (svga->curr.ib.index_size == 1)
            return TRUE; /* no device support for 1-byte indexes */
         else if (svga->curr.ib.index_size == 2)
            return info->restart_index != 0xffff;
         else
            return info->restart_index != 0xffffffff;
      }
   }

   return FALSE;
}
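
/* Caller-side sketch: when the fallback is needed, the draw is split at the
 * restart indices in software before reaching the hardware path. The gallium
 * helper util_draw_vbo_without_prim_restart() (u_prim_restart.h) served that
 * purpose in this era of Mesa; its exact signature is assumed here. */
static enum pipe_error
sketch_draw_with_prim_restart(struct svga_context *svga,
                              struct pipe_context *pipe,
                              const struct pipe_draw_info *info)
{
   if (need_fallback_prim_restart(svga, info)) {
      /* software split at restart indices */
      return util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
   }

   /* otherwise the device handles primitive restart on the normal draw path */
   return PIPE_OK;
}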
Example #30
0
/*
 * Rebind rendertargets.
 *
 * Similar to emit_framebuffer, but without any state checking/update.
 *
 * Called at the beginning of every new command buffer to ensure that
 * non-dirty rendertargets are properly paged-in.
 */
static enum pipe_error
svga_reemit_framebuffer_bindings_vgpu9(struct svga_context *svga)
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_framebuffer_state *hw = &svga->state.hw_clear.framebuffer;
   unsigned i;
   enum pipe_error ret;

   assert(!svga_have_vgpu10(svga));

   for (i = 0; i < svgascreen->max_color_buffers; i++) {
      if (hw->cbufs[i]) {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_COLOR0 + i,
                                      hw->cbufs[i]);
         if (ret != PIPE_OK) {
            return ret;
         }
      }
   }

   if (hw->zsbuf) {
      ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_DEPTH, hw->zsbuf);
      if (ret != PIPE_OK) {
         return ret;
      }

      if (hw->zsbuf &&
          util_format_is_depth_and_stencil(hw->zsbuf->format)) {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL, hw->zsbuf);
         if (ret != PIPE_OK) {
            return ret;
         }
      }
      else {
         ret = SVGA3D_SetRenderTarget(svga->swc, SVGA3D_RT_STENCIL, NULL);
         if (ret != PIPE_OK) {
            return ret;
         }
      }
   }

   return PIPE_OK;
}