Example #1
void svga_context_flush( struct svga_context *svga, 
                         struct pipe_fence_handle **pfence )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_fence_handle *fence = NULL;
   uint64_t t0;

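   /* Start a fresh count of framebuffer state changes for the next
    * command buffer.
    */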
   svga->curr.nr_fbs = 0;

   /* Ensure that texture dma uploads are processed
    * before submitting commands.
    */
   svga_context_flush_buffers(svga);

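   /* Accumulate the size of the command buffer being flushed for the
    * HUD statistics.
    */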
   svga->hud.command_buffer_size +=
      svga->swc->get_command_buffer_size(svga->swc);

   /* Flush pending commands to hardware:
    */
   t0 = os_time_get();
   svga->swc->flush(svga->swc, &fence);
   svga->hud.flush_time += (os_time_get() - t0);

   svga->hud.num_flushes++;

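   /* Notify the screen's surface cache of the flush so surfaces
    * referenced by the flushed commands can be fenced and recycled.
    */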
   svga_screen_cache_flush(svgascreen, fence);

   /* Force re-emission of render targets and texture sampler bindings on
    * the next command buffer.
    */
   svga->rebind.flags.rendertargets = TRUE;
   svga->rebind.flags.texture_samplers = TRUE;

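   /* With guest-backed objects, constant buffers and shaders must also
    * be re-bound in the next command buffer.
    */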
   if (svga_have_gb_objects(svga)) {
      svga->rebind.flags.constbufs = TRUE;
      svga->rebind.flags.vs = TRUE;
      svga->rebind.flags.fs = TRUE;
      svga->rebind.flags.gs = TRUE;

      if (svga_need_to_rebind_resources(svga)) {
         svga->rebind.flags.query = TRUE;
      }
   }

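   /* In sync-debug mode, block until the host has finished executing the
    * just-flushed command buffer.
    */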
   if (SVGA_DEBUG & DEBUG_SYNC) {
      if (fence)
         svga->pipe.screen->fence_finish( svga->pipe.screen, fence,
                                          PIPE_TIMEOUT_INFINITE);
   }

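   /* Return the fence to the caller if requested, then release the
    * local reference.
    */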
   if (pfence)
      svgascreen->sws->fence_reference(svgascreen->sws, pfence, fence);

   svgascreen->sws->fence_reference(svgascreen->sws, &fence, NULL);
}
Example #2
void svga_context_flush( struct svga_context *svga, 
                         struct pipe_fence_handle **pfence )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_fence_handle *fence = NULL;

   svga->curr.nr_fbs = 0;

   /* Flush the upload managers to ensure recycling of upload buffers
    * without throttling. This should really be conditioned on
    * pipe_buffer_map_range not supporting PIPE_TRANSFER_UNSYNCHRONIZED.
    */

   u_upload_flush(svga->upload_vb);
   u_upload_flush(svga->upload_ib);

   /* Ensure that texture dma uploads are processed
    * before submitting commands.
    */
   svga_context_flush_buffers(svga);

   /* Flush pending commands to hardware:
    */
   svga->swc->flush(svga->swc, &fence);

   svga_screen_cache_flush(svgascreen, fence);

   /* Force re-emission of render targets and texture sampler bindings on
    * the next command buffer.
    */
   svga->rebind.rendertargets = TRUE;
   svga->rebind.texture_samplers = TRUE;

   if (SVGA_DEBUG & DEBUG_SYNC) {
      if (fence)
         svga->pipe.screen->fence_finish( svga->pipe.screen, fence,
                                          PIPE_TIMEOUT_INFINITE);
   }

   if (pfence)
      svgascreen->sws->fence_reference(svgascreen->sws, pfence, fence);

   svgascreen->sws->fence_reference(svgascreen->sws, &fence, NULL);
}
Example #3
void svga_context_flush( struct svga_context *svga,
                         struct pipe_fence_handle **pfence )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct pipe_fence_handle *fence = NULL;

   svga->curr.nr_fbs = 0;

   /* Ensure that texture dma uploads are processed
    * before submitting commands.
    */
   svga_context_flush_buffers(svga);

   /* Flush pending commands to hardware:
    */
   svga->swc->flush(svga->swc, &fence);

   svga_screen_cache_flush(svgascreen, fence);

   /* Force re-emission of render targets and texture sampler bindings on
    * the next command buffer.
    */
   svga->rebind.rendertargets = TRUE;
   svga->rebind.texture_samplers = TRUE;

   if (SVGA_DEBUG & DEBUG_SYNC) {
      if (fence)
         svga->pipe.screen->fence_finish( svga->pipe.screen, fence,
                                          PIPE_TIMEOUT_INFINITE);
   }

   if (pfence)
      svgascreen->sws->fence_reference(svgascreen->sws, pfence, fence);

   svgascreen->sws->fence_reference(svgascreen->sws, &fence, NULL);
}