Example #1
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct intel_context *intel = context->driverPrivate;
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;

   /* If we're rendering to the fake front buffer, make sure all the
    * pending drawing has landed on the real front buffer.  Otherwise
    * when we eventually get to DRI2GetBuffersWithFormat the stale
    * real front buffer contents will get copied to the new fake front
    * buffer.
    */
   if (intel->is_front_buffer_rendering) {
      intel_flush(&intel->ctx);
      intel_flush_front(&intel->ctx);
   }

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   intel_query_dri2_buffers(intel, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
       switch (buffers[i].attachment) {
       case __DRI_BUFFER_FRONT_LEFT:
	   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
	   region_name = "dri2 front buffer";
	   break;

       case __DRI_BUFFER_FAKE_FRONT_LEFT:
	   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
	   region_name = "dri2 fake front buffer";
	   break;

       case __DRI_BUFFER_BACK_LEFT:
	   rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
	   region_name = "dri2 back buffer";
	   break;

       case __DRI_BUFFER_DEPTH:
       case __DRI_BUFFER_HIZ:
       case __DRI_BUFFER_DEPTH_STENCIL:
       case __DRI_BUFFER_STENCIL:
       case __DRI_BUFFER_ACCUM:
       default:
	   fprintf(stderr,
		   "unhandled buffer attach event, attachment type %d\n",
		   buffers[i].attachment);
	   return;
       }

       intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
   }

   driUpdateFramebufferSize(&intel->ctx, drawable);
}
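
The comment about writing lastStamp before fetching buffers guards against a race with DRI2 invalidation. A minimal sketch of the stamp protocol it relies on, assuming the standard __DRIdrawable fields used above (the hook and helper names here are hypothetical):

#include <stdbool.h>

/* Hedged sketch, not from this file: the loader-side invalidate hook
 * bumps the drawable's stamp, and the driver detects staleness by
 * comparing it against the stamp it recorded before fetching buffers.
 */
static void
sketch_invalidate(__DRIdrawable *drawable)
{
   drawable->dri2.stamp++;   /* any change marks the drawable stale */
}

static bool
sketch_buffers_need_update(const __DRIdrawable *drawable)
{
   return drawable->lastStamp != drawable->dri2.stamp;
}
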
Example #2
/**
 * CopyPixels with the blitter.  Don't support zooming, pixel transfer, etc.
 */
static GLboolean
do_blit_copypixels(struct gl_context * ctx,
                   GLint srcx, GLint srcy,
                   GLsizei width, GLsizei height,
                   GLint dstx, GLint dsty, GLenum type)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct gl_framebuffer *read_fb = ctx->ReadBuffer;
   GLint orig_dstx;
   GLint orig_dsty;
   GLint orig_srcx;
   GLint orig_srcy;
   GLboolean flip = GL_FALSE;
   struct intel_renderbuffer *draw_irb = NULL;
   struct intel_renderbuffer *read_irb = NULL;

   /* Update draw buffer bounds */
   _mesa_update_state(ctx);

   switch (type) {
   case GL_COLOR:
      if (fb->_NumColorDrawBuffers != 1) {
	 fallback_debug("glCopyPixels() fallback: MRT\n");
	 return GL_FALSE;
      }

      draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
      read_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
      break;
   case GL_DEPTH_STENCIL_EXT:
      draw_irb = intel_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      read_irb =
	 intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      break;
   case GL_DEPTH:
      fallback_debug("glCopyPixels() fallback: GL_DEPTH\n");
      return GL_FALSE;
   case GL_STENCIL:
      fallback_debug("glCopyPixels() fallback: GL_STENCIL\n");
      return GL_FALSE;
   default:
      fallback_debug("glCopyPixels(): Unknown type\n");
      return GL_FALSE;
   }

   if (!draw_irb) {
      fallback_debug("glCopyPixels() fallback: missing draw buffer\n");
      return GL_FALSE;
   }

   if (!read_irb) {
      fallback_debug("glCopyPixels() fallback: missing read buffer\n");
      return GL_FALSE;
   }

   if (draw_irb->Base.Format != read_irb->Base.Format &&
       !(draw_irb->Base.Format == MESA_FORMAT_XRGB8888 &&
	 read_irb->Base.Format == MESA_FORMAT_ARGB8888)) {
      fallback_debug("glCopyPixels() fallback: mismatched formats (%s -> %s\n",
		     _mesa_get_format_name(read_irb->Base.Format),
		     _mesa_get_format_name(draw_irb->Base.Format));
      return GL_FALSE;
   }

   /* CopyPixels can be more than a straight copy.  Ensure all the
    * extra operations are disabled:
    */
   if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
       ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
      return GL_FALSE;

   intel_prepare_render(intel);

   intel_flush(&intel->ctx);

   /* Clip to destination buffer. */
   orig_dstx = dstx;
   orig_dsty = dsty;
   if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
			     fb->_Xmax, fb->_Ymax,
			     &dstx, &dsty, &width, &height))
      goto out;
   /* Adjust src coords for our post-clipped destination origin */
   srcx += dstx - orig_dstx;
   srcy += dsty - orig_dsty;

   /* Clip to source buffer. */
   orig_srcx = srcx;
   orig_srcy = srcy;
   if (!_mesa_clip_to_region(0, 0,
			     read_fb->Width, read_fb->Height,
			     &srcx, &srcy, &width, &height))
      goto out;
   /* Adjust dst coords for our post-clipped source origin */
   dstx += srcx - orig_srcx;
   dsty += srcy - orig_srcy;

   /* Flip dest Y if it's a window system framebuffer. */
   if (fb->Name == 0) {
      /* copypixels to a window system framebuffer */
      dsty = fb->Height - dsty - height;
      flip = !flip;
   }

   /* Flip source Y if it's a window system framebuffer. */
   if (read_fb->Name == 0) {
      srcy = read_fb->Height - srcy - height;
      flip = !flip;
   }

   srcx += read_irb->draw_x;
   srcy += read_irb->draw_y;
   dstx += draw_irb->draw_x;
   dsty += draw_irb->draw_y;

   if (!intel_region_copy(intel,
			  draw_irb->region, 0, dstx, dsty,
			  read_irb->region, 0, srcx, srcy,
			  width, height, flip,
			  ctx->Color.ColorLogicOpEnabled ?
			  ctx->Color.LogicOp : GL_COPY)) {
      DBG("%s: blit failure\n", __FUNCTION__);
      return GL_FALSE;
   }

out:
   intel_check_front_buffer_rendering(intel);

   DBG("%s: success\n", __FUNCTION__);
   return GL_TRUE;
}
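
The two _mesa_clip_to_region() calls form a symmetric clip-and-compensate scheme: each time one rectangle is clipped, the other is shifted by the same delta so source and destination stay aligned. A self-contained sketch of that arithmetic for one axis, using a stand-in clip helper rather than the Mesa function:

#include <stdbool.h>

/* Stand-in for _mesa_clip_to_region() on one axis: clamp a span to
 * [0, max); returns false when nothing is left to copy.
 */
static bool
clip_axis(int max, int *pos, int *len)
{
   if (*pos < 0) {
      *len += *pos;
      *pos = 0;
   }
   if (*pos + *len > max)
      *len = max - *pos;
   return *len > 0;
}

/* Clip dst, shift src by however much dst moved, then do the same in
 * the other direction -- mirroring the "Adjust src coords for our
 * post-clipped destination origin" steps above.
 */
static bool
clip_copy_1d(int dst_max, int src_max, int *dstx, int *srcx, int *len)
{
   int orig_dstx = *dstx, orig_srcx;

   if (!clip_axis(dst_max, dstx, len))
      return false;
   *srcx += *dstx - orig_dstx;

   orig_srcx = *srcx;
   if (!clip_axis(src_max, srcx, len))
      return false;
   *dstx += *srcx - orig_srcx;
   return true;
}

For example, a destination clipped from x = -3 to x = 0 drags the source right by the same 3 pixels before the source itself is clipped.
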
Example #3
/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
			  GLintptr offset, GLsizeiptr length,
			  GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
	 (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
	 release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
	 obj->Pointer = intel_obj->sys_buffer + offset;
	 return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the access is synchronized (like a normal buffer mapping), then get
    * things flushed out so the later mapping syncs appropriately through GEM.
    * If the user doesn't care about existing buffer contents and mapping would
    * cause us to block, then throw out the old buffer.
    *
    * If they set INVALIDATE_BUFFER, we can pitch the current contents to
    * achieve the required synchronization.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
      if (drm_intel_bo_references(intel->batch.bo, intel_obj->buffer)) {
	 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
	    drm_intel_bo_unreference(intel_obj->buffer);
	    intel_bufferobj_alloc_buffer(intel, intel_obj);
	 } else {
            perf_debug("Stalling on the GPU for mapping a busy buffer "
                       "object\n");
	    intel_flush(ctx);
	 }
      } else if (drm_intel_bo_busy(intel_obj->buffer) &&
		 (access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
	 drm_intel_bo_unreference(intel_obj->buffer);
	 intel_bufferobj_alloc_buffer(intel, intel_obj);
      }
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
	 intel_obj->range_map_buffer = malloc(length);
	 obj->Pointer = intel_obj->range_map_buffer;
      } else {
	 intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
						      "range map",
						      length, 64);
	 if (!(access & GL_MAP_READ_BIT)) {
	    drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
	 } else {
	    drm_intel_bo_map(intel_obj->range_map_bo,
			     (access & GL_MAP_WRITE_BIT) != 0);
	 }
	 obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      drm_intel_gem_bo_map_unsynchronized(intel_obj->buffer);
   else if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
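
Seen from the application side, the three paths the comment describes are selected purely by the access bitfield handed to glMapBufferRange(). A hedged usage sketch, assuming a GL 3.0+ header or loader that declares the entry point (the helper, buffer name, and sizes are illustrative):

#include <GL/gl.h>

/* Illustrative only: stream vertex data without stalling on the GPU.
 * INVALIDATE_RANGE lets the driver hand back a scratch BO for a busy
 * buffer; UNSYNCHRONIZED additionally skips the flush/stall and makes
 * any overlap with in-flight rendering the application's problem.
 */
static void *
map_for_streaming(GLuint vbo, GLintptr offset, GLsizeiptr size)
{
   glBindBuffer(GL_ARRAY_BUFFER, vbo);
   return glMapBufferRange(GL_ARRAY_BUFFER, offset, size,
                           GL_MAP_WRITE_BIT |
                           GL_MAP_INVALIDATE_RANGE_BIT |
                           GL_MAP_UNSYNCHRONIZED_BIT);
}
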
Example #4
/**
 * \copydoc gen6_blorp_exec()
 */
void
gen7_blorp_exec(struct intel_context *intel,
                const brw_blorp_params *params)
{
   struct gl_context *ctx = &intel->ctx;
   struct brw_context *brw = brw_context(ctx);
   brw_blorp_prog_data *prog_data = NULL;
   uint32_t cc_blend_state_offset = 0;
   uint32_t cc_state_offset = 0;
   uint32_t depthstencil_offset;
   uint32_t wm_push_const_offset = 0;
   uint32_t wm_bind_bo_offset = 0;
   uint32_t sampler_offset = 0;

   uint32_t prog_offset = params->get_wm_prog(brw, &prog_data);
   gen6_blorp_emit_batch_head(brw, params);
   gen7_allocate_push_constants(brw);
   gen6_emit_3dstate_multisample(brw, params->num_samples);
   gen6_emit_3dstate_sample_mask(brw, params->num_samples, 1.0, false);
   gen6_blorp_emit_state_base_address(brw, params);
   gen6_blorp_emit_vertices(brw, params);
   gen7_blorp_emit_urb_config(brw, params);
   if (params->use_wm_prog) {
      cc_blend_state_offset = gen6_blorp_emit_blend_state(brw, params);
      cc_state_offset = gen6_blorp_emit_cc_state(brw, params);
      gen7_blorp_emit_blend_state_pointer(brw, params, cc_blend_state_offset);
      gen7_blorp_emit_cc_state_pointer(brw, params, cc_state_offset);
   }
   depthstencil_offset = gen6_blorp_emit_depth_stencil_state(brw, params);
   gen7_blorp_emit_depth_stencil_state_pointers(brw, params,
                                                depthstencil_offset);
   if (params->use_wm_prog) {
      uint32_t wm_surf_offset_renderbuffer;
      uint32_t wm_surf_offset_texture;
      wm_push_const_offset = gen6_blorp_emit_wm_constants(brw, params);
      wm_surf_offset_renderbuffer =
         gen7_blorp_emit_surface_state(brw, params, &params->dst,
                                       I915_GEM_DOMAIN_RENDER,
                                       I915_GEM_DOMAIN_RENDER,
                                       true /* is_render_target */);
      wm_surf_offset_texture =
         gen7_blorp_emit_surface_state(brw, params, &params->src,
                                       I915_GEM_DOMAIN_SAMPLER, 0,
                                       false /* is_render_target */);
      wm_bind_bo_offset =
         gen6_blorp_emit_binding_table(brw, params,
                                       wm_surf_offset_renderbuffer,
                                       wm_surf_offset_texture);
      sampler_offset = gen7_blorp_emit_sampler_state(brw, params);
   }
   gen6_blorp_emit_vs_disable(brw, params);
   gen7_blorp_emit_hs_disable(brw, params);
   gen7_blorp_emit_te_disable(brw, params);
   gen7_blorp_emit_ds_disable(brw, params);
   gen6_blorp_emit_gs_disable(brw, params);
   gen7_blorp_emit_streamout_disable(brw, params);
   gen6_blorp_emit_clip_disable(brw, params);
   gen7_blorp_emit_sf_config(brw, params);
   gen7_blorp_emit_wm_config(brw, params, prog_data);
   if (params->use_wm_prog) {
      gen7_blorp_emit_binding_table_pointers_ps(brw, params,
                                                wm_bind_bo_offset);
      gen7_blorp_emit_sampler_state_pointers_ps(brw, params, sampler_offset);
      gen7_blorp_emit_constant_ps(brw, params, wm_push_const_offset);
   }
   gen7_blorp_emit_ps_config(brw, params, prog_offset, prog_data);
   gen7_blorp_emit_cc_viewport(brw, params);

   if (params->depth.mt)
      gen7_blorp_emit_depth_stencil_config(brw, params);
   else
      gen7_blorp_emit_depth_disable(brw, params);
   gen7_blorp_emit_clear_params(brw, params);
   gen6_blorp_emit_drawing_rectangle(brw, params);
   gen7_blorp_emit_primitive(brw, params);

   /* See comments above at first invocation of intel_flush() in
    * gen6_blorp_emit_batch_head().
    */
   intel_flush(ctx);

   /* Be safe. */
   brw->state.dirty.brw = ~0;
   brw->state.dirty.cache = ~0;
}
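
The closing dirty = ~0 assignments exist because BLORP writes hardware state directly, bypassing the tracked state-atom upload path, so every atom must be re-emitted on the next regular draw. A simplified sketch of the gating that makes this necessary; the atom structure here is assumed, not copied from Mesa:

#include <stdint.h>

struct sketch_atom {
   uint32_t brw_bits;                      /* BRW_NEW_* flags it watches */
   void (*emit)(struct brw_context *brw);
};

/* An atom is skipped unless one of its dirty bits is set, so stale
 * bookkeeping after a BLORP op would silently reuse clobbered state.
 */
static void
sketch_upload_state(struct brw_context *brw,
                    const struct sketch_atom *atoms, int count)
{
   for (int i = 0; i < count; i++) {
      if (brw->state.dirty.brw & atoms[i].brw_bits)
         atoms[i].emit(brw);
   }
}
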
Example #5
/**
 * CopyPixels with the blitter.  Don't support zooming, pixel transfer, etc.
 */
static bool
do_blit_copypixels(struct gl_context * ctx,
                   GLint srcx, GLint srcy,
                   GLsizei width, GLsizei height,
                   GLint dstx, GLint dsty, GLenum type)
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct gl_framebuffer *read_fb = ctx->ReadBuffer;
   GLint orig_dstx;
   GLint orig_dsty;
   GLint orig_srcx;
   GLint orig_srcy;
   struct intel_renderbuffer *draw_irb = NULL;
   struct intel_renderbuffer *read_irb = NULL;

   /* Update draw buffer bounds */
   _mesa_update_state(ctx);

   switch (type) {
   case GL_COLOR:
      if (fb->_NumColorDrawBuffers != 1) {
	 perf_debug("glCopyPixels() fallback: MRT\n");
	 return false;
      }

      draw_irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
      read_irb = intel_renderbuffer(read_fb->_ColorReadBuffer);
      break;
   case GL_DEPTH_STENCIL_EXT:
      draw_irb = intel_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      read_irb =
	 intel_renderbuffer(read_fb->Attachment[BUFFER_DEPTH].Renderbuffer);
      break;
   case GL_DEPTH:
      perf_debug("glCopyPixels() fallback: GL_DEPTH\n");
      return false;
   case GL_STENCIL:
      perf_debug("glCopyPixels() fallback: GL_STENCIL\n");
      return false;
   default:
      perf_debug("glCopyPixels(): Unknown type\n");
      return false;
   }

   if (!draw_irb) {
      perf_debug("glCopyPixels() fallback: missing draw buffer\n");
      return false;
   }

   if (!read_irb) {
      perf_debug("glCopyPixels() fallback: missing read buffer\n");
      return false;
   }

   if (ctx->_ImageTransferState) {
      perf_debug("glCopyPixels(): Unsupported image transfer state\n");
      return false;
   }

   if (ctx->Depth.Test) {
      perf_debug("glCopyPixels(): Unsupported depth test state\n");
      return false;
   }

   if (ctx->Stencil._Enabled) {
      perf_debug("glCopyPixels(): Unsupported stencil test state\n");
      return false;
   }

   if (ctx->Fog.Enabled ||
       ctx->Texture._MaxEnabledTexImageUnit != -1 ||
       ctx->FragmentProgram._Enabled) {
      perf_debug("glCopyPixels(): Unsupported fragment shader state\n");
      return false;
   }

   if (ctx->Color.AlphaEnabled ||
       ctx->Color.BlendEnabled) {
      perf_debug("glCopyPixels(): Unsupported blend state\n");
      return false;
   }

   if (!ctx->Color.ColorMask[0][0] ||
       !ctx->Color.ColorMask[0][1] ||
       !ctx->Color.ColorMask[0][2] ||
       !ctx->Color.ColorMask[0][3]) {
      perf_debug("glCopyPixels(): Unsupported color mask state\n");
      return false;
   }

   if (ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F) {
      perf_debug("glCopyPixles(): Unsupported pixel zoom\n");
      return false;
   }

   intel_prepare_render(intel);

   intel_flush(&intel->ctx);

   /* Clip to destination buffer. */
   orig_dstx = dstx;
   orig_dsty = dsty;
   if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
			     fb->_Xmax, fb->_Ymax,
			     &dstx, &dsty, &width, &height))
      goto out;
   /* Adjust src coords for our post-clipped destination origin */
   srcx += dstx - orig_dstx;
   srcy += dsty - orig_dsty;

   /* Clip to source buffer. */
   orig_srcx = srcx;
   orig_srcy = srcy;
   if (!_mesa_clip_to_region(0, 0,
			     read_fb->Width, read_fb->Height,
			     &srcx, &srcy, &width, &height))
      goto out;
   /* Adjust dst coords for our post-clipped source origin */
   dstx += srcx - orig_srcx;
   dsty += srcy - orig_srcy;

   if (!intel_miptree_blit(intel,
                           read_irb->mt, read_irb->mt_level, read_irb->mt_layer,
                           srcx, srcy, _mesa_is_winsys_fbo(read_fb),
                           draw_irb->mt, draw_irb->mt_level, draw_irb->mt_layer,
                           dstx, dsty, _mesa_is_winsys_fbo(fb),
                           width, height,
                           (ctx->Color.ColorLogicOpEnabled ?
                            ctx->Color.LogicOp : GL_COPY))) {
      DBG("%s: blit failure\n", __func__);
      return false;
   }

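   /* The blitter bypasses the fragment pipeline, so an active occlusion
    * query would otherwise miss these samples; credit them by hand.
    */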
   if (ctx->Query.CurrentOcclusionObject)
      ctx->Query.CurrentOcclusionObject->Result += width * height;

out:
   intel_check_front_buffer_rendering(intel);

   DBG("%s: success\n", __func__);
   return true;
}
Example #6
void
intelFlush(GLcontext * ctx)
{
   intel_flush(ctx, GL_FALSE);
}
Example #7
void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   struct intel_renderbuffer *rb;
   struct intel_region *region, *depth_region;
   struct intel_context *intel = context->driverPrivate;
   struct intel_renderbuffer *front_rb, *back_rb, *depth_rb, *stencil_rb;
   __DRIbuffer *buffers = NULL;
   __DRIscreen *screen;
   int i, count;
   unsigned int attachments[10];
   const char *region_name;

   /* If we're rendering to the fake front buffer, make sure all the
    * pending drawing has landed on the real front buffer.  Otherwise
    * when we eventually get to DRI2GetBuffersWithFormat the stale
    * real front buffer contents will get copied to the new fake front
    * buffer.
    */
   if (intel->is_front_buffer_rendering)
      intel_flush(&intel->ctx, GL_FALSE);

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (INTEL_DEBUG & DEBUG_DRI)
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   screen = intel->intelScreen->driScrnPriv;

   if (screen->dri2.loader
       && (screen->dri2.loader->base.version > 2)
       && (screen->dri2.loader->getBuffersWithFormat != NULL)) {

      front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
      back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
      depth_rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
      stencil_rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);

      i = 0;
      if ((intel->is_front_buffer_rendering ||
	   intel->is_front_buffer_reading ||
	   !back_rb) && front_rb) {
	 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
	 attachments[i++] = intel_bits_per_pixel(front_rb);
      }

      if (back_rb) {
	 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
	 attachments[i++] = intel_bits_per_pixel(back_rb);
      }

      if ((depth_rb != NULL) && (stencil_rb != NULL)) {
	 attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
	 attachments[i++] = intel_bits_per_pixel(depth_rb);
      } else if (depth_rb != NULL) {
	 attachments[i++] = __DRI_BUFFER_DEPTH;
	 attachments[i++] = intel_bits_per_pixel(depth_rb);
      } else if (stencil_rb != NULL) {
	 attachments[i++] = __DRI_BUFFER_STENCIL;
	 attachments[i++] = intel_bits_per_pixel(stencil_rb);
      }

      buffers =
	 (*screen->dri2.loader->getBuffersWithFormat)(drawable,
						      &drawable->w,
						      &drawable->h,
						      attachments, i / 2,
						      &count,
						      drawable->loaderPrivate);
   } else if (screen->dri2.loader) {
      i = 0;
      if (intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT))
	 attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      if (intel_get_renderbuffer(fb, BUFFER_BACK_LEFT))
	 attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      if (intel_get_renderbuffer(fb, BUFFER_DEPTH))
	 attachments[i++] = __DRI_BUFFER_DEPTH;
      if (intel_get_renderbuffer(fb, BUFFER_STENCIL))
	 attachments[i++] = __DRI_BUFFER_STENCIL;

      buffers = (*screen->dri2.loader->getBuffers)(drawable,
						   &drawable->w,
						   &drawable->h,
						   attachments, i,
						   &count,
						   drawable->loaderPrivate);
   }

   if (buffers == NULL)
      return;

   drawable->x = 0;
   drawable->y = 0;
   drawable->backX = 0;
   drawable->backY = 0;
   drawable->numClipRects = 1;
   drawable->pClipRects[0].x1 = 0;
   drawable->pClipRects[0].y1 = 0;
   drawable->pClipRects[0].x2 = drawable->w;
   drawable->pClipRects[0].y2 = drawable->h;
   drawable->numBackClipRects = 1;
   drawable->pBackClipRects[0].x1 = 0;
   drawable->pBackClipRects[0].y1 = 0;
   drawable->pBackClipRects[0].x2 = drawable->w;
   drawable->pBackClipRects[0].y2 = drawable->h;

   depth_region = NULL;
   for (i = 0; i < count; i++) {
       switch (buffers[i].attachment) {
       case __DRI_BUFFER_FRONT_LEFT:
	   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
	   region_name = "dri2 front buffer";
	   break;

       case __DRI_BUFFER_FAKE_FRONT_LEFT:
	   rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
	   region_name = "dri2 fake front buffer";
	   break;

       case __DRI_BUFFER_BACK_LEFT:
	   rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
	   region_name = "dri2 back buffer";
	   break;

       case __DRI_BUFFER_DEPTH:
	   rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
	   region_name = "dri2 depth buffer";
	   break;

       case __DRI_BUFFER_DEPTH_STENCIL:
	   rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
	   region_name = "dri2 depth / stencil buffer";
	   break;

       case __DRI_BUFFER_STENCIL:
	   rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
	   region_name = "dri2 stencil buffer";
	   break;

       case __DRI_BUFFER_ACCUM:
       default:
	   fprintf(stderr,
		   "unhandled buffer attach event, attacment type %d\n",
		   buffers[i].attachment);
	   return;
       }

       if (rb == NULL)
	  continue;

       if (rb->region && rb->region->name == buffers[i].name)
	     continue;

       if (INTEL_DEBUG & DEBUG_DRI)
	  fprintf(stderr,
		  "attaching buffer %d, at %d, cpp %d, pitch %d\n",
		  buffers[i].name, buffers[i].attachment,
		  buffers[i].cpp, buffers[i].pitch);
       
       if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
	  if (INTEL_DEBUG & DEBUG_DRI)
	     fprintf(stderr, "(reusing depth buffer as stencil)\n");
	  intel_region_reference(&region, depth_region);
       }
       else
          region = intel_region_alloc_for_handle(intel, buffers[i].cpp,
						 drawable->w,
						 drawable->h,
						 buffers[i].pitch / buffers[i].cpp,
						 buffers[i].name,
						 region_name);

       if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
	  depth_region = region;

       intel_renderbuffer_set_region(intel, rb, region);
       intel_region_release(&region);

       if (buffers[i].attachment == __DRI_BUFFER_DEPTH_STENCIL) {
	  rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
	  if (rb != NULL) {
	     struct intel_region *stencil_region = NULL;

	     if (rb->region && rb->region->name == buffers[i].name)
		   continue;

	     intel_region_reference(&stencil_region, region);
	     intel_renderbuffer_set_region(intel, rb, stencil_region);
	     intel_region_release(&stencil_region);
	  }
       }
   }

   driUpdateFramebufferSize(&intel->ctx, drawable);
}
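
Note how the attachments array for getBuffersWithFormat() is packed as (attachment token, bits-per-pixel) pairs, which is why the request count is passed as i / 2. A minimal sketch of that packing for a double-buffered, depth-only drawable, assuming the __DRI_BUFFER_* tokens from dri_interface.h (the helper name and bpp values are illustrative):

/* Illustrative packing only: two attachment requests, four array slots. */
static int
sketch_pack_attachments(unsigned int attachments[4])
{
   int i = 0;

   attachments[i++] = __DRI_BUFFER_BACK_LEFT;
   attachments[i++] = 32;   /* back buffer bits per pixel */
   attachments[i++] = __DRI_BUFFER_DEPTH;
   attachments[i++] = 24;   /* depth buffer bits per pixel */

   return i / 2;            /* request count handed to the loader */
}
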
Example #8
/**
 * Called via glMapBufferRange and glMapBuffer
 *
 * The goal of this extension is to allow apps to accumulate their rendering
 * at the same time as they accumulate their buffer object.  Without it,
 * you'd end up blocking on execution of rendering every time you mapped
 * the buffer to put new data in.
 *
 * We support it in 3 ways: If unsynchronized, then don't bother
 * flushing the batchbuffer before mapping the buffer, which can save blocking
 * in many cases.  If we would still block, and they allow the whole buffer
 * to be invalidated, then just allocate a new buffer to replace the old one.
 * If not, and we'd block, and they allow the subrange of the buffer to be
 * invalidated, then we can make a new little BO, let them write into that,
 * and blit it into the real BO at unmap time.
 */
static void *
intel_bufferobj_map_range(struct gl_context * ctx,
			  GLintptr offset, GLsizeiptr length,
			  GLbitfield access, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
    * internally uses our functions directly.
    */
   obj->Offset = offset;
   obj->Length = length;
   obj->AccessFlags = access;

   if (intel_obj->sys_buffer) {
      const bool read_only =
	 (access & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT)) == GL_MAP_READ_BIT;

      if (!read_only && intel_obj->source)
	 release_buffer(intel_obj);

      if (!intel_obj->buffer || intel_obj->source) {
	 obj->Pointer = intel_obj->sys_buffer + offset;
	 return obj->Pointer;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* If the mapping is synchronized with other GL operations, flush
    * the batchbuffer so that GEM knows about the buffer access for later
    * syncing.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       drm_intel_bo_references(intel->batch.bo, intel_obj->buffer))
      intel_flush(ctx);

   if (intel_obj->buffer == NULL) {
      obj->Pointer = NULL;
      return NULL;
   }

   /* If the user doesn't care about existing buffer contents and mapping
    * would cause us to block, then throw out the old buffer.
    */
   if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
       (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      drm_intel_bo_unreference(intel_obj->buffer);
      intel_bufferobj_alloc_buffer(intel, intel_obj);
   }

   /* If the user is mapping a range of an active buffer object but
    * doesn't require the current contents of that range, make a new
    * BO, and we'll copy what they put in there out at unmap or
    * FlushRange time.
    */
   if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
       drm_intel_bo_busy(intel_obj->buffer)) {
      if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
	 intel_obj->range_map_buffer = malloc(length);
	 obj->Pointer = intel_obj->range_map_buffer;
      } else {
	 intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
						      "range map",
						      length, 64);
	 if (!(access & GL_MAP_READ_BIT)) {
	    drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
	    intel_obj->mapped_gtt = true;
	 } else {
	    drm_intel_bo_map(intel_obj->range_map_bo,
			     (access & GL_MAP_WRITE_BIT) != 0);
	    intel_obj->mapped_gtt = false;
	 }
	 obj->Pointer = intel_obj->range_map_bo->virtual;
      }
      return obj->Pointer;
   }

   if (!(access & GL_MAP_READ_BIT)) {
      drm_intel_gem_bo_map_gtt(intel_obj->buffer);
      intel_obj->mapped_gtt = true;
   } else {
      drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
      intel_obj->mapped_gtt = false;
   }

   obj->Pointer = intel_obj->buffer->virtual + offset;
   return obj->Pointer;
}
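
The mapped_gtt flag recorded in this version matters at unmap time, because a GTT mapping and a CPU mapping are torn down with different libdrm calls. A hedged sketch of the matching unmap step, using the public libdrm intel API (the helper itself is hypothetical):

#include <stdbool.h>
#include <intel_bufmgr.h>

/* Use the unmap call that matches how the BO was mapped. */
static void
sketch_unmap(drm_intel_bo *bo, bool mapped_gtt)
{
   if (mapped_gtt)
      drm_intel_gem_bo_unmap_gtt(bo);
   else
      drm_intel_bo_unmap(bo);
}
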
Example #9
File: gen7_hiz.c  Project: curro/mesa
/**
 * \copydoc gen6_hiz_exec()
 */
static void
gen7_hiz_exec(struct intel_context *intel,
              struct intel_mipmap_tree *mt,
              unsigned int level,
              unsigned int layer,
              enum gen6_hiz_op op)
{
   struct gl_context *ctx = &intel->ctx;
   struct brw_context *brw = brw_context(ctx);

   assert(op != GEN6_HIZ_OP_DEPTH_CLEAR); /* Not implemented yet. */
   assert(mt->hiz_mt != NULL);
   intel_miptree_check_level_layer(mt, level, layer);

   uint32_t depth_format;
   switch (mt->format) {
   case MESA_FORMAT_Z16:       depth_format = BRW_DEPTHFORMAT_D16_UNORM; break;
   case MESA_FORMAT_Z32_FLOAT: depth_format = BRW_DEPTHFORMAT_D32_FLOAT; break;
   case MESA_FORMAT_X8_Z24:    depth_format = BRW_DEPTHFORMAT_D24_UNORM_X8_UINT; break;
   default:                    assert(0); break;
   }

   gen6_hiz_emit_batch_head(brw);
   gen6_hiz_emit_vertices(brw, mt, level, layer);

   /* 3DSTATE_URB_VS
    * 3DSTATE_URB_HS
    * 3DSTATE_URB_DS
    * 3DSTATE_URB_GS
    *
    * If 3DSTATE_URB_VS is emitted, then the others must be also. From the
    * BSpec, Volume 2a "3D Pipeline Overview", Section 1.7.1 3DSTATE_URB_VS:
    *     3DSTATE_URB_HS, 3DSTATE_URB_DS, and 3DSTATE_URB_GS must also be
    *     programmed in order for the programming of this state to be
    *     valid.
    */
   {
      /* The minimum valid value is 32. See 3DSTATE_URB_VS,
       * Dword 1.15:0 "VS Number of URB Entries".
       */
      int num_vs_entries = 32;

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_URB_VS << 16 | (2 - 2));
      OUT_BATCH(1 << GEN7_URB_ENTRY_SIZE_SHIFT |
                0 << GEN7_URB_STARTING_ADDRESS_SHIFT |
                num_vs_entries);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_URB_GS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_URB_HS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_URB_DS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_DEPTH_STENCIL_STATE_POINTERS
    *
    * The offset is relative to CMD_STATE_BASE_ADDRESS.DynamicStateBaseAddress.
    */
   {
      uint32_t depthstencil_offset;
      gen6_hiz_emit_depth_stencil_state(brw, op, &depthstencil_offset);

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_DEPTH_STENCIL_STATE_POINTERS << 16 | (2 - 2));
      OUT_BATCH(depthstencil_offset | 1);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_VS
    *
    * Disable vertex shader.
    */
   {
      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_VS << 16 | (6 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_HS
    *
    * Disable the hull shader.
    */
   {
      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_HS << 16 | (7 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_TE
    *
    * Disable the tessellation engine.
    */
   {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_TE << 16 | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_DS
    *
    * Disable the domain shader.
    */
   {
      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_DS << 16 | (6 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_GS
    *
    * Disable the geometry shader.
    */
   {
      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_GS << 16 | (7 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_STREAMOUT
    *
    * Disable streamout.
    */
   {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_STREAMOUT << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_CLIP
    *
    * Disable the clipper.
    *
    * The HiZ op emits a rectangle primitive, which requires clipping to
    * be disabled. From page 10 of the Sandy Bridge PRM Volume 2 Part 1
    * Section 1.3 "3D Primitives Overview":
    *    RECTLIST:
    *    Either the CLIP unit should be DISABLED, or the CLIP unit's Clip
    *    Mode should be set to a value other than CLIPMODE_NORMAL.
    *
    * Also disable perspective divide. This doesn't change the clipper's
    * output, but does spare a few electrons.
    */
   {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_CLIP << 16 | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(GEN6_CLIP_PERSPECTIVE_DIVIDE_DISABLE);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_SF
    *
    * Disable ViewportTransformEnable (dw1.1)
    *
    * From the SandyBridge PRM, Volume 2, Part 1, Section 1.3, "3D
    * Primitives Overview":
    *     RECTLIST: Viewport Mapping must be DISABLED (as is typical with the
    *     use of screen-space coordinates).
    *
    * A solid rectangle must be rendered, so set FrontFaceFillMode (dw1.6:5)
    * and BackFaceFillMode (dw1.4:3) to SOLID(0).
    *
    * From the Sandy Bridge PRM, Volume 2, Part 1, Section
    * 6.4.1.1 3DSTATE_SF, Field FrontFaceFillMode:
    *     SOLID: Any triangle or rectangle object found to be front-facing
    *     is rendered as a solid object. This setting is required when
    *     rendering rectangle (RECTLIST) objects.
    */
   {
      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_SF << 16 | (7 - 2));
      OUT_BATCH(depth_format << GEN7_SF_DEPTH_BUFFER_SURFACE_FORMAT_SHIFT);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_SBE */
   {
      BEGIN_BATCH(14);
      OUT_BATCH(_3DSTATE_SBE << 16 | (14 - 2));
      OUT_BATCH((1 - 1) << GEN7_SBE_NUM_OUTPUTS_SHIFT | /* only position */
                1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT |
                0 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT);
      for (int i = 0; i < 12; ++i)
         OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_WM
    *
    * Disable PS thread dispatch (dw1.29) and enable the HiZ op.
    */
   {
      uint32_t dw1 = 0;

      switch (op) {
      case GEN6_HIZ_OP_DEPTH_CLEAR:
         assert(!"not implemented");
         dw1 |= GEN7_WM_DEPTH_CLEAR;
         break;
      case GEN6_HIZ_OP_DEPTH_RESOLVE:
         dw1 |= GEN7_WM_DEPTH_RESOLVE;
         break;
      case GEN6_HIZ_OP_HIZ_RESOLVE:
         dw1 |= GEN7_WM_HIERARCHICAL_DEPTH_RESOLVE;
         break;
      default:
         assert(0);
         break;
      }

      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_WM << 16 | (3 - 2));
      OUT_BATCH(dw1);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_PS
    *
    * Pixel shader dispatch is disabled above in 3DSTATE_WM, dw1.29. Despite
    * that, thread dispatch info must still be specified.
    *     - Maximum Number of Threads (dw4.24:31) must be nonzero, as the BSpec
    *       states that the valid range for this field is [0x3, 0x2f].
    *     - A dispatch mode must be given; that is, at least one of the
    *       "N Pixel Dispatch Enable" (N=8,16,32) fields must be set. This was
    *       discovered through simulator error messages.
    */
   {
      BEGIN_BATCH(8);
      OUT_BATCH(_3DSTATE_PS << 16 | (8 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(((brw->max_wm_threads - 1) << IVB_PS_MAX_THREADS_SHIFT) |
		GEN7_PS_32_DISPATCH_ENABLE);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_DEPTH_BUFFER */
   {
      uint32_t width = mt->level[level].width;
      uint32_t height = mt->level[level].height;

      uint32_t tile_x;
      uint32_t tile_y;
      uint32_t offset;
      {
         /* Construct a dummy renderbuffer just to extract tile offsets. */
         struct intel_renderbuffer rb;
         rb.mt = mt;
         rb.mt_level = level;
         rb.mt_layer = layer;
         intel_renderbuffer_set_draw_offset(&rb);
         offset = intel_renderbuffer_tile_offsets(&rb, &tile_x, &tile_y);
      }

      intel_emit_depth_stall_flushes(intel);

      BEGIN_BATCH(7);
      OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
      OUT_BATCH(((mt->region->pitch * mt->region->cpp) - 1) |
                depth_format << 18 |
                1 << 22 | /* hiz enable */
                1 << 28 | /* depth write */
                BRW_SURFACE_2D << 29);
      OUT_RELOC(mt->region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                offset);
      OUT_BATCH((width + tile_x - 1) << 4 |
                (height + tile_y - 1) << 18);
      OUT_BATCH(0);
      OUT_BATCH(tile_x |
                tile_y << 16);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_HIER_DEPTH_BUFFER */
   {
      struct intel_region *hiz_region = mt->hiz_mt->region;

      BEGIN_BATCH(3);
      OUT_BATCH((GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
      OUT_BATCH(hiz_region->pitch * hiz_region->cpp - 1);
      OUT_RELOC(hiz_region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_STENCIL_BUFFER */
   {
      BEGIN_BATCH(3);
      OUT_BATCH((GEN7_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_CLEAR_PARAMS
    *
    * From the BSpec, Volume 2a.11 Windower, Section 1.5.6.3.2
    * 3DSTATE_CLEAR_PARAMS:
    *    [DevIVB] 3DSTATE_CLEAR_PARAMS must always be programmed along with
    *    the other Depth/Stencil state commands (i.e. 3DSTATE_DEPTH_BUFFER,
    *    3DSTATE_STENCIL_BUFFER, or 3DSTATE_HIER_DEPTH_BUFFER).
    */
   {
      BEGIN_BATCH(3);
      OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_DRAWING_RECTANGLE */
   {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(((mt->level[level].width - 1) & 0xffff) |
                ((mt->level[level].height - 1) << 16));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DPRIMITIVE */
   {
      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL |
                _3DPRIM_RECTLIST);
      OUT_BATCH(3); /* vertex count per instance */
      OUT_BATCH(0);
      OUT_BATCH(1); /* instance count */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* See comments above at first invocation of intel_flush() in
    * gen6_hiz_emit_batch_head().
    */
   intel_flush(ctx);

   /* Be safe. */
   brw->state.dirty.brw = ~0;
   brw->state.dirty.cache = ~0;
}
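
Every OUT_BATCH header above uses the same GEN command encoding: the opcode sits in the high bits and the low bits carry the command length in DWords biased by two, hence the recurring (n - 2) paired with BEGIN_BATCH(n). A tiny worked sketch of that arithmetic (the helper name is illustrative):

#include <stdint.h>

/* GEN 3DSTATE header: opcode in the upper half, "DWord Length" in the
 * low bits encoded as total_dwords - 2, the bias the hardware expects.
 */
static uint32_t
sketch_cmd_header(uint32_t opcode, unsigned total_dwords)
{
   return opcode << 16 | (total_dwords - 2);
}

/* sketch_cmd_header(_3DSTATE_HS, 7) produces the same value as the
 * literal _3DSTATE_HS << 16 | (7 - 2) used in the batch above.
 */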