Example #1
static void test_nv_i915_reimport_twice_check_flink_name(void)
{
    drm_intel_bo *intel_bo = NULL, *intel_bo2 = NULL;
    int prime_fd;
    struct nouveau_bo *nvbo = NULL;
    uint32_t flink_name1, flink_name2;

    igt_assert(nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                              0, BO_SIZE, NULL, &nvbo) == 0);

    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
    igt_assert(intel_bo);
    close(prime_fd);
    igt_assert(nouveau_bo_set_prime(nvbo, &prime_fd) == 0);

    intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd, BO_SIZE);
    igt_assert(intel_bo2);
    close(prime_fd);

    igt_assert(drm_intel_bo_flink(intel_bo, &flink_name1) == 0);
    igt_assert(drm_intel_bo_flink(intel_bo2, &flink_name2) == 0);

    igt_assert_eq_u32(flink_name1, flink_name2);

    nouveau_bo_ref(NULL, &nvbo);
    drm_intel_bo_unreference(intel_bo);
    drm_intel_bo_unreference(intel_bo2);
}
Example #2
struct intel_region *
intel_region_alloc_for_fd(struct intel_screen *screen,
                          GLuint cpp,
                          GLuint width, GLuint height, GLuint pitch,
                          GLuint size,
                          int fd, const char *name)
{
   struct intel_region *region;
   drm_intel_bo *buffer;
   int ret;
   uint32_t bit_6_swizzle, tiling;

   buffer = drm_intel_bo_gem_create_from_prime(screen->bufmgr, fd, size);
   if (buffer == NULL)
      return NULL;
   ret = drm_intel_bo_get_tiling(buffer, &tiling, &bit_6_swizzle);
   if (ret != 0) {
      fprintf(stderr, "Couldn't get tiling of buffer (%s): %s\n",
	      name, strerror(-ret));
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   region = intel_region_alloc_internal(screen, cpp,
					width, height, pitch, tiling, buffer);
   if (region == NULL) {
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   return region;
}
Example #3
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct brw_context *brw =
      (struct brw_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &brw->ctx;

   assert(brw); /* should never be null */
   if (!brw)
      return;

   /* Dump a final BMP in case the application doesn't call SwapBuffers */
   if (INTEL_DEBUG & DEBUG_AUB) {
      intel_batchbuffer_flush(brw);
      aub_dump_bmp(&brw->ctx);
   }

   _mesa_meta_free(&brw->ctx);

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      /* Force a report. */
      brw->shader_time.report_time = 0;

      brw_collect_and_report_shader_time(brw);
      brw_destroy_shader_time(brw);
   }

   brw_destroy_state(brw);
   brw_draw_destroy(brw);

   drm_intel_bo_unreference(brw->curbe.curbe_bo);

   free(brw->curbe.last_buf);
   free(brw->curbe.next_buf);

   drm_intel_gem_context_destroy(brw->hw_ctx);

   if (ctx->swrast_context) {
      _swsetup_DestroyContext(&brw->ctx);
      _tnl_DestroyContext(&brw->ctx);
   }
   _vbo_DestroyContext(&brw->ctx);

   if (ctx->swrast_context)
      _swrast_DestroyContext(&brw->ctx);

   intel_batchbuffer_free(brw);

   drm_intel_bo_unreference(brw->first_post_swapbuffers_batch);
   brw->first_post_swapbuffers_batch = NULL;

   driDestroyOptionCache(&brw->optionCache);

   /* free the Mesa context */
   _mesa_free_context_data(&brw->ctx);

   ralloc_free(brw);
   driContextPriv->driverPrivate = NULL;
}
Example #4
void
intel_batchbuffer_free(struct intel_context *intel)
{
   drm_intel_bo_unreference(intel->batch.last_bo);
   drm_intel_bo_unreference(intel->batch.bo);
   drm_intel_bo_unreference(intel->batch.workaround_bo);
   clear_cache(intel);
}
Example #5
void
brw_upload_vec4_pull_constants(struct brw_context *brw,
                               GLbitfield brw_new_constbuf,
                               const struct gl_program *prog,
                               struct brw_stage_state *stage_state,
                               const struct brw_vec4_prog_data *prog_data)
{
   int i;
   uint32_t surf_index = prog_data->base.binding_table.pull_constants_start;

   /* Updates the ParameterValues[i] pointers for all parameters of the
    * basic type of PROGRAM_STATE_VAR.
    */
   _mesa_load_state_parameters(&brw->ctx, prog->Parameters);

   if (!prog_data->nr_pull_params) {
      if (stage_state->const_bo) {
	 drm_intel_bo_unreference(stage_state->const_bo);
	 stage_state->const_bo = NULL;
	 stage_state->surf_offset[surf_index] = 0;
	 brw->state.dirty.brw |= brw_new_constbuf;
      }
      return;
   }

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_bo_unreference(stage_state->const_bo);
   uint32_t size = prog_data->nr_pull_params * 4;
   stage_state->const_bo = drm_intel_bo_alloc(brw->bufmgr, "vec4_const_buffer",
                                           size, 64);

   drm_intel_gem_bo_map_gtt(stage_state->const_bo);

   for (i = 0; i < prog_data->nr_pull_params; i++) {
      memcpy(stage_state->const_bo->virtual + i * 4,
	     prog_data->pull_param[i],
	     4);
   }

   if (0) {
      for (i = 0; i < ALIGN(prog_data->nr_pull_params, 4) / 4; i++) {
	 float *row = (float *)stage_state->const_bo->virtual + i * 4;
	 printf("const surface %3d: %4.3f %4.3f %4.3f %4.3f\n",
		i, row[0], row[1], row[2], row[3]);
      }
   }

   drm_intel_gem_bo_unmap_gtt(stage_state->const_bo);

   brw_create_constant_surface(brw, stage_state->const_bo, 0, size,
                               &stage_state->surf_offset[surf_index],
                               false);

   brw->state.dirty.brw |= brw_new_constbuf;
}
Example #6
void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &intel->ctx;

   assert(intel);               /* should never be null */
   if (intel) {
      INTEL_FIREVERTICES(intel);

      /* Dump a final BMP in case the application doesn't call SwapBuffers */
      if (INTEL_DEBUG & DEBUG_AUB) {
         intel_batchbuffer_flush(intel);
	 aub_dump_bmp(&intel->ctx);
      }

      _mesa_meta_free(&intel->ctx);

      intel->vtbl.destroy(intel);

      if (ctx->swrast_context) {
         _swsetup_DestroyContext(&intel->ctx);
         _tnl_DestroyContext(&intel->ctx);
      }
      _vbo_DestroyContext(&intel->ctx);

      if (ctx->swrast_context)
         _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0x0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel);

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      drm_intel_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      _math_matrix_dtr(&intel->ViewportMatrix);

      ralloc_free(intel);
      driContextPriv->driverPrivate = NULL;
   }
}
Example #7
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
	drm_intel_bo_unreference(batch->bo);
	batch->bo = NULL;
	free(batch);
}
Example #8
/**
 * Interface for getting memory for uploading streamed data to the GPU
 *
 * In most cases, streamed data (for GPU state structures, for example) is
 * uploaded through brw_state_batch(), since that interface allows relocations
 * from the streamed space returned to other BOs.  However, that interface has
 * the restriction that the amount of space allocated has to be "small" (see
 * estimated_max_prim_size in brw_draw.c).
 *
 * This interface, on the other hand, is able to handle arbitrary sized
 * allocation requests, though it will batch small allocations into the same
 * BO for efficiency and reduced memory footprint.
 *
 * \note The returned pointer is valid only until intel_upload_finish(), which
 * will happen at batch flush or the next
 * intel_upload_space()/intel_upload_data().
 *
 * \param out_bo Pointer to a BO, which must point to a valid BO or NULL on
 * entry, and will have a reference to the new BO containing the state on
 * return.
 *
 * \param out_offset Offset within the buffer object at which the data will land.
 */
void *
intel_upload_space(struct brw_context *brw,
                   uint32_t size,
                   uint32_t alignment,
                   drm_intel_bo **out_bo,
                   uint32_t *out_offset)
{
   uint32_t offset;

   offset = ALIGN_NPOT(brw->upload.next_offset, alignment);
   if (brw->upload.bo && offset + size > brw->upload.bo->size) {
      intel_upload_finish(brw);
      offset = 0;
   }

   if (!brw->upload.bo) {
      brw->upload.bo = drm_intel_bo_alloc(brw->bufmgr, "streamed data",
                                          MAX2(INTEL_UPLOAD_SIZE, size), 4096);
      if (brw->has_llc)
         drm_intel_bo_map(brw->upload.bo, true);
      else
         drm_intel_gem_bo_map_gtt(brw->upload.bo);
   }

   brw->upload.next_offset = offset + size;

   *out_offset = offset;
   if (*out_bo != brw->upload.bo) {
      drm_intel_bo_unreference(*out_bo);
      *out_bo = brw->upload.bo;
      drm_intel_bo_reference(brw->upload.bo);
   }

   return brw->upload.bo->virtual + offset;
}
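The doc comment above describes a reference-handover protocol: *out_bo must start out as NULL or as a BO returned by an earlier call, and intel_upload_space() swaps it for a reference to the BO that actually backs the returned pointer. Below is a minimal, hypothetical caller sketch showing how that protocol is typically used; upload_example() and its variables are illustrative only and are not part of the Mesa sources.

/* Hypothetical caller sketch (not from the Mesa tree): stream a small blob of
 * data through the shared upload BO using the out_bo/out_offset protocol
 * documented above.
 */
static void
upload_example(struct brw_context *brw, const void *data, uint32_t size)
{
   static drm_intel_bo *bo = NULL;   /* must be NULL or a BO returned earlier */
   static uint32_t offset = 0;

   void *dst = intel_upload_space(brw, size, 64, &bo, &offset);
   memcpy(dst, data, size);          /* pointer valid only until the next
                                      * upload call or intel_upload_finish() */

   /* "bo" now holds a reference to the upload BO and "offset" is where the
    * data landed; drop it with drm_intel_bo_unreference(bo) when it is no
    * longer needed.
    */
}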
Example #9
/**
 * Return true if the function successfully signals or has already signalled.
 * (This matches the behavior expected from __DRI2fence::client_wait_sync).
 */
static bool
brw_fence_client_wait(struct brw_context *brw, struct brw_fence *fence,
                      uint64_t timeout)
{
   if (fence->signalled)
      return true;

   assert(fence->batch_bo);

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and returns
    * immediately for timeouts <= 0.  The best we can do is to clamp the
    * timeout to INT64_MAX.  This limits the maximum timeout from 584 years to
    * 292 years - likely not a big deal.
    */
   if (timeout > INT64_MAX)
      timeout = INT64_MAX;

   if (drm_intel_gem_bo_wait(fence->batch_bo, timeout) != 0)
      return false;

   fence->signalled = true;
   drm_intel_bo_unreference(fence->batch_bo);
   fence->batch_bo = NULL;

   return true;
}
Example #10
static int test2(void)
{
	drm_intel_bo *test_intel_bo;
	uint32_t fb_id;
	drmModeClip clip;
	int prime_fd;
	uint32_t udl_handle;
	int ret;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = drmPrimeFDToHandle(udl_fd, prime_fd, &udl_handle);
	if (ret)
		goto out;

	ret = drmModeAddFB(udl_fd, 640, 480, 16, 16, 640, udl_handle, &fb_id);
	if (ret)
		goto out;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = 10;
	clip.y2 = 10;
	ret = drmModeDirtyFB(udl_fd, fb_id, &clip, 1);
	if (ret)
		goto out;
out:
	dumb_bo_destroy(udl_fd, udl_handle);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}
Example #11
/* Export a handle from the intel driver, reimport it into another intel driver
   bufmgr, and see if we get the same object. */
static void test_i915_self_import_to_different_fd(void)
{
    drm_intel_bo *test_intel_bo, *test_intel_bo2;
    int prime_fd;

    test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

    drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

    test_intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd, BO_SIZE);
    close(prime_fd);
    igt_assert(test_intel_bo2);

    drm_intel_bo_unreference(test_intel_bo2);
    drm_intel_bo_unreference(test_intel_bo);
}
Example #12
static void test_i915_nv_reimport_twice_check_flink_name(void)
{
    drm_intel_bo *test_intel_bo;
    int prime_fd;
    struct nouveau_bo *nvbo = NULL, *nvbo2 = NULL;
    uint32_t flink_name1, flink_name2;

    test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);

    /* create a new dma-buf */
    close(prime_fd);
    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    igt_assert(nouveau_bo_name_get(nvbo, &flink_name1) == 0);
    igt_assert(nouveau_bo_name_get(nvbo2, &flink_name2) == 0);

    igt_assert_eq_u32(flink_name1, flink_name2);

    nouveau_bo_ref(NULL, &nvbo2);
    nouveau_bo_ref(NULL, &nvbo);
    drm_intel_bo_unreference(test_intel_bo);
}
Example #13
struct intel_bo *
intel_winsys_alloc_texture(struct intel_winsys *winsys,
                           const char *name,
                           int width, int height, int cpp,
                           enum intel_tiling_mode tiling,
                           uint32_t initial_domain,
                           unsigned long *pitch)
{
   const unsigned long flags =
      (initial_domain & (INTEL_DOMAIN_RENDER | INTEL_DOMAIN_INSTRUCTION)) ?
      BO_ALLOC_FOR_RENDER : 0;
   uint32_t real_tiling = tiling;
   drm_intel_bo *bo;

   bo = drm_intel_bo_alloc_tiled(winsys->bufmgr, name,
         width, height, cpp, &real_tiling, pitch, flags);
   if (!bo)
      return NULL;

   if (real_tiling != tiling) {
      assert(!"tiling mismatch");
      drm_intel_bo_unreference(bo);
      return NULL;
   }

   return (struct intel_bo *) bo;
}
Example #14
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers)
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   free(intel_obj->sys_buffer);
   if (intel_obj->region) {
      intel_bufferobj_release_region(intel, intel_obj);
   }
   else if (intel_obj->buffer) {
      drm_intel_bo_unreference(intel_obj->buffer);
   }

   free(intel_obj);
}
Example #15
static void *
drmmode_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
{
	ScrnInfoPtr scrn = crtc->scrn;
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
	drmmode_ptr drmmode = drmmode_crtc->drmmode;
	unsigned long rotate_pitch;
	uint32_t tiling;
	int ret;

	drmmode_crtc->rotate_bo = intel_allocate_framebuffer(scrn,
							     width, height,
							     drmmode->cpp,
							     &rotate_pitch,
							     &tiling);

	if (!drmmode_crtc->rotate_bo) {
		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
			   "Couldn't allocate shadow memory for rotated CRTC\n");
		return NULL;
	}

	ret = drmModeAddFB(drmmode->fd, width, height, crtc->scrn->depth,
			   crtc->scrn->bitsPerPixel, rotate_pitch,
			   drmmode_crtc->rotate_bo->handle,
			   &drmmode_crtc->rotate_fb_id);
	if (ret) {
		ErrorF("failed to add rotate fb\n");
		drm_intel_bo_unreference(drmmode_crtc->rotate_bo);
		return NULL;
	}

	drmmode_crtc->rotate_pitch = rotate_pitch;
	return drmmode_crtc->rotate_bo;
}
Example #16
struct intel_region *
intel_region_alloc(struct intel_screen *screen,
		   uint32_t tiling,
                   GLuint cpp, GLuint width, GLuint height,
		   bool expect_accelerated_upload)
{
   drm_intel_bo *buffer;
   unsigned long flags = 0;
   unsigned long aligned_pitch;
   struct intel_region *region;

   if (expect_accelerated_upload)
      flags |= BO_ALLOC_FOR_RENDER;

   buffer = drm_intel_bo_alloc_tiled(screen->bufmgr, "region",
				     width, height, cpp,
				     &tiling, &aligned_pitch, flags);
   if (buffer == NULL)
      return NULL;

   region = intel_region_alloc_internal(screen, cpp, width, height,
                                        aligned_pitch, tiling, buffer);
   if (region == NULL) {
      drm_intel_bo_unreference(buffer);
      return NULL;
   }

   return region;
}
Example #17
static void
intel_glFlush(GLcontext *ctx)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush(ctx, GL_TRUE);

   /* We're using glFlush as an indicator that a frame is done, which is
    * what DRI2 does before calling SwapBuffers (and means we should catch
    * people doing front-buffer rendering, as well).
    *
    * Wait for the swapbuffers before the one we just emitted, so we don't
    * get too many swaps outstanding for apps that are GPU-heavy but not
    * CPU-heavy.
    *
    * Unfortunately, we don't have a handle to the batch containing the swap,
    * and getting our hands on that doesn't seem worth it, so we just use the
    * first batch we emitted after the last swap.
    */
   if (/* !intel->using_dri2_swapbuffers && */
       intel->first_post_swapbuffers_batch != NULL) {
      drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;
   }
}
Example #18
/**
 * Allocates a block of space in the batchbuffer for indirect state.
 *
 * We don't want to allocate separate BOs for every bit of indirect
 * state in the driver.  It means overallocating by a significant
 * margin (4096 bytes, even if the object is just a 20-byte surface
 * state), and more buffers to walk and count for aperture size checking.
 *
 * However, due to the restrictions imposed by the aperture size
 * checking performance hacks, we can't have the batch point at a
 * separate indirect state buffer, because once the batch points at
 * it, no more relocations can be added to it.  So, we sneak these
 * buffers in at the top of the batchbuffer.
 */
void *
brw_state_batch(struct brw_context *brw,
		int size,
		int alignment,
		drm_intel_bo **out_bo,
		uint32_t *out_offset)
{
   struct intel_batchbuffer *batch = brw->intel.batch;
   uint32_t offset;

   assert(size < batch->buf->size);
   offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);

   /* If allocating from the top would wrap below the batchbuffer, or
    * if the batch's used space (plus the reserved pad) collides with our
    * space, then flush and try again.
    */
   if (batch->state_batch_offset < size ||
       offset < batch->ptr - batch->map + batch->reserved_space) {
      intel_batchbuffer_flush(batch);
      offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
   }

   batch->state_batch_offset = offset;

   if (*out_bo != batch->buf) {
      drm_intel_bo_unreference(*out_bo);
      drm_intel_bo_reference(batch->buf);
      *out_bo = batch->buf;
   }

   *out_offset = offset;
   return batch->map + offset;
}
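Because brw_state_batch() hands back both a CPU pointer into the batch and a BO/offset pair through out_bo/out_offset, a typical caller fills in the state through the pointer and then emits a relocation against *out_bo at *out_offset. The sketch below is hypothetical and only illustrates that flow; emit_indirect_state_example() and its parameter names are not taken from the driver.

/* Hypothetical caller sketch (not from the Mesa tree): carve out a small,
 * 64-byte-aligned indirect state block at the top of the current batch.
 */
static void
emit_indirect_state_example(struct brw_context *brw,
                            drm_intel_bo **state_bo, uint32_t *state_offset)
{
   /* *state_bo follows the same handover protocol as intel_upload_space():
    * it must be NULL or a BO returned by a previous call.
    */
   uint32_t *state = brw_state_batch(brw, 4 * sizeof(uint32_t), 64,
                                     state_bo, state_offset);

   /* Fill in the indirect state; the batch can then point at it through a
    * relocation against *state_bo at *state_offset.
    */
   state[0] = state[1] = state[2] = state[3] = 0;
}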
Example #19
static void brw_merge_inputs( struct brw_context *brw,
		       const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
	 brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
	    ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}
Example #20
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
				   GLintptr offset, GLsizeiptr length,
				   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
			  intel_obj->buffer, obj->Offset + offset,
			  temp_bo, 0,
			  length);

   drm_intel_bo_unreference(temp_bo);
}
Example #21
void
brw_delete_transform_feedback(struct gl_context *ctx,
                              struct gl_transform_feedback_object *obj)
{
   struct brw_transform_feedback_object *brw_obj =
      (struct brw_transform_feedback_object *) obj;

   for (unsigned i = 0; i < ARRAY_SIZE(obj->Buffers); i++) {
      _mesa_reference_buffer_object(ctx, &obj->Buffers[i], NULL);
   }

   drm_intel_bo_unreference(brw_obj->offset_bo);
   drm_intel_bo_unreference(brw_obj->prim_count_bo);

   free(brw_obj);
}
Example #22
void intelFiniBatchBuffer(void)
{
	if (xvmc_driver->batch.buf == NULL)
		return;

	drm_intel_bo_unreference(xvmc_driver->batch.buf);
}
Example #23
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
   drm_intel_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
   intel_obj->offset = 0;
   intel_obj->source = 0;
}
Example #24
void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   if (brw->vb.upload.bo != NULL) {
      drm_intel_bo_unreference(brw->vb.upload.bo);
      brw->vb.upload.bo = NULL;
   }

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      drm_intel_bo_unreference(brw->vb.inputs[i].bo);
      brw->vb.inputs[i].bo = NULL;
   }

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}
Example #25
static void
brw_fence_finish(struct brw_fence *fence)
{
   if (fence->batch_bo)
      drm_intel_bo_unreference(fence->batch_bo);

   mtx_destroy(&fence->mutex);
}
Example #26
static void
intel_delete_sync_object(GLcontext *ctx, struct gl_sync_object *s)
{
   struct intel_sync_object *sync = (struct intel_sync_object *)s;

   drm_intel_bo_unreference(sync->bo);
   _mesa_free(sync);
}
Example #27
static void
intelReleaseBuffer(__DRIscreen *screen, __DRIbuffer *buffer)
{
   struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;

   drm_intel_bo_unreference(intelBuffer->bo);
   free(intelBuffer);
}
Example #28
static void
brw_merge_inputs(struct brw_context *brw,
                 const struct gl_client_array *arrays[])
{
   const struct gl_context *ctx = &brw->ctx;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
   }

   if (brw->gen < 8 && !brw->is_haswell) {
      struct gl_program *vp = &ctx->VertexProgram._Current->Base;
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
         if (!(vp->InputsRead & BITFIELD64_BIT(i)))
            continue;

         uint8_t wa_flags = 0;

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }

         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
            brw->vb.attrib_wa_flags[i] = wa_flags;
            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
         }
      }
   }
}
Example #29
void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}
Example #30
static void intel_check_sync(GLcontext *ctx, struct gl_sync_object *s)
{
   struct intel_sync_object *sync = (struct intel_sync_object *)s;

   if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
      drm_intel_bo_unreference(sync->bo);
      sync->bo = NULL;
      s->StatusFlag = 1;
   }
}