int main(int argc, char **argv)
{
	int fd;
	int devid;

	igt_skip_on_simulation();

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "not (yet) implemented for pre-snb\n");
		return 77;
	}

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		igt_fail(-1);
	}

	blt_bo = drm_intel_bo_alloc(bufmgr, "blt bo", 4*4096*4096, 4096);
	if (!blt_bo) {
		fprintf(stderr, "failed to alloc blt buffer\n");
		igt_fail(-1);
	}

	dummy_reloc_loop();

	drm_intel_bo_unreference(blt_bo);
	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #2
/* Creates a new VS constant buffer reflecting the current VS program's
 * constants, if needed by the VS program.
 *
 * Otherwise, constants go through the CURBEs using the brw_constant_buffer
 * state atom.
 */
static void
brw_upload_vs_pull_constants(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   /* BRW_NEW_VERTEX_PROGRAM */
   struct brw_vertex_program *vp =
      (struct brw_vertex_program *) brw->vertex_program;
   const struct gl_program_parameter_list *params = vp->program.Base.Parameters;
   int i;

   if (vp->program.IsNVProgram)
      _mesa_load_tracked_matrices(ctx);

   /* Updates the ParameterValues[i] pointers for all parameters of the
    * basic type of PROGRAM_STATE_VAR.
    */
   _mesa_load_state_parameters(&brw->intel.ctx, vp->program.Base.Parameters);

   /* CACHE_NEW_VS_PROG */
   if (!brw->vs.prog_data->nr_pull_params) {
      if (brw->vs.const_bo) {
	 drm_intel_bo_unreference(brw->vs.const_bo);
	 brw->vs.const_bo = NULL;
	 brw->bind.surf_offset[SURF_INDEX_VERT_CONST_BUFFER] = 0;
	 brw->state.dirty.brw |= BRW_NEW_VS_CONSTBUF;
      }
      return;
   }

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_bo_unreference(brw->vs.const_bo);
   brw->vs.const_bo = drm_intel_bo_alloc(intel->bufmgr, "vp_const_buffer",
					 brw->vs.prog_data->nr_pull_params * 4,
					 64);

   drm_intel_gem_bo_map_gtt(brw->vs.const_bo);
   for (i = 0; i < brw->vs.prog_data->nr_pull_params; i++) {
      memcpy(brw->vs.const_bo->virtual + i * 4,
	     brw->vs.prog_data->pull_param[i],
	     4);
   }

   if (0) {
      for (i = 0; i < params->NumParameters; i++) {
	 float *row = (float *)brw->vs.const_bo->virtual + i * 4;
	 printf("vs const surface %3d: %4.3f %4.3f %4.3f %4.3f\n",
		i, row[0], row[1], row[2], row[3]);
      }
   }

   drm_intel_gem_bo_unmap_gtt(brw->vs.const_bo);

   const int surf = SURF_INDEX_VERT_CONST_BUFFER;
   intel->vtbl.create_constant_surface(brw, brw->vs.const_bo,
				       params->NumParameters,
				       &brw->bind.surf_offset[surf]);

   brw->state.dirty.brw |= BRW_NEW_VS_CONSTBUF;
}
Example #3
/**
 * Interface for getting memory for uploading streamed data to the GPU
 *
 * In most cases, streamed data (for GPU state structures, for example) is
 * uploaded through brw_state_batch(), since that interface allows relocations
 * from the streamed space returned to other BOs.  However, that interface has
 * the restriction that the amount of space allocated has to be "small" (see
 * estimated_max_prim_size in brw_draw.c).
 *
 * This interface, on the other hand, is able to handle arbitrary sized
 * allocation requests, though it will batch small allocations into the same
 * BO for efficiency and reduced memory footprint.
 *
 * \note The returned pointer is valid only until intel_upload_finish(), which
 * will happen at batch flush or the next
 * intel_upload_space()/intel_upload_data().
 *
 * \param out_bo Pointer to a BO, which must point to a valid BO or NULL on
 * entry, and will have a reference to the new BO containing the state on
 * return.
 *
 * \param out_offset Offset within the buffer object that the data will land.
 */
void *
intel_upload_space(struct brw_context *brw,
                   uint32_t size,
                   uint32_t alignment,
                   drm_intel_bo **out_bo,
                   uint32_t *out_offset)
{
   uint32_t offset;

   offset = ALIGN_NPOT(brw->upload.next_offset, alignment);
   if (brw->upload.bo && offset + size > brw->upload.bo->size) {
      intel_upload_finish(brw);
      offset = 0;
   }

   if (!brw->upload.bo) {
      brw->upload.bo = drm_intel_bo_alloc(brw->bufmgr, "streamed data",
                                          MAX2(INTEL_UPLOAD_SIZE, size), 4096);
      if (brw->has_llc)
         drm_intel_bo_map(brw->upload.bo, true);
      else
         drm_intel_gem_bo_map_gtt(brw->upload.bo);
   }

   brw->upload.next_offset = offset + size;

   *out_offset = offset;
   if (*out_bo != brw->upload.bo) {
      drm_intel_bo_unreference(*out_bo);
      *out_bo = brw->upload.bo;
      drm_intel_bo_reference(brw->upload.bo);
   }

   return brw->upload.bo->virtual + offset;
}
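A minimal sketch of how a caller might use intel_upload_space(); the helper name below is made up and the relocation step is elided, since it depends on the surrounding state-emission code:

static void
upload_stream_example(struct brw_context *brw, const void *data, uint32_t size)
{
   drm_intel_bo *bo = NULL;
   uint32_t offset = 0;

   /* Reserve `size` bytes in the shared upload BO, 64-byte aligned. */
   void *dst = intel_upload_space(brw, size, 64, &bo, &offset);
   memcpy(dst, data, size);

   /* ... emit a relocation pointing at (bo, offset) here ... */

   /* intel_upload_space() handed us a reference on the BO; drop it once the
    * relocation holds its own reference. */
   drm_intel_bo_unreference(bo);
}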
Example #4
static void test_i915_nv_reimport_twice_check_flink_name(void)
{
    drm_intel_bo *test_intel_bo;
    int prime_fd;
    struct nouveau_bo *nvbo = NULL, *nvbo2 = NULL;
    uint32_t flink_name1, flink_name2;

    test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo) == 0);

    /* create a new dma-buf */
    close(prime_fd);
    igt_assert(drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd) == 0);

    igt_assert(nouveau_bo_prime_handle_ref(ndev2, prime_fd, &nvbo2) == 0);
    close(prime_fd);

    igt_assert(nouveau_bo_name_get(nvbo, &flink_name1) == 0);
    igt_assert(nouveau_bo_name_get(nvbo2, &flink_name2) == 0);

    igt_assert_eq_u32(flink_name1, flink_name2);

    nouveau_bo_ref(NULL, &nvbo2);
    nouveau_bo_ref(NULL, &nvbo);
    drm_intel_bo_unreference(test_intel_bo);
}
/* Ideally we'd use a BO to avoid taking up cache space for the temporary
 * data, but FlushMappedBufferRange may be followed by further writes to
 * the pointer, so we would have to re-map after emitting our blit, which
 * would defeat the point.
 */
static void
intel_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
				   GLintptr offset, GLsizeiptr length,
				   struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   drm_intel_bo *temp_bo;

   /* Unless we're in the range map using a temporary system buffer,
    * there's no work to do.
    */
   if (intel_obj->range_map_buffer == NULL)
      return;

   temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);

   drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);

   intel_emit_linear_blit(intel,
			  intel_obj->buffer, obj->Offset + offset,
			  temp_bo, 0,
			  length);

   drm_intel_bo_unreference(temp_bo);
}
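For context, a hedged sketch of the GL client calls that reach this path: mapping with GL_MAP_FLUSH_EXPLICIT_BIT can put the driver into the temporary-system-buffer range map, and each glFlushMappedBufferRange() then lands in the function above. The buffer and sizes are arbitrary illustration values, and a GL 3.0-capable context/loader is assumed.

#include <GL/gl.h>
#include <string.h>

static void update_subrange(GLuint buf, const void *data, GLsizeiptr len)
{
   glBindBuffer(GL_ARRAY_BUFFER, buf);

   void *ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, len,
                                GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
   memcpy(ptr, data, len);

   /* This is the call that ends up in intel_bufferobj_flush_mapped_range(). */
   glFlushMappedBufferRange(GL_ARRAY_BUFFER, 0, len);
   glUnmapBuffer(GL_ARRAY_BUFFER);
}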
Example #6
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
intel_bufferobj_subdata(struct gl_context * ctx,
                        GLintptrARB offset,
                        GLsizeiptrARB size,
                        const GLvoid * data, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
   bool busy;

   if (size == 0)
      return;

   assert(intel_obj);

   /* If we have a single copy in system memory, update that */
   if (intel_obj->sys_buffer) {
      if (intel_obj->source)
	 release_buffer(intel_obj);

      if (intel_obj->buffer == NULL) {
	 memcpy((char *)intel_obj->sys_buffer + offset, data, size);
	 return;
      }

      free(intel_obj->sys_buffer);
      intel_obj->sys_buffer = NULL;
   }

   /* Otherwise we need to update the copy in video memory. */
   busy =
      drm_intel_bo_busy(intel_obj->buffer) ||
      drm_intel_bo_references(intel->batch.bo, intel_obj->buffer);

   if (busy) {
      if (size == intel_obj->Base.Size) {
	 /* Replace the current busy bo with fresh data. */
	 drm_intel_bo_unreference(intel_obj->buffer);
	 intel_bufferobj_alloc_buffer(intel, intel_obj);
	 drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
      } else {
         perf_debug("Using a blit copy to avoid stalling on %ldb "
                    "glBufferSubData() to a busy buffer object.\n",
                    (long)size);
	 drm_intel_bo *temp_bo =
	    drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);

	 drm_intel_bo_subdata(temp_bo, 0, size, data);

	 intel_emit_linear_blit(intel,
				intel_obj->buffer, offset,
				temp_bo, 0,
				size);

	 drm_intel_bo_unreference(temp_bo);
      }
   } else {
      drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
   }
}
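Both branches above (full replacement of a busy BO vs. a blit from a temporary BO) are driven by ordinary glBufferSubData() calls; a hedged client-side sketch, with arbitrary sizes and a buffer assumed to have already been created with glBufferData():

#include <GL/gl.h>

static void patch_vertex_buffer(GLuint buf, const char *src,
                                GLsizeiptr full_size,
                                GLintptr patch_off, GLsizeiptr patch_len)
{
   glBindBuffer(GL_ARRAY_BUFFER, buf);

   /* Full-size update of a busy BO: the driver drops the old BO and
    * uploads into a fresh one. */
   glBufferSubData(GL_ARRAY_BUFFER, 0, full_size, src);

   /* Partial update of a (possibly still busy) BO: the driver stages the
    * bytes in a temporary BO and blits them in to avoid a stall. */
   glBufferSubData(GL_ARRAY_BUFFER, patch_off, patch_len, src + patch_off);
}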
Example #7
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
                             struct intel_buffer_object *intel_obj)
{
    intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
                                           intel_obj->Base.Size, 64);
}
Example #8
static struct i915_winsys_buffer *
i915_drm_buffer_create(struct i915_winsys *iws,
                        unsigned size,
                        enum i915_winsys_buffer_type type)
{
   struct i915_drm_buffer *buf = CALLOC_STRUCT(i915_drm_buffer);
   struct i915_drm_winsys *idws = i915_drm_winsys(iws);

   if (!buf)
      return NULL;

   buf->magic = 0xDEAD1337;
   buf->flinked = FALSE;
   buf->flink = 0;

   buf->bo = drm_intel_bo_alloc(idws->gem_manager,
                                i915_drm_type_to_name(type), size, 0);

   if (!buf->bo)
      goto err;

   return (struct i915_winsys_buffer *)buf;

err:
   assert(0);
   FREE(buf);
   return NULL;
}
Example #9
static int test2(void)
{
	drm_intel_bo *test_intel_bo;
	uint32_t fb_id;
	drmModeClip clip;
	int prime_fd;
	uint32_t udl_handle;
	int ret;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = drmPrimeFDToHandle(udl_fd, prime_fd, &udl_handle);
	if (ret)
		goto out;

	ret = drmModeAddFB(udl_fd, 640, 480, 16, 16, 640, udl_handle, &fb_id);
	if (ret)
		goto out;

	clip.x1 = 0;
	clip.y1 = 0;
	clip.x2 = 10;
	clip.y2 = 10;
	ret = drmModeDirtyFB(udl_fd, fb_id, &clip, 1);
	if (ret)
		goto out;
out:
	dumb_bo_destroy(udl_fd, udl_handle);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}
/*
 * Function: XvMCCreateSurface
 */
_X_EXPORT Status XvMCCreateSurface(Display * display, XvMCContext * context,
				   XvMCSurface * surface)
{
	Status ret;
	int priv_count;
	CARD32 *priv_data;
	intel_xvmc_surface_ptr intel_surf = NULL;
	struct intel_xvmc_context *intel_ctx;

	if (!display || !context)
		return XvMCBadContext;

	if (!surface)
		return XvMCBadSurface;

	intel_ctx = context->privData;

	if ((ret = _xvmc_create_surface(display, context, surface,
					&priv_count, &priv_data))) {
		XVMC_ERR("Unable to create XvMCSurface.");
		return ret;
	}

	XFree(priv_data);

	surface->privData = calloc(1, sizeof(struct intel_xvmc_surface));

	if (!(intel_surf = surface->privData)) {
		PPTHREAD_MUTEX_UNLOCK();
		return BadAlloc;
	}

	intel_surf->bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
					      "surface",
					      intel_ctx->surface_bo_size,
					      GTT_PAGE_SIZE);
	if (!intel_surf->bo) {
		free(intel_surf);
		return BadAlloc;
	}

	drm_intel_bo_disable_reuse(intel_surf->bo);

	intel_surf = surface->privData;
	intel_surf->context = context;

	intel_surf->image = XvCreateImage(display, context->port,
					  FOURCC_XVMC,
					  (char *) &intel_surf->gem_handle,
					  surface->width, surface->height);
	if (!intel_surf->image) {
		XVMC_ERR("Can't create XvImage for surface\n");
		free(intel_surf);
		_xvmc_destroy_surface(display, surface);
		return BadAlloc;
	}

	return Success;
}
Example #11
struct gl_transform_feedback_object *
brw_new_transform_feedback(struct gl_context *ctx, GLuint name)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_transform_feedback_object *brw_obj =
      CALLOC_STRUCT(brw_transform_feedback_object);
   if (!brw_obj)
      return NULL;

   _mesa_init_transform_feedback_object(&brw_obj->base, name);

   brw_obj->offset_bo =
      drm_intel_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
   brw_obj->prim_count_bo =
      drm_intel_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);

   return &brw_obj->base;
}
Example #12
static void wrap_buffers(struct intel_context *intel, GLuint size)
{
   intel_upload_finish(intel);

   if (size < INTEL_UPLOAD_SIZE)
      size = INTEL_UPLOAD_SIZE;

   intel->upload.bo = drm_intel_bo_alloc(intel->bufmgr, "upload", size, 0);
   intel->upload.offset = 0;
}
static void init_buffer(drm_intel_bufmgr *bufmgr,
			struct scratch_buf *buf,
			uint32_t size)
{
	buf->bo = drm_intel_bo_alloc(bufmgr, "", size, 4096);
	buf->size = size;
	assert(buf->bo);
	buf->tiling = I915_TILING_NONE;
	buf->stride = 4096;
}
Example #14
void
brw_upload_vec4_pull_constants(struct brw_context *brw,
                               GLbitfield brw_new_constbuf,
                               const struct gl_program *prog,
                               struct brw_stage_state *stage_state,
                               const struct brw_vec4_prog_data *prog_data)
{
   int i;
   uint32_t surf_index = prog_data->base.binding_table.pull_constants_start;

    * Updates the ParameterValues[i] pointers for all parameters of the
    * basic type of PROGRAM_STATE_VAR.
    */
   _mesa_load_state_parameters(&brw->ctx, prog->Parameters);

   if (!prog_data->nr_pull_params) {
      if (stage_state->const_bo) {
	 drm_intel_bo_unreference(stage_state->const_bo);
	 stage_state->const_bo = NULL;
	 stage_state->surf_offset[surf_index] = 0;
	 brw->state.dirty.brw |= brw_new_constbuf;
      }
      return;
   }

   /* _NEW_PROGRAM_CONSTANTS */
   drm_intel_bo_unreference(stage_state->const_bo);
   uint32_t size = prog_data->nr_pull_params * 4;
   stage_state->const_bo = drm_intel_bo_alloc(brw->bufmgr, "vec4_const_buffer",
                                           size, 64);

   drm_intel_gem_bo_map_gtt(stage_state->const_bo);

   for (i = 0; i < prog_data->nr_pull_params; i++) {
      memcpy(stage_state->const_bo->virtual + i * 4,
	     prog_data->pull_param[i],
	     4);
   }

   if (0) {
      for (i = 0; i < ALIGN(prog_data->nr_pull_params, 4) / 4; i++) {
	 float *row = (float *)stage_state->const_bo->virtual + i * 4;
	 printf("const surface %3d: %4.3f %4.3f %4.3f %4.3f\n",
		i, row[0], row[1], row[2], row[3]);
      }
   }

   drm_intel_gem_bo_unmap_gtt(stage_state->const_bo);

   brw_create_constant_surface(brw, stage_state->const_bo, 0, size,
                               &stage_state->surf_offset[surf_index],
                               false);

   brw->state.dirty.brw |= brw_new_constbuf;
}
Example #15
int main(int argc, char **argv)
{
	int fd;
	int i;
	drm_intel_bo *src_bo, *dst_bo;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	src_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
	dst_bo = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);

	/* The ring we've been using is 128k, and each rendering op
	 * will use at least 8 dwords:
	 *
	 * BATCH_START
	 * BATCH_START offset
	 * MI_FLUSH
	 * STORE_DATA_INDEX
	 * STORE_DATA_INDEX offset
	 * STORE_DATA_INDEX value
	 * MI_USER_INTERRUPT
	 * (padding)
	 *
	 * So iterate just a little more than that -- if we don't fill the ring
	 * doing this, we aren't likely to with this test.
	 */
	for (i = 0; i < 128 * 1024 / (8 * 4) * 1.25; i++) {
		intel_copy_bo(batch, dst_bo, src_bo, width, height);
		intel_batchbuffer_flush(batch);
	}

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
int main(int argc, char **argv)
{
	int fd, i;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	/* don't enable buffer reuse!! */
	//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
	assert(batch);

	/* put some load onto the gpu to keep the light buffers active for long
	 * enough */
	for (i = 0; i < 1000; i++) {
		load_bo = drm_intel_bo_alloc(bufmgr, "target bo", 1024*4096, 4096);
		if (!load_bo) {
			fprintf(stderr, "failed to alloc target buffer\n");
			exit(-1);
		}

		BEGIN_BATCH(8);
		OUT_BATCH(XY_SRC_COPY_BLT_CMD |
			  XY_SRC_COPY_BLT_WRITE_ALPHA |
			  XY_SRC_COPY_BLT_WRITE_RGB);
		OUT_BATCH((3 << 24) | /* 32 bits */
			  (0xcc << 16) | /* copy ROP */
			  4096);
		OUT_BATCH(0); /* dst x1,y1 */
		OUT_BATCH((1024 << 16) | 512);
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
		OUT_BATCH((0 << 16) | 512); /* src x1, y1 */
		OUT_BATCH(4096);
		OUT_RELOC(load_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
		ADVANCE_BATCH();

		intel_batchbuffer_flush(batch);

		drm_intel_bo_disable_reuse(load_bo);
		drm_intel_bo_unreference(load_bo);
	}

	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #17
drm_intel_bo *intel_allocate_framebuffer(ScrnInfoPtr scrn,
					 int width, int height, int cpp,
					 int *out_stride,
					 uint32_t *out_tiling)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);
	uint32_t tiling;
	int stride, size;
	drm_intel_bo *bo;

	intel_set_gem_max_sizes(scrn);

	if (intel->tiling & INTEL_TILING_FB)
		tiling = I915_TILING_X;
	else
		tiling = I915_TILING_NONE;

retry:
	size = intel_compute_size(intel,
                                  width, height,
                                  intel->cpp*8, 0,
                                  &tiling, &stride);
	if (!intel_check_display_stride(scrn, stride, tiling)) {
		if (tiling != I915_TILING_NONE) {
			tiling = I915_TILING_NONE;
			goto retry;
		}

		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
			   "Front buffer stride %d kB "
			   "exceeds display limit\n", stride / 1024);
		return NULL;
	}

	bo = drm_intel_bo_alloc(intel->bufmgr, "front buffer", size, 0);
	if (bo == NULL)
		return NULL;

	if (tiling != I915_TILING_NONE)
		drm_intel_bo_set_tiling(bo, &tiling, stride);

	xf86DrvMsg(scrn->scrnIndex, X_INFO,
		   "Allocated new frame buffer %dx%d stride %d, %s\n",
		   width, height, stride,
		   tiling == I915_TILING_NONE ? "untiled" : "tiled");

	drm_intel_bo_disable_reuse(bo);

	*out_stride = stride;
	*out_tiling = tiling;
	return bo;
}
Example #18
struct intel_bo *
intel_winsys_alloc_bo(struct intel_winsys *winsys,
                      const char *name,
                      enum intel_tiling_mode tiling,
                      unsigned long pitch,
                      unsigned long height,
                      bool cpu_init)
{
   const unsigned int alignment = 4096; /* always page-aligned */
   unsigned long size;
   drm_intel_bo *bo;

   switch (tiling) {
   case INTEL_TILING_X:
      if (pitch % 512)
         return NULL;
      break;
   case INTEL_TILING_Y:
      if (pitch % 128)
         return NULL;
      break;
   default:
      break;
   }

   if (pitch > ULONG_MAX / height)
      return NULL;

   size = pitch * height;

   if (cpu_init) {
      bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
   }
   else {
      bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
            name, size, alignment);
   }

   if (bo && tiling != INTEL_TILING_NONE) {
      uint32_t real_tiling = tiling;
      int err;

      err = drm_intel_bo_set_tiling(bo, &real_tiling, pitch);
      if (err || real_tiling != tiling) {
         assert(!"tiling mismatch");
         drm_intel_bo_unreference(bo);
         return NULL;
      }
   }

   return (struct intel_bo *) bo;
}
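A hedged caller sketch for the allocator above; the dimensions are invented, and the only real constraint it demonstrates is the X-tiling pitch rule checked at the top of the function (pitch must be a multiple of 512 bytes):

/* Sketch: allocate a 1024x768, 4-byte-per-pixel, X-tiled color buffer.
 * `winsys` is assumed to come from the usual winsys creation path. */
static struct intel_bo *
alloc_xtiled_color_bo(struct intel_winsys *winsys)
{
   const unsigned long width = 1024, height = 768, cpp = 4;

   /* Round the pitch up to the 512-byte multiple required for X tiling. */
   unsigned long pitch = (width * cpp + 511) & ~511UL;   /* 4096 here */

   return intel_winsys_alloc_bo(winsys, "color buffer",
                                INTEL_TILING_X, pitch, height,
                                false /* no CPU init; the GPU writes it */);
}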
Example #19
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct brw_context *brw,
			     struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(brw->bufmgr, "bufferobj",
					  intel_obj->Base.Size, 64);

   /* The buffer might be bound as a uniform buffer, so flag the uniform
    * state as dirty.
    */
   brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;

   intel_bufferobj_mark_inactive(intel_obj);
}
static int
batch_next(struct intel_info *info)
{
	info->cur = info->batch;

	if (info->batch_ibo)
		drm_intel_bo_unreference(info->batch_ibo);

	info->batch_ibo = drm_intel_bo_alloc(info->bufmgr,
			"gralloc-batchbuffer", info->size, 4096);

	return (info->batch_ibo) ? 0 : -ENOMEM;
}
Example #21
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
	if (batch->bo != NULL) {
		drm_intel_bo_unreference(batch->bo);
		batch->bo = NULL;
	}

	batch->bo = drm_intel_bo_alloc(batch->bufmgr, "batchbuffer",
				       BATCH_SZ, 4096);

	batch->ptr = batch->buffer;
}
Example #22
/**
 * Enable hardware binding tables and set up the binding table pool.
 */
void
gen7_enable_hw_binding_tables(struct brw_context *brw)
{
   if (!brw->use_resource_streamer)
      return;

   if (!brw->hw_bt_pool.bo) {
      /* We use a single re-usable buffer object for the lifetime of the
       * context and size it to maximum allowed binding tables that can be
       * programmed per batch:
       *
       * From the Haswell PRM, Volume 7: 3D Media GPGPU,
       * 3DSTATE_BINDING_TABLE_POOL_ALLOC > Programming Note:
       * "A maximum of 16,383 Binding tables are allowed in any batch buffer"
       */
      static const int max_size = 16383 * 4;
      brw->hw_bt_pool.bo = drm_intel_bo_alloc(brw->bufmgr, "hw_bt",
                                              max_size, 64);
      brw->hw_bt_pool.next_offset = 0;
   }

   /* From the Haswell PRM, Volume 7: 3D Media GPGPU,
    * 3DSTATE_BINDING_TABLE_POOL_ALLOC > Programming Note:
    *
    * "When switching between HW and SW binding table generation, SW must
    * issue a state cache invalidate."
    */
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_STATE_CACHE_INVALIDATE);

   int pkt_len = brw->gen >= 8 ? 4 : 3;
   uint32_t dw1 = BRW_HW_BINDING_TABLE_ENABLE;
   if (brw->is_haswell) {
      dw1 |= SET_FIELD(GEN7_MOCS_L3, GEN7_HW_BT_POOL_MOCS) |
             HSW_BT_POOL_ALLOC_MUST_BE_ONE;
   } else if (brw->gen >= 8) {
      dw1 |= BDW_MOCS_WB;
   }

   BEGIN_BATCH(pkt_len);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POOL_ALLOC << 16 | (pkt_len - 2));
   if (brw->gen >= 8) {
      OUT_RELOC64(brw->hw_bt_pool.bo, I915_GEM_DOMAIN_SAMPLER, 0, dw1);
      OUT_BATCH(brw->hw_bt_pool.bo->size);
   } else {
      OUT_RELOC(brw->hw_bt_pool.bo, I915_GEM_DOMAIN_SAMPLER, 0, dw1);
      OUT_RELOC(brw->hw_bt_pool.bo, I915_GEM_DOMAIN_SAMPLER, 0,
             brw->hw_bt_pool.bo->size);
   }
   ADVANCE_BATCH();
}
Example #23
struct intel_region *
intel_region_alloc(struct intel_context *intel,
		   uint32_t tiling,
                   GLuint cpp, GLuint width, GLuint height, GLuint pitch,
		   GLboolean expect_accelerated_upload)
{
   dri_bo *buffer;
   struct intel_region *region;

   /* If we're tiled, our allocations are in 8 or 32-row blocks, so
    * failure to align our height means that we won't allocate enough pages.
    *
    * If we're untiled, we still have to align to 2 rows high because the
    * data port accesses 2x2 blocks even if the bottom row isn't to be
    * rendered, so failure to align means we could walk off the end of the
    * GTT and fault.
    */
   if (tiling == I915_TILING_X)
      height = ALIGN(height, 8);
   else if (tiling == I915_TILING_Y)
      height = ALIGN(height, 32);
   else
      height = ALIGN(height, 2);

   if (expect_accelerated_upload) {
      buffer = drm_intel_bo_alloc_for_render(intel->bufmgr, "region",
					     pitch * cpp * height, 64);
   } else {
      buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
				  pitch * cpp * height, 64);
   }

   region = intel_region_alloc_internal(intel, cpp, width, height,
					pitch, buffer);

   if (tiling != I915_TILING_NONE) {
      assert(((pitch * cpp) & 127) == 0);
      drm_intel_bo_set_tiling(buffer, &tiling, pitch * cpp);
      drm_intel_bo_get_tiling(buffer, &region->tiling, &region->bit_6_swizzle);
   }

   return region;
}
void
brw_get_scratch_bo(struct brw_context *brw,
		   drm_intel_bo **scratch_bo, int size)
{
   drm_intel_bo *old_bo = *scratch_bo;

   if (old_bo && old_bo->size < size) {
      drm_intel_bo_unreference(old_bo);
      old_bo = NULL;
   }

   if (!old_bo) {
      *scratch_bo = drm_intel_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
   }
}
Example #25
void
intel_batchbuffer_init(struct intel_context *intel)
{
   intel_batchbuffer_reset(intel);

   if (intel->gen >= 6) {
      /* We can't just use brw_state_batch to get a chunk of space for
       * the gen6 workaround because it involves actually writing to
       * the buffer, and the kernel doesn't let us write to the batch.
       */
      intel->batch.workaround_bo = drm_intel_bo_alloc(intel->bufmgr,
						      "pipe_control workaround",
						      4096, 4096);
   }
}
Example #26
/* Export a handle from the intel driver, re-import it through another intel
   driver bufmgr, and see if we get the same object. */
static void test_i915_self_import_to_different_fd(void)
{
    drm_intel_bo *test_intel_bo, *test_intel_bo2;
    int prime_fd;

    test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

    drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

    test_intel_bo2 = drm_intel_bo_gem_create_from_prime(bufmgr2, prime_fd, BO_SIZE);
    close(prime_fd);
    igt_assert(test_intel_bo2);

    drm_intel_bo_unreference(test_intel_bo2);
    drm_intel_bo_unreference(test_intel_bo);
}
Example #27
/** Allocates a new drm_intel_bo to store the data for the buffer object. */
static void
intel_bufferobj_alloc_buffer(struct intel_context *intel,
			     struct intel_buffer_object *intel_obj)
{
   intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
					  intel_obj->Base.Size, 64);

#ifndef I915
   /* The buffer might be bound as a uniform buffer, so flag the uniform
    * state as dirty.
    */
   {
      struct brw_context *brw = brw_context(&intel->ctx);
      brw->state.dirty.brw |= BRW_NEW_UNIFORM_BUFFER;
   }
#endif
}
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		exit(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "inter ring check needs gen6+\n");
		return 77;
	}


	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		exit(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		exit(-1);
	}

	store_dword_loop(I915_EXEC_RENDER);

	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #29
Bool intelInitBatchBuffer(void)
{
	if ((xvmc_driver->batch.buf =
	     drm_intel_bo_alloc(xvmc_driver->bufmgr,
				"batch buffer", BATCH_SIZE, 0x1000)) == NULL) {
		fprintf(stderr, "unable to alloc batch buffer\n");
		return False;
	}

	if (drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf)) {
		drm_intel_bo_unreference(xvmc_driver->batch.buf);
		return False;
	}

	reset_batch();
	return True;
}
Example #30
static void
intel_batchbuffer_reset(struct brw_context *brw)
{
   if (brw->batch.last_bo != NULL) {
      drm_intel_bo_unreference(brw->batch.last_bo);
      brw->batch.last_bo = NULL;
   }
   brw->batch.last_bo = brw->batch.bo;

   brw_render_cache_set_clear(brw);

   brw->batch.bo = drm_intel_bo_alloc(brw->bufmgr, "batchbuffer",
					BATCH_SZ, 4096);
   if (brw->has_llc) {
      drm_intel_bo_map(brw->batch.bo, true);
      brw->batch.map = brw->batch.bo->virtual;
   }