Example #1
static void
intel_driver_memman_init(intel_driver_t *driver)
{
  driver->bufmgr = drm_intel_bufmgr_gem_init(driver->fd, BATCH_SIZE);
  assert(driver->bufmgr);
  drm_intel_bufmgr_gem_enable_reuse(driver->bufmgr);
}
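Every example on this page follows the same basic lifecycle: open a DRM file descriptor, create a GEM buffer manager with drm_intel_bufmgr_gem_init(), enable buffer-object reuse, allocate buffer objects, and tear everything down in reverse order on exit. The sketch below shows only that lifecycle in isolation; the render-node path and the 4096-byte batch size are illustrative assumptions, not taken from any of the examples.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <intel_bufmgr.h>	/* from libdrm_intel */

int main(void)
{
	drm_intel_bufmgr *bufmgr;
	drm_intel_bo *bo;
	int fd;

	/* Hypothetical render node; real code typically gets the fd from
	 * drm_open_any(), drmOpen() or the windowing system. */
	fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The second argument is the batch size libdrm should assume. */
	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "drm_intel_bufmgr_gem_init failed\n");
		close(fd);
		return 1;
	}

	/* Keep freed buffer objects cached so later allocations can reuse them. */
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	bo = drm_intel_bo_alloc(bufmgr, "example bo", 4096, 4096);
	if (bo)
		drm_intel_bo_unreference(bo);

	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}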
Example #2
struct i915_winsys *
i915_drm_winsys_create(int drmFD)
{
   struct i915_drm_winsys *idws;
   unsigned int deviceID;

   idws = CALLOC_STRUCT(i915_drm_winsys);
   if (!idws)
      return NULL;

   i915_drm_get_device_id(drmFD, &deviceID);

   i915_drm_winsys_init_batchbuffer_functions(idws);
   i915_drm_winsys_init_buffer_functions(idws);
   i915_drm_winsys_init_fence_functions(idws);

   idws->fd = drmFD;
   idws->base.pci_id = deviceID;
   idws->max_batch_size = 16 * 4096;

   idws->base.destroy = i915_drm_winsys_destroy;

   idws->gem_manager = drm_intel_bufmgr_gem_init(idws->fd, idws->max_batch_size);
   drm_intel_bufmgr_gem_enable_reuse(idws->gem_manager);
   drm_intel_bufmgr_gem_enable_fenced_relocs(idws->gem_manager);

   idws->dump_cmd = debug_get_bool_option("I915_DUMP_CMD", FALSE);
   idws->dump_raw_file = debug_get_option("I915_DUMP_RAW_FILE", NULL);
   idws->send_cmd = !debug_get_bool_option("I915_NO_HW", FALSE);

   return &idws->base;
}
Example #3
struct brw_winsys_screen *
i965_drm_winsys_screen_create(int drmFD)
{
   struct i965_libdrm_winsys *idws;

   debug_printf("%s\n", __FUNCTION__);

   idws = CALLOC_STRUCT(i965_libdrm_winsys);
   if (!idws)
      return NULL;

   i965_libdrm_get_device_id(&idws->base.pci_id);

   i965_libdrm_winsys_init_buffer_functions(idws);

   idws->fd = drmFD;

   idws->base.destroy = i965_libdrm_winsys_destroy;

   idws->gem = drm_intel_bufmgr_gem_init(idws->fd, BRW_BATCH_SIZE);
   drm_intel_bufmgr_gem_enable_reuse(idws->gem);

   idws->send_cmd = !debug_get_bool_option("BRW_NO_HW", FALSE);

   return &idws->base;
}
Example #4
int main(int argc, char **argv)
{
	int fd;
	int devid;

	igt_skip_on_simulation();

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "not (yet) implemented for pre-snb\n");
		return 77;
	}

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		igt_fail(-1);
	}

	blt_bo = drm_intel_bo_alloc(bufmgr, "blt bo", 4*4096*4096, 4096);
	if (!blt_bo) {
		fprintf(stderr, "failed to alloc blt buffer\n");
		igt_fail(-1);
	}

	dummy_reloc_loop();

	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #5
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		exit(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);
	if (!HAS_BLT_RING(devid)) {
		fprintf(stderr, "inter ring check needs gen6+\n");
		return 77;
	}


	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		exit(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		exit(-1);
	}

	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
	if (!target_buffer) {
		fprintf(stderr, "failed to alloc target buffer\n");
		exit(-1);
	}

	store_dword_loop(I915_EXEC_RENDER);

	drm_intel_bo_unreference(target_buffer);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #6
static void setup_drm(void)
{
	int i;

	drm.fd = drm_open_driver_master(DRIVER_INTEL);

	drm.res = drmModeGetResources(drm.fd);
	igt_assert(drm.res->count_connectors <= MAX_CONNECTORS);

	for (i = 0; i < drm.res->count_connectors; i++)
		drm.connectors[i] = drmModeGetConnector(drm.fd,
						drm.res->connectors[i]);

	drm.bufmgr = drm_intel_bufmgr_gem_init(drm.fd, 4096);
	igt_assert(drm.bufmgr);
	drm_intel_bufmgr_gem_enable_reuse(drm.bufmgr);
}
Example #7
int main(int argc, char **argv)
{
	int fd;
	int object_size = OBJECT_WIDTH * OBJECT_HEIGHT * 4;
	double start_time, end_time;
	drm_intel_bo *dst_bo;
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int i;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst_bo = drm_intel_bo_alloc(bufmgr, "dst", object_size, 4096);

	/* Prep loop to get us warmed up. */
	for (i = 0; i < 60; i++) {
		do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
	}
	drm_intel_bo_wait_rendering(dst_bo);

	/* Do the actual timing. */
	start_time = get_time_in_secs();
	for (i = 0; i < 200; i++) {
		do_render(bufmgr, batch, dst_bo, OBJECT_WIDTH, OBJECT_HEIGHT);
	}
	drm_intel_bo_wait_rendering(dst_bo);
	end_time = get_time_in_secs();

	printf("%d iterations in %.03f secs: %.01f MB/sec\n", i,
	       end_time - start_time,
	       (double)i * OBJECT_WIDTH * OBJECT_HEIGHT * 4 / 1024.0 / 1024.0 /
	       (end_time - start_time));

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #8
int main(int argc, char **argv)
{
	int fd;
	int devid;

	if (argc != 1) {
		fprintf(stderr, "usage: %s\n", argv[0]);
		igt_fail(-1);
	}

	fd = drm_open_any();
	devid = intel_get_drm_devid(fd);

	if (HAS_BSD_RING(devid))
		num_rings++;

	if (HAS_BLT_RING(devid))
		num_rings++;


	printf("num rings detected: %i\n", num_rings);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		fprintf(stderr, "failed to init libdrm\n");
		igt_fail(-1);
	}
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	batch = intel_batchbuffer_alloc(bufmgr, devid);
	if (!batch) {
		fprintf(stderr, "failed to create batch buffer\n");
		igt_fail(-1);
	}

	mi_lri_loop();
	gem_quiescent_gpu(fd);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #9
int main(int argc, char **argv)
{
	int fd;
	int i;
	drm_intel_bo *src_bo, *dst_bo;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	src_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
	dst_bo = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);

	/* The ring we've been using is 128k, and each rendering op
	 * will use at least 8 dwords:
	 *
	 * BATCH_START
	 * BATCH_START offset
	 * MI_FLUSH
	 * STORE_DATA_INDEX
	 * STORE_DATA_INDEX offset
	 * STORE_DATA_INDEX value
	 * MI_USER_INTERRUPT
	 * (padding)
	 *
	 * So iterate just a little more than that -- if we don't fill the ring
	 * doing this, we aren't likely to with this test.
	 */
	for (i = 0; i < 128 * 1024 / (8 * 4) * 1.25; i++) {
		intel_copy_bo(batch, dst_bo, src_bo, width, height);
		intel_batchbuffer_flush(batch);
	}

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #10
struct intel_winsys *
intel_winsys_create_for_fd(int fd)
{
   struct intel_winsys *winsys;

   winsys = CALLOC_STRUCT(intel_winsys);
   if (!winsys)
      return NULL;

   winsys->fd = fd;

   winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, BATCH_SZ);
   if (!winsys->bufmgr) {
      debug_error("failed to create GEM buffer manager");
      FREE(winsys);
      return NULL;
   }

   pipe_mutex_init(winsys->mutex);

   if (!probe_winsys(winsys)) {
      drm_intel_bufmgr_destroy(winsys->bufmgr);
      FREE(winsys);
      return NULL;
   }

   /*
    * No need to implicitly set up a fence register for each non-linear reloc
    * entry.  When a fence register is needed for a reloc entry,
    * drm_intel_bo_emit_reloc_fence() will be called explicitly.
    *
    * intel_bo_add_reloc() currently lacks "bool fenced" for this to work.
    * But we never need a fence register on GEN4+ so we do not need to worry
    * about it yet.
    */
   drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);

   drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);

   return winsys;
}
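The comment near the end of this example distinguishes implicit from explicit fence-register setup: once drm_intel_bufmgr_gem_enable_fenced_relocs() is in effect, libdrm only reserves a fence register for relocations emitted with drm_intel_bo_emit_reloc_fence(); plain drm_intel_bo_emit_reloc() entries get none. A rough sketch of that distinction, with hypothetical buffers and offsets, might look like this:

#include <intel_bufmgr.h>
#include <i915_drm.h>

/* batch_bo: the batch being built; linear_bo: an untiled target;
 * tiled_bo: a target that would need a fence register on pre-GEN4 hardware. */
static void emit_relocs_sketch(drm_intel_bo *batch_bo,
                               drm_intel_bo *linear_bo,
                               drm_intel_bo *tiled_bo)
{
   /* Ordinary relocation: no fence register is set up for it. */
   drm_intel_bo_emit_reloc(batch_bo, 16, linear_bo, 0,
                           I915_GEM_DOMAIN_RENDER, 0);

   /* Relocation that explicitly requests a fence register. */
   drm_intel_bo_emit_reloc_fence(batch_bo, 24, tiled_bo, 0,
                                 I915_GEM_DOMAIN_RENDER,
                                 I915_GEM_DOMAIN_RENDER);
}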
Example #11
struct intel_winsys *
intel_winsys_create_for_fd(int fd)
{
   /* so that we can have enough (up to 4094) relocs per bo */
   const int batch_size = sizeof(uint32_t) * 8192;
   struct intel_winsys *winsys;

   winsys = CALLOC_STRUCT(intel_winsys);
   if (!winsys)
      return NULL;

   winsys->fd = fd;

   winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, batch_size);
   if (!winsys->bufmgr) {
      debug_error("failed to create GEM buffer manager");
      FREE(winsys);
      return NULL;
   }

   pipe_mutex_init(winsys->mutex);

   if (!probe_winsys(winsys)) {
      pipe_mutex_destroy(winsys->mutex);
      drm_intel_bufmgr_destroy(winsys->bufmgr);
      FREE(winsys);
      return NULL;
   }

   /*
    * No need to implicitly set up a fence register for each non-linear reloc
    * entry.  INTEL_RELOC_FENCE will be set on reloc entries that need them.
    */
   drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);

   drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);

   return winsys;
}
Example #12
int main(int argc, char **argv)
{
	drm_intel_bo *src;
	int fd;

	fd = drm_open_any();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	src = drm_intel_bo_alloc(bufmgr, "src", 128 * 128, 4096);

	bad_blit(src, batch->devid);

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #13
static struct pipe_screen *
intel_drm_create_screen(struct drm_api *api, int drmFD,
                        struct drm_create_screen_arg *arg)
{
   struct intel_drm_winsys *idws;
   unsigned int deviceID;

   if (arg != NULL) {
      switch(arg->mode) {
      case DRM_CREATE_NORMAL:
         break;
      default:
         return NULL;
      }
   }

   idws = CALLOC_STRUCT(intel_drm_winsys);
   if (!idws)
      return NULL;

   intel_drm_get_device_id(&deviceID);

   intel_drm_winsys_init_batchbuffer_functions(idws);
   intel_drm_winsys_init_buffer_functions(idws);
   intel_drm_winsys_init_fence_functions(idws);

   idws->fd = drmFD;
   idws->id = deviceID;
   idws->max_batch_size = 16 * 4096;

   idws->base.destroy = intel_drm_winsys_destroy;

   idws->pools.gem = drm_intel_bufmgr_gem_init(idws->fd, idws->max_batch_size);
   drm_intel_bufmgr_gem_enable_reuse(idws->pools.gem);

   idws->dump_cmd = debug_get_bool_option("INTEL_DUMP_CMD", FALSE);

   return i915_create_screen(&idws->base, deviceID);
}
Example #14
static void init(void)
{
	int i;
	unsigned tmp;

	if (options.num_buffers == 0) {
		tmp = gem_aperture_size(drm_fd);
		tmp = tmp > 256*(1024*1024) ? 256*(1024*1024) : tmp;
		num_buffers = 2 * tmp / options.scratch_buf_size / 3;
		num_buffers /= 2;
		printf("using %u buffers\n", num_buffers);
	} else
		num_buffers = options.num_buffers;

	bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
	num_fences = get_num_fences();
	batch = intel_batchbuffer_alloc(bufmgr, devid);

	busy_bo = drm_intel_bo_alloc(bufmgr, "tiled bo", BUSY_BUF_SIZE, 4096);
	if (options.forced_tiling >= 0) {
		tmp = options.forced_tiling;
		set_tiling(busy_bo, &tmp, 4096);
		assert(tmp == options.forced_tiling);
	}

	for (i = 0; i < num_buffers; i++) {
		init_buffer(&buffers[0][i], options.scratch_buf_size);
		init_buffer(&buffers[1][i], options.scratch_buf_size);

		num_total_tiles += buffers[0][i].num_tiles;
	}
	current_set = 0;

	/* just in case it helps reproducibility */
	srandom(0xdeadbeef);
}
Example #15
int main(int argc, char **argv)
{
	drm_intel_bo *bo[4096];
	uint32_t bo_start_val[4096];
	uint32_t start = 0;
	int fd, i, count;

	igt_skip_on_simulation();

	fd = drm_open_any();
	count = 3 * gem_aperture_size(fd) / (1024*1024) / 2;
	if (count > intel_get_total_ram_mb() * 9 / 10) {
		count = intel_get_total_ram_mb() * 9 / 10;
		printf("not enough RAM to run test, reducing buffer count\n");
	}
	count |= 1;
	printf("Using %d 1MiB buffers\n", count);

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	for (i = 0; i < count; i++) {
		bo[i] = create_bo(fd, start);
		bo_start_val[i] = start;

		/*
		printf("Creating bo %d\n", i);
		check_bo(bo[i], bo_start_val[i]);
		*/

		start += 1024 * 1024 / 4;
	}

	for (i = 0; i < count; i++) {
		int src = count - i - 1;
		intel_copy_bo(batch, bo[i], bo[src], width, height);
		bo_start_val[i] = bo_start_val[src];
	}

	for (i = 0; i < count * 4; i++) {
		int src = random() % count;
		int dst = random() % count;

		if (src == dst)
			continue;

		intel_copy_bo(batch, bo[dst], bo[src], width, height);
		bo_start_val[dst] = bo_start_val[src];

		/*
		check_bo(bo[dst], bo_start_val[dst]);
		printf("%d: copy bo %d to %d\n", i, src, dst);
		*/
	}

	for (i = 0; i < count; i++) {
		/*
		printf("check %d\n", i);
		*/
		check_bo(fd, bo[i], bo_start_val[i]);

		drm_intel_bo_unreference(bo[i]);
		bo[i] = NULL;
	}

	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);

	close(fd);

	return 0;
}
Example #16
/*
* Function: XvMCCreateContext
* Description: Create an XvMC context for the given surface parameters.
* Arguments:
*   display - Connection to the X server.
*   port - XvPortID to use as advertised by the X connection.
*   surface_type_id - Unique identifier for the Surface type.
*   width - Width of the surfaces.
*   height - Height of the surfaces.
*   flags - one or more of the following
*      XVMC_DIRECT - A direct rendered context is requested.
*
* Notes: surface_type_id and width/height parameters must match those
*        returned by XvMCListSurfaceTypes.
* Returns: Status
*/
_X_EXPORT Status XvMCCreateContext(Display * display, XvPortID port,
				   int surface_type_id, int width, int height,
				   int flags, XvMCContext * context)
{
	Status ret;
	CARD32 *priv_data = NULL;
	struct intel_xvmc_hw_context *comm;
	int major, minor;
	int error_base;
	int event_base;
	int priv_count;

	/* Verify Obvious things first */
	if (!display || !context)
		return BadValue;

	if (!(flags & XVMC_DIRECT)) {
		XVMC_ERR("Indirect Rendering not supported! Using Direct.");
		return BadValue;
	}

	/*
	   Width, Height, and flags are checked against surface_type_id
	   and port for validity inside the X server, no need to check
	   here.
	 */
	context->surface_type_id = surface_type_id;
	context->width = (unsigned short)((width + 15) & ~15);
	context->height = (unsigned short)((height + 15) & ~15);
	context->flags = flags;
	context->port = port;

	if (!XvMCQueryExtension(display, &event_base, &error_base)) {
		XVMC_ERR("XvMCExtension is not available!");
		return BadValue;
	}

	ret = XvMCQueryVersion(display, &major, &minor);
	if (ret) {
		XVMC_ERR
		    ("XvMCQueryVersion Failed, unable to determine protocol version.");
		return ret;
	}

	/* XXX: major and minor could be checked in future for XvMC
	 * protocol capability (i.e H.264/AVC decode available)
	 */

	/*
	   Pass control to the X server to create a drm_context_t for us and
	   validate the width/height and flags.
	 */
	if ((ret =
	     _xvmc_create_context(display, context, &priv_count, &priv_data))) {
		XVMC_ERR("Unable to create XvMC Context.");
		return ret;
	}

	comm = (struct intel_xvmc_hw_context *)priv_data;

	if (xvmc_driver == NULL || xvmc_driver->type != comm->type) {
		switch (comm->type) {
		case XVMC_I915_MPEG2_MC:
			xvmc_driver = &i915_xvmc_mc_driver;
			break;
		case XVMC_I965_MPEG2_MC:
			xvmc_driver = &i965_xvmc_mc_driver;
			break;
		case XVMC_I965_MPEG2_VLD:
			xvmc_driver = &xvmc_vld_driver;
			break;
		case XVMC_I945_MPEG2_VLD:
		default:
			XVMC_ERR("unimplemented xvmc type %d", comm->type);
			XFree(priv_data);
			priv_data = NULL;
			return BadValue;
		}
	}

	if (xvmc_driver == NULL || xvmc_driver->type != comm->type) {
		XVMC_ERR("fail to load xvmc driver for type %d\n", comm->type);
		return BadValue;
	}

	XVMC_INFO("decoder type is %s", intel_xvmc_decoder_string(comm->type));

	/* check DRI2 */
	ret = Success;
	xvmc_driver->fd = -1;

	ret = dri2_connect(display);
	if (ret != Success) {
		XFree(priv_data);
		context->privData = NULL;
		if (xvmc_driver->fd >= 0)
			close(xvmc_driver->fd);
		xvmc_driver = NULL;
		return ret;
	}

	if ((xvmc_driver->bufmgr =
	     intel_bufmgr_gem_init(xvmc_driver->fd, 1024 * 64)) == NULL) {
		XVMC_ERR("Can't init bufmgr\n");
		return BadAlloc;
	}
	drm_intel_bufmgr_gem_enable_reuse(xvmc_driver->bufmgr);

	/* Call the driver hook; on success the hook should free priv_data. */
	ret =
	    (xvmc_driver->create_context) (display, context, priv_count,
					   priv_data);
	if (ret) {
		XVMC_ERR("driver create context failed\n");
		XFree(priv_data);
		context->privData = NULL;
		xvmc_driver = NULL;
		return ret;
	}

	pthread_mutex_init(&xvmc_driver->ctxmutex, NULL);
	intelInitBatchBuffer();
	intel_xvmc_dump_open();

	return Success;
}
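For reference, a minimal caller of XvMCCreateContext() looks roughly like the sketch below. The port and surface_type_id are assumed to have been discovered beforehand (e.g. via XvQueryAdaptors() and XvMCListSurfaceTypes()), and the 720x576 size is purely illustrative.

#include <X11/Xlib.h>
#include <X11/extensions/XvMC.h>
#include <X11/extensions/XvMClib.h>

/* dpy, port and surface_type_id must come from prior Xv/XvMC queries. */
static Status create_mpeg2_context(Display *dpy, XvPortID port,
				   int surface_type_id, XvMCContext *ctx)
{
	/* Request a direct-rendered context; width and height are rounded
	 * up to 16-pixel macroblock alignment inside XvMCCreateContext(). */
	return XvMCCreateContext(dpy, port, surface_type_id,
				 720, 576, XVMC_DIRECT, ctx);
}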
Example #17
static void render_timeout(int fd)
{
	drm_intel_bufmgr *bufmgr;
	struct intel_batchbuffer *batch;
	int64_t timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	int64_t negative_timeout = -1;
	int ret;
	const bool do_signals = true; /* signals will seem to make the operation
				       * use less process CPU time */
	bool done = false;
	int i, iter = 1;

	igt_skip_on_simulation();

	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));

	dst = drm_intel_bo_alloc(bufmgr, "dst", BUF_SIZE, 4096);
	dst2 = drm_intel_bo_alloc(bufmgr, "dst2", BUF_SIZE, 4096);

	igt_skip_on_f(gem_bo_wait_timeout(fd, dst->handle, &timeout) == -EINVAL,
		      "kernel doesn't support wait_timeout, skipping test\n");
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;

	/* Figure out a rough number of fills required to consume 1 second of
	 * GPU work.
	 */
	do {
		struct timespec start, end;
		long diff;

#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
#endif

		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &start) == 0);
		for (i = 0; i < iter; i++)
			blt_color_fill(batch, dst, BUF_PAGES);
		intel_batchbuffer_flush(batch);
		drm_intel_bo_wait_rendering(dst);
		igt_assert(clock_gettime(CLOCK_MONOTONIC_RAW, &end) == 0);

		diff = do_time_diff(&end, &start);
		igt_assert(diff >= 0);

		if ((diff / MSEC_PER_SEC) > ENOUGH_WORK_IN_SECONDS)
			done = true;
		else
			iter <<= 1;
	} while (!done && iter < 1000000);

	igt_assert_lt(iter, 1000000);

	igt_info("%d iters is enough work\n", iter);
	gem_quiescent_gpu(fd);
	if (do_signals)
		igt_fork_signal_helper();

	/* We should be able to do half as much work in the same amount of time,
	 * but because we might schedule almost twice as much as required, we
	 * might accidentally time out. Hence add some fudge. */
	for (i = 0; i < iter/3; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);
	igt_assert_neq(timeout, 0);
	if (timeout ==  (ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC))
		igt_info("Buffer was already done!\n");
	else {
		igt_info("Finished with %" PRIu64 " time remaining\n", timeout);
	}

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), 0);
	igt_assert_eq(timeout, 0);

	/* Now check that we correctly time out, twice the auto-tune load should
	 * be good enough. */
	timeout = ENOUGH_WORK_IN_SECONDS * NSEC_PER_SEC;
	for (i = 0; i < iter*2; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	ret = gem_bo_wait_timeout(fd, dst2->handle, &timeout);
	igt_assert_eq(ret, -ETIME);
	igt_assert_eq(timeout, 0);
	igt_assert(gem_bo_busy(fd, dst2->handle) == true);

	/* check that polling with timeout=0 works. */
	timeout = 0;
	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &timeout), -ETIME);
	igt_assert_eq(timeout, 0);


	/* Now check that we can pass negative (infinite) timeouts. */
	negative_timeout = -1;
	for (i = 0; i < iter; i++)
		blt_color_fill(batch, dst2, BUF_PAGES);

	intel_batchbuffer_flush(batch);

	igt_assert_eq(gem_bo_wait_timeout(fd, dst2->handle, &negative_timeout), 0);
	igt_assert_eq(negative_timeout, -1); /* infinity always remains */
	igt_assert(gem_bo_busy(fd, dst2->handle) == false);

	if (do_signals)
		igt_stop_signal_helper();
	drm_intel_bo_unreference(dst2);
	drm_intel_bo_unreference(dst);
	intel_batchbuffer_free(batch);
	drm_intel_bufmgr_destroy(bufmgr);
}