static void test_swapping_evictions(int fd, int size, int count)
{
	int trash_count;

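	/* Scale the object count to total RAM so the working set exceeds
	 * memory and evictions spill out to swap. */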
	trash_count = intel_get_total_ram_mb() * 11 / 10;
	intel_require_memory(trash_count, size, CHECK_RAM | CHECK_SWAP);

	swapping_evictions(fd, &fault_ops, size, count, trash_count);
}

static void test_forking_evictions(int fd, int size, int count,
					unsigned flags)
{
	int trash_count;

	trash_count = intel_get_total_ram_mb() * 11 / 10;
	intel_require_memory(trash_count, size, CHECK_RAM | CHECK_SWAP);

	forking_evictions(fd, &fault_ops, size, count, trash_count, flags);
}
Example No. 3
static void
test_huge_bo(int fd, int huge, int tiling)
{
	uint64_t huge_object_size, last_offset;
	char *ptr_cpu;
	char *cpu_pattern;
	uint32_t bo;
	int i;

	switch (huge) {
	case -1:
		huge_object_size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		huge_object_size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		huge_object_size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(1, huge_object_size, CHECK_RAM);

	last_offset = huge_object_size - PAGE_SIZE;

	cpu_pattern = malloc(PAGE_SIZE);
	igt_assert(cpu_pattern);
	for (i = 0; i < PAGE_SIZE; i++)
		cpu_pattern[i] = i;

	bo = gem_create(fd, huge_object_size);

	/* Obtain CPU mapping for the object. */
	ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
				PROT_READ | PROT_WRITE);
	igt_assert(ptr_cpu);
	gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
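	/* The CPU mmap keeps the object's backing pages alive on its own,
	 * so the handle can be closed already. */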
	gem_close(fd, bo);

	/* Write first page through the mapping and assert reading it back
	 * works. */
	memcpy(ptr_cpu, cpu_pattern, PAGE_SIZE);
	igt_assert(memcmp(ptr_cpu, cpu_pattern, PAGE_SIZE) == 0);

	/* Write last page through the mapping and assert reading it back
	 * works. */
	memcpy(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE);
	igt_assert(memcmp(ptr_cpu + last_offset, cpu_pattern, PAGE_SIZE) == 0);

	/* Cross check that accessing two simultaneous pages works. */
	igt_assert(memcmp(ptr_cpu, ptr_cpu + last_offset, PAGE_SIZE) == 0);

	munmap(ptr_cpu, huge_object_size);
	free(cpu_pattern);
}
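A hedged example of how this CPU-mmap variant could be invoked for each size class in the switch above (half the mappable aperture, just over the mappable aperture, just over the full GTT aperture); the subtest names are assumptions.

igt_subtest("big-bo")
	test_huge_bo(fd, -1, I915_TILING_NONE);
igt_subtest("huge-bo")
	test_huge_bo(fd, 0, I915_TILING_NONE);
igt_subtest("swap-bo")
	test_huge_bo(fd, 1, I915_TILING_NONE);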
static unsigned get_num_contexts(int fd)
{
	uint64_t ggtt_size;
	unsigned size;
	unsigned count;

	/* Compute the number of contexts we can allocate to fill the GGTT */
	if (intel_gen(intel_get_drm_devid(fd)) >= 8)
		ggtt_size = 1ull << 32;
	else
		ggtt_size = 1ull << 31;

	size = 64 << 10; /* Most gen require at least 64k for ctx */

	count = 3 * (ggtt_size / size) / 2;
	igt_info("Creating %lld contexts (assuming of size %lld)\n",
		 (long long)count, (long long)size);

	intel_require_memory(count, size, CHECK_RAM | CHECK_SWAP);
	return count;
}
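A hypothetical consumer of get_num_contexts(), using the standard IGT wrappers gem_context_create()/gem_context_destroy() to actually create that many contexts and then tear them down; the helper itself is an assumption, not part of the original test.

static void fill_ggtt_with_contexts(int fd)
{
	unsigned count = get_num_contexts(fd);
	uint32_t *ctx = calloc(count, sizeof(*ctx));
	unsigned n;

	igt_assert(ctx);
	/* Each context consumes GGTT space once it is used by the GPU. */
	for (n = 0; n < count; n++)
		ctx[n] = gem_context_create(fd);
	for (n = 0; n < count; n++)
		gem_context_destroy(fd, ctx[n]);
	free(ctx);
}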
Example No. 5
static void
test_huge_copy(int fd, int huge, int tiling_a, int tiling_b)
{
	uint64_t huge_object_size, i;
	uint32_t bo, *pattern_a, *pattern_b;
	char *a, *b;

	switch (huge) {
	case -2:
		huge_object_size = gem_mappable_aperture_size() / 4;
		break;
	case -1:
		huge_object_size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		huge_object_size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		huge_object_size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(2, huge_object_size, CHECK_RAM);

	pattern_a = malloc(PAGE_SIZE);
	igt_assert(pattern_a);
	for (i = 0; i < PAGE_SIZE/4; i++)
		pattern_a[i] = i;

	pattern_b = malloc(PAGE_SIZE);
	igt_assert(pattern_b);
	for (i = 0; i < PAGE_SIZE/4; i++)
		pattern_b[i] = ~i;

	bo = gem_create(fd, huge_object_size);
	if (tiling_a)
		gem_set_tiling(fd, bo, tiling_a,
			       tiling_a == I915_TILING_Y ? 128 : 512);
	a = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
	igt_require(a);
	gem_close(fd, bo);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++)
		memcpy(a + PAGE_SIZE*i, pattern_a, PAGE_SIZE);

	bo = gem_create(fd, huge_object_size);
	if (tiling_b)
		gem_set_tiling(fd, bo, tiling_b,
			       tiling_b == I915_TILING_Y ? 128 : 512);
	b = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
	igt_require(b);
	gem_close(fd, bo);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++)
		memcpy(b + PAGE_SIZE*i, pattern_b, PAGE_SIZE);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			memcpy(a + i*PAGE_SIZE, b + i*PAGE_SIZE, PAGE_SIZE);
		else
			memcpy(b + i*PAGE_SIZE, a + i*PAGE_SIZE, PAGE_SIZE);
	}

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			igt_assert(memcmp(pattern_b, a + PAGE_SIZE*i, PAGE_SIZE) == 0);
		else
			igt_assert(memcmp(pattern_a, a + PAGE_SIZE*i, PAGE_SIZE) == 0);
	}
	munmap(a, huge_object_size);

	for (i = 0; i < huge_object_size / PAGE_SIZE; i++) {
		if (i & 1)
			igt_assert(memcmp(pattern_b, b + PAGE_SIZE*i, PAGE_SIZE) == 0);
		else
			igt_assert(memcmp(pattern_a, b + PAGE_SIZE*i, PAGE_SIZE) == 0);
	}
	munmap(b, huge_object_size);

	free(pattern_a);
	free(pattern_b);
}
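A couple of hedged invocations showing the kinds of tiling combinations this copy test is parameterised over; the actual subtest matrix is an assumption.

/* Linear-to-linear copy on an object just over the mappable aperture. */
test_huge_copy(fd, 0, I915_TILING_NONE, I915_TILING_NONE);
/* Mixed-tiling copy between two quarter-of-the-aperture objects. */
test_huge_copy(fd, -2, I915_TILING_X, I915_TILING_Y);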
Example No. 6
static void
test_huge_bo(int fd, int huge, int tiling)
{
	uint32_t bo;
	char *ptr;
	char *tiled_pattern;
	char *linear_pattern;
	uint64_t size, last_offset;
	int pitch = tiling == I915_TILING_Y ? 128 : 512;
	int i;

	switch (huge) {
	case -1:
		size = gem_mappable_aperture_size() / 2;
		break;
	case 0:
		size = gem_mappable_aperture_size() + PAGE_SIZE;
		break;
	default:
		size = gem_aperture_size(fd) + PAGE_SIZE;
		break;
	}
	intel_require_memory(1, size, CHECK_RAM);

	last_offset = size - PAGE_SIZE;

	/* Create pattern */
	bo = gem_create(fd, PAGE_SIZE);
	if (tiling)
		gem_set_tiling(fd, bo, tiling, pitch);
	linear_pattern = gem_mmap__gtt(fd, bo, PAGE_SIZE,
				       PROT_READ | PROT_WRITE);
	for (i = 0; i < PAGE_SIZE; i++)
		linear_pattern[i] = i;
	tiled_pattern = gem_mmap__cpu(fd, bo, 0, PAGE_SIZE, PROT_READ);

	gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT, 0);
	gem_close(fd, bo);

	bo = gem_create(fd, size);
	if (tiling)
		gem_set_tiling(fd, bo, tiling, pitch);

	/* Initialise first/last page through CPU mmap */
	ptr = gem_mmap__cpu(fd, bo, 0, size, PROT_READ | PROT_WRITE);
	memcpy(ptr, tiled_pattern, PAGE_SIZE);
	memcpy(ptr + last_offset, tiled_pattern, PAGE_SIZE);
	munmap(ptr, size);

	/* Obtain mapping for the object through GTT. */
	ptr = __gem_mmap__gtt(fd, bo, size, PROT_READ | PROT_WRITE);
	igt_require_f(ptr, "Huge BO GTT mapping not supported.\n");

	set_domain_gtt(fd, bo);

	/* Access through GTT should still provide the CPU written values. */
	igt_assert(memcmp(ptr              , linear_pattern, PAGE_SIZE) == 0);
	igt_assert(memcmp(ptr + last_offset, linear_pattern, PAGE_SIZE) == 0);

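	/* Switching to I915_TILING_NONE removes fence detiling from the GTT
	 * view, so reads should now return the raw (tiled) byte layout that
	 * tiled_pattern captured through the CPU mmap. */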
	gem_set_tiling(fd, bo, I915_TILING_NONE, 0);

	igt_assert(memcmp(ptr              , tiled_pattern, PAGE_SIZE) == 0);
	igt_assert(memcmp(ptr + last_offset, tiled_pattern, PAGE_SIZE) == 0);

	munmap(ptr, size);

	gem_close(fd, bo);
	munmap(tiled_pattern, PAGE_SIZE);
	munmap(linear_pattern, PAGE_SIZE);
}
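set_domain_gtt() is called above but not defined in this excerpt; a minimal sketch of what it presumably wraps, assuming the standard gem_set_domain() ioctl wrapper:

static void
set_domain_gtt(int fd, uint32_t handle)
{
	/* Move the object to the GTT domain for both reads and writes. */
	gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
}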