igt/gem_exec_flush: Add a read-only variation

Alternate between two values written by the GPU so that we can look for
stale cachelines without having to overwrite the value with the CPU.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
Chris Wilson 2016-05-01 15:14:37 +01:00
parent b481705208
commit a0d6645d45

View File

@@ -29,6 +29,7 @@ IGT_TEST_DESCRIPTION("Basic check of flushing after batches");
#define UNCACHED 0
#define COHERENT 1
#define WRITE 2
static void run(int fd, unsigned ring, int nchild, unsigned flags, int timeout)
{
@@ -40,8 +41,9 @@ static void run(int fd, unsigned ring, int nchild, unsigned flags, int timeout)
igt_fork(child, nchild) {
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry reloc[1024];
struct drm_i915_gem_exec_object2 obj[3];
struct drm_i915_gem_relocation_entry reloc0[1024];
struct drm_i915_gem_relocation_entry reloc1[1024];
struct drm_i915_gem_execbuffer2 execbuf;
unsigned long cycles = 0;
uint32_t *ptr;
@@ -67,34 +69,36 @@ static void run(int fd, unsigned ring, int nchild, unsigned flags, int timeout)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = (uintptr_t)obj;
execbuf.buffer_count = 2;
execbuf.buffer_count = 3;
execbuf.flags = ring | (1 << 11) | (1<<12);
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
obj[1].handle = gem_create(fd, 1024*64);
gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
obj[2].handle = gem_create(fd, 1024*64);
gem_write(fd, obj[2].handle, 0, &bbe, sizeof(bbe));
igt_require(__gem_execbuf(fd, &execbuf) == 0);
obj[1].relocation_count = 1;
obj[2].relocation_count = 1;
ptr = gem_mmap__wc(fd, obj[1].handle, 0, 64*1024,
PROT_WRITE | PROT_READ);
gem_set_domain(fd, obj[1].handle,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
memset(reloc, 0, sizeof(reloc));
memset(reloc0, 0, sizeof(reloc0));
for (i = 0; i < 1024; i++) {
uint64_t offset;
uint32_t *b = &ptr[16 * i];
reloc[i].presumed_offset = obj[0].offset;
reloc[i].offset = (b - ptr + 1) * sizeof(*ptr);
reloc[i].delta = i * sizeof(uint32_t);
reloc[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
reloc[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
reloc0[i].presumed_offset = obj[0].offset;
reloc0[i].offset = (b - ptr + 1) * sizeof(*ptr);
reloc0[i].delta = i * sizeof(uint32_t);
reloc0[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
reloc0[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
offset = obj[0].offset + reloc[i].delta;
offset = obj[0].offset + reloc0[i].delta;
*b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
if (gen >= 8) {
*b++ = offset;
@@ -102,7 +106,7 @@ static void run(int fd, unsigned ring, int nchild, unsigned flags, int timeout)
} else if (gen >= 4) {
*b++ = 0;
*b++ = offset;
reloc[i].offset += sizeof(*ptr);
reloc0[i].offset += sizeof(*ptr);
} else {
b[-1] -= 1;
*b++ = offset;
@@ -112,11 +116,48 @@ static void run(int fd, unsigned ring, int nchild, unsigned flags, int timeout)
}
munmap(ptr, 64*1024);
ptr = gem_mmap__wc(fd, obj[2].handle, 0, 64*1024,
PROT_WRITE | PROT_READ);
gem_set_domain(fd, obj[2].handle,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
memset(reloc1, 0, sizeof(reloc1));
for (i = 0; i < 1024; i++) {
uint64_t offset;
uint32_t *b = &ptr[16 * i];
reloc1[i].presumed_offset = obj[0].offset;
reloc1[i].offset = (b - ptr + 1) * sizeof(*ptr);
reloc1[i].delta = i * sizeof(uint32_t);
reloc1[i].read_domains = I915_GEM_DOMAIN_INSTRUCTION;
reloc1[i].write_domain = I915_GEM_DOMAIN_INSTRUCTION;
offset = obj[0].offset + reloc1[i].delta;
*b++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
if (gen >= 8) {
*b++ = offset;
*b++ = offset >> 32;
} else if (gen >= 4) {
*b++ = 0;
*b++ = offset;
reloc1[i].offset += sizeof(*ptr);
} else {
b[-1] -= 1;
*b++ = offset;
}
*b++ = i ^ 0xffffffff;
*b++ = MI_BATCH_BUFFER_END;
}
munmap(ptr, 64*1024);
igt_timeout(timeout) {
bool xor = (cycles >> 10) & 1;
i = cycles++ % 1024;
obj[1].relocs_ptr = (uintptr_t)&reloc[i];
obj[1].relocs_ptr = (uintptr_t)&reloc0[i];
obj[2].relocs_ptr = (uintptr_t)&reloc1[i];
execbuf.batch_start_offset = 64*i;
execbuf.buffer_count = 2 + xor;
gem_execbuf(fd, &execbuf);
gem_sync(fd, obj[0].handle);
@@ -124,14 +165,20 @@ static void run(int fd, unsigned ring, int nchild, unsigned flags, int timeout)
if (!(flags & COHERENT) && !gem_has_llc(fd))
igt_clflush_range(&map[i], sizeof(map[i]));
igt_assert_eq_u32(map[i], i);
if (xor)
igt_assert_eq_u32(map[i], i ^ 0xffffffff);
else
igt_assert_eq_u32(map[i], i);
map[i] = 0xdeadbeef;
if (!(flags & COHERENT))
igt_clflush_range(&map[i], sizeof(map[i]));
if (flags & WRITE) {
map[i] = 0xdeadbeef;
if (!(flags & COHERENT))
igt_clflush_range(&map[i], sizeof(map[i]));
}
}
igt_info("Child[%d]: %lu cycles\n", child, cycles);
gem_close(fd, obj[2].handle);
gem_close(fd, obj[1].handle);
munmap(map, 4096);
@@ -156,13 +203,25 @@ igt_main
igt_fork_hang_detector(fd);
for (e = intel_execution_engines; e->name; e++) {
igt_subtest_f("%suc-%s",
igt_subtest_f("%suc-ro-%s",
e->exec_id == 0 ? "basic-" : "", e->name)
run(fd, e->exec_id | e->flags, ncpus, UNCACHED, 10);
run(fd, e->exec_id | e->flags, ncpus,
UNCACHED, 2 + 120*!!e->exec_id);
igt_subtest_f("%swb-%s",
igt_subtest_f("%suc-rw-%s",
e->exec_id == 0 ? "basic-" : "", e->name)
run(fd, e->exec_id | e->flags, ncpus, COHERENT, 10);
run(fd, e->exec_id | e->flags, ncpus,
UNCACHED | WRITE, 2 + 120*!!e->exec_id);
igt_subtest_f("%swb-ro-%s",
e->exec_id == 0 ? "basic-" : "", e->name)
run(fd, e->exec_id | e->flags, ncpus,
COHERENT, 2 + 120*!!e->exec_id);
igt_subtest_f("%swb-rw-%s",
e->exec_id == 0 ? "basic-" : "", e->name)
run(fd, e->exec_id | e->flags, ncpus,
COHERENT | WRITE, 2 + 120*!!e->exec_id);
}
igt_stop_hang_detector();