gem_ringfill: Make this actually test that all the batches executed.

I was looking into some strange behavior in Mesa that looked like batches
might be getting skipped, but this test didn't catch it.
Eric Anholt 2012-02-20 10:04:02 -08:00 committed by Daniel Vetter
parent 732547793f
commit 5cf5558682


@@ -31,6 +31,7 @@
  * catching failure to manage the ring properly near full.
  */
 
+#include <stdbool.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
@ -50,13 +51,17 @@
static drm_intel_bufmgr *bufmgr; static drm_intel_bufmgr *bufmgr;
struct intel_batchbuffer *batch; struct intel_batchbuffer *batch;
static const int width = 512, height = 512; static const int width = 512, height = 512;
static const int size = 1024 * 1024;
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
int fd; int fd;
int i; int i;
drm_intel_bo *src_bo, *dst_bo; drm_intel_bo *src_bo, *dst_bo;
uint32_t *map;
int fails = 0;
int pitch = width * 4;
int size = pitch * height;
int blits;
fd = drm_open_any(); fd = drm_open_any();
@@ -67,6 +72,20 @@ int main(int argc, char **argv)
         src_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
         dst_bo = drm_intel_bo_alloc(bufmgr, "src bo", size, 4096);
 
+        /* Fill the src with indexes of the pixels */
+        drm_intel_bo_map(src_bo, true);
+        map = src_bo->virtual;
+        for (i = 0; i < width * height; i++)
+                map[i] = i;
+        drm_intel_bo_unmap(src_bo);
+
+        /* Fill the dst with garbage. */
+        drm_intel_bo_map(dst_bo, true);
+        map = dst_bo->virtual;
+        for (i = 0; i < width * height; i++)
+                map[i] = 0xd0d0d0d0;
+        drm_intel_bo_unmap(dst_bo);
+
         /* The ring we've been using is 128k, and each rendering op
          * will use at least 8 dwords:
          *
@@ -82,15 +101,53 @@ int main(int argc, char **argv)
          * So iterate just a little more than that -- if we don't fill the ring
          * doing this, we aren't likely to with this test.
          */
-        for (i = 0; i < 128 * 1024 / (8 * 4) * 1.25; i++) {
-                intel_copy_bo(batch, dst_bo, src_bo, width, height);
+        blits = width * height;
+        for (i = 0; i < blits; i++) {
+                int x = i % width;
+                int y = i / width;
+
+                assert(y < height);
+
+                BEGIN_BATCH(8);
+                OUT_BATCH(XY_SRC_COPY_BLT_CMD |
+                          XY_SRC_COPY_BLT_WRITE_ALPHA |
+                          XY_SRC_COPY_BLT_WRITE_RGB);
+                OUT_BATCH((3 << 24) | /* 32 bits */
+                          (0xcc << 16) | /* copy ROP */
+                          pitch);
+                OUT_BATCH((y << 16) | x); /* dst x1,y1 */
+                OUT_BATCH(((y + 1) << 16) | (x + 1)); /* dst x2,y2 */
+                OUT_RELOC(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
+                OUT_BATCH((y << 16) | x); /* src x1,y1 */
+                OUT_BATCH(pitch);
+                OUT_RELOC(src_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
+                ADVANCE_BATCH();
+
                 intel_batchbuffer_flush(batch);
         }
 
+        /* verify */
+        drm_intel_bo_map(dst_bo, false);
+        map = dst_bo->virtual;
+        for (i = 0; i < blits; i++) {
+                int x = i % width;
+                int y = i / width;
+
+                if (map[i] != i) {
+                        printf("Copy #%d at %d,%d failed: read 0x%08x\n",
+                               i, x, y, map[i]);
+
+                        if (fails++ > 9)
+                                exit(1);
+                }
+        }
+        drm_intel_bo_unmap(dst_bo);
+
         intel_batchbuffer_free(batch);
         drm_intel_bufmgr_destroy(bufmgr);
 
         close(fd);
 
-        return 0;
+        return fails != 0;
 }