Mirror of https://github.com/tiagovignatti/intel-gpu-tools.git (synced 2025-06-20 06:16:13 +00:00)
batch: Specify number of relocations to accommodate
Since relocations are variable size, depending upon generation, it is easier to handle the resizing of the batch request inside the BEGIN_BATCH macro. This still leaves us having to resize commands in a few places, which still need adaptation for gen8+.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in: parent 255bade1ea, commit 10552b5ca6
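The heart of the change is a sizing rule: on Gen8+ every relocation emitted with OUT_RELOC occupies two dwords (the address is written as two dwords) instead of one, so the space BEGIN_BATCH reserves has to grow with the relocation count rather than being hard-coded per generation at each call site. A minimal, self-contained sketch of that accounting (the helper and the numbers in main() are illustrative, not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Mirrors the new BEGIN_BATCH(n, r) accounting: reserve n_dwords command
 * dwords plus one extra dword per relocation on Gen8+, where each OUT_RELOC
 * emits a two-dword address. */
static unsigned batch_bytes(int gen, unsigned n_dwords, unsigned n_relocs)
{
	unsigned n = n_dwords;

	if (gen >= 8)
		n += n_relocs;	/* each reloc grows from one dword to two */

	return n * 4;		/* dwords to bytes */
}

int main(void)
{
	/* An XY_SRC_COPY blit is 8 dwords with 2 relocations pre-Gen8 ... */
	assert(batch_bytes(7, 8, 2) == 8 * 4);
	/* ... and needs 10 dwords' worth of space on Gen8+. */
	assert(batch_bytes(8, 8, 2) == 10 * 4);
	printf("gen7: %u bytes, gen8: %u bytes\n",
	       batch_bytes(7, 8, 2), batch_bytes(8, 8, 2));
	return 0;
}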
@@ -97,7 +97,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
 	drm_intel_bo_subdata(src_bo, 0, sizeof(data), data);

 	/* Render the junk to the dst. */
-	BLIT_COPY_BATCH_START(batch->devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		(width * 4) /* dst pitch */);
@@ -95,7 +95,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
 	drm_intel_gem_bo_unmap_gtt(src_bo);

 	/* Render the junk to the dst. */
-	BLIT_COPY_BATCH_START(batch->devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		(width * 4) /* dst pitch */);
@@ -98,7 +98,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
 	drm_intel_bo_unmap(src_bo);

 	/* Render the junk to the dst. */
-	BLIT_COPY_BATCH_START(batch->devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		(width * 4) /* dst pitch */);
@@ -108,7 +108,7 @@ do_render(drm_intel_bufmgr *bufmgr, struct intel_batchbuffer *batch,
 	}

 	/* Render the junk to the dst. */
-	BLIT_COPY_BATCH_START(batch->devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		(width * 4) /* dst pitch */);
@@ -360,9 +360,7 @@ intel_blt_copy(struct intel_batchbuffer *batch,
 		igt_fail(1);
 	}

-	BEGIN_BATCH(gen >= 8 ? 10 : 8);
-	OUT_BATCH(XY_SRC_COPY_BLT_CMD | cmd_bits |
-		(gen >= 8 ? 8 : 6));
+	BLIT_COPY_BATCH_START(cmd_bits);
 	OUT_BATCH((br13_bits) |
 		(0xcc << 16) | /* copy ROP */
 		dst_pitch);
@@ -376,12 +374,14 @@ intel_blt_copy(struct intel_batchbuffer *batch,

 #define CMD_POLY_STIPPLE_OFFSET 0x7906
 	if (gen == 5) {
+		BEGIN_BATCH(2, 0);
 		OUT_BATCH(CMD_POLY_STIPPLE_OFFSET << 16);
 		OUT_BATCH(0);
+		ADVANCE_BATCH();
 	}

 	if (gen >= 6 && src_bo == dst_bo) {
-		BEGIN_BATCH(3);
+		BEGIN_BATCH(3, 0);
 		OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 		OUT_BATCH(0);
 		OUT_BATCH(0);
@@ -77,6 +77,7 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
 /**
  * BEGIN_BATCH:
  * @n: number of DWORDS to emit
+ * @r: number of RELOCS to emit
  *
  * Prepares a batch to emit @n DWORDS, flushing it if there's not enough space
  * available.
@@ -84,10 +85,13 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
  * This macro needs a pointer to an #intel_batchbuffer structure called batch in
  * scope.
  */
-#define BEGIN_BATCH(n) do { \
+#define BEGIN_BATCH(n, r) do { \
+	int __n = (n); \
 	igt_assert(batch->end == NULL); \
-	intel_batchbuffer_require_space(batch, (n)*4); \
-	batch->end = batch->ptr + (n) * 4; \
+	if (batch->gen >= 8) __n += r; \
+	__n *= 4; \
+	intel_batchbuffer_require_space(batch, __n); \
+	batch->end = batch->ptr + __n; \
 } while (0)

 /**
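With the macro reworked, a call site declares both its command dwords and its relocation count up front and the reservation comes out right on every generation. The fragment below is the MI_FLUSH_DW pattern used by several of the tests updated later in this patch, shown reassembled rather than as a diff; it assumes igt's intel_batchbuffer.h macros and a batch pointer in scope, as the documentation above requires:

	/* 4 command dwords plus 1 relocation: BEGIN_BATCH reserves
	 * 5 dwords' worth of space on Gen8+. */
	BEGIN_BATCH(4, 1);
	OUT_BATCH(MI_FLUSH_DW | 1);
	OUT_BATCH(0); /* reserved */
	OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
		  I915_GEM_DOMAIN_RENDER, 0);
	OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
	ADVANCE_BATCH();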
@@ -150,35 +154,21 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
 	batch->end = NULL; \
 } while(0)

-#define BLIT_COPY_BATCH_START(devid, flags) do { \
-	if (intel_gen(devid) >= 8) { \
-		BEGIN_BATCH(10); \
-		OUT_BATCH(XY_SRC_COPY_BLT_CMD | \
-			XY_SRC_COPY_BLT_WRITE_ALPHA | \
-			XY_SRC_COPY_BLT_WRITE_RGB | \
-			(flags) | 8); \
-	} else { \
-		BEGIN_BATCH(8); \
-		OUT_BATCH(XY_SRC_COPY_BLT_CMD | \
-			XY_SRC_COPY_BLT_WRITE_ALPHA | \
-			XY_SRC_COPY_BLT_WRITE_RGB | \
-			(flags) | 6); \
-	} \
+#define BLIT_COPY_BATCH_START(flags) do { \
+	BEGIN_BATCH(8, 2); \
+	OUT_BATCH(XY_SRC_COPY_BLT_CMD | \
+		XY_SRC_COPY_BLT_WRITE_ALPHA | \
+		XY_SRC_COPY_BLT_WRITE_RGB | \
+		(flags) | \
+		(6 + 2*(batch->gen >= 8))); \
 } while(0)

-#define COLOR_BLIT_COPY_BATCH_START(devid, flags) do { \
-	if (intel_gen(devid) >= 8) { \
-		BEGIN_BATCH(8); \
-		OUT_BATCH(MI_NOOP); \
-		OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 0x5 | \
-			COLOR_BLT_WRITE_ALPHA | \
-			XY_COLOR_BLT_WRITE_RGB); \
-	} else { \
-		BEGIN_BATCH(6); \
-		OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 0x4 | \
-			COLOR_BLT_WRITE_ALPHA | \
-			XY_COLOR_BLT_WRITE_RGB); \
-	} \
+#define COLOR_BLIT_COPY_BATCH_START(flags) do { \
+	BEGIN_BATCH(6, 1); \
+	OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | \
+		COLOR_BLT_WRITE_ALPHA | \
+		XY_COLOR_BLT_WRITE_RGB | \
+		(4 + (batch->gen >= 8))); \
 } while(0)

 void
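The rewritten helpers fold the Gen8 special case into the blit command's length field instead of duplicating the whole emission: the XY_SRC_COPY blit carries two fenced relocations (destination and source), so its length grows by two dwords on Gen8+, while the XY_COLOR blit carries one relocation and grows by one. A small self-contained check of that arithmetic (function names are illustrative):

#include <assert.h>

/* Length field of XY_SRC_COPY_BLT: 6 pre-Gen8, 8 on Gen8+ (two relocations,
 * each one dword larger), matching BLIT_COPY_BATCH_START above. */
static int src_copy_len(int gen)
{
	return 6 + 2 * (gen >= 8);
}

/* Length field of the XY_COLOR blit: 4 pre-Gen8, 5 on Gen8+ (one relocation),
 * matching COLOR_BLIT_COPY_BATCH_START above. */
static int color_blt_len(int gen)
{
	return 4 + (gen >= 8);
}

int main(void)
{
	assert(src_copy_len(7) == 6 && src_copy_len(8) == 8);
	assert(color_blt_len(7) == 4 && color_blt_len(8) == 5);
	return 0;
}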
@@ -81,7 +81,7 @@ igt_simple_main
 	/* put some load onto the gpu to keep the light buffers active for long
 	 * enough */
 	for (i = 0; i < 10000; i++) {
-		BLIT_COPY_BATCH_START(batch->devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			4096);
@@ -110,7 +110,7 @@ igt_simple_main
 		drm_intel_gem_bo_unmap_gtt(bo[j]);

 		/* put it onto the active list ... */
-		COLOR_BLIT_COPY_BATCH_START(intel_get_drm_devid(fd), 0);
+		COLOR_BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			128);
 		OUT_BATCH(0); /* dst x1,y1 */
@@ -50,7 +50,7 @@ struct intel_batchbuffer *batch;
 static void
 bad_store(void)
 {
-	BEGIN_BATCH(4);
+	BEGIN_BATCH(4, 0);
 	OUT_BATCH(MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL | 1 << 21);
 	OUT_BATCH(0);
 	OUT_BATCH(BAD_GTT_DEST);
@@ -48,7 +48,7 @@ struct intel_batchbuffer *batch;
 static void
 bad_batch(void)
 {
-	BEGIN_BATCH(2);
+	BEGIN_BATCH(2, 0);
 	OUT_BATCH(MI_BATCH_BUFFER_START);
 	OUT_BATCH(0);
 	ADVANCE_BATCH();
@@ -78,7 +78,7 @@ bad_blit(drm_intel_bo *src_bo, uint32_t devid)
 		cmd_bits |= XY_SRC_COPY_BLT_DST_TILED;
 	}

-	BLIT_COPY_BATCH_START(devid, cmd_bits);
+	BLIT_COPY_BATCH_START(cmd_bits);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		dst_pitch);
@@ -63,7 +63,7 @@ int fd;
 static void
 copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
 {
-	BLIT_COPY_BATCH_START(devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		4096);
@@ -134,7 +134,7 @@ igt_simple_main

 	/* copy the sample batch with the gpu to the new one, so that we
 	 * also test the unmappable part of the gtt. */
-	BLIT_COPY_BATCH_START(batch->devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		4096);
@@ -62,7 +62,7 @@ dummy_reloc_loop(void)
 	int i;

 	for (i = 0; i < 0x800; i++) {
-		BLIT_COPY_BATCH_START(batch->devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			4*4096);
@@ -75,7 +75,7 @@ dummy_reloc_loop(void)
 		ADVANCE_BATCH();
 		intel_batchbuffer_flush(batch);

-		BEGIN_BATCH(4);
+		BEGIN_BATCH(4, 1);
 		OUT_BATCH(MI_FLUSH_DW | 1);
 		OUT_BATCH(0); /* reserved */
 		OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
@@ -71,23 +71,21 @@ dummy_reloc_loop(int ring)
 	int i;

 	for (i = 0; i < 0x100000; i++) {
+		BEGIN_BATCH(4, 1);
 		if (ring == I915_EXEC_RENDER) {
-			BEGIN_BATCH(4);
 			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
 			OUT_BATCH(0xffffffff); /* compare dword */
 			OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
 				I915_GEM_DOMAIN_RENDER, 0);
 			OUT_BATCH(MI_NOOP);
-			ADVANCE_BATCH();
 		} else {
-			BEGIN_BATCH(4);
 			OUT_BATCH(MI_FLUSH_DW | 1);
 			OUT_BATCH(0); /* reserved */
 			OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
 				I915_GEM_DOMAIN_RENDER, 0);
 			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
-			ADVANCE_BATCH();
 		}
+		ADVANCE_BATCH();
 		intel_batchbuffer_flush_on_ring(batch, ring);

 		drm_intel_bo_map(target_buffer, 0);
@@ -106,23 +104,21 @@ dummy_reloc_loop_random_ring(int num_rings)
 	for (i = 0; i < 0x100000; i++) {
 		int ring = random() % num_rings + 1;

+		BEGIN_BATCH(4, 1);
 		if (ring == I915_EXEC_RENDER) {
-			BEGIN_BATCH(4);
 			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
 			OUT_BATCH(0xffffffff); /* compare dword */
 			OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
 				I915_GEM_DOMAIN_RENDER, 0);
 			OUT_BATCH(MI_NOOP);
-			ADVANCE_BATCH();
 		} else {
-			BEGIN_BATCH(4);
 			OUT_BATCH(MI_FLUSH_DW | 1);
 			OUT_BATCH(0); /* reserved */
 			OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
 				I915_GEM_DOMAIN_RENDER, 0);
 			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
-			ADVANCE_BATCH();
 		}
+		ADVANCE_BATCH();
 		intel_batchbuffer_flush_on_ring(batch, ring);

 		drm_intel_bo_map(target_buffer, 0);
@@ -148,23 +144,21 @@ dummy_reloc_loop_random_ring_multi_fd(int num_rings)
 		mindex = random() % NUM_FD;
 		batch = mbatch[mindex];

+		BEGIN_BATCH(4, 1);
 		if (ring == I915_EXEC_RENDER) {
-			BEGIN_BATCH(4);
 			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
 			OUT_BATCH(0xffffffff); /* compare dword */
 			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
 				I915_GEM_DOMAIN_RENDER, 0);
 			OUT_BATCH(MI_NOOP);
-			ADVANCE_BATCH();
 		} else {
-			BEGIN_BATCH(4);
 			OUT_BATCH(MI_FLUSH_DW | 1);
 			OUT_BATCH(0); /* reserved */
 			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
 				I915_GEM_DOMAIN_RENDER, 0);
 			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
-			ADVANCE_BATCH();
 		}
+		ADVANCE_BATCH();
 		intel_batchbuffer_flush_on_ring(batch, ring);

 		drm_intel_bo_map(target_buffer, 0);
@@ -163,13 +163,13 @@ igt_main
 	}

 	igt_subtest("cpu-domain") {
-		BEGIN_BATCH(2);
+		BEGIN_BATCH(2, 1);
 		OUT_BATCH(0);
 		OUT_RELOC(tmp, I915_GEM_DOMAIN_CPU, 0, 0);
 		ADVANCE_BATCH();
 		igt_assert(run_batch() == -EINVAL);

-		BEGIN_BATCH(2);
+		BEGIN_BATCH(2, 1);
 		OUT_BATCH(0);
 		OUT_RELOC(tmp, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU, 0);
 		ADVANCE_BATCH();
@@ -177,13 +177,13 @@ igt_main
 	}

 	igt_subtest("gtt-domain") {
-		BEGIN_BATCH(2);
+		BEGIN_BATCH(2, 1);
 		OUT_BATCH(0);
 		OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT, 0, 0);
 		ADVANCE_BATCH();
 		igt_assert(run_batch() == -EINVAL);

-		BEGIN_BATCH(2);
+		BEGIN_BATCH(2, 1);
 		OUT_BATCH(0);
 		OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT, 0);
 		ADVANCE_BATCH();
@@ -193,7 +193,7 @@ igt_main
 	/* Note: Older kernels disallow this. Punt on the skip check though
 	 * since this is too old. */
 	igt_subtest("conflicting-write-domain") {
-		BEGIN_BATCH(4);
+		BEGIN_BATCH(4, 2);
 		OUT_BATCH(0);
 		OUT_RELOC(tmp, I915_GEM_DOMAIN_RENDER,
 			I915_GEM_DOMAIN_RENDER, 0);
@@ -208,14 +208,14 @@ igt_main
 		multi_write_domain(fd);

 	igt_subtest("invalid-gpu-domain") {
-		BEGIN_BATCH(2);
+		BEGIN_BATCH(2, 1);
 		OUT_BATCH(0);
 		OUT_RELOC(tmp, ~(I915_GEM_GPU_DOMAINS | I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU),
 			0, 0);
 		ADVANCE_BATCH();
 		igt_assert(run_batch() == -EINVAL);

-		BEGIN_BATCH(2);
+		BEGIN_BATCH(2, 1);
 		OUT_BATCH(0);
 		OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT << 1,
 			I915_GEM_DOMAIN_GTT << 1, 0);
@@ -85,7 +85,7 @@ static void emit_dummy_load(void)
 	}

 	for (i = 0; i < 5; i++) {
-		BLIT_COPY_BATCH_START(devid, tile_flags);
+		BLIT_COPY_BATCH_START(tile_flags);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			pitch);
@@ -97,8 +97,8 @@ static void emit_dummy_load(void)
 		OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (IS_GEN6(devid) || IS_GEN7(devid)) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 6) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -54,7 +54,7 @@ gpu_hang(void)
 	cmd = bad_pipe ? MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW :
 		MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;

-	BEGIN_BATCH(6);
+	BEGIN_BATCH(6, 0);
 	/* The documentation says that the LOAD_SCAN_LINES command
 	 * always comes in pairs. Don't ask me why. */
 	OUT_BATCH(MI_LOAD_SCAN_LINES_INCL | (bad_pipe << 20));
@@ -88,9 +88,8 @@ igt_simple_main
 	pitch /= 4;

 	for (i = 0; i < 10000; i++) {
-		BLIT_COPY_BATCH_START(devid,
-			XY_SRC_COPY_BLT_SRC_TILED |
-			XY_SRC_COPY_BLT_DST_TILED);
+		BLIT_COPY_BATCH_START(XY_SRC_COPY_BLT_SRC_TILED |
+			XY_SRC_COPY_BLT_DST_TILED);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			pitch);
@@ -102,8 +101,8 @@ igt_simple_main
 		OUT_RELOC_FENCED(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (IS_GEN6(devid) || IS_GEN7(devid)) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 6) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -78,7 +78,7 @@ store_dword_loop(int fd)
 		mindex = random() % NUM_FD;
 		batch = mbatch[mindex];
 		if (ring == I915_EXEC_RENDER) {
-			BEGIN_BATCH(4);
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
 			OUT_BATCH(0xffffffff); /* compare dword */
 			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
@@ -86,7 +86,7 @@ store_dword_loop(int fd)
 			OUT_BATCH(MI_NOOP);
 			ADVANCE_BATCH();
 		} else {
-			BEGIN_BATCH(4);
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(MI_FLUSH_DW | 1);
 			OUT_BATCH(0); /* reserved */
 			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
@@ -66,7 +66,7 @@ mi_lri_loop(void)
 	for (i = 0; i < 0x100; i++) {
 		int ring = random() % num_rings + 1;

-		BEGIN_BATCH(4);
+		BEGIN_BATCH(4, 0);
 		OUT_BATCH(MI_LOAD_REGISTER_IMM | 1);
 		OUT_BATCH(0x203c); /* RENDER RING CTL */
 		OUT_BATCH(0); /* try to stop the ring */
@@ -63,7 +63,7 @@ int fd;
 static void
 copy_bo(drm_intel_bo *src, drm_intel_bo *dst)
 {
-	BLIT_COPY_BATCH_START(devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		4096);
@@ -125,7 +125,7 @@ static void emit_dummy_load(int pitch)
 	}

 	for (i = 0; i < 5; i++) {
-		BLIT_COPY_BATCH_START(devid, tile_flags);
+		BLIT_COPY_BATCH_START(tile_flags);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			pitch);
@@ -137,8 +137,8 @@ static void emit_dummy_load(int pitch)
 		OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (intel_gen(devid) >= 6) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 6) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -77,7 +77,7 @@ store_pipe_control_loop(bool preuse_buffer)
 		igt_assert(target_bo);

 		if (preuse_buffer) {
-			COLOR_BLIT_COPY_BATCH_START(devid, 0);
+			COLOR_BLIT_COPY_BATCH_START(0);
 			OUT_BATCH((3 << 24) | (0xf0 << 16) | 64);
 			OUT_BATCH(0);
 			OUT_BATCH(1 << 16 | 1);
@@ -99,8 +99,8 @@ store_pipe_control_loop(bool preuse_buffer)
 		/* gem_storedw_batches_loop.c is a bit overenthusiastic with
 		 * creating new batchbuffers - with buffer reuse disabled, the
 		 * support code will do that for us. */
-		if (intel_gen(devid) >= 8) {
-			BEGIN_BATCH(5);
+		if (batch->gen >= 8) {
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(GFX_OP_PIPE_CONTROL + 1);
 			OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
 			OUT_RELOC_FENCED(target_bo,
@@ -109,10 +109,10 @@ store_pipe_control_loop(bool preuse_buffer)
 			OUT_BATCH(val); /* write data */
 			ADVANCE_BATCH();

-		} else if (intel_gen(devid) >= 6) {
+		} else if (batch->gen >= 6) {
 			/* work-around hw issue, see intel_emit_post_sync_nonzero_flush
 			 * in mesa sources. */
-			BEGIN_BATCH(4);
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(GFX_OP_PIPE_CONTROL);
 			OUT_BATCH(PIPE_CONTROL_CS_STALL |
 				PIPE_CONTROL_STALL_AT_SCOREBOARD);
@@ -120,7 +120,7 @@ store_pipe_control_loop(bool preuse_buffer)
 			OUT_BATCH(0); /* write data */
 			ADVANCE_BATCH();

-			BEGIN_BATCH(4);
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(GFX_OP_PIPE_CONTROL);
 			OUT_BATCH(PIPE_CONTROL_WRITE_IMMEDIATE);
 			OUT_RELOC(target_bo,
@@ -128,8 +128,8 @@ store_pipe_control_loop(bool preuse_buffer)
 				PIPE_CONTROL_GLOBAL_GTT);
 			OUT_BATCH(val); /* write data */
 			ADVANCE_BATCH();
-		} else if (intel_gen(devid) >= 4) {
-			BEGIN_BATCH(4);
+		} else if (batch->gen >= 4) {
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_WC_FLUSH |
 				PIPE_CONTROL_TC_FLUSH |
 				PIPE_CONTROL_WRITE_IMMEDIATE | 2);
@@ -117,7 +117,7 @@ static void emit_dummy_load(int pitch)
 	}

 	for (i = 0; i < 10; i++) {
-		BLIT_COPY_BATCH_START(devid, tile_flags);
+		BLIT_COPY_BATCH_START(tile_flags);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			pitch);
@@ -129,8 +129,8 @@ static void emit_dummy_load(int pitch)
 		OUT_RELOC_FENCED(dummy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (intel_gen(devid) >= 6) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 6) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -67,7 +67,7 @@ store_dword_loop(int fd)
 		int ring = random() % num_rings + 1;

 		if (ring == I915_EXEC_RENDER) {
-			BEGIN_BATCH(4);
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
 			OUT_BATCH(0xffffffff); /* compare dword */
 			OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
@@ -75,7 +75,7 @@ store_dword_loop(int fd)
 			OUT_BATCH(MI_NOOP);
 			ADVANCE_BATCH();
 		} else {
-			BEGIN_BATCH(4);
+			BEGIN_BATCH(4, 1);
 			OUT_BATCH(MI_FLUSH_DW | 1);
 			OUT_BATCH(0); /* reserved */
 			OUT_RELOC(target_buffer, I915_GEM_DOMAIN_RENDER,
@@ -178,7 +178,7 @@ static void blt_copy(struct intel_batchbuffer *batch,
 		unsigned w, unsigned h,
 		struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
 {
-	BLIT_COPY_BATCH_START(batch->devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		dst->stride);
@@ -86,7 +86,7 @@ static void do_test(uint32_t tiling, unsigned stride,
 	busy_bo = drm_intel_bo_alloc(bufmgr, "busy bo bo", 16*1024*1024, 4096);

 	for (i = 0; i < 250; i++) {
-		BLIT_COPY_BATCH_START(devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			2*1024*4);
@@ -98,8 +98,8 @@ static void do_test(uint32_t tiling, unsigned stride,
 		OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (IS_GEN6(devid) || IS_GEN7(devid)) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 6) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -157,7 +157,7 @@ static void do_test(uint32_t tiling, unsigned stride,
 		blt_bits = XY_SRC_COPY_BLT_SRC_TILED;
 	}

-	BLIT_COPY_BATCH_START(devid, blt_bits);
+	BLIT_COPY_BATCH_START(blt_bits);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		stride);
@@ -181,7 +181,7 @@ static void do_test(uint32_t tiling, unsigned stride,
 	/* Note: We don't care about gen4+ here because the blitter doesn't use
 	 * fences there. So not setting tiling flags on the tiled buffer is ok.
 	 */
-	BLIT_COPY_BATCH_START(devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		stride_after);
@@ -59,22 +59,19 @@ emit_store_dword_imm(int devid, drm_intel_bo *dest, uint32_t val)
 	if (!has_ppgtt)
 		cmd |= MI_MEM_VIRTUAL;

-	if (intel_gen(devid) >= 8) {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
+	BEGIN_BATCH(4, 0);
+	OUT_BATCH(cmd);
+	if (batch->gen >= 8) {
 		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	} else {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
 		OUT_BATCH(0); /* reserved */
 		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	}
+	ADVANCE_BATCH();
 }

 static void
@@ -59,23 +59,19 @@ emit_store_dword_imm(int devid, drm_intel_bo *dest, uint32_t val)
 	if (!has_ppgtt)
 		cmd |= MI_MEM_VIRTUAL;

-	if (intel_gen(devid) >= 8) {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
+	BEGIN_BATCH(4, 0);
+	OUT_BATCH(cmd);
+	if (batch->gen >= 8) {
 		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
-		OUT_BATCH(0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	} else {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
 		OUT_BATCH(0); /* reserved */
 		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	}
+	ADVANCE_BATCH();
 }

 static void
@@ -59,23 +59,19 @@ emit_store_dword_imm(int devid, drm_intel_bo *dest, uint32_t val)
 	if (!has_ppgtt)
 		cmd |= MI_MEM_VIRTUAL;

-	if (intel_gen(devid) >= 8) {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
+	BEGIN_BATCH(4, 0);
+	OUT_BATCH(cmd);
+	if (batch->gen >= 8) {
 		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
-		OUT_BATCH(0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	} else {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
 		OUT_BATCH(0); /* reserved */
 		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	}
+	ADVANCE_BATCH();
 }

 static void
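Read as a diff the store-dword rework above is noisy; reassembled, the new shape of the helper is a single BEGIN/ADVANCE pair in which only the address emission differs per generation (pre-Gen8 a reserved dword plus a one-dword address, Gen8+ a two-dword address supplied by the relocation):

	BEGIN_BATCH(4, 0);
	OUT_BATCH(cmd);
	if (batch->gen >= 8) {
		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
			  I915_GEM_DOMAIN_INSTRUCTION, 0);
		OUT_BATCH(val);
	} else {
		OUT_BATCH(0); /* reserved */
		OUT_RELOC(dest, I915_GEM_DOMAIN_INSTRUCTION,
			  I915_GEM_DOMAIN_INSTRUCTION, 0);
		OUT_BATCH(val);
	}
	ADVANCE_BATCH();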
@@ -62,9 +62,9 @@ store_dword_loop(int divider)
 	cmd = MI_STORE_DWORD_IMM;

 	for (i = 0; i < SLOW_QUICK(0x2000, 0x10); i++) {
-		BEGIN_BATCH(4);
+		BEGIN_BATCH(4, 0);
 		OUT_BATCH(cmd);
-		if (intel_gen(batch->devid) < 8)
+		if (batch->gen < 8)
 			OUT_BATCH(0); /* reserved */
 		OUT_RELOC(target_buffer, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
@@ -163,7 +163,7 @@ static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pit
 	}

 	/* copy lower half to upper half */
-	BLIT_COPY_BATCH_START(devid, cmd_bits);
+	BLIT_COPY_BATCH_START(cmd_bits);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		dst_pitch);
@@ -175,8 +175,8 @@ static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pit
 	OUT_RELOC_FENCED(src_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 	ADVANCE_BATCH();

-	if (IS_GEN6(devid) || IS_GEN7(devid)) {
-		BEGIN_BATCH(3);
+	if (batch->gen >= 6) {
+		BEGIN_BATCH(3, 0);
 		OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 		OUT_BATCH(0);
 		OUT_BATCH(0);
@@ -84,7 +84,7 @@ copy_bo(drm_intel_bo *src, int src_tiled,
 		cmd_bits |= XY_SRC_COPY_BLT_SRC_TILED;
 	}

-	BLIT_COPY_BATCH_START(devid, cmd_bits);
+	BLIT_COPY_BATCH_START(cmd_bits);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		dst_pitch);
@@ -86,7 +86,7 @@ igt_simple_main
 	busy_bo = drm_intel_bo_alloc(bufmgr, "busy bo bo", 16*1024*1024, 4096);

 	for (i = 0; i < 250; i++) {
-		BLIT_COPY_BATCH_START(devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			2*1024*4);
@@ -98,8 +98,8 @@ igt_simple_main
 		OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (IS_GEN6(devid) || IS_GEN7(devid)) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 6) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -119,7 +119,7 @@ igt_simple_main

 	drm_intel_bo_disable_reuse(test_bo);

-	BLIT_COPY_BATCH_START(devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		TEST_STRIDE);
@@ -138,7 +138,7 @@ igt_simple_main

 	/* launch a few batchs to ensure the damaged slab objects get reused. */
 	for (i = 0; i < 10; i++) {
-		BLIT_COPY_BATCH_START(devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			2*1024*4);
@@ -150,8 +150,8 @@ igt_simple_main
 		OUT_RELOC_FENCED(busy_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (IS_GEN6(devid) || IS_GEN7(devid)) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 8) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -74,7 +74,7 @@ igt_simple_main
 	load_bo = drm_intel_bo_alloc(bufmgr, "target bo", 1024*4096, 4096);
 	igt_assert(load_bo);

-	BLIT_COPY_BATCH_START(batch->devid, 0);
+	BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xcc << 16) | /* copy ROP */
 		4096);
@@ -104,16 +104,8 @@ static void blt_color_fill(struct intel_batchbuffer *batch,
 	const unsigned short height = pages/4;
 	const unsigned short width = 4096;

-	if (intel_gen(batch->devid) >= 8) {
-		BEGIN_BATCH(8);
-		OUT_BATCH(MI_NOOP);
-		OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 5 |
-			COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
-	} else {
-		BEGIN_BATCH(6);
-		OUT_BATCH(XY_COLOR_BLT_CMD_NOLEN | 4 |
-			COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
-	}
+	COLOR_BLIT_COPY_BATCH_START(COLOR_BLT_WRITE_ALPHA |
+		XY_COLOR_BLT_WRITE_RGB);
 	OUT_BATCH((3 << 24) | /* 32 Bit Color */
 		(0xF0 << 16) | /* Raster OP copy background register */
 		0); /* Dest pitch is 0 */
@@ -80,7 +80,7 @@ static void run_test(int ring)
 	/* put some load onto the gpu to keep the light buffers active for long
 	 * enough */
 	for (i = 0; i < 1000; i++) {
-		BLIT_COPY_BATCH_START(batch->devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			4096);
@@ -93,7 +93,7 @@ static void run_test(int ring)
 		ADVANCE_BATCH();
 	}

-	COLOR_BLIT_COPY_BATCH_START(batch->devid, 0);
+	COLOR_BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((3 << 24) | /* 32 bits */
 		(0xff << 16) |
 		128);
@@ -107,7 +107,7 @@ static void run_test(int ring)

 	/* Emit an empty batch so that signalled seqno on the target ring >
 	 * signalled seqnoe on the blt ring. This is required to hit the bug. */
-	BEGIN_BATCH(2);
+	BEGIN_BATCH(2, 0);
 	OUT_BATCH(MI_NOOP);
 	OUT_BATCH(MI_NOOP);
 	ADVANCE_BATCH();
@@ -116,14 +116,14 @@ static void run_test(int ring)
 	/* For the ring->ring sync it's important to only emit a read reloc, for
 	 * otherwise the obj->last_write_seqno will be updated. */
 	if (ring == I915_EXEC_RENDER) {
-		BEGIN_BATCH(4);
+		BEGIN_BATCH(4, 1);
 		OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
 		OUT_BATCH(0xffffffff); /* compare dword */
 		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		OUT_BATCH(MI_NOOP);
 		ADVANCE_BATCH();
 	} else {
-		BEGIN_BATCH(4);
+		BEGIN_BATCH(4, 1);
 		OUT_BATCH(MI_FLUSH_DW | 1);
 		OUT_BATCH(0); /* reserved */
 		OUT_RELOC(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
@@ -83,20 +83,33 @@ static const char *test_mode_str(enum test_mode mode)
 	return test_modes[mode];
 }

-static void fill_blt(data_t *data, uint32_t handle, unsigned char color)
+static void fill_blt(data_t *data,
+		uint32_t handle,
+		struct igt_fb *fb,
+		unsigned char color)
 {
 	drm_intel_bo *dst = gem_handle_to_libdrm_bo(data->bufmgr,
 			data->drm_fd,
 			"", handle);
 	struct intel_batchbuffer *batch;
+	unsigned flags;
+	int pitch;

 	batch = intel_batchbuffer_alloc(data->bufmgr, data->devid);
 	igt_assert(batch);

-	COLOR_BLIT_COPY_BATCH_START(batch->devid, 0);
-	OUT_BATCH((0 << 24) | (0xf0 << 16) | 0);
+	pitch = fb->stride;
+	flags = XY_COLOR_BLT_WRITE_ALPHA |
+		XY_COLOR_BLT_WRITE_RGB;
+	if (fb->tiling && batch->gen >= 4) {
+		flags |= XY_COLOR_BLT_TILED;
+		pitch /= 4;
+	}
+
+	COLOR_BLIT_COPY_BATCH_START(flags);
+	OUT_BATCH(3 << 24 | 0xf0 << 16 | pitch);
 	OUT_BATCH(0);
-	OUT_BATCH(1 << 16 | 4);
+	OUT_BATCH(fb->height << 16 | fb->width);
 	OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
 	OUT_BATCH(color);
 	ADVANCE_BATCH();
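The kms_fbc_crc change above does more than drop the devid argument: it sizes the color blit from the real framebuffer and, when the fb is tiled, sets XY_COLOR_BLT_TILED and divides the stride by four, since the blitter takes a tiled pitch in dwords rather than bytes. A small self-contained sketch of that pitch rule (helper name and values are illustrative):

#include <assert.h>

/* Pitch as programmed into the XY_COLOR blit: bytes for linear surfaces,
 * dwords when the tiled bit is set (gen4+), mirroring fill_blt() above. */
static int blt_pitch(int stride_bytes, int tiled, int gen)
{
	if (tiled && gen >= 4)
		return stride_bytes / 4;
	return stride_bytes;
}

int main(void)
{
	assert(blt_pitch(4096, 0, 7) == 4096);
	assert(blt_pitch(4096, 1, 7) == 1024);
	assert(blt_pitch(4096, 1, 3) == 4096); /* pre-gen4: no tiled path */
	return 0;
}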
@@ -127,7 +140,7 @@ static void exec_nop(data_t *data, uint32_t handle, drm_intel_context *context)
 	igt_assert(batch);

 	/* add the reloc to make sure the kernel will think we write to dst */
-	BEGIN_BATCH(4);
+	BEGIN_BATCH(4, 1);
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 	OUT_BATCH(MI_NOOP);
 	OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -226,7 +239,7 @@ static void test_crc(data_t *data, enum test_mode mode)
 		break;
 	case TEST_BLT:
 	case TEST_PAGE_FLIP_AND_BLT:
-		fill_blt(data, handle, 0xff);
+		fill_blt(data, handle, data->fb, ~0);
 		break;
 	case TEST_RENDER:
 	case TEST_CONTEXT:
@@ -54,7 +54,7 @@ static void exec_nop(data_t *data, uint32_t handle, drm_intel_context *context)
 	igt_assert(batch);

 	/* add the reloc to make sure the kernel will think we write to dst */
-	BEGIN_BATCH(4);
+	BEGIN_BATCH(4, 1);
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 	OUT_BATCH(MI_NOOP);
 	OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -179,7 +179,7 @@ static void emit_dummy_load__bcs(struct test_output *o)
 	igt_assert(target_bo);

 	for (i = 0; i < limit; i++) {
-		BLIT_COPY_BATCH_START(devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			pitch);
@@ -191,8 +191,8 @@ static void emit_dummy_load__bcs(struct test_output *o)
 		OUT_RELOC_FENCED(target_bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 		ADVANCE_BATCH();

-		if (IS_GEN6(devid) || IS_GEN7(devid)) {
-			BEGIN_BATCH(3);
+		if (batch->gen >= 6) {
+			BEGIN_BATCH(3, 0);
 			OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
 			OUT_BATCH(0);
 			OUT_BATCH(0);
@@ -55,7 +55,7 @@ static void exec_nop(data_t *data, uint32_t handle, unsigned int ring)
 	igt_assert(bo);

 	/* add relocs to make sure the kernel will think we write to dst */
-	BEGIN_BATCH(4);
+	BEGIN_BATCH(4, 1);
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 	OUT_BATCH(MI_NOOP);
 	OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -81,7 +81,7 @@ static void exec_blt(data_t *data)
 	pitch = w * 4;

 	for (i = 0; i < 40; i++) {
-		BLIT_COPY_BATCH_START(data->devid, 0);
+		BLIT_COPY_BATCH_START(0);
 		OUT_BATCH((3 << 24) | /* 32 bits */
 			(0xcc << 16) | /* copy ROP */
 			pitch);
@@ -180,8 +180,7 @@ static void fill_blt(data_t *data, uint32_t handle, unsigned char color)
 	batch = intel_batchbuffer_alloc(data->bufmgr, data->devid);
 	igt_assert(batch);

-	BEGIN_BATCH(5);
-	OUT_BATCH(COLOR_BLT_CMD);
+	COLOR_BLIT_COPY_BATCH_START(0);
 	OUT_BATCH((1 << 24) | (0xf0 << 16) | 0);
 	OUT_BATCH(1 << 16 | 4);
 	OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -214,7 +213,7 @@ static void exec_nop(data_t *data, uint32_t handle, drm_intel_context *context)
 	igt_assert(batch);

 	/* add the reloc to make sure the kernel will think we write to dst */
-	BEGIN_BATCH(4);
+	BEGIN_BATCH(4, 1);
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 	OUT_BATCH(MI_NOOP);
 	OUT_RELOC(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
@@ -178,23 +178,19 @@ static void emit_store_dword_imm(uint32_t val)
 	if (!lh.has_ppgtt)
 		cmd |= MI_MEM_VIRTUAL;

-	if (intel_gen(lh.devid) >= 8) {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
+	BEGIN_BATCH(4, 1);
+	OUT_BATCH(cmd);
+	if (batch->gen >= 8) {
 		OUT_RELOC(lh.target_buffer, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
-		OUT_BATCH(0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	} else {
-		BEGIN_BATCH(4);
-		OUT_BATCH(cmd);
 		OUT_BATCH(0); /* reserved */
 		OUT_RELOC(lh.target_buffer, I915_GEM_DOMAIN_INSTRUCTION,
 			I915_GEM_DOMAIN_INSTRUCTION, 0);
 		OUT_BATCH(val);
-		ADVANCE_BATCH();
 	}
+	ADVANCE_BATCH();
 }

 #define LOAD_HELPER_PAUSE_USEC 500
@@ -166,7 +166,7 @@ BEGIN_NVXX(struct nouveau_pushbuf *push, int subc, int mthd, int size)
 static void
 noop_intel(drm_intel_bo *bo)
 {
-	BEGIN_BATCH(3);
+	BEGIN_BATCH(3, 1);
 	OUT_BATCH(MI_NOOP);
 	OUT_BATCH(MI_BATCH_BUFFER_END);
 	OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER,
@@ -332,7 +332,7 @@ gen5_get_counters(void)

 	stats_bo = drm_intel_bo_alloc(bufmgr, "stats", 4096, 4096);

-	BEGIN_BATCH(6);
+	BEGIN_BATCH(6, 2);
 	OUT_BATCH(GEN5_MI_REPORT_PERF_COUNT | MI_COUNTER_SET_0);
 	OUT_RELOC(stats_bo,
 		I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
@@ -380,7 +380,7 @@ gen6_get_counters(void)

 	stats_bo = drm_intel_bo_alloc(bufmgr, "stats", 4096, 4096);

-	BEGIN_BATCH(3);
+	BEGIN_BATCH(3, 1);
 	OUT_BATCH(GEN6_MI_REPORT_PERF_COUNT | (3 - 2));
 	OUT_RELOC(stats_bo,
 		I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
@@ -410,7 +410,7 @@ gen7_get_counters(void)

 	stats_bo = drm_intel_bo_alloc(bufmgr, "stats", 4096, 4096);

-	BEGIN_BATCH(3);
+	BEGIN_BATCH(3, 1);
 	OUT_BATCH(GEN6_MI_REPORT_PERF_COUNT | (3 - 2));
 	OUT_RELOC(stats_bo,
 		I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);