gem_stress: move keep_gpu_busy stuff out of render copy functions

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
Daniel Vetter 2012-01-18 17:51:48 +01:00
parent b9fe673f49
commit f7c2dab5bd
4 changed files with 12 additions and 25 deletions

View File

@@ -168,7 +168,7 @@ static void emit_blt(drm_intel_bo *src_bo, uint32_t src_tiling, unsigned src_pit
 /* All this gem trashing wastes too much cpu time, so give the gpu something to
  * do to increase changes for races. */
-void keep_gpu_busy(void)
+static void keep_gpu_busy(void)
 {
 	int tmp;
@@ -322,6 +322,12 @@ static void render_copyfunc(struct scratch_buf *src, unsigned src_x, unsigned sr
 			    struct scratch_buf *dst, unsigned dst_x, unsigned dst_y,
 			    unsigned logical_tile_no)
 {
+	static unsigned keep_gpu_busy_counter = 0;
+
+	/* check both edges of the fence usage */
+	if (keep_gpu_busy_counter & 1)
+		keep_gpu_busy();
+
 	if (IS_GEN2(devid))
 		gen2_render_copyfunc(batch,
 				     src, src_x, src_y,
@@ -341,6 +347,11 @@ static void render_copyfunc(struct scratch_buf *src, unsigned src_x, unsigned sr
 		blitter_copyfunc(src, src_x, src_y,
 				 dst, dst_x, dst_y,
 				 logical_tile_no);
+	if (!(keep_gpu_busy_counter & 1))
+		keep_gpu_busy();
+	keep_gpu_busy_counter++;
+
+	intel_batchbuffer_flush(batch);
 }

 static void next_copyfunc(int tile)

View File

@@ -26,8 +26,6 @@ struct scratch_buf {
 	unsigned num_tiles;
 };

-void keep_gpu_busy(void);
-
 static inline void emit_vertex_2s(struct intel_batchbuffer *batch,
 				  int16_t x, int16_t y)
 {

View File

@@ -57,12 +57,6 @@ void gen2_render_copyfunc(struct intel_batchbuffer *batch,
 			  unsigned width, unsigned height,
 			  struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
 {
-	static unsigned keep_gpu_busy_counter = 0;
-
-	/* check both edges of the fence usage */
-	if (keep_gpu_busy_counter & 1)
-		keep_gpu_busy();
-
 	/* invariant state */
 	{
 		OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(0));
@@ -346,10 +340,5 @@ void gen2_render_copyfunc(struct intel_batchbuffer *batch,
 	emit_vertex_normalized(batch, src_x, buf_width(src));
 	emit_vertex_normalized(batch, src_y, buf_height(src));

-	if (!(keep_gpu_busy_counter & 1))
-		keep_gpu_busy();
-	keep_gpu_busy_counter++;
-
 	intel_batchbuffer_flush(batch);
 }

View File

@@ -7,12 +7,6 @@ void gen3_render_copyfunc(struct intel_batchbuffer *batch,
 			  unsigned width, unsigned height,
 			  struct scratch_buf *dst, unsigned dst_x, unsigned dst_y)
 {
-	static unsigned keep_gpu_busy_counter = 0;
-
-	/* check both edges of the fence usage */
-	if (keep_gpu_busy_counter & 1)
-		keep_gpu_busy();
-
 	/* invariant state */
 	{
 		OUT_BATCH(_3DSTATE_AA_CMD |
@@ -182,10 +176,5 @@ void gen3_render_copyfunc(struct intel_batchbuffer *batch,
 	emit_vertex(batch, src_x);
 	emit_vertex(batch, src_y);

-	if (!(keep_gpu_busy_counter & 1))
-		keep_gpu_busy();
-	keep_gpu_busy_counter++;
-
 	intel_batchbuffer_flush(batch);
 }