lib: api docs for intel_batchbuffer

- I didn't bother to document the BLIT batch header macros - I'm not
  too happy with them and they're fairly obscure.
- intel_copy_bo could use some interface love, added a FIXME comment
  for now.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Author: Daniel Vetter, 2014-03-13 01:13:28 +01:00
parent 49e3877ae7
commit ec5f9e8788
2 changed files with 175 additions and 1 deletion

lib/intel_batchbuffer.c

@@ -39,6 +39,28 @@
#include "intel_reg.h"
#include <i915_drm.h>
/**
* SECTION:intel_batchbuffer
* @short_description: Batchbuffer and blitter support
* @title: intel batchbuffer
*
* This library provides some basic support for batchbuffers and using the
* blitter engine based upon libdrm. A new batchbuffer is allocated with
* intel_batchbuffer_alloc() and for simple blitter commands submitted with
* intel_batchbuffer_flush().
*
* It also provides some convenient macros to easily emit commands into
* batchbuffers. All those macros presume that a pointer to an #intel_batchbuffer
* structure called batch is in scope. The basic macros are #BEGIN_BATCH,
* #OUT_BATCH, #OUT_RELOC and #ADVANCE_BATCH.
*/
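
As a quick orientation, a minimal usage sketch could look roughly like this (it assumes an already opened drm fd, with bufmgr coming from drm_intel_bufmgr_gem_init() and devid from intel_get_drm_devid(); MI_NOOP is the no-op DWORD from intel_reg.h):

struct intel_batchbuffer *batch = intel_batchbuffer_alloc(bufmgr, devid);

BEGIN_BATCH(2);
OUT_BATCH(MI_NOOP);	/* emit two no-op DWORDs just to have something */
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();

intel_batchbuffer_flush(batch);	/* submit and reset the batch */
intel_batchbuffer_free(batch);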
/**
* intel_batchbuffer_reset:
* @batch: batchbuffer object
*
* Resets @batch by allocating a new gem buffer object as backing storage.
*/
void
intel_batchbuffer_reset(struct intel_batchbuffer *batch)
{
@@ -55,6 +77,16 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
batch->ptr = batch->buffer;
}
/**
* intel_batchbuffer_alloc:
* @bufmgr: libdrm buffer manager
* @devid: pci device id of the drm device
*
* Allocates a new batchbuffer object. @devid must be supplied since libdrm
* doesn't expose it directly.
*
* Returns: The allocated and initialized batchbuffer object.
*/
struct intel_batchbuffer *
intel_batchbuffer_alloc(drm_intel_bufmgr *bufmgr, uint32_t devid)
{
@@ -67,6 +99,12 @@ intel_batchbuffer_alloc(drm_intel_bufmgr *bufmgr, uint32_t devid)
return batch;
}
/**
* intel_batchbuffer_free:
* @batch: batchbuffer object
*
* Releases all resources of the batchbuffer object @batch.
*/
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
@@ -106,6 +144,13 @@ flush_on_ring_common(struct intel_batchbuffer *batch, int ring)
return batch->ptr - batch->buffer;
}
/**
* intel_batchbuffer_flush_on_ring:
* @batch: batchbuffer object
* @ring: execbuf ring flag
*
* Submits the batch for execution on @ring.
*/
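
For example, a hedged one-liner submitting on the blitter ring (assuming I915_EXEC_BLT, the execbuf ring flag from i915_drm.h):

intel_batchbuffer_flush_on_ring(batch, I915_EXEC_BLT);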
void
intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring)
{
@@ -123,6 +168,14 @@ intel_batchbuffer_flush_on_ring(struct intel_batchbuffer *batch, int ring)
intel_batchbuffer_reset(batch);
}
/**
* intel_batchbuffer_flush_with_context:
* @batch: batchbuffer object
* @context: libdrm hardware context object
*
* Submits the batch for execution on the render engine with the supplied
* hardware context.
*/
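
A hedged sketch of pairing this with a libdrm hardware context; drm_intel_gem_context_create()/drm_intel_gem_context_destroy() are the libdrm entry points, error handling omitted:

drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
/* ... emit render commands into batch ... */
intel_batchbuffer_flush_with_context(batch, ctx);
drm_intel_gem_context_destroy(ctx);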
void
intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
drm_intel_context *context)
@@ -145,6 +198,13 @@ intel_batchbuffer_flush_with_context(struct intel_batchbuffer *batch,
intel_batchbuffer_reset(batch);
}
/**
* intel_batchbuffer_flush:
* @batch: batchbuffer object
*
* Submits the batch for execution on the blitter engine, selecting the right
* ring depending upon the hardware platform.
*/
void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
@@ -155,7 +215,21 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
}
/* This is the only way buffers get added to the validate list.
/**
* intel_batchbuffer_emit_reloc:
* @batch: batchbuffer object
* @buffer: relocation target libdrm buffer object
* @delta: delta value to add to @buffer's gpu address
* @read_domains: gem domain bits for the relocation
* @write_domain: gem domain bit for the relocation
* @fenced: whether this gpu access requires fences
*
* Emits both a libdrm relocation entry pointing at @buffer and the pre-computed
* DWORD of @buffer's presumed gpu address plus the supplied @delta into @batch.
*
* Note that @fenced is only relevant if @buffer is actually tiled.
*
* This is the only way buffers get added to the validate list.
*/
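
As a hedged illustration of a direct call (target_bo is a placeholder buffer object, 0x40 an arbitrary offset into it):

/* emit one DWORD pointing at target_bo + 0x40, read through the render domain */
intel_batchbuffer_emit_reloc(batch, target_bo, 0x40,
			     I915_GEM_DOMAIN_RENDER, 0, 0);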
void
intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
@@ -183,6 +257,15 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
assert(ret == 0);
}
/**
* intel_batchbuffer_data:
* @batch: batchbuffer object
* @data: pointer to the data to write into the batchbuffer
* @bytes: number of bytes to write into the batchbuffer
*
* This transfers the given @data into the batchbuffer. Note that the length
* must be DWORD aligned, i.e. a multiple of 32 bits.
*/
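
For example, a minimal sketch pushing 16 bytes (four DWORDs) of no-ops, with MI_NOOP taken from intel_reg.h:

const uint32_t noops[4] = { MI_NOOP, MI_NOOP, MI_NOOP, MI_NOOP };
intel_batchbuffer_data(batch, noops, sizeof(noops));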
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, unsigned int bytes)
@@ -193,6 +276,24 @@ intel_batchbuffer_data(struct intel_batchbuffer *batch,
batch->ptr += bytes;
}
/**
* intel_blt_copy:
* @batch: batchbuffer object
* @src_bo: source libdrm buffer object
* @src_x1: source pixel x-coordinate
* @src_y1: source pixel y-coordinate
* @src_pitch: @src_bo's pitch in bytes
* @dst_bo: destination libdrm buffer object
* @dst_x1: destination pixel x-coordinate
* @dst_y1: destination pixel y-coordinate
* @dst_pitch: @dst_bo's pitch in bytes
* @width: width of the copied rectangle
* @height: height of the copied rectangle
* @bpp: bits per pixel
*
* This emits a 2D copy operation using blitter commands into the supplied batch
* buffer object.
*/
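
A hedged sketch of a call copying a 256x256 region of 32bpp pixels between two linear buffer objects with a 4096 byte pitch (src_bo and dst_bo are placeholder names):

intel_blt_copy(batch,
	       src_bo, 0, 0, 4096,
	       dst_bo, 0, 0, 4096,
	       256, 256, 32);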
void
intel_blt_copy(struct intel_batchbuffer *batch,
drm_intel_bo *src_bo, int src_x1, int src_y1, int src_pitch,
@@ -260,6 +361,22 @@ intel_blt_copy(struct intel_batchbuffer *batch,
intel_batchbuffer_flush(batch);
}
/**
* intel_copy_bo:
* @batch: batchbuffer object
* @src_bo: source libdrm buffer object
* @dst_bo: destination libdrm buffer object
* @width: width of the copied area in 4-byte pixels
* @height: height of the copied area in lines
*
* This emits a copy operation using blitter commands into the supplied batch
* buffer object. A total of @width times @height 4-byte pixels from the start
* of @src_bo is copied over to @dst_bo.
*
* FIXME: We need @width and @height to avoid hitting platform-specific limits
* of the blitter. It would be easier to just accept a size and do the math
* ourselves.
*/
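
For illustration, a hedged sketch copying two equally sized buffer objects that are 1024 4-byte pixels (4096 bytes) wide and 16 lines tall (the bo names are placeholders):

intel_copy_bo(batch, dst_bo, src_bo, 1024, 16);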
void
intel_copy_bo(struct intel_batchbuffer *batch,
drm_intel_bo *dst_bo, drm_intel_bo *src_bo,

lib/intel_batchbuffer.h

@@ -71,24 +71,75 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
intel_batchbuffer_flush(batch);
}
/**
* BEGIN_BATCH:
* @n: number of DWORDS to emit
*
* Prepares a batch to emit @n DWORDS, flushing it if there's not enough space
* available.
*
* This macro needs a pointer to an #intel_batchbuffer structure called batch in
* scope.
*/
#define BEGIN_BATCH(n) do { \
intel_batchbuffer_require_space(batch, (n)*4); \
} while (0)
/**
* OUT_BATCH:
* @d: DWORD to emit
*
* Emits @d into a batch.
*
* This macro needs a pointer to an #intel_batchbuffer structure called batch in
* scope.
*/
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(batch, d)
/**
* OUT_RELOC_FENCED:
* @buf: relocation target libdrm buffer object
* @read_domains: gem domain bits for the relocation
* @write_domain: gem domain bit for the relocation
* @delta: delta value to add to @buf's gpu address
*
* Emits a fenced relocation into a batch.
*
* This macro needs a pointer to an #intel_batchbuffer structure called batch in
* scope.
*/
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do { \
assert((delta) >= 0); \
intel_batchbuffer_emit_reloc(batch, buf, delta, \
read_domains, write_domain, 1); \
} while (0)
/**
* OUT_RELOC:
* @buf: relocation target libdrm buffer object
* @read_domains: gem domain bits for the relocation
* @write_domain: gem domain bit for the relocation
* @delta: delta value to add to @buf's gpu address
*
* Emits a normal, unfenced relocation into a batch.
*
* This macro needs a pointer to an #intel_batchbuffer structure called batch in
* scope.
*/
#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
assert((delta) >= 0); \
intel_batchbuffer_emit_reloc(batch, buf, delta, \
read_domains, write_domain, 0); \
} while (0)
/**
* ADVANCE_BATCH:
*
* Completes the batch command emission sequence started with #BEGIN_BATCH.
*
* This macro needs a pointer to an #intel_batchbuffer structure called batch in
* scope.
*/
#define ADVANCE_BATCH() do { \
} while(0)
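
Putting the emission macros together, a hedged sketch of a 4-DWORD sequence (some_cmd and some_bo are placeholders, not a real command encoding):

BEGIN_BATCH(4);
OUT_BATCH(some_cmd);	/* command header */
OUT_BATCH(0);		/* e.g. flags or pitch */
OUT_RELOC(some_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(MI_NOOP);	/* padding */
ADVANCE_BATCH();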
@@ -123,6 +174,12 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
} \
} while(0)
/**
* BLIT_RELOC_UDW:
* @devid: pci device id of the drm device
*
* Emits the upper relocation DWORD on gen8+ and nothing on earlier generations.
*/
#define BLIT_RELOC_UDW(devid) do { \
if (intel_gen(devid) >= 8) { \
OUT_BATCH(0); \