Mirror of https://github.com/tiagovignatti/intel-gpu-tools.git (synced 2025-06-11 01:46:14 +00:00)
lib: Add read/write direction support for dmabuf synchronisation

Allow read-only synchronisation on dmabuf mmaps, useful to allow
concurrent read-read testing between the CPU and GPU.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 925e5e1cae
commit aed69b56d4
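As a quick illustration of the intent, here is a minimal sketch, not part of this commit (the helper name check_dmabuf_readonly and the includes are assumptions for illustration): a CPU-side read of a dmabuf mmap can now request only READ access, so a concurrent GPU reader does not have to be serialised against it.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "igt_core.h"       /* igt_assert_eq_u32() */
#include "ioctl_wrappers.h" /* prime_sync_start()/prime_sync_end() */

/* Hypothetical example: validate a dmabuf mmap'ed at ptr while holding
 * only a READ-direction sync, matching the read-read use case described
 * in the commit message. */
static void check_dmabuf_readonly(int dma_buf_fd, const uint32_t *ptr,
				  size_t count, uint32_t val)
{
	size_t i;

	prime_sync_start(dma_buf_fd, false);	/* SYNC_START | SYNC_READ */
	for (i = 0; i < count; i++)
		igt_assert_eq_u32(ptr[i], val);
	prime_sync_end(dma_buf_fd, false);	/* SYNC_END | SYNC_READ */
}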
@@ -1539,12 +1539,15 @@ off_t prime_get_size(int dma_buf_fd)
  * prime_sync_start
  * @dma_buf_fd: dma-buf fd handle
  */
-void prime_sync_start(int dma_buf_fd)
+void prime_sync_start(int dma_buf_fd, bool write)
 {
 	struct local_dma_buf_sync sync_start;
 
 	memset(&sync_start, 0, sizeof(sync_start));
-	sync_start.flags = LOCAL_DMA_BUF_SYNC_START | LOCAL_DMA_BUF_SYNC_RW;
+	sync_start.flags = LOCAL_DMA_BUF_SYNC_START;
+	sync_start.flags |= LOCAL_DMA_BUF_SYNC_READ;
+	if (write)
+		sync_start.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
 	do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_start);
 }
 
@@ -1552,12 +1555,15 @@ void prime_sync_start(int dma_buf_fd)
  * prime_sync_end
  * @dma_buf_fd: dma-buf fd handle
  */
-void prime_sync_end(int dma_buf_fd)
+void prime_sync_end(int dma_buf_fd, bool write)
 {
 	struct local_dma_buf_sync sync_end;
 
 	memset(&sync_end, 0, sizeof(sync_end));
-	sync_end.flags = LOCAL_DMA_BUF_SYNC_END | LOCAL_DMA_BUF_SYNC_RW;
+	sync_end.flags = LOCAL_DMA_BUF_SYNC_END;
+	sync_end.flags |= LOCAL_DMA_BUF_SYNC_READ;
+	if (write)
+		sync_end.flags |= LOCAL_DMA_BUF_SYNC_WRITE;
 	do_ioctl(dma_buf_fd, LOCAL_DMA_BUF_IOCTL_SYNC, &sync_end);
 }
 
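For orientation, the LOCAL_DMA_BUF_* names used by the helpers above are local copies of the kernel's dma-buf synchronisation uapi. The reference below is a hedged sketch of the <linux/dma-buf.h> definitions (values believed to match the upstream header) and is not part of this diff.

#include <linux/ioctl.h>
#include <linux/types.h>

/* Reference sketch of the dma-buf sync uapi mirrored by the LOCAL_*
 * duplicates above; check <linux/dma-buf.h> for the authoritative copy. */
struct dma_buf_sync {
	__u64 flags;
};

#define DMA_BUF_SYNC_READ	(1 << 0)
#define DMA_BUF_SYNC_WRITE	(2 << 0)
#define DMA_BUF_SYNC_RW		(DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
#define DMA_BUF_SYNC_START	(0 << 2)
#define DMA_BUF_SYNC_END	(1 << 2)

#define DMA_BUF_BASE		'b'
#define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)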
@@ -177,8 +177,8 @@ int prime_handle_to_fd(int fd, uint32_t handle);
 int prime_handle_to_fd_for_mmap(int fd, uint32_t handle);
 uint32_t prime_fd_to_handle(int fd, int dma_buf_fd);
 off_t prime_get_size(int dma_buf_fd);
-void prime_sync_start(int dma_buf_fd);
-void prime_sync_end(int dma_buf_fd);
+void prime_sync_start(int dma_buf_fd, bool write);
+void prime_sync_end(int dma_buf_fd, bool write);
 
 /* addfb2 fb modifiers */
 struct local_drm_mode_fb_cmd2 {
@@ -365,10 +365,10 @@ dmabuf_set_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
 	uint32_t *v;
 	int size;
 
-	prime_sync_start(dmabuf->fd);
+	prime_sync_start(dmabuf->fd, true);
 	for (v = dmabuf->map, size = b->size; size--; v++)
 		*v = val;
-	prime_sync_end(dmabuf->fd);
+	prime_sync_end(dmabuf->fd, true);
 }
 
 static void
@@ -378,10 +378,10 @@ dmabuf_cmp_bo(struct buffers *b, drm_intel_bo *bo, uint32_t val)
 	uint32_t *v;
 	int size;
 
-	prime_sync_start(dmabuf->fd);
+	prime_sync_start(dmabuf->fd, false);
 	for (v = dmabuf->map, size = b->size; size--; v++)
 		igt_assert_eq_u32(*v, val);
-	prime_sync_end(dmabuf->fd);
+	prime_sync_end(dmabuf->fd, false);
 }
 
 static void
@@ -110,7 +110,7 @@ static void test(data_t *data)
 	 * firstly demonstrate the need for DMA_BUF_SYNC_START ("begin_cpu_access")
 	 */
 	if (ioctl_sync)
-		prime_sync_start(dma_buf_fd);
+		prime_sync_start(dma_buf_fd, true);
 
 	/* use dmabuf pointer to make the other fb all white too */
 	buf = malloc(fb->size);
@@ -142,7 +142,7 @@ static void test(data_t *data)
 
 	/* sync start, to move to CPU domain */
 	if (ioctl_sync)
-		prime_sync_start(dma_buf_fd);
+		prime_sync_start(dma_buf_fd, true);
 
 	/* use dmabuf pointer in the same fb to make it all white */
 	buf = malloc(fb->size);
@@ -154,7 +154,7 @@ static void test(data_t *data)
 	/* if we don't change to the GTT domain again, the whites won't get flushed
 	 * and therefore we demonstrates the need for sync end here */
 	if (ioctl_sync)
-		prime_sync_end(dma_buf_fd);
+		prime_sync_end(dma_buf_fd, true);
 
 	/* check that the crc is as expected, which requires that caches got flushed */
 	igt_pipe_crc_collect_crc(data->pipe_crc, &crc);
@@ -97,7 +97,7 @@ static void test_read_flush(bool expect_stale_cache)
 	 * until we try to read them again in step #4. This behavior could be fixed
 	 * by flush CPU read right before accessing the CPU pointer */
 	if (!expect_stale_cache)
-		prime_sync_start(dma_buf_fd);
+		prime_sync_start(dma_buf_fd, false);
 
 	for (i = 0; i < (width * height) / 4; i++)
 		if (ptr_cpu[i] != 0x11111111) {
@@ -149,7 +149,7 @@ static void test_write_flush(bool expect_stale_cache)
 	/* This is the main point of this test: !llc hw requires a cache write
 	 * flush right here (explained in step #4). */
 	if (!expect_stale_cache)
-		prime_sync_start(dma_buf_fd);
+		prime_sync_start(dma_buf_fd, true);
 
 	memset(ptr_cpu, 0x11, width * height);
 
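Finally, the write direction pairs with the read-only sketch shown earlier. A hypothetical fill helper (illustration only, loosely mirroring dmabuf_set_bo and test_write_flush above; the name fill_dmabuf is an assumption) would pass write = true so that, on !llc hardware, dirty CPU cachelines are flushed when the access ends.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "ioctl_wrappers.h" /* prime_sync_start()/prime_sync_end() */

/* Hypothetical example: fill a dmabuf mmap from the CPU under a
 * write-direction sync; with write = true the helpers request
 * READ | WRITE, i.e. the old SYNC_RW behaviour. */
static void fill_dmabuf(int dma_buf_fd, uint32_t *ptr, size_t count,
			uint32_t val)
{
	size_t i;

	prime_sync_start(dma_buf_fd, true);	/* SYNC_START | SYNC_RW */
	for (i = 0; i < count; i++)
		ptr[i] = val;
	prime_sync_end(dma_buf_fd, true);	/* SYNC_END | SYNC_RW */
}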