diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 53bd635d..941fa66d 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -1125,6 +1125,9 @@ void gem_require_ring(int fd, int ring_id)
 
 /* prime */
+#ifndef DRM_RDWR
+#define DRM_RDWR O_RDWR
+#endif
 
 /**
  * prime_handle_to_fd:
  * @fd: open i915 drm file descriptor
@@ -1142,7 +1145,7 @@ int prime_handle_to_fd(int fd, uint32_t handle)
 
 	memset(&args, 0, sizeof(args));
 	args.handle = handle;
-	args.flags = DRM_CLOEXEC;
+	args.flags = DRM_CLOEXEC | DRM_RDWR;
 	args.fd = -1;
 
 	do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
diff --git a/tests/prime_mmap.c b/tests/prime_mmap.c
index dc59e8f2..ad913712 100644
--- a/tests/prime_mmap.c
+++ b/tests/prime_mmap.c
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Rob Bradford
+ *    Tiago Vignatti
  *
  */
 
@@ -65,6 +66,12 @@ fill_bo(uint32_t handle, size_t size)
 	}
 }
 
+static void
+fill_bo_cpu(char *ptr)
+{
+	memcpy(ptr, pattern, sizeof(pattern));
+}
+
 static void
 test_correct(void)
 {
@@ -180,6 +187,62 @@ test_forked(void)
 	gem_close(fd, handle);
 }
 
+/* test CPU write. This has a rather big implication for the driver which must
+ * guarantee cache synchronization when writing the bo using CPU. */
+static void
+test_correct_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	/* Check correctness of map using write protection (PROT_WRITE) */
+	ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+
+	/* Fill bo using CPU */
+	fill_bo_cpu(ptr);
+
+	/* Check pattern correctness */
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+
+	munmap(ptr, BO_SIZE);
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+/* map from another process and then write using CPU */
+static void
+test_forked_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	igt_fork(childno, 1) {
+		ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+		igt_assert(ptr != MAP_FAILED);
+		fill_bo_cpu(ptr);
+
+		igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+		munmap(ptr, BO_SIZE);
+		close(dma_buf_fd);
+	}
+	close(dma_buf_fd);
+	igt_waitchildren();
+	gem_close(fd, handle);
+}
+
 static void
 test_refcounting(void)
 {
@@ -346,6 +409,8 @@ igt_main
 	{ "test_map_unmap", test_map_unmap },
 	{ "test_reprime", test_reprime },
 	{ "test_forked", test_forked },
+	{ "test_correct_cpu_write", test_correct_cpu_write },
+	{ "test_forked_cpu_write", test_forked_cpu_write },
 	{ "test_refcounting", test_refcounting },
 	{ "test_dup", test_dup },
 	{ "test_errors", test_errors },
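
Note (illustration only, not part of the patch): the comment on test_correct_cpu_write() relies on the driver keeping caches coherent while the CPU writes through the dma-buf mapping. Below is a minimal userspace sketch of how a client might drive such a read/write mapping, assuming a kernel that exposes DMA_BUF_IOCTL_SYNC in <linux/dma-buf.h>; the helper name cpu_write_dmabuf and its parameters are hypothetical, and the exporting driver must have created the fd with DRM_RDWR, as in the prime_handle_to_fd() change above.

/*
 * Hypothetical sketch: write into a dma-buf via a shared CPU mapping,
 * bracketing the access with DMA_BUF_IOCTL_SYNC so the exporter can
 * flush/invalidate CPU caches around the access.
 */
#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int cpu_write_dmabuf(int dma_buf_fd, size_t size,
			    const void *data, size_t len)
{
	struct dma_buf_sync sync = { 0 };
	char *ptr;

	/* Requires the fd to have been exported with DRM_RDWR. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   dma_buf_fd, 0);
	if (ptr == MAP_FAILED)
		return -1;

	/* Tell the exporter a CPU write access is starting. */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	memcpy(ptr, data, len);

	/* End of the CPU access; caches can now be flushed. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(ptr, size);
	return 0;
}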