Also sprinkle igt_assert and igt_require over the setup code to clean up the code while at it. To avoid gcc getting upset about uninitialized variables, just move them out of main as global data (where they always get initialized to 0) - gcc can't see through our igt_fixture and igt_subtest maze properly. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

/* basic set of prime tests between intel and nouveau */

/* test list -
   1. share buffer from intel -> nouveau.
   2. share buffer from nouveau -> intel
   3. share intel->nouveau, map on both, write intel, read nouveau
   4. share intel->nouveau, blit intel fill, readback on nouveau
   test 1 + map buffer, read/write, map other side.
   do some hw actions on the buffer
   some illegal operations -
       close prime fd, try to map

   TODO add some nouveau rendering tests
*/

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/ioctl.h>

#include "i915_drm.h"
#include "intel_bufmgr.h"
#include "nouveau.h"
#include "intel_gpu_tools.h"
#include "intel_batchbuffer.h"
#include "drmtest.h"

int intel_fd = -1, nouveau_fd = -1;
drm_intel_bufmgr *bufmgr;
struct nouveau_device *ndev;
struct nouveau_client *nclient;
uint32_t devid;
struct intel_batchbuffer *intel_batch;

#define BO_SIZE (256*1024)

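/*
 * Scan /sys/class/drm/card0..card8, read each device's PCI vendor id and
 * open the first intel (0x8086) and first nvidia (0x10de) node found,
 * leaving the fds in the intel_fd/nouveau_fd globals.
 */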
static int find_and_open_devices(void)
{
	int i;
	char path[80];
	struct stat buf;
	FILE *fl;
	char vendor_id[8];
	int venid;

	for (i = 0; i < 9; i++) {
		char *ret;

		sprintf(path, "/sys/class/drm/card%d/device/vendor", i);
		if (stat(path, &buf))
			break;

		fl = fopen(path, "r");
		if (!fl)
			break;

		ret = fgets(vendor_id, 8, fl);
		igt_assert(ret);
		fclose(fl);

		venid = strtoul(vendor_id, NULL, 16);
		sprintf(path, "/dev/dri/card%d", i);
		if (venid == 0x8086) {
			intel_fd = open(path, O_RDWR);
			if (intel_fd < 0)
				return -1;
		} else if (venid == 0x10de) {
			nouveau_fd = open(path, O_RDWR);
			if (nouveau_fd < 0)
				return -1;
		}
	}
	return 0;
}

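/*
 * Common sharing pattern used by the tests below: export a GEM bo as a
 * prime (dma-buf) fd, import that fd on the other driver
 * (nouveau_bo_prime_handle_ref() or drm_intel_bo_gem_create_from_prime()),
 * then close the fd - the imported bo holds its own reference, so the fd
 * is no longer needed.
 */
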
/*
 * prime test 1 -
 * allocate buffer on intel,
 * set prime on buffer,
 * retrieve buffer from nouveau,
 * close prime_fd,
 * unref buffers
 */
static int test_i915_nv_sharing(void)
{
	int ret;
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo);
	close(prime_fd);
	if (ret < 0)
		return ret;

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
	return 0;
}

/*
 * prime test 2 -
 * allocate buffer on nouveau,
 * set prime on buffer,
 * retrieve buffer from intel,
 * close prime_fd,
 * unref buffers
 */
static int test_nv_i915_sharing(void)
{
	int ret;
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;

	ret = nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
			     0, BO_SIZE, NULL, &nvbo);
	if (ret < 0)
		return ret;
	ret = nouveau_bo_set_prime(nvbo, &prime_fd);
	if (ret < 0)
		return ret;

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	if (!test_intel_bo)
		return -1;

	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
	return 0;
}

/*
 * allocate intel, give to nouveau, map on nouveau
 * write 0xdeadbeef, non-gtt map on intel, read
 */
static int test_nv_write_i915_cpu_mmap_read(void)
{
	int ret;
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo);
	close(prime_fd);
	if (ret < 0) {
		fprintf(stderr, "failed to ref prime buffer %d\n", ret);
		goto free_intel;
	}

	ret = nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient);
	if (ret < 0) {
		fprintf(stderr, "failed to map nouveau bo\n");
		goto out;
	}

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

	drm_intel_bo_map(test_intel_bo, 1);
	ptr = test_intel_bo->virtual;

	if (*ptr != 0xdeadbeef) {
		fprintf(stderr, "mapped value doesn't match\n");
		ret = -1;
	}
out:
	nouveau_bo_ref(NULL, &nvbo);
free_intel:
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}

/*
 * allocate intel, give to nouveau, map on nouveau
 * write 0xdeadbeef, gtt map on intel, read
 */
static int test_nv_write_i915_gtt_mmap_read(void)
{
	int ret;
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo);
	close(prime_fd);
	if (ret < 0) {
		fprintf(stderr, "failed to ref prime buffer\n");
		return ret;
	}

	ret = nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient);
	if (ret < 0) {
		fprintf(stderr, "failed to map nouveau bo\n");
		goto out;
	}

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

	drm_intel_gem_bo_map_gtt(test_intel_bo);
	ptr = test_intel_bo->virtual;

	if (*ptr != 0xdeadbeef) {
		fprintf(stderr, "mapped value doesn't match\n");
		ret = -1;
	}
out:
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}

/* test drm_intel_bo_map on an imported bo - it doesn't work properly,
   since it tries to map the backing shmem fd, which doesn't exist
   for these objects */
static int test_i915_import_cpu_mmap(void)
{
	int ret;
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;

	ret = nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
			     0, BO_SIZE, NULL, &nvbo);
	if (ret < 0)
		return ret;
	ret = nouveau_bo_set_prime(nvbo, &prime_fd);
	if (ret < 0)
		return ret;

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	if (!test_intel_bo)
		return -1;

	ret = nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient);
	if (ret < 0) {
		fprintf(stderr, "failed to map nouveau bo\n");
		goto out;
	}

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

	ret = drm_intel_bo_map(test_intel_bo, 0);
	if (ret != 0) {
		fprintf(stderr, "failed to map imported bo on intel side\n");
		goto out;
	}
	if (!test_intel_bo->virtual) {
		/* no cpu mapping available - nothing to check */
		ret = 0;
		goto out;
	}
	ptr = test_intel_bo->virtual;

	if (*ptr != 0xdeadbeef) {
		fprintf(stderr, "mapped value doesn't match %08x\n", *ptr);
		ret = -1;
	}
out:
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}

/* test that drm_intel_gem_bo_map_gtt works properly on an imported bo -
   unlike the cpu mmap above, the gtt mapping doesn't go through the
   (non-existent) backing shmem fd */
static int test_i915_import_gtt_mmap(void)
{
	int ret;
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;

	ret = nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
			     0, BO_SIZE, NULL, &nvbo);
	if (ret < 0)
		return ret;
	ret = nouveau_bo_set_prime(nvbo, &prime_fd);
	if (ret < 0)
		return ret;

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	if (!test_intel_bo)
		return -1;

	ret = nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient);
	if (ret < 0) {
		fprintf(stderr, "failed to map nouveau bo\n");
		goto out;
	}

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;
	*(ptr + 1) = 0xa55a55;

	ret = drm_intel_gem_bo_map_gtt(test_intel_bo);
	if (ret != 0) {
		fprintf(stderr, "failed to map bo\n");
		goto out;
	}
	if (!test_intel_bo->virtual) {
		ret = -1;
		fprintf(stderr, "failed to map bo\n");
		goto out;
	}
	ptr = test_intel_bo->virtual;

	if (*ptr != 0xdeadbeef) {
		fprintf(stderr, "mapped value doesn't match %08x %08x\n", *ptr, *(ptr + 1));
		ret = -1;
	}
out:
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}

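/*
 * gem_read()/gem_write() are the igt wrappers around the i915
 * GEM_PREAD/GEM_PWRITE ioctls, so the test below exercises the
 * pread/pwrite paths directly on a prime-imported (non-shmem) object.
 */
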
/* test 7 - import from nouveau into intel, test pread/pwrite fail */
static int test_i915_import_pread_pwrite(void)
{
	int ret;
	drm_intel_bo *test_intel_bo;
	int prime_fd;
	struct nouveau_bo *nvbo;
	uint32_t *ptr;
	uint32_t buf[64];

	ret = nouveau_bo_new(ndev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
			     0, BO_SIZE, NULL, &nvbo);
	if (ret < 0)
		return ret;
	ret = nouveau_bo_set_prime(nvbo, &prime_fd);
	if (ret < 0)
		return ret;

	test_intel_bo = drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, BO_SIZE);
	close(prime_fd);
	if (!test_intel_bo)
		return -1;

	ret = nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient);
	if (ret < 0) {
		fprintf(stderr, "failed to map nouveau bo\n");
		goto out;
	}

	ptr = nvbo->map;
	*ptr = 0xdeadbeef;

	gem_read(intel_fd, test_intel_bo->handle, 0, buf, 256);
	buf[0] = 0xabcdef55;

	gem_write(intel_fd, test_intel_bo->handle, 0, buf, 4);
out:
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}

static void
set_bo(drm_intel_bo *bo, uint32_t val, int width, int height)
{
	int size = width * height;
	uint32_t *vaddr;

	drm_intel_gem_bo_start_gtt_access(bo, true);
	vaddr = bo->virtual;
	while (size--)
		*vaddr++ = val;
}

static drm_intel_bo *
create_bo(drm_intel_bufmgr *ibufmgr, uint32_t val, int width, int height)
{
	drm_intel_bo *bo;

	bo = drm_intel_bo_alloc(ibufmgr, "bo", 4*width*height, 0);
	igt_assert(bo);

	/* gtt map doesn't have a write parameter, so just keep the mapping
	 * around (to avoid the set_domain with the gtt write domain set) and
	 * manually tell the kernel when we start accessing the gtt. */
	drm_intel_gem_bo_map_gtt(bo);

	set_bo(bo, val, width, height);

	return bo;
}

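/*
 * create_bo() above pre-fills a source bo with a known pattern through its
 * gtt mapping; intel_copy_bo() then blits that pattern into the shared bo,
 * which the nouveau side maps and checks.
 */
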
/* use intel hw to fill the BO with a blit from another BO,
   then readback from the nouveau bo, check value is correct */
static int test_i915_blt_fill_nv_read(void)
{
	int ret;
	drm_intel_bo *test_intel_bo, *src_bo;
	int prime_fd;
	struct nouveau_bo *nvbo = NULL;
	uint32_t *ptr;

	src_bo = create_bo(bufmgr, 0xaa55aa55, 256, 1);

	test_intel_bo = drm_intel_bo_alloc(bufmgr, "test bo", BO_SIZE, 4096);

	drm_intel_bo_gem_export_to_prime(test_intel_bo, &prime_fd);

	ret = nouveau_bo_prime_handle_ref(ndev, prime_fd, &nvbo);
	close(prime_fd);
	if (ret < 0) {
		fprintf(stderr, "failed to ref prime buffer\n");
		return ret;
	}

	intel_copy_bo(intel_batch, test_intel_bo, src_bo, 256, 1);

	ret = nouveau_bo_map(nvbo, NOUVEAU_BO_RDWR, nclient);
	if (ret < 0) {
		fprintf(stderr, "failed to map nouveau bo\n");
		goto out;
	}

	/* the cpu map also serves to wait for the blit to complete */
	drm_intel_bo_map(test_intel_bo, 0);

	ptr = nvbo->map;
	if (*ptr != 0xaa55aa55) {
		fprintf(stderr, "mapped value doesn't match\n");
		ret = -1;
	}
out:
	nouveau_bo_ref(NULL, &nvbo);
	drm_intel_bo_unreference(test_intel_bo);
	return ret;
}

/* test 8 use nouveau to do blit */

/* test 9 nouveau copy engine?? */

int main(int argc, char **argv)
{
	igt_subtest_init(argc, argv);

	igt_fixture {
		igt_assert(find_and_open_devices() == 0);

		igt_require(nouveau_fd != -1);
		igt_require(intel_fd != -1);

		/* set up intel bufmgr */
		bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
		igt_assert(bufmgr);
		/* Do not enable reuse, we share (almost) all buffers. */
		//drm_intel_bufmgr_gem_enable_reuse(bufmgr);

		/* set up nouveau bufmgr */
		igt_assert(nouveau_device_wrap(nouveau_fd, 0, &ndev) >= 0);
		igt_assert(nouveau_client_new(ndev, &nclient) >= 0);

		/* set up an intel batch buffer */
		devid = intel_get_drm_devid(intel_fd);
		intel_batch = intel_batchbuffer_alloc(bufmgr, devid);
	}

#define xtest(name) \
	igt_subtest(#name) \
		igt_assert(test_##name() == 0);

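	/*
	 * Each xtest(foo) below expands to
	 *
	 *	igt_subtest("foo")
	 *		igt_assert(test_foo() == 0);
	 *
	 * so every test function runs as its own subtest and must return 0
	 * to pass.
	 */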
	xtest(i915_nv_sharing);
	xtest(nv_i915_sharing);
	xtest(nv_write_i915_cpu_mmap_read);
	xtest(nv_write_i915_gtt_mmap_read);
	xtest(i915_import_cpu_mmap);
	xtest(i915_import_gtt_mmap);
	xtest(i915_import_pread_pwrite);
	xtest(i915_blt_fill_nv_read);

	igt_fixture {
		intel_batchbuffer_free(intel_batch);

		nouveau_device_del(&ndev);
		drm_intel_bufmgr_destroy(bufmgr);

		close(intel_fd);
		close(nouveau_fd);
	}

	igt_exit();
}