Make gem_mmap__{cpu,gtt,wc}() assert on failure

Rename the current gem_mmap__{cpu,gtt,wc}() functions into
__gem_mmap__{cpu,gtt,wc}(), and add back wrappers with the original name
that assert that the pointer is valid. Most callers will expect a valid
pointer and shouldn't have to bother with failures.

To avoid changing anything (yet), sed 's/gem_mmap__/__gem_mmap__/g'
over the entire codebase.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Stochastically-reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Author: Ville Syrjälä <ville.syrjala@linux.intel.com>
Date: 2015-10-09 18:29:28 +03:00
parent 106fe21373
commit b8a77dd6c8
52 changed files with 212 additions and 152 deletions
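
For reference, the asserting wrappers introduced here are minimal; the sketch below is the GTT variant exactly as added in the library hunk further down (the cpu and wc variants follow the same shape):

void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
{
	/* map via the renamed low-level helper, which still returns NULL on failure */
	void *ptr = __gem_mmap__gtt(fd, handle, size, prot);
	igt_assert(ptr);
	return ptr;
}

Tests that want to handle a mapping failure themselves (for example the madvise and huge-BO cases below) keep using the __gem_mmap__*() form and check the returned pointer explicitly.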


@@ -177,7 +177,7 @@ static int run(int object, int batch, int count, int reps)
 fd = drm_open_driver(DRIVER_INTEL);
 handle = gem_create(fd, size);
-buf = gem_mmap__cpu(fd, handle, 0, size, PROT_WRITE);
+buf = __gem_mmap__cpu(fd, handle, 0, size, PROT_WRITE);
 igt_assert(buf);
 gen = intel_gen(intel_get_drm_devid(fd));


@@ -115,13 +115,13 @@ static int run(unsigned batch_size,
 if (num_relocs) {
 size = ALIGN(sizeof(*mem_reloc)*num_relocs, 4096);
 reloc_handle = gem_create(fd, size);
-reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
+reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
 memcpy(reloc, mem_reloc, sizeof(*mem_reloc)*num_relocs);
 munmap(reloc, size);
 if (flags & FAULT) {
 igt_disable_prefault();
-reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
+reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
 } else
 reloc = mem_reloc;
 }
@@ -162,7 +162,7 @@ static int run(unsigned batch_size,
 }
 if (flags & FAULT && reloc) {
 munmap(reloc, size);
-reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
+reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
 gem_exec[num_objects].relocs_ptr = (uintptr_t)reloc;
 }
 gem_execbuf(fd, &execbuf);


@@ -115,17 +115,17 @@ int main(int argc, char **argv)
 handle = gem_create(fd, OBJECT_SIZE);
 switch (map) {
 case CPU:
-ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
+ptr = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
 igt_assert(ptr);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 break;
 case GTT:
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
 igt_assert(ptr);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 break;
 case WC:
-ptr = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
+ptr = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
 igt_assert(ptr);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 break;


@@ -359,7 +359,7 @@ static int prepare_primary_surface(int fd, int prim_width, int prim_height,
 if (tiled)
 gem_set_tiling(fd, *prim_handle, I915_TILING_X, *prim_stride);
-prim_fb_ptr = gem_mmap__gtt(fd, *prim_handle, *prim_size, PROT_READ | PROT_WRITE);
+prim_fb_ptr = __gem_mmap__gtt(fd, *prim_handle, *prim_size, PROT_READ | PROT_WRITE);
 if (prim_fb_ptr != NULL) {
 // Write primary surface with gray background
@@ -454,7 +454,7 @@ static int prepare_sprite_surfaces(int fd, int sprite_width, int sprite_height,
 gem_set_tiling(fd, sprite_handles[i], I915_TILING_X, *sprite_stride);
 // Get pointer to the surface
-sprite_fb_ptr = gem_mmap__gtt(fd,
+sprite_fb_ptr = __gem_mmap__gtt(fd,
 sprite_handles[i], *sprite_size,
 PROT_READ | PROT_WRITE);


@@ -252,7 +252,7 @@ static void draw_rect_mmap_cpu(int fd, struct buf_data *buf, struct rect *rect,
 if (tiling != I915_TILING_NONE)
 igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);
-ptr = gem_mmap__cpu(fd, buf->handle, 0, buf->size, 0);
+ptr = __gem_mmap__cpu(fd, buf->handle, 0, buf->size, 0);
 igt_assert(ptr);
 switch (tiling) {
@@ -281,7 +281,7 @@ static void draw_rect_mmap_gtt(int fd, struct buf_data *buf, struct rect *rect,
 gem_set_domain(fd, buf->handle, I915_GEM_DOMAIN_GTT,
 I915_GEM_DOMAIN_GTT);
-ptr = gem_mmap__gtt(fd, buf->handle, buf->size, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, buf->handle, buf->size, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 draw_rect_ptr_linear(ptr, buf->stride, rect, color, buf->bpp);
@@ -303,7 +303,7 @@ static void draw_rect_mmap_wc(int fd, struct buf_data *buf, struct rect *rect,
 if (tiling != I915_TILING_NONE)
 igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);
-ptr = gem_mmap__wc(fd, buf->handle, 0, buf->size,
+ptr = __gem_mmap__wc(fd, buf->handle, 0, buf->size,
 PROT_READ | PROT_WRITE);
 igt_assert(ptr);


@@ -745,7 +745,7 @@ static void create_cairo_surface__blit(int fd, struct igt_fb *fb)
 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 /* Setup cairo context */
-blit->linear.map = gem_mmap__cpu(fd,
+blit->linear.map = __gem_mmap__cpu(fd,
 blit->linear.handle,
 0,
 blit->linear.size,
@@ -774,7 +774,7 @@ static void destroy_cairo_surface__gtt(void *arg)
 static void create_cairo_surface__gtt(int fd, struct igt_fb *fb)
 {
-void *ptr = gem_mmap__gtt(fd, fb->gem_handle, fb->size, PROT_READ | PROT_WRITE);
+void *ptr = __gem_mmap__gtt(fd, fb->gem_handle, fb->size, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 fb->cairo_surface =


@@ -447,7 +447,7 @@ void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
 }
 /**
- * gem_mmap__gtt:
+ * __gem_mmap__gtt:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @size: size of the gem buffer
@@ -458,7 +458,7 @@ void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
 *
 * Returns: A pointer to the created memory mapping, NULL on failure.
 */
-void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
+void *__gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
 {
 struct drm_i915_gem_mmap_gtt mmap_arg;
 void *ptr;
@@ -477,6 +477,24 @@ void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
 return ptr;
 }
+/**
+ * gem_mmap__gtt:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @size: size of the gem buffer
+ * @prot: memory protection bits as used by mmap()
+ *
+ * Like __gem_mmap__gtt() except we assert on failure.
+ *
+ * Returns: A pointer to the created memory mapping
+ */
+void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
+{
+void *ptr = __gem_mmap__gtt(fd, handle, size, prot);
+igt_assert(ptr);
+return ptr;
+}
 struct local_i915_gem_mmap_v2 {
 uint32_t handle;
 uint32_t pad;
@@ -523,7 +541,7 @@ bool gem_mmap__has_wc(int fd)
 }
 /**
- * gem_mmap__wc:
+ * __gem_mmap__wc:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @offset: offset in the gem buffer of the mmap arena
@@ -537,7 +555,7 @@ bool gem_mmap__has_wc(fd)
 *
 * Returns: A pointer to the created memory mapping, NULL on failure.
 */
-void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
+void *__gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
 {
 struct local_i915_gem_mmap_v2 arg;
@@ -559,7 +577,26 @@ void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsi
 }
 /**
- * gem_mmap__cpu:
+ * gem_mmap__wc:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * Like __gem_mmap__wc() except we assert on failure.
+ *
+ * Returns: A pointer to the created memory mapping
+ */
+void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
+{
+void *ptr = __gem_mmap__wc(fd, handle, offset, size, prot);
+igt_assert(ptr);
+return ptr;
+}
+/**
+ * __gem_mmap__cpu:
 * @fd: open i915 drm file descriptor
 * @handle: gem buffer object handle
 * @offset: offset in the gem buffer of the mmap arena
@@ -571,7 +608,7 @@ void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsi
 *
 * Returns: A pointer to the created memory mapping, NULL on failure.
 */
-void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
+void *__gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
 {
 struct drm_i915_gem_mmap mmap_arg;
@@ -586,6 +623,25 @@ void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, uns
 return (void *)(uintptr_t)mmap_arg.addr_ptr;
 }
+/**
+ * gem_mmap__cpu:
+ * @fd: open i915 drm file descriptor
+ * @handle: gem buffer object handle
+ * @offset: offset in the gem buffer of the mmap arena
+ * @size: size of the mmap arena
+ * @prot: memory protection bits as used by mmap()
+ *
+ * Like __gem_mmap__cpu() except we assert on failure.
+ *
+ * Returns: A pointer to the created memory mapping
+ */
+void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot)
+{
+void *ptr = __gem_mmap__cpu(fd, handle, offset, size, prot);
+igt_assert(ptr);
+return ptr;
+}
 /**
 * gem_madvise:
 * @fd: open i915 drm file descriptor


@@ -66,6 +66,10 @@ void *gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, uns
 bool gem_mmap__has_wc(int fd);
 void *gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
+void *__gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot);
+void *__gem_mmap__cpu(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
+void *__gem_mmap__wc(int fd, uint32_t handle, uint64_t offset, uint64_t size, unsigned prot);
 /**
 * gem_require_mmap_wc:
 * @fd: open i915 drm file descriptor


@@ -56,19 +56,19 @@ test_fence_restore(int fd, bool tiled2untiled, bool hibernate)
 handle_tiled = gem_create(fd, OBJECT_SIZE);
 /* Access the buffer objects in the order we want to have the laid out. */
-ptr1 = gem_mmap__gtt(fd, handle1, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr1 = __gem_mmap__gtt(fd, handle1, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr1);
 for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)
 ptr1[i] = i;
-ptr_tiled = gem_mmap__gtt(fd, handle_tiled, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr_tiled = __gem_mmap__gtt(fd, handle_tiled, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr_tiled);
 if (tiled2untiled)
 gem_set_tiling(fd, handle_tiled, I915_TILING_X, 2048);
 for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)
 ptr_tiled[i] = i;
-ptr2 = gem_mmap__gtt(fd, handle2, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr2 = __gem_mmap__gtt(fd, handle2, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr2);
 for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)
 ptr2[i] = i;


@@ -171,7 +171,7 @@ wc_create_bo(drm_intel_bufmgr *bufmgr, int width, int height)
 gem_require_mmap_wc(fd);
 bo = unmapped_create_bo(bufmgr, width, height);
-bo->virtual = gem_mmap__wc(fd, bo->handle, 0, bo->size, PROT_READ | PROT_WRITE);
+bo->virtual = __gem_mmap__wc(fd, bo->handle, 0, bo->size, PROT_READ | PROT_WRITE);
 return bo;
 }
@@ -471,9 +471,9 @@ static void cpu_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_CPU, 0);
 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
-s = gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
+s = __gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
 igt_assert(s);
-d = gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);
+d = __gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);
 igt_assert(d);
 memcpy(d, s, size);
@@ -490,9 +490,9 @@ static void gtt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-s = gem_mmap__gtt(fd, src->handle, size, PROT_READ);
+s = __gem_mmap__gtt(fd, src->handle, size, PROT_READ);
 igt_assert(s);
-d = gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);
+d = __gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);
 igt_assert(d);
 memcpy(d, s, size);
@@ -509,9 +509,9 @@ static void wc_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
 gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
 gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-s = gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
+s = __gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
 igt_assert(s);
-d = gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);
+d = __gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);
 igt_assert(d);
 memcpy(d, s, size);


@@ -115,7 +115,7 @@ static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
 igt_progress(buf, split, BATCH_SIZE/8 - 1);
 handle_new = gem_create(fd, BATCH_SIZE);
-batch_ptr = gem_mmap__cpu(fd, handle_new, 0, BATCH_SIZE,
+batch_ptr = __gem_mmap__cpu(fd, handle_new, 0, BATCH_SIZE,
 PROT_READ | PROT_WRITE);
 igt_assert(batch_ptr);
 batch_ptr[split*2] = MI_BATCH_BUFFER_END;


@@ -132,7 +132,7 @@ copy(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo)
 static void clear(int fd, uint32_t handle, int size)
 {
-void *base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+void *base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 igt_assert(base != NULL);
 memset(base, 0, size);


@@ -203,9 +203,9 @@ igt_simple_main
 gem_write(fd, handle, 0, batch, sizeof(batch));
 if (!FORCE_PREAD_PWRITE && gem_has_llc(fd))
-ptr = gem_mmap__cpu(fd, handle, 0, batch_size, PROT_READ);
+ptr = __gem_mmap__cpu(fd, handle, 0, batch_size, PROT_READ);
 else if (!FORCE_PREAD_PWRITE && gem_mmap__has_wc(fd))
-ptr = gem_mmap__wc(fd, handle, 0, batch_size, PROT_READ);
+ptr = __gem_mmap__wc(fd, handle, 0, batch_size, PROT_READ);
 else
 ptr = NULL;


@@ -200,7 +200,7 @@ static void run(int object_size)
 handle_relocs = gem_create(fd, 4096);
 gem_write(fd, handle_relocs, 0, reloc, sizeof(reloc));
-gtt_relocs = gem_mmap__gtt(fd, handle_relocs, 4096,
+gtt_relocs = __gem_mmap__gtt(fd, handle_relocs, 4096,
 PROT_READ | PROT_WRITE);
 igt_assert(gtt_relocs);


@@ -123,7 +123,7 @@ igt_simple_main
 size = ALIGN(sizeof(mem_reloc), 4096);
 reloc_handle = gem_create(fd, size);
-reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
+reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
 igt_assert(reloc);
 for (n = 0; n < MAX_NUM_RELOC; n++) {
 reloc[n].offset = 1024;
@@ -148,7 +148,7 @@ igt_simple_main
 struct timeval start, end;
 if (p->flags & FAULT)
-reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
+reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
 else
 reloc = mem_reloc;
@@ -182,7 +182,7 @@ igt_simple_main
 }
 if (p->flags & FAULT) {
 munmap(reloc, size);
-reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
+reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
 gem_exec[MAX_NUM_EXEC].relocs_ptr = (uintptr_t)reloc;
 }
 gem_execbuf(fd, &execbuf);
@@ -212,7 +212,7 @@ igt_simple_main
 }
 if (p->flags & FAULT) {
 munmap(reloc, size);
-reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
+reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
 gem_exec[MAX_NUM_EXEC].relocs_ptr = (uintptr_t)reloc;
 }
 gem_execbuf(fd, &execbuf);


@@ -67,14 +67,14 @@ bo_create (int fd, int tiling)
 handle = gem_create(fd, OBJECT_SIZE);
 /* dirty cpu caches a bit ... */
-ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 memset(ptr, 0, OBJECT_SIZE);
 munmap(ptr, OBJECT_SIZE);
 gem_set_tiling(fd, handle, tiling, 1024);
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);


@@ -68,7 +68,7 @@ static void performance(void)
 for (n = 0; n < count; n++) {
 handle[n] = gem_create(fd, OBJECT_SIZE);
-ptr[n] = gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr[n] = __gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr[n]);
 }
@@ -176,7 +176,7 @@ static void thread_performance(unsigned mask)
 for (n = 0; n < count; n++) {
 handle[n] = gem_create(fd, OBJECT_SIZE);
-ptr[n] = gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr[n] = __gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr[n]);
 if (mask & READ) {
@@ -257,7 +257,7 @@ static void *no_contention(void *closure)
 int n;
 for (n = 0; n < t->loops; n++) {
-uint32_t *ptr = gem_mmap__gtt(t->fd, t->handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+uint32_t *ptr = __gem_mmap__gtt(t->fd, t->handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 memset(ptr + (rand() % 256) * 4096 / 4, 0, 4096);
 munmap(ptr, OBJECT_SIZE);
@@ -272,7 +272,7 @@ static void *wc_mmap(void *closure)
 int n;
 for (n = 0; n < t->loops; n++) {
-uint32_t *ptr = gem_mmap__wc(t->fd, t->handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+uint32_t *ptr = __gem_mmap__wc(t->fd, t->handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 memset(ptr + (rand() % 256) * 4096 / 4, 0, 4096);
 munmap(ptr, OBJECT_SIZE);


@@ -59,7 +59,7 @@ create_bo(int fd)
 handle = gem_create(fd, OBJ_SIZE);
 /* Fill the BO with dwords starting at start_val */
-data = gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(data);
 for (i = 0; i < OBJ_SIZE/4; i++)
 data[i] = i;
@@ -83,7 +83,7 @@ igt_simple_main
 handle = gem_create(fd, OBJ_SIZE);
 /* touch one page */
-ptr = gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 *ptr = 0xdeadbeef;
 munmap(ptr, OBJ_SIZE);


@@ -140,11 +140,11 @@ static void run(data_t *data, int child)
 * set-to-gtt-domain within the fault handler.
 */
 if (write) {
-ptr = gem_mmap__gtt(data->fd, handle, size, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(data->fd, handle, size, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 ptr[rand() % (size / 4)] = canary;
 } else {
-ptr = gem_mmap__gtt(data->fd, handle, size, PROT_READ);
+ptr = __gem_mmap__gtt(data->fd, handle, size, PROT_READ);
 igt_assert(ptr);
 }
 x = ptr[rand() % (size / 4)];


@@ -89,7 +89,7 @@ int main(int argc, char **argv)
 I915_GEM_DOMAIN_CPU);
 {
-uint32_t *base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 volatile uint32_t *ptr = base;
 int x = 0;
@@ -106,7 +106,7 @@ int main(int argc, char **argv)
 /* mmap read */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 ptr = base;
 x = 0;
@@ -127,7 +127,7 @@ int main(int argc, char **argv)
 /* mmap write */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 ptr = base;
 igt_assert(base);
@@ -143,7 +143,7 @@ int main(int argc, char **argv)
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 igt_assert(base);
 memset(base, 0, size);
 munmap(base, size);
@@ -153,7 +153,7 @@ int main(int argc, char **argv)
 size/1024, elapsed(&start, &end, loop));
 gettimeofday(&start, NULL);
-base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 igt_assert(base);
 for (loop = 0; loop < 1000; loop++)
 memset(base, 0, size);
@@ -182,7 +182,7 @@ int main(int argc, char **argv)
 /* prefault into gtt */
 {
-uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 volatile uint32_t *ptr = base;
 int x = 0;
@@ -199,7 +199,7 @@ int main(int argc, char **argv)
 /* mmap read */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 volatile uint32_t *ptr = base;
 int x = 0;
@@ -220,7 +220,7 @@ int main(int argc, char **argv)
 if (gem_mmap__has_wc(fd)) {
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 volatile uint32_t *ptr = base;
 int x = 0;
@@ -243,7 +243,7 @@ int main(int argc, char **argv)
 /* mmap write */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 volatile uint32_t *ptr = base;
 igt_assert(base);
@@ -261,7 +261,7 @@ int main(int argc, char **argv)
 /* mmap write */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 volatile uint32_t *ptr = base;
 igt_assert(base);
@@ -279,7 +279,7 @@ int main(int argc, char **argv)
 /* mmap clear */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 memset(base, 0, size);
 munmap(base, size);
 }
@@ -291,7 +291,7 @@ int main(int argc, char **argv)
 /* mmap clear */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 memset(base, 0, size);
 munmap(base, size);
 }
@@ -301,7 +301,7 @@ int main(int argc, char **argv)
 }
 gettimeofday(&start, NULL);{
-uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 for (loop = 0; loop < 1000; loop++)
 memset(base, 0, size);
 munmap(base, size);
@@ -311,7 +311,7 @@ int main(int argc, char **argv)
 if (gem_mmap__has_wc(fd)) {
 gettimeofday(&start, NULL);{
-uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
 for (loop = 0; loop < 1000; loop++)
 memset(base, 0, size);
 munmap(base, size);
@@ -323,7 +323,7 @@ int main(int argc, char **argv)
 /* mmap read */
 gettimeofday(&start, NULL);
 for (loop = 0; loop < 1000; loop++) {
-uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 volatile uint32_t *ptr = base;
 int x = 0;


@@ -64,7 +64,7 @@ test_large_object(int fd)
 igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0);
 /* prefault */
-ptr = gem_mmap__gtt(fd, create.handle, obj_size, PROT_WRITE | PROT_READ);
+ptr = __gem_mmap__gtt(fd, create.handle, obj_size, PROT_WRITE | PROT_READ);
 igt_assert(ptr);
 *ptr = 0;


@@ -63,7 +63,7 @@ dontneed_before_mmap(void)
 handle = gem_create(fd, OBJECT_SIZE);
 gem_madvise(fd, handle, I915_MADV_DONTNEED);
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr == NULL);
 igt_assert(errno == EFAULT);
 close(fd);
@@ -77,7 +77,7 @@ dontneed_after_mmap(void)
 char *ptr;
 handle = gem_create(fd, OBJECT_SIZE);
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 gem_madvise(fd, handle, I915_MADV_DONTNEED);
 close(fd);


@@ -80,7 +80,7 @@ test_huge_bo(int huge)
 bo = gem_create(fd, huge_object_size);
 /* Obtain CPU mapping for the object. */
-ptr_cpu = gem_mmap__cpu(fd, bo, 0, huge_object_size,
+ptr_cpu = __gem_mmap__cpu(fd, bo, 0, huge_object_size,
 PROT_READ | PROT_WRITE);
 igt_require(ptr_cpu);
 gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
@@ -166,7 +166,7 @@ igt_main
 igt_subtest("short-mmap") {
 igt_assert(OBJECT_SIZE > 4096);
 arg.handle = gem_create(fd, OBJECT_SIZE);
-addr = gem_mmap__cpu(fd, arg.handle, 0, 4096, PROT_WRITE);
+addr = __gem_mmap__cpu(fd, arg.handle, 0, 4096, PROT_WRITE);
 igt_assert(addr);
 memset(addr, 0, 4096);
 munmap(addr, 4096);


@@ -56,7 +56,7 @@ mmap_bo(int fd, uint32_t handle)
 {
 void *ptr;
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 return ptr;
@@ -179,7 +179,7 @@ test_read_write(int fd, enum test_read_write order)
 handle = gem_create(fd, OBJECT_SIZE);
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 if (order == READ_BEFORE_WRITE) {
@@ -203,10 +203,10 @@ test_read_write2(fd, enum test_read_write order)
 handle = gem_create(fd, OBJECT_SIZE);
-r = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
+r = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
 igt_assert(r);
-w = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+w = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(w);
 if (order == READ_BEFORE_WRITE) {
@@ -291,12 +291,12 @@ test_huge_bo(fd, int huge, int tiling)
 bo = gem_create(fd, PAGE_SIZE);
 if (tiling)
 gem_set_tiling(fd, bo, tiling, pitch);
-linear_pattern = gem_mmap__gtt(fd, bo, PAGE_SIZE,
+linear_pattern = __gem_mmap__gtt(fd, bo, PAGE_SIZE,
 PROT_READ | PROT_WRITE);
 igt_assert(linear_pattern);
 for (i = 0; i < PAGE_SIZE; i++)
 linear_pattern[i] = i;
-tiled_pattern = gem_mmap__cpu(fd, bo, 0, PAGE_SIZE, PROT_READ);
+tiled_pattern = __gem_mmap__cpu(fd, bo, 0, PAGE_SIZE, PROT_READ);
 igt_assert(tiled_pattern);
 gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT, 0);
@@ -307,13 +307,13 @@ test_huge_bo(fd, int huge, int tiling)
 gem_set_tiling(fd, bo, tiling, pitch);
 /* Initialise first/last page through CPU mmap */
-ptr = gem_mmap__cpu(fd, bo, 0, size, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__cpu(fd, bo, 0, size, PROT_READ | PROT_WRITE);
 memcpy(ptr, tiled_pattern, PAGE_SIZE);
 memcpy(ptr + last_offset, tiled_pattern, PAGE_SIZE);
 munmap(ptr, size);
 /* Obtain mapping for the object through GTT. */
-ptr = gem_mmap__gtt(fd, bo, size, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, bo, size, PROT_READ | PROT_WRITE);
 igt_require_f(ptr, "Huge BO GTT mapping not supported.\n");
 set_domain_gtt(fd, bo);
@@ -369,7 +369,7 @@ test_huge_copy(fd, int huge, int tiling_a, int tiling_b)
 if (tiling_a)
 gem_set_tiling(fd, bo, tiling_a,
 tiling_a == I915_TILING_Y ? 128 : 512);
-a = gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
+a = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
 igt_require(a);
 gem_close(fd, bo);
@@ -380,7 +380,7 @@ test_huge_copy(fd, int huge, int tiling_a, int tiling_b)
 if (tiling_b)
 gem_set_tiling(fd, bo, tiling_b,
 tiling_b == I915_TILING_Y ? 128 : 512);
-b = gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
+b = __gem_mmap__gtt(fd, bo, huge_object_size, PROT_READ | PROT_WRITE);
 igt_require(b);
 gem_close(fd, bo);
@@ -440,10 +440,10 @@ test_write_cpu_read_gtt(int fd)
 handle = gem_create(fd, OBJECT_SIZE);
-dst = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
+dst = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
 igt_assert(dst);
-src = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
+src = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
 igt_assert(src);
 gem_close(fd, handle);


@@ -60,7 +60,7 @@ create_and_map_bo(int fd)
 handle = gem_create(fd, OBJECT_SIZE);
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 /* touch it to force it into the gtt */


@@ -62,7 +62,7 @@ mmap_bo(int fd, uint32_t handle)
 {
 void *ptr;
-ptr = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 return ptr;
@@ -183,10 +183,10 @@ test_read_write2(fd, enum test_read_write order)
 handle = gem_create(fd, OBJECT_SIZE);
 set_domain(fd, handle);
-r = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
+r = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
 igt_assert(r);
-w = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+w = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(w);
 if (order == READ_BEFORE_WRITE) {
@@ -288,10 +288,10 @@ test_write_cpu_read_wc(int fd, int force_domain)
 handle = gem_create(fd, OBJECT_SIZE);
-dst = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
+dst = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
 igt_assert(dst);
-src = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
+src = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
 igt_assert(src);
 memset(src, 0xaa, OBJECT_SIZE);
@@ -315,10 +315,10 @@ test_write_gtt_read_wc(int fd)
 handle = gem_create(fd, OBJECT_SIZE);
 set_domain(fd, handle);
-dst = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
+dst = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
 igt_assert(dst);
-src = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
+src = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
 igt_assert(src);
 memset(src, 0xaa, OBJECT_SIZE);


@@ -225,7 +225,7 @@ static void do_test(int fd, bool faulting_reloc)
 relocs_bo_handle[i] = gem_create(fd, 4096);
 gem_write(fd, relocs_bo_handle[i], 0, reloc, sizeof(reloc));
-gtt_relocs_ptr[i] = gem_mmap__gtt(fd, relocs_bo_handle[i], 4096,
+gtt_relocs_ptr[i] = __gem_mmap__gtt(fd, relocs_bo_handle[i], 4096,
 PROT_READ | PROT_WRITE);
 igt_assert(gtt_relocs_ptr[i]);


@@ -118,7 +118,7 @@ static void test_big_gtt(int fd, int scale)
 handle = gem_create(fd, size);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-ptr = gem_mmap__wc(fd, handle, 0, size, PROT_READ);
+ptr = __gem_mmap__wc(fd, handle, 0, size, PROT_READ);
 igt_assert(ptr);
 for (offset = 0; offset < size; offset += 4096) {


@@ -112,9 +112,9 @@ static void as_gtt_mmap(int fd, uint32_t src, uint32_t dst, void *buf, int len,
 uint32_t *src_ptr, *dst_ptr;
 BUILD_EXEC;
-src_ptr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
+src_ptr = __gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
 igt_assert(src_ptr);
-dst_ptr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
+dst_ptr = __gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
 igt_assert(dst_ptr);
 while (loops--) {
@@ -139,9 +139,9 @@ static void as_cpu_mmap(int fd, uint32_t src, uint32_t dst, void *buf, int len,
 uint32_t *src_ptr, *dst_ptr;
 BUILD_EXEC;
-src_ptr = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);
+src_ptr = __gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);
 igt_assert(src_ptr);
-dst_ptr = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
+dst_ptr = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
 igt_assert(dst_ptr);
 while (loops--) {
@@ -186,9 +186,9 @@ static void test_as_gtt_mmap(int fd, uint32_t src, uint32_t dst, int len)
 int i;
 BUILD_EXEC;
-src_ptr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
+src_ptr = __gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
 igt_assert(src_ptr);
-dst_ptr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
+dst_ptr = __gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
 igt_assert(dst_ptr);
 gem_set_domain(fd, src, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -212,9 +212,9 @@ static void test_as_cpu_mmap(int fd, uint32_t src, uint32_t dst, int len)
 int i;
 BUILD_EXEC;
-src_ptr = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);
+src_ptr = __gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);
 igt_assert(src_ptr);
-dst_ptr = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
+dst_ptr = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
 igt_assert(dst_ptr);
 gem_set_domain(fd, src, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);


@@ -85,7 +85,7 @@ static void source_offset_tests(int devid, bool reloc_gtt)
 execbuf.buffer_count = 2;
 if (reloc_gtt) {
-dst_gtt = gem_mmap__gtt(fd, handle, 8192, PROT_READ | PROT_WRITE);
+dst_gtt = __gem_mmap__gtt(fd, handle, 8192, PROT_READ | PROT_WRITE);
 igt_assert(dst_gtt != MAP_FAILED);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 memset(dst_gtt, 0, 8192);


@@ -162,7 +162,7 @@ static void reloc_and_emit(int fd, drm_intel_bo *target_bo, bool faulting_reloc)
 handle_relocs = gem_create(fd, 4096);
 gem_write(fd, handle_relocs, 0, reloc, sizeof(reloc));
-gtt_relocs = gem_mmap__gtt(fd, handle_relocs, 4096,
+gtt_relocs = __gem_mmap__gtt(fd, handle_relocs, 4096,
 PROT_READ | PROT_WRITE);
 igt_assert(gtt_relocs);


@@ -66,7 +66,7 @@ igt_simple_main
 tile_height = 8;
 handle = gem_create(fd, OBJECT_SIZE);
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 /* gtt coherency is done with set_domain in libdrm, don't break that */


@@ -62,7 +62,7 @@ igt_simple_main
 data[i] = i;
 handle = gem_create(fd, OBJECT_SIZE);
-ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 gem_set_tiling(fd, handle, I915_TILING_X, TEST_STRIDE);


@@ -88,21 +88,21 @@ static void test_streaming(int fd, int mode, int sync)
 switch (mode) {
 case 0: /* cpu/snoop */
 gem_set_caching(fd, src, I915_CACHING_CACHED);
-s = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+s = __gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(s);
 break;
 case 1: /* gtt */
-s = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+s = __gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(s);
 break;
 case 2: /* wc */
-s = gem_mmap__wc(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+s = __gem_mmap__wc(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(s);
 break;
 }
 *s = 0; /* fault the object into the mappable range first (for GTT) */
-d = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
+d = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
 igt_assert(d);
 gem_write(fd, dst, 0, tmp, sizeof(tmp));
@@ -154,7 +154,7 @@ static void test_streaming(int fd, int mode, int sync)
 batch[i].handle = gem_create(fd, 4096);
 batch[i].offset = 0;
-base = gem_mmap__cpu(fd, batch[i].handle, 0, 4096, PROT_WRITE);
+base = __gem_mmap__cpu(fd, batch[i].handle, 0, 4096, PROT_WRITE);
 igt_assert(base);
 for (int j = 0; j < 64; j++) {
@@ -254,10 +254,10 @@ static void test_batch(int fd, int mode, int reverse)
 exec[DST].handle = gem_create(fd, OBJECT_SIZE);
 exec[SRC].handle = gem_create(fd, OBJECT_SIZE);
-s = gem_mmap__wc(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
+s = __gem_mmap__wc(fd, src, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
 igt_assert(s);
-d = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
+d = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
 igt_assert(d);
 memset(reloc, 0, sizeof(reloc));
@@ -285,15 +285,15 @@ static void test_batch(int fd, int mode, int reverse)
 switch (mode) {
 case 0: /* cpu/snoop */
 igt_require(gem_has_llc(fd));
-base = gem_mmap__cpu(fd, exec[BATCH].handle, 0, batch_size, PROT_READ | PROT_WRITE);
+base = __gem_mmap__cpu(fd, exec[BATCH].handle, 0, batch_size, PROT_READ | PROT_WRITE);
 igt_assert(base);
 break;
 case 1: /* gtt */
-base = gem_mmap__gtt(fd, exec[BATCH].handle, batch_size, PROT_READ | PROT_WRITE);
+base = __gem_mmap__gtt(fd, exec[BATCH].handle, batch_size, PROT_READ | PROT_WRITE);
 igt_assert(base);
 break;
 case 2: /* wc */
-base = gem_mmap__wc(fd, exec[BATCH].handle, 0, batch_size, PROT_READ | PROT_WRITE);
+base = __gem_mmap__wc(fd, exec[BATCH].handle, 0, batch_size, PROT_READ | PROT_WRITE);
 igt_assert(base);
 break;
 }


@@ -72,7 +72,7 @@ create_bo(int fd)
 gem_set_tiling(fd, handle, I915_TILING_X, WIDTH * sizeof(uint32_t));
 /* Fill the BO with dwords starting at start_val */
-data = gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
 igt_assert(data);
 for (i = 0; i < WIDTH*HEIGHT; i++)
 data[i] = i;


@@ -79,7 +79,7 @@ create_bo_and_fill(int fd)
 gem_set_tiling(fd, handle, current_tiling_mode, WIDTH * sizeof(uint32_t));
 /* Fill the BO with dwords starting at start_val */
-data = gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
 igt_assert(data);
 for (i = 0; i < WIDTH*HEIGHT; i++)
 data[i] = i;
@@ -123,7 +123,7 @@ igt_simple_main
 gem_write(fd, handle_target, 0, linear, sizeof(linear));
 /* Check the target bo's contents. */
-data = gem_mmap__gtt(fd, handle_target, sizeof(linear), PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle_target, sizeof(linear), PROT_READ | PROT_WRITE);
 igt_assert(data);
 for (j = 0; j < WIDTH*HEIGHT; j++)
 igt_assert_f(data[j] == j,


@@ -78,7 +78,7 @@ create_bo(int fd)
 handle = gem_create(fd, LINEAR_DWORDS);
 gem_set_tiling(fd, handle, current_tiling_mode, WIDTH * sizeof(uint32_t));
-data = gem_mmap__gtt(fd, handle, LINEAR_DWORDS, PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle, LINEAR_DWORDS, PROT_READ | PROT_WRITE);
 if (data == NULL) {
 gem_close(fd, handle);
 return 0;
@@ -94,7 +94,7 @@ fill_bo(int fd, uint32_t handle)
 uint32_t *data;
 int i;
-data = gem_mmap__gtt(fd, handle, LINEAR_DWORDS, PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle, LINEAR_DWORDS, PROT_READ | PROT_WRITE);
 igt_assert(data);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
@@ -109,7 +109,7 @@ check_bo(int fd, uint32_t handle)
 uint32_t *data;
 int j;
-data = gem_mmap__gtt(fd, handle, LINEAR_DWORDS, PROT_READ);
+data = __gem_mmap__gtt(fd, handle, LINEAR_DWORDS, PROT_READ);
 igt_assert(data);
 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, 0);
 j = rand() % (WIDTH * HEIGHT);


@@ -71,7 +71,7 @@ create_bo(int fd)
 /* Write throught the fence to tiled the data.
 * We then manually detile on reading back through the mmap(wc).
 */
-data = gem_mmap__gtt(fd, handle, SIZE, PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle, SIZE, PROT_READ | PROT_WRITE);
 igt_assert(data);
 for (i = 0; i < WIDTH*HEIGHT; i++)
 data[i] = i;
@@ -179,7 +179,7 @@ igt_simple_main
 last_page = (offset + len + PAGE_SIZE) & ~(PAGE_SIZE-1);
 offset -= first_page;
-linear = gem_mmap__cpu(fd, handle, first_page, last_page - first_page, PROT_READ);
+linear = __gem_mmap__cpu(fd, handle, first_page, last_page - first_page, PROT_READ);
 igt_assert(linear);


@@ -65,7 +65,7 @@ create_bo(int fd)
 gem_set_tiling(fd, handle, I915_TILING_X, WIDTH * sizeof(uint32_t));
 /* Fill the BO with dwords starting at start_val */
-data = gem_mmap__gtt(fd, handle, SIZE, PROT_READ | PROT_WRITE);
+data = __gem_mmap__gtt(fd, handle, SIZE, PROT_READ | PROT_WRITE);
 igt_assert(data);
 for (i = 0; i < WIDTH*HEIGHT; i++)
 data[i] = i;
@@ -174,7 +174,7 @@ igt_simple_main
 first_page = offset & ~(PAGE_SIZE-1);
 last_page = (offset + len + PAGE_SIZE) & ~(PAGE_SIZE-1);
-linear = gem_mmap__wc(fd, handle, first_page, last_page - first_page, PROT_READ);
+linear = __gem_mmap__wc(fd, handle, first_page, last_page - first_page, PROT_READ);
 igt_assert(linear);
 /* Translate from offsets in the read buffer to the swizzled


@@ -97,7 +97,7 @@ igt_simple_main
 handle = gem_create(fd, size);
-ptr = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 test_invalid_tiling(fd, handle, 0);


@@ -544,7 +544,7 @@ static int test_invalid_gtt_mapping(int fd)
 /* GTT mapping */
 handle = create_bo(fd, 0);
-ptr = gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
 gem_close(fd, handle);
 igt_assert(ptr);
 igt_assert(((unsigned long)ptr & (PAGE_SIZE - 1)) == 0);
@@ -573,7 +573,7 @@ static void test_process_exit(int fd, int flags)
 handle = create_userptr_bo(fd, sizeof(linear));
 if (flags & PE_GTT_MAP) {
-uint32_t *ptr = gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
+uint32_t *ptr = __gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
 if (ptr)
 *ptr = 0;
 }
@@ -688,12 +688,12 @@ static void *umap(int fd, uint32_t handle)
 void *ptr;
 if (gem_has_llc(fd)) {
-ptr = gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
+ptr = __gem_mmap__gtt(fd, handle, sizeof(linear), PROT_READ | PROT_WRITE);
 igt_assert(ptr);
 } else {
 uint32_t tmp = gem_create(fd, sizeof(linear));
 copy(fd, tmp, handle, 0);
-ptr = gem_mmap__cpu(fd, tmp, 0, sizeof(linear), PROT_READ);
+ptr = __gem_mmap__cpu(fd, tmp, 0, sizeof(linear), PROT_READ);
 igt_assert(ptr);
 gem_close(fd, tmp);
 }


@@ -426,7 +426,7 @@ create_bo(int fd, uint32_t val, int tiling)
 gem_set_tiling(fd, handle, tiling, WIDTH*4);
 /* Fill the BO with dwords starting at val */
-v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
+v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
 igt_assert(v);
 for (i = 0; i < WIDTH*HEIGHT; i++)
 v[i] = val++;
@@ -441,7 +441,7 @@ check_bo(int fd, uint32_t handle, uint32_t val)
 uint32_t *v;
 int i;
-v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
+v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
 igt_assert(v);
 for (i = 0; i < WIDTH*HEIGHT; i++) {
 igt_assert_f(v[i] == val,


@@ -314,7 +314,7 @@ create_bo(int fd, uint32_t val, int tiling)
 gem_set_tiling(fd, handle, tiling, WIDTH*4);
 /* Fill the BO with dwords starting at val */
-v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
+v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
 igt_assert(v);
 for (i = 0; i < WIDTH*HEIGHT; i++)
 v[i] = val++;
@@ -329,7 +329,7 @@ check_bo(int fd, uint32_t handle, uint32_t val)
 uint32_t *v;
 int i;
-v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
+v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
 igt_assert(v);
 for (i = 0; i < WIDTH*HEIGHT; i++) {
 igt_assert_f(v[i] == val,


@@ -301,7 +301,7 @@ create_bo(int fd, uint32_t val)
 	gem_set_tiling(fd, handle, I915_TILING_X, WIDTH*4);
 	/* Fill the BO with dwords starting at val */
-	v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
+	v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
 	igt_assert(v);
 	for (i = 0; i < WIDTH*HEIGHT; i++)
 		v[i] = val++;
@@ -316,7 +316,7 @@ check_bo(int fd, uint32_t handle, uint32_t val)
 	uint32_t *v;
 	int i;
-	v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
+	v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
 	igt_assert(v);
 	for (i = 0; i < WIDTH*HEIGHT; i++) {
 		igt_assert_f(v[i] == val,


@@ -301,7 +301,7 @@ create_bo(int fd, uint32_t val)
 	gem_set_tiling(fd, handle, I915_TILING_Y, WIDTH*4);
 	/* Fill the BO with dwords starting at val */
-	v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
+	v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
 	igt_assert(v);
 	for (i = 0; i < WIDTH*HEIGHT; i++)
 		v[i] = val++;
@@ -316,7 +316,7 @@ check_bo(int fd, uint32_t handle, uint32_t val)
 	uint32_t *v;
 	int i;
-	v = gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
+	v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
 	igt_assert(v);
 	for (i = 0; i < WIDTH*HEIGHT; i++) {
 		igt_assert_f(v[i] == val,
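
The four hunk pairs above all patch the same create_bo()/check_bo() idiom: fill a tiled buffer with ascending dwords through a GTT mapping, then map it again read-only and verify every dword. Below is a self-contained sketch of that round trip using the renamed __gem_mmap__gtt() entry point; the WIDTH/HEIGHT values, the helper names, and the header names are assumptions added for illustration, not code taken from these tests.

#include <stdint.h>
#include <sys/mman.h>

#include "ioctl_wrappers.h"	/* assumed IGT header for gem_create(), gem_set_tiling(), __gem_mmap__gtt() */
#include "igt_core.h"		/* assumed IGT header for igt_assert() and igt_assert_f() */

#define WIDTH 512		/* illustrative only; the real tests define their own */
#define HEIGHT 512

/* Hypothetical helper mirroring the create_bo() hunks: fill via a GTT map. */
static uint32_t create_filled_bo(int fd, uint32_t val, int tiling)
{
	uint32_t handle = gem_create(fd, WIDTH*HEIGHT*4);
	uint32_t *v;
	int i;

	gem_set_tiling(fd, handle, tiling, WIDTH*4);

	/* Fill the BO with dwords starting at val, as the hunks above do. */
	v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ | PROT_WRITE);
	igt_assert(v);
	for (i = 0; i < WIDTH*HEIGHT; i++)
		v[i] = val++;
	munmap(v, WIDTH*HEIGHT*4);

	return handle;
}

/* Hypothetical helper mirroring the check_bo() hunks: read back and verify. */
static void check_filled_bo(int fd, uint32_t handle, uint32_t val)
{
	uint32_t *v;
	int i;

	v = __gem_mmap__gtt(fd, handle, WIDTH*HEIGHT*4, PROT_READ);
	igt_assert(v);
	for (i = 0; i < WIDTH*HEIGHT; i++) {
		igt_assert_f(v[i] == val, "mismatch at dword %d\n", i);
		val++;
	}
	munmap(v, WIDTH*HEIGHT*4);
}

The hunks themselves only rename the mapping call; the fill/verify logic is left untouched.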


@@ -190,7 +190,7 @@ igt_simple_main
 	gem_execbuf(t[0].fd, &execbuf);
 	gem_sync(t[0].fd, exec[1].handle);
-	p = gem_mmap__gtt(t[0].fd, exec[0].handle, 4096, PROT_READ);
+	p = __gem_mmap__gtt(t[0].fd, exec[0].handle, 4096, PROT_READ);
 	igt_assert(p);
 	igt_info("[%d]={ %08x %08x }\n", i, p[0], p[1]);


@@ -188,7 +188,7 @@ static void fill_mmap_cpu(data_t *data, uint32_t handle, unsigned char color)
 {
 	void *ptr;
-	ptr = gem_mmap__cpu(data->drm_fd, handle, 0, 4096, PROT_WRITE);
+	ptr = __gem_mmap__cpu(data->drm_fd, handle, 0, 4096, PROT_WRITE);
 	igt_assert(ptr);
 	gem_set_domain(data->drm_fd, handle, I915_GEM_DOMAIN_CPU,
 		       I915_GEM_DOMAIN_CPU);
@@ -201,7 +201,7 @@ static void fill_mmap_gtt(data_t *data, uint32_t handle, unsigned char color)
 {
 	void *ptr;
-	ptr = gem_mmap__gtt(data->drm_fd, handle, 4096, PROT_WRITE);
+	ptr = __gem_mmap__gtt(data->drm_fd, handle, 4096, PROT_WRITE);
 	igt_assert(ptr);
 	gem_set_domain(data->drm_fd, handle, I915_GEM_DOMAIN_GTT,
 		       I915_GEM_DOMAIN_GTT);
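
Both hunks above follow one fixed sequence: map the BO with the matching __gem_mmap__ flavour, assert that the mapping succeeded, then move the object into the corresponding domain before writing. A minimal sketch of the CPU-side variant of that sequence follows; the helper name, the memset()/munmap() tail, and the header names are assumptions, only the map/assert/set-domain steps come from the hunks.

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#include "ioctl_wrappers.h"	/* assumed IGT header for __gem_mmap__cpu() and gem_set_domain() */
#include "igt_core.h"		/* assumed IGT header for igt_assert() */
#include "i915_drm.h"		/* assumed include path for the I915_GEM_DOMAIN_* flags */

/* Hypothetical helper mirroring fill_mmap_cpu() above; not the test's own code. */
static void fill_bo_via_cpu_mmap(int drm_fd, uint32_t handle, unsigned char color)
{
	void *ptr;

	/* Map through the CPU path and insist on success, as the call site does. */
	ptr = __gem_mmap__cpu(drm_fd, handle, 0, 4096, PROT_WRITE);
	igt_assert(ptr);

	/* CPU writes need the object in the CPU domain first. */
	gem_set_domain(drm_fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

	memset(ptr, color, 4096);	/* assumed tail; the hunk ends before it */
	munmap(ptr, 4096);
}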


@@ -89,7 +89,7 @@ static void touch_fences(data_t *data)
 	uint32_t handle = data->bos[i]->handle;
 	void *ptr;
-	ptr = gem_mmap__gtt(data->drm_fd, handle, 4096, PROT_WRITE);
+	ptr = __gem_mmap__gtt(data->drm_fd, handle, 4096, PROT_WRITE);
 	igt_assert(ptr);
 	gem_set_domain(data->drm_fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 	memset(ptr, 0, 4);


@@ -379,7 +379,7 @@ static void run_test(data_t *data)
 		expected = "still GREEN";
 		break;
 	case MMAP_GTT:
-		ptr = gem_mmap__gtt(data->drm_fd, handle, data->mod_size,
+		ptr = __gem_mmap__gtt(data->drm_fd, handle, data->mod_size,
 				    PROT_WRITE);
 		igt_assert(ptr);
 		gem_set_domain(data->drm_fd, handle,
@@ -389,7 +389,7 @@ static void run_test(data_t *data)
 		expected = "BLACK or TRANSPARENT mark on top of plane in test";
 		break;
 	case MMAP_GTT_WAITING:
-		ptr = gem_mmap__gtt(data->drm_fd, handle, data->mod_size,
+		ptr = __gem_mmap__gtt(data->drm_fd, handle, data->mod_size,
 				    PROT_WRITE);
 		igt_assert(ptr);
 		gem_set_domain(data->drm_fd, handle,
@@ -413,7 +413,7 @@ static void run_test(data_t *data)
 		expected = "BLACK or TRANSPARENT mark on top of plane in test";
 		break;
 	case MMAP_CPU:
-		ptr = gem_mmap__cpu(data->drm_fd, handle, 0, data->mod_size, PROT_WRITE);
+		ptr = __gem_mmap__cpu(data->drm_fd, handle, 0, data->mod_size, PROT_WRITE);
 		igt_assert(ptr);
 		gem_set_domain(data->drm_fd, handle,
			       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);


@@ -973,12 +973,12 @@ static void gem_mmap_subtest(bool gtt_mmap)
 	handle = gem_create(drm_fd, buf_size);
 	if (gtt_mmap) {
-		gem_buf = gem_mmap__gtt(drm_fd, handle, buf_size,
+		gem_buf = __gem_mmap__gtt(drm_fd, handle, buf_size,
					  PROT_READ | PROT_WRITE);
 		igt_assert(gem_buf);
 	}
 	else {
-		gem_buf = gem_mmap__cpu(drm_fd, handle, 0, buf_size, 0);
+		gem_buf = __gem_mmap__cpu(drm_fd, handle, 0, buf_size, 0);
 		igt_assert(gem_buf);
 	}
@@ -1012,12 +1012,12 @@ static void gem_mmap_subtest(bool gtt_mmap)
 	disable_all_screens_and_wait(&ms_data);
 	if (gtt_mmap) {
-		gem_buf = gem_mmap__gtt(drm_fd, handle, buf_size,
+		gem_buf = __gem_mmap__gtt(drm_fd, handle, buf_size,
					  PROT_READ | PROT_WRITE);
 		igt_assert(gem_buf);
 	}
 	else {
-		gem_buf = gem_mmap__cpu(drm_fd, handle, 0, buf_size, 0);
+		gem_buf = __gem_mmap__cpu(drm_fd, handle, 0, buf_size, 0);
 		igt_assert(gem_buf);
 	}
@@ -1474,7 +1474,7 @@ static void fill_igt_fb(struct igt_fb *fb, uint32_t color)
 	int i;
 	uint32_t *ptr;
-	ptr = gem_mmap__gtt(drm_fd, fb->gem_handle, fb->size, PROT_WRITE);
+	ptr = __gem_mmap__gtt(drm_fd, fb->gem_handle, fb->size, PROT_WRITE);
 	igt_assert(ptr);
 	for (i = 0; i < fb->size/sizeof(uint32_t); i++)
 		ptr[i] = color;
@@ -1756,7 +1756,7 @@ static void fences_subtest(bool dpms)
 	gem_get_tiling(drm_fd, params.fb.gem_handle, &tiling, &swizzle);
 	igt_assert(tiling);
-	buf_ptr = gem_mmap__gtt(drm_fd, params.fb.gem_handle,
+	buf_ptr = __gem_mmap__gtt(drm_fd, params.fb.gem_handle,
				  params.fb.size, PROT_WRITE | PROT_READ);
 	igt_assert(buf_ptr);
 	for (i = 0; i < params.fb.size/sizeof(uint32_t); i++)


@@ -60,9 +60,9 @@ check_bo(int fd1, uint32_t handle1, int fd2, uint32_t handle2)
 	char *ptr1, *ptr2;
 	int i;
-	ptr1 = gem_mmap__gtt(fd1, handle1, BO_SIZE, PROT_READ | PROT_WRITE);
+	ptr1 = __gem_mmap__gtt(fd1, handle1, BO_SIZE, PROT_READ | PROT_WRITE);
 	igt_assert(ptr1);
-	ptr2 = gem_mmap__gtt(fd2, handle2, BO_SIZE, PROT_READ | PROT_WRITE);
+	ptr2 = __gem_mmap__gtt(fd2, handle2, BO_SIZE, PROT_READ | PROT_WRITE);
 	igt_assert(ptr2);
 	/* check whether it's still our old object first. */


@@ -221,7 +221,7 @@ paint_color_key(struct igt_fb *fb_info)
 	int i, j;
 	uint32_t *fb_ptr;
-	fb_ptr = gem_mmap__gtt(drm_fd, fb_info->gem_handle,
+	fb_ptr = __gem_mmap__gtt(drm_fd, fb_info->gem_handle,
				 fb_info->size, PROT_READ | PROT_WRITE);
 	igt_assert(fb_ptr);
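
Every hunk in this listing leaves its call site with the same two-step shape: call the double-underscore mapping function, then igt_assert() the returned pointer. As a hedged sketch only, that pair can be folded into a single asserting helper; the helper name below is invented for illustration and the argument types simply mirror the __gem_mmap__gtt() calls above.

#include <stdint.h>
#include <sys/mman.h>

#include "ioctl_wrappers.h"	/* assumed IGT header declaring __gem_mmap__gtt() */
#include "igt_core.h"		/* assumed IGT header declaring igt_assert() */

/* Hypothetical wrapper: map through the GTT and treat a failed mapping as a test failure. */
static void *gtt_mmap_or_assert(int fd, uint32_t handle, uint64_t size, unsigned prot)
{
	void *ptr = __gem_mmap__gtt(fd, handle, size, prot);

	igt_assert(ptr);	/* callers can then use the pointer unconditionally */
	return ptr;
}

With a helper of that shape, each map-then-assert pair above collapses to a single call at the call site.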