lib: Measure the cost of calling timer_settime() for sigiter

We wish to delay the first signal from the igt_sigiter_ioctl
sufficiently to skip over the timer_settime() and into the drmIoctl
kernel context before firing. If we fire too early, we will think that
the ioctl doesn't respond to signals and ignore it in future. If we fire
too late, we won't probe the ioctl for signal handling at all. Let's try
measuring the timer_settime() call time as a first approximation.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
Chris Wilson 2016-03-21 16:09:56 +00:00
parent f1a3d0d96f
commit 8520037e54

View File

@ -161,8 +161,10 @@ static bool igt_sigiter_start(struct igt_sigiter *iter, bool enable)
igt_ioctl = drmIoctl;
if (enable) {
struct timespec start, end;
struct sigevent sev;
struct sigaction act;
struct itimerspec its;
igt_ioctl = sig_ioctl;
__igt_sigiter.tid = gettid();
@ -178,8 +180,25 @@ static bool igt_sigiter_start(struct igt_sigiter *iter, bool enable)
act.sa_flags = SA_SIGINFO;
igt_assert(sigaction(SIGRTMIN, &act, NULL) == 0);
__igt_sigiter.offset.tv_sec = 0;
__igt_sigiter.offset.tv_nsec = 50;
/* Try to find the approximate delay required to skip over
* the timer_settime() and into the following ioctl() to try
* and avoid the timer firing before we enter the drmIoctl.
*/
igt_assert(clock_gettime(CLOCK_MONOTONIC, &start) == 0);
memset(&its, 0, sizeof(its));
igt_assert(timer_settime(__igt_sigiter.timer, 0, &its, NULL) == 0);
igt_assert(clock_gettime(CLOCK_MONOTONIC, &end) == 0);
__igt_sigiter.offset.tv_sec = end.tv_sec - start.tv_sec;
__igt_sigiter.offset.tv_nsec = end.tv_nsec - start.tv_nsec;
if (__igt_sigiter.offset.tv_nsec < 0) {
__igt_sigiter.offset.tv_nsec += NSEC_PER_SEC;
__igt_sigiter.offset.tv_sec -= 1;
}
igt_debug("Initial delay for interruption: %ld.%09lds\n",
__igt_sigiter.offset.tv_sec,
__igt_sigiter.offset.tv_nsec);
}
return true;