path: root/aclocal.m4
author    Herbert Valerio Riedel <hvr@gnu.org>    2015-11-17 15:30:56 +0100
committer Ben Gamari <ben@smart-cactus.org>       2015-11-17 16:19:52 +0100
commit    8ad9e74f1f88d5c86d2e76f9992f9b2d267421d3 (patch)
tree      a12bef1f530830078cc91712c576ba007791e1e5 /aclocal.m4
parent    b3d6c0f77df0c2cbb5cedaa48a8e874fad3e9942 (diff)
download  haskell-8ad9e74f1f88d5c86d2e76f9992f9b2d267421d3.tar.gz
Make `timer_create(CLOCK_REALTIME)` autoconf test more reliable
I've noticed that on a platform with a coarse timer/scheduling granularity of 10ms, this autoconf test fails to detect a working `timer_create(CLOCK_REALTIME)`. On AIX, this effectively means that intervals/timers are rounded up to multiples of 10ms, so a 13ms delay is effectively a 20ms delay. By using a 100ms timeout we are on the safe side.

Reviewers: austin, bgamari

Reviewed By: bgamari

Subscribers: thomie, erikd

Differential Revision: https://phabricator.haskell.org/D1483
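For context, the kind of probe this configure check compiles looks roughly like the standalone sketch below (illustrative only, not the verbatim conftest program from aclocal.m4; the SIGALRM choice, handler name, and exit codes are assumptions): it arms a repeating 1ms CLOCK_REALTIME timer, then waits a generous 100ms before testing that the signal handler has fired, so a coarse ~10ms scheduler/timer granularity cannot produce a false negative. On older glibc it may need to be linked with -lrt.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t tock = 0;

static void handler(int sig) { (void)sig; tock = 1; }

int main(void)
{
    struct sigaction  action;
    struct sigevent   ev;
    timer_t           timer;
    struct itimerspec it;

    /* install the signal handler the timer will fire */
    memset(&action, 0, sizeof(action));
    action.sa_handler = handler;
    sigemptyset(&action.sa_mask);
    if (sigaction(SIGALRM, &action, NULL) != 0)
        exit(3);

    /* create a CLOCK_REALTIME timer that delivers SIGALRM */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo  = SIGALRM;
    if (timer_create(CLOCK_REALTIME, &ev, &timer) != 0) {
        fprintf(stderr, "No CLOCK_REALTIME timer\n");
        exit(2);
    }

    /* arm it as a repeating 1ms interval timer */
    it.it_value.tv_sec  = 0;
    it.it_value.tv_nsec = 1000000;   /* 1ms */
    it.it_interval      = it.it_value;
    if (timer_settime(timer, 0, &it, NULL) != 0) {
        fprintf(stderr, "settime problem\n");
        exit(4);
    }

    /* 100ms leaves plenty of slack even with ~10ms granularity */
    usleep(100000);
    if (!tock) {
        fprintf(stderr, "no CLOCK_REALTIME signal\n");
        exit(5);
    }
    return 0;
}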
Diffstat (limited to 'aclocal.m4')
-rw-r--r--  aclocal.m4 | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/aclocal.m4 b/aclocal.m4
index 2bf27bedf0..33f05e5a6f 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -1591,17 +1591,18 @@ int main(int argc, char *argv[])
         exit(2);
     }
+    tock = 0;
+
     it.it_value.tv_sec = 0;
-    it.it_value.tv_nsec = 1000000;
+    it.it_value.tv_nsec = 1000000; // 1ms
     it.it_interval = it.it_value;
     if (timer_settime(timer, 0, &it, NULL) != 0) {
         fprintf(stderr,"settime problem\n");
         exit(4);
     }
-    tock = 0;
-
-    usleep(3000);
+    // some environments have coarse scheduler/timer granularity of ~10ms and worse
+    usleep(100000); // 100ms
     if (!tock) {
         fprintf(stderr,"no CLOCK_REALTIME signal\n");