author    | harendra <>                                      | 2019-03-28 15:20:00 (GMT)
committer | hdiff <hdiff@hdiff.luite.com>                    | 2019-03-28 15:20:00 (GMT)
commit    | 99be4ede39cc58a8770e366830902f31ca906677 (patch)
tree      | bb063bc012aa0b1fd09bd30c54a21256aea052e0
parent    | 34bfb30864ec4630fb02eefc34911548a6dcaca1 (diff)
-rw-r--r-- | Changelog.md                  |   12
-rw-r--r-- | Setup.hs                      |    6
-rwxr-xr-x | configure                     | 4331
-rw-r--r-- | configure.ac                  |   17
-rw-r--r-- | examples/AcidRain.hs          |    4
-rw-r--r-- | jsbits/clock.js               |   31
-rw-r--r-- | src/Streamly/Atomics.hs       |   82
-rw-r--r-- | src/Streamly/SVar.hs          |  548
-rw-r--r-- | src/Streamly/Streams/Async.hs |    1
-rw-r--r-- | src/Streamly/Streams/SVar.hs  |    2
-rw-r--r-- | src/Streamly/Time/Clock.hsc   |  309
-rw-r--r-- | src/Streamly/Time/Darwin.c    |   36
-rw-r--r-- | src/Streamly/Time/Units.hs    |  471
-rw-r--r-- | src/Streamly/Time/Windows.c   |  115
-rw-r--r-- | src/Streamly/Time/config.h.in |   55
-rw-r--r-- | src/Streamly/Tutorial.hs      |   12
-rw-r--r-- | stack-7.10.yaml               |    2
-rw-r--r-- | stack-8.0.yaml                |    1
-rw-r--r-- | stack.yaml                    |    6
-rw-r--r-- | streamly.cabal                |   72
-rw-r--r-- | test/MaxRate.hs               |  174
21 files changed, 5938 insertions, 349 deletions
diff --git a/Changelog.md b/Changelog.md
index 1721c8b..1624afc 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,3 +1,15 @@
+## 0.6.1
+
+### Bug Fixes
+
+* Fix a bug that caused `maxThreads` directive to be ignored when rate control
+  was not used.
+
+### Enhancements
+
+* Add GHCJS support
+* Remove dependency on "clock" package
+
 ## 0.6.0
 
 ### Breaking changes
diff --git a/Setup.hs b/Setup.hs
new file mode 100644
index 0000000..54f57d6
--- /dev/null
+++ b/Setup.hs
@@ -0,0 +1,6 @@
+module Main (main) where
+
+import Distribution.Simple
+
+main :: IO ()
+main = defaultMainWithHooks autoconfUserHooks
diff --git a/configure b/configure
new file mode 100755
index 0000000..660e8cc
--- /dev/null
+++ b/configure
@@ -0,0 +1,4331 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.69 for streamly 0.6.0.
+#
+# Report bugs to <harendra.kumar@gmail.com>.
+#
+#
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
[... remainder of the 4331-line configure script generated by GNU Autoconf 2.69 (shell initialization, option parsing, --help text, and compiler/header check helpers) not reproduced here ...]
+ ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +# To suppress "WARNING: unrecognized options: --with-compiler" + +# Check whether --with-compiler was given. +if test "${with_compiler+set}" = set; then : + withval=$with_compiler; +fi + + +# Check headers and functions required +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. 
+else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. 
We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include <stdio.h> +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... 
" >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... " >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdarg.h> +#include <stdio.h> +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 
1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since + # <limits.h> exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include <limits.h> +#else +# include <assert.h> +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <ac_nonexistent.h> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
+rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since + # <limits.h> exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include <limits.h> +#else +# include <assert.h> +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <ac_nonexistent.h> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. 
+ # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... 
" >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include <float.h> + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <string.h> + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdlib.h> + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <ctype.h> +#include <stdlib.h> +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. 
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +for ac_header in time.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "time.h" "ac_cv_header_time_h" "$ac_includes_default" +if test "x$ac_cv_header_time_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_TIME_H 1 +_ACEOF + +fi + +done + +for ac_func in clock_gettime +do : + ac_fn_c_check_func "$LINENO" "clock_gettime" "ac_cv_func_clock_gettime" +if test "x$ac_cv_func_clock_gettime" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_CLOCK_GETTIME 1 +_ACEOF + +fi +done + + +# Output +ac_config_headers="$ac_config_headers src/Streamly/Time/config.h" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! 
-f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
+as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. 
+as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by streamly $as_me 0.6.0, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_headers="$ac_config_headers" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration headers: +$config_headers + +Report bugs to <harendra.kumar@gmail.com>." 
+ +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +streamly config.status 0.6.0 +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "src/Streamly/Time/config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/Streamly/Time/config.h" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. 
+if $ac_need_defaults; then + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. + +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' <confdefs.h | sed ' +s/'"$ac_delim"'/"\\\ +"/g' >>$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. 
+ if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :H $CONFIG_HEADERS " +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. 
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi + ;; + + + esac + +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..a8f51af --- /dev/null +++ b/configure.ac @@ -0,0 +1,17 @@ +# Input file for autoconf to generate the configure script. + +# See https://www.gnu.org/software/autoconf/manual/autoconf.html for help on +# the macros used in this file. 
+ +AC_INIT([streamly], [0.6.0], [harendra.kumar@gmail.com], [streamly]) + +# To suppress "WARNING: unrecognized options: --with-compiler" +AC_ARG_WITH([compiler], [GHC]) + +# Check headers and functions required +AC_CHECK_HEADERS([time.h]) +AC_CHECK_FUNCS([clock_gettime]) + +# Output +AC_CONFIG_HEADERS([src/Streamly/Time/config.h]) +AC_OUTPUT diff --git a/examples/AcidRain.hs b/examples/AcidRain.hs index b39c9ce..aa52383 100644 --- a/examples/AcidRain.hs +++ b/examples/AcidRain.hs @@ -5,9 +5,9 @@ import Streamly import Streamly.Prelude as S -import Control.Monad (void, when) +import Control.Monad (void) import Control.Monad.IO.Class (MonadIO(liftIO)) -import Control.Monad.State (MonadState, get, modify, runStateT, put) +import Control.Monad.State (MonadState, get, modify, runStateT) data Event = Quit | Harm Int | Heal Int deriving (Show) diff --git a/jsbits/clock.js b/jsbits/clock.js new file mode 100644 index 0000000..b81f09d --- /dev/null +++ b/jsbits/clock.js @@ -0,0 +1,31 @@ +function h$clock_gettime_js(when, p_d, p_o) { + /* XXX: guess if we have to write 64 bit values: + + alloca is often used and will give us 16 bytes + if timespec contains two 64 bit values + + but we really should fix this by not having hsc2hs values + from the build system leak here + */ + var is64 = p_d.i3.length == 4 && p_o == 0; + var o = p_o >> 2, + t = Date.now ? Date.now() : new Date().getTime(), + tf = Math.floor(t / 1000), + tn = 1000000 * (t - (1000 * tf)); + if(is64) { + p_d.i3[o] = tf|0; + p_d.i3[o+1] = 0; + p_d.i3[o+2] = tn|0; + p_d.i3[o+3] = 0; + } else { + p_d.i3[o] = tf|0; + p_d.i3[o+1] = tn|0; + } + return 0; +} +/* Hack! Supporting code for "clock" package + * "hspec" depends on clock. + */ +function h$hs_clock_darwin_gettime(when, p_d, p_o) { + h$clock_gettime_js(when, p_d, p_o); +} diff --git a/src/Streamly/Atomics.hs b/src/Streamly/Atomics.hs new file mode 100644 index 0000000..8d0656f --- /dev/null +++ b/src/Streamly/Atomics.hs @@ -0,0 +1,82 @@ +{-# LANGUAGE CPP #-} + +-- | +-- Module : Streamly.Atomics +-- Copyright : (c) 2018-2019 Composewell Technologies +-- +-- License : BSD3 +-- Maintainer : harendra.kumar@gmail.com +-- Stability : experimental +-- Portability : GHC + +module Streamly.Atomics + ( + atomicModifyIORefCAS + , atomicModifyIORefCAS_ + , writeBarrier + , storeLoadBarrier + ) +where + +import Data.IORef (IORef, atomicModifyIORef) +#ifdef ghcjs_HOST_OS +import Data.IORef (modifyIORef) +#else +import qualified Data.Atomics as A +#endif + +#ifndef ghcjs_HOST_OS + +-- XXX Does it make sense to have replacements for atomicModifyIORef etc. on a +-- single threaded system. +-- +-- Slightly faster version of CAS. Gained some improvement by avoiding the use +-- of "evaluate" because we know we do not have exceptions in fn. 
+{-# INLINE atomicModifyIORefCAS #-} +atomicModifyIORefCAS :: IORef a -> (a -> (a,b)) -> IO b +atomicModifyIORefCAS ref fn = do + tkt <- A.readForCAS ref + loop tkt retries + + where + + retries = 25 :: Int + loop _ 0 = atomicModifyIORef ref fn + loop old tries = do + let (new, result) = fn $ A.peekTicket old + (success, tkt) <- A.casIORef ref old new + if success + then return result + else loop tkt (tries - 1) + +{-# INLINE atomicModifyIORefCAS_ #-} +atomicModifyIORefCAS_ :: IORef t -> (t -> t) -> IO () +atomicModifyIORefCAS_ = A.atomicModifyIORefCAS_ + +{-# INLINE writeBarrier #-} +writeBarrier :: IO () +writeBarrier = A.writeBarrier + +{-# INLINE storeLoadBarrier #-} +storeLoadBarrier :: IO () +storeLoadBarrier = A.storeLoadBarrier + +#else + +{-# INLINE atomicModifyIORefCAS #-} +atomicModifyIORefCAS :: IORef a -> (a -> (a,b)) -> IO b +atomicModifyIORefCAS = atomicModifyIORef + +{-# INLINE atomicModifyIORefCAS_ #-} +atomicModifyIORefCAS_ :: IORef a -> (a -> a) -> IO () +atomicModifyIORefCAS_ = modifyIORef + +{-# INLINE writeBarrier #-} +writeBarrier :: IO () +writeBarrier = return () + +{-# INLINE storeLoadBarrier #-} +storeLoadBarrier :: IO () +storeLoadBarrier = return () + +#endif diff --git a/src/Streamly/SVar.hs b/src/Streamly/SVar.hs index 0903806..674e489 100644 --- a/src/Streamly/SVar.hs +++ b/src/Streamly/SVar.hs @@ -53,7 +53,6 @@ module Streamly.SVar , captureMonadState , RunInIO (..) - , atomicModifyIORefCAS , WorkerInfo (..) , YieldRateInfo (..) , ThreadAbort (..) @@ -100,7 +99,6 @@ module Streamly.SVar , toStreamVar , SVarStats (..) - , NanoSecs (..) , dumpSVar ) where @@ -117,9 +115,9 @@ import Control.Monad (when) import Control.Monad.Catch (MonadThrow) import Control.Monad.IO.Class (MonadIO(..)) import Control.Monad.Trans.Control (MonadBaseControl, control, StM) -import Data.Atomics - (casIORef, readForCAS, peekTicket, atomicModifyIORefCAS_, - writeBarrier, storeLoadBarrier) +import Streamly.Atomics + (atomicModifyIORefCAS, atomicModifyIORefCAS_, writeBarrier, + storeLoadBarrier) import Data.Concurrent.Queue.MichaelScott (LinkedQueue, pushL) import Data.Functor (void) import Data.Heap (Heap, Entry(..)) @@ -133,27 +131,15 @@ import Data.Set (Set) import GHC.Conc (ThreadId(..)) import GHC.Exts import GHC.IO (IO(..)) -import System.Clock (TimeSpec, Clock(Monotonic), getTime, toNanoSecs) +import Streamly.Time.Clock (Clock(..), getTime) +import Streamly.Time.Units + (AbsTime, NanoSecond64(..), MicroSecond64(..), diffAbsTime64, + fromRelTime64, toRelTime64, showNanoSecond64, showRelTime64) import System.IO (hPutStrLn, stderr) -import Text.Printf (printf) import qualified Data.Heap as H import qualified Data.Set as S --- Always use signed arithmetic to avoid inadvertant overflows of signed values --- on conversion when comparing unsigned quantities with signed. -newtype NanoSecs = NanoSecs Int64 - deriving ( Eq - , Read - , Show - , Enum - , Bounded - , Num - , Real - , Integral - , Ord - ) - newtype Count = Count Int64 deriving ( Eq , Read @@ -230,7 +216,7 @@ data WorkerInfo = WorkerInfo -- total number of yields by the worker till now , workerYieldCount :: IORef Count -- yieldCount at start, timestamp - , workerLatencyStart :: IORef (Count, TimeSpec) + , workerLatencyStart :: IORef (Count, AbsTime) } @@ -262,13 +248,13 @@ data Rate = Rate } data LatencyRange = LatencyRange - { minLatency :: NanoSecs - , maxLatency :: NanoSecs + { minLatency :: NanoSecond64 + , maxLatency :: NanoSecond64 } deriving Show -- Rate control. 
data YieldRateInfo = YieldRateInfo - { svarLatencyTarget :: NanoSecs + { svarLatencyTarget :: NanoSecond64 , svarLatencyRange :: LatencyRange , svarRateBuffer :: Int , svarGainedLostYields :: IORef Count @@ -279,11 +265,11 @@ data YieldRateInfo = YieldRateInfo -- rate. The idle time of workers is adjusted in this, so that we only -- account for the rate when the consumer actually demands data. -- XXX interval latency is enough, we can move this under diagnostics build - , svarAllTimeLatency :: IORef (Count, TimeSpec) + , svarAllTimeLatency :: IORef (Count, AbsTime) -- XXX Worker latency specified by the user to be used before the first -- actual measurement arrives. Not yet implemented - , workerBootstrapLatency :: Maybe NanoSecs + , workerBootstrapLatency :: Maybe NanoSecond64 -- After how many yields the worker should update the latency information. -- If the latency is high, this count is kept lower and vice-versa. XXX If @@ -295,20 +281,23 @@ data YieldRateInfo = YieldRateInfo -- This is in progress latency stats maintained by the workers which we -- empty into workerCollectedLatency stats at certain intervals - whenever - -- we process the stream elements yielded in this period. - -- (yieldCount, timeTaken) - , workerPendingLatency :: IORef (Count, NanoSecs) + -- we process the stream elements yielded in this period. The first count + -- is all yields, the second count is only those yields for which the + -- latency was measured to be non-zero (note that if the timer resolution + -- is low the measured latency may be zero e.g. on JS platform). + -- (allYieldCount, yieldCount, timeTaken) + , workerPendingLatency :: IORef (Count, Count, NanoSecond64) -- This is the second level stat which is an accmulation from -- workerPendingLatency stats. We keep accumulating latencies in this -- bucket until we have stats for a sufficient period and then we reset it -- to start collecting for the next period and retain the computed average -- latency for the last period in workerMeasuredLatency. - -- (yieldCount, timeTaken) - , workerCollectedLatency :: IORef (Count, NanoSecs) + -- (allYieldCount, yieldCount, timeTaken) + , workerCollectedLatency :: IORef (Count, Count, NanoSecond64) -- Latency as measured by workers, aggregated for the last period. 
- , workerMeasuredLatency :: IORef NanoSecs + , workerMeasuredLatency :: IORef NanoSecond64 } data SVarStats = SVarStats { @@ -317,10 +306,10 @@ data SVarStats = SVarStats { , maxOutQSize :: IORef Int , maxHeapSize :: IORef Int , maxWorkQSize :: IORef Int - , avgWorkerLatency :: IORef (Count, NanoSecs) - , minWorkerLatency :: IORef NanoSecs - , maxWorkerLatency :: IORef NanoSecs - , svarStopTime :: IORef (Maybe TimeSpec) + , avgWorkerLatency :: IORef (Count, NanoSecond64) + , minWorkerLatency :: IORef NanoSecond64 + , maxWorkerLatency :: IORef NanoSecond64 + , svarStopTime :: IORef (Maybe AbsTime) } data Limit = Unlimited | Limited Word deriving Show @@ -393,7 +382,7 @@ data State t m a = State , _threadsHigh :: Limit , _bufferHigh :: Limit -- XXX these two can be collapsed into a single type - , _streamLatency :: Maybe NanoSecs -- bootstrap latency + , _streamLatency :: Maybe NanoSecond64 -- bootstrap latency , _maxStreamRate :: Maybe Rate , _inspectMode :: Bool } @@ -505,7 +494,7 @@ setStreamLatency n st = else Just (fromIntegral n) } -getStreamLatency :: State t m a -> Maybe NanoSecs +getStreamLatency :: State t m a -> Maybe NanoSecond64 getStreamLatency = _streamLatency setInspectMode :: State t m a -> State t m a @@ -532,79 +521,133 @@ cleanupSVarFromWorker sv = do (S.toList workers \\ [self]) ------------------------------------------------------------------------------- --- Dumping the SVar for debug/diag +-- Worker latency data collection ------------------------------------------------------------------------------- --- | Convert a number of seconds to a string. The string will consist --- of four decimal places, followed by a short description of the time --- units. -secs :: Double -> String -secs k - | k < 0 = '-' : secs (-k) - | k >= 1 = k `with` "s" - | k >= 1e-3 = (k*1e3) `with` "ms" -#ifdef mingw32_HOST_OS - | k >= 1e-6 = (k*1e6) `with` "us" -#else - | k >= 1e-6 = (k*1e6) `with` "μs" -#endif - | k >= 1e-9 = (k*1e9) `with` "ns" - | k >= 1e-12 = (k*1e12) `with` "ps" - | k >= 1e-15 = (k*1e15) `with` "fs" - | k >= 1e-18 = (k*1e18) `with` "as" - | otherwise = printf "%g s" k - where with (t :: Double) (u :: String) - | t >= 1e9 = printf "%.4g %s" t u - | t >= 1e3 = printf "%.0f %s" t u - | t >= 1e2 = printf "%.1f %s" t u - | t >= 1e1 = printf "%.2f %s" t u - | otherwise = printf "%.3f %s" t u - --- XXX Code duplicated from collectLatency -drainLatency :: SVar t m a -> YieldRateInfo -> IO (Count, TimeSpec, NanoSecs) -drainLatency sv yinfo = do +-- Every once in a while workers update the latencies and check the yield rate. +-- They return if we are above the expected yield rate. If we check too often +-- it may impact performance, if we check less often we may have a stale +-- picture. We update every minThreadDelay but we translate that into a yield +-- count based on latency so that the checking overhead is little. +-- +-- XXX use a generation count to indicate that the value is updated. If the +-- value is updated an existing worker must check it again on the next yield. +-- Otherwise it is possible that we may keep updating it and because of the mod +-- worker keeps skipping it. 
+updateWorkerPollingInterval :: YieldRateInfo -> NanoSecond64 -> IO () +updateWorkerPollingInterval yinfo latency = do + let periodRef = workerPollingInterval yinfo + cnt = max 1 $ minThreadDelay `div` latency + period = min cnt (fromIntegral magicMaxBuffer) + + writeIORef periodRef (fromIntegral period) + +{-# INLINE recordMinMaxLatency #-} +recordMinMaxLatency :: SVar t m a -> NanoSecond64 -> IO () +recordMinMaxLatency sv new = do + let ss = svarStats sv + minLat <- readIORef (minWorkerLatency ss) + when (new < minLat || minLat == 0) $ + writeIORef (minWorkerLatency ss) new + + maxLat <- readIORef (maxWorkerLatency ss) + when (new > maxLat) $ writeIORef (maxWorkerLatency ss) new + +recordAvgLatency :: SVar t m a -> (Count, NanoSecond64) -> IO () +recordAvgLatency sv (count, time) = do + let ss = svarStats sv + modifyIORef (avgWorkerLatency ss) $ + \(cnt, t) -> (cnt + count, t + time) + +-- Pour the pending latency stats into a collection bucket +{-# INLINE collectWorkerPendingLatency #-} +collectWorkerPendingLatency + :: IORef (Count, Count, NanoSecond64) + -> IORef (Count, Count, NanoSecond64) + -> IO (Count, Maybe (Count, NanoSecond64)) +collectWorkerPendingLatency cur col = do + (fcount, count, time) <- atomicModifyIORefCAS cur $ \v -> ((0,0,0), v) + + (fcnt, cnt, t) <- readIORef col + let totalCount = fcnt + fcount + latCount = cnt + count + latTime = t + time + writeIORef col (totalCount, latCount, latTime) + + assert (latCount == 0 || latTime /= 0) (return ()) + let latPair = + if latCount > 0 && latTime > 0 + then Just $ (latCount, latTime) + else Nothing + return (totalCount, latPair) + +{-# INLINE shouldUseCollectedBatch #-} +shouldUseCollectedBatch + :: Count + -> NanoSecond64 + -> NanoSecond64 + -> NanoSecond64 + -> Bool +shouldUseCollectedBatch collectedYields collectedTime newLat prevLat = + let r = fromIntegral newLat / fromIntegral prevLat :: Double + in (collectedYields > fromIntegral magicMaxBuffer) + || (collectedTime > minThreadDelay) + || (prevLat > 0 && (r > 2 || r < 0.5)) + || (prevLat == 0) + +-- Returns a triple, (1) yield count since last collection, (2) the base time +-- when we started counting, (3) average latency in the last measurement +-- period. The former two are used for accurate measurement of the going rate +-- whereas the average is used for future estimates e.g. how many workers +-- should be maintained to maintain the rate. +-- CAUTION! 
keep it in sync with getWorkerLatency +collectLatency :: SVar t m a + -> YieldRateInfo + -> Bool + -> IO (Count, AbsTime, NanoSecond64) +collectLatency sv yinfo drain = do let cur = workerPendingLatency yinfo col = workerCollectedLatency yinfo longTerm = svarAllTimeLatency yinfo measured = workerMeasuredLatency yinfo - (count, time) <- atomicModifyIORefCAS cur $ \v -> ((0,0), v) - (colCount, colTime) <- readIORef col - (lcount, ltime) <- readIORef longTerm - prev <- readIORef measured - - let pendingCount = colCount + count - pendingTime = colTime + time - - lcount' = lcount + pendingCount - notUpdated = (lcount', ltime, prev) + (newCount, newLatPair) <- collectWorkerPendingLatency cur col + (lcount, ltime) <- readIORef longTerm + prevLat <- readIORef measured + + let newLcount = lcount + newCount + retWith lat = return (newLcount, ltime, lat) + + case newLatPair of + Nothing -> retWith prevLat + Just (count, time) -> do + let newLat = time `div` (fromIntegral count) + when (svarInspectMode sv) $ recordMinMaxLatency sv newLat + -- When we have collected a significant sized batch we compute the + -- new latency using that batch and return the new latency, + -- otherwise we return the previous latency derived from the + -- previous batch. + if shouldUseCollectedBatch newCount time newLat prevLat || drain + then do + -- XXX make this NOINLINE? + updateWorkerPollingInterval yinfo (max newLat prevLat) + when (svarInspectMode sv) $ recordAvgLatency sv (count, time) + writeIORef col (0, 0, 0) + writeIORef measured ((prevLat + newLat) `div` 2) + modifyIORef longTerm $ \(_, t) -> (newLcount, t) + retWith newLat + else retWith prevLat - if (pendingCount > 0) - then do - let new = pendingTime `div` (fromIntegral pendingCount) - when (svarInspectMode sv) $ do - let ss = svarStats sv - minLat <- readIORef (minWorkerLatency ss) - when (new < minLat || minLat == 0) $ - writeIORef (minWorkerLatency ss) new - - maxLat <- readIORef (maxWorkerLatency ss) - when (new > maxLat) $ writeIORef (maxWorkerLatency ss) new - modifyIORef (avgWorkerLatency ss) $ - \(cnt, t) -> (cnt + pendingCount, t + pendingTime) - -- To avoid minor fluctuations update in batches - writeIORef col (0, 0) - writeIORef measured new - modifyIORef longTerm $ \(_, t) -> (lcount', t) - return (lcount', ltime, new) - else return notUpdated +------------------------------------------------------------------------------- +-- Dumping the SVar for debug/diag +------------------------------------------------------------------------------- dumpSVarStats :: SVar t m a -> SVarStats -> SVarStyle -> IO String dumpSVarStats sv ss style = do case yieldRateInfo sv of Nothing -> return () Just yinfo -> do - _ <- liftIO $ drainLatency sv yinfo + _ <- liftIO $ collectLatency sv yinfo True return () dispatches <- readIORef $ totalDispatches ss @@ -625,10 +668,10 @@ dumpSVarStats sv ss style = do case t of Nothing -> do now <- getTime Monotonic - let interval = toNanoSecs (now - startTime) + let interval = diffAbsTime64 now startTime return (cnt, gl, interval `div` fromIntegral cnt) Just stopTime -> do - let interval = toNanoSecs (stopTime - startTime) + let interval = diffAbsTime64 stopTime startTime return (cnt, gl, interval `div` fromIntegral cnt) else return (0, 0, 0) @@ -640,21 +683,17 @@ dumpSVarStats sv ss style = do then "\nheap max size = " <> show maxHp else "") <> (if minLat > 0 - then "\nmin worker latency = " - <> secs (fromIntegral minLat * 1e-9) + then "\nmin worker latency = " <> showNanoSecond64 minLat else "") <> (if maxLat > 0 - then "\nmax 
worker latency = " - <> secs (fromIntegral maxLat * 1e-9) + then "\nmax worker latency = " <> showNanoSecond64 maxLat else "") <> (if avgCnt > 0 then let lat = avgTime `div` fromIntegral avgCnt - in "\navg worker latency = " - <> secs (fromIntegral lat * 1e-9) + in "\navg worker latency = " <> showNanoSecond64 lat else "") <> (if svarLat > 0 - then "\nSVar latency = " - <> secs (fromIntegral svarLat * 1e-9) + then "\nSVar latency = " <> showRelTime64 svarLat else "") <> (if svarCnt > 0 then "\nSVar yield count = " <> show svarCnt @@ -739,46 +778,8 @@ withDiagMVar sv label action = ] else action -------------------------------------------------------------------------------- --- CAS -------------------------------------------------------------------------------- - --- Slightly faster version of CAS. Gained some improvement by avoiding the use --- of "evaluate" because we know we do not have exceptions in fn. -{-# INLINE atomicModifyIORefCAS #-} -atomicModifyIORefCAS :: IORef a -> (a -> (a,b)) -> IO b -atomicModifyIORefCAS ref fn = do - tkt <- readForCAS ref - loop tkt retries - - where - - retries = 25 :: Int - loop _ 0 = atomicModifyIORef ref fn - loop old tries = do - let (new, result) = fn $ peekTicket old - (success, tkt) <- casIORef ref old new - if success - then return result - else loop tkt (tries - 1) - -{-# INLINE ringDoorBell #-} -ringDoorBell :: SVar t m a -> IO () -ringDoorBell sv = do - storeLoadBarrier - w <- readIORef $ needDoorBell sv - when w $ do - -- Note: the sequence of operations is important for correctness here. - -- We need to set the flag to false strictly before sending the - -- outputDoorBell, otherwise the outputDoorBell may get processed too early and - -- then we may set the flag to False to later making the consumer lose - -- the flag, even without receiving a outputDoorBell. - atomicModifyIORefCAS_ (needDoorBell sv) (const False) - void $ tryPutMVar (outputDoorBell sv) () - - ------------------------------------------------------------------------------ --- Spawning threads and collecting result in streamed fashion +-- Spawning threads ------------------------------------------------------------------------------ -- | A monad that can perform concurrent or parallel IO operations. Streams @@ -823,6 +824,10 @@ doFork action (RunInIO mrun) exHandler = exHandler run (return tid) +------------------------------------------------------------------------------ +-- Collecting results from child workers in a streamed fashion +------------------------------------------------------------------------------ + -- XXX Can we make access to remainingWork and yieldRateInfo fields in sv -- faster, along with the fields in sv required by send? -- XXX make it noinline @@ -898,7 +903,7 @@ send sv msg = do active <- readIORef (workerCount sv) return $ len < (fromIntegral lim - active) -workerCollectLatency :: WorkerInfo -> IO (Maybe (Count, NanoSecs)) +workerCollectLatency :: WorkerInfo -> IO (Maybe (Count, NanoSecond64)) workerCollectLatency winfo = do (cnt0, t0) <- readIORef (workerLatencyStart winfo) cnt1 <- readIORef (workerYieldCount winfo) @@ -907,7 +912,7 @@ workerCollectLatency winfo = do if cnt > 0 then do t1 <- getTime Monotonic - let period = fromInteger $ toNanoSecs (t1 - t0) + let period = fromRelTime64 $ diffAbsTime64 t1 t0 writeIORef (workerLatencyStart winfo) (cnt1, t1) return $ Just (cnt, period) else return Nothing @@ -921,6 +926,8 @@ workerCollectLatency winfo = do -- described next. 
-- 3) It is possible that a worker returns without yielding anything because it -- never got a chance to pick up work. +-- 4) If the system timer resolution is lower than the latency, the latency +-- computation turns out to be zero. -- -- We can fix this if we measure the latencies by counting the work items -- picked rather than based on the outputs yielded. @@ -929,8 +936,16 @@ workerUpdateLatency yinfo winfo = do r <- workerCollectLatency winfo case r of Just (cnt, period) -> do + -- NOTE: On JS platform the timer resolution could be pretty low. When + -- the timer resolution is low, measurement of latencies could be + -- tricky. All the worker latencies will turn out to be zero if they + -- are lower than the resolution. We only take into account those + -- measurements which are more than the timer resolution. + let ref = workerPendingLatency yinfo - atomicModifyIORefCAS_ ref $ \(n, t) -> (n + cnt, t + period) + (cnt1, t1) = if period > 0 then (cnt, period) else (0, 0) + atomicModifyIORefCAS_ ref $ + \(fc, n, t) -> (fc + cnt, n + cnt1, t + t1) Nothing -> return () updateYieldCount :: WorkerInfo -> IO Count @@ -1008,6 +1023,24 @@ sendStop sv mwinfo = do myThreadId >>= \tid -> void $ send sv (ChildStop tid Nothing) ------------------------------------------------------------------------------- +-- Doorbell +------------------------------------------------------------------------------- + +{-# INLINE ringDoorBell #-} +ringDoorBell :: SVar t m a -> IO () +ringDoorBell sv = do + storeLoadBarrier + w <- readIORef $ needDoorBell sv + when w $ do + -- Note: the sequence of operations is important for correctness here. + -- We need to set the flag to false strictly before sending the + -- outputDoorBell, otherwise the outputDoorBell may get processed too + -- early and then we may set the flag to False to later making the + -- consumer lose the flag, even without receiving a outputDoorBell. + atomicModifyIORefCAS_ (needDoorBell sv) (const False) + void $ tryPutMVar (outputDoorBell sv) () + +------------------------------------------------------------------------------- -- Async ------------------------------------------------------------------------------- @@ -1235,6 +1268,10 @@ updateHeapSeq hpVar seqNo = -- remaining computation at the back of the queue instead of the heap, and -- increment the sequence number. +------------------------------------------------------------------------------- +-- Dispatching workers and tracking them +------------------------------------------------------------------------------- + -- Thread tracking is needed for two reasons: -- -- 1) Killing threads on exceptions. Threads may not be left to go away by @@ -1389,29 +1426,36 @@ dispatchWorker yieldCount sv = do Nothing -> return workerLimit Just ref -> do n <- liftIO $ readIORef ref - return $ - case workerLimit of - Unlimited -> Limited (fromIntegral n) - Limited lim -> Limited $ min lim (fromIntegral n) - - -- XXX for ahead streams shall we take the heap yields into account for - -- controlling the dispatch? We should not dispatch if the heap has - -- already got the limit covered. + case yieldRateInfo sv of + Just _ -> return workerLimit + Nothing -> + return $ + case workerLimit of + Unlimited -> Limited (fromIntegral n) + Limited lim -> Limited $ min lim (fromIntegral n) + + -- XXX for ahead streams shall we take the heap yields into account + -- for controlling the dispatch? We should not dispatch if the heap + -- has already got the limit covered. 
let dispatch = pushWorker yieldCount sv >> return True in case limit of Unlimited -> dispatch -- Note that the use of remainingWork and workerCount is not - -- atomic and the counts may even have changed between reading and - -- using them here, so this is just approximate logic and we cannot - -- rely on it for correctness. We may actually dispatch more - -- workers than required. - Limited lim | lim > 0 -> dispatch + -- atomic and the counts may even have changed between reading + -- and using them here, so this is just approximate logic and + -- we cannot rely on it for correctness. We may actually + -- dispatch more workers than required. + Limited lim | lim > fromIntegral active -> dispatch _ -> return False else do when (active <= 0) $ pushWorker 0 sv return False else return False +------------------------------------------------------------------------------- +-- Dispatch workers with rate control +------------------------------------------------------------------------------- + -- | This is a magic number and it is overloaded, and used at several places to -- achieve batching: -- @@ -1420,24 +1464,21 @@ dispatchWorker yieldCount sv = do -- sleep time is accumulated. -- 3. Collected latencies are computed and transferred to measured latency -- after a minimum of this period. -minThreadDelay :: NanoSecs -minThreadDelay = 10^(6 :: Int) +minThreadDelay :: NanoSecond64 +minThreadDelay = 1000000 -- | Another magic number! When we have to start more workers to cover up a -- number of yields that we are lagging by then we cannot start one worker for -- each yield because that may be a very big number and if the latency of the -- workers is low these number of yields could be very high. We assume that we -- run each extra worker for at least this much time. -rateRecoveryTime :: NanoSecs +rateRecoveryTime :: NanoSecond64 rateRecoveryTime = 1000000 -nanoToMicroSecs :: NanoSecs -> Int -nanoToMicroSecs s = fromIntegral s `div` 1000 - -- We either block, or send one worker with limited yield count or one or more -- workers with unlimited yield count. data Work - = BlockWait NanoSecs + = BlockWait NanoSecond64 | PartialWorker Count | ManyWorkers Int Count deriving Show @@ -1447,9 +1488,9 @@ estimateWorkers :: Limit -> Count -> Count - -> NanoSecs - -> NanoSecs - -> NanoSecs + -> NanoSecond64 + -> NanoSecond64 + -> NanoSecond64 -> LatencyRange -> Work estimateWorkers workerLimit svarYields gainLossYields @@ -1500,7 +1541,7 @@ estimateWorkers workerLimit svarYields gainLossYields fromIntegral rateRecoveryTime yieldsFreq = 1.0 / fromIntegral targetLat totalYieldsFreq = yieldsFreq + deltaYieldsFreq - requiredLat = NanoSecs $ round $ 1.0 / totalYieldsFreq + requiredLat = NanoSecond64 $ round $ 1.0 / totalYieldsFreq adjustedLat = min (max requiredLat (minLatency range)) (maxLatency range) in assert (adjustedLat > 0) $ @@ -1527,33 +1568,34 @@ estimateWorkers workerLimit svarYields gainLossYields -- | Get the worker latency without resetting workerPendingLatency -- Returns (total yield count, base time, measured latency) -- CAUTION! 
keep it in sync with collectLatency -getWorkerLatency :: YieldRateInfo -> IO (Count, TimeSpec, NanoSecs) +getWorkerLatency :: YieldRateInfo -> IO (Count, AbsTime, NanoSecond64) getWorkerLatency yinfo = do let cur = workerPendingLatency yinfo col = workerCollectedLatency yinfo longTerm = svarAllTimeLatency yinfo measured = workerMeasuredLatency yinfo - (count, time) <- readIORef cur - (colCount, colTime) <- readIORef col + (curTotalCount, curCount, curTime) <- readIORef cur + (colTotalCount, colCount, colTime) <- readIORef col (lcount, ltime) <- readIORef longTerm - prev <- readIORef measured - - let pendingCount = colCount + count - pendingTime = colTime + time - new = - if pendingCount > 0 - then let lat = pendingTime `div` fromIntegral pendingCount + prevLat <- readIORef measured + + let latCount = colCount + curCount + latTime = colTime + curTime + totalCount = colTotalCount + curTotalCount + newLat = + if latCount > 0 && latTime > 0 + then let lat = latTime `div` fromIntegral latCount -- XXX Give more weight to new? - in (lat + prev) `div` 2 - else prev - return (lcount + pendingCount, ltime, new) + in (lat + prevLat) `div` 2 + else prevLat + return (lcount + totalCount, ltime, newLat) isBeyondMaxRate :: SVar t m a -> YieldRateInfo -> IO Bool isBeyondMaxRate sv yinfo = do (count, tstamp, wLatency) <- getWorkerLatency yinfo now <- getTime Monotonic - let duration = fromInteger $ toNanoSecs $ now - tstamp + let duration = fromRelTime64 $ diffAbsTime64 now tstamp let targetLat = svarLatencyTarget yinfo gainLoss <- readIORef (svarGainedLostYields yinfo) let work = estimateWorkers (maxWorkerLimit sv) count gainLoss duration @@ -1565,82 +1607,6 @@ isBeyondMaxRate sv yinfo = do ManyWorkers n _ -> cnt > n BlockWait _ -> True --- Every once in a while workers update the latencies and check the yield rate. --- They return if we are above the expected yield rate. If we check too often --- it may impact performance, if we check less often we may have a stale --- picture. We update every minThreadDelay but we translate that into a yield --- count based on latency so that the checking overhead is little. --- --- XXX use a generation count to indicate that the value is updated. If the --- value is updated an existing worker must check it again on the next yield. --- Otherwise it is possible that we may keep updating it and because of the mod --- worker keeps skipping it. -updateWorkerPollingInterval :: YieldRateInfo -> NanoSecs -> IO () -updateWorkerPollingInterval yinfo latency = do - let periodRef = workerPollingInterval yinfo - cnt = max 1 $ minThreadDelay `div` latency - period = min cnt (fromIntegral magicMaxBuffer) - - writeIORef periodRef (fromIntegral period) - --- Returns a triple, (1) yield count since last collection, (2) the base time --- when we started counting, (3) average latency in the last measurement --- period. The former two are used for accurate measurement of the going rate --- whereas the average is used for future estimates e.g. how many workers --- should be maintained to maintain the rate. --- CAUTION! 
keep it in sync with getWorkerLatency -collectLatency :: SVar t m a -> YieldRateInfo -> IO (Count, TimeSpec, NanoSecs) -collectLatency sv yinfo = do - let cur = workerPendingLatency yinfo - col = workerCollectedLatency yinfo - longTerm = svarAllTimeLatency yinfo - measured = workerMeasuredLatency yinfo - - (count, time) <- atomicModifyIORefCAS cur $ \v -> ((0,0), v) - (colCount, colTime) <- readIORef col - (lcount, ltime) <- readIORef longTerm - prev <- readIORef measured - - let pendingCount = colCount + count - pendingTime = colTime + time - - lcount' = lcount + pendingCount - tripleWith lat = (lcount', ltime, lat) - - if pendingCount > 0 - then do - let new = pendingTime `div` (fromIntegral pendingCount) - when (svarInspectMode sv) $ do - let ss = svarStats sv - minLat <- readIORef (minWorkerLatency ss) - when (new < minLat || minLat == 0) $ - writeIORef (minWorkerLatency ss) new - - maxLat <- readIORef (maxWorkerLatency ss) - when (new > maxLat) $ writeIORef (maxWorkerLatency ss) new - -- When we have collected a significant sized batch we compute the new - -- latency using that batch and return the new latency, otherwise we - -- return the previous latency derived from the previous batch. - if (pendingCount > fromIntegral magicMaxBuffer) - || (pendingTime > minThreadDelay) - || (let r = fromIntegral new / fromIntegral prev :: Double - in prev > 0 && (r > 2 || r < 0.5)) - || (prev == 0) - then do - when (svarInspectMode sv) $ do - let ss = svarStats sv - modifyIORef (avgWorkerLatency ss) $ - \(cnt, t) -> (cnt + pendingCount, t + pendingTime) - updateWorkerPollingInterval yinfo (max new prev) - writeIORef col (0, 0) - writeIORef measured ((prev + new) `div` 2) - modifyIORef longTerm $ \(_, t) -> (lcount', t) - return $ tripleWith new - else do - writeIORef col (pendingCount, pendingTime) - return $ tripleWith prev - else return $ tripleWith prev - -- XXX in case of ahead style stream we need to take the heap size into account -- because we return the workers on the basis of that which causes a condition -- where we keep dispatching and they keep returning. So we must have exactly @@ -1655,8 +1621,8 @@ dispatchWorkerPaced sv = do (svarYields, svarElapsed, wLatency) <- do now <- liftIO $ getTime Monotonic (yieldCount, baseTime, lat) <- - liftIO $ collectLatency sv yinfo - let elapsed = fromInteger $ toNanoSecs $ now - baseTime + liftIO $ collectLatency sv yinfo False + let elapsed = fromRelTime64 $ diffAbsTime64 now baseTime let latency = if lat == 0 then @@ -1698,7 +1664,8 @@ dispatchWorkerPaced sv = do -- still have a Stop event waiting in the outputQueue. done <- allThreadsDone sv when done $ void $ do - liftIO $ threadDelay $ nanoToMicroSecs s + let us = fromRelTime64 (toRelTime64 s) :: MicroSecond64 + liftIO $ threadDelay (fromIntegral us) dispatchWorker 1 sv return False PartialWorker yields -> do @@ -1728,8 +1695,9 @@ dispatchWorkerPaced sv = do batch = max 1 $ fromIntegral $ minThreadDelay `div` targetLat -- XXX stagger the workers over a period? - -- XXX cannot sleep, as that would mean we cannot process the - -- outputs. need to try a different mechanism to stagger. + -- XXX cannot sleep, as that would mean we cannot process + -- the outputs. need to try a different mechanism to + -- stagger. 
-- when (total > batch) $ -- liftIO $ threadDelay $ nanoToMicroSecs minThreadDelay dispatchN (min total batch) @@ -1755,6 +1723,10 @@ dispatchWorkerPaced sv = do then dispatchN (n - 1) else return False +------------------------------------------------------------------------------- +-- Worker dispatch and wait loop +------------------------------------------------------------------------------- + sendWorkerDelayPaced :: SVar t m a -> IO () sendWorkerDelayPaced _ = return () @@ -1825,18 +1797,18 @@ sendWorkerWait delay dispatch sv = do -- queued items in the heap even though the outputQueue is empty, and -- we may have active workers which are deadlocked on those items to be -- processed by the consumer. We should either guarantee that any - -- worker, before returning, clears the heap or we send a worker to clear - -- it. Normally we always send a worker if no output is seen, but if - -- the thread limit is reached or we are using pacing then we may not - -- send a worker. See the concurrentApplication test in the tests, that - -- test case requires at least one yield from the producer to not + -- worker, before returning, clears the heap or we send a worker to + -- clear it. Normally we always send a worker if no output is seen, but + -- if the thread limit is reached or we are using pacing then we may + -- not send a worker. See the concurrentApplication test in the tests, + -- that test case requires at least one yield from the producer to not -- deadlock, if the last workers output is stuck in the heap then this -- test fails. This problem can be extended to n threads when the -- consumer may depend on the evaluation of next n items in the -- producer stream. - -- register for the outputDoorBell before we check the queue so that if we - -- sleep because the queue was empty we are guaranteed to get a + -- register for the outputDoorBell before we check the queue so that if + -- we sleep because the queue was empty we are guaranteed to get a -- doorbell on the next enqueue. 
liftIO $ atomicModifyIORefCAS_ (needDoorBell sv) $ const True @@ -1858,6 +1830,10 @@ sendWorkerWait delay dispatch sv = do (_, len) <- liftIO $ readIORef (outputQueue sv) when (len <= 0) $ sendWorkerWait delay dispatch sv +------------------------------------------------------------------------------- +-- Reading from the workers' output queue/buffer +------------------------------------------------------------------------------- + {-# INLINE readOutputQRaw #-} readOutputQRaw :: SVar t m a -> IO ([ChildEvent a], Int) readOutputQRaw sv = do @@ -1956,6 +1932,10 @@ postProcessPaced sv = do return r else return False +------------------------------------------------------------------------------- +-- Creating an SVar +------------------------------------------------------------------------------- + getYieldRateInfo :: State t m a -> IO (Maybe YieldRateInfo) getYieldRateInfo st = do -- convert rate in Hertz to latency in Nanoseconds @@ -1972,8 +1952,8 @@ getYieldRateInfo st = do mkYieldRateInfo latency latRange buf = do measured <- newIORef 0 - wcur <- newIORef (0,0) - wcol <- newIORef (0,0) + wcur <- newIORef (0,0,0) + wcol <- newIORef (0,0,0) now <- getTime Monotonic wlong <- newIORef (0,now) period <- newIORef 1 @@ -1999,9 +1979,9 @@ newSVarStats = do maxOq <- newIORef 0 maxHs <- newIORef 0 maxWq <- newIORef 0 - avgLat <- newIORef (0, NanoSecs 0) - maxLat <- newIORef (NanoSecs 0) - minLat <- newIORef (NanoSecs 0) + avgLat <- newIORef (0, NanoSecond64 0) + maxLat <- newIORef (NanoSecond64 0) + minLat <- newIORef (NanoSecond64 0) stpTime <- newIORef Nothing return SVarStats @@ -2162,7 +2142,7 @@ getParallelSVar st mrun = do $ takeMVar (outputDoorBell sv) case yieldRateInfo sv of Nothing -> return () - Just yinfo -> void $ collectLatency sv yinfo + Just yinfo -> void $ collectLatency sv yinfo False fst `fmap` readOutputQRaw sv sendFirstWorker :: MonadAsync m => SVar t m a -> t m a -> m (SVar t m a) @@ -2201,6 +2181,10 @@ newParallelVar st = do mrun <- captureMonadState liftIO $ getParallelSVar st mrun +------------------------------------------------------------------------------- +-- Write a stream to an SVar +------------------------------------------------------------------------------- + -- XXX this errors out for Parallel/Ahead SVars -- | Write a stream to an 'SVar' in a non-blocking manner. The stream can then -- be read back from the SVar using 'fromSVar'. 
diff --git a/src/Streamly/Streams/Async.hs b/src/Streamly/Streams/Async.hs index 9ac3f26..6279160 100644 --- a/src/Streamly/Streams/Async.hs +++ b/src/Streamly/Streams/Async.hs @@ -54,6 +54,7 @@ import Data.Semigroup (Semigroup(..)) import Prelude hiding (map) import qualified Data.Set as S +import Streamly.Atomics (atomicModifyIORefCAS) import Streamly.Streams.SVar (fromSVar) import Streamly.Streams.Serial (map) import Streamly.SVar diff --git a/src/Streamly/Streams/SVar.hs b/src/Streamly/Streams/SVar.hs index 7d30e09..a02ed2b 100644 --- a/src/Streamly/Streams/SVar.hs +++ b/src/Streamly/Streams/SVar.hs @@ -24,7 +24,7 @@ import Data.IORef (newIORef, readIORef, mkWeakIORef, writeIORef) import Data.Maybe (isNothing) import Data.Semigroup ((<>)) import System.IO (hPutStrLn, stderr) -import System.Clock (Clock(Monotonic), getTime) +import Streamly.Time.Clock (Clock(Monotonic), getTime) import System.Mem (performMajorGC) import Streamly.SVar diff --git a/src/Streamly/Time/Clock.hsc b/src/Streamly/Time/Clock.hsc new file mode 100644 index 0000000..b25ecad --- /dev/null +++ b/src/Streamly/Time/Clock.hsc @@ -0,0 +1,309 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE ScopedTypeVariables #-} + +#if __GLASGOW_HASKELL__ >= 800 +{-# OPTIONS_GHC -Wno-identities #-} +{-# OPTIONS_GHC -Wno-orphans #-} +{-# OPTIONS_GHC -fno-warn-unused-imports #-} +#endif + +#ifndef __GHCJS__ +#include "config.h" +#endif + +-- | +-- Module : Streamly.Time.Clock +-- Copyright : (c) 2019 Harendra Kumar +-- (c) 2009-2012, Cetin Sert +-- (c) 2010, Eugene Kirpichov +-- License : BSD3 +-- Maintainer : harendra.kumar@gmail.com +-- Stability : experimental +-- Portability : GHC + +-- A majority of the code below has been stolen from the "clock" package. + +#if __GHCJS__ +#define HS_CLOCK_GHCJS 1 +#elif (defined (HAVE_TIME_H) && defined(HAVE_CLOCK_GETTIME)) +#define HS_CLOCK_POSIX 1 +#elif __APPLE__ +#define HS_CLOCK_OSX 1 +#elif defined(_WIN32) +#define HS_CLOCK_WINDOWS 1 +#else +#error "Time/Clock functionality not implemented for this system" +#endif + +module Streamly.Time.Clock + ( + -- * get time from the system clock + Clock(..) + , getTime + ) +where + +import Data.Int (Int32, Int64) +import Data.Typeable (Typeable) +import Data.Word (Word32) +import Foreign.C (CInt(..), throwErrnoIfMinus1_, CTime(..), CLong(..)) +import Foreign.Marshal.Alloc (alloca) +import Foreign.Ptr (Ptr) +import Foreign.Storable (Storable(..), peek) +import GHC.Generics (Generic) + +import Streamly.Time.Units (TimeSpec(..), AbsTime(..)) + +------------------------------------------------------------------------------- +-- Clock Types +------------------------------------------------------------------------------- + +#if HS_CLOCK_POSIX +#include <time.h> + +#if defined(CLOCK_MONOTONIC_RAW) +#define HAVE_CLOCK_MONOTONIC_RAW +#endif + +-- XXX this may be RAW on apple not RAW on linux +#if __linux__ && defined(CLOCK_MONOTONIC_COARSE) +#define HAVE_CLOCK_MONOTONIC_COARSE +#endif + +#if __APPLE__ && defined(CLOCK_MONOTONIC_RAW_APPROX) +#define HAVE_CLOCK_MONOTONIC_COARSE +#endif + +#if __linux__ && defined(CLOCK_BOOTTIME) +#define HAVE_CLOCK_MONOTONIC_UPTIME +#endif + +#if __APPLE__ && defined(CLOCK_UPTIME_RAW) +#define HAVE_CLOCK_MONOTONIC_UPTIME +#endif + +#if __linux__ && defined(CLOCK_REALTIME_COARSE) +#define HAVE_CLOCK_REALTIME_COARSE +#endif + +#endif + +-- | Clock types. 
A clock may be system-wide (that is, visible to all processes) +-- or per-process (measuring time that is meaningful only within a process). +-- All implementations shall support CLOCK_REALTIME. (The only suspend-aware +-- monotonic is CLOCK_BOOTTIME on Linux.) +data Clock + + -- | The identifier for the system-wide monotonic clock, which is defined as + -- a clock measuring real time, whose value cannot be set via + -- @clock_settime@ and which cannot have negative clock jumps. The maximum + -- possible clock jump shall be implementation defined. For this clock, + -- the value returned by 'getTime' represents the amount of time (in + -- seconds and nanoseconds) since an unspecified point in the past (for + -- example, system start-up time, or the Epoch). This point does not + -- change after system start-up time. Note that the absolute value of the + -- monotonic clock is meaningless (because its origin is arbitrary), and + -- thus there is no need to set it. Furthermore, realtime applications can + -- rely on the fact that the value of this clock is never set. + = Monotonic + + -- | The identifier of the system-wide clock measuring real time. For this + -- clock, the value returned by 'getTime' represents the amount of time (in + -- seconds and nanoseconds) since the Epoch. + | Realtime + +#ifndef HS_CLOCK_GHCJS + -- | The identifier of the CPU-time clock associated with the calling + -- process. For this clock, the value returned by 'getTime' represents the + -- amount of execution time of the current process. + | ProcessCPUTime + + -- | The identifier of the CPU-time clock associated with the calling OS + -- thread. For this clock, the value returned by 'getTime' represents the + -- amount of execution time of the current OS thread. + | ThreadCPUTime +#endif + +#if defined (HAVE_CLOCK_MONOTONIC_RAW) + -- | (since Linux 2.6.28; Linux and Mac OSX) + -- Similar to CLOCK_MONOTONIC, but provides access to a + -- raw hardware-based time that is not subject to NTP + -- adjustments or the incremental adjustments performed by + -- adjtime(3). + | MonotonicRaw +#endif + +#if defined (HAVE_CLOCK_MONOTONIC_COARSE) + -- | (since Linux 2.6.32; Linux and Mac OSX) + -- A faster but less precise version of CLOCK_MONOTONIC. + -- Use when you need very fast, but not fine-grained timestamps. + | MonotonicCoarse +#endif + +#if defined (HAVE_CLOCK_MONOTONIC_UPTIME) + -- | (since Linux 2.6.39; Linux and Mac OSX) + -- Identical to CLOCK_MONOTONIC, except it also includes + -- any time that the system is suspended. This allows + -- applications to get a suspend-aware monotonic clock + -- without having to deal with the complications of + -- CLOCK_REALTIME, which may have discontinuities if the + -- time is changed using settimeofday(2). + | Uptime +#endif + +#if defined (HAVE_CLOCK_REALTIME_COARSE) + -- | (since Linux 2.6.32; Linux-specific) + -- A faster but less precise version of CLOCK_REALTIME. + -- Use when you need very fast, but not fine-grained timestamps. 
+ | RealtimeCoarse +#endif + + deriving (Eq, Enum, Generic, Read, Show, Typeable) + +------------------------------------------------------------------------------- +-- Translate the Haskell "Clock" type to C +------------------------------------------------------------------------------- + +#if HS_CLOCK_POSIX +-- Posix systems (Linux and Mac OSX 10.12 and later) +clockToPosixClockId :: Clock -> #{type clockid_t} +clockToPosixClockId Monotonic = #const CLOCK_MONOTONIC +clockToPosixClockId Realtime = #const CLOCK_REALTIME +clockToPosixClockId ProcessCPUTime = #const CLOCK_PROCESS_CPUTIME_ID +clockToPosixClockId ThreadCPUTime = #const CLOCK_THREAD_CPUTIME_ID + +#if defined(CLOCK_MONOTONIC_RAW) +clockToPosixClockId MonotonicRaw = #const CLOCK_MONOTONIC_RAW +#endif + +#if __linux__ && defined (CLOCK_MONOTONIC_COARSE) +clockToPosixClockId MonotonicCoarse = #const CLOCK_MONOTONIC_COARSE +#elif __APPLE__ && defined(CLOCK_MONOTONIC_RAW_APPROX) +clockToPosixClockId MonotonicCoarse = #const CLOCK_MONOTONIC_RAW_APPROX +#endif + +#if __linux__ && defined (CLOCK_REALTIME_COARSE) +clockToPosixClockId RealtimeCoarse = #const CLOCK_REALTIME_COARSE +#endif + +#if __linux__ && defined(CLOCK_BOOTTIME) +clockToPosixClockId Uptime = #const CLOCK_BOOTTIME +#elif __APPLE__ && defined(CLOCK_UPTIME_RAW) +clockToPosixClockId Uptime = #const CLOCK_UPTIME_RAW +#endif + +#elif HS_CLOCK_OSX +-- Mac OSX versions prior to 10.12 +#include <time.h> +#include <mach/clock.h> + +clockToOSXClockId :: Clock -> #{type clock_id_t} +clockToOSXClockId Monotonic = #const SYSTEM_CLOCK +clockToOSXClockId Realtime = #const CALENDAR_CLOCK +clockToOSXClockId ProcessCPUTime = #const SYSTEM_CLOCK +clockToOSXClockId ThreadCPUTime = #const SYSTEM_CLOCK +#elif HS_CLOCK_GHCJS +-- XXX need to implement a monotonic clock for JS using performance.now() +clockToJSClockId :: Clock -> CInt +clockToJSClockId Monotonic = 0 +clockToJSClockId Realtime = 0 +#endif + +------------------------------------------------------------------------------- +-- Clock time +------------------------------------------------------------------------------- + +#if __GLASGOW_HASKELL__ < 800 +#let alignment t = "%lu", (unsigned long)offsetof(struct {char x__; t (y__); }, y__) +#endif + +#ifdef HS_CLOCK_GHCJS +instance Storable TimeSpec where + sizeOf _ = 8 + alignment _ = 4 + peek p = do + CTime s <- peekByteOff p 0 + CLong ns <- peekByteOff p 4 + return (TimeSpec (fromIntegral s) (fromIntegral ns)) + poke p (TimeSpec s ns) = do + pokeByteOff p 0 ((fromIntegral s) :: CTime) + pokeByteOff p 4 ((fromIntegral ns) :: CLong) + +#elif HS_CLOCK_WINDOWS +instance Storable TimeSpec where + sizeOf _ = sizeOf (undefined :: Int64) * 2 + alignment _ = alignment (undefined :: Int64) + peek ptr = do + s <- peekByteOff ptr 0 + ns <- peekByteOff ptr (sizeOf (undefined :: Int64)) + return (TimeSpec s ns) + poke ptr ts = do + pokeByteOff ptr 0 (sec ts) + pokeByteOff ptr (sizeOf (undefined :: Int64)) (nsec ts) +#else +instance Storable TimeSpec where + sizeOf _ = #{size struct timespec} + alignment _ = #{alignment struct timespec} + peek ptr = do + s :: #{type time_t} <- #{peek struct timespec, tv_sec} ptr + ns :: #{type long} <- #{peek struct timespec, tv_nsec} ptr + return $ TimeSpec (fromIntegral s) (fromIntegral ns) + poke ptr ts = do + let s :: #{type time_t} = fromIntegral $ sec ts + ns :: #{type long} = fromIntegral $ nsec ts + #{poke struct timespec, tv_sec} ptr (s) + #{poke struct timespec, tv_nsec} ptr (ns) +#endif + +{-# INLINE getTimeWith #-} +getTimeWith :: (Ptr TimeSpec -> IO ()) 
-> IO AbsTime
+getTimeWith f = do
+    t <- alloca (\ptr -> f ptr >> peek ptr)
+    return $ AbsTime t
+
+#if HS_CLOCK_GHCJS
+
+foreign import ccall unsafe "time.h clock_gettime_js"
+    clock_gettime_js :: CInt -> Ptr TimeSpec -> IO CInt
+
+{-# INLINABLE getTime #-}
+getTime :: Clock -> IO AbsTime
+getTime clock =
+    getTimeWith (throwErrnoIfMinus1_ "clock_gettime" .
+        clock_gettime_js (clockToJSClockId clock))
+
+#elif HS_CLOCK_POSIX
+
+foreign import ccall unsafe "time.h clock_gettime"
+    clock_gettime :: #{type clockid_t} -> Ptr TimeSpec -> IO CInt
+
+{-# INLINABLE getTime #-}
+getTime :: Clock -> IO AbsTime
+getTime clock =
+    getTimeWith (throwErrnoIfMinus1_ "clock_gettime" .
+        clock_gettime (clockToPosixClockId clock))
+
+#elif HS_CLOCK_OSX
+
+-- XXX perform error checks inside c implementation
+foreign import ccall
+    clock_gettime_darwin :: #{type clock_id_t} -> Ptr TimeSpec -> IO ()
+
+{-# INLINABLE getTime #-}
+getTime :: Clock -> IO AbsTime
+getTime clock = getTimeWith $ clock_gettime_darwin (clockToOSXClockId clock)
+
+#elif HS_CLOCK_WINDOWS
+
+-- XXX perform error checks inside c implementation
+foreign import ccall clock_gettime_win32_monotonic :: Ptr TimeSpec -> IO ()
+foreign import ccall clock_gettime_win32_realtime :: Ptr TimeSpec -> IO ()
+foreign import ccall clock_gettime_win32_processtime :: Ptr TimeSpec -> IO ()
+foreign import ccall clock_gettime_win32_threadtime :: Ptr TimeSpec -> IO ()
+
+{-# INLINABLE getTime #-}
+getTime :: Clock -> IO AbsTime
+getTime Monotonic = getTimeWith $ clock_gettime_win32_monotonic
+getTime Realtime = getTimeWith $ clock_gettime_win32_realtime
+getTime ProcessCPUTime = getTimeWith $ clock_gettime_win32_processtime
+getTime ThreadCPUTime = getTimeWith $ clock_gettime_win32_threadtime
+#endif
diff --git a/src/Streamly/Time/Darwin.c b/src/Streamly/Time/Darwin.c
new file mode 100644
index 0000000..ab68acf
--- /dev/null
+++ b/src/Streamly/Time/Darwin.c
@@ -0,0 +1,36 @@
+/*
+ * Code taken from the Haskell "clock" package.
+ *
+ * Copyright (c) 2009-2012, Cetin Sert
+ * Copyright (c) 2010, Eugene Kirpichov
+ *
+ * OS X code was contributed by Gerolf Seitz on 2013-10-15.
+ */
+
+#ifdef __MACH__
+#include <time.h>
+#include <mach/clock.h>
+#include <mach/mach.h>
+
+void clock_gettime_darwin(clock_id_t clock, struct timespec *ts)
+{
+    clock_serv_t cclock;
+    mach_timespec_t mts;
+    host_get_clock_service(mach_host_self(), clock, &cclock);
+    clock_get_time(cclock, &mts);
+    mach_port_deallocate(mach_task_self(), cclock);
+    ts->tv_sec = mts.tv_sec;
+    ts->tv_nsec = mts.tv_nsec;
+}
+
+void clock_getres_darwin(clock_id_t clock, struct timespec *ts)
+{
+    clock_serv_t cclock;
+    int nsecs;
+    mach_msg_type_number_t count;
+    host_get_clock_service(mach_host_self(), clock, &cclock);
+    clock_get_attributes(cclock, CLOCK_GET_TIME_RES, (clock_attr_t)&nsecs, &count);
+    mach_port_deallocate(mach_task_self(), cclock);
+}
+
+#endif /* __MACH__ */
diff --git a/src/Streamly/Time/Units.hs b/src/Streamly/Time/Units.hs
new file mode 100644
index 0000000..ade70c1
--- /dev/null
+++ b/src/Streamly/Time/Units.hs
@@ -0,0 +1,471 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+
+#include "inline.hs"
+
+-- |
+-- Module      : Streamly.Time.Units
+-- Copyright   : (c) 2019 Harendra Kumar
+--
+-- License     : BSD3
+-- Maintainer  : harendra.kumar@gmail.com
+-- Stability   : experimental
+-- Portability : GHC
+
+module Streamly.Time.Units
+    (
+    -- * Time Unit Conversions
+      TimeUnit()
+    -- , TimeUnitWide()
+    , TimeUnit64()
+
+    -- * Time Units
+    , TimeSpec(..)
+    , NanoSecond64(..)
+    , MicroSecond64(..)
+    , MilliSecond64(..)
+    , showNanoSecond64
+
+    -- * Absolute times (using TimeSpec)
+    , AbsTime(..)
+ , toAbsTime + , fromAbsTime + + -- * Relative times (using TimeSpec) + , RelTime + , toRelTime + , fromRelTime + , diffAbsTime + , addToAbsTime + + -- * Relative times (using NanoSecond64) + , RelTime64 + , toRelTime64 + , fromRelTime64 + , diffAbsTime64 + , addToAbsTime64 + , showRelTime64 + ) +where + +import Data.Int +import Text.Printf (printf) + +------------------------------------------------------------------------------- +-- Some constants +------------------------------------------------------------------------------- + +{-# INLINE tenPower3 #-} +tenPower3 :: Int64 +tenPower3 = 1000 + +{-# INLINE tenPower6 #-} +tenPower6 :: Int64 +tenPower6 = 1000000 + +{-# INLINE tenPower9 #-} +tenPower9 :: Int64 +tenPower9 = 1000000000 + +------------------------------------------------------------------------------- +-- Time Unit Representations +------------------------------------------------------------------------------- + +-- XXX We should be able to use type families to use different represenations +-- for a unit. +-- +-- Second Rational +-- Second Double +-- Second Int64 +-- Second Integer +-- NanoSecond Int64 +-- ... + +-- Double or Fixed would be a much better representation so that we do not lose +-- information between conversions. However, for faster arithmetic operations +-- we use an 'Int64' here. When we need convservation of values we can use a +-- different system of units with a Fixed precision. + +------------------------------------------------------------------------------- +-- Integral Units +------------------------------------------------------------------------------- + +-- | An 'Int64' time representation with a nanosecond resolution. It can +-- represent time up to ~292 years. +newtype NanoSecond64 = NanoSecond64 Int64 + deriving ( Eq + , Read + , Show + , Enum + , Bounded + , Num + , Real + , Integral + , Ord + ) + +-- | An 'Int64' time representation with a microsecond resolution. +-- It can represent time up to ~292,000 years. +newtype MicroSecond64 = MicroSecond64 Int64 + deriving ( Eq + , Read + , Show + , Enum + , Bounded + , Num + , Real + , Integral + , Ord + ) + +-- | An 'Int64' time representation with a millisecond resolution. +-- It can represent time up to ~292 million years. +newtype MilliSecond64 = MilliSecond64 Int64 + deriving ( Eq + , Read + , Show + , Enum + , Bounded + , Num + , Real + , Integral + , Ord + ) + +------------------------------------------------------------------------------- +-- Fractional Units +------------------------------------------------------------------------------- + +------------------------------------------------------------------------------- +-- TimeSpec representation +------------------------------------------------------------------------------- + +-- A structure storing seconds and nanoseconds as 'Int64' is the simplest and +-- fastest way to store practically large quantities of time with efficient +-- arithmetic operations. If we store nanoseconds using 'Integer' it can store +-- practically unbounded quantities but it may not be as efficient to +-- manipulate in performance critical applications. XXX need to measure the +-- performance. +-- +-- | Data type to represent practically large quantities of time efficiently. +-- It can represent time up to ~292 billion years at nanosecond resolution. +data TimeSpec = TimeSpec + { sec :: {-# UNPACK #-} !Int64 -- ^ seconds + , nsec :: {-# UNPACK #-} !Int64 -- ^ nanoseconds + } deriving (Eq, Read, Show) + +-- We assume that nsec is always less than 10^9. 
When TimeSpec is negative then +-- both sec and nsec are negative. +instance Ord TimeSpec where + compare (TimeSpec s1 ns1) (TimeSpec s2 ns2) = + if s1 == s2 + then compare ns1 ns2 + else compare s1 s2 + +-- make sure nsec is less than 10^9 +{-# INLINE addWithOverflow #-} +addWithOverflow :: TimeSpec -> TimeSpec -> TimeSpec +addWithOverflow (TimeSpec s1 ns1) (TimeSpec s2 ns2) = + let nsum = ns1 + ns2 + (s', ns) = if (nsum > tenPower9 || nsum < negate tenPower9) + then nsum `divMod` tenPower9 + else (0, nsum) + in TimeSpec (s1 + s2 + s') ns + +-- make sure both sec and nsec have the same sign +{-# INLINE adjustSign #-} +adjustSign :: TimeSpec -> TimeSpec +adjustSign (t@(TimeSpec s ns)) = + if (s > 0 && ns < 0) + then TimeSpec (s - 1) (ns + tenPower9) + else if (s < 0 && ns > 0) + then TimeSpec (s + 1) (ns - tenPower9) + else t + +{-# INLINE timeSpecToInteger #-} +timeSpecToInteger :: TimeSpec -> Integer +timeSpecToInteger (TimeSpec s ns) = toInteger $ s * tenPower9 + ns + +instance Num TimeSpec where + {-# INLINE (+) #-} + t1 + t2 = adjustSign (addWithOverflow t1 t2) + + -- XXX will this be more optimal if imlemented without "negate"? + {-# INLINE (-) #-} + t1 - t2 = t1 + (negate t2) + t1 * t2 = fromInteger $ timeSpecToInteger t1 * timeSpecToInteger t2 + + {-# INLINE negate #-} + negate (TimeSpec s ns) = TimeSpec (negate s) (negate ns) + {-# INLINE abs #-} + abs (TimeSpec s ns) = TimeSpec (abs s) (abs ns) + {-# INLINE signum #-} + signum (TimeSpec s ns) | s == 0 = TimeSpec (signum ns) 0 + | otherwise = TimeSpec (signum s) 0 + -- This is fromNanoSecond64 Integer + {-# INLINE fromInteger #-} + fromInteger nanosec = TimeSpec (fromInteger s) (fromInteger ns) + where (s, ns) = nanosec `divMod` toInteger tenPower9 + +------------------------------------------------------------------------------- +-- Time unit conversions +------------------------------------------------------------------------------- + +-- TODO: compare whether using TimeSpec instead of Integer provides significant +-- performance boost. If not then we can just use Integer nanoseconds and get +-- rid of TimeUnitWide. +-- +-- | A type class for converting between time units using 'Integer' as the +-- intermediate and the widest representation with a nanosecond resolution. +-- This system of units can represent arbitrarily large times but provides +-- least efficient arithmetic operations due to 'Integer' arithmetic. +-- +-- NOTE: Converting to and from units may truncate the value depending on the +-- original value and the size and resolution of the destination unit. +{- +class TimeUnitWide a where + toTimeInteger :: a -> Integer + fromTimeInteger :: Integer -> a +-} + +-- | A type class for converting between units of time using 'TimeSpec' as the +-- intermediate representation. This system of units can represent up to ~292 +-- billion years at nanosecond resolution with reasonably efficient arithmetic +-- operations. +-- +-- NOTE: Converting to and from units may truncate the value depending on the +-- original value and the size and resolution of the destination unit. +class TimeUnit a where + toTimeSpec :: a -> TimeSpec + fromTimeSpec :: TimeSpec -> a + +-- XXX we can use a fromNanoSecond64 for conversion with overflow check and +-- fromNanoSecond64Unsafe for conversion without overflow check. +-- +-- | A type class for converting between units of time using 'Int64' as the +-- intermediate representation with a nanosecond resolution. 
This system of +-- units can represent up to ~292 years at nanosecond resolution with fast +-- arithmetic operations. +-- +-- NOTE: Converting to and from units may truncate the value depending on the +-- original value and the size and resolution of the destination unit. +class TimeUnit64 a where + toNanoSecond64 :: a -> NanoSecond64 + fromNanoSecond64 :: NanoSecond64 -> a + +------------------------------------------------------------------------------- +-- Time units +------------------------------------------------------------------------------- + +instance TimeUnit TimeSpec where + toTimeSpec = id + fromTimeSpec = id + +instance TimeUnit NanoSecond64 where + {-# INLINE toTimeSpec #-} + toTimeSpec (NanoSecond64 t) = TimeSpec s ns + where (s, ns) = t `divMod` tenPower9 + + {-# INLINE fromTimeSpec #-} + fromTimeSpec (TimeSpec s ns) = + NanoSecond64 $ s * tenPower9 + ns + +instance TimeUnit64 NanoSecond64 where + {-# INLINE toNanoSecond64 #-} + toNanoSecond64 = id + + {-# INLINE fromNanoSecond64 #-} + fromNanoSecond64 = id + +instance TimeUnit MicroSecond64 where + {-# INLINE toTimeSpec #-} + toTimeSpec (MicroSecond64 t) = TimeSpec s us + where (s, us) = t `divMod` tenPower6 + + {-# INLINE fromTimeSpec #-} + fromTimeSpec (TimeSpec s us) = + MicroSecond64 $ s * tenPower6 + us + +instance TimeUnit64 MicroSecond64 where + {-# INLINE toNanoSecond64 #-} + toNanoSecond64 (MicroSecond64 us) = NanoSecond64 $ us * tenPower3 + + {-# INLINE fromNanoSecond64 #-} + fromNanoSecond64 (NanoSecond64 ns) = MicroSecond64 $ ns `div` tenPower3 + +instance TimeUnit MilliSecond64 where + {-# INLINE toTimeSpec #-} + toTimeSpec (MilliSecond64 t) = TimeSpec s us + where (s, us) = t `divMod` tenPower3 + + {-# INLINE fromTimeSpec #-} + fromTimeSpec (TimeSpec s us) = + MilliSecond64 $ s * tenPower3 + us + +instance TimeUnit64 MilliSecond64 where + {-# INLINE toNanoSecond64 #-} + toNanoSecond64 (MilliSecond64 us) = NanoSecond64 $ us * tenPower6 + + {-# INLINE fromNanoSecond64 #-} + fromNanoSecond64 (NanoSecond64 ns) = MilliSecond64 $ ns `div` tenPower6 + +------------------------------------------------------------------------------- +-- Absolute time +------------------------------------------------------------------------------- + +-- | Absolute times are relative to a predefined epoch in time. 'AbsTime' +-- represents times using 'TimeSpec' which can represent times up to ~292 +-- billion years at a nanosecond resolution. +newtype AbsTime = AbsTime TimeSpec + deriving (Eq, Ord, Show) + +-- | Convert a 'TimeUnit' to an absolute time. +{-# INLINE_NORMAL toAbsTime #-} +toAbsTime :: TimeUnit a => a -> AbsTime +toAbsTime = AbsTime . toTimeSpec + +-- | Convert absolute time to a 'TimeUnit'. +{-# INLINE_NORMAL fromAbsTime #-} +fromAbsTime :: TimeUnit a => AbsTime -> a +fromAbsTime (AbsTime t) = fromTimeSpec t + +-- XXX We can also write rewrite rules to simplify divisions multiplications +-- and additions when manipulating units. Though, that might get simplified at +-- the assembly (llvm) level as well. Note to/from conversions may be lossy and +-- therefore this equation may not hold, but that's ok. +{-# RULES "fromAbsTime/toAbsTime" forall a. toAbsTime (fromAbsTime a) = a #-} +{-# RULES "toAbsTime/fromAbsTime" forall a. 
fromAbsTime (toAbsTime a) = a #-}
+
+-------------------------------------------------------------------------------
+-- Relative time using NanoSecond64 as the underlying representation
+-------------------------------------------------------------------------------
+
+-- We use a separate type to represent relative time for safety and speed.
+-- RelTime has a Num instance, absolute time doesn't. Relative times are
+-- usually short, and for our purposes an Int64 count of nanoseconds, which
+-- can hold about 292 years, is enough. It is also faster to manipulate. We do
+-- not check for overflows during manipulations so use it only when you know
+-- the time cannot be too big. If you need a bigger relative time
+-- representation then use the TimeSpec based RelTime defined below.
+
+-- | Relative times are relative to some arbitrary point of time. Unlike
+-- 'AbsTime' they are not relative to a predefined epoch.
+newtype RelTime64 = RelTime64 NanoSecond64
+    deriving ( Eq
+             , Read
+             , Show
+             , Enum
+             , Bounded
+             , Num
+             , Real
+             , Integral
+             , Ord
+             )
+
+-- | Convert a 'TimeUnit' to a relative time.
+{-# INLINE_NORMAL toRelTime64 #-}
+toRelTime64 :: TimeUnit64 a => a -> RelTime64
+toRelTime64 = RelTime64 . toNanoSecond64
+
+-- | Convert relative time to a 'TimeUnit'.
+{-# INLINE_NORMAL fromRelTime64 #-}
+fromRelTime64 :: TimeUnit64 a => RelTime64 -> a
+fromRelTime64 (RelTime64 t) = fromNanoSecond64 t
+
+{-# RULES "fromRelTime64/toRelTime64" forall a .
+    toRelTime64 (fromRelTime64 a) = a #-}
+
+{-# RULES "toRelTime64/fromRelTime64" forall a .
+    fromRelTime64 (toRelTime64 a) = a #-}
+
+-- | Difference between two absolute points of time.
+{-# INLINE diffAbsTime64 #-}
+diffAbsTime64 :: AbsTime -> AbsTime -> RelTime64
+diffAbsTime64 (AbsTime (TimeSpec s1 ns1)) (AbsTime (TimeSpec s2 ns2)) =
+    RelTime64 $ NanoSecond64 $ ((s1 - s2) * tenPower9) + (ns1 - ns2)
+
+{-# INLINE addToAbsTime64 #-}
+addToAbsTime64 :: AbsTime -> RelTime64 -> AbsTime
+addToAbsTime64 (AbsTime (TimeSpec s1 ns1)) (RelTime64 (NanoSecond64 ns2)) =
+    AbsTime $ TimeSpec (s1 + s) ns
+    where (s, ns) = (ns1 + ns2) `divMod` tenPower9
+
+-------------------------------------------------------------------------------
+-- Relative time using TimeSpec as the underlying representation
+-------------------------------------------------------------------------------
+
+newtype RelTime = RelTime TimeSpec
+    deriving ( Eq
+             , Read
+             , Show
+             -- , Enum
+             -- , Bounded
+             , Num
+             -- , Real
+             -- , Integral
+             , Ord
+             )
+
+{-# INLINE_NORMAL toRelTime #-}
+toRelTime :: TimeUnit a => a -> RelTime
+toRelTime = RelTime . toTimeSpec
+
+{-# INLINE_NORMAL fromRelTime #-}
+fromRelTime :: TimeUnit a => RelTime -> a
+fromRelTime (RelTime t) = fromTimeSpec t
+
+{-# RULES "fromRelTime/toRelTime" forall a. toRelTime (fromRelTime a) = a #-}
+{-# RULES "toRelTime/fromRelTime" forall a. fromRelTime (toRelTime a) = a #-}
+
+-- XXX rename to diffAbsTimes?
+{-# INLINE diffAbsTime #-}
+diffAbsTime :: AbsTime -> AbsTime -> RelTime
+diffAbsTime (AbsTime t1) (AbsTime t2) = RelTime (t1 - t2)
+
+{-# INLINE addToAbsTime #-}
+addToAbsTime :: AbsTime -> RelTime -> AbsTime
+addToAbsTime (AbsTime t1) (RelTime t2) = AbsTime $ t1 + t2
+
+-------------------------------------------------------------------------------
+-- Formatting and printing
+-------------------------------------------------------------------------------
+
+-- | Convert nanoseconds to a string showing time in an appropriate unit.
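A small usage sketch of the clock and time-unit APIs defined above. Streamly.Time.Clock and Streamly.Time.Units are internal (other-modules in the cabal file), so this assumes code living inside the package; only the Monotonic and Realtime clocks are available unconditionally, the remaining Clock constructors are guarded by the platform CPP shown earlier. The timeIt helper is illustrative, not something added by this patch.

    import Streamly.Time.Clock (Clock(Monotonic), getTime)
    import Streamly.Time.Units (MilliSecond64, diffAbsTime64, fromRelTime64)

    -- Time an IO action using the monotonic clock; the elapsed time is
    -- returned in milliseconds (any TimeUnit64 instance would do).
    timeIt :: IO a -> IO (a, MilliSecond64)
    timeIt action = do
        t1 <- getTime Monotonic
        r  <- action
        t2 <- getTime Monotonic
        -- diffAbsTime64 yields a RelTime64 (Int64 nanoseconds), which
        -- fromRelTime64 converts, truncating, to the requested unit.
        return (r, fromRelTime64 (diffAbsTime64 t2 t1))

For a human readable duration, showRelTime64 (defined at the end of this module) can be used instead of converting to a fixed unit.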
+showNanoSecond64 :: NanoSecond64 -> String +showNanoSecond64 time@(NanoSecond64 ns) + | time < 0 = '-' : showNanoSecond64 (-time) + | ns < 1000 = fromIntegral ns `with` "ns" +#ifdef mingw32_HOST_OS + | ns < 1000000 = (fromIntegral ns / 1000) `with` "us" +#else + | ns < 1000000 = (fromIntegral ns / 1000) `with` "μs" +#endif + | ns < 1000000000 = (fromIntegral ns / 1000000) `with` "ms" + | ns < (60 * 1000000000) = (fromIntegral ns / 1000000000) `with` "s" + | ns < (60 * 60 * 1000000000) = + (fromIntegral ns / (60 * 1000000000)) `with` "min" + | ns < (24 * 60 * 60 * 1000000000) = + (fromIntegral ns / (60 * 60 * 1000000000)) `with` "hr" + | ns < (365 * 24 * 60 * 60 * 1000000000) = + (fromIntegral ns / (24 * 60 * 60 * 1000000000)) `with` "days" + | otherwise = + (fromIntegral ns / (365 * 24 * 60 * 60 * 1000000000)) `with` "years" + where with (t :: Double) (u :: String) + | t >= 1e9 = printf "%.4g %s" t u + | t >= 1e3 = printf "%.0f %s" t u + | t >= 1e2 = printf "%.1f %s" t u + | t >= 1e1 = printf "%.2f %s" t u + | otherwise = printf "%.3f %s" t u + +-- In general we should be able to show the time in a specified unit, if we +-- omit the unit we can show it in an automatically chosen one. +{- +data UnitName = + Nano + | Micro + | Milli + | Sec +-} + +showRelTime64 :: RelTime64 -> String +showRelTime64 = showNanoSecond64 . fromRelTime64 diff --git a/src/Streamly/Time/Windows.c b/src/Streamly/Time/Windows.c new file mode 100644 index 0000000..3184aaa --- /dev/null +++ b/src/Streamly/Time/Windows.c @@ -0,0 +1,115 @@ +/* + * Code taken from the Haskell "clock" package. + * + * Copyright (c) 2009-2012, Cetin Sert + * Copyright (c) 2010, Eugene Kirpichov + */ + +#ifdef _WIN32 +#include <windows.h> + +#if defined(_MSC_VER) || defined(_MSC_EXTENSIONS) + #define U64(x) x##Ui64 +#else + #define U64(x) x##ULL +#endif + +#define DELTA_EPOCH_IN_100NS U64(116444736000000000) + +static long ticks_to_nanos(LONGLONG subsecond_time, LONGLONG frequency) +{ + return (long)((1e9 * subsecond_time) / frequency); +} + +static ULONGLONG to_quad_100ns(FILETIME ft) +{ + ULARGE_INTEGER li; + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + return li.QuadPart; +} + +static void to_timespec_from_100ns(ULONGLONG t_100ns, long long *t) +{ + t[0] = (long)(t_100ns / 10000000UL); + t[1] = 100*(long)(t_100ns % 10000000UL); +} + +void clock_gettime_win32_monotonic(long long* t) +{ + LARGE_INTEGER time; + LARGE_INTEGER frequency; + QueryPerformanceCounter(&time); + QueryPerformanceFrequency(&frequency); + // seconds + t[0] = time.QuadPart / frequency.QuadPart; + // nanos = + t[1] = ticks_to_nanos(time.QuadPart % frequency.QuadPart, frequency.QuadPart); +} + +void clock_gettime_win32_realtime(long long* t) +{ + FILETIME ft; + ULONGLONG tmp; + + GetSystemTimeAsFileTime(&ft); + + tmp = to_quad_100ns(ft); + tmp -= DELTA_EPOCH_IN_100NS; + + to_timespec_from_100ns(tmp, t); +} + +void clock_gettime_win32_processtime(long long* t) +{ + FILETIME creation_time, exit_time, kernel_time, user_time; + ULONGLONG time; + + GetProcessTimes(GetCurrentProcess(), &creation_time, &exit_time, &kernel_time, &user_time); + // Both kernel and user, acc. 
to http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap03.html#tag_03_117 + + time = to_quad_100ns(user_time) + to_quad_100ns(kernel_time); + to_timespec_from_100ns(time, t); +} + +void clock_gettime_win32_threadtime(long long* t) +{ + FILETIME creation_time, exit_time, kernel_time, user_time; + ULONGLONG time; + + GetThreadTimes(GetCurrentThread(), &creation_time, &exit_time, &kernel_time, &user_time); + // Both kernel and user, acc. to http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap03.html#tag_03_117 + + time = to_quad_100ns(user_time) + to_quad_100ns(kernel_time); + to_timespec_from_100ns(time, t); +} + +void clock_getres_win32_monotonic(long long* t) +{ + LARGE_INTEGER frequency; + QueryPerformanceFrequency(&frequency); + + ULONGLONG resolution = U64(1000000000)/frequency.QuadPart; + t[0] = resolution / U64(1000000000); + t[1] = resolution % U64(1000000000); +} + +void clock_getres_win32_realtime(long long* t) +{ + t[0] = 0; + t[1] = 100; +} + +void clock_getres_win32_processtime(long long* t) +{ + t[0] = 0; + t[1] = 100; +} + +void clock_getres_win32_threadtime(long long* t) +{ + t[0] = 0; + t[1] = 100; +} + +#endif /* _WIN32 */ diff --git a/src/Streamly/Time/config.h.in b/src/Streamly/Time/config.h.in new file mode 100644 index 0000000..f2cea8b --- /dev/null +++ b/src/Streamly/Time/config.h.in @@ -0,0 +1,55 @@ +/* src/Streamly/Time/config.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the `clock_gettime' function. */ +#undef HAVE_CLOCK_GETTIME + +/* Define to 1 if you have the <inttypes.h> header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the <memory.h> header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the <stdint.h> header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the <stdlib.h> header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the <strings.h> header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the <string.h> header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the <sys/stat.h> header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the <sys/types.h> header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the <time.h> header file. */ +#undef HAVE_TIME_H + +/* Define to 1 if you have the <unistd.h> header file. */ +#undef HAVE_UNISTD_H + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS diff --git a/src/Streamly/Tutorial.hs b/src/Streamly/Tutorial.hs index cd6a935..20584ff 100644 --- a/src/Streamly/Tutorial.hs +++ b/src/Streamly/Tutorial.hs @@ -1441,9 +1441,9 @@ import Control.Monad.Trans.Class (MonadTrans (lift)) -- and operators instead of the ugly pragmas. -- -- For more concurrent programming examples see, --- <src/examples/ListDir.hs ListDir.hs>, --- <src/examples/MergeSort.hs MergeSort.hs> and --- <src/examples/SearchQuery.hs SearchQuery.hs> in the examples directory. 
+-- <examples/ListDir.hs ListDir.hs>, +-- <examples/MergeSort.hs MergeSort.hs> and +-- <examples/SearchQuery.hs SearchQuery.hs> in the examples directory. -- $reactive -- @@ -1518,13 +1518,13 @@ import Control.Monad.Trans.Class (MonadTrans (lift)) -- @ -- -- You can also find the source of this example in the examples directory as --- <src/examples/AcidRain.hs AcidRain.hs>. It has been adapted from Gabriel's +-- <examples/AcidRain.hs AcidRain.hs>. It has been adapted from Gabriel's -- <https://hackage.haskell.org/package/pipes-concurrency-2.0.8/docs/Pipes-Concurrent-Tutorial.html pipes-concurrency> -- package. -- This is much simpler compared to the pipes version because of the builtin -- concurrency in streamly. You can also find a SDL based reactive programming -- example adapted from Yampa in --- <src/examples/CirclingSquare.hs CirclingSquare.hs>. +-- <examples/CirclingSquare.hs CirclingSquare.hs>. -- $performance -- @@ -1605,7 +1605,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift)) -- import qualified Streaming.Prelude as SG -- -- -- | streaming to streamly --- fromStreaming :: (IsStream t, Monad m) => SG.Stream (SG.Of a) m r -> t m a +-- fromStreaming :: (IsStream t, MonadAsync m) => SG.Stream (SG.Of a) m r -> t m a -- fromStreaming = S.unfoldrM SG.uncons -- -- -- | streamly to streaming diff --git a/stack-7.10.yaml b/stack-7.10.yaml index f0449ab..c071c0c 100644 --- a/stack-7.10.yaml +++ b/stack-7.10.yaml @@ -4,8 +4,6 @@ packages: extra-deps: - QuickCheck-2.10 - lockfree-queue-0.2.3.1 - - simple-conduit-0.4.0 - - transient-0.5.9.2 - http-conduit-2.2.2 - http-client-0.5.0 - http-client-tls-0.3.0 diff --git a/stack-8.0.yaml b/stack-8.0.yaml index ea2b290..4b84dac 100644 --- a/stack-8.0.yaml +++ b/stack-8.0.yaml @@ -4,7 +4,6 @@ packages: extra-deps: - QuickCheck-2.10 - lockfree-queue-0.2.3.1 - - simple-conduit-0.6.0 - SDL-0.6.5.1 - gauge-0.2.4 - basement-0.0.4 @@ -1,16 +1,12 @@ -resolver: lts-12.11 +resolver: lts-13.13 packages: - '.' 
extra-deps: - SDL-0.6.6.0 - - gauge-0.2.4 - Chart-1.9 - Chart-diagrams-1.9 - SVGFonts-1.6.0.3 - bench-show-0.2.2 - - statistics-0.15.0.0 - - dense-linear-algebra-0.1.0.0 - - math-functions-0.3.0.2 flags: {} extra-package-dbs: [] diff --git a/streamly.cabal b/streamly.cabal index 1d9dedb..49309d4 100644 --- a/streamly.cabal +++ b/streamly.cabal @@ -1,5 +1,5 @@ name: streamly -version: 0.6.0 +version: 0.6.1 synopsis: Beautiful Streaming, Concurrent and Reactive Composition description: Streamly, short for streaming concurrently, provides monadic streams, with a @@ -71,13 +71,13 @@ license-file: LICENSE tested-with: GHC==7.10.3 , GHC==8.0.2 , GHC==8.4.4 - , GHC==8.6.3 + , GHC==8.6.4 author: Harendra Kumar maintainer: harendra.kumar@gmail.com copyright: 2017 Harendra Kumar category: Control, Concurrency, Streaming, Reactivity stability: Experimental -build-type: Simple +build-type: Configure cabal-version: >= 1.10 extra-source-files: @@ -91,6 +91,15 @@ extra-source-files: stack.yaml src/Streamly/Streams/Instances.hs src/Streamly/Streams/inline.hs + configure.ac + configure + src/Streamly/Time/config.h.in + +extra-tmp-files: + config.log + config.status + autom4te.cache + src/Streamly/Time/config.h source-repository head type: git @@ -101,6 +110,11 @@ flag dev manual: True default: False +flag no-charts + description: Disable chart generation + manual: True + default: False + flag no-fusion description: Disable rewrite rules manual: True @@ -126,8 +140,18 @@ flag examples-sdl ------------------------------------------------------------------------------- library + js-sources: jsbits/clock.js + include-dirs: src/Streamly/Time + , src/Streamly/Streams + if os(windows) + c-sources: src/Streamly/Time/Windows.c + if os(darwin) + c-sources: src/Streamly/Time/Darwin.c hs-source-dirs: src - other-modules: Streamly.SVar + other-modules: Streamly.Atomics + , Streamly.SVar + , Streamly.Time.Units + , Streamly.Time.Clock -- Base streams , Streamly.Streams.StreamK.Type @@ -186,7 +210,6 @@ library -- concurrency , atomic-primops >= 0.8 && < 0.9 , lockfree-queue >= 0.2.3 && < 0.3 - , clock >= 0.7.1 && < 0.8 -- transfomers , exceptions >= 0.8 && < 0.11 @@ -208,6 +231,7 @@ library test-suite test type: exitcode-stdio-1.0 main-is: Main.hs + js-sources: jsbits/clock.js hs-source-dirs: test ghc-options: -O0 -Wall -threaded -with-rtsopts=-N -fno-ignore-asserts if flag(dev) @@ -285,6 +309,7 @@ test-suite test test-suite properties type: exitcode-stdio-1.0 main-is: Prop.hs + js-sources: jsbits/clock.js hs-source-dirs: test ghc-options: -fno-ignore-asserts -Wall -O0 -threaded -with-rtsopts=-N if flag(dev) @@ -303,7 +328,7 @@ test-suite properties build-depends: streamly , base >= 4.8 && < 5 - , QuickCheck >= 2.10 && < 2.13 + , QuickCheck >= 2.10 && < 2.14 , hspec >= 2.0 && < 3 if impl(ghc < 8.0) build-depends: @@ -314,6 +339,7 @@ test-suite maxrate type: exitcode-stdio-1.0 default-language: Haskell2010 main-is: MaxRate.hs + js-sources: jsbits/clock.js hs-source-dirs: test ghc-options: -fno-ignore-asserts -O2 -Wall -threaded -with-rtsopts=-N if flag(dev) @@ -481,9 +507,18 @@ benchmark nested benchmark base type: exitcode-stdio-1.0 + include-dirs: src/Streamly/Time + , src/Streamly/Streams + if os(windows) + c-sources: src/Streamly/Time/Windows.c + if os(darwin) + c-sources: src/Streamly/Time/Darwin.c hs-source-dirs: benchmark, src main-is: BaseStreams.hs - other-modules: Streamly.SVar + other-modules: Streamly.Atomics + , Streamly.Time.Units + , Streamly.Time.Clock + , Streamly.SVar , Streamly.Streams.StreamK.Type , 
Streamly.Streams.StreamK , Streamly.Streams.StreamD.Type @@ -524,7 +559,6 @@ benchmark base -- concurrency , atomic-primops >= 0.8 && < 0.9 , lockfree-queue >= 0.2.3 && < 0.3 - , clock >= 0.7.1 && < 0.8 , exceptions >= 0.8 && < 0.11 , monad-control >= 1.0 && < 2 @@ -540,8 +574,17 @@ benchmark base executable nano-bench hs-source-dirs: benchmark, src + include-dirs: src/Streamly/Time + , src/Streamly/Streams + if os(windows) + c-sources: src/Streamly/Time/Windows.c + if os(darwin) + c-sources: src/Streamly/Time/Darwin.c main-is: NanoBenchmarks.hs - other-modules: Streamly.SVar + other-modules: Streamly.Atomics + , Streamly.Time.Units + , Streamly.Time.Clock + , Streamly.SVar , Streamly.Streams.StreamK.Type , Streamly.Streams.StreamK , Streamly.Streams.StreamD.Type @@ -562,7 +605,6 @@ executable nano-bench -- concurrency , atomic-primops >= 0.8 && < 0.9 , lockfree-queue >= 0.2.3 && < 0.3 - , clock >= 0.7.1 && < 0.8 , exceptions >= 0.8 && < 0.11 , monad-control >= 1.0 && < 2 @@ -591,7 +633,7 @@ executable chart default-language: Haskell2010 hs-source-dirs: benchmark main-is: Chart.hs - if flag(dev) + if flag(dev) && !flag(no-charts) && !impl(ghcjs) buildable: True build-Depends: base >= 4.8 && < 5 @@ -609,7 +651,8 @@ executable SearchQuery default-language: Haskell2010 main-is: SearchQuery.hs hs-source-dirs: examples - if flag(examples) || flag(examples-sdl) + ghc-options: -Wall + if (flag(examples) || flag(examples-sdl)) && !impl(ghcjs) buildable: True build-Depends: streamly @@ -622,6 +665,7 @@ executable ListDir default-language: Haskell2010 main-is: ListDir.hs hs-source-dirs: examples + ghc-options: -Wall if flag(examples) || flag(examples-sdl) buildable: True build-Depends: @@ -638,6 +682,7 @@ executable MergeSort default-language: Haskell2010 main-is: MergeSort.hs hs-source-dirs: examples + ghc-options: -Wall if flag(examples) || flag(examples-sdl) buildable: True build-Depends: @@ -651,6 +696,7 @@ executable AcidRain default-language: Haskell2010 main-is: AcidRain.hs hs-source-dirs: examples + ghc-options: -Wall if flag(examples) || flag(examples-sdl) buildable: True build-Depends: @@ -668,6 +714,7 @@ executable CirclingSquare default-language: Haskell2010 main-is: CirclingSquare.hs hs-source-dirs: examples + ghc-options: -Wall if flag(examples-sdl) buildable: True build-Depends: @@ -681,6 +728,7 @@ executable ControlFlow default-language: Haskell2010 main-is: ControlFlow.hs hs-source-dirs: examples + ghc-options: -Wall if flag(examples) || flag(examples-sdl) buildable: True build-Depends: diff --git a/test/MaxRate.hs b/test/MaxRate.hs index dc4c4f6..d66061b 100644 --- a/test/MaxRate.hs +++ b/test/MaxRate.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} import Streamly @@ -25,41 +26,66 @@ toMicroSecs x = x * 10^(6 :: Int) measureRate' :: IsStream t => String -> (t IO Int -> SerialT IO Int) - -> Double + -> Int -- buffers + -> Int -- threads + -> Either Double Int -- either rate or count of actions -> Int -> (Double, Double) -> (Double, Double) -> Spec -measureRate' desc t rval consumerDelay producerDelay dur = - it (desc <> " rate: " <> show rval +measureRate' desc t buffers threads rval consumerDelay producerDelay expectedRange = do + + let threadAction = + case rval of + Left r -> S.take (round $ 10 * r) . 
S.repeatM + Right n -> S.replicateM n + + rateDesc = case rval of + Left r -> " rate: " <> show r + Right n -> " count: " <> show n + + it (desc <> rateDesc + <> " buffers: " <> show buffers + <> " threads: " <> show threads <> ", consumer latency: " <> show consumerDelay <> ", producer latency: " <> show producerDelay) - $ durationShouldBe dur $ - runStream - $ (if consumerDelay > 0 - then S.mapM $ \x -> - threadDelay (toMicroSecs consumerDelay) >> return x - else id) - $ t - $ maxBuffer (-1) - $ maxThreads (-1) - $ avgRate rval - $ S.take (round $ rval * 10) - $ S.repeatM $ do - let (t1, t2) = producerDelay - r <- if t1 == t2 - then return $ round $ toMicroSecs t1 - else randomRIO ( round $ toMicroSecs t1 - , round $ toMicroSecs t2) - when (r > 0) $ -- do - -- t1 <- getTime Monotonic - threadDelay r - -- t2 <- getTime Monotonic - -- let delta = fromIntegral (toNanoSecs (t2 - t1)) / 1000000000 - -- putStrLn $ "delay took: " <> show delta - -- when (delta > 2) $ do - -- putStrLn $ "delay took high: " <> show delta - return 1 + $ durationShouldBe expectedRange $ + runStream + $ (if consumerDelay > 0 + then S.mapM $ \x -> + threadDelay (toMicroSecs consumerDelay) >> return x + else id) + $ t + $ maxBuffer buffers + $ maxThreads threads + $ (case rval of {Left r -> avgRate r; Right _ -> rate Nothing}) + $ threadAction $ do + let (t1, t2) = producerDelay + r <- if t1 == t2 + then return $ round $ toMicroSecs t1 + else randomRIO ( round $ toMicroSecs t1 + , round $ toMicroSecs t2) + when (r > 0) $ -- do + -- t1 <- getTime Monotonic + threadDelay r + -- t2 <- getTime Monotonic + -- let delta = fromIntegral (toNanoSecs (t2 - t1)) / 1000000000 + -- putStrLn $ "delay took: " <> show delta + -- when (delta > 2) $ do + -- putStrLn $ "delay took high: " <> show delta + return 1 + +measureRateVariable :: IsStream t + => String + -> (t IO Int -> SerialT IO Int) + -> Double + -> Int + -> (Double, Double) + -> (Double, Double) + -> Spec +measureRateVariable desc t rval consumerDelay producerDelay dur = + measureRate' desc t (-1) (-1) (Left rval) + consumerDelay producerDelay dur measureRate :: IsStream t => String @@ -71,47 +97,119 @@ measureRate :: IsStream t -> Spec measureRate desc t rval consumerDelay producerDelay dur = let d = fromIntegral producerDelay - in measureRate' desc t rval consumerDelay (d, d) dur + in measureRateVariable desc t rval consumerDelay (d, d) dur + +measureThreads :: IsStream t + => String + -> (t IO Int -> SerialT IO Int) + -> Int -- threads + -> Int -- count of actions + -> Spec +measureThreads desc t threads count = do + let expectedTime = + if threads < 0 + then 1.0 + else fromIntegral count / fromIntegral threads + duration = (expectedTime * 0.9, expectedTime * 1.1) + measureRate' desc t (-1) threads (Right count) 0 (1,1) duration + +measureBuffers :: IsStream t + => String + -> (t IO Int -> SerialT IO Int) + -> Int -- buffers + -> Int -- count of actions + -> Spec +measureBuffers desc t buffers count = do + let expectedTime = + if buffers < 0 + then 1.0 + else fromIntegral count / fromIntegral buffers + duration = (expectedTime * 0.9, expectedTime * 1.1) + measureRate' desc t buffers (-1) (Right count) 0 (1,1) duration main :: IO () main = hspec $ do + + describe "maxBuffers" $ do + measureBuffers "asyncly" asyncly (-1) 5 + -- XXX this test fails due to a known issue + -- measureBuffers "maxBuffers" asyncly 1 5 + measureBuffers "asyncly" asyncly 5 5 + + describe "maxThreads" $ do + measureThreads "asyncly" asyncly (-1) 5 + measureThreads "asyncly" asyncly 1 5 + 
measureThreads "asyncly" asyncly 5 5 + + measureThreads "aheadly" aheadly (-1) 5 + measureThreads "aheadly" aheadly 1 5 + measureThreads "aheadly" aheadly 5 5 + let range = (8,12) -- Note that because after the last yield we don't wait, the last period -- will be effectively shorter. This becomes significant when the rates are -- lower (1 or lower). For rate 1 we lose 1 second in the end and for rate -- 10 0.1 second. - let rates = [1, 10, 100, 1000, 10000, 100000, 1000000] + let rates = [1, 10, 100, 1000, 10000 +#ifndef __GHCJS__ + , 100000, 1000000 +#endif + ] in describe "asyncly no consumer delay no producer delay" $ forM_ rates (\r -> measureRate "asyncly" asyncly r 0 0 range) -- XXX try staggering the dispatches to achieve higher rates - let rates = [1, 10, 100, 1000, 10000, 25000] + let rates = [1, 10, 100, 1000 +#ifndef __GHCJS__ + , 10000, 25000 +#endif + ] in describe "asyncly no consumer delay and 1 sec producer delay" $ forM_ rates (\r -> measureRate "asyncly" asyncly r 0 1 range) -- At lower rates (1/10) this is likely to vary quite a bit depending on -- the spread of random producer latencies generated. - let rates = [1, 10, 100, 1000, 10000, 25000] + let rates = [1, 10, 100, 1000 +#ifndef __GHCJS__ + , 10000, 25000 +#endif + ] in describe "asyncly no consumer delay and variable producer delay" $ forM_ rates $ \r -> - measureRate' "asyncly" asyncly r 0 (0.1, 3) range + measureRateVariable "asyncly" asyncly r 0 (0.1, 3) range - let rates = [1, 10, 100, 1000, 10000, 100000, 1000000] + let rates = [1, 10, 100, 1000, 10000 +#ifndef __GHCJS__ + , 100000, 1000000 +#endif + ] in describe "wAsyncly no consumer delay no producer delay" $ forM_ rates (\r -> measureRate "wAsyncly" wAsyncly r 0 0 range) - let rates = [1, 10, 100, 1000, 10000, 25000] + let rates = [1, 10, 100, 1000 +#ifndef __GHCJS__ + , 10000, 25000 +#endif + ] in describe "wAsyncly no consumer delay and 1 sec producer delay" $ forM_ rates (\r -> measureRate "wAsyncly" wAsyncly r 0 1 range) - let rates = [1, 10, 100, 1000, 10000, 100000, 1000000] + let rates = [1, 10, 100, 1000, 10000 +#ifndef __GHCJS__ + , 100000, 1000000 +#endif + ] in describe "aheadly no consumer delay no producer delay" $ forM_ rates (\r -> measureRate "aheadly" aheadly r 0 0 range) -- XXX after the change to stop workers when the heap is clearing -- thi does not work well at a 25000 ops per second, need to fix. - let rates = [1, 10, 100, 1000, 10000, 12500] + let rates = [1, 10, 100, 1000 +#ifndef __GHCJS__ + , 10000, 12500 +#endif + ] in describe "aheadly no consumer delay and 1 sec producer delay" $ forM_ rates (\r -> measureRate "aheadly" aheadly r 0 1 range) |