author    harendra <>                          2018-07-11 15:17:00 (GMT)
committer hdiff <hdiff@hdiff.luite.com>        2018-07-11 15:17:00 (GMT)
commit    47d20b95b7320b6a2abef57e459410ebe858598f (patch)
tree      08cc107b868bad21c904e82c8331f96f4b648747
parent    c4c86471690da861df0215567cf6da5b29b2a59c (diff)

version 0.4.0
-rw-r--r--  Changelog.md                      |   25
-rw-r--r--  README.md                         |   65
-rwxr-xr-x  bench.sh                          |   11
-rw-r--r--  benchmark/BaseStreams.hs          |  111
-rw-r--r--  benchmark/Linear.hs               |   57
-rw-r--r--  benchmark/LinearOps.hs            |  151
-rw-r--r--  benchmark/NestedOps.hs            |    2
-rw-r--r--  benchmark/StreamDOps.hs           |  186
-rw-r--r--  benchmark/StreamKOps.hs           |  196
-rw-r--r--  examples/CirclingSquare.hs        |    6
-rw-r--r--  examples/MergeSort.hs             |   12
-rw-r--r--  examples/SearchQuery.hs           |    7
-rw-r--r--  src/Streamly.hs                   |   26
-rw-r--r--  src/Streamly/Core.hs              | 1495
-rw-r--r--  src/Streamly/Prelude.hs           |  895
-rw-r--r--  src/Streamly/SVar.hs              |  974
-rw-r--r--  src/Streamly/Streams.hs           | 1490
-rw-r--r--  src/Streamly/Streams/Ahead.hs     |  385
-rw-r--r--  src/Streamly/Streams/Async.hs     |  591
-rw-r--r--  src/Streamly/Streams/Instances.hs |   43
-rw-r--r--  src/Streamly/Streams/Parallel.hs  |  370
-rw-r--r--  src/Streamly/Streams/Prelude.hs   |  154
-rw-r--r--  src/Streamly/Streams/SVar.hs      |  143
-rw-r--r--  src/Streamly/Streams/Serial.hs    |  338
-rw-r--r--  src/Streamly/Streams/StreamD.hs   |  679
-rw-r--r--  src/Streamly/Streams/StreamK.hs   |  909
-rw-r--r--  src/Streamly/Streams/Zip.hs       |  248
-rw-r--r--  src/Streamly/Streams/inline.h     |    3
-rw-r--r--  src/Streamly/Tutorial.hs          |   32
-rw-r--r--  stack-7.10.yaml                   |    2
-rw-r--r--  stack-8.0.yaml                    |    2
-rw-r--r--  stack.yaml                        |   28
-rw-r--r--  streamly.cabal                    |   77
-rw-r--r--  test/Main.hs                      |  139
-rw-r--r--  test/Prop.hs                      |  389
-rw-r--r--  test/loops.hs                     |   22
-rw-r--r--  test/nested-loops.hs              |   10
-rw-r--r--  test/parallel-loops.hs            |   10
38 files changed, 6453 insertions, 3830 deletions
diff --git a/Changelog.md b/Changelog.md
index 7ed99af..d4b7fbc 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,3 +1,28 @@
+## 0.4.0
+
+### Breaking changes
+
+* Signatures of `zipWithM` and `zipAsyncWithM` have changed
+* Some functions in the prelude now require an additional `Monad` constraint on
+  the underlying monad of the stream.
+
+### Deprecations
+
+* `once` has been deprecated and renamed to `yieldM`
+
+### Enhancements
+
+* Add concurrency control primitives `maxThreads` and `maxBuffer`.
+* Significant performance improvements utilizing stream fusion optimizations.
+* Add `yield` to construct a singleton stream from a pure value
+* Add `repeat` to generate an infinite stream by repeating a pure value
+* Add `fromList` and `fromListM` to generate streams from lists, faster than
+ `fromFoldable` and `fromFoldableM`
+* Add `map` as a synonym of `fmap`
+* Add `scanlM'`, the monadic version of `scanl'`
+* Add `takeWhileM` and `dropWhileM`
+* Add `filterM` (a few of these combinators are shown in the sketch below)
+
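A quick sketch showing a few of the newly added combinators together (illustrative only, not part of the release notes):

```haskell
import Streamly
import qualified Streamly.Prelude as S

main :: IO ()
main = do
    -- fromList builds a stream from a list; scanlM', takeWhileM and filterM
    -- are the newly added monadic variants of the pure combinators.
    xs <- S.toList
        $ S.filterM (return . even)
        $ S.takeWhileM (return . (< 8))
        $ S.scanlM' (\acc x -> return (acc + x)) 0
        $ (S.fromList [1..10] :: SerialT IO Int)
    print xs
```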
## 0.3.0
### Breaking changes
diff --git a/README.md b/README.md
index 2cfba90..65afde0 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,10 @@ Why use streamly?
* _Generality_: Unifies functionality provided by several disparate packages
(streaming, concurrency, list transformer, logic programming, reactive
programming) in a concise API.
- * _Performance_: Streamly is designed for high performance. See
+ * _Performance_: Streamly is designed for high performance. It employs stream
+   fusion optimizations for the best possible performance. Serial performance is
+   comparable to the venerable `vector` library in most cases, and even better
+   in some cases. Concurrent performance is unbeatable. See
[streaming-benchmarks](https://github.com/composewell/streaming-benchmarks)
for a comparison of popular streaming libraries on micro-benchmarks.
@@ -57,16 +60,17 @@ For more information on streamly, see:
## Streaming Pipelines
-Unlike `pipes` or `conduit` and like `vector` and `streaming` `streamly`
+Unlike `pipes` or `conduit` and like `vector` and `streaming`, `streamly`
composes stream data instead of stream processors (functions). A stream is
just like a list and is explicitly passed around to functions that process the
stream. Therefore, no special operator is needed to join stages in a streaming
-pipeline, just the standard forward (`$`) or reverse (`&`) function application
-operator is enough. Combinators are provided in `Streamly.Prelude` to
-transform or fold streams.
+pipeline, just the standard function application (`$`) or reverse function
+application (`&`) operator is enough. Combinators are provided in
+`Streamly.Prelude` to transform or fold streams.
-This snippet reads numbers from stdin, prints the squares of even numbers and
-exits if an even number more than 9 is entered.
+The following snippet provides a simple stream composition example that reads
+numbers from stdin, prints the squares of even numbers, and exits if an even
+number greater than 9 is entered.
```haskell
import Streamly
@@ -86,7 +90,8 @@ main = runStream $
Monadic construction and generation functions e.g. `consM`, `unfoldrM`,
`replicateM`, `repeatM`, `iterateM` and `fromFoldableM` etc. work concurrently
-when used with appropriate stream type combinator.
+when used with an appropriate stream type combinator (e.g. `asyncly`, `aheadly`
+or `parallely`).
The following code finishes in 3 seconds (6 seconds when serial):
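A minimal sketch of such code (the helper `p` below is hypothetical, not taken from the patch); with `asyncly` the three actions run concurrently, so the longest delay of 3 seconds dominates instead of the 6 second serial total:

```haskell
import Streamly
import Streamly.Prelude ((|:), nil)
import Control.Concurrent (threadDelay)

-- Sleep for n seconds and return n.
p :: Int -> IO Int
p n = threadDelay (n * 1000000) >> return n

-- |: is the monadic cons; with asyncly the consed actions run concurrently.
main :: IO ()
main = runStream $ asyncly $ p 3 |: p 2 |: p 1 |: nil
```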
@@ -107,8 +112,8 @@ runStream $ asyncly $ S.replicateM 10 $ p 10
## Concurrent Streaming Pipelines
-Use `|&` or `|$` to apply stream processing functions concurrently. In the
-following example "hello" is printed every second, if you use `&` instead of
+Use `|&` or `|$` to apply stream processing functions concurrently. The
+following example prints a "hello" every second; if you use `&` instead of
`|&` you will see that the delay doubles to 2 seconds because of serial
application.
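An illustrative sketch of such a pipeline (assuming one-second delays; not taken verbatim from the patch):

```haskell
import Streamly
import qualified Streamly.Prelude as S
import Control.Concurrent (threadDelay)

-- The producer and the consumer stage each sleep for a second; joined with
-- |& they run concurrently, so a "hello" is printed roughly every second.
-- With & (plain reverse application) the stages run serially and the
-- effective delay doubles to about two seconds per output.
main :: IO ()
main = runStream $
      S.repeatM (threadDelay 1000000 >> return "hello")
   |& S.mapM (\x -> threadDelay 1000000 >> putStrLn x)
```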
@@ -120,7 +125,7 @@ main = runStream $
## Mapping Concurrently
-We can use `mapM` or `sequence` concurrently on a stream.
+We can use the `mapM` or `sequence` functions concurrently on a stream.
```
> let p n = threadDelay (n * 1000000) >> return n
@@ -130,8 +135,8 @@ We can use `mapM` or `sequence` concurrently on a stream.
## Serial and Concurrent Merging
Semigroup and Monoid instances can be used to fold streams serially or
-concurrently. In the following example we are composing ten actions in the
-stream each with a delay of 1 to 10 seconds, respectively. Since all the
+concurrently. In the following example we compose ten actions in the
+stream, each with a delay of 1 to 10 seconds, respectively. Since all the
actions are concurrent we see one output printed every second:
``` haskell
@@ -140,11 +145,11 @@ import qualified Streamly.Prelude as S
import Control.Concurrent (threadDelay)
main = S.toList $ parallely $ foldMap delay [1..10]
- where delay n = S.once $ threadDelay (n * 1000000) >> print n
+ where delay n = S.yieldM $ threadDelay (n * 1000000) >> print n
```
-Streams can be combined together in many ways. We are providing some examples
-below, see the tutorial for more ways. We will use the following `delay`
+Streams can be combined together in many ways. We provide some examples
+below; see the tutorial for more ways. We use the following `delay`
function in the examples to demonstrate the concurrency aspects:
``` haskell
@@ -152,7 +157,7 @@ import Streamly
import qualified Streamly.Prelude as S
import Control.Concurrent
-delay n = S.once $ do
+delay n = S.yieldM $ do
threadDelay (n * 1000000)
tid <- myThreadId
putStrLn (show tid ++ ": Delay " ++ show n)
@@ -190,7 +195,7 @@ import qualified Streamly.Prelude as S
loops = do
x <- S.fromFoldable [1,2]
y <- S.fromFoldable [3,4]
- S.once $ putStrLn $ show (x, y)
+ S.yieldM $ putStrLn $ show (x, y)
main = runStream loops
```
@@ -203,20 +208,32 @@ main = runStream loops
## Concurrent Nested Loops
-To run the above code with demand-driven depth first concurrency i.e. each
-iteration in the loops can run concurrently depending on the consumer rate:
+To run the above code with lookahead style concurrency, i.e. each iteration in
+the loop can run concurrently but the results are presented in the same
+order as serial execution:
+
+``` haskell
+main = runStream $ aheadly $ loops
+```
+
+To run it with depth first concurrency yielding results asynchronously in the
+same order as they become available (deep async composition):
``` haskell
main = runStream $ asyncly $ loops
```
-To run it with demand driven breadth first concurrency:
+To run it with breadth first concurrency, yielding results asynchronously
+(wide async composition):
``` haskell
main = runStream $ wAsyncly $ loops
```
-To run it with strict concurrency irrespective of demand:
+The above stream types provide lazy, demand-driven concurrency that is
+automatically scaled to the consumer's demand and bounded so that it can be
+used on infinite streams. The following combinator provides strict, unbounded
+concurrency irrespective of demand:
``` haskell
main = runStream $ parallely $ loops
@@ -260,8 +277,8 @@ import qualified Streamly.Prelude as S
main = runStream $ aheadly $ getCurrentDir >>= readdir
where readdir d = do
- (dirs, files) <- S.once $ listDir d
- S.once $ mapM_ putStrLn $ map show files
+ (dirs, files) <- S.yieldM $ listDir d
+ S.yieldM $ mapM_ putStrLn $ map show files
-- read the subdirs concurrently, (<>) is concurrent
foldMap readdir dirs
```
diff --git a/bench.sh b/bench.sh
index 81d0714..3f4b1d4 100755
--- a/bench.sh
+++ b/bench.sh
@@ -124,15 +124,16 @@ run_bench () {
find_bench_prog
mkdir -p charts
- # We set min-samples to 1 so that we run with default benchmark duration of 5
- # seconds, whatever number of samples are possible in that.
- # We run just one iteration for each sample. Anyway the default is to run
- # for 30 ms and most our benchmarks are close to that or more.
- # If we use less than three samples, statistical analysis crashes
+  # We set min-samples to 3 because statistical analysis crashes if we use
+  # fewer than three samples. Note that the benchmark runs for a minimum of 5
+  # seconds. We use min-duration=0 to run just one iteration for each sample.
+  # Anyway, the default is to run iterations worth a minimum of 30 ms and most
+  # of our benchmarks are close to that or more.
$BENCH_PROG $ENABLE_QUICK \
--include-first-iter \
--min-samples 3 \
--min-duration 0 \
+  --match exact \
--csvraw=$OUTPUT_FILE \
-v 2 \
--measure-with $BENCH_PROG $GAUGE_ARGS || die "Benchmarking failed"
diff --git a/benchmark/BaseStreams.hs b/benchmark/BaseStreams.hs
new file mode 100644
index 0000000..0544f58
--- /dev/null
+++ b/benchmark/BaseStreams.hs
@@ -0,0 +1,111 @@
+-- |
+-- Module : Main
+-- Copyright : (c) 2018 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+
+import Control.DeepSeq (NFData)
+-- import Data.Functor.Identity (Identity, runIdentity)
+import System.Random (randomRIO)
+
+import Gauge
+import qualified StreamDOps as D
+import qualified StreamKOps as K
+
+-- We need a monadic bind here to make sure that the function f does not get
+-- completely optimized out by the compiler in some cases.
+{-# INLINE benchIO #-}
+benchIO :: String -> (a IO Int -> IO ()) -> (Int -> a IO Int) -> Benchmark
+benchIO name run f = bench name $ nfIO $ randomRIO (1,1000) >>= run . f
+
+benchFold :: NFData b
+ => String -> (t IO Int -> IO b) -> (Int -> t IO Int) -> Benchmark
+benchFold name f src = bench name $ nfIO $ randomRIO (1,1000) >>= f . src
+
+{-
+_benchId :: NFData b => String -> (Ops.Stream m Int -> Identity b) -> Benchmark
+_benchId name f = bench name $ nf (runIdentity . f) (Ops.source 10)
+-}
+
+main :: IO ()
+main = do
+ defaultMain
+ [ bgroup "streamD"
+ [ bgroup "generation"
+ [ benchIO "unfoldr" D.toNull D.sourceUnfoldr
+ , benchIO "unfoldrM" D.toNull D.sourceUnfoldrM
+ , benchIO "fromEnum" D.toNull D.sourceFromEnum
+
+ , benchIO "fromFoldable" D.toNull D.sourceFromFoldable
+ -- , benchIO "fromFoldableM" D.sourceFromFoldableM
+ ]
+ , bgroup "elimination"
+ [ benchIO "toNull" D.toNull D.sourceUnfoldrM
+ , benchIO "uncons" D.uncons D.sourceUnfoldrM
+ , benchIO "nullHeadTail" D.nullHeadTail D.sourceUnfoldrM
+ ]
+ , bgroup "transformation"
+ [ -- benchIO "scan" D.scan D.sourceUnfoldrM
+ benchIO "map" D.map D.sourceUnfoldrM
+ , benchIO "mapM" D.mapM D.sourceUnfoldrM
+ ]
+ , bgroup "filtering"
+ [ benchIO "filter-even" D.filterEven D.sourceUnfoldrM
+ , benchIO "filter-all-out" D.filterAllOut D.sourceUnfoldrM
+ , benchIO "filter-all-in" D.filterAllIn D.sourceUnfoldrM
+ , benchIO "take-all" D.takeAll D.sourceUnfoldrM
+ ]
+ ]
+ , bgroup "streamK"
+ [ bgroup "generation"
+ [ benchIO "unfoldr" K.toNull K.sourceUnfoldr
+ , benchIO "unfoldrM" K.toNull K.sourceUnfoldrM
+ -- , benchIO "fromEnum" K.toNull K.sourceFromEnum
+
+ , benchIO "fromFoldable" K.toNull K.sourceFromFoldable
+ , benchIO "fromFoldableM" K.toNull K.sourceFromFoldableM
+
+ -- appends
+ , benchIO "foldMapWith" K.toNull K.sourceFoldMapWith
+ , benchIO "foldMapWithM" K.toNull K.sourceFoldMapWithM
+ ]
+ , bgroup "elimination"
+ [ benchIO "toNull" K.toNull K.sourceUnfoldrM
+ , benchIO "uncons" K.uncons K.sourceUnfoldrM
+ , benchIO "nullHeadTail" K.nullHeadTail K.sourceUnfoldrM
+ , benchFold "toList" K.toList K.sourceUnfoldrM
+ , benchFold "fold" K.foldl K.sourceUnfoldrM
+ , benchFold "last" K.last K.sourceUnfoldrM
+ ]
+ , bgroup "transformation"
+ [ benchIO "scan" K.scan K.sourceUnfoldrM
+ , benchIO "map" K.map K.sourceUnfoldrM
+ , benchIO "mapM" K.mapM K.sourceUnfoldrM
+ , benchIO "concat" K.concat K.sourceUnfoldrM
+ ]
+ , bgroup "filtering"
+ [ benchIO "filter-even" K.filterEven K.sourceUnfoldrM
+ , benchIO "filter-all-out" K.filterAllOut K.sourceUnfoldrM
+ , benchIO "filter-all-in" K.filterAllIn K.sourceUnfoldrM
+ , benchIO "take-all" K.takeAll K.sourceUnfoldrM
+ , benchIO "takeWhile-true" K.takeWhileTrue K.sourceUnfoldrM
+ , benchIO "drop-all" K.dropAll K.sourceUnfoldrM
+ , benchIO "dropWhile-true" K.dropWhileTrue K.sourceUnfoldrM
+ ]
+ , benchIO "zip" K.zip K.sourceUnfoldrM
+ , bgroup "compose"
+ [ benchIO "mapM" K.composeMapM K.sourceUnfoldrM
+ , benchIO "map-with-all-in-filter" K.composeMapAllInFilter K.sourceUnfoldrM
+ , benchIO "all-in-filters" K.composeAllInFilters K.sourceUnfoldrM
+ , benchIO "all-out-filters" K.composeAllOutFilters K.sourceUnfoldrM
+ ]
+ -- Scaling with same operation in sequence
+ , bgroup "compose-scaling"
+ [ benchIO "1" (K.composeScaling 1) K.sourceUnfoldrM
+ , benchIO "2" (K.composeScaling 2) K.sourceUnfoldrM
+ , benchIO "3" (K.composeScaling 3) K.sourceUnfoldrM
+ , benchIO "4" (K.composeScaling 4) K.sourceUnfoldrM
+ ]
+ ]
+ ]
diff --git a/benchmark/Linear.hs b/benchmark/Linear.hs
index 75a4c71..0aace62 100644
--- a/benchmark/Linear.hs
+++ b/benchmark/Linear.hs
@@ -15,9 +15,14 @@ import Gauge
-- We need a monadic bind here to make sure that the function f does not get
-- completely optimized out by the compiler in some cases.
+--
+-- | Takes a fold method, and uses it with a default source.
+{-# INLINE benchIO #-}
benchIO :: (IsStream t, NFData b) => String -> (t IO Int -> IO b) -> Benchmark
benchIO name f = bench name $ nfIO $ randomRIO (1,1000) >>= f . Ops.source
+-- | Takes a source, and uses it with a default drain/fold method.
+{-# INLINE benchSrcIO #-}
benchSrcIO
:: (t IO Int -> SerialT IO Int)
-> String
@@ -36,22 +41,48 @@ main = do
defaultMain
[ bgroup "serially"
[ bgroup "generation"
- [ benchSrcIO serially "unfoldr" $ Ops.sourceUnfoldr
+ [ -- Most basic, barely stream continuations running
+ benchSrcIO serially "unfoldr" $ Ops.sourceUnfoldr
, benchSrcIO serially "unfoldrM" Ops.sourceUnfoldrM
+ , benchSrcIO serially "fromList" Ops.sourceFromList
+ , benchSrcIO serially "fromListM" Ops.sourceFromListM
+ -- These are essentially cons and consM
, benchSrcIO serially "fromFoldable" Ops.sourceFromFoldable
, benchSrcIO serially "fromFoldableM" Ops.sourceFromFoldableM
+ -- These are essentially appends
, benchSrcIO serially "foldMapWith" Ops.sourceFoldMapWith
, benchSrcIO serially "foldMapWithM" Ops.sourceFoldMapWithM
]
, bgroup "elimination"
- [ benchIO "toList" Ops.toList
- , benchIO "fold" Ops.foldl
+ [ benchIO "toNull" $ Ops.toNull serially
+ , benchIO "uncons" Ops.uncons
+ , benchIO "nullHeadTail" Ops.nullHeadTail
+ , benchIO "mapM_" Ops.mapM_
+ , benchIO "toList" Ops.toList
+ , benchIO "foldr" Ops.foldr
+ , benchIO "foldrM" Ops.foldrM
+ , benchIO "foldl'" Ops.foldl
+
, benchIO "last" Ops.last
+ , benchIO "length" Ops.length
+ , benchIO "elem" Ops.elem
+ , benchIO "notElem" Ops.notElem
+ , benchIO "all" Ops.all
+ , benchIO "any" Ops.any
+ , benchIO "maximum" Ops.maximum
+ , benchIO "minimum" Ops.minimum
+ , benchIO "sum" Ops.sum
+ , benchIO "product" Ops.product
]
, bgroup "transformation"
[ benchIO "scan" Ops.scan
, benchIO "map" Ops.map
+ , benchIO "fmap" Ops.fmap
, benchIO "mapM" (Ops.mapM serially)
+ , benchIO "mapMaybe" Ops.mapMaybe
+ , benchIO "mapMaybeM" Ops.mapMaybeM
+ , bench "sequence" $ nfIO $ randomRIO (1,1000) >>= \n ->
+ (Ops.sequence serially) (Ops.sourceUnfoldrMAction n)
, benchIO "concat" Ops.concat
]
, bgroup "filtering"
@@ -60,10 +91,13 @@ main = do
, benchIO "filter-all-in" Ops.filterAllIn
, benchIO "take-all" Ops.takeAll
, benchIO "takeWhile-true" Ops.takeWhileTrue
+ , benchIO "takeWhileM-true" Ops.takeWhileMTrue
, benchIO "drop-all" Ops.dropAll
, benchIO "dropWhile-true" Ops.dropWhileTrue
+ , benchIO "dropWhileM-true" Ops.dropWhileMTrue
]
, benchIO "zip" $ Ops.zip
+ , benchIO "zipM" $ Ops.zipM
, bgroup "compose"
[ benchIO "mapM" Ops.composeMapM
, benchIO "map-with-all-in-filter" Ops.composeMapAllInFilter
@@ -80,19 +114,19 @@ main = do
]
, bgroup "asyncly"
[ -- benchIO "unfoldr" $ Ops.toNull asyncly
- -- , benchSrcIO asyncly "fromFoldable" Ops.sourceFromFoldable
benchSrcIO asyncly "unfoldrM" Ops.sourceUnfoldrM
+ -- , benchSrcIO asyncly "fromFoldable" Ops.sourceFromFoldable
, benchSrcIO asyncly "fromFoldableM" Ops.sourceFromFoldableM
- , benchSrcIO asyncly "foldMapWith" Ops.sourceFoldMapWith
+ -- , benchSrcIO asyncly "foldMapWith" Ops.sourceFoldMapWith
, benchSrcIO asyncly "foldMapWithM" Ops.sourceFoldMapWithM
, benchIO "mapM" $ Ops.mapM asyncly
]
, bgroup "wAsyncly"
[ -- benchIO "unfoldr" $ Ops.toNull wAsyncly
- -- , benchSrcIO wAsyncly "fromFoldable" Ops.sourceFromFoldable
benchSrcIO wAsyncly "unfoldrM" Ops.sourceUnfoldrM
+ -- , benchSrcIO wAsyncly "fromFoldable" Ops.sourceFromFoldable
, benchSrcIO wAsyncly "fromFoldableM" Ops.sourceFromFoldableM
- , benchSrcIO wAsyncly "foldMapWith" Ops.sourceFoldMapWith
+ -- , benchSrcIO wAsyncly "foldMapWith" Ops.sourceFoldMapWith
, benchSrcIO wAsyncly "foldMapWithM" Ops.sourceFoldMapWithM
, benchIO "mapM" $ Ops.mapM wAsyncly
]
@@ -100,23 +134,24 @@ main = do
-- all stream types.
, bgroup "aheadly"
[ -- benchIO "unfoldr" $ Ops.toNull aheadly
- -- , benchSrcIO aheadly "fromFoldable" Ops.sourceFromFoldable
benchSrcIO aheadly "unfoldrM" Ops.sourceUnfoldrM
+ -- , benchSrcIO aheadly "fromFoldable" Ops.sourceFromFoldable
, benchSrcIO aheadly "fromFoldableM" Ops.sourceFromFoldableM
- , benchSrcIO aheadly "foldMapWith" Ops.sourceFoldMapWith
+ -- , benchSrcIO aheadly "foldMapWith" Ops.sourceFoldMapWith
, benchSrcIO aheadly "foldMapWithM" Ops.sourceFoldMapWithM
, benchIO "mapM" $ Ops.mapM aheadly
]
-- XXX need to use smaller streams to finish in reasonable time
, bgroup "parallely"
[ --benchIO "unfoldr" $ Ops.toNull parallely
- --, benchSrcIO parallely "fromFoldable" Ops.sourceFromFoldable
benchSrcIO parallely "unfoldrM" Ops.sourceUnfoldrM
+ --, benchSrcIO parallely "fromFoldable" Ops.sourceFromFoldable
, benchSrcIO parallely "fromFoldableM" Ops.sourceFromFoldableM
- , benchSrcIO parallely "foldMapWith" Ops.sourceFoldMapWith
+ -- , benchSrcIO parallely "foldMapWith" Ops.sourceFoldMapWith
, benchSrcIO parallely "foldMapWithM" Ops.sourceFoldMapWithM
, benchIO "mapM" $ Ops.mapM parallely
-- Zip has only one parallel flavor
, benchIO "zip" $ Ops.zipAsync
+ , benchIO "zipM" $ Ops.zipAsyncM
]
]
diff --git a/benchmark/LinearOps.hs b/benchmark/LinearOps.hs
index abd826b..2701dad 100644
--- a/benchmark/LinearOps.hs
+++ b/benchmark/LinearOps.hs
@@ -9,9 +9,10 @@
module LinearOps where
+import Data.Maybe (fromJust)
import Prelude
(Monad, Int, (+), ($), (.), return, fmap, even, (>), (<=),
- subtract, undefined, Maybe(..))
+ subtract, undefined, Maybe(..), odd, Bool, not)
import qualified Streamly as S
import qualified Streamly.Prelude as S
@@ -24,43 +25,75 @@ maxValue = value + 1000
-- Benchmark ops
-------------------------------------------------------------------------------
-{-# INLINE toNull #-}
-{-# INLINE toList #-}
-{-# INLINE foldl #-}
-{-# INLINE last #-}
+{-# INLINE uncons #-}
+{-# INLINE nullHeadTail #-}
{-# INLINE scan #-}
+{-# INLINE mapM_ #-}
{-# INLINE map #-}
+{-# INLINE fmap #-}
+{-# INLINE mapMaybe #-}
{-# INLINE filterEven #-}
-{-# INLINE mapM #-}
{-# INLINE filterAllOut #-}
{-# INLINE filterAllIn #-}
{-# INLINE takeOne #-}
{-# INLINE takeAll #-}
{-# INLINE takeWhileTrue #-}
+{-# INLINE takeWhileMTrue #-}
{-# INLINE dropAll #-}
{-# INLINE dropWhileTrue #-}
+{-# INLINE dropWhileMTrue #-}
{-# INLINE zip #-}
+{-# INLINE zipM #-}
{-# INLINE concat #-}
-{-# INLINE composeMapM #-}
{-# INLINE composeAllInFilters #-}
{-# INLINE composeAllOutFilters #-}
{-# INLINE composeMapAllInFilter #-}
-scan, map, filterEven, filterAllOut,
- filterAllIn, takeOne, takeAll, takeWhileTrue, dropAll, dropWhileTrue, zip,
+uncons, nullHeadTail, scan, mapM_, map, fmap, mapMaybe, filterEven, filterAllOut,
+ filterAllIn, takeOne, takeAll, takeWhileTrue, takeWhileMTrue, dropAll,
+ dropWhileTrue, dropWhileMTrue, zip, zipM,
concat, composeAllInFilters, composeAllOutFilters,
composeMapAllInFilter
:: Monad m
=> Stream m Int -> m ()
-composeMapM :: S.MonadAsync m => Stream m Int -> m ()
-toList :: Monad m => Stream m Int -> m [Int]
-foldl :: Monad m => Stream m Int -> m Int
-last :: Monad m => Stream m Int -> m (Maybe Int)
+{-# INLINE composeMapM #-}
+{-# INLINE zipAsync #-}
+{-# INLINE zipAsyncM #-}
+{-# INLINE mapMaybeM #-}
+composeMapM, zipAsync, zipAsyncM, mapMaybeM :: S.MonadAsync m => Stream m Int -> m ()
+
+{-# INLINE toList #-}
+{-# INLINE foldr #-}
+{-# INLINE foldrM #-}
+toList, foldr, foldrM :: Monad m => Stream m Int -> m [Int]
+
+{-# INLINE last #-}
+{-# INLINE maximum #-}
+{-# INLINE minimum #-}
+last, minimum, maximum :: Monad m => Stream m Int -> m (Maybe Int)
+
+{-# INLINE foldl #-}
+{-# INLINE length #-}
+{-# INLINE sum #-}
+{-# INLINE product #-}
+foldl, length, sum, product :: Monad m => Stream m Int -> m Int
+
+{-# INLINE all #-}
+{-# INLINE any #-}
+{-# INLINE elem #-}
+{-# INLINE notElem #-}
+elem, notElem, all, any :: Monad m => Stream m Int -> m Bool
+{-# INLINE toNull #-}
toNull :: Monad m => (t m Int -> S.SerialT m Int) -> t m Int -> m ()
+
+{-# INLINE mapM #-}
mapM :: (S.IsStream t, S.MonadAsync m)
=> (t m Int -> S.SerialT m Int) -> t m Int -> m ()
-zipAsync :: S.MonadAsync m => Stream m Int -> m ()
+
+{-# INLINE sequence #-}
+sequence :: (S.IsStream t, S.MonadAsync m)
+ => (t m Int -> S.SerialT m Int) -> t m (m Int) -> m ()
-------------------------------------------------------------------------------
-- Stream generation and elimination
@@ -71,6 +104,15 @@ type Stream m a = S.SerialT m a
{-# INLINE source #-}
source :: (S.MonadAsync m, S.IsStream t) => Int -> t m Int
source n = S.serially $ sourceUnfoldrM n
+-- source n = S.serially $ sourceFromList n
+
+{-# INLINE sourceFromList #-}
+sourceFromList :: (Monad m, S.IsStream t) => Int -> t m Int
+sourceFromList n = S.fromList [n..n+value]
+
+{-# INLINE sourceFromListM #-}
+sourceFromListM :: (S.MonadAsync m, S.IsStream t) => Int -> t m Int
+sourceFromListM n = S.fromListM (Prelude.fmap return [n..n+value])
{-# INLINE sourceFromFoldable #-}
sourceFromFoldable :: S.IsStream t => Int -> t m Int
@@ -81,17 +123,17 @@ sourceFromFoldableM :: (S.IsStream t, S.MonadAsync m) => Int -> t m Int
sourceFromFoldableM n = S.fromFoldableM (Prelude.fmap return [n..n+value])
{-# INLINE sourceFoldMapWith #-}
-sourceFoldMapWith :: (S.IsStream t, Monad (t m), S.Semigroup (t m Int))
+sourceFoldMapWith :: (S.IsStream t, S.Semigroup (t m Int))
=> Int -> t m Int
-sourceFoldMapWith n = S.foldMapWith (S.<>) return [n..n+value]
+sourceFoldMapWith n = S.foldMapWith (S.<>) S.yield [n..n+value]
{-# INLINE sourceFoldMapWithM #-}
sourceFoldMapWithM :: (S.IsStream t, Monad m, S.Semigroup (t m Int))
=> Int -> t m Int
-sourceFoldMapWithM n = S.foldMapWith (S.<>) (S.once . return) [n..n+value]
+sourceFoldMapWithM n = S.foldMapWith (S.<>) (S.yieldM . return) [n..n+value]
{-# INLINE sourceUnfoldr #-}
-sourceUnfoldr :: S.IsStream t => Int -> t m Int
+sourceUnfoldr :: (Monad m, S.IsStream t) => Int -> t m Int
sourceUnfoldr n = S.unfoldr step n
where
step cnt =
@@ -108,18 +150,54 @@ sourceUnfoldrM n = S.unfoldrM step n
then return Nothing
else return (Just (cnt, cnt + 1))
-{-# INLINE runStream #-}
-runStream :: Monad m => Stream m a -> m ()
-runStream = S.runStream
+{-# INLINE sourceUnfoldrMAction #-}
+sourceUnfoldrMAction :: (S.IsStream t, S.MonadAsync m) => Int -> t m (m Int)
+sourceUnfoldrMAction n = S.serially $ S.unfoldrM step n
+ where
+ step cnt =
+ if cnt > n + value
+ then return Nothing
+ else return (Just (return cnt, cnt + 1))
-------------------------------------------------------------------------------
-- Elimination
-------------------------------------------------------------------------------
+{-# INLINE runStream #-}
+runStream :: Monad m => Stream m a -> m ()
+runStream = S.runStream
+
toNull t = runStream . t
+uncons s = do
+ r <- S.uncons s
+ case r of
+ Nothing -> return ()
+ Just (_, t) -> uncons t
+nullHeadTail s = do
+ r <- S.null s
+ if not r
+ then do
+ _ <- S.head s
+ t <- S.tail s
+ case t of
+ Nothing -> return ()
+ Just x -> nullHeadTail x
+ else return ()
+mapM_ = S.mapM_ (\_ -> return ())
toList = S.toList
+foldr = S.foldr (:) []
+foldrM = S.foldrM (\a xs -> return (a : xs)) []
foldl = S.foldl' (+) 0
last = S.last
+elem = S.elem maxValue
+notElem = S.notElem maxValue
+length = S.length
+all = S.all (<= maxValue)
+any = S.any (> maxValue)
+maximum = S.maximum
+minimum = S.minimum
+sum = S.sum
+product = S.product
-------------------------------------------------------------------------------
-- Transformation
@@ -130,23 +208,45 @@ transform :: Monad m => Stream m a -> m ()
transform = runStream
scan = transform . S.scanl' (+) 0
-map = transform . fmap (+1)
+fmap = transform . Prelude.fmap (+1)
+map = transform . S.map (+1)
mapM t = transform . t . S.mapM return
+mapMaybe = transform . S.mapMaybe
+ (\x -> if Prelude.odd x then Nothing else Just ())
+mapMaybeM = transform . S.mapMaybeM
+ (\x -> if Prelude.odd x then (return Nothing) else return $ Just ())
+sequence t = transform . t . S.sequence
filterEven = transform . S.filter even
filterAllOut = transform . S.filter (> maxValue)
filterAllIn = transform . S.filter (<= maxValue)
takeOne = transform . S.take 1
takeAll = transform . S.take maxValue
takeWhileTrue = transform . S.takeWhile (<= maxValue)
+takeWhileMTrue = transform . S.takeWhileM (return . (<= maxValue))
dropAll = transform . S.drop maxValue
dropWhileTrue = transform . S.dropWhile (<= maxValue)
+dropWhileMTrue = transform . S.dropWhileM (return . (<= maxValue))
-------------------------------------------------------------------------------
-- Zipping and concat
-------------------------------------------------------------------------------
-zip src = transform $ (S.zipWith (,) src src)
-zipAsync src = transform $ (S.zipAsyncWith (,) src src)
+zip src = do
+ r <- S.tail src
+ let src1 = fromJust r
+ transform $ (S.zipWith (,) src src1)
+zipM src = do
+ r <- S.tail src
+ let src1 = fromJust r
+ transform $ (S.zipWithM (\a b -> return (a,b)) src src1)
+zipAsync src = do
+ r <- S.tail src
+ let src1 = fromJust r
+ transform $ (S.zipAsyncWith (,) src src1)
+zipAsyncM src = do
+ r <- S.tail src
+ let src1 = fromJust r
+ transform $ (S.zipAsyncWithM (\a b -> return (a,b)) src src1)
concat _n = return ()
-------------------------------------------------------------------------------
@@ -160,7 +260,8 @@ compose f = transform . f . f . f . f
composeMapM = compose (S.mapM return)
composeAllInFilters = compose (S.filter (<= maxValue))
composeAllOutFilters = compose (S.filter (> maxValue))
-composeMapAllInFilter = compose (S.filter (<= maxValue) . fmap (subtract 1))
+composeMapAllInFilter =
+ compose (S.filter (<= maxValue) . Prelude.fmap (subtract 1))
{-# INLINABLE composeScaling #-}
composeScaling :: Monad m => Int -> Stream m Int -> m ()
diff --git a/benchmark/NestedOps.hs b/benchmark/NestedOps.hs
index df7195e..b2fb387 100644
--- a/benchmark/NestedOps.hs
+++ b/benchmark/NestedOps.hs
@@ -42,7 +42,7 @@ sourceUnfoldrM n value = S.serially $ S.unfoldrM step n
else return (Just (cnt, cnt + 1))
{-# INLINE sourceUnfoldr #-}
-sourceUnfoldr :: S.IsStream t => Int -> Int -> t m Int
+sourceUnfoldr :: (Monad m, S.IsStream t) => Int -> Int -> t m Int
sourceUnfoldr start n = S.unfoldr step start
where
step cnt =
diff --git a/benchmark/StreamDOps.hs b/benchmark/StreamDOps.hs
new file mode 100644
index 0000000..421790b
--- /dev/null
+++ b/benchmark/StreamDOps.hs
@@ -0,0 +1,186 @@
+-- |
+-- Module : StreamDOps
+-- Copyright : (c) 2018 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+
+{-# LANGUAGE FlexibleContexts #-}
+
+module StreamDOps where
+
+-- import Prelude
+ -- (Monad, Int, (+), ($), (.), return, fmap, even, (>), (<=),
+ -- subtract, undefined, Maybe(..))
+import Prelude
+ (Monad, Int, (+), (.), return, (>), even, (<=),
+ Maybe(..), not)
+
+import qualified Streamly.Streams.StreamD as S
+
+value, maxValue :: Int
+value = 1000000
+maxValue = value + 1000
+
+-------------------------------------------------------------------------------
+-- Benchmark ops
+-------------------------------------------------------------------------------
+
+{-# INLINE uncons #-}
+{-# INLINE nullHeadTail #-}
+-- {-# INLINE scan #-}
+{-# INLINE map #-}
+{-# INLINE filterEven #-}
+{-# INLINE filterAllOut #-}
+{-# INLINE filterAllIn #-}
+{-# INLINE takeOne #-}
+{-# INLINE takeAll #-}
+{-
+{-# INLINE takeWhileTrue #-}
+{-# INLINE dropAll #-}
+{-# INLINE dropWhileTrue #-}
+{-# INLINE zip #-}
+{-# INLINE concat #-}
+{-# INLINE composeAllInFilters #-}
+{-# INLINE composeAllOutFilters #-}
+{-# INLINE composeMapAllInFilter #-}
+-}
+uncons, nullHeadTail, map, filterEven, filterAllOut,
+ filterAllIn, takeOne, takeAll -- takeWhileTrue, dropAll, dropWhileTrue, zip,
+ -- concat, composeAllInFilters, composeAllOutFilters,
+ -- composeMapAllInFilter
+ :: Monad m
+ => Stream m Int -> m ()
+
+{-
+{-# INLINE composeMapM #-}
+composeMapM :: S.MonadAsync m => Stream m Int -> m ()
+-}
+
+{-# INLINE toList #-}
+toList :: Monad m => Stream m Int -> m [Int]
+{-# INLINE foldl #-}
+foldl :: Monad m => Stream m Int -> m Int
+{-# INLINE last #-}
+last :: Monad m => Stream m Int -> m (Maybe Int)
+
+{-# INLINE toNull #-}
+{-# INLINE mapM #-}
+toNull, mapM :: Monad m => Stream m Int -> m ()
+
+-------------------------------------------------------------------------------
+-- Stream generation and elimination
+-------------------------------------------------------------------------------
+
+type Stream m a = S.Stream m a
+
+{-# INLINE sourceUnfoldr #-}
+sourceUnfoldr :: Monad m => Int -> Stream m Int
+sourceUnfoldr n = S.unfoldr step n
+ where
+ step cnt =
+ if cnt > n + value
+ then Nothing
+ else (Just (cnt, cnt + 1))
+
+{-# INLINE sourceUnfoldrM #-}
+sourceUnfoldrM :: Monad m => Int -> Stream m Int
+sourceUnfoldrM n = S.unfoldrM step n
+ where
+ step cnt =
+ if cnt > n + value
+ then return Nothing
+ else return (Just (cnt, cnt + 1))
+
+{-# INLINE sourceFromEnum #-}
+sourceFromEnum :: Monad m => Int -> Stream m Int
+sourceFromEnum n = S.enumFromStepN n 1 value
+
+{-# INLINE sourceFromFoldable #-}
+sourceFromFoldable :: Monad m => Int -> Stream m Int
+sourceFromFoldable n = S.fromList [n..n+value]
+
+{-# INLINE source #-}
+source :: Monad m => Int -> Stream m Int
+source n = sourceUnfoldrM n
+
+-------------------------------------------------------------------------------
+-- Elimination
+-------------------------------------------------------------------------------
+
+{-# INLINE runStream #-}
+runStream :: Monad m => Stream m a -> m ()
+runStream = S.runStream
+
+toNull = runStream
+uncons s = do
+ r <- S.uncons s
+ case r of
+ Nothing -> return ()
+ Just (_, t) -> uncons t
+nullHeadTail s = do
+ r <- S.null s
+ if not r
+ then do
+ _ <- S.head s
+ t <- S.tail s
+ case t of
+ Nothing -> return ()
+ Just x -> nullHeadTail x
+ else return ()
+toList = S.toList
+foldl = S.foldl' (+) 0
+last = S.last
+
+-------------------------------------------------------------------------------
+-- Transformation
+-------------------------------------------------------------------------------
+
+{-# INLINE transform #-}
+transform :: Monad m => Stream m a -> m ()
+transform = runStream
+
+-- scan = transform . S.scanl' (+) 0
+map = transform . S.map (+1)
+mapM = transform . S.mapM return
+filterEven = transform . S.filter even
+filterAllOut = transform . S.filter (> maxValue)
+filterAllIn = transform . S.filter (<= maxValue)
+takeOne = transform . S.take 1
+takeAll = transform . S.take maxValue
+{-
+takeWhileTrue = transform . S.takeWhile (<= maxValue)
+dropAll = transform . S.drop maxValue
+dropWhileTrue = transform . S.dropWhile (<= maxValue)
+
+-------------------------------------------------------------------------------
+-- Zipping and concat
+-------------------------------------------------------------------------------
+
+zip src = transform $ (S.zipWith (,) src src)
+concat _n = return ()
+
+-------------------------------------------------------------------------------
+-- Composition
+-------------------------------------------------------------------------------
+
+{-# INLINE compose #-}
+compose :: Monad m => (Stream m Int -> Stream m Int) -> Stream m Int -> m ()
+compose f = transform . f . f . f . f
+
+composeMapM = compose (S.mapM return)
+composeAllInFilters = compose (S.filter (<= maxValue))
+composeAllOutFilters = compose (S.filter (> maxValue))
+composeMapAllInFilter = compose (S.filter (<= maxValue) . fmap (subtract 1))
+
+{-# INLINABLE composeScaling #-}
+composeScaling :: Monad m => Int -> Stream m Int -> m ()
+composeScaling m =
+ case m of
+ 1 -> transform . f
+ 2 -> transform . f . f
+ 3 -> transform . f . f . f
+ 4 -> transform . f . f . f . f
+ _ -> undefined
+ where f = S.filter (<= maxValue)
+ -}
diff --git a/benchmark/StreamKOps.hs b/benchmark/StreamKOps.hs
new file mode 100644
index 0000000..d9899e2
--- /dev/null
+++ b/benchmark/StreamKOps.hs
@@ -0,0 +1,196 @@
+-- |
+-- Module : StreamKOps
+-- Copyright : (c) 2018 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+
+{-# LANGUAGE FlexibleContexts #-}
+
+module StreamKOps where
+
+import Prelude
+ (Monad, Int, (+), ($), (.), return, fmap, even, (>), (<=),
+ subtract, undefined, Maybe(..), not)
+
+import qualified Streamly.Streams.StreamK as S hiding (runStream)
+-- import qualified Streamly.Streams.Serial as S
+import qualified Streamly as S
+import qualified Streamly.Prelude as P
+
+value, maxValue :: Int
+value = 1000000
+maxValue = value + 1000
+
+-------------------------------------------------------------------------------
+-- Benchmark ops
+-------------------------------------------------------------------------------
+
+{-# INLINE toNull #-}
+{-# INLINE uncons #-}
+{-# INLINE nullHeadTail #-}
+{-# INLINE scan #-}
+{-# INLINE map #-}
+{-# INLINE filterEven #-}
+{-# INLINE filterAllOut #-}
+{-# INLINE filterAllIn #-}
+{-# INLINE takeOne #-}
+{-# INLINE takeAll #-}
+{-# INLINE takeWhileTrue #-}
+{-# INLINE dropAll #-}
+{-# INLINE dropWhileTrue #-}
+{-# INLINE zip #-}
+{-# INLINE concat #-}
+{-# INLINE composeAllInFilters #-}
+{-# INLINE composeAllOutFilters #-}
+{-# INLINE composeMapAllInFilter #-}
+toNull, uncons, nullHeadTail, scan, map, filterEven, filterAllOut,
+ filterAllIn, takeOne, takeAll, takeWhileTrue, dropAll, dropWhileTrue, zip,
+ concat, composeAllInFilters, composeAllOutFilters,
+ composeMapAllInFilter
+ :: Monad m
+ => Stream m Int -> m ()
+
+{-# INLINE composeMapM #-}
+composeMapM :: S.MonadAsync m => Stream m Int -> m ()
+
+{-# INLINE toList #-}
+toList :: Monad m => Stream m Int -> m [Int]
+{-# INLINE foldl #-}
+foldl :: Monad m => Stream m Int -> m Int
+{-# INLINE last #-}
+last :: Monad m => Stream m Int -> m (Maybe Int)
+
+{-# INLINE mapM #-}
+mapM :: S.MonadAsync m => Stream m Int -> m ()
+
+-------------------------------------------------------------------------------
+-- Stream generation and elimination
+-------------------------------------------------------------------------------
+
+type Stream m a = S.SerialT m a
+
+{-# INLINE sourceUnfoldr #-}
+sourceUnfoldr :: Int -> Stream m Int
+sourceUnfoldr n = S.unfoldr step n
+ where
+ step cnt =
+ if cnt > n + value
+ then Nothing
+ else (Just (cnt, cnt + 1))
+
+{-# INLINE sourceUnfoldrM #-}
+sourceUnfoldrM :: S.MonadAsync m => Int -> Stream m Int
+sourceUnfoldrM n = S.unfoldrM step n
+ where
+ step cnt =
+ if cnt > n + value
+ then return Nothing
+ else return (Just (cnt, cnt + 1))
+
+{-
+{-# INLINE sourceFromEnum #-}
+sourceFromEnum :: Monad m => Int -> Stream m Int
+sourceFromEnum n = S.enumFromStepN n 1 value
+-}
+
+{-# INLINE sourceFromFoldable #-}
+sourceFromFoldable :: Int -> Stream m Int
+sourceFromFoldable n = S.fromFoldable [n..n+value]
+
+{-# INLINE sourceFromFoldableM #-}
+sourceFromFoldableM :: S.MonadAsync m => Int -> Stream m Int
+sourceFromFoldableM n = P.fromFoldableM (Prelude.fmap return [n..n+value])
+
+{-# INLINE sourceFoldMapWith #-}
+sourceFoldMapWith :: Monad m => Int -> Stream m Int
+sourceFoldMapWith n = S.foldMapWith (S.<>) return [n..n+value]
+
+{-# INLINE sourceFoldMapWithM #-}
+sourceFoldMapWithM :: Monad m => Int -> Stream m Int
+sourceFoldMapWithM n = S.foldMapWith (S.<>) (S.yieldM . return) [n..n+value]
+
+{-# INLINE source #-}
+source :: S.MonadAsync m => Int -> Stream m Int
+source n = sourceUnfoldrM n
+
+-------------------------------------------------------------------------------
+-- Elimination
+-------------------------------------------------------------------------------
+
+{-# INLINE runStream #-}
+runStream :: Monad m => Stream m a -> m ()
+runStream = S.runStream
+
+toNull = runStream
+uncons s = do
+ r <- S.uncons s
+ case r of
+ Nothing -> return ()
+ Just (_, t) -> uncons t
+
+nullHeadTail s = do
+ r <- S.null s
+ if not r
+ then do
+ _ <- S.head s
+ t <- S.tail s
+ case t of
+ Nothing -> return ()
+ Just x -> nullHeadTail x
+ else return ()
+
+toList = S.toList
+foldl = S.foldl' (+) 0
+last = S.last
+
+-------------------------------------------------------------------------------
+-- Transformation
+-------------------------------------------------------------------------------
+
+{-# INLINE transform #-}
+transform :: Monad m => Stream m a -> m ()
+transform = runStream
+
+scan = transform . S.scanl' (+) 0
+map = transform . fmap (+1)
+mapM = transform . S.mapM return
+filterEven = transform . S.filter even
+filterAllOut = transform . S.filter (> maxValue)
+filterAllIn = transform . S.filter (<= maxValue)
+takeOne = transform . S.take 1
+takeAll = transform . S.take maxValue
+takeWhileTrue = transform . P.takeWhile (<= maxValue)
+dropAll = transform . P.drop maxValue
+dropWhileTrue = transform . P.dropWhile (<= maxValue)
+
+-------------------------------------------------------------------------------
+-- Zipping and concat
+-------------------------------------------------------------------------------
+
+zip src = transform $ (P.zipWith (,) src src)
+concat _n = return ()
+
+-------------------------------------------------------------------------------
+-- Composition
+-------------------------------------------------------------------------------
+
+{-# INLINE compose #-}
+compose :: Monad m => (Stream m Int -> Stream m Int) -> Stream m Int -> m ()
+compose f = transform . f . f . f . f
+
+composeMapM = compose (S.mapM return)
+composeAllInFilters = compose (S.filter (<= maxValue))
+composeAllOutFilters = compose (S.filter (> maxValue))
+composeMapAllInFilter = compose (S.filter (<= maxValue) . fmap (subtract 1))
+
+{-# INLINABLE composeScaling #-}
+composeScaling :: Monad m => Int -> Stream m Int -> m ()
+composeScaling m =
+ case m of
+ 1 -> transform . f
+ 2 -> transform . f . f
+ 3 -> transform . f . f . f
+ 4 -> transform . f . f . f . f
+ _ -> undefined
+ where f = S.filter (<= maxValue)
diff --git a/examples/CirclingSquare.hs b/examples/CirclingSquare.hs
index 328caa6..f2814f6 100644
--- a/examples/CirclingSquare.hs
+++ b/examples/CirclingSquare.hs
@@ -9,7 +9,7 @@
import Data.IORef
import Graphics.UI.SDL as SDL
import Streamly
-import Streamly.Prelude (once)
+import Streamly.Prelude (yieldM)
import Streamly.Time
------------------------------------------------------------------------------
@@ -87,5 +87,5 @@ main :: IO ()
main = do
sdlInit
cref <- newIORef (0,0)
- runStream $ once (updateController cref)
- `parallel` once (updateDisplay cref)
+ runStream $ yieldM (updateController cref)
+ `parallel` yieldM (updateDisplay cref)
diff --git a/examples/MergeSort.hs b/examples/MergeSort.hs
index 504c489..ae33ede 100644
--- a/examples/MergeSort.hs
+++ b/examples/MergeSort.hs
@@ -4,29 +4,29 @@ import Data.Word
import System.Random (getStdGen, randoms)
import Data.List (sort)
import Streamly
-import Streamly.Prelude (once)
+import Streamly.Prelude (yieldM)
import qualified Streamly.Prelude as A
getSorted :: Serial Word16
getSorted = do
- g <- once getStdGen
+ g <- yieldM getStdGen
let ls = take 100000 (randoms g) :: [Word16]
foldMap return (sort ls)
-- | merge two streams generating the elements from each in parallel
mergeAsync :: Ord a => Serial a -> Serial a -> Serial a
mergeAsync a b = do
- x <- once $ mkAsync a
- y <- once $ mkAsync b
+ x <- yieldM $ mkAsync a
+ y <- yieldM $ mkAsync b
merge x y
merge :: Ord a => Serial a -> Serial a -> Serial a
merge a b = do
- a1 <- once $ A.uncons a
+ a1 <- yieldM $ A.uncons a
case a1 of
Nothing -> b
Just (x, ma) -> do
- b1 <- once $ A.uncons b
+ b1 <- yieldM $ A.uncons b
case b1 of
Nothing -> return x <> ma
Just (y, mb) ->
diff --git a/examples/SearchQuery.hs b/examples/SearchQuery.hs
index c729080..0405149 100644
--- a/examples/SearchQuery.hs
+++ b/examples/SearchQuery.hs
@@ -1,5 +1,5 @@
import Streamly
-import Streamly.Prelude (nil, once, (|:))
+import Streamly.Prelude (nil, yieldM, (|:))
import Network.HTTP.Simple
-- | Runs three search engine queries in parallel and prints the search engine
@@ -13,10 +13,11 @@ main = do
runStream . parallely $ google |: bing |: duckduckgo |: nil
putStrLn "\nUsing parallel semigroup composition"
- runStream . parallely $ once google <> once bing <> once duckduckgo
+ runStream . parallely $ yieldM google <> yieldM bing <> yieldM duckduckgo
putStrLn "\nUsing parallel applicative zip"
- runStream . zipAsyncly $ (,,) <$> once google <*> once bing <*> once duckduckgo
+ runStream . zipAsyncly $
+ (,,) <$> yieldM google <*> yieldM bing <*> yieldM duckduckgo
where
get :: String -> IO ()
diff --git a/src/Streamly.hs b/src/Streamly.hs
index 61a80d4..503d279 100644
--- a/src/Streamly.hs
+++ b/src/Streamly.hs
@@ -100,6 +100,11 @@ module Streamly
, wAsync
, parallel
+ -- * Concurrency Control
+ -- $concurrency
+ , maxThreads
+ , maxBuffer
+
-- * Folding Containers of Streams
-- $foldutils
, foldWith
@@ -152,7 +157,15 @@ module Streamly
)
where
-import Streamly.Streams
+import Streamly.Streams.StreamK hiding (runStream, serial)
+import Streamly.Streams.Serial
+import Streamly.Streams.Async
+import Streamly.Streams.Ahead
+import Streamly.Streams.Parallel
+import Streamly.Streams.Zip
+import Streamly.Streams.Prelude
+import Streamly.Streams.SVar (maxThreads, maxBuffer)
+import Streamly.SVar (MonadAsync)
import Data.Semigroup (Semigroup(..))
-- $serial
@@ -202,6 +215,17 @@ import Data.Semigroup (Semigroup(..))
-- which can be used to combine two streams in a predetermined way irrespective
-- of the type.
+-- $concurrency
+--
+-- These combinators can be used at any point in a stream composition to
+-- control the concurrency of the enclosed stream. When the combinators are
+-- used in a nested manner, the nearest enclosing combinator overrides the
+-- outer ones. These combinators have no effect on 'Parallel' streams;
+-- concurrency for 'Parallel' streams is always unbounded.
+-- Note that these combinators do not by themselves enable concurrency; to
+-- enable concurrency you have to use one of the concurrent stream type
+-- combinators.
+
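An illustrative sketch (not part of the patch) of how these combinators might be used; the numbers are arbitrary:

```haskell
import Streamly
import qualified Streamly.Prelude as S
import Control.Concurrent (threadDelay)

-- Bound the enclosed concurrent stream to at most 4 worker threads and a
-- 100 element output buffer. The combinators only limit concurrency; it is
-- asyncly that enables it in the first place.
main :: IO ()
main = runStream
     $ asyncly
     $ maxThreads 4
     $ maxBuffer 100
     $ S.replicateM 16 (threadDelay 1000000)
```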
-- $adapters
--
-- You may want to use different stream composition styles at different points
diff --git a/src/Streamly/Core.hs b/src/Streamly/Core.hs
deleted file mode 100644
index 986f9b9..0000000
--- a/src/Streamly/Core.hs
+++ /dev/null
@@ -1,1495 +0,0 @@
-{-# LANGUAGE CPP #-}
-{-# LANGUAGE ConstraintKinds #-}
-{-# LANGUAGE FlexibleContexts #-}
-{-# LANGUAGE FlexibleInstances #-}
-{-# LANGUAGE LambdaCase #-}
-{-# LANGUAGE MagicHash #-}
-{-# LANGUAGE MultiParamTypeClasses #-}
-{-# LANGUAGE RankNTypes #-}
-{-# LANGUAGE UnboxedTuples #-}
-{-# LANGUAGE UndecidableInstances #-} -- XXX
-
--- |
--- Module : Streamly.Core
--- Copyright : (c) 2017 Harendra Kumar
---
--- License : BSD3
--- Maintainer : harendra.kumar@gmail.com
--- Stability : experimental
--- Portability : GHC
---
---
-module Streamly.Core
- (
- MonadAsync
-
- -- * Streams
- , Stream (..)
-
- -- * Construction (pure)
- , nil
- , cons
- , singleton
- , once
- , repeat
-
- -- * Construction (monadic)
- , consM
- , consMAhead
- , consMAsync
- , consMWAsync
- , consMParallel
-
- -- * Semigroup Style Composition
- , serial
- , wSerial
- , ahead
- , async
- , wAsync
- , parallel
-
- -- * applications
- , applyWith
- , runWith
-
- -- * zip
- , zipWith
- , zipAsyncWith
-
- -- * Concurrent Stream Vars (SVars)
- , SVar
- , SVarStyle (..)
- , newStreamVar1
- , fromStreamVar
- , toStreamVar
- )
-where
-
-import Control.Concurrent (ThreadId, myThreadId,
- threadDelay, getNumCapabilities)
-import Control.Concurrent.MVar (MVar, newEmptyMVar,
- tryPutMVar, takeMVar)
-import Control.Exception (SomeException (..), catch, mask)
-import Control.Monad (when)
-import Control.Monad.Catch (MonadThrow, throwM)
-import Control.Monad.IO.Class (MonadIO(..))
-import Control.Monad.Trans.Class (MonadTrans (lift))
-import Control.Monad.Trans.Control (MonadBaseControl, control)
-import Data.Atomics (casIORef, readForCAS, peekTicket
- ,atomicModifyIORefCAS_
- ,writeBarrier,storeLoadBarrier)
-import Data.Concurrent.Queue.MichaelScott (LinkedQueue, newQ, pushL,
- tryPopR, nullQ)
-import Data.Functor (void)
-import Data.Heap (Heap, Entry(..))
-import qualified Data.Heap as H
-import Data.IORef (IORef, modifyIORef, newIORef,
- readIORef, atomicModifyIORef
-#ifdef DIAGNOSTICS
- , writeIORef
-#endif
- )
-import Data.Maybe (fromJust)
-import Data.Semigroup (Semigroup(..))
-import Data.Set (Set)
-import qualified Data.Set as S
-import Prelude hiding (repeat, zipWith)
-
-import GHC.Exts
-import GHC.Conc (ThreadId(..))
-import GHC.IO (IO(..))
-
--- MVar diagnostics has some overhead - around 5% on asyncly null benchmark, we
--- can keep it on in production to debug problems quickly if and when they
--- happen, but it may result in unexpected output when threads are left hanging
--- until they are GCed because the consumer went away.
-
-#ifdef DIAGNOSTICS
-import Control.Concurrent.MVar (tryTakeMVar)
-import Control.Exception (catches, throwIO, Handler(..),
- BlockedIndefinitelyOnMVar(..),
- BlockedIndefinitelyOnSTM(..))
-import System.IO (hPutStrLn, stderr)
-#endif
-
-------------------------------------------------------------------------------
--- Parent child thread communication type
-------------------------------------------------------------------------------
-
--- | Events that a child thread may send to a parent thread.
-data ChildEvent a =
- ChildYield a
- | ChildStop ThreadId (Maybe SomeException)
-
--- | Sorting out-of-turn outputs in a heap for Ahead style streams
-data AheadHeapEntry m a =
- AheadEntryPure a
- | AheadEntryStream (Stream m a)
-
-------------------------------------------------------------------------------
--- State threaded around the monad for thread management
-------------------------------------------------------------------------------
-
--- XXX use a separate data structure for each type of SVar
--- | Identify the type of the SVar. Two computations using the same style can
--- be scheduled on the same SVar.
-data SVarStyle =
- AsyncVar -- depth first concurrent
- | WAsyncVar -- breadth first concurrent
- | ParallelVar -- all parallel
- | AheadVar -- Concurrent look ahead
- deriving (Eq, Show)
-
--- | An SVar or a Stream Var is a conduit to the output from multiple streams
--- running concurrently and asynchronously. An SVar can be thought of as an
--- asynchronous IO handle. We can write any number of streams to an SVar in a
--- non-blocking manner and then read them back at any time at any pace. The
--- SVar would run the streams asynchronously and accumulate results. An SVar
--- may not really execute the stream completely and accumulate all the results.
--- However, it ensures that the reader can read the results at whatever paces
--- it wants to read. The SVar monitors and adapts to the consumer's pace.
---
--- An SVar is a mini scheduler, it has an associated runqueue that holds the
--- stream tasks to be picked and run by a pool of worker threads. It has an
--- associated output queue where the output stream elements are placed by the
--- worker threads. A doorBell is used by the worker threads to intimate the
--- consumer thread about availability of new results in the output queue. More
--- workers are added to the SVar by 'fromStreamVar' on demand if the output
--- produced is not keeping pace with the consumer. On bounded SVars, workers
--- block on the output queue to provide throttling of the producer when the
--- consumer is not pulling fast enough. The number of workers may even get
--- reduced depending on the consuming pace.
---
--- New work is enqueued either at the time of creation of the SVar or as a
--- result of executing the parallel combinators i.e. '<|' and '<|>' when the
--- already enqueued computations get evaluated. See 'joinStreamVarAsync'.
---
-data SVar m a =
- SVar {
- -- Read only state
- svarStyle :: SVarStyle
-
- -- Shared output queue (events, length)
- , outputQueue :: IORef ([ChildEvent a], Int)
- , doorBell :: MVar () -- signal the consumer about output
-
- -- Output synchronization mechanism for Ahead streams (Ahead and
- -- wAhead). We maintain a heap of out of sequence ahead of time
- -- generated outputs and the sequence number of the task that is
- -- currently at the head of the stream. Concurrent execute ahead
- -- tasks that have a sequence number greater than the task at the
- -- head should add their output to the heap.
- , outputHeap :: IORef (Heap (Entry Int (AheadHeapEntry m a))
- , Int
- )
-
- -- Shared work queue (stream, seqNo)
- , workQueue :: IORef ([Stream m a], Int)
- , enqueue :: Stream m a -> IO ()
- , queueEmpty :: m Bool
- , waitingForWork :: IORef Bool
- , runqueue :: m ()
-
- -- Shared, thread tracking
- , runningThreads :: IORef (Set ThreadId)
- , activeWorkers :: IORef Int
-#ifdef DIAGNOSTICS
- , totalDispatches :: IORef Int
- , maxWorkers :: IORef Int
- , maxOutQSize :: IORef Int
- , maxHeapSize :: IORef Int
- , maxWorkQSize :: IORef Int
-#endif
- }
-
-#ifdef DIAGNOSTICS
-{-# NOINLINE dumpSVar #-}
-dumpSVar :: SVar m a -> IO String
-dumpSVar sv = do
- tid <- myThreadId
- (oqList, oqLen) <- readIORef $ outputQueue sv
- db <- tryTakeMVar $ doorBell sv
- aheadDump <-
- if svarStyle sv == AheadVar
- then do
- (oheap, oheapSeq) <- readIORef $ outputHeap sv
- (wq, wqSeq) <- readIORef $ workQueue sv
- maxHp <- readIORef $ maxHeapSize sv
- return $ unlines
- [ "heap length = " ++ show (H.size oheap)
- , "heap seqeunce = " ++ show oheapSeq
- , "work queue length = " ++ show (length wq)
- , "work queue sequence = " ++ show wqSeq
- , "heap max size = " ++ show maxHp
- ]
- else return []
-
- waiting <- readIORef $ waitingForWork sv
- rthread <- readIORef $ runningThreads sv
- workers <- readIORef $ activeWorkers sv
- maxWrk <- readIORef $ maxWorkers sv
- dispatches <- readIORef $ totalDispatches sv
- maxOq <- readIORef $ maxOutQSize sv
- -- XXX queueEmpty should be made IO return type
-
- return $ unlines
- [ "tid = " ++ show tid
- , "style = " ++ show (svarStyle sv)
- , "outputQueue length computed = " ++ show (length oqList)
- , "outputQueue length maintained = " ++ show oqLen
- , "output doorBell = " ++ show db
- , "total dispatches = " ++ show dispatches
- , "max workers = " ++ show maxWrk
- , "max outQSize = " ++ show maxOq
- ]
- ++ aheadDump ++ unlines
- [ "waitingForWork = " ++ show waiting
- , "running threads = " ++ show rthread
- , "running thread count = " ++ show workers
- ]
-
-{-# NOINLINE mvarExcHandler #-}
-mvarExcHandler :: SVar m a -> String -> BlockedIndefinitelyOnMVar -> IO ()
-mvarExcHandler sv label e@BlockedIndefinitelyOnMVar = do
- svInfo <- dumpSVar sv
- hPutStrLn stderr $ label ++ " " ++ "BlockedIndefinitelyOnMVar\n" ++ svInfo
- throwIO e
-
-{-# NOINLINE stmExcHandler #-}
-stmExcHandler :: SVar m a -> String -> BlockedIndefinitelyOnSTM -> IO ()
-stmExcHandler sv label e@BlockedIndefinitelyOnSTM = do
- svInfo <- dumpSVar sv
- hPutStrLn stderr $ label ++ " " ++ "BlockedIndefinitelyOnSTM\n" ++ svInfo
- throwIO e
-
-withDBGMVar :: SVar m a -> String -> IO () -> IO ()
-withDBGMVar sv label action =
- action `catches` [ Handler (mvarExcHandler sv label)
- , Handler (stmExcHandler sv label)
- ]
-#else
-withDBGMVar :: SVar m a -> String -> IO () -> IO ()
-withDBGMVar _ _ action = action
-#endif
-
--- Slightly faster version of CAS. Gained some improvement by avoiding the use
--- of "evaluate" because we know we do not have exceptions in fn.
-{-# INLINE atomicModifyIORefCAS #-}
-atomicModifyIORefCAS :: IORef a -> (a -> (a,b)) -> IO b
-atomicModifyIORefCAS ref fn = do
- tkt <- readForCAS ref
- loop tkt retries
-
- where
-
- retries = 25 :: Int
- loop _ 0 = atomicModifyIORef ref fn
- loop old tries = do
- let (new, result) = fn $ peekTicket old
- (success, tkt) <- casIORef ref old new
- if success
- then return result
- else loop tkt (tries - 1)
-
-------------------------------------------------------------------------------
--- The stream type
-------------------------------------------------------------------------------
-
--- | The type 'Stream m a' represents a monadic stream of values of type 'a'
--- constructed using actions in monad 'm'. It uses stop, singleton and yield
--- continuations equivalent to the following direct style type:
---
--- data Stream m a = Stop | Singleton a | Yield a (Stream m a)
---
--- To facilitate parallel composition we maintain a local state in an SVar that
--- is shared across and is used for synchronization of the streams being
--- composed.
---
--- The singleton case can be expressed in terms of stop and yield but we have
--- it as a separate case to optimize composition operations for streams with
--- single element. We build singleton streams in the implementation of 'pure'
--- for Applicative and Monad, and in 'lift' for MonadTrans.
---
-newtype Stream m a =
- Stream {
- runStream :: forall r.
- Maybe (SVar m a) -- local state
- -> m r -- stop
- -> (a -> m r) -- singleton
- -> (a -> Stream m a -> m r) -- yield
- -> m r
- }
-
-nil :: Stream m a
-nil = Stream $ \_ stp _ _ -> stp
-
--- | faster than consM because there is no bind.
-cons :: a -> Stream m a -> Stream m a
-cons a r = Stream $ \_ _ _ yld -> yld a r
-
--- | Same as @once . return@ but may be faster because there is no bind
-singleton :: a -> Stream m a
-singleton a = Stream $ \_ _ single _ -> single a
-
-{-# INLINE once #-}
-once :: Monad m => m a -> Stream m a
-once m = Stream $ \_ _ single _ -> m >>= single
-
-{-# INLINE consM #-}
-consM :: Monad m => m a -> Stream m a -> Stream m a
-consM m r = Stream $ \_ _ _ yld -> m >>= \a -> yld a r
-
-repeat :: a -> Stream m a
-repeat a = let x = cons a x in x
-
-------------------------------------------------------------------------------
--- Semigroup
-------------------------------------------------------------------------------
-
--- | Concatenates two streams sequentially i.e. the first stream is
--- exhausted completely before yielding any element from the second stream.
-{-# INLINE serial #-}
-serial :: Stream m a -> Stream m a -> Stream m a
-serial m1 m2 = go m1
- where
- go (Stream m) = Stream $ \_ stp sng yld ->
- let stop = (runStream m2) Nothing stp sng yld
- single a = yld a m2
- yield a r = yld a (go r)
- in m Nothing stop single yield
-
-instance Semigroup (Stream m a) where
- (<>) = serial
-
-------------------------------------------------------------------------------
--- Monoid
-------------------------------------------------------------------------------
-
-instance Monoid (Stream m a) where
- mempty = nil
- mappend = (<>)
-
-------------------------------------------------------------------------------
--- Interleave
-------------------------------------------------------------------------------
-
-{-# INLINE wSerial #-}
-wSerial :: Stream m a -> Stream m a -> Stream m a
-wSerial m1 m2 = Stream $ \_ stp sng yld -> do
- let stop = (runStream m2) Nothing stp sng yld
- single a = yld a m2
- yield a r = yld a (wSerial m2 r)
- (runStream m1) Nothing stop single yield
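
-- A small usage sketch contrasting the two compositions, assuming the
-- hypothetical _toListDemo drain sketched above: 'serial' exhausts the left
-- stream before the right one, while 'wSerial' alternates between the two, so
-- the expected results are [1,2,3,4] and [1,3,2,4] respectively.
_orderingDemo :: Monad m => m ([Int], [Int])
_orderingDemo = do
    let s1 = cons 1 (cons 2 nil)
        s2 = cons 3 (cons 4 nil)
    xs <- _toListDemo (serial s1 s2)
    ys <- _toListDemo (wSerial s1 s2)
    return (xs, ys)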
-
-------------------------------------------------------------------------------
--- Spawning threads and collecting result in streamed fashion
-------------------------------------------------------------------------------
-
--- | A monad that can perform concurrent or parallel IO operations. Streams
--- that can be composed concurrently require the underlying monad to be
--- 'MonadAsync'.
---
--- @since 0.1.0
-type MonadAsync m = (MonadIO m, MonadBaseControl IO m, MonadThrow m)
-
--- Stolen from the async package. The perf improvement is modest, 2% on a
--- thread-heavy benchmark (parallel composition using noop computations).
--- A version of forkIO that does not include the outer exception
--- handler: saves a bit of time when we will be installing our own
--- exception handler.
-{-# INLINE rawForkIO #-}
-rawForkIO :: IO () -> IO ThreadId
-rawForkIO action = IO $ \ s ->
- case (fork# action s) of (# s1, tid #) -> (# s1, ThreadId tid #)
-
-{-# INLINE doFork #-}
-doFork :: MonadBaseControl IO m
- => m ()
- -> (SomeException -> IO ())
- -> m ThreadId
-doFork action exHandler =
- control $ \runInIO ->
- mask $ \restore -> do
- tid <- rawForkIO $ catch (restore $ void $ runInIO action)
- exHandler
- runInIO (return tid)
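
-- A hypothetical usage sketch of doFork in plain IO: fork a worker with a
-- handler that just prints the exception. In the library the handler is
-- handleChildException, which reports the failure back to the SVar. The name
-- _doForkDemo is not part of the library.
_doForkDemo :: IO ThreadId
_doForkDemo =
    doFork (putStrLn "worker running")
           (\e -> putStrLn ("worker died: " ++ show e))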
-
--- XXX exception safety of all atomic/MVar operations
-
--- TBD Each worker can have their own queue and the consumer can empty one
--- queue at a time, that way contention can be reduced.
-
-maxOutputQLen :: Int
-maxOutputQLen = 1500
-
--- | This function is used by the producer threads to queue output for the
--- consumer thread to consume. Returns whether the queue has more space.
-{-# NOINLINE send #-}
-send :: SVar m a -> ChildEvent a -> IO Bool
-send sv msg = do
- len <- atomicModifyIORefCAS (outputQueue sv) $ \(es, n) ->
- ((msg : es, n + 1), n)
- when (len <= 0) $ do
- -- The wake up must happen only after the store has finished otherwise
- -- we can have lost wakeup problems.
- writeBarrier
- -- Since multiple workers can try this at the same time, it is possible
- -- that we may put a spurious MVar after the consumer has already seen
- -- the output. But that's harmless, at worst it may cause the consumer
- -- to read the queue again and find it empty.
- -- The important point is that the consumer is guaranteed to receive a
- -- doorbell if something was added to the queue after it empties it.
- void $ tryPutMVar (doorBell sv) ()
- return (len < maxOutputQLen)
-
-{-# NOINLINE sendStop #-}
-sendStop :: SVar m a -> IO ()
-sendStop sv = do
- liftIO $ atomicModifyIORefCAS_ (activeWorkers sv) $ \n -> n - 1
- myThreadId >>= \tid -> void $ send sv (ChildStop tid Nothing)
-
--------------------------------------------------------------------------------
--- Async
--------------------------------------------------------------------------------
-
--- Note: For purely right associated expressions this queue should have at most
--- one element. It grows to more than one when we have left associated
--- expressions. Large left associated compositions can grow this to a
--- large size.
-{-# INLINE enqueueLIFO #-}
-enqueueLIFO :: SVar m a -> IORef [Stream m a] -> Stream m a -> IO ()
-enqueueLIFO sv q m = do
- atomicModifyIORefCAS_ q $ \ms -> m : ms
- storeLoadBarrier
- w <- readIORef $ waitingForWork sv
- when w $ do
- -- Note: the sequence of operations is important for correctness here.
- -- We need to set the flag to false strictly before sending the
- -- doorBell, otherwise the doorBell may get processed too early and
-    -- then we may set the flag to False later, making the consumer lose
- -- the flag, even without receiving a doorBell.
- atomicModifyIORefCAS_ (waitingForWork sv) (const False)
- void $ tryPutMVar (doorBell sv) ()
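
-- A simplified, self-contained model (not the library code) of the
-- waitingForWork/doorBell handshake used above; it ignores the memory
-- barriers and CAS used by the real code, and all names are hypothetical.
-- The producer enqueues and rings the doorbell only if the consumer declared
-- itself waiting; the consumer sets the flag and re-checks the queue before
-- blocking, so a wakeup cannot be lost. A spurious doorbell is harmless.
_produce :: IORef [a] -> IORef Bool -> MVar () -> a -> IO ()
_produce q waiting bell x = do
    atomicModifyIORef q (\xs -> (x : xs, ()))
    w <- readIORef waiting
    when w $ void $ tryPutMVar bell ()

_consume :: IORef [a] -> IORef Bool -> MVar () -> IO [a]
_consume q waiting bell = do
    xs <- atomicModifyIORef q (\ys -> ([], ys))
    if not (null xs)
        then return (reverse xs)
        else do
            atomicModifyIORef waiting (const (True, ()))
            ys <- atomicModifyIORef q (\zs -> ([], zs))
            if null ys
                then takeMVar bell >> _consume q waiting bell
                else return (reverse ys)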
-
-runqueueLIFO :: MonadIO m => SVar m a -> IORef [Stream m a] -> m ()
-runqueueLIFO sv q = run
-
- where
-
- run = do
- work <- dequeue
- case work of
- Nothing -> liftIO $ sendStop sv
- Just m -> (runStream m) (Just sv) run single yield
-
- single a = do
- res <- liftIO $ send sv (ChildYield a)
- if res then run else liftIO $ sendStop sv
- yield a r = do
- res <- liftIO $ send sv (ChildYield a)
- if res
- then (runStream r) (Just sv) run single yield
- else liftIO $ enqueueLIFO sv q r >> sendStop sv
-
- dequeue = liftIO $ atomicModifyIORefCAS q $ \case
- [] -> ([], Nothing)
- x : xs -> (xs, Just x)
-
--------------------------------------------------------------------------------
--- WAsync
--------------------------------------------------------------------------------
-
--- XXX we can use the Ahead style sequence/heap mechanism to make the best
--- effort to always try to finish the streams on the left side of an expression
--- first as long as possible.
-
-{-# INLINE enqueueFIFO #-}
-enqueueFIFO :: SVar m a -> LinkedQueue (Stream m a) -> Stream m a -> IO ()
-enqueueFIFO sv q m = do
- pushL q m
- storeLoadBarrier
- w <- readIORef $ waitingForWork sv
- when w $ do
- -- Note: the sequence of operations is important for correctness here.
- -- We need to set the flag to false strictly before sending the
- -- doorBell, otherwise the doorBell may get processed too early and
-    -- then we may set the flag to False later, making the consumer lose
- -- the flag, even without receiving a doorBell.
- atomicModifyIORefCAS_ (waitingForWork sv) (const False)
- void $ tryPutMVar (doorBell sv) ()
-
-runqueueFIFO :: MonadIO m => SVar m a -> LinkedQueue (Stream m a) -> m ()
-runqueueFIFO sv q = run
-
- where
-
- run = do
- work <- dequeue
- case work of
- Nothing -> liftIO $ sendStop sv
- Just m -> (runStream m) (Just sv) run single yield
-
- dequeue = liftIO $ tryPopR q
- single a = do
- res <- liftIO $ send sv (ChildYield a)
- if res then run else liftIO $ sendStop sv
- yield a r = do
- res <- liftIO $ send sv (ChildYield a)
- liftIO (enqueueFIFO sv q r)
- if res then run else liftIO $ sendStop sv
-
--------------------------------------------------------------------------------
--- Parallel
--------------------------------------------------------------------------------
-
-{-# NOINLINE runOne #-}
-runOne :: MonadIO m => SVar m a -> Stream m a -> m ()
-runOne sv m = (runStream m) (Just sv) stop single yield
-
- where
-
- stop = liftIO $ sendStop sv
- sendit a = liftIO $ send sv (ChildYield a)
- single a = sendit a >> stop
-    -- XXX there is no flow control in the parallel case. We should perhaps
-    -- use a queue, queue the remaining work back on it, and exit the thread
-    -- when the outputQueue overflows. Parallel is dangerous because it can
-    -- accumulate unbounded output in the buffer.
- yield a r = void (sendit a) >> runOne sv r
-
--------------------------------------------------------------------------------
--- Ahead
--------------------------------------------------------------------------------
-
--- Lookahead streams can execute multiple tasks concurrently, ahead of time,
--- but always serve them in the same order as they appear in the stream. To
--- implement lookahead streams efficiently we assign a sequence number to each
--- task when the task is picked up for execution. When the task finishes, the
--- output is tagged with the same sequence number and we rearrange the outputs
--- in sequence based on that number.
---
--- To explain the mechanism imagine that the current task at the head of the
--- stream has a "token" to yield to the outputQueue. The ownership of the token
--- is determined by the current sequence number is maintained in outputHeap.
--- Sequence number is assigned when a task is queued. When a thread dequeues a
--- task it picks up the sequence number as well and when the output is ready it
--- uses the sequence number to queue the output to the outputQueue.
---
--- The thread with current sequence number sends the output directly to the
--- outputQueue. Other threads push the output to the outputHeap. When the task
--- being queued on the heap is a stream of many elements we evaluate only the
--- first element and keep the rest of the unevaluated computation in the heap.
--- When such a task gets the "token" for outputQueue it evaluates and directly
--- yields all the elements to the outputQueue without checking for the
--- "token".
---
--- Note that no two outputs in the heap can have the same sequence numbers and
--- therefore we do not need a stable heap. We have also separated the buffer
--- for the current task (outputQueue) and the pending tasks (outputHeap) so
--- that the pending tasks cannot interfere with the current task. Note that for
--- a single task just the outputQueue is enough and for the case of many
--- threads just a heap is good enough. However we balance between these two
--- cases, so that both are efficient.
---
--- For bigger streams it may make sense to have separate buffers for each
--- stream; however, for singleton streams this may become inefficient. On the
--- other hand, if we do not have separate buffers, then the streams that come
--- later in sequence may hog the buffer, hindering the streams that are ahead.
--- For this reason we limit the streams being executed in advance to a single
--- element buffer.
---
--- This scheme works pretty efficiently with less than 40% extra overhead
--- compared to the Async streams where we do not have any kind of sequencing of
--- the outputs. It is especially devised so that we are most efficient when we
--- have short tasks and need just a single thread. Also when a thread yields
--- many items it can hold lock-free access to the outputQueue and do it
--- efficiently.
---
--- XXX Maybe we can start the ahead threads at a lower cpu and IO priority so
--- that they do not hog the resources and hinder the progress of the threads in
--- front of them.
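
-- A standalone sketch (not the library code) of the resequencing idea
-- described above: outputs arrive tagged with sequence numbers, possibly out
-- of order; an output owning the next expected number is released at once,
-- anything else is parked in a min-heap keyed on its sequence number until
-- its turn comes. Leftover heap entries are simply dropped here; the real
-- code guarantees they are eventually drained. The name _resequence is
-- hypothetical; H and Entry are the Data.Heap imports already used by this
-- module.
_resequence :: Int -> H.Heap (Entry Int a) -> [(Int, a)] -> [a]
_resequence next hp inputs =
    case H.uncons hp of
        Just (Entry sq v, hp') | sq == next ->
            v : _resequence (next + 1) hp' inputs
        _ -> case inputs of
            [] -> []
            (sq, v) : rest
                | sq == next -> v : _resequence (next + 1) hp rest
                | otherwise  ->
                    _resequence next (H.insert (Entry sq v) hp) rest
--
-- For example, _resequence 0 H.empty [(1,'b'),(0,'a'),(2,'c')] is expected to
-- produce "abc".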
-
--- Left associated ahead expressions are expensive. We start a new SVar for
--- each left associative expression. The queue is used only for right
--- associated expressions; we queue the right expression and execute the left.
--- Therefore the queue never has more than one item in it.
-{-# INLINE enqueueAhead #-}
-enqueueAhead :: SVar m a -> IORef ([Stream m a], Int) -> Stream m a -> IO ()
-enqueueAhead sv q m = do
- atomicModifyIORefCAS_ q $ \ case
- ([], n) -> ([m], n + 1) -- increment sequence
- _ -> error "not empty"
- storeLoadBarrier
- w <- readIORef $ waitingForWork sv
- when w $ do
- -- Note: the sequence of operations is important for correctness here.
- -- We need to set the flag to false strictly before sending the
- -- doorBell, otherwise the doorBell may get processed too early and
-    -- then we may set the flag to False later, making the consumer lose
- -- the flag, even without receiving a doorBell.
- atomicModifyIORefCAS_ (waitingForWork sv) (const False)
- void $ tryPutMVar (doorBell sv) ()
-
--- Normally the thread that has the token should never go away. The token gets
--- handed over to another thread, but some thread always holds it at any point
--- of time. However, if the task that has the token finds that the outputQueue
--- is full, it can go away without even handing over the token to another
--- thread. In that case it sets the next sequence number in the heap to its
--- own sequence number before going away. To handle this case, any task that
--- does not have the token tries to dequeue from the heap first before
--- dequeuing from the work queue. If it finds that the task at the top of the
--- heap is the one that owns the current sequence number then it grabs the
--- token and starts with that.
---
--- XXX instead of queueing just the head element and the remaining computation
--- on the heap, evaluate as many as we can and place them on the heap. But we
--- need to give higher priority to the lower sequence numbers so that lower
--- priority tasks do not fill up the heap making higher priority tasks block
--- due to full heap. Maybe we can have a weighted space for them in the heap.
--- The weight is inversely proportional to the sequence number.
---
--- XXX review for livelock
---
-runqueueAhead :: MonadIO m => SVar m a -> IORef ([Stream m a], Int) -> m ()
-runqueueAhead sv q = runHeap
-
- where
-
- maxHeap = 1500
-
- toHeap seqNo ent = do
- hp <- liftIO $ atomicModifyIORefCAS (outputHeap sv) $ \(h, snum) ->
- ((H.insert (Entry seqNo ent) h, snum), h)
- if H.size hp <= maxHeap
- then runHeap
- else liftIO $ sendStop sv
-
- singleToHeap seqNo a = toHeap seqNo (AheadEntryPure a)
- yieldToHeap seqNo a r = toHeap seqNo (AheadEntryStream (a `cons` r))
-
- singleOutput seqNo a = do
- continue <- liftIO $ send sv (ChildYield a)
- if continue
- then runQueueToken seqNo
- else liftIO $ do
- atomicModifyIORefCAS_ (outputHeap sv) $ \(h, _) -> (h, seqNo + 1)
- sendStop sv
-
- yieldOutput seqNo a r = do
- continue <- liftIO $ send sv (ChildYield a)
- if continue
- then (runStream r) (Just sv) (runQueueToken seqNo)
- (singleOutput seqNo)
- (yieldOutput seqNo)
- else liftIO $ do
- atomicModifyIORefCAS_ (outputHeap sv) $ \(h, _) ->
- (H.insert (Entry seqNo (AheadEntryStream r)) h, seqNo)
- sendStop sv
-
- {-# INLINE runQueueToken #-}
- runQueueToken prevSeqNo = do
- work <- dequeue
- case work of
- Nothing -> do
- liftIO $ atomicModifyIORefCAS_ (outputHeap sv) $ \(h, _) ->
- (h, prevSeqNo + 1)
- runHeap
- Just (m, seqNo) -> do
- if seqNo == prevSeqNo + 1
- then
- (runStream m) (Just sv) (runQueueToken seqNo)
- (singleOutput seqNo)
- (yieldOutput seqNo)
- else do
- liftIO $ atomicModifyIORefCAS_ (outputHeap sv) $ \(h, _) ->
- (h, prevSeqNo + 1)
- (runStream m) (Just sv) runHeap
- (singleToHeap seqNo)
- (yieldToHeap seqNo)
- runQueueNoToken = do
- work <- dequeue
- case work of
- Nothing -> runHeap
- Just (m, seqNo) -> do
- if seqNo == 0
- then
- (runStream m) (Just sv) (runQueueToken seqNo)
- (singleOutput seqNo)
- (yieldOutput seqNo)
- else
- (runStream m) (Just sv) runHeap
- (singleToHeap seqNo)
- (yieldToHeap seqNo)
-
- {-# NOINLINE runHeap #-}
- runHeap = do
-#ifdef DIAGNOSTICS
- liftIO $ do
- maxHp <- readIORef (maxHeapSize sv)
- (hp, _) <- readIORef (outputHeap sv)
- when (H.size hp > maxHp) $ writeIORef (maxHeapSize sv) (H.size hp)
-#endif
- ent <- liftIO $ dequeueFromHeap (outputHeap sv)
- case ent of
- Nothing -> do
- done <- queueEmpty sv
- if done
- then liftIO $ sendStop sv
- else runQueueNoToken
- Just (Entry seqNo hent) -> do
- case hent of
- AheadEntryPure a -> singleOutput seqNo a
- AheadEntryStream r ->
- (runStream r) (Just sv) (runQueueToken seqNo)
- (singleOutput seqNo)
- (yieldOutput seqNo)
-
- dequeue = liftIO $ do
- atomicModifyIORefCAS q $ \case
- ([], n) -> (([], n), Nothing)
- (x : [], n) -> (([], n), Just (x, n))
- _ -> error "more than one item on queue"
-
- dequeueFromHeap
- :: IORef (Heap (Entry Int (AheadHeapEntry m a)), Int)
- -> IO (Maybe (Entry Int (AheadHeapEntry m a)))
- dequeueFromHeap hpRef = do
- atomicModifyIORefCAS hpRef $ \hp@(h, snum) -> do
- let r = H.uncons h
- case r of
- Nothing -> (hp, Nothing)
- Just (ent@(Entry seqNo _ev), hp') ->
- if (seqNo == snum)
- then ((hp', seqNo), Just ent)
- else (hp, Nothing)
-
--------------------------------------------------------------------------------
--- WAhead
--------------------------------------------------------------------------------
-
--- XXX To be implemented. Use a linked queue like WAsync and put back the
--- remaining computation at the back of the queue instead of the heap, and
--- increment the sequence number.
-
--- Thread tracking is needed for two reasons:
---
--- 1) Killing threads on exceptions. Threads may not be left to go away by
--- themselves because they may run for significant times before going away or
--- worse they may be stuck in IO and never go away.
---
--- 2) To know when all threads are done and the stream has ended.
-
-{-# NOINLINE addThread #-}
-addThread :: MonadIO m => SVar m a -> ThreadId -> m ()
-addThread sv tid =
- liftIO $ modifyIORef (runningThreads sv) (S.insert tid)
-
--- This is cheaper than modifyThread because we do not have to send a doorBell.
--- This can make a difference when more workers are being dispatched.
-{-# INLINE delThread #-}
-delThread :: MonadIO m => SVar m a -> ThreadId -> m ()
-delThread sv tid =
- liftIO $ modifyIORef (runningThreads sv) $ (\s -> S.delete tid s)
-
--- If present then delete else add. This takes care of out-of-order add and
--- delete, i.e., a delete arriving before we even added a thread.
--- This occurs when the forked thread is done even before the 'addThread' right
--- after the fork gets a chance to run.
-{-# INLINE modifyThread #-}
-modifyThread :: MonadIO m => SVar m a -> ThreadId -> m ()
-modifyThread sv tid = do
- changed <- liftIO $ atomicModifyIORefCAS (runningThreads sv) $ \old ->
- if (S.member tid old)
- then let new = (S.delete tid old) in (new, new)
- else let new = (S.insert tid old) in (new, old)
- if null changed
- then liftIO $ do
- writeBarrier
- void $ tryPutMVar (doorBell sv) ()
- else return ()
-
--- | This is safe even if we are adding more threads concurrently because if
--- a child thread is adding another thread then anyway 'runningThreads' will
--- not be empty.
-{-# INLINE allThreadsDone #-}
-allThreadsDone :: MonadIO m => SVar m a -> m Bool
-allThreadsDone sv = liftIO $ S.null <$> readIORef (runningThreads sv)
-
-{-# NOINLINE handleChildException #-}
-handleChildException :: SVar m a -> SomeException -> IO ()
-handleChildException sv e = do
- tid <- myThreadId
- void $ send sv (ChildStop tid (Just e))
-
-#ifdef DIAGNOSTICS
-recordMaxWorkers :: MonadIO m => SVar m a -> m ()
-recordMaxWorkers sv = liftIO $ do
- active <- readIORef (activeWorkers sv)
- maxWrk <- readIORef (maxWorkers sv)
- when (active > maxWrk) $ writeIORef (maxWorkers sv) active
- modifyIORef (totalDispatches sv) (+1)
-#endif
-
-{-# NOINLINE pushWorker #-}
-pushWorker :: MonadAsync m => SVar m a -> m ()
-pushWorker sv = do
- liftIO $ atomicModifyIORefCAS_ (activeWorkers sv) $ \n -> n + 1
-#ifdef DIAGNOSTICS
- recordMaxWorkers sv
-#endif
- doFork (runqueue sv) (handleChildException sv) >>= addThread sv
-
--- | In contrast to pushWorker which always happens only from the consumer
--- thread, a pushWorkerPar can happen concurrently from multiple threads on the
--- producer side. So we need to use a thread-safe modification of
--- runningThreads. Alternatively, we can use a CreateThread event to avoid
--- using a CAS based modification.
-{-# NOINLINE pushWorkerPar #-}
-pushWorkerPar :: MonadAsync m => SVar m a -> Stream m a -> m ()
-pushWorkerPar sv m = do
- -- We do not use activeWorkers in case of ParallelVar but still there is no
- -- harm in maintaining it correctly.
-#ifdef DIAGNOSTICS
- liftIO $ atomicModifyIORefCAS_ (activeWorkers sv) $ \n -> n + 1
- recordMaxWorkers sv
-#endif
- doFork (runOne sv m) (handleChildException sv) >>= modifyThread sv
-
-{-# INLINE workDone #-}
-workDone :: MonadIO m => SVar m a -> m Bool
-workDone sv = do
- heapDone <-
- if (svarStyle sv == AheadVar)
- then do
- (hp, _) <- liftIO $ readIORef (outputHeap sv)
- return (H.size hp <= 0)
- else return True
- queueDone <- queueEmpty sv
- return $ queueDone && heapDone
-
-maxWorkerLimit :: Int
-maxWorkerLimit = 1500
-
-dispatchWorker :: MonadAsync m => SVar m a -> m ()
-dispatchWorker sv = do
- done <- workDone sv
- when (not done) $ do
- -- Note that the worker count is only decremented during event
- -- processing in fromStreamVar and therefore it is safe to read and
- -- use it without a lock.
- cnt <- liftIO $ readIORef $ activeWorkers sv
-        -- Note that we may deadlock if the previous workers (tasks in the
-        -- stream) wait for or depend on the future workers (tasks in the
-        -- stream) to execute. In that case we should either configure a
-        -- higher maxWorker count or use the parallel style instead of the
-        -- ahead or async style.
- when (cnt < maxWorkerLimit) $ pushWorker sv
-
-{-# NOINLINE sendWorkerWait #-}
-sendWorkerWait :: MonadAsync m => SVar m a -> m ()
-sendWorkerWait sv = do
- -- Note that we are guaranteed to have at least one outstanding worker when
- -- we enter this function. So if we sleep we are guaranteed to be woken up
- -- by a doorBell, when the worker exits.
-
- -- XXX we need a better way to handle this than hardcoded delays. The
- -- delays may be different for different systems.
- ncpu <- liftIO $ getNumCapabilities
- if ncpu <= 1
- then
- if (svarStyle sv == AheadVar)
- then liftIO $ threadDelay 100
- else liftIO $ threadDelay 25
- else
- if (svarStyle sv == AheadVar)
- then liftIO $ threadDelay 100
- else liftIO $ threadDelay 10
-
- (_, n) <- liftIO $ readIORef (outputQueue sv)
- when (n <= 0) $ do
- -- The queue may be empty temporarily if the worker has dequeued the
- -- work item but has not enqueued the remaining part yet. For the same
- -- reason, a worker may come back if it tries to dequeue and finds the
- -- queue empty, even though the whole work has not finished yet.
-
-        -- The queue may have been only temporarily empty at the time we
-        -- checked it. If that's the case we might sleep indefinitely unless
-        -- the active workers produce some output. We may deadlock especially
-        -- if the output from the active workers depends on the future
-        -- workers that we may never send. So in case the queue was
-        -- temporarily empty set a flag to inform the enqueue to send us a
-        -- doorbell.
-
- -- Note that this is just a best effort mechanism to avoid a
- -- deadlock. Deadlocks may still happen if for some weird reason
- -- the consuming computation shares an MVar or some other resource
- -- with the producing computation and gets blocked on that resource
- -- and therefore cannot do any pushworker to add more threads to
- -- the producer. In such cases the programmer should use a parallel
- -- style so that all the producers are scheduled immediately and
- -- unconditionally. We can also use a separate monitor thread to
- -- push workers instead of pushing them from the consumer, but then
- -- we are no longer using pull based concurrency rate adaptation.
- --
- -- XXX update this in the tutorial.
-
- -- register for the doorBell before we check the queue so that if we
- -- sleep because the queue was empty we are guaranteed to get a
- -- doorbell on the next enqueue.
-
- liftIO $ atomicModifyIORefCAS_ (waitingForWork sv) $ const True
- liftIO $ storeLoadBarrier
- dispatchWorker sv
-
- -- XXX test for the case when we miss sending a worker when the worker
- -- count is more than 1500.
- --
- -- XXX Assert here that if the heap is not empty then there is at
- -- least one outstanding worker. Otherwise we could be sleeping
- -- forever.
-
- done <- workDone sv
- if done
- then do
- liftIO $ withDBGMVar sv "sendWorkerWait: nothing to do"
- $ takeMVar (doorBell sv)
- (_, len) <- liftIO $ readIORef (outputQueue sv)
- when (len <= 0) $ sendWorkerWait sv
- else sendWorkerWait sv
-
--- | Pull a stream from an SVar.
-{-# NOINLINE fromStreamVar #-}
-fromStreamVar :: MonadAsync m => SVar m a -> Stream m a
-fromStreamVar sv = Stream $ \_ stp sng yld -> do
- (list, _) <-
- -- XXX we can set this in SVar
- if svarStyle sv == ParallelVar
- then do
- liftIO $ withDBGMVar sv "fromStreamVar: doorbell"
- $ takeMVar (doorBell sv)
- readOutputQ sv
- else do
- res@(_, len) <- readOutputQ sv
- -- When there is no output seen we dispatch more workers to help
- -- out if there is work pending in the work queue.
- if len <= 0
- then blockingRead
- else do
-                -- Send a worker proactively, if needed, even before we start
-                -- processing the output. This may degrade single processor
-                -- performance but improves multi-processor performance
-                -- because of more parallelism.
- sendWorker
- return res
-
- -- Reversing the output is important to guarantee that we process the
- -- outputs in the same order as they were generated by the constituent
- -- streams.
- runStream (processEvents $ reverse list) Nothing stp sng yld
-
- where
-
- {-# INLINE readOutputQ #-}
- readOutputQ svr = liftIO $ do
- (list, len) <- atomicModifyIORefCAS (outputQueue svr) $
- \x -> (([],0), x)
-#ifdef DIAGNOSTICS
- oqLen <- readIORef (maxOutQSize svr)
- when (len > oqLen) $ writeIORef (maxOutQSize svr) len
-#endif
- return (list, len)
-
- sendWorker = do
- cnt <- liftIO $ readIORef $ activeWorkers sv
- when (cnt <= 0) $ do
- done <- workDone sv
- when (not done) $ pushWorker sv
-
- {-# INLINE blockingRead #-}
- blockingRead = do
- sendWorkerWait sv
- readOutputQ sv
-
- allDone stp = do
-#ifdef DIAGNOSTICS
-#ifdef DIAGNOSTICS_VERBOSE
- svInfo <- liftIO $ dumpSVar sv
- liftIO $ hPutStrLn stderr $ "fromStreamVar done\n" ++ svInfo
-#endif
-#endif
- stp
-
- {-# INLINE processEvents #-}
- processEvents [] = Stream $ \_ stp sng yld -> do
- workersDone <- allThreadsDone sv
- done <-
- -- XXX we can set this in SVar
- if svarStyle sv == ParallelVar
- then return workersDone
- else
- -- There may still be work pending even if there are no workers
- -- pending because all the workers may return if the
- -- outputQueue becomes full. In that case send off a worker to
- -- kickstart the work again.
- if workersDone
- then do
- r <- workDone sv
- when (not r) $ pushWorker sv
- return r
- else return False
-
- if done
- then allDone stp
- else runStream (fromStreamVar sv) Nothing stp sng yld
-
- processEvents (ev : es) = Stream $ \_ stp sng yld -> do
- let rest = processEvents es
- case ev of
- ChildYield a -> yld a rest
- ChildStop tid e -> do
- if svarStyle sv == ParallelVar
- then modifyThread sv tid
- else delThread sv tid
- case e of
- Nothing -> runStream rest Nothing stp sng yld
- Just ex -> throwM ex
-
-getFifoSVar :: MonadIO m => SVarStyle -> IO (SVar m a)
-getFifoSVar ctype = do
- outQ <- newIORef ([], 0)
- outQMv <- newEmptyMVar
- active <- newIORef 0
- wfw <- newIORef False
- running <- newIORef S.empty
- q <- newQ
-#ifdef DIAGNOSTICS
- disp <- newIORef 0
- maxWrk <- newIORef 0
- maxOq <- newIORef 0
- maxHs <- newIORef 0
- maxWq <- newIORef 0
-#endif
- let sv =
- SVar { outputQueue = outQ
- , doorBell = outQMv
- , outputHeap = undefined
- , runningThreads = running
- , workQueue = undefined
- , runqueue = runqueueFIFO sv q
- , enqueue = enqueueFIFO sv q
- , queueEmpty = liftIO $ nullQ q
- , waitingForWork = wfw
- , svarStyle = ctype
- , activeWorkers = active
-#ifdef DIAGNOSTICS
- , totalDispatches = disp
- , maxWorkers = maxWrk
- , maxOutQSize = maxOq
- , maxHeapSize = maxHs
- , maxWorkQSize = maxWq
-#endif
- }
- in return sv
-
-getLifoSVar :: MonadIO m => SVarStyle -> IO (SVar m a)
-getLifoSVar ctype = do
- outQ <- newIORef ([], 0)
- outQMv <- newEmptyMVar
- active <- newIORef 0
- wfw <- newIORef False
- running <- newIORef S.empty
- q <- newIORef []
-#ifdef DIAGNOSTICS
- disp <- newIORef 0
- maxWrk <- newIORef 0
- maxOq <- newIORef 0
- maxHs <- newIORef 0
- maxWq <- newIORef 0
-#endif
- let checkEmpty = null <$> liftIO (readIORef q)
- let sv =
- SVar { outputQueue = outQ
- , doorBell = outQMv
- , outputHeap = undefined
- , runningThreads = running
- , workQueue = undefined
- , runqueue = runqueueLIFO sv q
- , enqueue = enqueueLIFO sv q
- , queueEmpty = checkEmpty
- , waitingForWork = wfw
- , svarStyle = ctype
- , activeWorkers = active
-#ifdef DIAGNOSTICS
- , maxWorkers = maxWrk
- , totalDispatches = disp
- , maxOutQSize = maxOq
- , maxHeapSize = maxHs
- , maxWorkQSize = maxWq
-#endif
- }
- in return sv
-
-getParSVar :: SVarStyle -> IO (SVar m a)
-getParSVar style = do
- outQ <- newIORef ([], 0)
- outQMv <- newEmptyMVar
- active <- newIORef 0
- wfw <- newIORef False
- running <- newIORef S.empty
-#ifdef DIAGNOSTICS
- disp <- newIORef 0
- maxWrk <- newIORef 0
- maxOq <- newIORef 0
- maxHs <- newIORef 0
- maxWq <- newIORef 0
-#endif
- let sv =
- SVar { outputQueue = outQ
- , doorBell = outQMv
- , outputHeap = undefined
- , runningThreads = running
- , workQueue = undefined
- , runqueue = undefined
- , enqueue = undefined
- , queueEmpty = undefined
- , waitingForWork = wfw
- , svarStyle = style
- , activeWorkers = active
-#ifdef DIAGNOSTICS
- , totalDispatches = disp
- , maxWorkers = maxWrk
- , maxOutQSize = maxOq
- , maxHeapSize = maxHs
- , maxWorkQSize = maxWq
-#endif
- }
- in return sv
-
-getAheadSVar :: MonadIO m => SVarStyle -> IO (SVar m a)
-getAheadSVar style = do
- outQ <- newIORef ([], 0)
- outH <- newIORef (H.empty, 0)
- outQMv <- newEmptyMVar
- active <- newIORef 0
- wfw <- newIORef False
- running <- newIORef S.empty
- q <- newIORef ([], -1)
-
-#ifdef DIAGNOSTICS
- disp <- newIORef 0
- maxWrk <- newIORef 0
- maxOq <- newIORef 0
- maxHs <- newIORef 0
- maxWq <- newIORef 0
-#endif
-
- let checkEmpty = liftIO $ do
- (xs, _) <- readIORef q
- return $ null xs
- let sv =
- SVar { outputQueue = outQ
- , doorBell = outQMv
- , outputHeap = outH
- , runningThreads = running
- , workQueue = q
- , runqueue = runqueueAhead sv q
- , enqueue = undefined
- , queueEmpty = checkEmpty
- , waitingForWork = wfw
- , svarStyle = style
- , activeWorkers = active
-
-#ifdef DIAGNOSTICS
- , totalDispatches = disp
- , maxWorkers = maxWrk
- , maxOutQSize = maxOq
- , maxHeapSize = maxHs
- , maxWorkQSize = maxWq
-#endif
- }
- in return sv
-
--- | Create a new empty SVar.
-newEmptySVar :: MonadAsync m => SVarStyle -> m (SVar m a)
-newEmptySVar style = do
- liftIO $
- case style of
- WAsyncVar -> getFifoSVar style
- AsyncVar -> getLifoSVar style
- ParallelVar -> getParSVar style
- AheadVar -> getAheadSVar style
-
--- | Create a new SVar and enqueue one stream computation on it.
-{-# INLINABLE newStreamVar1 #-}
-newStreamVar1 :: MonadAsync m => SVarStyle -> Stream m a -> m (SVar m a)
-newStreamVar1 style m = do
- sv <- newEmptySVar style
- -- Note: We must have all the work on the queue before sending the
- -- pushworker, otherwise the pushworker may exit before we even get a
- -- chance to push.
- if style == ParallelVar
- then pushWorkerPar sv m
- else do
- liftIO $ (enqueue sv) m
- pushWorker sv
- return sv
-
--- | Create a new SVar and enqueue one stream computation on it.
-{-# INLINABLE newStreamVarAhead #-}
-newStreamVarAhead :: MonadAsync m => Stream m a -> m (SVar m a)
-newStreamVarAhead m = do
- sv <- newEmptySVar AheadVar
- -- Note: We must have all the work on the queue before sending the
- -- pushworker, otherwise the pushworker may exit before we even get a
- -- chance to push.
- liftIO $ enqueueAhead sv (workQueue sv) m
- pushWorker sv
- return sv
-
--- | Write a stream to an 'SVar' in a non-blocking manner. The stream can then
--- be read back from the SVar using 'fromSVar'.
-toStreamVar :: MonadAsync m => SVar m a -> Stream m a -> m ()
-toStreamVar sv m = do
- liftIO $ (enqueue sv) m
- done <- allThreadsDone sv
- -- XXX This is safe only when called from the consumer thread or when no
- -- consumer is present. There may be a race if we are not running in the
- -- consumer thread.
- when done $ pushWorker sv
-
-------------------------------------------------------------------------------
--- Running streams concurrently
-------------------------------------------------------------------------------
-
--- Concurrency rate control.
---
--- Our objective is to create more threads on demand if the consumer is running
--- faster than us. As soon as we encounter a concurrent composition we create a
--- push-pull pair of threads. We use an SVar for communication between the
--- consumer, which pulls from the SVar, and the producer, which pushes to the
--- SVar. The producer creates more threads if the SVar drains and becomes
--- empty, that is, when the consumer is running faster.
---
--- XXX Note 1: This mechanism can be problematic if the initial production
--- latency is high, we may end up creating too many threads. So we need some
--- way to monitor and use the latency as well. Having a limit on the dispatches
--- (programmer controlled) may also help.
---
--- TBD Note 2: We may want to run computations at the lower level of the
--- composition tree serially even when they are composed using a parallel
--- combinator. We can use 'serial' in place of 'async' and 'wSerial' in
--- place of 'wAsync'. If we find that an SVar immediately above a computation
--- gets drained empty we can switch to parallelizing the computation. For that
--- we can use a state flag to fork the rest of the computation at any point of
--- time inside the Monad bind operation if the consumer is running at a faster
--- speed.
---
--- TBD Note 3: the binary operation ('parallel') composition allows us to
--- dispatch a chunkSize of only 1. If we have to dispatch in arbitrary
--- chunksizes we will need to compose the parallel actions using a data
--- constructor (A Free container) instead so that we can divide it in chunks of
--- arbitrary size before dispatching. If the stream is composed of
--- hierarchically composed grains of different sizes then we can always switch
--- to a desired granularity depending on the consumer speed.
---
--- TBD Note 4: for pure work (when we are not in the IO monad) we can divide it
--- into just the number of CPUs.
-
--- | Join two computations on the currently running 'SVar' queue for concurrent
--- execution. When we are using parallel composition, an SVar is passed around
--- as a state variable. We try to schedule a new parallel computation on the
--- SVar passed to us. The first time, when no SVar exists, a new SVar is
--- created. Subsequently, 'joinStreamVarAsync' may get called when a computation
--- already scheduled on the SVar is further evaluated. For example, when (a
--- `parallel` b) is evaluated it calls a 'joinStreamVarAsync' to put 'a' and 'b' on
--- the current scheduler queue.
---
--- The 'SVarStyle' required by the current composition context is passed as one
--- of the parameters. If the scheduling and composition style of the new
--- computation being scheduled is different from the style of the current SVar,
--- then we create a new SVar and schedule it on that. The newly created SVar
--- joins as one of the computations on the current SVar queue.
---
--- Cases when we need to switch to a new SVar:
---
--- * (x `parallel` y) `parallel` (t `parallel` u) -- all of them get scheduled on the same SVar
--- * (x `parallel` y) `parallel` (t `async` u) -- @t@ and @u@ get scheduled on a new child SVar
--- because of the scheduling policy change.
--- * if we 'adapt' a stream of type 'async' to a stream of type
--- 'Parallel', we create a new SVar at the transitioning bind.
--- * When the stream is switching from disjunctive composition to conjunctive
--- composition and vice-versa we create a new SVar to isolate the scheduling
--- of the two.
-
-forkSVarAsync :: MonadAsync m => SVarStyle -> Stream m a -> Stream m a -> Stream m a
-forkSVarAsync style m1 m2 = Stream $ \_ stp sng yld -> do
- sv <- newStreamVar1 style (concurrently m1 m2)
- (runStream (fromStreamVar sv)) Nothing stp sng yld
- where
- concurrently ma mb = Stream $ \svr stp sng yld -> do
- liftIO $ enqueue (fromJust svr) mb
- (runStream ma) svr stp sng yld
-
-{-# INLINE joinStreamVarAsync #-}
-joinStreamVarAsync :: MonadAsync m
- => SVarStyle -> Stream m a -> Stream m a -> Stream m a
-joinStreamVarAsync style m1 m2 = Stream $ \svr stp sng yld ->
- case svr of
- Just sv | svarStyle sv == style ->
- liftIO ((enqueue sv) m2) >> (runStream m1) svr stp sng yld
- _ -> runStream (forkSVarAsync style m1 m2) Nothing stp sng yld
-
-{-# NOINLINE forkSVarPar #-}
-forkSVarPar :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
-forkSVarPar m r = Stream $ \_ stp sng yld -> do
- sv <- newEmptySVar ParallelVar
- pushWorkerPar sv m
- pushWorkerPar sv r
- (runStream (fromStreamVar sv)) Nothing stp sng yld
-
-{-# INLINE joinStreamVarPar #-}
-joinStreamVarPar :: MonadAsync m
- => SVarStyle -> Stream m a -> Stream m a -> Stream m a
-joinStreamVarPar style m1 m2 = Stream $ \svr stp sng yld ->
- case svr of
- Just sv | svarStyle sv == style -> do
- pushWorkerPar sv m1 >> (runStream m2) svr stp sng yld
- _ -> runStream (forkSVarPar m1 m2) Nothing stp sng yld
-
-forkSVarAhead :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
-forkSVarAhead m1 m2 = Stream $ \_ stp sng yld -> do
- sv <- newStreamVarAhead (concurrently m1 m2)
- (runStream (fromStreamVar sv)) Nothing stp sng yld
- where
- concurrently ma mb = Stream $ \svr stp sng yld -> do
- liftIO $ enqueueAhead (fromJust svr) (workQueue (fromJust svr)) mb
- (runStream ma) Nothing stp sng yld
-
-{-# INLINE ahead #-}
-ahead :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
-ahead m1 m2 = Stream $ \svr stp sng yld -> do
- case svr of
- Just sv | svarStyle sv == AheadVar -> do
- liftIO $ enqueueAhead sv (workQueue sv) m2
- -- Always run the left side on a new SVar to avoid complexity in
- -- sequencing results. This means the left side cannot further
- -- split into more ahead computations on the same SVar.
- (runStream m1) Nothing stp sng yld
- _ -> runStream (forkSVarAhead m1 m2) Nothing stp sng yld
-
--- | XXX we can implement it more efficiently with a direct implementation
--- instead of combining streams using ahead.
-{-# INLINE consMAhead #-}
-consMAhead :: MonadAsync m => m a -> Stream m a -> Stream m a
-consMAhead m r = once m `ahead` r
-
-------------------------------------------------------------------------------
--- Semigroup and Monoid style compositions for parallel actions
-------------------------------------------------------------------------------
-
-{-# INLINE async #-}
-async :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
-async = joinStreamVarAsync AsyncVar
-
--- | XXX we can implement it more efficiently with a direct implementation
--- instead of combining streams using async.
-{-# INLINE consMAsync #-}
-consMAsync :: MonadAsync m => m a -> Stream m a -> Stream m a
-consMAsync m r = once m `async` r
-
-{-# INLINE wAsync #-}
-wAsync :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
-wAsync = joinStreamVarAsync WAsyncVar
-
--- | XXX we can implement it more efficiently with a direct implementation
--- instead of combining streams using wAsync.
-{-# INLINE consMWAsync #-}
-consMWAsync :: MonadAsync m => m a -> Stream m a -> Stream m a
-consMWAsync m r = once m `wAsync` r
-
-{-# INLINE parallel #-}
-parallel :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
-parallel = joinStreamVarPar ParallelVar
-
--- | XXX we can implement it more efficiently with a direct implementation
--- instead of combining streams using parallel.
-{-# INLINE consMParallel #-}
-consMParallel :: MonadAsync m => m a -> Stream m a -> Stream m a
-consMParallel m r = once m `parallel` r
-
--------------------------------------------------------------------------------
--- Functor instance is the same for all types
--------------------------------------------------------------------------------
-
-instance Monad m => Functor (Stream m) where
- fmap f m = Stream $ \_ stp sng yld ->
- let single = sng . f
- yield a r = yld (f a) (fmap f r)
- in (runStream m) Nothing stp single yield
-
-------------------------------------------------------------------------------
--- Alternative & MonadPlus
-------------------------------------------------------------------------------
-
-_alt :: Stream m a -> Stream m a -> Stream m a
-_alt m1 m2 = Stream $ \_ stp sng yld ->
- let stop = runStream m2 Nothing stp sng yld
- in runStream m1 Nothing stop sng yld
-
-------------------------------------------------------------------------------
--- Stream to stream function application
-------------------------------------------------------------------------------
-
-applyWith :: MonadAsync m
- => SVarStyle -> (Stream m a -> Stream m b) -> Stream m a -> Stream m b
-applyWith style f m = Stream $ \svr stp sng yld -> do
- sv <- newStreamVar1 style m
- runStream (f $ fromStreamVar sv) svr stp sng yld
-
-------------------------------------------------------------------------------
--- Stream runner function application
-------------------------------------------------------------------------------
-
-runWith :: MonadAsync m
- => SVarStyle -> (Stream m a -> m b) -> Stream m a -> m b
-runWith style f m = do
- sv <- newStreamVar1 style m
- f $ fromStreamVar sv
-
-------------------------------------------------------------------------------
--- Zipping
-------------------------------------------------------------------------------
-
-{-# INLINE zipWith #-}
-zipWith :: (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c
-zipWith f m1 m2 = go m1 m2
- where
- go mx my = Stream $ \_ stp sng yld -> do
- let merge a ra =
- let single2 b = sng (f a b)
- yield2 b rb = yld (f a b) (go ra rb)
- in (runStream my) Nothing stp single2 yield2
- let single1 a = merge a nil
- yield1 a ra = merge a ra
- (runStream mx) Nothing stp single1 yield1
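
-- A small usage sketch, assuming the hypothetical _toListDemo drain sketched
-- earlier: zipping pairs elements positionally and stops at the shorter
-- stream, so the expected result here is [11,22].
_zipDemo :: Monad m => m [Int]
_zipDemo = _toListDemo $
    zipWith (+) (cons 1 (cons 2 (cons 3 nil)))
                (cons 10 (cons 20 nil))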
-
-{-# INLINE zipAsyncWith #-}
-zipAsyncWith :: MonadAsync m
- => (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c
-zipAsyncWith f m1 m2 = Stream $ \_ stp sng yld -> do
- ma <- mkAsync m1
- mb <- mkAsync m2
- (runStream (zipWith f ma mb)) Nothing stp sng yld
-
- where
-
- mkAsync :: MonadAsync m => Stream m a -> m (Stream m a)
- mkAsync m = newStreamVar1 AsyncVar m
- >>= return . fromStreamVar
-
--------------------------------------------------------------------------------
--- Transformers
--------------------------------------------------------------------------------
-
-instance MonadTrans Stream where
- lift = once
diff --git a/src/Streamly/Prelude.hs b/src/Streamly/Prelude.hs
index 247609c..01abb44 100644
--- a/src/Streamly/Prelude.hs
+++ b/src/Streamly/Prelude.hs
@@ -1,10 +1,17 @@
{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE CPP #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE UndecidableInstances #-} -- XXX
+#if __GLASGOW_HASKELL__ >= 800
+{-# OPTIONS_GHC -Wno-orphans #-}
+#endif
+
+#include "Streams/inline.h"
+
-- |
-- Module : Streamly.Prelude
-- Copyright : (c) 2017 Harendra Kumar
@@ -39,32 +46,40 @@
module Streamly.Prelude
(
-- * Construction
- -- | Primitives to construct or inspect a stream.
- nil
+ -- | Primitives to construct a stream.
+ K.nil
+ , K.cons
+ , (K..:)
, consM
, (|:)
- , cons
- , (.:)
- -- * Generation by Unfolding
+ -- * Deconstruction
+ , uncons
+
+ -- * Generation
+ -- ** Unfolds
, unfoldr
, unfoldrM
- -- * Special Generation
- -- | Generate a monadic stream from an input structure, a seed or a
- -- generation function.
- , once
+ -- ** Specialized Generation
+ -- | Generate a monadic stream from a seed.
, replicateM
+ , K.repeat
, repeatM
, iterate
, iterateM
- , fromFoldable
- , fromFoldableM
- -- * Deconstruction
- , uncons
+ -- ** Conversions
+ -- | Transform an input structure into a stream.
+ , yield
+ , yieldM
+ , fromList
+ , fromListM
+ , K.fromFoldable
+ , fromFoldableM
+ , fromHandle
- -- * Elimination by Folding
+ -- * Elimination
-- ** General Folds
, foldr
, foldrM
@@ -73,54 +88,65 @@ module Streamly.Prelude
, foldx
, foldxM
- -- ** Special Folds
- , mapM_
- , toList
- , all
- , any
+ -- ** Specialized Folds
+ , null
, head
, tail
, last
- , null
- , length
, elem
, notElem
+ , length
+ , all
+ , any
, maximum
, minimum
, sum
, product
- -- * Scans
+ -- ** Map and Fold
+ , mapM_
+
+ -- ** Conversions
+ -- | Transform a stream into an output structure of another type.
+ , toList
+ , toHandle
+
+ -- * Transformation
+ -- ** By folding (scans)
, scanl'
+ , scanlM'
, scanx
- -- * Filtering
+ -- ** Filtering
, filter
+ , filterM
, take
, takeWhile
+ , takeWhileM
, drop
, dropWhile
+ , dropWhileM
- -- * Reordering
- , reverse
-
- -- * Mapping
+ -- ** Mapping
+ , Serial.map
, mapM
+ , sequence
+
+ -- ** Map and Filter
, mapMaybe
, mapMaybeM
- , sequence
+
+ -- ** Reordering
+ , reverse
-- * Zipping
, zipWith
, zipWithM
- , zipAsyncWith
- , zipAsyncWithM
-
- -- * IO
- , fromHandle
- , toHandle
+ , Z.zipAsyncWith
+ , Z.zipAsyncWithM
-- * Deprecated
+ , K.once
, each
, scan
, foldl
@@ -128,26 +154,69 @@ module Streamly.Prelude
)
where
-import Control.Monad (void)
-import Control.Monad.IO.Class (MonadIO(..))
-import Data.Semigroup (Semigroup(..))
-import Data.Maybe (isJust, fromJust)
-import Prelude hiding (filter, drop, dropWhile, take,
- takeWhile, zipWith, foldr, foldl,
- mapM, mapM_, sequence, all, any,
- sum, product, elem, notElem,
- maximum, minimum, head, last,
- tail, length, null, reverse,
- iterate)
+import Control.Monad.IO.Class (MonadIO(..))
+import Data.Maybe (isJust, fromJust)
+import Prelude
+ hiding (filter, drop, dropWhile, take, takeWhile, zipWith, foldr,
+ foldl, map, mapM, mapM_, sequence, all, any, sum, product, elem,
+ notElem, maximum, minimum, head, last, tail, length, null,
+ reverse, iterate)
import qualified Prelude
import qualified System.IO as IO
-import qualified Streamly.Core as S
-import Streamly.Core (Stream(Stream))
-import Streamly.Streams
+import Streamly.SVar (MonadAsync, defState, rstState)
+import Streamly.Streams.SVar (maxYields)
+import Streamly.Streams.StreamK (IsStream(..))
+import Streamly.Streams.Serial (SerialT)
+
+import qualified Streamly.Streams.StreamK as K
+import qualified Streamly.Streams.StreamD as D
+import qualified Streamly.Streams.Zip as Z
+
+#ifdef USE_STREAMK_ONLY
+import qualified Streamly.Streams.StreamK as S
+import qualified Streamly.Streams.Zip as S
+#else
+import qualified Streamly.Streams.StreamD as S
+#endif
+
+import qualified Streamly.Streams.Serial as Serial
+
+------------------------------------------------------------------------------
+-- Conversion to and from direct style stream
+------------------------------------------------------------------------------
+
+-- These definitions are dependent on what is imported as S
+{-# INLINE fromStreamS #-}
+fromStreamS :: (IsStream t, Monad m) => S.Stream m a -> t m a
+fromStreamS = fromStream . S.toStreamK
+
+{-# INLINE toStreamS #-}
+toStreamS :: (IsStream t, Monad m) => t m a -> S.Stream m a
+toStreamS = S.fromStreamK . toStream
+
+{-# INLINE fromStreamD #-}
+fromStreamD :: (IsStream t, Monad m) => D.Stream m a -> t m a
+fromStreamD = fromStream . D.toStreamK
+
+{-# INLINE toStreamD #-}
+toStreamD :: (IsStream t, Monad m) => t m a -> D.Stream m a
+toStreamD = D.fromStreamK . toStream
------------------------------------------------------------------------------
--- Construction
+-- Deconstruction
+------------------------------------------------------------------------------
+
+-- | Decompose a stream into its head and tail. If the stream is empty, returns
+-- 'Nothing'. If the stream is non-empty, returns @Just (a, ma)@, where @a@ is
+-- the head of the stream and @ma@ its tail.
+--
+-- @since 0.1.0
+uncons :: (IsStream t, Monad m) => SerialT m a -> m (Maybe (a, t m a))
+uncons m = K.uncons (K.adapt m)
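
-- A hypothetical usage sketch of 'uncons' (not part of the library); the tail
-- type is pinned to 'SerialT' to avoid ambiguity, and the expected result is
-- Just 1.
_unconsDemo :: IO (Maybe Int)
_unconsDemo = do
    r <- uncons (fromList [1, 2, 3]) :: IO (Maybe (Int, SerialT IO Int))
    return (fmap fst r)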
+
+------------------------------------------------------------------------------
+-- Generation by Unfolding
------------------------------------------------------------------------------
-- | Build a stream by unfolding a /pure/ step function starting from a seed.
@@ -167,14 +236,11 @@ import Streamly.Streams
-- @
--
-- @since 0.1.0
-{-# INLINE unfoldr #-}
-unfoldr :: IsStream t => (b -> Maybe (a, b)) -> b -> t m a
-unfoldr step = fromStream . go
- where
- go s = Stream $ \_ stp _ yld ->
- case step s of
- Nothing -> stp
- Just (a, b) -> yld a (go b)
+{-# INLINE_EARLY unfoldr #-}
+unfoldr :: (Monad m, IsStream t) => (b -> Maybe (a, b)) -> b -> t m a
+unfoldr step seed = fromStreamS (S.unfoldr step seed)
+{-# RULES "unfoldr fallback to StreamK" [1]
+ forall a b. S.toStreamK (S.unfoldr a b) = K.unfoldr a b #-}
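
-- A hypothetical usage sketch: unfold a countdown from a seed, stopping when
-- the step function returns Nothing. The stream is expected to contain
-- 3, 2, 1.
_countdown :: Monad m => SerialT m Int
_countdown = unfoldr (\n -> if n == 0 then Nothing else Just (n, n - 1)) 3
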
-- | Build a stream by unfolding a /monadic/ step function starting from a
-- seed. The step function returns the next element in the stream and the next
@@ -207,58 +273,26 @@ unfoldr step = fromStream . go
-- /Concurrent/
--
-- /Since: 0.1.0/
-{-# INLINE unfoldrM #-}
+{-# INLINE_EARLY unfoldrM #-}
unfoldrM :: (IsStream t, MonadAsync m) => (b -> m (Maybe (a, b))) -> b -> t m a
-unfoldrM step = go
- where
- go s = fromStream $ Stream $ \svr stp sng yld -> do
- mayb <- step s
- case mayb of
- Nothing -> stp
- Just (a, b) ->
- S.runStream (toStream (return a |: go b)) svr stp sng yld
+unfoldrM = K.unfoldrM
--- | Construct a stream from a 'Foldable' containing pure values.
---
--- @since 0.2.0
-{-# INLINE fromFoldable #-}
-fromFoldable :: (IsStream t, Foldable f) => f a -> t m a
-fromFoldable = Prelude.foldr cons nil
+{-# RULES "unfoldrM serial" unfoldrM = unfoldrMSerial #-}
+{-# INLINE_EARLY unfoldrMSerial #-}
+unfoldrMSerial :: MonadAsync m => (b -> m (Maybe (a, b))) -> b -> SerialT m a
+unfoldrMSerial step seed = fromStreamS (S.unfoldrM step seed)
--- | Construct a stream from a 'Foldable' containing monadic actions.
---
--- @
--- runStream $ serially $ S.fromFoldableM $ replicate 10 (threadDelay 1000000 >> print 1)
--- runStream $ asyncly $ S.fromFoldableM $ replicate 10 (threadDelay 1000000 >> print 1)
--- @
---
--- /Concurrent (do not use with 'parallely' on infinite containers)/
---
--- @since 0.3.0
-{-# INLINE fromFoldableM #-}
-fromFoldableM :: (IsStream t, MonadAsync m, Foldable f) => f (m a) -> t m a
-fromFoldableM = Prelude.foldr consM nil
+------------------------------------------------------------------------------
+-- Specialized Generation
+------------------------------------------------------------------------------
--- | Same as 'fromFoldable'.
---
--- @since 0.1.0
-{-# DEPRECATED each "Please use fromFoldable instead." #-}
-{-# INLINE each #-}
-each :: (IsStream t, Foldable f) => f a -> t m a
-each = fromFoldable
+{-# INLINE yield #-}
+yield :: IsStream t => a -> t m a
+yield a = K.yield a
--- | Create a singleton stream by executing a monadic action once. Same as
--- @m \`consM` nil@ but more efficient.
---
--- @
--- > toList $ once getLine
--- hello
--- ["hello"]
--- @
---
--- @since 0.2.0
-once :: (IsStream t, Monad m) => m a -> t m a
-once = fromStream . S.once
+{-# INLINE yieldM #-}
+yieldM :: (Monad m, IsStream t) => m a -> t m a
+yieldM m = K.yieldM m
-- | Generate a stream by performing a monadic action @n@ times.
--
@@ -274,7 +308,7 @@ once = fromStream . S.once
replicateM :: (IsStream t, MonadAsync m) => Int -> m a -> t m a
replicateM n m = go n
where
- go cnt = if cnt <= 0 then nil else m |: go (cnt - 1)
+ go cnt = if cnt <= 0 then K.nil else m |: go (cnt - 1)
-- | Generate a stream by repeatedly executing a monadic action forever.
--
@@ -296,7 +330,7 @@ repeatM = go
iterate :: IsStream t => (a -> a) -> a -> t m a
iterate step = fromStream . go
where
- go s = S.cons s (go (step s))
+ go s = K.cons s (go (step s))
-- | Iterate a monadic function from a seed value, streaming the results
-- forever.
@@ -320,9 +354,56 @@ iterate step = fromStream . go
iterateM :: (IsStream t, MonadAsync m) => (a -> m a) -> a -> t m a
iterateM step = go
where
- go s = fromStream $ Stream $ \svr stp sng yld -> do
+ go s = fromStream $ K.Stream $ \svr stp sng yld -> do
next <- step s
- S.runStream (toStream (return s |: go next)) svr stp sng yld
+ K.unStream (toStream (return s |: go next)) svr stp sng yld
+
+------------------------------------------------------------------------------
+-- Conversions
+------------------------------------------------------------------------------
+
+-- | Construct a stream from a list containing pure values. This can be more
+-- efficient than 'K.fromFoldable' for lists as it can fuse the list.
+--
+-- @since 0.4.0
+{-# INLINE_EARLY fromList #-}
+fromList :: (Monad m, IsStream t) => [a] -> t m a
+fromList = fromStreamS . S.fromList
+{-# RULES "fromList fallback to StreamK" [1]
+ forall a. S.toStreamK (S.fromList a) = K.fromFoldable a #-}
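
-- A hypothetical usage sketch of 'fromList': since it fuses, summing it
-- should behave just like summing the list, i.e. 5050 here.
_fromListDemo :: IO Int
_fromListDemo = sum (fromList [1 .. 100] :: SerialT IO Int)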
+
+-- | Construct a stream from a list containing monadic actions. This can be
+-- more efficient than 'fromFoldableM' especially for serial streams as it can
+-- fuse the list.
+--
+-- @since 0.4.0
+{-# INLINE_EARLY fromListM #-}
+fromListM :: (MonadAsync m, IsStream t) => [m a] -> t m a
+fromListM = fromStreamD . D.fromListM
+{-# RULES "fromListM fallback to StreamK" [1]
+ forall a. D.toStreamK (D.fromListM a) = fromFoldableM a #-}
+
+-- | Construct a stream from a 'Foldable' containing monadic actions.
+--
+-- @
+-- runStream $ serially $ S.fromFoldableM $ replicate 10 (threadDelay 1000000 >> print 1)
+-- runStream $ asyncly $ S.fromFoldableM $ replicate 10 (threadDelay 1000000 >> print 1)
+-- @
+--
+-- /Concurrent (do not use with 'parallely' on infinite containers)/
+--
+-- @since 0.3.0
+{-# INLINE fromFoldableM #-}
+fromFoldableM :: (IsStream t, MonadAsync m, Foldable f) => f (m a) -> t m a
+fromFoldableM = Prelude.foldr consM K.nil
+
+-- | Same as 'fromFoldable'.
+--
+-- @since 0.1.0
+{-# DEPRECATED each "Please use fromFoldable instead." #-}
+{-# INLINE each #-}
+each :: (IsStream t, Foldable f) => f a -> t m a
+each = K.fromFoldable
-- | Read lines from an IO Handle into a stream of Strings.
--
@@ -330,7 +411,7 @@ iterateM step = go
fromHandle :: (IsStream t, MonadIO m) => IO.Handle -> t m String
fromHandle h = fromStream go
where
- go = Stream $ \_ stp _ yld -> do
+ go = K.Stream $ \_ stp _ yld -> do
eof <- liftIO $ IO.hIsEOF h
if eof
then stp
@@ -339,26 +420,9 @@ fromHandle h = fromStream go
yld str go
------------------------------------------------------------------------------
--- Elimination
+-- Elimination by Folding
------------------------------------------------------------------------------
--- | Lazy right associative fold. For example, to fold a stream into a list:
---
--- @
--- >> runIdentity $ foldr (:) [] (serially $ fromFoldable [1,2,3])
--- [1,2,3]
--- @
---
--- @since 0.1.0
-foldr :: Monad m => (a -> b -> b) -> b -> SerialT m a -> m b
-foldr step acc m = go (toStream m)
- where
- go m1 =
- let stop = return acc
- single a = return (step a acc)
- yield a r = go r >>= \b -> return (step a b)
- in (S.runStream m1) Nothing stop single yield
-
-- | Lazy right fold with a monadic step function. For example, to fold a
-- stream into a list:
--
@@ -370,47 +434,21 @@ foldr step acc m = go (toStream m)
-- @since 0.2.0
{-# INLINE foldrM #-}
foldrM :: Monad m => (a -> b -> m b) -> b -> SerialT m a -> m b
-foldrM step acc m = go (toStream m)
- where
- go m1 =
- let stop = return acc
- single a = step a acc
- yield a r = go r >>= step a
- in (S.runStream m1) Nothing stop single yield
+foldrM step acc m = S.foldrM step acc $ toStreamS m
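
-- A usage sketch of the monadic right fold: building a list. This mirrors how
-- 'toList' is defined further down in this module; the name _toListViaFoldrM
-- is hypothetical.
_toListViaFoldrM :: Monad m => SerialT m a -> m [a]
_toListViaFoldrM = foldrM (\a xs -> return (a : xs)) []
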
--- | Strict left scan with an extraction function. Like 'scanl'', but applies a
--- user supplied extraction function (the third argument) at each step. This is
--- designed to work with the @foldl@ library. The suffix @x@ is a mnemonic for
--- extraction.
+-- | Lazy right associative fold. For example, to fold a stream into a list:
--
--- @since 0.2.0
-{-# INLINE scanx #-}
-scanx :: IsStream t => (x -> a -> x) -> x -> (x -> b) -> t m a -> t m b
-scanx step begin done m =
- cons (done begin) $ fromStream $ go (toStream m) begin
- where
- go m1 !acc = Stream $ \_ stp sng yld ->
- let single a = sng (done $ step acc a)
- yield a r =
- let s = step acc a
- in yld (done s) (go r s)
- in S.runStream m1 Nothing stp single yield
-
--- |
--- @since 0.1.1
-{-# DEPRECATED scan "Please use scanx instead." #-}
-scan :: IsStream t => (x -> a -> x) -> x -> (x -> b) -> t m a -> t m b
-scan = scanx
-
--- | Strict left scan. Like 'foldl'', but returns the folded value at each
--- step, generating a stream of all intermediate fold results. The first
--- element of the stream is the user supplied initial value, and the last
--- element of the stream is the same as the result of 'foldl''.
+-- @
+-- >> runIdentity $ foldr (:) [] (serially $ fromFoldable [1,2,3])
+-- [1,2,3]
+-- @
--
--- @since 0.2.0
-{-# INLINE scanl' #-}
-scanl' :: IsStream t => (b -> a -> b) -> b -> t m a -> t m b
-scanl' step begin m = scanx step begin id m
+-- @since 0.1.0
+{-# INLINE foldr #-}
+foldr :: Monad m => (a -> b -> b) -> b -> SerialT m a -> m b
+-- XXX somehow this definition does not perform well, need to investigate
+-- foldr step acc m = S.foldr step acc $ S.fromStreamK (toStream m)
+foldr f = foldrM (\a b -> return (f a b))
-- | Strict left fold with an extraction function. Like the standard strict
-- left fold, but applies a user supplied extraction function (the third
@@ -420,24 +458,7 @@ scanl' step begin m = scanx step begin id m
-- @since 0.2.0
{-# INLINE foldx #-}
foldx :: Monad m => (x -> a -> x) -> x -> (x -> b) -> SerialT m a -> m b
-foldx step begin done m = get $ go (toStream m) begin
- where
- {-# NOINLINE get #-}
- get m1 =
- let single = return . done
- in (S.runStream m1) Nothing undefined single undefined
-
- -- Note, this can be implemented by making a recursive call to "go",
- -- however that is more expensive because of unnecessary recursion
- -- that cannot be tail call optimized. Unfolding recursion explicitly via
- -- continuations is much more efficient.
- go m1 !acc = Stream $ \_ _ sng yld ->
- let stop = sng acc
- single a = sng $ step acc a
- yield a r =
- let stream = go r (step acc a)
- in (S.runStream stream) Nothing undefined sng yld
- in (S.runStream m1) Nothing stop single yield
+foldx = K.foldx
-- |
-- @since 0.1.0
@@ -450,20 +471,14 @@ foldl = foldx
-- @since 0.2.0
{-# INLINE foldl' #-}
foldl' :: Monad m => (b -> a -> b) -> b -> SerialT m a -> m b
-foldl' step begin m = foldx step begin id m
+foldl' step begin m = S.foldl' step begin $ toStreamS m
-- XXX replace the recursive "go" with explicit continuations.
-- | Like 'foldx', but with a monadic step function.
--
-- @since 0.2.0
foldxM :: Monad m => (x -> a -> m x) -> m x -> (x -> m b) -> SerialT m a -> m b
-foldxM step begin done m = go begin (toStream m)
- where
- go !acc m1 =
- let stop = acc >>= done
- single a = acc >>= \b -> step b a >>= done
- yield a r = acc >>= \b -> go (step b a) r
- in (S.runStream m1) Nothing stop single yield
+foldxM = K.foldxM
-- |
-- @since 0.1.0
@@ -475,266 +490,245 @@ foldlM = foldxM
--
-- @since 0.2.0
foldlM' :: Monad m => (b -> a -> m b) -> b -> SerialT m a -> m b
-foldlM' step begin m = foldxM step (return begin) return m
-
--- | Decompose a stream into its head and tail. If the stream is empty, returns
--- 'Nothing'. If the stream is non-empty, returns @Just (a, ma)@, where @a@ is
--- the head of the stream and @ma@ its tail.
---
--- @since 0.1.0
-uncons :: (IsStream t, Monad m) => SerialT m a -> m (Maybe (a, t m a))
-uncons m =
- let stop = return Nothing
- single a = return (Just (a, nil))
- yield a r = return (Just (a, fromStream r))
- in (S.runStream (toStream m)) Nothing stop single yield
-
--- | Write a stream of Strings to an IO Handle.
---
--- @since 0.1.0
-toHandle :: MonadIO m => IO.Handle -> SerialT m String -> m ()
-toHandle h m = go (toStream m)
- where
- go m1 =
- let stop = return ()
- single a = liftIO (IO.hPutStrLn h a)
- yield a r = liftIO (IO.hPutStrLn h a) >> go r
- in (S.runStream m1) Nothing stop single yield
+foldlM' step begin m = S.foldlM' step begin $ toStreamS m
------------------------------------------------------------------------------
--- Special folds
+-- Specialized folds
------------------------------------------------------------------------------
--- | Convert a stream into a list in the underlying monad.
+-- | Determine whether the stream is empty.
--
--- @since 0.1.0
-{-# INLINABLE toList #-}
-toList :: Monad m => SerialT m a -> m [a]
-toList = foldrM (\a xs -> return (a : xs)) []
+-- @since 0.1.1
+{-# INLINE null #-}
+null :: Monad m => SerialT m a -> m Bool
+null m = K.null m
--- | Take first 'n' elements from the stream and discard the rest.
+-- | Extract the first element of the stream, if any.
--
-- @since 0.1.0
-{-# INLINE take #-}
-take :: IsStream t => Int -> t m a -> t m a
-take n m = fromStream $ go n (toStream m)
- where
- go n1 m1 = Stream $ \_ stp sng yld ->
- let yield a r = yld a (go (n1 - 1) r)
- in if n1 <= 0 then stp else (S.runStream m1) Nothing stp sng yield
+{-# INLINE head #-}
+head :: Monad m => SerialT m a -> m (Maybe a)
+head m = K.head m
--- | Include only those elements that pass a predicate.
+-- | Extract all but the first element of the stream, if any.
--
--- @since 0.1.0
-{-# INLINE filter #-}
-filter :: IsStream t => (a -> Bool) -> t m a -> t m a
-filter p m = fromStream $ go (toStream m)
- where
- go m1 = Stream $ \_ stp sng yld ->
- let single a | p a = sng a
- | otherwise = stp
- yield a r | p a = yld a (go r)
- | otherwise = (S.runStream r) Nothing stp single yield
- in (S.runStream m1) Nothing stp single yield
+-- @since 0.1.1
+{-# INLINE tail #-}
+tail :: (IsStream t, Monad m) => SerialT m a -> m (Maybe (t m a))
+tail m = K.tail (K.adapt m)
--- | End the stream as soon as the predicate fails on an element.
+-- | Extract the last element of the stream, if any.
+--
+-- @since 0.1.1
+{-# INLINE last #-}
+last :: Monad m => SerialT m a -> m (Maybe a)
+last m = S.last $ toStreamS m
+
+-- | Determine whether an element is present in the stream.
--
-- @since 0.1.0
-{-# INLINE takeWhile #-}
-takeWhile :: IsStream t => (a -> Bool) -> t m a -> t m a
-takeWhile p m = fromStream $ go (toStream m)
- where
- go m1 = Stream $ \_ stp sng yld ->
- let single a | p a = sng a
- | otherwise = stp
- yield a r | p a = yld a (go r)
- | otherwise = stp
- in (S.runStream m1) Nothing stp single yield
+{-# INLINE elem #-}
+elem :: (Monad m, Eq a) => a -> SerialT m a -> m Bool
+elem e m = S.elem e (toStreamS m)
--- | Discard first 'n' elements from the stream and take the rest.
+-- | Determine whether an element is not present in the stream.
--
-- @since 0.1.0
-drop :: IsStream t => Int -> t m a -> t m a
-drop n m = fromStream $ go n (toStream m)
- where
- go n1 m1 = Stream $ \_ stp sng yld ->
- let single _ = stp
- yield _ r = (S.runStream $ go (n1 - 1) r) Nothing stp sng yld
- -- Somehow "<=" check performs better than a ">"
- in if n1 <= 0
- then (S.runStream m1) Nothing stp sng yld
- else (S.runStream m1) Nothing stp single yield
+{-# INLINE notElem #-}
+notElem :: (Monad m, Eq a) => a -> SerialT m a -> m Bool
+notElem e m = S.notElem e (toStreamS m)
--- | Drop elements in the stream as long as the predicate succeeds and then
--- take the rest of the stream.
+-- | Determine the length of the stream.
--
-- @since 0.1.0
-{-# INLINE dropWhile #-}
-dropWhile :: IsStream t => (a -> Bool) -> t m a -> t m a
-dropWhile p m = fromStream $ go (toStream m)
- where
- go m1 = Stream $ \_ stp sng yld ->
- let single a | p a = stp
- | otherwise = sng a
- yield a r | p a = (S.runStream r) Nothing stp single yield
- | otherwise = yld a r
- in (S.runStream m1) Nothing stp single yield
+{-# INLINE length #-}
+length :: Monad m => SerialT m a -> m Int
+length = foldl' (\n _ -> n + 1) 0
-- | Determine whether all elements of a stream satisfy a predicate.
--
-- @since 0.1.0
+{-# INLINE all #-}
all :: Monad m => (a -> Bool) -> SerialT m a -> m Bool
-all p m = go (toStream m)
- where
- go m1 =
- let single a | p a = return True
- | otherwise = return False
- yield a r | p a = go r
- | otherwise = return False
- in (S.runStream m1) Nothing (return True) single yield
+all p m = S.all p (toStreamS m)
-- | Determine whether any of the elements of a stream satisfy a predicate.
--
-- @since 0.1.0
+{-# INLINE any #-}
any :: Monad m => (a -> Bool) -> SerialT m a -> m Bool
-any p m = go (toStream m)
- where
- go m1 =
- let single a | p a = return True
- | otherwise = return False
- yield a r | p a = return True
- | otherwise = go r
- in (S.runStream m1) Nothing (return False) single yield
+any p m = S.any p (toStreamS m)
-- | Determine the sum of all elements of a stream of numbers
--
-- @since 0.1.0
+{-# INLINE sum #-}
sum :: (Monad m, Num a) => SerialT m a -> m a
-sum = foldl (+) 0 id
+sum = foldl' (+) 0
-- | Determine the product of all elements of a stream of numbers
--
-- @since 0.1.1
+{-# INLINE product #-}
product :: (Monad m, Num a) => SerialT m a -> m a
-product = foldl (*) 1 id
+product = foldl' (*) 1
--- | Extract the first element of the stream, if any.
+-- | Determine the minimum element in a stream.
--
-- @since 0.1.0
-head :: Monad m => SerialT m a -> m (Maybe a)
-head m =
- let stop = return Nothing
- single a = return (Just a)
- yield a _ = return (Just a)
- in (S.runStream (toStream m)) Nothing stop single yield
+{-# INLINE minimum #-}
+minimum :: (Monad m, Ord a) => SerialT m a -> m (Maybe a)
+minimum m = S.minimum (toStreamS m)
--- | Extract all but the first element of the stream, if any.
+-- | Determine the maximum element in a stream.
--
--- @since 0.1.1
-tail :: (IsStream t, Monad m) => SerialT m a -> m (Maybe (t m a))
-tail m =
- let stop = return Nothing
- single _ = return $ Just nil
- yield _ r = return $ Just $ fromStream r
- in (S.runStream (toStream m)) Nothing stop single yield
+-- @since 0.1.0
+{-# INLINE maximum #-}
+maximum :: (Monad m, Ord a) => SerialT m a -> m (Maybe a)
+maximum m = S.maximum (toStreamS m)
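
A small sketch exercising a few of the specialized folds above (statsDemo is an illustrative name; imports as in the earlier foldr sketch):

import Streamly (SerialT)
import qualified Streamly.Prelude as S

statsDemo :: IO ()
statsDemo = do
    let s = S.fromFoldable [3,1,4,1,5] :: SerialT IO Int
    S.length s  >>= print      -- 5
    S.sum s     >>= print      -- 14
    S.minimum s >>= print      -- Just 1
    S.maximum s >>= print      -- Just 5
    S.elem 4 s  >>= print      -- True
    S.null s    >>= print      -- False
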
--- | Extract the last element of the stream, if any.
---
--- @since 0.1.1
-{-# INLINE last #-}
-last :: Monad m => SerialT m a -> m (Maybe a)
-last = foldl (\_ y -> Just y) Nothing id
+------------------------------------------------------------------------------
+-- Map and Fold
+------------------------------------------------------------------------------
--- | Determine whether the stream is empty.
+-- XXX this can utilize parallel mapping if we implement it as runStream . mapM
+-- | Apply a monadic action to each element of the stream and discard the
+-- output of the action.
--
--- @since 0.1.1
-null :: Monad m => SerialT m a -> m Bool
-null m =
- let stop = return True
- single _ = return False
- yield _ _ = return False
- in (S.runStream (toStream m)) Nothing stop single yield
+-- @since 0.1.0
+{-# INLINE mapM_ #-}
+mapM_ :: Monad m => (a -> m b) -> SerialT m a -> m ()
+mapM_ f m = S.mapM_ f $ toStreamS m
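
For instance, mapM_ can be used to run an effect for every element while discarding the results (printAll is an illustrative helper, not part of the library):

import Streamly (SerialT)
import qualified Streamly.Prelude as S

printAll :: Show a => SerialT IO a -> IO ()
printAll = S.mapM_ print
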
--- | Determine whether an element is present in the stream.
+------------------------------------------------------------------------------
+-- Conversions
+------------------------------------------------------------------------------
+
+-- | Convert a stream into a list in the underlying monad.
--
-- @since 0.1.0
-elem :: (Monad m, Eq a) => a -> SerialT m a -> m Bool
-elem e m = go (toStream m)
- where
- go m1 =
- let stop = return False
- single a = return (a == e)
- yield a r = if a == e then return True else go r
- in (S.runStream m1) Nothing stop single yield
+{-# INLINE toList #-}
+toList :: Monad m => SerialT m a -> m [a]
+toList m = S.toList $ toStreamS m
--- | Determine whether an element is not present in the stream.
+-- | Write a stream of Strings to an IO Handle.
--
-- @since 0.1.0
-notElem :: (Monad m, Eq a) => a -> SerialT m a -> m Bool
-notElem e m = go (toStream m)
+toHandle :: MonadIO m => IO.Handle -> SerialT m String -> m ()
+toHandle h m = go (toStream m)
where
go m1 =
- let stop = return True
- single a = return (a /= e)
- yield a r = if a == e then return False else go r
- in (S.runStream m1) Nothing stop single yield
+ let stop = return ()
+ single a = liftIO (IO.hPutStrLn h a)
+ yieldk a r = liftIO (IO.hPutStrLn h a) >> go r
+ in (K.unStream m1) defState stop single yieldk
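
A minimal sketch of toHandle, writing each string in the stream as a line on stdout (greet is an illustrative name):

import qualified Streamly.Prelude as S
import qualified System.IO as IO

greet :: IO ()
greet = S.toHandle IO.stdout (S.fromFoldable ["hello", "world"])
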
--- | Determine the length of the stream.
+------------------------------------------------------------------------------
+-- Transformation by Folding (Scans)
+------------------------------------------------------------------------------
+
+-- | Strict left scan with an extraction function. Like 'scanl'', but applies a
+-- user supplied extraction function (the third argument) at each step. This is
+-- designed to work with the @foldl@ library. The suffix @x@ is a mnemonic for
+-- extraction.
+--
+-- @since 0.2.0
+{-# INLINE scanx #-}
+scanx :: IsStream t => (x -> a -> x) -> x -> (x -> b) -> t m a -> t m b
+scanx = K.scanx
+
+-- |
+-- @since 0.1.1
+{-# DEPRECATED scan "Please use scanx instead." #-}
+scan :: IsStream t => (x -> a -> x) -> x -> (x -> b) -> t m a -> t m b
+scan = scanx
+
+-- | Like 'scanl'' but with a monadic step function.
+--
+-- @since 0.4.0
+{-# INLINE scanlM' #-}
+scanlM' :: (IsStream t, Monad m) => (b -> a -> m b) -> b -> t m a -> t m b
+scanlM' step begin m = fromStreamD $ D.scanlM' step begin $ toStreamD m
+
+-- | Strict left scan. Like 'foldl'', but returns the folded value at each
+-- step, generating a stream of all intermediate fold results. The first
+-- element of the stream is the user supplied initial value, and the last
+-- element of the stream is the same as the result of 'foldl''.
+--
+-- @since 0.2.0
+{-# INLINE scanl' #-}
+scanl' :: (IsStream t, Monad m) => (b -> a -> b) -> b -> t m a -> t m b
+scanl' step = scanlM' (\a b -> return (step a b))
+
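
As documented above, scanl' emits the initial value first and then every intermediate accumulator, so the running sums of [1..5] come out as [0,1,3,6,10,15]; scanlM' is the same with a monadic step. A sketch (runningSums is an illustrative name):

import qualified Streamly.Prelude as S

runningSums :: IO [Int]
runningSums = S.toList $ S.scanl' (+) 0 $ S.fromFoldable [1..5]
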
+------------------------------------------------------------------------------
+-- Transformation by Filtering
+------------------------------------------------------------------------------
+
+-- | Include only those elements that pass a predicate.
--
-- @since 0.1.0
-length :: Monad m => SerialT m a -> m Int
-length = foldl (\n _ -> n + 1) 0 id
+{-# INLINE filter #-}
+#if __GLASGOW_HASKELL__ != 802
+-- GHC 8.2.2 crashes with this code, when used with "stack"
+filter :: (IsStream t, Monad m) => (a -> Bool) -> t m a -> t m a
+filter p m = fromStreamS $ S.filter p $ toStreamS m
+#else
+filter :: IsStream t => (a -> Bool) -> t m a -> t m a
+filter = K.filter
+#endif
--- | Returns the elements of the stream in reverse order.
--- The stream must be finite.
+-- | Same as 'filter' but with a monadic predicate.
--
--- @since 0.1.1
-reverse :: (IsStream t) => t m a -> t m a
-reverse m = fromStream $ go S.nil (toStream m)
- where
- go rev rest = Stream $ \_ stp sng yld ->
- let run x = S.runStream x Nothing stp sng yld
- stop = run rev
- single a = run $ a `S.cons` rev
- yield a r = run $ go (a `S.cons` rev) r
- in S.runStream rest Nothing stop single yield
-
--- XXX replace the recursive "go" with continuation
--- | Determine the minimum element in a stream.
+-- @since 0.4.0
+{-# INLINE filterM #-}
+filterM :: (IsStream t, Monad m) => (a -> m Bool) -> t m a -> t m a
+filterM p m = fromStreamD $ D.filterM p $ toStreamD m
+
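
A sketch of filterM with an effectful predicate (evensLogged is an illustrative name):

import qualified Streamly.Prelude as S

evensLogged :: IO [Int]
evensLogged = S.toList $ S.filterM check $ S.fromFoldable [1..5]
  where
    check x = do
        putStrLn ("testing " ++ show x)  -- one effect per element
        return (even x)
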
+-- | Take first 'n' elements from the stream and discard the rest.
--
-- @since 0.1.0
-minimum :: (Monad m, Ord a) => SerialT m a -> m (Maybe a)
-minimum m = go Nothing (toStream m)
- where
- go res m1 =
- let stop = return res
- single a = return $ min_ a res
- yield a r = go (min_ a res) r
- in (S.runStream m1) Nothing stop single yield
+{-# INLINE take #-}
+take :: (IsStream t, Monad m) => Int -> t m a -> t m a
+take n m = fromStreamS $ S.take n $ toStreamS (maxYields (Just n) m)
- min_ a res = case res of
- Nothing -> Just a
- Just e -> Just $ min a e
+-- | End the stream as soon as the predicate fails on an element.
+--
+-- @since 0.1.0
+{-# INLINE takeWhile #-}
+takeWhile :: (IsStream t, Monad m) => (a -> Bool) -> t m a -> t m a
+takeWhile p m = fromStreamS $ S.takeWhile p $ toStreamS m
--- XXX replace the recursive "go" with continuation
--- | Determine the maximum element in a stream.
+-- | Same as 'takeWhile' but with a monadic predicate.
+--
+-- @since 0.4.0
+{-# INLINE takeWhileM #-}
+takeWhileM :: (IsStream t, Monad m) => (a -> m Bool) -> t m a -> t m a
+takeWhileM p m = fromStreamD $ D.takeWhileM p $ toStreamD m
+
+-- | Discard first 'n' elements from the stream and take the rest.
--
-- @since 0.1.0
-maximum :: (Monad m, Ord a) => SerialT m a -> m (Maybe a)
-maximum m = go Nothing (toStream m)
- where
- go res m1 =
- let stop = return res
- single a = return $ max_ a res
- yield a r = go (max_ a res) r
- in (S.runStream m1) Nothing stop single yield
+{-# INLINE drop #-}
+drop :: (IsStream t, Monad m) => Int -> t m a -> t m a
+drop n m = fromStreamS $ S.drop n $ toStreamS m
- max_ a res = case res of
- Nothing -> Just a
- Just e -> Just $ max a e
+-- | Drop elements in the stream as long as the predicate succeeds and then
+-- take the rest of the stream.
+--
+-- @since 0.1.0
+{-# INLINE dropWhile #-}
+dropWhile :: (IsStream t, Monad m) => (a -> Bool) -> t m a -> t m a
+dropWhile p m = fromStreamS $ S.dropWhile p $ toStreamS m
+
+-- | Same as 'dropWhile' but with a monadic predicate.
+--
+-- @since 0.4.0
+{-# INLINE dropWhileM #-}
+dropWhileM :: (IsStream t, Monad m) => (a -> m Bool) -> t m a -> t m a
+dropWhileM p m = fromStreamD $ D.dropWhileM p $ toStreamD m
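
The take/drop family composes in the obvious way. A sketch that splits a stream at the first element failing a predicate; note that the source description is simply evaluated twice here (splitLt is an illustrative name):

import Streamly (SerialT)
import qualified Streamly.Prelude as S

splitLt :: Int -> IO ([Int], [Int])
splitLt n = do
    let src = S.fromFoldable [1..10] :: SerialT IO Int
    pre  <- S.toList (S.takeWhile (< n) src)
    post <- S.toList (S.dropWhile (< n) src)
    return (pre, post)
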
------------------------------------------------------------------------------
--- Transformation
+-- Transformation by Mapping
------------------------------------------------------------------------------
-- | Replace each element of the stream with the result of a monadic action
@@ -751,55 +745,14 @@ maximum m = go Nothing (toStream m)
-- /Concurrent (do not use with 'parallely' on infinite streams)/
--
-- @since 0.1.0
-{-# INLINE mapM #-}
+{-# INLINE_EARLY mapM #-}
mapM :: (IsStream t, MonadAsync m) => (a -> m b) -> t m a -> t m b
-mapM f m = go (toStream m)
- where
- go m1 = fromStream $ Stream $ \svr stp sng yld ->
- let single a = f a >>= sng
- yield a r = S.runStream (toStream (f a |: (go r))) svr stp sng yld
- in (S.runStream m1) Nothing stp single yield
-
--- | Map a 'Maybe' returning function to a stream, filter out the 'Nothing'
--- elements, and return a stream of values extracted from 'Just'.
---
--- @since 0.3.0
-{-# INLINE mapMaybe #-}
-mapMaybe :: (IsStream t) => (a -> Maybe b) -> t m a -> t m b
-mapMaybe f m = go (toStream m)
- where
- go m1 = fromStream $ Stream $ \_ stp sng yld ->
- let single a = case f a of
- Just b -> sng b
- Nothing -> stp
- yield a r = case f a of
- Just b -> yld b (toStream $ go r)
- Nothing -> (S.runStream r) Nothing stp single yield
- in (S.runStream m1) Nothing stp single yield
-
--- | Like 'mapMaybe' but maps a monadic function.
---
--- /Concurrent (do not use with 'parallely' on infinite streams)/
---
--- @since 0.3.0
-{-# INLINE mapMaybeM #-}
-mapMaybeM :: (IsStream t, MonadAsync m, Functor (t m))
- => (a -> m (Maybe b)) -> t m a -> t m b
-mapMaybeM f = fmap fromJust . filter isJust . mapM f
+mapM = K.mapM
--- XXX this can utilize parallel mapping if we implement it as runStream . mapM
--- | Apply a monadic action to each element of the stream and discard the
--- output of the action.
---
--- @since 0.1.0
-mapM_ :: Monad m => (a -> m b) -> SerialT m a -> m ()
-mapM_ f m = go (toStream m)
- where
- go m1 =
- let stop = return ()
- single a = void (f a)
- yield a r = f a >> go r
- in (S.runStream m1) Nothing stop single yield
+{-# RULES "mapM serial" mapM = mapMSerial #-}
+{-# INLINE mapMSerial #-}
+mapMSerial :: Monad m => (a -> m b) -> SerialT m a -> SerialT m b
+mapMSerial = Serial.mapM
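
Since mapM is concurrent for the concurrent stream types, a typical use is to run effectful actions in parallel while keeping the output order by choosing the Ahead style (see the lookahead notes in Streamly.SVar below). A sketch, assuming aheadly is exported from the Streamly module (delayedSquares is an illustrative name):

import Control.Concurrent (threadDelay)
import Streamly (aheadly)
import qualified Streamly.Prelude as S

delayedSquares :: IO [Int]
delayedSquares =
    S.toList $ aheadly $ S.mapM (\x -> threadDelay 1000 >> return (x * x))
             $ S.fromFoldable [1..5]
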
-- | Reduce a stream of monadic actions to a stream of the output of those
-- actions.
@@ -815,60 +768,64 @@ mapM_ f m = go (toStream m)
-- /Concurrent (do not use with 'parallely' on infinite streams)/
--
-- @since 0.1.0
+{-# INLINE sequence #-}
sequence :: (IsStream t, MonadAsync m) => t m (m a) -> t m a
-sequence m = go (toStream m)
- where
- go m1 = fromStream $ Stream $ \svr stp sng yld ->
- let single ma = ma >>= sng
- yield ma r = S.runStream (toStream $ ma |: go r) svr stp sng yld
- in (S.runStream m1) Nothing stp single yield
+sequence = K.sequence
------------------------------------------------------------------------------
--- Serially Zipping Streams
+-- Transformation by Map and Filter
------------------------------------------------------------------------------
--- | Zip two streams serially using a pure zipping function.
+-- | Map a 'Maybe' returning function to a stream, filter out the 'Nothing'
+-- elements, and return a stream of values extracted from 'Just'.
--
--- @since 0.1.0
-zipWith :: IsStream t => (a -> b -> c) -> t m a -> t m b -> t m c
-zipWith f m1 m2 = fromStream $ S.zipWith f (toStream m1) (toStream m2)
+-- @since 0.3.0
+{-# INLINE mapMaybe #-}
+mapMaybe :: (IsStream t, Monad m) => (a -> Maybe b) -> t m a -> t m b
+mapMaybe f m = fromStreamS $ S.mapMaybe f $ toStreamS m
--- | Zip two streams serially using a monadic zipping function.
+-- | Like 'mapMaybe' but maps a monadic function.
--
--- @since 0.1.0
-zipWithM :: IsStream t => (a -> b -> t m c) -> t m a -> t m b -> t m c
-zipWithM f m1 m2 = fromStream $ go (toStream m1) (toStream m2)
+-- /Concurrent (do not use with 'parallely' on infinite streams)/
+--
+-- @since 0.3.0
+{-# INLINE mapMaybeM #-}
+mapMaybeM :: (IsStream t, MonadAsync m, Functor (t m))
+ => (a -> m (Maybe b)) -> t m a -> t m b
+mapMaybeM f = fmap fromJust . filter isJust . mapM f
+
+------------------------------------------------------------------------------
+-- Transformation by Reordering
+------------------------------------------------------------------------------
+
+-- | Returns the elements of the stream in reverse order.
+-- The stream must be finite.
+--
+-- @since 0.1.1
+reverse :: (IsStream t) => t m a -> t m a
+reverse m = fromStream $ go K.nil (toStream m)
where
- go mx my = Stream $ \_ stp sng yld -> do
- let merge a ra =
- let run x = S.runStream x Nothing stp sng yld
- single2 b = run $ toStream (f a b)
- yield2 b rb = run $ toStream (f a b) <> go ra rb
- in (S.runStream my) Nothing stp single2 yield2
- let single1 a = merge a S.nil
- yield1 a ra = merge a ra
- (S.runStream mx) Nothing stp single1 yield1
+ go rev rest = K.Stream $ \st stp sng yld ->
+ let runIt x = K.unStream x (rstState st) stp sng yld
+ stop = runIt rev
+ single a = runIt $ a `K.cons` rev
+ yieldk a r = runIt $ go (a `K.cons` rev) r
+ in K.unStream rest (rstState st) stop single yieldk
------------------------------------------------------------------------------
--- Parallely Zipping Streams
+-- Zipping
------------------------------------------------------------------------------
--- | Zip two streams concurrently (i.e. both the elements being zipped are
--- generated concurrently) using a pure zipping function.
+-- | Zip two streams serially using a monadic zipping function.
--
--- @since 0.1.0
-zipAsyncWith :: (IsStream t, MonadAsync m)
- => (a -> b -> c) -> t m a -> t m b -> t m c
-zipAsyncWith f m1 m2 =
- fromStream $ S.zipAsyncWith f (toStream m1) (toStream m2)
+-- @since 0.4.0
+{-# INLINABLE zipWithM #-}
+zipWithM :: (IsStream t, Monad m) => (a -> b -> m c) -> t m a -> t m b -> t m c
+zipWithM f m1 m2 = fromStreamS $ S.zipWithM f (toStreamS m1) (toStreamS m2)
--- | Zip two streams asyncly (i.e. both the elements being zipped are generated
--- concurrently) using a monadic zipping function.
+-- | Zip two streams serially using a pure zipping function.
--
-- @since 0.1.0
-zipAsyncWithM :: (IsStream t, MonadAsync m)
- => (a -> b -> t m c) -> t m a -> t m b -> t m c
-zipAsyncWithM f m1 m2 = fromStream $ Stream $ \_ stp sng yld -> do
- ma <- mkAsync m1
- mb <- mkAsync m2
- (S.runStream (toStream (zipWithM f ma mb))) Nothing stp sng yld
+{-# INLINABLE zipWith #-}
+zipWith :: (IsStream t, Monad m) => (a -> b -> c) -> t m a -> t m b -> t m c
+zipWith f m1 m2 = fromStreamS $ S.zipWith f (toStreamS m1) (toStreamS m2)
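
To round off the Prelude changes, a sketch of the zipping API, including the changed zipWithM whose zipping function now returns in the underlying monad (zipDemo is an illustrative name):

import qualified Streamly.Prelude as S

zipDemo :: IO ()
zipDemo = do
    xs <- S.toList $ S.zipWith (+) (S.fromFoldable [1,2,3]) (S.fromFoldable [10,20,30])
    print (xs :: [Int])   -- [11,22,33]
    ys <- S.toList $ S.zipWithM (\a b -> return (a, b))
                                (S.fromFoldable "abc")
                                (S.fromFoldable [1,2,3 :: Int])
    print ys              -- [('a',1),('b',2),('c',3)]
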
diff --git a/src/Streamly/SVar.hs b/src/Streamly/SVar.hs
new file mode 100644
index 0000000..4bfcfdf
--- /dev/null
+++ b/src/Streamly/SVar.hs
@@ -0,0 +1,974 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE KindSignatures #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE MagicHash #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE UnboxedTuples #-}
+
+-- |
+-- Module : Streamly.SVar
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.SVar
+ (
+ MonadAsync
+ , SVar (..)
+ , SVarStyle (..)
+ , defaultMaxBuffer
+ , defaultMaxThreads
+ , State (..)
+ , defState
+ , rstState
+
+ , newAheadVar
+ , newParallelVar
+
+ , toStreamVar
+
+ , atomicModifyIORefCAS
+ , ChildEvent (..)
+ , AheadHeapEntry (..)
+ , sendYield
+ , sendStop
+ , enqueueLIFO
+ , workLoopLIFO
+ , workLoopFIFO
+ , enqueueFIFO
+ , enqueueAhead
+ , pushWorkerPar
+
+ , queueEmptyAhead
+ , dequeueAhead
+ , dequeueFromHeap
+
+ , postProcessBounded
+ , readOutputQBounded
+ , sendWorker
+ , delThread
+ )
+where
+
+import Control.Concurrent
+ (ThreadId, myThreadId, threadDelay, getNumCapabilities)
+import Control.Concurrent.MVar
+ (MVar, newEmptyMVar, tryPutMVar, takeMVar)
+import Control.Exception (SomeException(..), catch, mask)
+import Control.Monad (when)
+import Control.Monad.Catch (MonadThrow)
+import Control.Monad.IO.Class (MonadIO(..))
+import Control.Monad.Trans.Control (MonadBaseControl, control)
+import Data.Atomics
+ (casIORef, readForCAS, peekTicket, atomicModifyIORefCAS_,
+ writeBarrier, storeLoadBarrier)
+import Data.Concurrent.Queue.MichaelScott
+ (LinkedQueue, pushL, tryPopR)
+import Data.Functor (void)
+import Data.Heap (Heap, Entry(..))
+import Data.IORef
+ (IORef, modifyIORef, newIORef, readIORef, atomicModifyIORef)
+import Data.Maybe (fromJust)
+import Data.Set (Set)
+import GHC.Conc (ThreadId(..))
+import GHC.Exts
+import GHC.IO (IO(..))
+
+import qualified Data.Heap as H
+import qualified Data.Set as S
+
+-- MVar diagnostics has some overhead - around 5% on the asyncly null benchmark.
+-- We can keep it on in production to debug problems quickly if and when they
+-- happen, but it may result in unexpected output when threads are left hanging
+-- until they are GCed because the consumer went away.
+
+#ifdef DIAGNOSTICS
+import Control.Concurrent.MVar (tryTakeMVar)
+import Control.Exception
+ (catches, throwIO, Handler(..), BlockedIndefinitelyOnMVar(..),
+ BlockedIndefinitelyOnSTM(..))
+import Data.IORef (writeIORef)
+import System.IO (hPutStrLn, stderr)
+#endif
+
+------------------------------------------------------------------------------
+-- Parent child thread communication type
+------------------------------------------------------------------------------
+
+-- | Events that a child thread may send to a parent thread.
+data ChildEvent a =
+ ChildYield a
+ | ChildStop ThreadId (Maybe SomeException)
+
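
Schematically, the consumer side turns these events back into stream output: a ChildYield carries an element, while a ChildStop retires the worker and possibly propagates an exception. The classify helper below is purely illustrative; the real consumer lives in the stream modules and also does thread accounting.

-- illustrative only: separate outputs from worker-termination events
classify :: ChildEvent a -> Either (ThreadId, Maybe SomeException) a
classify (ChildYield a)    = Right a
classify (ChildStop tid e) = Left (tid, e)
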
+-- | Sorting out-of-turn outputs in a heap for Ahead style streams
+data AheadHeapEntry (t :: (* -> *) -> * -> *) m a =
+ AheadEntryPure a
+ | AheadEntryStream (t m a)
+
+------------------------------------------------------------------------------
+-- State threaded around the monad for thread management
+------------------------------------------------------------------------------
+
+-- | Identify the type of the SVar. Two computations using the same style can
+-- be scheduled on the same SVar.
+data SVarStyle =
+ AsyncVar -- depth first concurrent
+ | WAsyncVar -- breadth first concurrent
+ | ParallelVar -- all parallel
+ | AheadVar -- Concurrent look ahead
+ deriving (Eq, Show)
+
+-- | An SVar or a Stream Var is a conduit to the output from multiple streams
+-- running concurrently and asynchronously. An SVar can be thought of as an
+-- asynchronous IO handle. We can write any number of streams to an SVar in a
+-- non-blocking manner and then read them back at any time at any pace. The
+-- SVar would run the streams asynchronously and accumulate results. An SVar
+-- may not really execute the stream completely and accumulate all the results.
+-- However, it ensures that the reader can read the results at whatever pace
+-- it wants to read. The SVar monitors and adapts to the consumer's pace.
+--
+-- An SVar is a mini scheduler; it has an associated workLoop that holds the
+-- stream tasks to be picked and run by a pool of worker threads. It has an
+-- associated output queue where the output stream elements are placed by the
+-- worker threads. An outputDoorBell is used by the worker threads to notify the
+-- consumer thread about the availability of new results in the output queue. More
+-- workers are added to the SVar by 'fromStreamVar' on demand if the output
+-- produced is not keeping pace with the consumer. On bounded SVars, workers
+-- block on the output queue to provide throttling of the producer when the
+-- consumer is not pulling fast enough. The number of workers may even get
+-- reduced depending on the consuming pace.
+--
+-- New work is enqueued either at the time of creation of the SVar or as a
+-- result of executing the parallel combinators i.e. '<|' and '<|>' when the
+-- already enqueued computations get evaluated. See 'joinStreamVarAsync'.
+--
+-- XXX can we use forall t m.
+data SVar t m a =
+ SVar {
+ -- Read only state
+ svarStyle :: SVarStyle
+
+ -- Shared output queue (events, length)
+ , outputQueue :: IORef ([ChildEvent a], Int)
+ , maxYieldLimit :: Maybe (IORef Int)
+ , outputDoorBell :: MVar () -- signal the consumer about output
+ , readOutputQ :: m [ChildEvent a]
+ , postProcess :: m Bool
+
+ -- Used only by bounded SVar types
+ , enqueue :: t m a -> IO ()
+ , isWorkDone :: IO Bool
+ , needDoorBell :: IORef Bool
+ , workLoop :: m ()
+
+ -- Shared, thread tracking
+ , workerThreads :: IORef (Set ThreadId)
+ , workerCount :: IORef Int
+ , accountThread :: ThreadId -> m ()
+#ifdef DIAGNOSTICS
+ , outputHeap :: IORef (Heap (Entry Int (AheadHeapEntry t m a))
+ , Int
+ )
+ -- Shared work queue (stream, seqNo)
+ , aheadWorkQueue :: IORef ([t m a], Int)
+ , totalDispatches :: IORef Int
+ , maxWorkers :: IORef Int
+ , maxOutQSize :: IORef Int
+ , maxHeapSize :: IORef Int
+ , maxWorkQSize :: IORef Int
+#endif
+ }
+
+data State t m a = State
+ { streamVar :: Maybe (SVar t m a)
+ , yieldLimit :: Maybe Int
+ , threadsHigh :: Int
+ , bufferHigh :: Int
+ }
+
+defaultMaxThreads, defaultMaxBuffer :: Int
+defaultMaxThreads = 1500
+defaultMaxBuffer = 1500
+
+defState :: State t m a
+defState = State
+ { streamVar = Nothing
+ , yieldLimit = Nothing
+ , threadsHigh = defaultMaxThreads
+ , bufferHigh = defaultMaxBuffer
+ }
+
+-- XXX if perf gets affected we can have all the Nothing params in a single
+-- structure so that the reset is fast. We can also use rewrite rules such that
+-- reset occurs only in concurrent streams to reduce the impact on serial
+-- streams.
+-- We can optimize this so that we clear it only if it is a Just value; that
+-- results in slightly better perf for zip/zipM but the performance of scan
+-- worsens a lot because it does not fuse.
+rstState :: State t m a -> State t m b
+rstState st = st
+ { streamVar = Nothing
+ , yieldLimit = Nothing
+ }
+
+#ifdef DIAGNOSTICS
+{-# NOINLINE dumpSVar #-}
+dumpSVar :: SVar t m a -> IO String
+dumpSVar sv = do
+ tid <- myThreadId
+ (oqList, oqLen) <- readIORef $ outputQueue sv
+ db <- tryTakeMVar $ outputDoorBell sv
+ aheadDump <-
+ if svarStyle sv == AheadVar
+ then do
+ (oheap, oheapSeq) <- readIORef $ outputHeap sv
+ (wq, wqSeq) <- readIORef $ aheadWorkQueue sv
+ maxHp <- readIORef $ maxHeapSize sv
+ return $ unlines
+ [ "heap length = " ++ show (H.size oheap)
+ , "heap sequence = " ++ show oheapSeq
+ , "work queue length = " ++ show (length wq)
+ , "work queue sequence = " ++ show wqSeq
+ , "heap max size = " ++ show maxHp
+ ]
+ else return []
+
+ waiting <- readIORef $ needDoorBell sv
+ rthread <- readIORef $ workerThreads sv
+ workers <- readIORef $ workerCount sv
+ maxWrk <- readIORef $ maxWorkers sv
+ dispatches <- readIORef $ totalDispatches sv
+ maxOq <- readIORef $ maxOutQSize sv
+
+ return $ unlines
+ [ "tid = " ++ show tid
+ , "style = " ++ show (svarStyle sv)
+ , "outputQueue length computed = " ++ show (length oqList)
+ , "outputQueue length maintained = " ++ show oqLen
+ , "output outputDoorBell = " ++ show db
+ , "total dispatches = " ++ show dispatches
+ , "max workers = " ++ show maxWrk
+ , "max outQSize = " ++ show maxOq
+ ]
+ ++ aheadDump ++ unlines
+ [ "needDoorBell = " ++ show waiting
+ , "running threads = " ++ show rthread
+ , "running thread count = " ++ show workers
+ ]
+
+{-# NOINLINE mvarExcHandler #-}
+mvarExcHandler :: SVar t m a -> String -> BlockedIndefinitelyOnMVar -> IO ()
+mvarExcHandler sv label e@BlockedIndefinitelyOnMVar = do
+ svInfo <- dumpSVar sv
+ hPutStrLn stderr $ label ++ " " ++ "BlockedIndefinitelyOnMVar\n" ++ svInfo
+ throwIO e
+
+{-# NOINLINE stmExcHandler #-}
+stmExcHandler :: SVar t m a -> String -> BlockedIndefinitelyOnSTM -> IO ()
+stmExcHandler sv label e@BlockedIndefinitelyOnSTM = do
+ svInfo <- dumpSVar sv
+ hPutStrLn stderr $ label ++ " " ++ "BlockedIndefinitelyOnSTM\n" ++ svInfo
+ throwIO e
+
+withDBGMVar :: SVar t m a -> String -> IO () -> IO ()
+withDBGMVar sv label action =
+ action `catches` [ Handler (mvarExcHandler sv label)
+ , Handler (stmExcHandler sv label)
+ ]
+#else
+withDBGMVar :: SVar t m a -> String -> IO () -> IO ()
+withDBGMVar _ _ action = action
+#endif
+
+-- Slightly faster version of CAS. Gained some improvement by avoiding the use
+-- of "evaluate" because we know we do not have exceptions in fn.
+{-# INLINE atomicModifyIORefCAS #-}
+atomicModifyIORefCAS :: IORef a -> (a -> (a,b)) -> IO b
+atomicModifyIORefCAS ref fn = do
+ tkt <- readForCAS ref
+ loop tkt retries
+
+ where
+
+ retries = 25 :: Int
+ loop _ 0 = atomicModifyIORef ref fn
+ loop old tries = do
+ let (new, result) = fn $ peekTicket old
+ (success, tkt) <- casIORef ref old new
+ if success
+ then return result
+ else loop tkt (tries - 1)
+
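
For illustration, the CAS based modify above is a drop-in replacement for atomicModifyIORef, e.g. a counter bump returning the old value (bumpCounter is a hypothetical helper, not part of this module):

bumpCounter :: IORef Int -> IO Int
bumpCounter ref = atomicModifyIORefCAS ref $ \n -> (n + 1, n)
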
+------------------------------------------------------------------------------
+-- Spawning threads and collecting result in streamed fashion
+------------------------------------------------------------------------------
+
+-- | A monad that can perform concurrent or parallel IO operations. Streams
+-- that can be composed concurrently require the underlying monad to be
+-- 'MonadAsync'.
+--
+-- @since 0.1.0
+type MonadAsync m = (MonadIO m, MonadBaseControl IO m, MonadThrow m)
+
+-- Stolen from the async package. The perf improvement is modest, 2% on a
+-- thread heavy benchmark (parallel composition using noop computations).
+-- A version of forkIO that does not include the outer exception
+-- handler: saves a bit of time when we will be installing our own
+-- exception handler.
+{-# INLINE rawForkIO #-}
+rawForkIO :: IO () -> IO ThreadId
+rawForkIO action = IO $ \ s ->
+ case (fork# action s) of (# s1, tid #) -> (# s1, ThreadId tid #)
+
+{-# INLINE doFork #-}
+doFork :: MonadBaseControl IO m
+ => m ()
+ -> (SomeException -> IO ())
+ -> m ThreadId
+doFork action exHandler =
+ control $ \runInIO ->
+ mask $ \restore -> do
+ tid <- rawForkIO $ catch (restore $ void $ runInIO action)
+ exHandler
+ runInIO (return tid)
+
+-- XXX exception safety of all atomic/MVar operations
+
+-- TBD Each worker can have their own queue and the consumer can empty one
+-- queue at a time, that way contention can be reduced.
+
+-- | This function is used by the producer threads to queue output for the
+-- consumer thread to consume. Returns whether the queue has more space.
+send :: Int -> SVar t m a -> ChildEvent a -> IO Bool
+send maxOutputQLen sv msg = do
+ len <- atomicModifyIORefCAS (outputQueue sv) $ \(es, n) ->
+ ((msg : es, n + 1), n)
+ when (len <= 0) $ do
+ -- The wake up must happen only after the store has finished otherwise
+ -- we can have lost wakeup problems.
+ writeBarrier
+ -- Since multiple workers can try this at the same time, it is possible
+ -- that we may put a spurious MVar after the consumer has already seen
+ -- the output. But that's harmless, at worst it may cause the consumer
+ -- to read the queue again and find it empty.
+ -- The important point is that the consumer is guaranteed to receive a
+ -- doorbell if something was added to the queue after it empties it.
+ void $ tryPutMVar (outputDoorBell sv) ()
+ return (len < maxOutputQLen || maxOutputQLen < 0)
+
+{-# NOINLINE sendYield #-}
+sendYield :: Int -> SVar t m a -> ChildEvent a -> IO Bool
+sendYield maxOutputQLen sv msg = do
+ ylimit <- case maxYieldLimit sv of
+ Nothing -> return True
+ Just ref -> atomicModifyIORefCAS ref $ \x -> (x - 1, x > 1)
+ r <- send maxOutputQLen sv msg
+ return $ r && ylimit
+
+{-# NOINLINE sendStop #-}
+sendStop :: SVar t m a -> IO ()
+sendStop sv = do
+ liftIO $ atomicModifyIORefCAS_ (workerCount sv) $ \n -> n - 1
+ myThreadId >>= \tid -> void $ send (-1) sv (ChildStop tid Nothing)
+
+-------------------------------------------------------------------------------
+-- Async
+-------------------------------------------------------------------------------
+
+-- Note: For purely right associated expressions this queue should have at most
+-- one element. It grows to more than one when we have left associated
+-- expressions. Large left associated compositions can grow this to a
+-- large size.
+{-# INLINE enqueueLIFO #-}
+enqueueLIFO :: SVar t m a -> IORef [t m a] -> t m a -> IO ()
+enqueueLIFO sv q m = do
+ atomicModifyIORefCAS_ q $ \ms -> m : ms
+ storeLoadBarrier
+ w <- readIORef $ needDoorBell sv
+ when w $ do
+ -- Note: the sequence of operations is important for correctness here.
+ -- We need to set the flag to false strictly before sending the
+ -- outputDoorBell, otherwise the outputDoorBell may get processed too early and
+ -- then we may set the flag to False later, making the consumer lose
+ -- the flag even without receiving an outputDoorBell.
+ atomicModifyIORefCAS_ (needDoorBell sv) (const False)
+ void $ tryPutMVar (outputDoorBell sv) ()
+
+{-# INLINE workLoopLIFO #-}
+workLoopLIFO :: MonadIO m
+ => (State t m a -> IORef [t m a] -> t m a -> m () -> m ())
+ -> State t m a -> IORef [t m a] -> m ()
+workLoopLIFO f st q = run
+
+ where
+
+ sv = fromJust $ streamVar st
+ run = do
+ work <- dequeue
+ case work of
+ Nothing -> liftIO $ sendStop sv
+ Just m -> f st q m run
+
+ dequeue = liftIO $ atomicModifyIORefCAS q $ \case
+ [] -> ([], Nothing)
+ x : xs -> (xs, Just x)
+
+-------------------------------------------------------------------------------
+-- WAsync
+-------------------------------------------------------------------------------
+
+-- XXX we can use the Ahead style sequence/heap mechanism to make the best
+-- effort to always try to finish the streams on the left side of an expression
+-- first as long as possible.
+
+{-# INLINE enqueueFIFO #-}
+enqueueFIFO :: SVar t m a -> LinkedQueue (t m a) -> t m a -> IO ()
+enqueueFIFO sv q m = do
+ pushL q m
+ storeLoadBarrier
+ w <- readIORef $ needDoorBell sv
+ when w $ do
+ -- Note: the sequence of operations is important for correctness here.
+ -- We need to set the flag to false strictly before sending the
+ -- outputDoorBell, otherwise the outputDoorBell may get processed too early and
+ -- then we may set the flag to False later, making the consumer lose
+ -- the flag even without receiving an outputDoorBell.
+ atomicModifyIORefCAS_ (needDoorBell sv) (const False)
+ void $ tryPutMVar (outputDoorBell sv) ()
+
+{-# INLINE workLoopFIFO #-}
+workLoopFIFO :: MonadIO m
+ => (State t m a -> LinkedQueue (t m a) -> t m a -> m () -> m ())
+ -> State t m a -> LinkedQueue (t m a) -> m ()
+workLoopFIFO f st q = run
+
+ where
+
+ sv = fromJust $ streamVar st
+ run = do
+ work <- liftIO $ tryPopR q
+ case work of
+ Nothing -> liftIO $ sendStop sv
+ Just m -> f st q m run
+
+-------------------------------------------------------------------------------
+-- Ahead
+-------------------------------------------------------------------------------
+
+-- Lookahead streams can execute multiple tasks concurrently, ahead of time,
+-- but always serve them in the same order as they appear in the stream. To
+-- implement lookahead streams efficiently we assign a sequence number to each
+-- task when the task is picked up for execution. When the task finishes, the
+-- output is tagged with the same sequence number and we rearrange the outputs
+-- in sequence based on that number.
+--
+-- To explain the mechanism imagine that the current task at the head of the
+-- stream has a "token" to yield to the outputQueue. The ownership of the token
+-- is determined by the current sequence number, which is maintained in the outputHeap.
+-- Sequence number is assigned when a task is queued. When a thread dequeues a
+-- task it picks up the sequence number as well and when the output is ready it
+-- uses the sequence number to queue the output to the outputQueue.
+--
+-- The thread with current sequence number sends the output directly to the
+-- outputQueue. Other threads push the output to the outputHeap. When the task
+-- being queued on the heap is a stream of many elements we evaluate only the
+-- first element and keep the rest of the unevaluated computation in the heap.
+-- When such a task gets the "token" for outputQueue it evaluates and directly
+-- yields all the elements to the outputQueue without checking for the
+-- "token".
+--
+-- Note that no two outputs in the heap can have the same sequence numbers and
+-- therefore we do not need a stable heap. We have also separated the buffer
+-- for the current task (outputQueue) and the pending tasks (outputHeap) so
+-- that the pending tasks cannot interfere with the current task. Note that for
+-- a single task just the outputQueue is enough and for the case of many
+-- threads just a heap is good enough. However we balance between these two
+-- cases, so that both are efficient.
+--
+-- For bigger streams it may make sense to have separate buffers for each
+-- stream. However, for singleton streams this may become inefficient. On the
+-- other hand, if we do not have separate buffers, the streams that come later in
+-- sequence may hog the buffer, hindering the streams that are ahead. For this
+-- reason we have a single element buffer limitation for the streams being
+-- executed in advance.
+--
+-- This scheme works pretty efficiently with less than 40% extra overhead
+-- compared to the Async streams where we do not have any kind of sequencing of
+-- the outputs. It is especially devised so that we are most efficient when we
+-- have short tasks and need just a single thread. Also when a thread yields
+-- many items it can hold lockfree access to the outputQueue and do it
+-- efficiently.
+--
+-- XXX Maybe we can start the ahead threads at a lower cpu and IO priority so
+-- that they do not hog the resources and hinder the progress of the threads in
+-- front of them.
+
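
As a standalone model of the sequencing described above (independent of the SVar machinery; drainInOrder is a hypothetical helper), outputs tagged with sequence numbers can be drained from a heap only while they are contiguous with the expected next number, which is essentially what the heap dequeue further below does one entry at a time:

import qualified Data.Heap as H
import Data.Heap (Entry(..))

-- drain entries whose sequence numbers are contiguous with 'next'; anything
-- out of turn stays in the heap until its predecessor arrives
drainInOrder :: Int -> H.Heap (Entry Int a) -> ([a], Int, H.Heap (Entry Int a))
drainInOrder next hp = case H.uncons hp of
    Just (Entry seqNo x, hp') | seqNo == next ->
        let (ys, next', rest) = drainInOrder (next + 1) hp'
        in  (x : ys, next', rest)
    _ -> ([], next, hp)
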
+-- Left associated ahead expressions are expensive. We start a new SVar for
+-- each left associative expression. The queue is used only for right
+-- associated expressions; we queue the right expression and execute the left.
+-- Therefore the queue never has more than one item in it.
+{-# INLINE enqueueAhead #-}
+enqueueAhead :: SVar t m a -> IORef ([t m a], Int) -> t m a -> IO ()
+enqueueAhead sv q m = do
+ atomicModifyIORefCAS_ q $ \ case
+ ([], n) -> ([m], n + 1) -- increment sequence
+ _ -> error "not empty"
+ storeLoadBarrier
+ w <- readIORef $ needDoorBell sv
+ when w $ do
+ -- Note: the sequence of operations is important for correctness here.
+ -- We need to set the flag to false strictly before sending the
+ -- outputDoorBell, otherwise the outputDoorBell may get processed too early and
+ -- then we may set the flag to False later, making the consumer lose
+ -- the flag even without receiving an outputDoorBell.
+ atomicModifyIORefCAS_ (needDoorBell sv) (const False)
+ void $ tryPutMVar (outputDoorBell sv) ()
+
+-- Normally the thread that has the token should never go away. The token gets
+-- handed over to another thread, but someone or the other has the token at any
+-- point of time. But if the task that has the token finds that the outputQueue
+-- is full, it can go away without even handing over the token to another
+-- thread. In that case it sets the nextSequence number in the heap to its own
+-- sequence number before going away. To handle this case, any task that
+-- does not have the token tries to dequeue from the heap first before
+-- dequeuing from the work queue. If it finds that the task at the top of the
+-- heap is the one that owns the current sequence number then it grabs the
+-- token and starts with that.
+--
+-- XXX instead of queueing just the head element and the remaining computation
+-- on the heap, evaluate as many as we can and place them on the heap. But we
+-- need to give higher priority to the lower sequence numbers so that lower
+-- priority tasks do not fill up the heap making higher priority tasks block
+-- due to full heap. Maybe we can have a weighted space for them in the heap.
+-- The weight is inversely proportional to the sequence number.
+--
+-- XXX review for livelock
+--
+{-# INLINE queueEmptyAhead #-}
+queueEmptyAhead :: MonadIO m => IORef ([t m a], Int) -> m Bool
+queueEmptyAhead q = liftIO $ do
+ (xs, _) <- readIORef q
+ return $ null xs
+
+{-# INLINE dequeueAhead #-}
+dequeueAhead :: MonadIO m
+ => IORef ([t m a], Int) -> m (Maybe (t m a, Int))
+dequeueAhead q = liftIO $ do
+ atomicModifyIORefCAS q $ \case
+ ([], n) -> (([], n), Nothing)
+ (x : [], n) -> (([], n), Just (x, n))
+ _ -> error "more than one item on queue"
+
+{-# INLINE dequeueFromHeap #-}
+dequeueFromHeap
+ :: IORef (Heap (Entry Int (AheadHeapEntry t m a)), Int)
+ -> IO (Maybe (Entry Int (AheadHeapEntry t m a)))
+dequeueFromHeap hpRef = do
+ atomicModifyIORefCAS hpRef $ \hp@(h, snum) -> do
+ let r = H.uncons h
+ case r of
+ Nothing -> (hp, Nothing)
+ Just (ent@(Entry seqNo _ev), hp') ->
+ if (seqNo == snum)
+ then ((hp', seqNo), Just ent)
+ else (hp, Nothing)
+
+-------------------------------------------------------------------------------
+-- WAhead
+-------------------------------------------------------------------------------
+
+-- XXX To be implemented. Use a linked queue like WAsync and put back the
+-- remaining computation at the back of the queue instead of the heap, and
+-- increment the sequence number.
+
+-- Thread tracking is needed for two reasons:
+--
+-- 1) Killing threads on exceptions. Threads may not be left to go away by
+-- themselves because they may run for significant times before going away or
+-- worse they may be stuck in IO and never go away.
+--
+-- 2) To know when all threads are done and the stream has ended.
+
+{-# NOINLINE addThread #-}
+addThread :: MonadIO m => SVar t m a -> ThreadId -> m ()
+addThread sv tid =
+ liftIO $ modifyIORef (workerThreads sv) (S.insert tid)
+
+-- This is cheaper than modifyThread because we do not have to send an
+-- outputDoorBell. This can make a difference when more workers are being
+-- dispatched.
+{-# INLINE delThread #-}
+delThread :: MonadIO m => SVar t m a -> ThreadId -> m ()
+delThread sv tid =
+ liftIO $ modifyIORef (workerThreads sv) $ (\s -> S.delete tid s)
+
+-- If present then delete else add. This takes care of out of order add and
+-- delete i.e. a delete arriving before we even added a thread.
+-- This occurs when the forked thread is done even before the 'addThread' right
+-- after the fork gets a chance to run.
+{-# INLINE modifyThread #-}
+modifyThread :: MonadIO m => SVar t m a -> ThreadId -> m ()
+modifyThread sv tid = do
+ changed <- liftIO $ atomicModifyIORefCAS (workerThreads sv) $ \old ->
+ if (S.member tid old)
+ then let new = (S.delete tid old) in (new, new)
+ else let new = (S.insert tid old) in (new, old)
+ if null changed
+ then liftIO $ do
+ writeBarrier
+ void $ tryPutMVar (outputDoorBell sv) ()
+ else return ()
+
+-- | This is safe even if we are adding more threads concurrently because if
+-- a child thread is adding another thread then anyway 'workerThreads' will
+-- not be empty.
+{-# INLINE allThreadsDone #-}
+allThreadsDone :: MonadIO m => SVar t m a -> m Bool
+allThreadsDone sv = liftIO $ S.null <$> readIORef (workerThreads sv)
+
+{-# NOINLINE handleChildException #-}
+handleChildException :: SVar t m a -> SomeException -> IO ()
+handleChildException sv e = do
+ tid <- myThreadId
+ void $ send (-1) sv (ChildStop tid (Just e))
+
+#ifdef DIAGNOSTICS
+recordMaxWorkers :: MonadIO m => SVar t m a -> m ()
+recordMaxWorkers sv = liftIO $ do
+ active <- readIORef (workerCount sv)
+ maxWrk <- readIORef (maxWorkers sv)
+ when (active > maxWrk) $ writeIORef (maxWorkers sv) active
+ modifyIORef (totalDispatches sv) (+1)
+#endif
+
+{-# NOINLINE pushWorker #-}
+pushWorker :: MonadAsync m => SVar t m a -> m ()
+pushWorker sv = do
+ liftIO $ atomicModifyIORefCAS_ (workerCount sv) $ \n -> n + 1
+#ifdef DIAGNOSTICS
+ recordMaxWorkers sv
+#endif
+ doFork (workLoop sv) (handleChildException sv) >>= addThread sv
+
+-- XXX we can push the workerCount modification in accountThread and use the
+-- same pushWorker for Parallel case as well.
+--
+-- | In contrast to pushWorker which always happens only from the consumer
+-- thread, a pushWorkerPar can happen concurrently from multiple threads on the
+-- producer side. So we need to use a thread safe modification of
+-- workerThreads. Alternatively, we can use a CreateThread event to avoid
+-- using a CAS based modification.
+{-# NOINLINE pushWorkerPar #-}
+pushWorkerPar :: MonadAsync m => SVar t m a -> m () -> m ()
+pushWorkerPar sv wloop = do
+ -- We do not use workerCount in case of ParallelVar but still there is no
+ -- harm in maintaining it correctly.
+#ifdef DIAGNOSTICS
+ liftIO $ atomicModifyIORefCAS_ (workerCount sv) $ \n -> n + 1
+ recordMaxWorkers sv
+#endif
+ doFork wloop (handleChildException sv) >>= modifyThread sv
+
+dispatchWorker :: MonadAsync m => Int -> SVar t m a -> m ()
+dispatchWorker maxWorkerLimit sv = do
+ done <- liftIO $ isWorkDone sv
+ when (not done) $ do
+ -- Note that the worker count is only decremented during event
+ -- processing in fromStreamVar and therefore it is safe to read and
+ -- use it without a lock.
+ cnt <- liftIO $ readIORef $ workerCount sv
+ -- Note that we may deadlock if the previous workers (tasks in the
+ -- stream) wait/depend on the future workers (tasks in the stream)
+ -- executing. In that case we should either configure the maxWorker
+ -- count to a higher value or use parallel style instead of ahead or async
+ -- style.
+ limit <- case maxYieldLimit sv of
+ Nothing -> return maxWorkerLimit
+ Just x -> do
+ lim <- liftIO $ readIORef x
+ return $
+ if maxWorkerLimit > 0
+ then min maxWorkerLimit lim
+ else lim
+ when (cnt < limit || limit < 0) $ pushWorker sv
+
+{-# NOINLINE sendWorkerWait #-}
+sendWorkerWait :: MonadAsync m => Int -> SVar t m a -> m ()
+sendWorkerWait maxWorkerLimit sv = do
+ -- Note that we are guaranteed to have at least one outstanding worker when
+ -- we enter this function. So if we sleep we are guaranteed to be woken up
+ -- by a outputDoorBell, when the worker exits.
+
+ -- XXX we need a better way to handle this than hardcoded delays. The
+ -- delays may be different for different systems.
+ ncpu <- liftIO $ getNumCapabilities
+ if ncpu <= 1
+ then
+ if (svarStyle sv == AheadVar)
+ then liftIO $ threadDelay 100
+ else liftIO $ threadDelay 25
+ else
+ if (svarStyle sv == AheadVar)
+ then liftIO $ threadDelay 100
+ else liftIO $ threadDelay 10
+
+ (_, n) <- liftIO $ readIORef (outputQueue sv)
+ when (n <= 0) $ do
+ -- The queue may be empty temporarily if the worker has dequeued the
+ -- work item but has not enqueued the remaining part yet. For the same
+ -- reason, a worker may come back if it tries to dequeue and finds the
+ -- queue empty, even though the whole work has not finished yet.
+
+ -- The queue may have been empty only temporarily when we checked
+ -- it. If that's the case we might sleep indefinitely unless the
+ -- active workers produce some output. We may deadlock especially
+ -- if the output from the active workers depends on the future
+ -- workers that we may never send.
+ -- So in case the queue was temporarily empty set a flag to inform
+ -- the enqueue to send us a doorbell.
+
+ -- Note that this is just a best effort mechanism to avoid a
+ -- deadlock. Deadlocks may still happen if for some weird reason
+ -- the consuming computation shares an MVar or some other resource
+ -- with the producing computation and gets blocked on that resource
+ -- and therefore cannot do any pushworker to add more threads to
+ -- the producer. In such cases the programmer should use a parallel
+ -- style so that all the producers are scheduled immediately and
+ -- unconditionally. We can also use a separate monitor thread to
+ -- push workers instead of pushing them from the consumer, but then
+ -- we are no longer using pull based concurrency rate adaptation.
+ --
+ -- XXX update this in the tutorial.
+
+ -- register for the outputDoorBell before we check the queue so that if we
+ -- sleep because the queue was empty we are guaranteed to get a
+ -- doorbell on the next enqueue.
+
+ liftIO $ atomicModifyIORefCAS_ (needDoorBell sv) $ const True
+ liftIO $ storeLoadBarrier
+ dispatchWorker maxWorkerLimit sv
+
+ -- XXX test for the case when we miss sending a worker when the worker
+ -- count is more than 1500.
+ --
+ -- XXX Assert here that if the heap is not empty then there is at
+ -- least one outstanding worker. Otherwise we could be sleeping
+ -- forever.
+
+ done <- liftIO $ isWorkDone sv
+ if done
+ then do
+ liftIO $ withDBGMVar sv "sendWorkerWait: nothing to do"
+ $ takeMVar (outputDoorBell sv)
+ (_, len) <- liftIO $ readIORef (outputQueue sv)
+ when (len <= 0) $ sendWorkerWait maxWorkerLimit sv
+ else sendWorkerWait maxWorkerLimit sv
+
+{-# INLINE readOutputQRaw #-}
+readOutputQRaw :: SVar t m a -> IO ([ChildEvent a], Int)
+readOutputQRaw sv = do
+ (list, len) <- atomicModifyIORefCAS (outputQueue sv) $ \x -> (([],0), x)
+#ifdef DIAGNOSTICS
+ oqLen <- readIORef (maxOutQSize sv)
+ when (len > oqLen) $ writeIORef (maxOutQSize sv) len
+#endif
+ return (list, len)
+
+readOutputQBounded :: MonadAsync m => Int -> SVar t m a -> m [ChildEvent a]
+readOutputQBounded n sv = do
+ (list, len) <- liftIO $ readOutputQRaw sv
+ -- When there is no output seen we dispatch more workers to help
+ -- out if there is work pending in the work queue.
+ if len <= 0
+ then blockingRead
+ else do
+ -- send a worker proactively, if needed, even before we start
+ -- processing the output. This may degrade single processor
+ -- perf but improves multi-processor, because of more
+ -- parallelism
+ sendOneWorker
+ return list
+
+ where
+
+ sendOneWorker = do
+ cnt <- liftIO $ readIORef $ workerCount sv
+ when (cnt <= 0) $ do
+ done <- liftIO $ isWorkDone sv
+ when (not done) $ pushWorker sv
+
+ {-# INLINE blockingRead #-}
+ blockingRead = do
+ sendWorkerWait n sv
+ liftIO $ (readOutputQRaw sv >>= return . fst)
+
+postProcessBounded :: MonadAsync m => SVar t m a -> m Bool
+postProcessBounded sv = do
+ workersDone <- allThreadsDone sv
+ -- There may still be work pending even if there are no workers
+ -- pending because all the workers may return if the
+ -- outputQueue becomes full. In that case send off a worker to
+ -- kickstart the work again.
+ if workersDone
+ then do
+ r <- liftIO $ isWorkDone sv
+ when (not r) $ pushWorker sv
+ return r
+ else return False
+
+getAheadSVar :: MonadAsync m
+ => State t m a
+ -> ( State t m a
+ -> IORef ([t m a], Int)
+ -> IORef (Heap (Entry Int (AheadHeapEntry t m a)), Int)
+ -> m ())
+ -> IO (SVar t m a)
+getAheadSVar st f = do
+ outQ <- newIORef ([], 0)
+ outH <- newIORef (H.empty, 0)
+ outQMv <- newEmptyMVar
+ active <- newIORef 0
+ wfw <- newIORef False
+ running <- newIORef S.empty
+ q <- newIORef ([], -1)
+ yl <- case yieldLimit st of
+ Nothing -> return Nothing
+ Just x -> Just <$> newIORef x
+
+#ifdef DIAGNOSTICS
+ disp <- newIORef 0
+ maxWrk <- newIORef 0
+ maxOq <- newIORef 0
+ maxHs <- newIORef 0
+ maxWq <- newIORef 0
+#endif
+ let sv =
+ SVar { outputQueue = outQ
+ , maxYieldLimit = yl
+ , outputDoorBell = outQMv
+ , readOutputQ = readOutputQBounded (threadsHigh st) sv
+ , postProcess = postProcessBounded sv
+ , workerThreads = running
+ -- , workLoop = workLoopAhead sv q outH
+ , workLoop = f st{streamVar = Just sv} q outH
+ , enqueue = enqueueAhead sv q
+ , isWorkDone = isWorkDoneAhead q outH
+ , needDoorBell = wfw
+ , svarStyle = AheadVar
+ , workerCount = active
+ , accountThread = delThread sv
+#ifdef DIAGNOSTICS
+ , aheadWorkQueue = q
+ , outputHeap = outH
+ , totalDispatches = disp
+ , maxWorkers = maxWrk
+ , maxOutQSize = maxOq
+ , maxHeapSize = maxHs
+ , maxWorkQSize = maxWq
+#endif
+ }
+ in return sv
+
+ where
+
+ {-# INLINE isWorkDoneAhead #-}
+ isWorkDoneAhead q ref = do
+ heapDone <- do
+ (hp, _) <- readIORef ref
+ return (H.size hp <= 0)
+ queueDone <- checkEmpty q
+ return $ queueDone && heapDone
+
+ checkEmpty q = do
+ (xs, _) <- readIORef q
+ return $ null xs
+
+getParallelSVar :: MonadIO m => IO (SVar t m a)
+getParallelSVar = do
+ outQ <- newIORef ([], 0)
+ outQMv <- newEmptyMVar
+ active <- newIORef 0
+ running <- newIORef S.empty
+#ifdef DIAGNOSTICS
+ disp <- newIORef 0
+ maxWrk <- newIORef 0
+ maxOq <- newIORef 0
+ maxHs <- newIORef 0
+ maxWq <- newIORef 0
+#endif
+ let sv =
+ SVar { outputQueue = outQ
+ , maxYieldLimit = Nothing
+ , outputDoorBell = outQMv
+ , readOutputQ = readOutputQPar sv
+ , postProcess = allThreadsDone sv
+ , workerThreads = running
+ , workLoop = undefined
+ , enqueue = undefined
+ , isWorkDone = undefined
+ , needDoorBell = undefined
+ , svarStyle = ParallelVar
+ , workerCount = active
+ , accountThread = modifyThread sv
+#ifdef DIAGNOSTICS
+ , aheadWorkQueue = undefined
+ , outputHeap = undefined
+ , totalDispatches = disp
+ , maxWorkers = maxWrk
+ , maxOutQSize = maxOq
+ , maxHeapSize = maxHs
+ , maxWorkQSize = maxWq
+#endif
+ }
+ in return sv
+
+ where
+
+ readOutputQPar sv = liftIO $ do
+ withDBGMVar sv "readOutputQPar: doorbell" $ takeMVar (outputDoorBell sv)
+ readOutputQRaw sv >>= return . fst
+
+sendWorker :: MonadAsync m => SVar t m a -> t m a -> m (SVar t m a)
+sendWorker sv m = do
+ -- Note: We must have all the work on the queue before sending the
+ -- pushworker, otherwise the pushworker may exit before we even get a
+ -- chance to push.
+ liftIO $ enqueue sv m
+ pushWorker sv
+ return sv
+
+{-# INLINABLE newAheadVar #-}
+newAheadVar :: MonadAsync m
+ => State t m a
+ -> t m a
+ -> ( State t m a
+ -> IORef ([t m a], Int)
+ -> IORef (Heap (Entry Int (AheadHeapEntry t m a)), Int)
+ -> m ())
+ -> m (SVar t m a)
+newAheadVar st m wloop = do
+ sv <- liftIO $ getAheadSVar st wloop
+ sendWorker sv m
+
+{-# INLINABLE newParallelVar #-}
+newParallelVar :: MonadAsync m => m (SVar t m a)
+newParallelVar = liftIO $ getParallelSVar
+
+-- XXX this errors out for Parallel/Ahead SVars
+-- | Write a stream to an 'SVar' in a non-blocking manner. The stream can then
+-- be read back from the SVar using 'fromSVar'.
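+--
+-- A rough sketch of the round trip (illustrative only; it assumes an SVar
+-- @sv@ created for an async style stream and 'fromSVar' from the higher
+-- level stream modules):
+--
+-- > toStreamVar sv s            -- queue the stream on the SVar
+-- > runStream (fromSVar sv)     -- read the results back as a stream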
+toStreamVar :: MonadAsync m => SVar t m a -> t m a -> m ()
+toStreamVar sv m = do
+ liftIO $ (enqueue sv) m
+ done <- allThreadsDone sv
+ -- XXX This is safe only when called from the consumer thread or when no
+ -- consumer is present. There may be a race if we are not running in the
+ -- consumer thread.
+ when done $ pushWorker sv
diff --git a/src/Streamly/Streams.hs b/src/Streamly/Streams.hs
deleted file mode 100644
index 2281bc2..0000000
--- a/src/Streamly/Streams.hs
+++ /dev/null
@@ -1,1490 +0,0 @@
-{-# LANGUAGE CPP #-}
-{-# LANGUAGE ConstraintKinds #-}
-{-# LANGUAGE FlexibleContexts #-}
-{-# LANGUAGE FlexibleInstances #-}
-{-# LANGUAGE GeneralizedNewtypeDeriving#-}
-{-# LANGUAGE InstanceSigs #-}
-{-# LANGUAGE MultiParamTypeClasses #-}
-{-# LANGUAGE RankNTypes #-}
-{-# LANGUAGE StandaloneDeriving #-}
-{-# LANGUAGE UndecidableInstances #-} -- XXX
-
--- |
--- Module : Streamly.Streams
--- Copyright : (c) 2017 Harendra Kumar
---
--- License : BSD3
--- Maintainer : harendra.kumar@gmail.com
--- Stability : experimental
--- Portability : GHC
---
---
-module Streamly.Streams
- (
- IsStream (..)
- , Streaming -- deprecated
- , S.MonadAsync
-
- -- * Construction
- , nil
- , cons
- , (.:)
- , streamBuild
- , fromCallback
- , fromSVar
-
- -- * Elimination
- , streamFold
- , runStream
- , runStreaming -- deprecated
- , toSVar
-
- -- * Transformation
- , mkAsync
- , (|$)
- , (|&)
- , (|$.)
- , (|&.)
-
- -- * Merging Streams
- , serial
- , wSerial
- , ahead
- , async
- , wAsync
- , parallel
- , (<=>) --deprecated
- , (<|) --deprecated
-
- -- * IO Streams
- , Serial
- , WSerial
- , Ahead
- , Async
- , WAsync
- , Parallel
- , ZipSerial
- , ZipAsync
-
- -- * Stream Transformers
- , SerialT
- , StreamT -- deprecated
- , WSerialT
- , InterleavedT -- deprecated
- , AheadT
- , AsyncT
- , WAsyncT
- , ParallelT
- , ZipStream -- deprecated
- , ZipSerialM
- , ZipAsyncM
-
- -- * Type Adapters
- , serially -- deprecated
- , wSerially
- , interleaving -- deprecated
- , aheadly
- , asyncly
- , wAsyncly
- , parallely
- , zipSerially
- , zipping -- deprecated
- , zipAsyncly
- , zippingAsync -- deprecated
- , adapt
-
- -- * Running Streams
- , runStreamT -- deprecated
- , runInterleavedT -- deprecated
- , runAsyncT -- deprecated
- , runParallelT -- deprecated
- , runZipStream -- deprecated
- , runZipAsync -- deprecated
-
- -- * Fold Utilities
- , foldWith
- , foldMapWith
- , forEachWith
- )
-where
-
-import Control.Monad (ap)
-import Control.Monad.Base (MonadBase (..), liftBaseDefault)
-import Control.Monad.Catch (MonadThrow, throwM)
--- import Control.Monad.Error.Class (MonadError(..))
-import Control.Monad.IO.Class (MonadIO(..))
-import Control.Monad.Reader.Class (MonadReader(..))
-import Control.Monad.State.Class (MonadState(..))
-import Control.Monad.Trans.Class (MonadTrans (lift))
-import Data.Semigroup (Semigroup(..))
-import Streamly.Core ( MonadAsync , SVar,
- SVarStyle(..))
-import qualified Streamly.Core as S
-
-------------------------------------------------------------------------------
--- Types that can behave as a Stream
-------------------------------------------------------------------------------
-
-infixr 5 `consM`
-infixr 5 |:
-
--- | Class of types that can represent a stream of elements of some type 'a' in
--- some monad 'm'.
---
--- @since 0.2.0
-class IsStream t where
- toStream :: t m a -> S.Stream m a
- fromStream :: S.Stream m a -> t m a
- -- | Constructs a stream by adding a monadic action at the head of an
- -- existing stream. For example:
- --
- -- @
- -- > toList $ getLine \`consM` getLine \`consM` nil
- -- hello
- -- world
- -- ["hello","world"]
- -- @
- --
- -- /Concurrent (do not use 'parallely' to construct infinite streams)/
- --
- -- @since 0.2.0
- consM :: MonadAsync m => m a -> t m a -> t m a
- -- | Operator equivalent of 'consM'. We can read it as "@parallel colon@"
- -- to remember that @|@ comes before ':'.
- --
- -- @
- -- > toList $ getLine |: getLine |: nil
- -- hello
- -- world
- -- ["hello","world"]
- -- @
- --
- -- @
- -- let delay = threadDelay 1000000 >> print 1
- -- runStream $ serially $ delay |: delay |: delay |: nil
- -- runStream $ parallely $ delay |: delay |: delay |: nil
- -- @
- --
- -- /Concurrent (do not use 'parallely' to construct infinite streams)/
- --
- -- @since 0.2.0
- (|:) :: MonadAsync m => m a -> t m a -> t m a
- -- We can define (|:) just as 'consM' but it is defined explicitly for each
- -- type because we want to use SPECIALIZE pragma on the definition.
-
--- | Same as 'IsStream'.
---
--- @since 0.1.0
-{-# DEPRECATED Streaming "Please use IsStream instead." #-}
-type Streaming = IsStream
-
-------------------------------------------------------------------------------
--- Constructing a stream
-------------------------------------------------------------------------------
-
--- | An empty stream.
---
--- @
--- > toList nil
--- []
--- @
---
--- @since 0.1.0
-nil :: IsStream t => t m a
-nil = fromStream S.nil
-
--- | Constructs a stream by adding a monadic action at the head of an existing
--- stream. For example:
---
--- @
--- > toList $ getLine \`consM` getLine \`consM` nil
--- hello
--- world
--- ["hello","world"]
--- @
---
--- @since 0.2.0
-consMSerial :: (IsStream t, Monad m) => m a -> t m a -> t m a
-consMSerial m r = fromStream $ S.consM m (toStream r)
-
-infixr 5 `cons`
-
--- | Construct a stream by adding a pure value at the head of an existing
--- stream. For pure values it can be faster than 'consM'. For example:
---
--- @
--- > toList $ 1 \`cons` 2 \`cons` 3 \`cons` nil
--- [1,2,3]
--- @
---
--- @since 0.1.0
-cons :: IsStream t => a -> t m a -> t m a
-cons a r = fromStream $ S.cons a (toStream r)
-
-infixr 5 .:
-
--- | Operator equivalent of 'cons'.
---
--- @
--- > toList $ 1 .: 2 .: 3 .: nil
--- [1,2,3]
--- @
---
--- @since 0.1.1
-(.:) :: IsStream t => a -> t m a -> t m a
-(.:) = cons
-
--- | Build a stream from its church encoding. The function passed maps
--- directly to the underlying representation of the stream type. The second
--- parameter to the function is the "yield" function, yielding a value and the
--- remaining stream. The third parameter yields a single (last) value, and the
--- fourth represents an "empty" stream.
-streamBuild :: IsStream t
- => (forall r. Maybe (SVar m a)
- -> (a -> t m a -> m r)
- -> (a -> m r)
- -> m r
- -> m r)
- -> t m a
-streamBuild k = fromStream $ S.Stream $ \svr stp sng yld ->
- let yield a r = yld a (toStream r)
- in k svr yield sng stp
-
--- | Build a singleton stream from a callback function.
-fromCallback :: IsStream t => (forall r. (a -> m r) -> m r) -> t m a
-fromCallback k = fromStream $ S.Stream $ \_ _ sng _ -> k sng
-
--- | Read an SVar to get a stream.
-fromSVar :: (MonadAsync m, IsStream t) => SVar m a -> t m a
-fromSVar sv = fromStream $ S.fromStreamVar sv
-
-------------------------------------------------------------------------------
--- Destroying a stream
-------------------------------------------------------------------------------
-
--- | Fold a stream using its church encoding. The second argument is the "step"
--- function consuming an element and the remaining stream, if any. The third
--- argument consumes a single (last) element, and the fourth consumes an
--- "empty" stream that yields nothing.
-streamFold
- :: IsStream t
- => Maybe (SVar m a)
- -> (a -> t m a -> m r)
- -> (a -> m r)
- -> m r
- -> t m a
- -> m r
-streamFold svr step single blank m =
- let yield a x = step a (fromStream x)
- in (S.runStream (toStream m)) svr blank single yield
-
--- | Run a streaming composition, discard the results. By default it interprets
--- the stream as 'SerialT', to run other types of streams use the type adapting
--- combinators for example @runStream . 'asyncly'@.
---
--- @since 0.2.0
-runStream :: Monad m => SerialT m a -> m ()
-runStream m = go (toStream m)
- where
- go m1 =
- let stop = return ()
- single _ = return ()
- yield _ r = go r
- in (S.runStream m1) Nothing stop single yield
-
--- | Same as 'runStream'
---
--- @since 0.1.0
-{-# DEPRECATED runStreaming "Please use runStream instead." #-}
-runStreaming :: (Monad m, IsStream t) => t m a -> m ()
-runStreaming = runStream . adapt
-
--- | Write a stream to an 'SVar' in a non-blocking manner. The stream can then
--- be read back from the SVar using 'fromSVar'.
-toSVar :: (IsStream t, MonadAsync m) => SVar m a -> t m a -> m ()
-toSVar sv m = S.toStreamVar sv (toStream m)
-
-------------------------------------------------------------------------------
--- Transformation
-------------------------------------------------------------------------------
-
--- XXX Get rid of this?
--- | Make a stream asynchronous, triggers the computation and returns a stream
--- in the underlying monad representing the output generated by the original
--- computation. The returned action is exhaustible and must be drained once. If
--- not drained fully we may have a thread blocked forever and once exhausted it
--- will always return 'empty'.
---
--- @since 0.2.0
-mkAsync :: (IsStream t, MonadAsync m) => t m a -> m (t m a)
-mkAsync m = do
- sv <- S.newStreamVar1 AsyncVar (toStream m)
- return $ fromSVar sv
-
-{-# INLINE applyWith #-}
-applyWith :: (IsStream t, MonadAsync m)
- => SVarStyle -> (t m a -> t m b) -> t m a -> t m b
-applyWith style f x = fromStream $
- S.applyWith style (toStream . f . fromStream) (toStream x)
-
-{-# INLINE runWith #-}
-runWith :: (IsStream t, MonadAsync m)
- => SVarStyle -> (t m a -> m b) -> t m a -> m b
-runWith style f x = S.runWith style (f . fromStream) (toStream x)
-
-infixr 0 |$
-infixr 0 |$.
-
-infixl 1 |&
-infixl 1 |&.
-
--- | Parallel function application operator for streams; just like the regular
--- function application operator '$' except that it is concurrent. The
--- following code prints a value every second even though each stage adds a 1
--- second delay.
---
---
--- @
--- runStream $
--- S.mapM (\\x -> threadDelay 1000000 >> print x)
--- |$ S.repeatM (threadDelay 1000000 >> return 1)
--- @
---
--- /Concurrent/
---
--- @since 0.3.0
-{-# INLINE (|$) #-}
-(|$) :: (IsStream t, MonadAsync m) => (t m a -> t m b) -> t m a -> t m b
-f |$ x = applyWith ParallelVar f x
-
--- | Parallel reverse function application operator for streams; just like the
--- regular reverse function application operator '&' except that it is
--- concurrent.
---
--- @
--- runStream $
--- S.repeatM (threadDelay 1000000 >> return 1)
--- |& S.mapM (\\x -> threadDelay 1000000 >> print x)
--- @
---
--- /Concurrent/
---
--- @since 0.3.0
-{-# INLINE (|&) #-}
-(|&) :: (IsStream t, MonadAsync m) => t m a -> (t m a -> t m b) -> t m b
-x |& f = f |$ x
-
--- | Parallel function application operator; applies a @run@ or @fold@ function
--- to a stream such that the fold consumer and the stream producer run in
--- parallel. A @run@ or @fold@ function reduces the stream to a value in the
--- underlying monad. The @.@ at the end of the operator is a mnemonic for
--- termination of the stream.
---
--- @
--- S.foldlM' (\\_ a -> threadDelay 1000000 >> print a) ()
--- |$. S.repeatM (threadDelay 1000000 >> return 1)
--- @
---
--- /Concurrent/
---
--- @since 0.3.0
-{-# INLINE (|$.) #-}
-(|$.) :: (IsStream t, MonadAsync m) => (t m a -> m b) -> t m a -> m b
-f |$. x = runWith ParallelVar f x
-
--- | Parallel reverse function application operator for applying a run or fold
--- functions to a stream. Just like '|$.' except that the operands are reversed.
---
--- @
--- S.repeatM (threadDelay 1000000 >> return 1)
--- |&. S.foldlM' (\\_ a -> threadDelay 1000000 >> print a) ()
--- @
---
--- /Concurrent/
---
--- @since 0.3.0
-{-# INLINE (|&.) #-}
-(|&.) :: (IsStream t, MonadAsync m) => t m a -> (t m a -> m b) -> m b
-x |&. f = f |$. x
-
-------------------------------------------------------------------------------
--- CPP macros for common instances
-------------------------------------------------------------------------------
-
--- XXX use template haskell instead and include Monoid and IsStream instances
--- as well.
-
-withLocal :: MonadReader r m => (r -> r) -> S.Stream m a -> S.Stream m a
-withLocal f m =
- S.Stream $ \_ stp sng yld ->
- let single = local f . sng
- yield a r = local f $ yld a (withLocal f r)
- in (S.runStream m) Nothing (local f stp) single yield
-
-{-
--- XXX handle and test cross thread state transfer
-withCatchError
- :: MonadError e m
- => S.Stream m a -> (e -> S.Stream m a) -> S.Stream m a
-withCatchError m h =
- S.Stream $ \_ stp sng yld ->
- let run x = S.runStream x Nothing stp sng yield
- handle r = r `catchError` \e -> run $ h e
- yield a r = yld a (withCatchError r h)
- in handle $ run m
--}
-
-#define MONADPARALLEL , MonadAsync m
-
-#define MONAD_APPLICATIVE_INSTANCE(STREAM,CONSTRAINT) \
-instance (Monad m CONSTRAINT) => Applicative (STREAM m) where { \
- pure = STREAM . S.singleton; \
- (<*>) = ap }
-
-#define MONAD_COMMON_INSTANCES(STREAM,CONSTRAINT) \
-instance (MonadBase b m, Monad m CONSTRAINT) => MonadBase b (STREAM m) where {\
- liftBase = liftBaseDefault }; \
- \
-instance (MonadIO m CONSTRAINT) => MonadIO (STREAM m) where { \
- liftIO = lift . liftIO }; \
- \
-instance (MonadThrow m CONSTRAINT) => MonadThrow (STREAM m) where { \
- throwM = lift . throwM }; \
- \
-{- \
-instance (MonadError e m CONSTRAINT) => MonadError e (STREAM m) where { \
- throwError = lift . throwError; \
- catchError m h = \
- fromStream $ withCatchError (toStream m) (\e -> toStream $ h e) }; \
--} \
- \
-instance (MonadReader r m CONSTRAINT) => MonadReader r (STREAM m) where { \
- ask = lift ask; \
- local f m = fromStream $ withLocal f (toStream m) }; \
- \
-instance (MonadState s m CONSTRAINT) => MonadState s (STREAM m) where { \
- get = lift get; \
- put x = lift (put x); \
- state k = lift (state k) }
-
-------------------------------------------------------------------------------
--- SerialT
-------------------------------------------------------------------------------
-
--- | Deep serial composition or serial composition with depth first traversal.
--- The 'Semigroup' instance of 'SerialT' appends two streams serially in a
--- depth first manner, yielding all elements from the first stream, and then
--- all elements from the second stream.
---
--- @
--- import Streamly
--- import qualified "Streamly.Prelude" as S
---
--- main = ('toList' . 'serially' $ (fromFoldable [1,2]) \<\> (fromFoldable [3,4])) >>= print
--- @
--- @
--- [1,2,3,4]
--- @
---
--- The 'Monad' instance runs the /monadic continuation/ for each
--- element of the stream, serially.
---
--- @
--- main = 'runStream' . 'serially' $ do
--- x <- return 1 \<\> return 2
--- S.once $ print x
--- @
--- @
--- 1
--- 2
--- @
---
--- 'SerialT' nests streams serially in a depth first manner.
---
--- @
--- main = 'runStream' . 'serially' $ do
--- x <- return 1 \<\> return 2
--- y <- return 3 \<\> return 4
--- S.once $ print (x, y)
--- @
--- @
--- (1,3)
--- (1,4)
--- (2,3)
--- (2,4)
--- @
---
--- This behavior of 'SerialT' is exactly like a list transformer. We call the
--- monadic code being run for each element of the stream a monadic
--- continuation. In imperative paradigm we can think of this composition as
--- nested @for@ loops and the monadic continuation is the body of the loop. The
--- loop iterates for all elements of the stream.
---
--- The 'serially' combinator can be omitted as the default stream type is
--- 'SerialT'.
--- Note that serial composition with depth first traversal can be used to
--- combine an infinite number of streams as it explores only one stream at a
--- time.
---
--- @since 0.2.0
-newtype SerialT m a = SerialT {getSerialT :: S.Stream m a}
- deriving (Semigroup, Monoid, Functor, MonadTrans)
-
--- |
--- @since 0.1.0
-{-# DEPRECATED StreamT "Please use 'SerialT' instead." #-}
-type StreamT = SerialT
-
-instance IsStream SerialT where
- toStream = getSerialT
- fromStream = SerialT
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> SerialT IO a -> SerialT IO a #-}
- consM :: Monad m => m a -> SerialT m a -> SerialT m a
- consM = consMSerial
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> SerialT IO a -> SerialT IO a #-}
- (|:) :: Monad m => m a -> SerialT m a -> SerialT m a
- (|:) = consMSerial
-
-------------------------------------------------------------------------------
--- Semigroup
-------------------------------------------------------------------------------
-
--- | Polymorphic version of the 'Semigroup' operation '<>' of 'SerialT'.
--- Appends two streams sequentially, yielding all elements from the first
--- stream, and then all elements from the second stream.
---
--- @since 0.2.0
-{-# INLINE serial #-}
-serial :: IsStream t => t m a -> t m a -> t m a
-serial m1 m2 = fromStream $ S.serial (toStream m1) (toStream m2)
-
-------------------------------------------------------------------------------
--- Monad
-------------------------------------------------------------------------------
-
-instance Monad m => Monad (SerialT m) where
- return = pure
- (SerialT (S.Stream m)) >>= f = SerialT $ S.Stream $ \_ stp sng yld ->
- let run x = (S.runStream x) Nothing stp sng yld
- single a = run $ toStream (f a)
- yield a r = run $ toStream $ f a <> (fromStream r >>= f)
- in m Nothing stp single yield
-
-------------------------------------------------------------------------------
--- Other instances
-------------------------------------------------------------------------------
-
-MONAD_APPLICATIVE_INSTANCE(SerialT,)
-MONAD_COMMON_INSTANCES(SerialT,)
-
-------------------------------------------------------------------------------
--- WSerialT
-------------------------------------------------------------------------------
-
--- | Wide serial composition or serial composition with a breadth first
--- traversal. The 'Semigroup' instance of 'WSerialT' traverses
--- the two streams in a breadth first manner. In other words, it interleaves
--- two streams, yielding one element from each stream alternately.
---
--- @
--- import Streamly
--- import qualified "Streamly.Prelude" as S
---
--- main = ('toList' . 'wSerially' $ (fromFoldable [1,2]) \<\> (fromFoldable [3,4])) >>= print
--- @
--- @
--- [1,3,2,4]
--- @
---
--- Similarly, the 'Monad' instance interleaves the iterations of the
--- inner and the outer loop, nesting loops in a breadth first manner.
---
---
--- @
--- main = 'runStream' . 'wSerially' $ do
--- x <- return 1 \<\> return 2
--- y <- return 3 \<\> return 4
--- S.once $ print (x, y)
--- @
--- @
--- (1,3)
--- (2,3)
--- (1,4)
--- (2,4)
--- @
---
--- Note that a serial composition with breadth first traversal can only combine
--- a finite number of streams as it needs to retain state for each unfinished
--- stream.
---
--- @since 0.2.0
-newtype WSerialT m a = WSerialT {getWSerialT :: S.Stream m a}
- deriving (Functor, MonadTrans)
-
--- |
--- @since 0.1.0
-{-# DEPRECATED InterleavedT "Please use 'WSerialT' instead." #-}
-type InterleavedT = WSerialT
-
-instance IsStream WSerialT where
- toStream = getWSerialT
- fromStream = WSerialT
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> WSerialT IO a -> WSerialT IO a #-}
- consM :: Monad m => m a -> WSerialT m a -> WSerialT m a
- consM = consMSerial
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> WSerialT IO a -> WSerialT IO a #-}
- (|:) :: Monad m => m a -> WSerialT m a -> WSerialT m a
- (|:) = consMSerial
-
-------------------------------------------------------------------------------
--- Semigroup
-------------------------------------------------------------------------------
-
--- | Polymorphic version of the 'Semigroup' operation '<>' of 'WSerialT'.
--- Interleaves two streams, yielding one element from each stream alternately.
---
--- @since 0.2.0
-{-# INLINE wSerial #-}
-wSerial :: IsStream t => t m a -> t m a -> t m a
-wSerial m1 m2 = fromStream $ S.wSerial (toStream m1) (toStream m2)
-
-instance Semigroup (WSerialT m a) where
- (<>) = wSerial
-
-infixr 5 <=>
-
--- | Same as 'wSerial'.
---
--- @since 0.1.0
-{-# DEPRECATED (<=>) "Please use 'wSerial' instead." #-}
-{-# INLINE (<=>) #-}
-(<=>) :: IsStream t => t m a -> t m a -> t m a
-(<=>) = wSerial
-
-------------------------------------------------------------------------------
--- Monoid
-------------------------------------------------------------------------------
-
-instance Monoid (WSerialT m a) where
- mempty = nil
- mappend = (<>)
-
-------------------------------------------------------------------------------
--- Monad
-------------------------------------------------------------------------------
-
-instance Monad m => Monad (WSerialT m) where
- return = pure
- (WSerialT (S.Stream m)) >>= f = WSerialT $ S.Stream $ \_ stp sng yld ->
- let run x = (S.runStream x) Nothing stp sng yld
- single a = run $ toStream (f a)
- yield a r = run $ toStream $ f a <> (fromStream r >>= f)
- in m Nothing stp single yield
-
-------------------------------------------------------------------------------
--- Other instances
-------------------------------------------------------------------------------
-
-MONAD_APPLICATIVE_INSTANCE(WSerialT,)
-MONAD_COMMON_INSTANCES(WSerialT,)
-
-------------------------------------------------------------------------------
--- AheadT
-------------------------------------------------------------------------------
-
--- | Deep ahead composition or ahead composition with depth first traversal.
--- The semigroup composition of 'AheadT' appends streams in a depth first
--- manner just like 'SerialT' except that it can produce elements concurrently
--- ahead of time. It is like 'AsyncT' except that 'AsyncT' produces the output
--- as it arrives whereas 'AheadT' orders the output in the traversal order.
---
--- @
--- main = ('toList' . 'aheadly' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
--- @
--- @
--- [1,2,3,4]
--- @
---
--- Any exceptions generated by a constituent stream are propagated to the
--- output stream.
---
--- Similarly, the monad instance of 'AheadT' may run each iteration
--- concurrently ahead of time but presents the results in the same order as
--- 'SerialT'.
---
--- @
--- import "Streamly"
--- import qualified "Streamly.Prelude" as S
--- import Control.Concurrent
---
--- main = 'runStream' . 'aheadly' $ do
--- n <- return 3 \<\> return 2 \<\> return 1
--- S.once $ do
--- threadDelay (n * 1000000)
--- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
--- @
--- @
--- ThreadId 40: Delay 1
--- ThreadId 39: Delay 2
--- ThreadId 38: Delay 3
--- @
---
--- All iterations may run in the same thread if they do not block.
---
--- Note that ahead composition with depth first traversal can be used to
--- combine infinite number of streams as it explores only a bounded number of
--- streams at a time.
---
--- @since 0.3.0
-newtype AheadT m a = AheadT {getAheadT :: S.Stream m a}
- deriving (Functor, MonadTrans)
-
-instance IsStream AheadT where
- toStream = getAheadT
- fromStream = AheadT
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> AheadT IO a -> AheadT IO a #-}
- consM m r = fromStream $ S.consMAhead m (toStream r)
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> AheadT IO a -> AheadT IO a #-}
- (|:) = consM
-
-------------------------------------------------------------------------------
--- Semigroup
-------------------------------------------------------------------------------
-
--- | Polymorphic version of the 'Semigroup' operation '<>' of 'AheadT'.
--- Merges two streams sequentially but with concurrent lookahead.
---
--- @since 0.3.0
-{-# INLINE ahead #-}
-ahead :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
-ahead m1 m2 = fromStream $ S.ahead (toStream m1) (toStream m2)
-
-instance MonadAsync m => Semigroup (AheadT m a) where
- (<>) = ahead
-
-------------------------------------------------------------------------------
--- Monoid
-------------------------------------------------------------------------------
-
-instance MonadAsync m => Monoid (AheadT m a) where
- mempty = nil
- mappend = (<>)
-
-------------------------------------------------------------------------------
--- Monad
-------------------------------------------------------------------------------
-
-{-# INLINE aheadbind #-}
-aheadbind
- :: MonadAsync m
- => S.Stream m a
- -> (a -> S.Stream m b)
- -> S.Stream m b
-aheadbind m f = go m
- where
- go (S.Stream g) =
- S.Stream $ \ctx stp sng yld ->
- let run x = (S.runStream x) ctx stp sng yld
- single a = run $ f a
- yield a r = run $ f a `S.ahead` go r
- in g Nothing stp single yield
-
-instance MonadAsync m => Monad (AheadT m) where
- return = pure
- (AheadT m) >>= f = AheadT $ aheadbind m (getAheadT . f)
-
-------------------------------------------------------------------------------
--- Other instances
-------------------------------------------------------------------------------
-
-MONAD_APPLICATIVE_INSTANCE(AheadT,MONADPARALLEL)
-MONAD_COMMON_INSTANCES(AheadT, MONADPARALLEL)
-
-------------------------------------------------------------------------------
--- AsyncT
-------------------------------------------------------------------------------
-
--- | Deep async composition or async composition with depth first traversal. In
--- a left to right 'Semigroup' composition it tries to yield elements from the
--- left stream as long as it can, but it can run the right stream in parallel
--- if it needs to, based on demand. The right stream can be run if the left
--- stream blocks on IO or cannot produce elements fast enough for the consumer.
---
--- @
--- main = ('toList' . 'asyncly' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
--- @
--- @
--- [1,2,3,4]
--- @
---
--- Any exceptions generated by a constituent stream are propagated to the
--- output stream. The output and exceptions from a single stream are guaranteed
--- to arrive in the same order in the resulting stream as they were generated
--- in the input stream. However, the relative ordering of elements from
--- different streams in the resulting stream can vary depending on scheduling
--- and generation delays.
---
--- Similarly, the monad instance of 'AsyncT' /may/ run each iteration
--- concurrently based on demand. More concurrent iterations are started only
--- if the previous iterations are not able to produce enough output for the
--- consumer.
---
--- @
--- import "Streamly"
--- import qualified "Streamly.Prelude" as S
--- import Control.Concurrent
---
--- main = 'runStream' . 'asyncly' $ do
--- n <- return 3 \<\> return 2 \<\> return 1
--- S.once $ do
--- threadDelay (n * 1000000)
--- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
--- @
--- @
--- ThreadId 40: Delay 1
--- ThreadId 39: Delay 2
--- ThreadId 38: Delay 3
--- @
---
--- All iterations may run in the same thread if they do not block.
---
--- Note that async composition with depth first traversal can be used to
--- combine infinite number of streams as it explores only a bounded number of
--- streams at a time.
---
--- @since 0.1.0
-newtype AsyncT m a = AsyncT {getAsyncT :: S.Stream m a}
- deriving (Functor, MonadTrans)
-
-instance IsStream AsyncT where
- toStream = getAsyncT
- fromStream = AsyncT
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> AsyncT IO a -> AsyncT IO a #-}
- consM m r = fromStream $ S.consMAsync m (toStream r)
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> AsyncT IO a -> AsyncT IO a #-}
- (|:) = consM
-
-------------------------------------------------------------------------------
--- Semigroup
-------------------------------------------------------------------------------
-
--- | Polymorphic version of the 'Semigroup' operation '<>' of 'AsyncT'.
--- Merges two streams possibly concurrently, preferring the
--- elements from the left one when available.
---
--- @since 0.2.0
-{-# INLINE async #-}
-async :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
-async m1 m2 = fromStream $ S.async (toStream m1) (toStream m2)
-
--- | Same as 'async'.
---
--- @since 0.1.0
-{-# DEPRECATED (<|) "Please use 'async' instead." #-}
-{-# INLINE (<|) #-}
-(<|) :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
-(<|) = async
-
-instance MonadAsync m => Semigroup (AsyncT m a) where
- (<>) = async
-
-------------------------------------------------------------------------------
--- Monoid
-------------------------------------------------------------------------------
-
-instance MonadAsync m => Monoid (AsyncT m a) where
- mempty = nil
- mappend = (<>)
-
-------------------------------------------------------------------------------
--- Monad
-------------------------------------------------------------------------------
-
-{-# INLINE parbind #-}
-parbind
- :: (forall c. S.Stream m c -> S.Stream m c -> S.Stream m c)
- -> S.Stream m a
- -> (a -> S.Stream m b)
- -> S.Stream m b
-parbind par m f = go m
- where
- go (S.Stream g) =
- S.Stream $ \ctx stp sng yld ->
- let run x = (S.runStream x) ctx stp sng yld
- single a = run $ f a
- yield a r = run $ f a `par` go r
- in g Nothing stp single yield
-
-instance MonadAsync m => Monad (AsyncT m) where
- return = pure
- (AsyncT m) >>= f = AsyncT $ parbind S.async m (getAsyncT . f)
-
-------------------------------------------------------------------------------
--- Other instances
-------------------------------------------------------------------------------
-
-MONAD_APPLICATIVE_INSTANCE(AsyncT,MONADPARALLEL)
-MONAD_COMMON_INSTANCES(AsyncT, MONADPARALLEL)
-
-------------------------------------------------------------------------------
--- WAsyncT
-------------------------------------------------------------------------------
-
--- | Wide async composition or async composition with breadth first traversal.
--- The Semigroup instance of 'WAsyncT' concurrently /traverses/ the composed
--- streams using a breadth first traversal or in a round robin fashion, yielding
--- elements from both streams alternately.
---
--- @
--- main = ('toList' . 'wAsyncly' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
--- @
--- @
--- [1,3,2,4]
--- @
---
--- Any exceptions generated by a constituent stream are propagated to the
--- output stream. The output and exceptions from a single stream are guaranteed
--- to arrive in the same order in the resulting stream as they were generated
--- in the input stream. However, the relative ordering of elements from
--- different streams in the resulting stream can vary depending on scheduling
--- and generation delays.
---
--- Similarly, the 'Monad' instance of 'WAsyncT' runs /all/ iterations fairly
--- concurrently using a round robin scheduling.
---
--- @
--- import "Streamly"
--- import qualified "Streamly.Prelude" as S
--- import Control.Concurrent
---
--- main = 'runStream' . 'wAsyncly' $ do
--- n <- return 3 \<\> return 2 \<\> return 1
--- S.once $ do
--- threadDelay (n * 1000000)
--- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
--- @
--- @
--- ThreadId 40: Delay 1
--- ThreadId 39: Delay 2
--- ThreadId 38: Delay 3
--- @
---
--- Unlike 'AsyncT' all iterations are guaranteed to run fairly
--- concurrently, unconditionally.
---
--- Note that async composition with breadth first traversal can only combine a
--- finite number of streams as it needs to retain state for each unfinished
--- stream.
---
--- @since 0.2.0
-newtype WAsyncT m a = WAsyncT {getWAsyncT :: S.Stream m a}
- deriving (Functor, MonadTrans)
-
-instance IsStream WAsyncT where
- toStream = getWAsyncT
- fromStream = WAsyncT
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> WAsyncT IO a -> WAsyncT IO a #-}
- consM m r = fromStream $ S.consMWAsync m (toStream r)
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> WAsyncT IO a -> WAsyncT IO a #-}
- (|:) = consM
-
-------------------------------------------------------------------------------
--- Semigroup
-------------------------------------------------------------------------------
-
--- | Polymorphic version of the 'Semigroup' operation '<>' of 'WAsyncT'.
--- Merges two streams concurrently choosing elements from both fairly.
---
--- @since 0.2.0
-{-# INLINE wAsync #-}
-wAsync :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
-wAsync m1 m2 = fromStream $ S.wAsync (toStream m1) (toStream m2)
-
-instance MonadAsync m => Semigroup (WAsyncT m a) where
- (<>) = wAsync
-
-------------------------------------------------------------------------------
--- Monoid
-------------------------------------------------------------------------------
-
-instance MonadAsync m => Monoid (WAsyncT m a) where
- mempty = nil
- mappend = (<>)
-
-------------------------------------------------------------------------------
--- Monad
-------------------------------------------------------------------------------
-
-instance MonadAsync m => Monad (WAsyncT m) where
- return = pure
- (WAsyncT m) >>= f =
- WAsyncT $ parbind S.wAsync m (getWAsyncT . f)
-
-------------------------------------------------------------------------------
--- Other instances
-------------------------------------------------------------------------------
-
-MONAD_APPLICATIVE_INSTANCE(WAsyncT,MONADPARALLEL)
-MONAD_COMMON_INSTANCES(WAsyncT, MONADPARALLEL)
-
-------------------------------------------------------------------------------
--- ParallelT
-------------------------------------------------------------------------------
-
--- | Async composition with simultaneous traversal of all streams.
---
--- The Semigroup instance of 'ParallelT' concurrently /merges/ two streams,
--- running both strictly concurrently and yielding elements from both streams
--- as they arrive. When multiple streams are combined using 'ParallelT' each
--- one is evaluated in its own thread and the results produced are presented in
--- the combined stream on a first come first serve basis.
---
--- 'AsyncT' and 'WAsyncT' are /concurrent lookahead streams/ each with a
--- specific type of consumption pattern (depth first or breadth first). Since
--- they are lookahead, they may introduce certain default latency in starting
--- more concurrent tasks for efficiency reasons or may put a default limitation
--- on the resource consumption (e.g. number of concurrent threads for
--- lookahead). If we look at the implementation detail, they both can share a
--- pool of worker threads to evaluate the streams in the desired pattern and at
--- the desired rate. However, 'ParallelT' uses a separate runtime thread to
--- evaluate each stream.
---
--- 'WAsyncT' is similar to 'ParallelT', as both of them evaluate the
--- constituent streams fairly in a round robin fashion.
--- However, the key difference is that 'WAsyncT' is lazy or pull driven
--- whereas 'ParallelT' is strict or push driven. 'ParallelT' immediately
--- starts concurrent evaluation of both the streams (in separate threads) and
--- later picks the results whereas 'WAsyncT' may wait for a certain latency
--- threshold before initiating concurrent evaluation of the next stream. The
--- concurrent scheduling of the next stream or the degree of concurrency is
--- driven by the feedback from the consumer. In case of 'ParallelT' each stream
--- is evaluated in a separate thread and results are /pushed/ to a shared
--- output buffer, the evaluation rate is controlled by blocking when the buffer
--- is full.
---
--- Concurrent lookahead streams are generally more efficient than
--- 'ParallelT' and can work pretty efficiently even for smaller tasks because
--- they do not necessarily use a separate thread for each task. So they should
--- be preferred over 'ParallelT' especially when efficiency is a concern and
--- simultaneous strict evaluation is not a requirement. 'ParallelT' is useful
--- for cases when the streams are required to be evaluated simultaneously
--- irrespective of how the consumer consumes them e.g. when we want to race
--- two tasks and want to start both strictly at the same time or if we have
--- timers in the parallel tasks and our results depend on the timers being
--- started at the same time. We can say that 'ParallelT' is almost the same
--- (modulo some implementation differences) as 'WAsyncT' when the latter is
--- used with unlimited lookahead and zero latency in initiating lookahead.
---
--- @
--- main = ('toList' . 'parallely' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
--- @
--- @
--- [1,3,2,4]
--- @
---
--- When streams with more than one element are merged, it yields whichever
--- stream yields first without any bias, unlike the 'Async' style streams.
---
--- Any exceptions generated by a constituent stream are propagated to the
--- output stream. The output and exceptions from a single stream are guaranteed
--- to arrive in the same order in the resulting stream as they were generated
--- in the input stream. However, the relative ordering of elements from
--- different streams in the resulting stream can vary depending on scheduling
--- and generation delays.
---
--- Similarly, the 'Monad' instance of 'ParallelT' runs /all/ iterations
--- of the loop concurrently.
---
--- @
--- import "Streamly"
--- import qualified "Streamly.Prelude" as S
--- import Control.Concurrent
---
--- main = 'runStream' . 'parallely' $ do
--- n <- return 3 \<\> return 2 \<\> return 1
--- S.once $ do
--- threadDelay (n * 1000000)
--- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
--- @
--- @
--- ThreadId 40: Delay 1
--- ThreadId 39: Delay 2
--- ThreadId 38: Delay 3
--- @
---
--- Note that parallel composition can only combine a finite number of
--- streams as it needs to retain state for each unfinished stream.
---
--- @since 0.1.0
-newtype ParallelT m a = ParallelT {getParallelT :: S.Stream m a}
- deriving (Functor, MonadTrans)
-
-instance IsStream ParallelT where
- toStream = getParallelT
- fromStream = ParallelT
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> ParallelT IO a -> ParallelT IO a #-}
- consM m r = fromStream $ S.consMParallel m (toStream r)
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> ParallelT IO a -> ParallelT IO a #-}
- (|:) = consM
-
-------------------------------------------------------------------------------
--- Semigroup
-------------------------------------------------------------------------------
-
--- | Polymorphic version of the 'Semigroup' operation '<>' of 'ParallelT'
--- Merges two streams concurrently.
---
--- @since 0.2.0
-{-# INLINE parallel #-}
-parallel :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
-parallel m1 m2 = fromStream $ S.parallel (toStream m1) (toStream m2)
-
-instance MonadAsync m => Semigroup (ParallelT m a) where
- (<>) = parallel
-
-------------------------------------------------------------------------------
--- Monoid
-------------------------------------------------------------------------------
-
-instance MonadAsync m => Monoid (ParallelT m a) where
- mempty = nil
- mappend = (<>)
-
-------------------------------------------------------------------------------
--- Monad
-------------------------------------------------------------------------------
-
-instance MonadAsync m => Monad (ParallelT m) where
- return = pure
- (ParallelT m) >>= f = ParallelT $ parbind S.parallel m (getParallelT . f)
-
-------------------------------------------------------------------------------
--- Other instances
-------------------------------------------------------------------------------
-
-MONAD_APPLICATIVE_INSTANCE(ParallelT,MONADPARALLEL)
-MONAD_COMMON_INSTANCES(ParallelT, MONADPARALLEL)
-
-------------------------------------------------------------------------------
--- Serially Zipping Streams
-------------------------------------------------------------------------------
-
--- | The applicative instance of 'ZipSerialM' zips a number of streams serially
--- i.e. it produces one element from each stream serially and then zips all
--- those elements.
---
--- @
--- main = (toList . 'zipSerially' $ (,,) \<$\> s1 \<*\> s2 \<*\> s3) >>= print
--- where s1 = fromFoldable [1, 2]
--- s2 = fromFoldable [3, 4]
--- s3 = fromFoldable [5, 6]
--- @
--- @
--- [(1,3,5),(2,4,6)]
--- @
---
--- The 'Semigroup' instance of this type works the same way as that of
--- 'SerialT'.
---
--- @since 0.2.0
-newtype ZipSerialM m a = ZipSerialM {getZipSerialM :: S.Stream m a}
- deriving (Functor, Semigroup, Monoid)
-
--- |
--- @since 0.1.0
-{-# DEPRECATED ZipStream "Please use 'ZipSerialM' instead." #-}
-type ZipStream = ZipSerialM
-
-instance IsStream ZipSerialM where
- toStream = getZipSerialM
- fromStream = ZipSerialM
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> ZipSerialM IO a -> ZipSerialM IO a #-}
- consM :: Monad m => m a -> ZipSerialM m a -> ZipSerialM m a
- consM = consMSerial
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> ZipSerialM IO a -> ZipSerialM IO a #-}
- (|:) :: Monad m => m a -> ZipSerialM m a -> ZipSerialM m a
- (|:) = consMSerial
-
-instance Monad m => Applicative (ZipSerialM m) where
- pure = ZipSerialM . S.repeat
- m1 <*> m2 = fromStream $ S.zipWith id (toStream m1) (toStream m2)
-
-------------------------------------------------------------------------------
--- Parallely Zipping Streams
-------------------------------------------------------------------------------
-
--- | Like 'ZipSerialM' but zips in parallel, it generates all the elements to
--- be zipped concurrently.
---
--- @
--- main = (toList . 'zipAsyncly' $ (,,) \<$\> s1 \<*\> s2 \<*\> s3) >>= print
--- where s1 = fromFoldable [1, 2]
--- s2 = fromFoldable [3, 4]
--- s3 = fromFoldable [5, 6]
--- @
--- @
--- [(1,3,5),(2,4,6)]
--- @
---
--- The 'Semigroup' instance of this type works the same way as that of
--- 'SerialT'.
---
--- @since 0.2.0
-newtype ZipAsyncM m a = ZipAsyncM {getZipAsyncM :: S.Stream m a}
- deriving (Functor, Semigroup, Monoid)
-
-instance IsStream ZipAsyncM where
- toStream = getZipAsyncM
- fromStream = ZipAsyncM
-
- {-# INLINE consM #-}
- {-# SPECIALIZE consM :: IO a -> ZipAsyncM IO a -> ZipAsyncM IO a #-}
- consM :: Monad m => m a -> ZipAsyncM m a -> ZipAsyncM m a
- consM = consMSerial
-
- {-# INLINE (|:) #-}
- {-# SPECIALIZE (|:) :: IO a -> ZipAsyncM IO a -> ZipAsyncM IO a #-}
- (|:) :: Monad m => m a -> ZipAsyncM m a -> ZipAsyncM m a
- (|:) = consMSerial
-
-instance MonadAsync m => Applicative (ZipAsyncM m) where
- pure = ZipAsyncM . S.repeat
- m1 <*> m2 = fromStream $ S.zipAsyncWith id (toStream m1) (toStream m2)
-
--------------------------------------------------------------------------------
--- Type adapting combinators
--------------------------------------------------------------------------------
-
--- | Adapt any specific stream type to any other specific stream type.
---
--- @since 0.1.0
-adapt :: (IsStream t1, IsStream t2) => t1 m a -> t2 m a
-adapt = fromStream . toStream
-
--- | Fix the type of a polymorphic stream as 'SerialT'.
---
--- @since 0.1.0
-serially :: IsStream t => SerialT m a -> t m a
-serially = adapt
-
--- | Fix the type of a polymorphic stream as 'WSerialT'.
---
--- @since 0.2.0
-wSerially :: IsStream t => WSerialT m a -> t m a
-wSerially = adapt
-
--- | Same as 'wSerially'.
---
--- @since 0.1.0
-{-# DEPRECATED interleaving "Please use wSerially instead." #-}
-interleaving :: IsStream t => WSerialT m a -> t m a
-interleaving = wSerially
-
--- | Fix the type of a polymorphic stream as 'AheadT'.
---
--- @since 0.3.0
-aheadly :: IsStream t => AheadT m a -> t m a
-aheadly = adapt
-
--- | Fix the type of a polymorphic stream as 'AsyncT'.
---
--- @since 0.1.0
-asyncly :: IsStream t => AsyncT m a -> t m a
-asyncly = adapt
-
--- | Fix the type of a polymorphic stream as 'WAsyncT'.
---
--- @since 0.2.0
-wAsyncly :: IsStream t => WAsyncT m a -> t m a
-wAsyncly = adapt
-
--- | Fix the type of a polymorphic stream as 'ParallelT'.
---
--- @since 0.1.0
-parallely :: IsStream t => ParallelT m a -> t m a
-parallely = adapt
-
--- | Fix the type of a polymorphic stream as 'ZipSerialM'.
---
--- @since 0.2.0
-zipSerially :: IsStream t => ZipSerialM m a -> t m a
-zipSerially = adapt
-
--- | Same as 'zipSerially'.
---
--- @since 0.1.0
-{-# DEPRECATED zipping "Please use zipSerially instead." #-}
-zipping :: IsStream t => ZipSerialM m a -> t m a
-zipping = zipSerially
-
--- | Fix the type of a polymorphic stream as 'ZipAsyncM'.
---
--- @since 0.2.0
-zipAsyncly :: IsStream t => ZipAsyncM m a -> t m a
-zipAsyncly = adapt
-
--- | Same as 'zipAsyncly'.
---
--- @since 0.1.0
-{-# DEPRECATED zippingAsync "Please use zipAsyncly instead." #-}
-zippingAsync :: IsStream t => ZipAsyncM m a -> t m a
-zippingAsync = zipAsyncly
-
--------------------------------------------------------------------------------
--- Running Streams, convenience functions specialized to types
--------------------------------------------------------------------------------
-
--- | Same as @runStream@.
---
--- @since 0.1.0
-{-# DEPRECATED runStreamT "Please use runStream instead." #-}
-runStreamT :: Monad m => SerialT m a -> m ()
-runStreamT = runStream
-
--- | Same as @runStream . wSerially@.
---
--- @since 0.1.0
-{-# DEPRECATED runInterleavedT "Please use 'runStream . interleaving' instead." #-}
-runInterleavedT :: Monad m => InterleavedT m a -> m ()
-runInterleavedT = runStream . wSerially
-
--- | Same as @runStream . asyncly@.
---
--- @since 0.1.0
-{-# DEPRECATED runAsyncT "Please use 'runStream . asyncly' instead." #-}
-runAsyncT :: Monad m => AsyncT m a -> m ()
-runAsyncT = runStream . asyncly
-
--- | Same as @runStream . parallely@.
---
--- @since 0.1.0
-{-# DEPRECATED runParallelT "Please use 'runStream . parallely' instead." #-}
-runParallelT :: Monad m => ParallelT m a -> m ()
-runParallelT = runStream . parallely
-
--- | Same as @runStream . zipping@.
---
--- @since 0.1.0
-{-# DEPRECATED runZipStream "Please use 'runStream . zipSerially' instead." #-}
-runZipStream :: Monad m => ZipSerialM m a -> m ()
-runZipStream = runStream . zipSerially
-
--- | Same as @runStream . zippingAsync@.
---
--- @since 0.1.0
-{-# DEPRECATED runZipAsync "Please use 'runStream . zipAsyncly' instead." #-}
-runZipAsync :: Monad m => ZipAsyncM m a -> m ()
-runZipAsync = runStream . zipAsyncly
-
-------------------------------------------------------------------------------
--- IO Streams
-------------------------------------------------------------------------------
-
--- | A serial IO stream of elements of type @a@. See 'SerialT' documentation
--- for more details.
---
--- @since 0.2.0
-type Serial a = SerialT IO a
-
--- | An interleaving serial IO stream of elements of type @a@. See 'WSerialT'
--- documentation for more details.
---
--- @since 0.2.0
-type WSerial a = WSerialT IO a
-
--- | A serial IO stream of elements of type @a@ with concurrent lookahead. See
--- 'AheadT' documentation for more details.
---
--- @since 0.3.0
-type Ahead a = AheadT IO a
-
--- | A demand driven left biased parallely composing IO stream of elements of
--- type @a@. See 'AsyncT' documentation for more details.
---
--- @since 0.2.0
-type Async a = AsyncT IO a
-
--- | A round robin parallely composing IO stream of elements of type @a@.
--- See 'WAsyncT' documentation for more details.
---
--- @since 0.2.0
-type WAsync a = WAsyncT IO a
-
--- | A parallely composing IO stream of elements of type @a@.
--- See 'ParallelT' documentation for more details.
---
--- @since 0.2.0
-type Parallel a = ParallelT IO a
-
--- | An IO stream whose applicative instance zips streams serially.
---
--- @since 0.2.0
-type ZipSerial a = ZipSerialM IO a
-
--- | An IO stream whose applicative instance zips streams wAsyncly.
---
--- @since 0.2.0
-type ZipAsync a = ZipAsyncM IO a
-
-------------------------------------------------------------------------------
--- Fold Utilities
-------------------------------------------------------------------------------
-
--- | A variant of 'Data.Foldable.fold' that allows you to fold a 'Foldable'
--- container of streams using the specified stream sum operation.
---
--- @foldWith 'async' $ map return [1..3]@
---
--- @since 0.1.0
-{-# INLINABLE foldWith #-}
-foldWith :: (IsStream t, Foldable f)
- => (t m a -> t m a -> t m a) -> f (t m a) -> t m a
-foldWith f = foldr f nil
-
--- | A variant of 'foldMap' that allows you to map a monadic streaming action
--- on a 'Foldable' container and then fold it using the specified stream sum
--- operation.
---
--- @foldMapWith 'async' return [1..3]@
---
--- @since 0.1.0
-{-# INLINABLE foldMapWith #-}
-foldMapWith :: (IsStream t, Foldable f)
- => (t m b -> t m b -> t m b) -> (a -> t m b) -> f a -> t m b
-foldMapWith f g = foldr (f . g) nil
-
--- | Like 'foldMapWith' but with the last two arguments reversed i.e. the
--- monadic streaming function is the last argument.
---
--- @since 0.1.0
-{-# INLINABLE forEachWith #-}
-forEachWith :: (IsStream t, Foldable f)
- => (t m b -> t m b -> t m b) -> f a -> (a -> t m b) -> t m b
-forEachWith f xs g = foldr (f . g) nil xs
diff --git a/src/Streamly/Streams/Ahead.hs b/src/Streamly/Streams/Ahead.hs
new file mode 100644
index 0000000..db8ce50
--- /dev/null
+++ b/src/Streamly/Streams/Ahead.hs
@@ -0,0 +1,385 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving#-}
+{-# LANGUAGE InstanceSigs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+-- |
+-- Module : Streamly.Streams.Ahead
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.Streams.Ahead
+ (
+ AheadT
+ , Ahead
+ , aheadly
+ , ahead
+ )
+where
+
+import Control.Monad (ap)
+import Control.Monad.Base (MonadBase(..), liftBaseDefault)
+import Control.Monad.Catch (MonadThrow, throwM)
+-- import Control.Monad.Error.Class (MonadError(..))
+import Control.Monad.IO.Class (MonadIO(..))
+import Control.Monad.Reader.Class (MonadReader(..))
+import Control.Monad.State.Class (MonadState(..))
+import Control.Monad.Trans.Class (MonadTrans(lift))
+import Data.Atomics (atomicModifyIORefCAS_)
+import Data.Heap (Heap, Entry(..))
+import Data.IORef (IORef, readIORef)
+import Data.Maybe (fromJust)
+import Data.Semigroup (Semigroup(..))
+
+import qualified Data.Heap as H
+
+import Streamly.Streams.SVar (fromSVar)
+import Streamly.Streams.Serial (map)
+import Streamly.SVar
+import Streamly.Streams.StreamK (IsStream(..), Stream(..))
+import qualified Streamly.Streams.StreamK as K
+
+#ifdef DIAGNOSTICS
+import Control.Monad (when)
+import Data.IORef (writeIORef)
+#endif
+import Prelude hiding (map)
+
+#include "Instances.hs"
+
+-------------------------------------------------------------------------------
+-- Ahead
+-------------------------------------------------------------------------------
+
+-- Lookahead streams can execute multiple tasks concurrently, ahead of time,
+-- but always serve them in the same order as they appear in the stream. To
+-- implement lookahead streams efficiently we assign a sequence number to each
+-- task when the task is picked up for execution. When the task finishes, the
+-- output is tagged with the same sequence number and we rearrange the outputs
+-- in sequence based on that number.
+--
+-- To explain the mechanism imagine that the current task at the head of the
+-- stream has a "token" to yield to the outputQueue. The ownership of the token
+-- is determined by the current sequence number, which is maintained in the
+-- outputHeap. A sequence number is assigned when a task is queued. When a
+-- thread dequeues a task it picks up the sequence number as well, and when the
+-- output is ready it uses that sequence number to queue the output to the
+-- outputQueue.
+--
+-- The thread with current sequence number sends the output directly to the
+-- outputQueue. Other threads push the output to the outputHeap. When the task
+-- being queued on the heap is a stream of many elements we evaluate only the
+-- first element and keep the rest of the unevaluated computation in the heap.
+-- When such a task gets the "token" for outputQueue it evaluates and directly
+-- yields all the elements to the outputQueue without checking for the
+-- "token".
+--
+-- Note that no two outputs in the heap can have the same sequence numbers and
+-- therefore we do not need a stable heap. We have also separated the buffer
+-- for the current task (outputQueue) and the pending tasks (outputHeap) so
+-- that the pending tasks cannot interfere with the current task. Note that for
+-- a single task just the outputQueue is enough and for the case of many
+-- threads just a heap is good enough. However we balance between these two
+-- cases, so that both are efficient.
+--
+-- For bigger streams it may make sense to have separate buffers for each
+-- stream. However, for singleton streams this may become inefficient. On the
+-- other hand, if we do not have separate buffers, then the streams that come
+-- later in sequence may hog the buffer, hindering the streams that are ahead.
+-- For this reason we impose a single element buffer limit on the streams being
+-- executed in advance.
+--
+-- This scheme works pretty efficiently with less than 40% extra overhead
+-- compared to the Async streams where we do not have any kind of sequencing of
+-- the outputs. It is especially devised so that we are most efficient when we
+-- have short tasks and need just a single thread. Also, when a thread yields
+-- many items it can retain lock-free access to the outputQueue and push them
+-- efficiently.
+--
+-- XXX Maybe we can start the ahead threads at a lower cpu and IO priority so
+-- that they do not hog the resources and hinder the progress of the threads in
+-- front of them.
+
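+-- As a standalone illustration (not part of this module), the reordering step
+-- can be pictured as draining a min-heap keyed by sequence number, yielding
+-- payloads only while the minimum key matches the next expected sequence
+-- number. The helper 'drainInOrder' below is hypothetical, written against the
+-- same Data.Heap API (from the "heaps" package) used in this module:
+--
+-- > import Data.Heap (Heap, Entry(..))
+-- > import qualified Data.Heap as H
+-- >
+-- > -- Pop entries while they carry the next expected sequence number; return
+-- > -- the in-order payloads, the next expected number and the remaining heap.
+-- > drainInOrder :: Int -> Heap (Entry Int a) -> ([a], Int, Heap (Entry Int a))
+-- > drainInOrder next hp =
+-- >     case H.uncons hp of
+-- >         Just (Entry seqNo a, hp') | seqNo == next ->
+-- >             let (as, next', hp'') = drainInOrder (next + 1) hp'
+-- >              in (a : as, next', hp'')
+-- >         _ -> ([], next, hp)
+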
+-- Left associated ahead expressions are expensive. We start a new SVar for
+-- each left associative expression. The queue is used only for right
+-- associated expressions; we queue the right expression and execute the left.
+-- Therefore the queue never has more than one item in it.
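+-- For example, a right associated expression like s1 `ahead` (s2 `ahead` s3)
+-- reuses the same SVar for the queued right side, whereas a left associated
+-- (s1 `ahead` s2) `ahead` s3 forks a fresh SVar for the parenthesized left
+-- expression (see forkSVarAhead below).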
+
+workLoopAhead :: MonadIO m
+ => State Stream m a
+ -> IORef ([Stream m a], Int)
+ -> IORef (Heap (Entry Int (AheadHeapEntry Stream m a)) , Int)
+ -> m ()
+workLoopAhead st q heap = runHeap
+
+ where
+
+ sv = fromJust $ streamVar st
+ maxBuf = bufferHigh st
+ toHeap seqNo ent = do
+ hp <- liftIO $ atomicModifyIORefCAS heap $ \(h, snum) ->
+ ((H.insert (Entry seqNo ent) h, snum), h)
+ (_, len) <- liftIO $ readIORef (outputQueue sv)
+ let maxHeap = maxBuf - len
+ limit <- case maxYieldLimit sv of
+ Nothing -> return maxHeap
+ Just ref -> do
+ r <- liftIO $ readIORef ref
+ return $ if r >= 0 then r else maxHeap
+ if H.size hp <= limit
+ then runHeap
+ else liftIO $ sendStop sv
+
+ singleToHeap seqNo a = toHeap seqNo (AheadEntryPure a)
+ yieldToHeap seqNo a r = toHeap seqNo (AheadEntryStream (a `K.cons` r))
+
+ singleOutput seqNo a = do
+ continue <- liftIO $ sendYield maxBuf sv (ChildYield a)
+ if continue
+ then runQueueToken seqNo
+ else liftIO $ do
+ atomicModifyIORefCAS_ heap $ \(h, _) -> (h, seqNo + 1)
+ sendStop sv
+
+ yieldOutput seqNo a r = do
+ continue <- liftIO $ sendYield maxBuf sv (ChildYield a)
+ if continue
+ then unStream r st (runQueueToken seqNo)
+ (singleOutput seqNo)
+ (yieldOutput seqNo)
+ else liftIO $ do
+ atomicModifyIORefCAS_ heap $ \(h, _) ->
+ (H.insert (Entry seqNo (AheadEntryStream r)) h, seqNo)
+ sendStop sv
+
+ {-# INLINE runQueueToken #-}
+ runQueueToken prevSeqNo = do
+ work <- dequeueAhead q
+ case work of
+ Nothing -> do
+ liftIO $ atomicModifyIORefCAS_ heap $ \(h, _) ->
+ (h, prevSeqNo + 1)
+ runHeap
+ Just (m, seqNo) -> do
+ if seqNo == prevSeqNo + 1
+ then
+ unStream m st (runQueueToken seqNo)
+ (singleOutput seqNo)
+ (yieldOutput seqNo)
+ else do
+ liftIO $ atomicModifyIORefCAS_ heap $ \(h, _) ->
+ (h, prevSeqNo + 1)
+ unStream m st runHeap
+ (singleToHeap seqNo)
+ (yieldToHeap seqNo)
+ runQueueNoToken = do
+ work <- dequeueAhead q
+ case work of
+ Nothing -> runHeap
+ Just (m, seqNo) -> do
+ if seqNo == 0
+ then
+ unStream m st (runQueueToken seqNo)
+ (singleOutput seqNo)
+ (yieldOutput seqNo)
+ else
+ unStream m st runHeap
+ (singleToHeap seqNo)
+ (yieldToHeap seqNo)
+
+ {-# NOINLINE runHeap #-}
+ runHeap = do
+#ifdef DIAGNOSTICS
+ liftIO $ do
+ maxHp <- readIORef (maxHeapSize sv)
+ (hp, _) <- readIORef heap
+ when (H.size hp > maxHp) $ writeIORef (maxHeapSize sv) (H.size hp)
+#endif
+ ent <- liftIO $ dequeueFromHeap heap
+ case ent of
+ Nothing -> do
+ done <- queueEmptyAhead q
+ if done
+ then liftIO $ sendStop sv
+ else runQueueNoToken
+ Just (Entry seqNo hent) -> do
+ case hent of
+ AheadEntryPure a -> singleOutput seqNo a
+ AheadEntryStream r ->
+ unStream r st (runQueueToken seqNo)
+ (singleOutput seqNo)
+ (yieldOutput seqNo)
+
+-------------------------------------------------------------------------------
+-- WAhead
+-------------------------------------------------------------------------------
+
+-- XXX To be implemented. Use a linked queue like WAsync and put back the
+-- remaining computation at the back of the queue instead of the heap, and
+-- increment the sequence number.
+
+-- The only difference between forkSVarAsync and this is that we run the left
+-- computation without a shared SVar.
+forkSVarAhead :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
+forkSVarAhead m1 m2 = Stream $ \st stp sng yld -> do
+ sv <- newAheadVar st (concurrently m1 m2) workLoopAhead
+ unStream (fromSVar sv) (rstState st) stp sng yld
+ where
+ concurrently ma mb = Stream $ \st stp sng yld -> do
+ liftIO $ enqueue (fromJust $ streamVar st) mb
+ unStream ma (rstState st) stp sng yld
+
+{-# INLINE aheadS #-}
+aheadS :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
+aheadS m1 m2 = Stream $ \st stp sng yld -> do
+ case streamVar st of
+ Just sv | svarStyle sv == AheadVar -> do
+ liftIO $ enqueue sv m2
+ -- Always run the left side on a new SVar to avoid complexity in
+ -- sequencing results. This means the left side cannot further
+ -- split into more ahead computations on the same SVar.
+ unStream m1 (rstState st) stp sng yld
+ _ -> unStream (forkSVarAhead m1 m2) st stp sng yld
+
+-- | XXX we can implement this more efficiently by a direct implementation
+-- instead of combining streams using ahead.
+{-# INLINE consMAhead #-}
+consMAhead :: MonadAsync m => m a -> Stream m a -> Stream m a
+consMAhead m r = K.yieldM m `aheadS` r
+
+------------------------------------------------------------------------------
+-- AheadT
+------------------------------------------------------------------------------
+
+-- | Deep ahead composition or ahead composition with depth first traversal.
+-- The semigroup composition of 'AheadT' appends streams in a depth first
+-- manner just like 'SerialT' except that it can produce elements concurrently
+-- ahead of time. It is like 'AsyncT' except that 'AsyncT' produces the output
+-- as it arrives whereas 'AheadT' orders the output in the traversal order.
+--
+-- @
+-- main = ('toList' . 'aheadly' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
+-- @
+-- @
+-- [1,2,3,4]
+-- @
+--
+-- Any exceptions generated by a constituent stream are propagated to the
+-- output stream.
+--
+-- Similarly, the monad instance of 'AheadT' may run each iteration
+-- concurrently ahead of time but presents the results in the same order as
+-- 'SerialT'.
+--
+-- @
+-- import "Streamly"
+-- import qualified "Streamly.Prelude" as S
+-- import Control.Concurrent
+--
+-- main = 'runStream' . 'aheadly' $ do
+-- n <- return 3 \<\> return 2 \<\> return 1
+-- S.once $ do
+-- threadDelay (n * 1000000)
+-- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
+-- @
+-- @
+-- ThreadId 40: Delay 1
+-- ThreadId 39: Delay 2
+-- ThreadId 38: Delay 3
+-- @
+--
+-- All iterations may run in the same thread if they do not block.
+--
+-- Note that ahead composition with depth first traversal can be used to
+-- combine an infinite number of streams as it explores only a bounded number
+-- of streams at a time.
+--
+-- @since 0.3.0
+newtype AheadT m a = AheadT {getAheadT :: Stream m a}
+ deriving (MonadTrans)
+
+-- | A serial IO stream of elements of type @a@ with concurrent lookahead. See
+-- 'AheadT' documentation for more details.
+--
+-- @since 0.3.0
+type Ahead a = AheadT IO a
+
+-- | Fix the type of a polymorphic stream as 'AheadT'.
+--
+-- @since 0.3.0
+aheadly :: IsStream t => AheadT m a -> t m a
+aheadly = K.adapt
+
+instance IsStream AheadT where
+ toStream = getAheadT
+ fromStream = AheadT
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> AheadT IO a -> AheadT IO a #-}
+ consM m r = fromStream $ consMAhead m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> AheadT IO a -> AheadT IO a #-}
+ (|:) = consM
+
+------------------------------------------------------------------------------
+-- Semigroup
+------------------------------------------------------------------------------
+
+-- | Polymorphic version of the 'Semigroup' operation '<>' of 'AheadT'.
+-- Merges two streams sequentially but with concurrent lookahead.
+--
+-- @since 0.3.0
+{-# INLINE ahead #-}
+ahead :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
+ahead m1 m2 = fromStream $ aheadS (toStream m1) (toStream m2)
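+
+-- A minimal usage sketch (illustrative, not part of the library docs),
+-- assuming 'toList' and 'fromFoldable' from "Streamly.Prelude" are in scope:
+--
+--   main = (toList $ fromFoldable [1,2] `ahead` fromFoldable [3,4]) >>= print
+--   -- [1,2,3,4]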
+
+instance MonadAsync m => Semigroup (AheadT m a) where
+ (<>) = ahead
+
+------------------------------------------------------------------------------
+-- Monoid
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Monoid (AheadT m a) where
+ mempty = K.nil
+ mappend = (<>)
+
+------------------------------------------------------------------------------
+-- Monad
+------------------------------------------------------------------------------
+
+{-# INLINE aheadbind #-}
+aheadbind
+ :: MonadAsync m
+ => Stream m a
+ -> (a -> Stream m b)
+ -> Stream m b
+aheadbind m f = go m
+ where
+ go (Stream g) =
+ Stream $ \st stp sng yld ->
+ let run x = unStream x st stp sng yld
+ single a = run $ f a
+ yieldk a r = run $ f a `aheadS` go r
+ in g (rstState st) stp single yieldk
+
+instance MonadAsync m => Monad (AheadT m) where
+ return = pure
+ (AheadT m) >>= f = AheadT $ aheadbind m (getAheadT . f)
+
+------------------------------------------------------------------------------
+-- Other instances
+------------------------------------------------------------------------------
+
+MONAD_APPLICATIVE_INSTANCE(AheadT,MONADPARALLEL)
+MONAD_COMMON_INSTANCES(AheadT, MONADPARALLEL)
diff --git a/src/Streamly/Streams/Async.hs b/src/Streamly/Streams/Async.hs
new file mode 100644
index 0000000..af1be32
--- /dev/null
+++ b/src/Streamly/Streams/Async.hs
@@ -0,0 +1,591 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving#-}
+{-# LANGUAGE InstanceSigs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+-- |
+-- Module : Streamly.Streams.Async
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.Streams.Async
+ (
+ AsyncT
+ , Async
+ , asyncly
+ , async
+ , (<|) --deprecated
+ , mkAsync
+ , mkAsync'
+
+ , WAsyncT
+ , WAsync
+ , wAsyncly
+ , wAsync
+ )
+where
+
+import Control.Monad (ap)
+import Control.Monad.Base (MonadBase(..), liftBaseDefault)
+import Control.Monad.Catch (MonadThrow, throwM)
+import Control.Concurrent.MVar (newEmptyMVar)
+-- import Control.Monad.Error.Class (MonadError(..))
+import Control.Monad.IO.Class (MonadIO(..))
+import Control.Monad.Reader.Class (MonadReader(..))
+import Control.Monad.State.Class (MonadState(..))
+import Control.Monad.Trans.Class (MonadTrans(lift))
+import Data.Concurrent.Queue.MichaelScott (LinkedQueue, newQ, nullQ)
+import Data.IORef (IORef, newIORef, readIORef)
+import Data.Maybe (fromJust)
+import Data.Semigroup (Semigroup(..))
+
+import Prelude hiding (map)
+import qualified Data.Set as S
+
+import Streamly.Streams.SVar (fromSVar)
+import Streamly.Streams.Serial (map)
+import Streamly.SVar
+import Streamly.Streams.StreamK (IsStream(..), Stream(..), adapt)
+import qualified Streamly.Streams.StreamK as K
+
+#include "Instances.hs"
+
+-------------------------------------------------------------------------------
+-- Async
+-------------------------------------------------------------------------------
+
+{-# INLINE runStreamLIFO #-}
+runStreamLIFO :: MonadIO m
+ => State Stream m a -> IORef [Stream m a] -> Stream m a -> m () -> m ()
+runStreamLIFO st q m stop = unStream m st stop single yieldk
+ where
+ sv = fromJust $ streamVar st
+ maxBuf = bufferHigh st
+ single a = do
+ res <- liftIO $ sendYield maxBuf sv (ChildYield a)
+ if res then stop else liftIO $ sendStop sv
+ yieldk a r = do
+ res <- liftIO $ sendYield maxBuf sv (ChildYield a)
+ if res
+ then (unStream r) st stop single yieldk
+ else liftIO $ enqueueLIFO sv q r >> sendStop sv
+
+-------------------------------------------------------------------------------
+-- WAsync
+-------------------------------------------------------------------------------
+
+{-# INLINE runStreamFIFO #-}
+runStreamFIFO
+ :: MonadIO m
+ => State Stream m a
+ -> LinkedQueue (Stream m a)
+ -> Stream m a
+ -> m ()
+ -> m ()
+runStreamFIFO st q m stop = unStream m st stop single yieldk
+ where
+ sv = fromJust $ streamVar st
+ maxBuf = bufferHigh st
+ single a = do
+ res <- liftIO $ sendYield maxBuf sv (ChildYield a)
+ if res then stop else liftIO $ sendStop sv
+ yieldk a r = do
+ res <- liftIO $ sendYield maxBuf sv (ChildYield a)
+ liftIO (enqueueFIFO sv q r)
+ if res then stop else liftIO $ sendStop sv
+
+-------------------------------------------------------------------------------
+-- SVar creation
+-- This code belongs in SVar.hs but is kept here for perf reasons
+-------------------------------------------------------------------------------
+
+-- XXX we have this function in this file because passing runStreamLIFO as a
+-- function argument to this function results in a perf degradation of more
+-- than 10%. Need to investigate what the root cause is.
+-- Interestingly, the same thing does not make any difference for Ahead.
+getLifoSVar :: MonadAsync m => State Stream m a -> IO (SVar Stream m a)
+getLifoSVar st = do
+ outQ <- newIORef ([], 0)
+ outQMv <- newEmptyMVar
+ active <- newIORef 0
+ wfw <- newIORef False
+ running <- newIORef S.empty
+ q <- newIORef []
+ yl <- case yieldLimit st of
+ Nothing -> return Nothing
+ Just x -> Just <$> newIORef x
+#ifdef DIAGNOSTICS
+ disp <- newIORef 0
+ maxWrk <- newIORef 0
+ maxOq <- newIORef 0
+ maxHs <- newIORef 0
+ maxWq <- newIORef 0
+#endif
+ let checkEmpty = null <$> readIORef q
+ let sv =
+ SVar { outputQueue = outQ
+ , maxYieldLimit = yl
+ , outputDoorBell = outQMv
+ , readOutputQ = readOutputQBounded (threadsHigh st) sv
+ , postProcess = postProcessBounded sv
+ , workerThreads = running
+ , workLoop = workLoopLIFO runStreamLIFO
+ st{streamVar = Just sv} q
+ , enqueue = enqueueLIFO sv q
+ , isWorkDone = checkEmpty
+ , needDoorBell = wfw
+ , svarStyle = AsyncVar
+ , workerCount = active
+ , accountThread = delThread sv
+#ifdef DIAGNOSTICS
+ , aheadWorkQueue = undefined
+ , outputHeap = undefined
+ , maxWorkers = maxWrk
+ , totalDispatches = disp
+ , maxOutQSize = maxOq
+ , maxHeapSize = maxHs
+ , maxWorkQSize = maxWq
+#endif
+ }
+ in return sv
+
+getFifoSVar :: MonadAsync m => State Stream m a -> IO (SVar Stream m a)
+getFifoSVar st = do
+ outQ <- newIORef ([], 0)
+ outQMv <- newEmptyMVar
+ active <- newIORef 0
+ wfw <- newIORef False
+ running <- newIORef S.empty
+ q <- newQ
+ yl <- case yieldLimit st of
+ Nothing -> return Nothing
+ Just x -> Just <$> newIORef x
+#ifdef DIAGNOSTICS
+ disp <- newIORef 0
+ maxWrk <- newIORef 0
+ maxOq <- newIORef 0
+ maxHs <- newIORef 0
+ maxWq <- newIORef 0
+#endif
+ let sv =
+ SVar { outputQueue = outQ
+ , maxYieldLimit = yl
+ , outputDoorBell = outQMv
+ , readOutputQ = readOutputQBounded (threadsHigh st) sv
+ , postProcess = postProcessBounded sv
+ , workerThreads = running
+ , workLoop = workLoopFIFO runStreamFIFO
+ st{streamVar = Just sv} q
+ , enqueue = enqueueFIFO sv q
+ , isWorkDone = nullQ q
+ , needDoorBell = wfw
+ , svarStyle = WAsyncVar
+ , workerCount = active
+ , accountThread = delThread sv
+#ifdef DIAGNOSTICS
+ , aheadWorkQueue = undefined
+ , outputHeap = undefined
+ , totalDispatches = disp
+ , maxWorkers = maxWrk
+ , maxOutQSize = maxOq
+ , maxHeapSize = maxHs
+ , maxWorkQSize = maxWq
+#endif
+ }
+ in return sv
+
+{-# INLINABLE newAsyncVar #-}
+newAsyncVar :: MonadAsync m
+ => State Stream m a -> Stream m a -> m (SVar Stream m a)
+newAsyncVar st m = do
+ sv <- liftIO $ getLifoSVar st
+ sendWorker sv m
+
+-- XXX Get rid of this?
+-- | Make a stream asynchronous: trigger the computation and return a stream
+-- in the underlying monad representing the output generated by the original
+-- computation. The returned action is exhaustible and must be drained exactly
+-- once. If it is not drained fully we may have a thread blocked forever, and
+-- once exhausted it will always return 'empty'.
+--
+-- @since 0.2.0
+{-# INLINABLE mkAsync #-}
+mkAsync :: (IsStream t, MonadAsync m) => t m a -> m (t m a)
+mkAsync m = newAsyncVar defState (toStream m) >>= return . fromSVar
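+
+-- A usage sketch (illustrative; 'fetch', 'url1' and 'url2' are hypothetical):
+-- the computation starts running as soon as 'mkAsync' returns and the
+-- returned stream must be drained exactly once.
+--
+--   do s <- mkAsync $ S.fromFoldableM [fetch url1, fetch url2]
+--      -- the fetches are already running concurrently at this point
+--      S.toList s >>= print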
+
+{-# INLINABLE mkAsync' #-}
+mkAsync' :: (IsStream t, MonadAsync m) => State Stream m a -> t m a -> m (t m a)
+mkAsync' st m = newAsyncVar st (toStream m) >>= return . fromSVar
+
+-- | Create a new SVar and enqueue one stream computation on it.
+{-# INLINABLE newWAsyncVar #-}
+newWAsyncVar :: MonadAsync m
+ => State Stream m a -> Stream m a -> m (SVar Stream m a)
+newWAsyncVar st m = do
+ sv <- liftIO $ getFifoSVar st
+ sendWorker sv m
+
+------------------------------------------------------------------------------
+-- Running streams concurrently
+------------------------------------------------------------------------------
+
+-- Concurrency rate control.
+--
+-- Our objective is to create more threads on demand if the consumer is running
+-- faster than us. As soon as we encounter a concurrent composition we create a
+-- push-pull pair of threads. We use an SVar for communication between the
+-- consumer, who pulls from the SVar, and the producer, who pushes to the SVar.
+-- The producer creates more threads if the SVar drains and becomes empty, that
+-- is, when the consumer is running faster.
+--
+-- XXX Note 1: This mechanism can be problematic if the initial production
+-- latency is high; we may end up creating too many threads. So we need some
+-- way to monitor and use the latency as well. Having a limit on the dispatches
+-- (programmer controlled) may also help.
+--
+-- TBD Note 2: We may want to run computations at the lower level of the
+-- composition tree serially even when they are composed using a parallel
+-- combinator. We can use 'serial' in place of 'async' and 'wSerial' in
+-- place of 'wAsync'. If we find that an SVar immediately above a computation
+-- gets drained empty we can switch to parallelizing the computation. For that
+-- we can use a state flag to fork the rest of the computation at any point of
+-- time inside the Monad bind operation if the consumer is running at a faster
+-- speed.
+--
+-- TBD Note 3: the binary composition operation ('parallel') allows us to
+-- dispatch a chunk size of only 1. If we have to dispatch in arbitrary
+-- chunk sizes we will need to compose the parallel actions using a data
+-- constructor (A Free container) instead so that we can divide it in chunks of
+-- arbitrary size before dispatching. If the stream is composed of
+-- hierarchically composed grains of different sizes then we can always switch
+-- to a desired granularity depending on the consumer speed.
+--
+-- TBD Note 4: for pure work (when we are not in the IO monad) we can divide it
+-- into just the number of CPUs.
+
+-- | Join two computations on the currently running 'SVar' queue for concurrent
+-- execution. When we are using parallel composition, an SVar is passed around
+-- as a state variable. We try to schedule a new parallel computation on the
+-- SVar passed to us. The first time, when no SVar exists, a new SVar is
+-- created. Subsequently, 'joinStreamVarAsync' may get called when a computation
+-- already scheduled on the SVar is further evaluated. For example, when (a
+-- `parallel` b) is evaluated it calls 'joinStreamVarAsync' to put 'a' and 'b' on
+-- the current scheduler queue.
+--
+-- The 'SVarStyle' required by the current composition context is passed as one
+-- of the parameters. If the scheduling and composition style of the new
+-- computation being scheduled is different from the style of the current SVar,
+-- then we create a new SVar and schedule it on that. The newly created SVar
+-- joins as one of the computations on the current SVar queue.
+--
+-- Cases when we need to switch to a new SVar:
+--
+-- * (x `parallel` y) `parallel` (t `parallel` u) -- all of them get scheduled on the same SVar
+-- * (x `parallel` y) `parallel` (t `async` u) -- @t@ and @u@ get scheduled on a new child SVar
+-- because of the scheduling policy change.
+-- * if we 'adapt' a stream of type 'async' to a stream of type
+-- 'Parallel', we create a new SVar at the transitioning bind.
+-- * When the stream is switching from disjunctive composition to conjunctive
+-- composition and vice-versa we create a new SVar to isolate the scheduling
+-- of the two.
+
+forkSVarAsync :: MonadAsync m
+ => SVarStyle -> Stream m a -> Stream m a -> Stream m a
+forkSVarAsync style m1 m2 = Stream $ \st stp sng yld -> do
+ sv <- case style of
+ AsyncVar -> newAsyncVar st (concurrently m1 m2)
+ WAsyncVar -> newWAsyncVar st (concurrently m1 m2)
+ _ -> error "illegal svar type"
+ unStream (fromSVar sv) (rstState st) stp sng yld
+ where
+ concurrently ma mb = Stream $ \st stp sng yld -> do
+ liftIO $ enqueue (fromJust $ streamVar st) mb
+ unStream ma st stp sng yld
+
+{-# INLINE joinStreamVarAsync #-}
+joinStreamVarAsync :: MonadAsync m
+ => SVarStyle -> Stream m a -> Stream m a -> Stream m a
+joinStreamVarAsync style m1 m2 = Stream $ \st stp sng yld -> do
+ case streamVar st of
+ Just sv | svarStyle sv == style ->
+ liftIO (enqueue sv m2) >> unStream m1 st stp sng yld
+ _ -> unStream (forkSVarAsync style m1 m2) st stp sng yld
+
+------------------------------------------------------------------------------
+-- Semigroup and Monoid style compositions for parallel actions
+------------------------------------------------------------------------------
+
+{-# INLINE asyncS #-}
+asyncS :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
+asyncS = joinStreamVarAsync AsyncVar
+
+-- | Polymorphic version of the 'Semigroup' operation '<>' of 'AsyncT'.
+-- Merges two streams possibly concurrently, preferring the
+-- elements from the left one when available.
+--
+-- @since 0.2.0
+{-# INLINE async #-}
+async :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
+async m1 m2 = fromStream $
+ joinStreamVarAsync AsyncVar (toStream m1) (toStream m2)
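+
+-- A minimal usage sketch (illustrative): with pure streams and a fast
+-- consumer the left stream is typically exhausted first, but the interleaving
+-- is demand driven and not guaranteed.
+--
+--   main = (toList $ fromFoldable [1,2] `async` fromFoldable [3,4]) >>= print
+--   -- e.g. [1,2,3,4]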
+
+-- | Same as 'async'.
+--
+-- @since 0.1.0
+{-# DEPRECATED (<|) "Please use 'async' instead." #-}
+{-# INLINE (<|) #-}
+(<|) :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
+(<|) = async
+
+-- | XXX we can implement this more efficiently by a direct implementation
+-- instead of combining streams using async.
+{-# INLINE consMAsync #-}
+consMAsync :: MonadAsync m => m a -> Stream m a -> Stream m a
+consMAsync m r = K.yieldM m `asyncS` r
+
+------------------------------------------------------------------------------
+-- AsyncT
+------------------------------------------------------------------------------
+
+-- | Deep async composition or async composition with depth first traversal. In
+-- a left to right 'Semigroup' composition it tries to yield elements from the
+-- left stream as long as it can, but it can run the right stream in parallel
+-- if it needs to, based on demand. The right stream can be run if the left
+-- stream blocks on IO or cannot produce elements fast enough for the consumer.
+--
+-- @
+-- main = ('toList' . 'asyncly' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
+-- @
+-- @
+-- [1,2,3,4]
+-- @
+--
+-- Any exceptions generated by a constituent stream are propagated to the
+-- output stream. The output and exceptions from a single stream are guaranteed
+-- to arrive in the same order in the resulting stream as they were generated
+-- in the input stream. However, the relative ordering of elements from
+-- different streams in the resulting stream can vary depending on scheduling
+-- and generation delays.
+--
+-- Similarly, the monad instance of 'AsyncT' /may/ run each iteration
+-- concurrently based on demand. More concurrent iterations are started only
+-- if the previous iterations are not able to produce enough output for the
+-- consumer.
+--
+-- @
+-- import "Streamly"
+-- import qualified "Streamly.Prelude" as S
+-- import Control.Concurrent
+--
+-- main = 'runStream' . 'asyncly' $ do
+-- n <- return 3 \<\> return 2 \<\> return 1
+-- S.once $ do
+-- threadDelay (n * 1000000)
+-- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
+-- @
+-- @
+-- ThreadId 40: Delay 1
+-- ThreadId 39: Delay 2
+-- ThreadId 38: Delay 3
+-- @
+--
+-- All iterations may run in the same thread if they do not block.
+--
+-- Note that async composition with depth first traversal can be used to
+-- combine an infinite number of streams as it explores only a bounded number
+-- of streams at a time.
+--
+-- @since 0.1.0
+newtype AsyncT m a = AsyncT {getAsyncT :: Stream m a}
+ deriving (MonadTrans)
+
+-- | A demand driven left biased parallely composing IO stream of elements of
+-- type @a@. See 'AsyncT' documentation for more details.
+--
+-- @since 0.2.0
+type Async a = AsyncT IO a
+
+-- | Fix the type of a polymorphic stream as 'AsyncT'.
+--
+-- @since 0.1.0
+asyncly :: IsStream t => AsyncT m a -> t m a
+asyncly = adapt
+
+instance IsStream AsyncT where
+ toStream = getAsyncT
+ fromStream = AsyncT
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> AsyncT IO a -> AsyncT IO a #-}
+ consM m r = fromStream $ consMAsync m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> AsyncT IO a -> AsyncT IO a #-}
+ (|:) = consM
+
+------------------------------------------------------------------------------
+-- Semigroup
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Semigroup (AsyncT m a) where
+ (<>) = async
+
+------------------------------------------------------------------------------
+-- Monoid
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Monoid (AsyncT m a) where
+ mempty = K.nil
+ mappend = (<>)
+
+------------------------------------------------------------------------------
+-- Monad
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Monad (AsyncT m) where
+ return = pure
+ (AsyncT m) >>= f = AsyncT $ K.bindWith asyncS m (getAsyncT . f)
+
+------------------------------------------------------------------------------
+-- Other instances
+------------------------------------------------------------------------------
+
+MONAD_APPLICATIVE_INSTANCE(AsyncT,MONADPARALLEL)
+MONAD_COMMON_INSTANCES(AsyncT, MONADPARALLEL)
+
+------------------------------------------------------------------------------
+-- WAsyncT
+------------------------------------------------------------------------------
+
+{-# INLINE wAsyncS #-}
+wAsyncS :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
+wAsyncS = joinStreamVarAsync WAsyncVar
+
+-- | XXX we can implement this more efficiently by a direct implementation
+-- instead of combining streams using wAsync.
+{-# INLINE consMWAsync #-}
+consMWAsync :: MonadAsync m => m a -> Stream m a -> Stream m a
+consMWAsync m r = K.yieldM m `wAsyncS` r
+
+-- | Polymorphic version of the 'Semigroup' operation '<>' of 'WAsyncT'.
+-- Merges two streams concurrently choosing elements from both fairly.
+--
+-- @since 0.2.0
+{-# INLINE wAsync #-}
+wAsync :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
+wAsync m1 m2 = fromStream $ wAsyncS (toStream m1) (toStream m2)
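+
+-- A minimal usage sketch (illustrative): 'wAsync' interleaves the two streams
+-- concurrently in a round robin fashion.
+--
+--   main = (toList $ fromFoldable [1,2] `wAsync` fromFoldable [3,4]) >>= print
+--   -- e.g. [1,3,2,4]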
+
+-- | Wide async composition or async composition with breadth first traversal.
+-- The Semigroup instance of 'WAsyncT' concurrently /traverses/ the composed
+-- streams using a breadth first traversal, i.e. in a round robin fashion,
+-- yielding elements from both streams alternately.
+--
+-- @
+-- main = ('toList' . 'wAsyncly' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
+-- @
+-- @
+-- [1,3,2,4]
+-- @
+--
+-- Any exceptions generated by a constituent stream are propagated to the
+-- output stream. The output and exceptions from a single stream are guaranteed
+-- to arrive in the same order in the resulting stream as they were generated
+-- in the input stream. However, the relative ordering of elements from
+-- different streams in the resulting stream can vary depending on scheduling
+-- and generation delays.
+--
+-- Similarly, the 'Monad' instance of 'WAsyncT' runs /all/ iterations fairly
+-- concurrently using round robin scheduling.
+--
+-- @
+-- import "Streamly"
+-- import qualified "Streamly.Prelude" as S
+-- import Control.Concurrent
+--
+-- main = 'runStream' . 'wAsyncly' $ do
+-- n <- return 3 \<\> return 2 \<\> return 1
+-- S.once $ do
+-- threadDelay (n * 1000000)
+-- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
+-- @
+-- @
+-- ThreadId 40: Delay 1
+-- ThreadId 39: Delay 2
+-- ThreadId 38: Delay 3
+-- @
+--
+-- Unlike 'AsyncT' all iterations are guaranteed to run fairly
+-- concurrently, unconditionally.
+--
+-- Note that async composition with breadth first traversal can only combine a
+-- finite number of streams as it needs to retain state for each unfinished
+-- stream.
+--
+-- @since 0.2.0
+newtype WAsyncT m a = WAsyncT {getWAsyncT :: Stream m a}
+ deriving (MonadTrans)
+
+-- | A round robin parallely composing IO stream of elements of type @a@.
+-- See 'WAsyncT' documentation for more details.
+--
+-- @since 0.2.0
+type WAsync a = WAsyncT IO a
+
+-- | Fix the type of a polymorphic stream as 'WAsyncT'.
+--
+-- @since 0.2.0
+wAsyncly :: IsStream t => WAsyncT m a -> t m a
+wAsyncly = adapt
+
+instance IsStream WAsyncT where
+ toStream = getWAsyncT
+ fromStream = WAsyncT
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> WAsyncT IO a -> WAsyncT IO a #-}
+ consM m r = fromStream $ consMWAsync m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> WAsyncT IO a -> WAsyncT IO a #-}
+ (|:) = consM
+
+------------------------------------------------------------------------------
+-- Semigroup
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Semigroup (WAsyncT m a) where
+ (<>) = wAsync
+
+------------------------------------------------------------------------------
+-- Monoid
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Monoid (WAsyncT m a) where
+ mempty = K.nil
+ mappend = (<>)
+
+------------------------------------------------------------------------------
+-- Monad
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Monad (WAsyncT m) where
+ return = pure
+ (WAsyncT m) >>= f =
+ WAsyncT $ K.bindWith wAsyncS m (getWAsyncT . f)
+
+------------------------------------------------------------------------------
+-- Other instances
+------------------------------------------------------------------------------
+
+MONAD_APPLICATIVE_INSTANCE(WAsyncT,MONADPARALLEL)
+MONAD_COMMON_INSTANCES(WAsyncT, MONADPARALLEL)
diff --git a/src/Streamly/Streams/Instances.hs b/src/Streamly/Streams/Instances.hs
new file mode 100644
index 0000000..1430704
--- /dev/null
+++ b/src/Streamly/Streams/Instances.hs
@@ -0,0 +1,43 @@
+------------------------------------------------------------------------------
+-- CPP macros for common instances
+------------------------------------------------------------------------------
+
+-- XXX use template haskell instead and include Monoid and IsStream instances
+-- as well.
+
+#define MONADPARALLEL , MonadAsync m
+
+#define MONAD_APPLICATIVE_INSTANCE(STREAM,CONSTRAINT) \
+instance (Monad m CONSTRAINT) => Applicative (STREAM m) where { \
+ pure = STREAM . K.yield; \
+ (<*>) = ap }
+
+#define MONAD_COMMON_INSTANCES(STREAM,CONSTRAINT) \
+instance Monad m => Functor (STREAM m) where { \
+ fmap = map }; \
+ \
+instance (MonadBase b m, Monad m CONSTRAINT) => MonadBase b (STREAM m) where {\
+ liftBase = liftBaseDefault }; \
+ \
+instance (MonadIO m CONSTRAINT) => MonadIO (STREAM m) where { \
+ liftIO = lift . liftIO }; \
+ \
+instance (MonadThrow m CONSTRAINT) => MonadThrow (STREAM m) where { \
+ throwM = lift . throwM }; \
+ \
+{- \
+instance (MonadError e m CONSTRAINT) => MonadError e (STREAM m) where { \
+ throwError = lift . throwError; \
+ catchError m h = \
+ fromStream $ withCatchError (toStream m) (\e -> toStream $ h e) }; \
+-} \
+ \
+instance (MonadReader r m CONSTRAINT) => MonadReader r (STREAM m) where { \
+ ask = lift ask; \
+ local f m = fromStream $ K.withLocal f (toStream m) }; \
+ \
+instance (MonadState s m CONSTRAINT) => MonadState s (STREAM m) where { \
+ get = lift get; \
+ put x = lift (put x); \
+ state k = lift (state k) }
+
diff --git a/src/Streamly/Streams/Parallel.hs b/src/Streamly/Streams/Parallel.hs
new file mode 100644
index 0000000..0518e53
--- /dev/null
+++ b/src/Streamly/Streams/Parallel.hs
@@ -0,0 +1,370 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving#-}
+{-# LANGUAGE InstanceSigs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+-- |
+-- Module : Streamly.Streams.Parallel
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.Streams.Parallel
+ (
+ ParallelT
+ , Parallel
+ , parallely
+ , parallel
+
+ -- * Function application
+ , mkParallel
+ , (|$)
+ , (|&)
+ , (|$.)
+ , (|&.)
+ )
+where
+
+import Control.Monad (ap)
+import Control.Monad.Base (MonadBase(..), liftBaseDefault)
+import Control.Monad.Catch (MonadThrow, throwM)
+-- import Control.Monad.Error.Class (MonadError(..))
+import Control.Monad.IO.Class (MonadIO(..))
+import Control.Monad.Reader.Class (MonadReader(..))
+import Control.Monad.State.Class (MonadState(..))
+import Control.Monad.Trans.Class (MonadTrans(lift))
+import Data.Functor (void)
+import Data.Maybe (fromJust)
+import Data.Semigroup (Semigroup(..))
+import Prelude hiding (map)
+
+import Streamly.Streams.SVar (fromSVar)
+import Streamly.Streams.Serial (map)
+import Streamly.SVar
+import Streamly.Streams.StreamK (IsStream(..), Stream(..), adapt)
+import qualified Streamly.Streams.StreamK as K
+
+#include "Instances.hs"
+
+-------------------------------------------------------------------------------
+-- Parallel
+-------------------------------------------------------------------------------
+
+{-# NOINLINE runOne #-}
+runOne :: MonadIO m => State Stream m a -> Stream m a -> m ()
+runOne st m = unStream m st stop single yieldk
+
+ where
+
+ sv = fromJust $ streamVar st
+ stop = liftIO $ sendStop sv
+ sendit a = liftIO $ sendYield (-1) sv (ChildYield a)
+ single a = sendit a >> stop
+    -- XXX there is no flow control in the parallel case. We should perhaps use
+    -- a queue, put the remaining work back on it, and exit the thread when the
+    -- outputQueue overflows. Parallel is dangerous because it can accumulate
+    -- unbounded output in the buffer.
+ yieldk a r = void (sendit a) >> runOne st r
+
+{-# NOINLINE forkSVarPar #-}
+forkSVarPar :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
+forkSVarPar m r = Stream $ \st stp sng yld -> do
+ sv <- newParallelVar
+ pushWorkerPar sv (runOne st{streamVar = Just sv} m)
+ pushWorkerPar sv (runOne st{streamVar = Just sv} r)
+ (unStream (fromSVar sv)) (rstState st) stp sng yld
+
+{-# INLINE joinStreamVarPar #-}
+joinStreamVarPar :: MonadAsync m
+ => SVarStyle -> Stream m a -> Stream m a -> Stream m a
+joinStreamVarPar style m1 m2 = Stream $ \st stp sng yld ->
+ case streamVar st of
+ Just sv | svarStyle sv == style -> do
+ pushWorkerPar sv (runOne st m1)
+ unStream m2 st stp sng yld
+ _ -> unStream (forkSVarPar m1 m2) (rstState st) stp sng yld
+
+{-# INLINE parallelStream #-}
+parallelStream :: MonadAsync m => Stream m a -> Stream m a -> Stream m a
+parallelStream = joinStreamVarPar ParallelVar
+
+-- | XXX we can implement this more efficiently by a direct implementation
+-- instead of combining streams using parallel.
+{-# INLINE consMParallel #-}
+consMParallel :: MonadAsync m => m a -> Stream m a -> Stream m a
+consMParallel m r = K.yieldM m `parallelStream` r
+
+-- | Polymorphic version of the 'Semigroup' operation '<>' of 'ParallelT'
+-- Merges two streams concurrently.
+--
+-- @since 0.2.0
+{-# INLINE parallel #-}
+parallel :: (IsStream t, MonadAsync m) => t m a -> t m a -> t m a
+parallel m1 m2 = fromStream $ parallelStream (toStream m1) (toStream m2)
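+
+-- A minimal usage sketch (illustrative): both streams are evaluated in
+-- separate threads and results arrive on a first come first serve basis, so
+-- the ordering across streams is not deterministic.
+--
+--   main = (toList $ fromFoldable [1,2] `parallel` fromFoldable [3,4]) >>= print
+--   -- e.g. [1,3,2,4]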
+
+------------------------------------------------------------------------------
+-- Convert a stream to parallel
+------------------------------------------------------------------------------
+
+mkParallel :: (IsStream t, MonadAsync m) => t m a -> m (t m a)
+mkParallel m = do
+ sv <- newParallelVar
+ pushWorkerPar sv (runOne defState{streamVar = Just sv} $ toStream m)
+ return $ fromSVar sv
+
+------------------------------------------------------------------------------
+-- Stream to stream concurrent function application
+------------------------------------------------------------------------------
+
+{-# INLINE applyWith #-}
+applyWith :: (IsStream t, MonadAsync m) => (t m a -> t m b) -> t m a -> t m b
+applyWith f m = fromStream $ Stream $ \st stp sng yld -> do
+ sv <- newParallelVar
+ pushWorkerPar sv (runOne st{streamVar = Just sv} (toStream m))
+ unStream (toStream $ f $ fromSVar sv) st stp sng yld
+
+------------------------------------------------------------------------------
+-- Stream runner concurrent function application
+------------------------------------------------------------------------------
+
+{-# INLINE runWith #-}
+runWith :: (IsStream t, MonadAsync m) => (t m a -> m b) -> t m a -> m b
+runWith f m = do
+ sv <- newParallelVar
+ pushWorkerPar sv (runOne defState{streamVar = Just sv} $ toStream m)
+ f $ fromSVar sv
+
+------------------------------------------------------------------------------
+-- Concurrent Application
+------------------------------------------------------------------------------
+
+infixr 0 |$
+infixr 0 |$.
+
+infixl 1 |&
+infixl 1 |&.
+
+-- | Parallel function application operator for streams; just like the regular
+-- function application operator '$' except that it is concurrent. The
+-- following code prints a value every second even though each stage adds a 1
+-- second delay.
+--
+--
+-- @
+-- runStream $
+-- S.mapM (\\x -> threadDelay 1000000 >> print x)
+-- |$ S.repeatM (threadDelay 1000000 >> return 1)
+-- @
+--
+-- /Concurrent/
+--
+-- @since 0.3.0
+{-# INLINE (|$) #-}
+(|$) :: (IsStream t, MonadAsync m) => (t m a -> t m b) -> t m a -> t m b
+f |$ x = applyWith f x
+
+-- | Parallel reverse function application operator for streams; just like the
+-- regular reverse function application operator '&' except that it is
+-- concurrent.
+--
+-- @
+-- runStream $
+-- S.repeatM (threadDelay 1000000 >> return 1)
+-- |& S.mapM (\\x -> threadDelay 1000000 >> print x)
+-- @
+--
+-- /Concurrent/
+--
+-- @since 0.3.0
+{-# INLINE (|&) #-}
+(|&) :: (IsStream t, MonadAsync m) => t m a -> (t m a -> t m b) -> t m b
+x |& f = f |$ x
+
+-- | Parallel function application operator; applies a @run@ or @fold@ function
+-- to a stream such that the fold consumer and the stream producer run in
+-- parallel. A @run@ or @fold@ function reduces the stream to a value in the
+-- underlying monad. The @.@ at the end of the operator is a mnemonic for
+-- termination of the stream.
+--
+-- @
+-- S.foldlM' (\\_ a -> threadDelay 1000000 >> print a) ()
+-- |$. S.repeatM (threadDelay 1000000 >> return 1)
+-- @
+--
+-- /Concurrent/
+--
+-- @since 0.3.0
+{-# INLINE (|$.) #-}
+(|$.) :: (IsStream t, MonadAsync m) => (t m a -> m b) -> t m a -> m b
+f |$. x = runWith f x
+
+-- | Parallel reverse function application operator for applying a run or fold
+-- function to a stream. Just like '|$.' except that the operands are reversed.
+--
+-- @
+-- S.repeatM (threadDelay 1000000 >> return 1)
+-- |&. S.foldlM' (\\_ a -> threadDelay 1000000 >> print a) ()
+-- @
+--
+-- /Concurrent/
+--
+-- @since 0.3.0
+{-# INLINE (|&.) #-}
+(|&.) :: (IsStream t, MonadAsync m) => t m a -> (t m a -> m b) -> m b
+x |&. f = f |$. x
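+
+-- These operators compose into pipelines where each stage runs concurrently
+-- with its neighbours. An illustrative sketch, assuming 'S.repeatM', 'S.map'
+-- and 'S.foldlM'' from "Streamly.Prelude":
+--
+--   S.repeatM (threadDelay 1000000 >> return 1)
+--       |& S.map (+ 1)
+--       |&. S.foldlM' (\_ a -> print a) ()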
+
+------------------------------------------------------------------------------
+-- ParallelT
+------------------------------------------------------------------------------
+
+-- | Async composition with simultaneous traversal of all streams.
+--
+-- The Semigroup instance of 'ParallelT' concurrently /merges/ two streams,
+-- running both strictly concurrently and yielding elements from both streams
+-- as they arrive. When multiple streams are combined using 'ParallelT' each
+-- one is evaluated in its own thread and the results produced are presented in
+-- the combined stream on a first come first serve basis.
+--
+-- 'AsyncT' and 'WAsyncT' are /concurrent lookahead streams/ each with a
+-- specific type of consumption pattern (depth first or breadth first). Since
+-- they are lookahead, they may introduce a certain default latency in starting
+-- more concurrent tasks for efficiency reasons or may put a default limitation
+-- on the resource consumption (e.g. number of concurrent threads for
+-- lookahead). If we look at the implementation detail, they both can share a
+-- pool of worker threads to evaluate the streams in the desired pattern and at
+-- the desired rate. However, 'ParallelT' uses a separate runtime thread to
+-- evaluate each stream.
+--
+-- 'WAsyncT' is similar to 'ParallelT', as both of them evaluate the
+-- constituent streams fairly in a round robin fashion.
+-- However, the key difference is that 'WAsyncT' is lazy or pull driven
+-- whereas 'ParallelT' is strict or push driven. 'ParallelT' immediately
+-- starts concurrent evaluation of both the streams (in separate threads) and
+-- later picks the results whereas 'WAsyncT' may wait for a certain latency
+-- threshold before initiating concurrent evaluation of the next stream. The
+-- concurrent scheduling of the next stream or the degree of concurrency is
+-- driven by the feedback from the consumer. In case of 'ParallelT' each stream
+-- is evaluated in a separate thread and results are /pushed/ to a shared
+-- output buffer, the evaluation rate is controlled by blocking when the buffer
+-- is full.
+--
+-- Concurrent lookahead streams are generally more efficient than
+-- 'ParallelT' and can work pretty efficiently even for smaller tasks because
+-- they do not necessarily use a separate thread for each task. So they should
+-- be preferred over 'ParallelT' especially when efficiency is a concern and
+-- simultaneous strict evaluation is not a requirement. 'ParallelT' is useful
+-- for cases when the streams are required to be evaluated simultaneously
+-- irrespective of how the consumer consumes them e.g. when we want to race
+-- two tasks and want to start both strictly at the same time or if we have
+-- timers in the parallel tasks and our results depend on the timers being
+-- started at the same time. We can say that 'ParallelT' is almost the same
+-- (modulo some implementation differences) as 'WAsyncT' when the latter is
+-- used with unlimited lookahead and zero latency in initiating lookahead.
+--
+-- @
+-- main = ('toList' . 'parallely' $ (fromFoldable [1,2]) \<> (fromFoldable [3,4])) >>= print
+-- @
+-- @
+-- [1,3,2,4]
+-- @
+--
+-- When streams with more than one element are merged, it yields from
+-- whichever stream yields first, without any bias, unlike the 'Async' style
+-- streams.
+--
+-- Any exceptions generated by a constituent stream are propagated to the
+-- output stream. The output and exceptions from a single stream are guaranteed
+-- to arrive in the same order in the resulting stream as they were generated
+-- in the input stream. However, the relative ordering of elements from
+-- different streams in the resulting stream can vary depending on scheduling
+-- and generation delays.
+--
+-- Similarly, the 'Monad' instance of 'ParallelT' runs /all/ iterations
+-- of the loop concurrently.
+--
+-- @
+-- import "Streamly"
+-- import qualified "Streamly.Prelude" as S
+-- import Control.Concurrent
+--
+-- main = 'runStream' . 'parallely' $ do
+-- n <- return 3 \<\> return 2 \<\> return 1
+-- S.once $ do
+-- threadDelay (n * 1000000)
+-- myThreadId >>= \\tid -> putStrLn (show tid ++ ": Delay " ++ show n)
+-- @
+-- @
+-- ThreadId 40: Delay 1
+-- ThreadId 39: Delay 2
+-- ThreadId 38: Delay 3
+-- @
+--
+-- Note that parallel composition can only combine a finite number of
+-- streams as it needs to retain state for each unfinished stream.
+--
+-- @since 0.1.0
+newtype ParallelT m a = ParallelT {getParallelT :: Stream m a}
+ deriving (MonadTrans)
+
+-- | A parallely composing IO stream of elements of type @a@.
+-- See 'ParallelT' documentation for more details.
+--
+-- @since 0.2.0
+type Parallel a = ParallelT IO a
+
+-- | Fix the type of a polymorphic stream as 'ParallelT'.
+--
+-- @since 0.1.0
+parallely :: IsStream t => ParallelT m a -> t m a
+parallely = adapt
+
+instance IsStream ParallelT where
+ toStream = getParallelT
+ fromStream = ParallelT
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> ParallelT IO a -> ParallelT IO a #-}
+ consM m r = fromStream $ consMParallel m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> ParallelT IO a -> ParallelT IO a #-}
+ (|:) = consM
+
+------------------------------------------------------------------------------
+-- Semigroup
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Semigroup (ParallelT m a) where
+ (<>) = parallel
+
+------------------------------------------------------------------------------
+-- Monoid
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Monoid (ParallelT m a) where
+ mempty = K.nil
+ mappend = (<>)
+
+------------------------------------------------------------------------------
+-- Monad
+------------------------------------------------------------------------------
+
+instance MonadAsync m => Monad (ParallelT m) where
+ return = pure
+ (ParallelT m) >>= f
+ = ParallelT $ K.bindWith parallelStream m (getParallelT . f)
+
+------------------------------------------------------------------------------
+-- Other instances
+------------------------------------------------------------------------------
+
+MONAD_APPLICATIVE_INSTANCE(ParallelT,MONADPARALLEL)
+MONAD_COMMON_INSTANCES(ParallelT, MONADPARALLEL)
diff --git a/src/Streamly/Streams/Prelude.hs b/src/Streamly/Streams/Prelude.hs
new file mode 100644
index 0000000..ec3054d
--- /dev/null
+++ b/src/Streamly/Streams/Prelude.hs
@@ -0,0 +1,154 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving#-}
+{-# LANGUAGE InstanceSigs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE RankNTypes #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+#if __GLASGOW_HASKELL__ >= 800
+{-# OPTIONS_GHC -Wno-orphans #-}
+#endif
+
+#include "inline.h"
+
+-- |
+-- Module : Streamly.Streams.Prelude
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.Streams.Prelude
+ (
+ -- * Elimination
+ runStream
+ , runStreaming -- deprecated
+ , runStreamT -- deprecated
+ , runInterleavedT -- deprecated
+ , runParallelT -- deprecated
+ , runAsyncT -- deprecated
+ , runZipStream -- deprecated
+ , runZipAsync -- deprecated
+
+ -- * Fold Utilities
+ , foldWith
+ , foldMapWith
+ , forEachWith
+ )
+where
+
+import Streamly.Streams.StreamK (IsStream(..))
+import Streamly.Streams.Serial (SerialT, WSerialT)
+import Streamly.Streams.Parallel (ParallelT)
+import Streamly.Streams.Async (AsyncT)
+import Streamly.Streams.Zip (ZipSerialM, ZipAsyncM)
+
+import qualified Streamly.Streams.StreamD as D
+import qualified Streamly.Streams.StreamK as K
+
+------------------------------------------------------------------------------
+-- Eliminating a stream
+------------------------------------------------------------------------------
+
+-- | Run a streaming composition, discarding the results. By default it
+-- interprets the stream as 'SerialT'; to run other types of streams use the
+-- type adapting combinators, for example @runStream . 'asyncly'@.
+--
+-- @since 0.2.0
+{-# INLINE_EARLY runStream #-}
+runStream :: Monad m => SerialT m a -> m ()
+runStream m = D.runStream $ D.fromStreamK (toStream m)
+{-# RULES "runStream fallback to CPS" [1]
+ forall a. D.runStream (D.fromStreamK a) = K.runStream a #-}
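+
+-- A usage sketch (illustrative), draining a concurrent stream of effects:
+--
+--   main = runStream . asyncly $ S.take 5 $ S.repeatM (myThreadId >>= print)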
+
+-- | Same as 'runStream'
+--
+-- @since 0.1.0
+{-# DEPRECATED runStreaming "Please use runStream instead." #-}
+runStreaming :: (Monad m, IsStream t) => t m a -> m ()
+runStreaming = runStream . K.adapt
+
+-- | Same as @runStream@.
+--
+-- @since 0.1.0
+{-# DEPRECATED runStreamT "Please use runStream instead." #-}
+runStreamT :: Monad m => SerialT m a -> m ()
+runStreamT = runStream
+
+-- | Same as @runStream . wSerially@.
+--
+-- @since 0.1.0
+{-# DEPRECATED runInterleavedT "Please use 'runStream . interleaving' instead." #-}
+runInterleavedT :: Monad m => WSerialT m a -> m ()
+runInterleavedT = runStream . K.adapt
+
+-- | Same as @runStream . parallely@.
+--
+-- @since 0.1.0
+{-# DEPRECATED runParallelT "Please use 'runStream . parallely' instead." #-}
+runParallelT :: Monad m => ParallelT m a -> m ()
+runParallelT = runStream . K.adapt
+
+-- | Same as @runStream . asyncly@.
+--
+-- @since 0.1.0
+{-# DEPRECATED runAsyncT "Please use 'runStream . asyncly' instead." #-}
+runAsyncT :: Monad m => AsyncT m a -> m ()
+runAsyncT = runStream . K.adapt
+
+-- | Same as @runStream . zipping@.
+--
+-- @since 0.1.0
+{-# DEPRECATED runZipStream "Please use 'runStream . zipSerially' instead." #-}
+runZipStream :: Monad m => ZipSerialM m a -> m ()
+runZipStream = runStream . K.adapt
+
+-- | Same as @runStream . zippingAsync@.
+--
+-- @since 0.1.0
+{-# DEPRECATED runZipAsync "Please use 'runStream . zipAsyncly' instead." #-}
+runZipAsync :: Monad m => ZipAsyncM m a -> m ()
+runZipAsync = runStream . K.adapt
+
+------------------------------------------------------------------------------
+-- Fold Utilities
+------------------------------------------------------------------------------
+
+-- | A variant of 'Data.Foldable.fold' that allows you to fold a 'Foldable'
+-- container of streams using the specified stream sum operation.
+--
+-- @foldWith 'async' $ map return [1..3]@
+--
+-- @since 0.1.0
+{-# INLINABLE foldWith #-}
+foldWith :: (IsStream t, Foldable f)
+ => (t m a -> t m a -> t m a) -> f (t m a) -> t m a
+foldWith f = foldr f K.nil
+
+-- | A variant of 'foldMap' that allows you to map a monadic streaming action
+-- on a 'Foldable' container and then fold it using the specified stream sum
+-- operation.
+--
+-- @foldMapWith 'async' return [1..3]@
+--
+-- @since 0.1.0
+{-# INLINABLE foldMapWith #-}
+foldMapWith :: (IsStream t, Foldable f)
+ => (t m b -> t m b -> t m b) -> (a -> t m b) -> f a -> t m b
+foldMapWith f g = foldr (f . g) K.nil
+
+-- | Like 'foldMapWith' but with the last two arguments reversed i.e. the
+-- monadic streaming function is the last argument.
+--
+-- @since 0.1.0
+{-# INLINABLE forEachWith #-}
+forEachWith :: (IsStream t, Foldable f)
+ => (t m b -> t m b -> t m b) -> f a -> (a -> t m b) -> t m b
+forEachWith f xs g = foldr (f . g) K.nil xs
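+
+-- A usage sketch (illustrative): run an action for each element of a
+-- container, combining the resulting streams concurrently with 'async'.
+--
+--   main = runStream $ forEachWith async [1..3] $ \x -> S.yieldM (print x)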
diff --git a/src/Streamly/Streams/SVar.hs b/src/Streamly/Streams/SVar.hs
new file mode 100644
index 0000000..971fdb7
--- /dev/null
+++ b/src/Streamly/Streams/SVar.hs
@@ -0,0 +1,143 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE MagicHash #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE RankNTypes #-}
+{-# LANGUAGE UnboxedTuples #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+#include "inline.h"
+
+-- |
+-- Module : Streamly.Streams.SVar
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.Streams.SVar
+ (
+ fromSVar
+ , toSVar
+ , maxThreads
+ , maxBuffer
+ , maxYields
+ )
+where
+
+import Control.Monad.Catch (throwM)
+
+import Streamly.SVar
+import Streamly.Streams.StreamK
+import Streamly.Streams.Serial (SerialT)
+
+-- MVar diagnostics has some overhead - around 5% on the asyncly null
+-- benchmark. We can keep it on in production to debug problems quickly if and
+-- when they happen, but it may result in unexpected output when threads are
+-- left hanging until they are GCed because the consumer went away.
+
+-- | Pull a stream from an SVar.
+{-# NOINLINE fromStreamVar #-}
+fromStreamVar :: MonadAsync m => SVar Stream m a -> Stream m a
+fromStreamVar sv = Stream $ \st stp sng yld -> do
+ list <- readOutputQ sv
+ -- Reversing the output is important to guarantee that we process the
+ -- outputs in the same order as they were generated by the constituent
+ -- streams.
+ unStream (processEvents $ reverse list) (rstState st) stp sng yld
+
+ where
+
+ allDone stp = do
+#ifdef DIAGNOSTICS
+#ifdef DIAGNOSTICS_VERBOSE
+ svInfo <- liftIO $ dumpSVar sv
+ liftIO $ hPutStrLn stderr $ "fromStreamVar done\n" ++ svInfo
+#endif
+#endif
+ stp
+
+ {-# INLINE processEvents #-}
+ processEvents [] = Stream $ \st stp sng yld -> do
+ done <- postProcess sv
+ if done
+ then allDone stp
+ else unStream (fromStreamVar sv) (rstState st) stp sng yld
+
+ processEvents (ev : es) = Stream $ \st stp sng yld -> do
+ let rest = processEvents es
+ case ev of
+ ChildYield a -> yld a rest
+ ChildStop tid e -> do
+ accountThread sv tid
+ case e of
+ Nothing -> unStream rest (rstState st) stp sng yld
+ Just ex -> throwM ex
+
+{-# INLINE fromSVar #-}
+fromSVar :: (MonadAsync m, IsStream t) => SVar Stream m a -> t m a
+fromSVar sv = fromStream $ fromStreamVar sv
+
+-- | Write a stream to an 'SVar' in a non-blocking manner. The stream can then
+-- be read back from the SVar using 'fromSVar'.
+toSVar :: (IsStream t, MonadAsync m) => SVar Stream m a -> t m a -> m ()
+toSVar sv m = toStreamVar sv (toStream m)
+
+-------------------------------------------------------------------------------
+-- Concurrency control
+-------------------------------------------------------------------------------
+--
+-- XXX need to write these in direct style otherwise they will break fusion.
+--
+-- | Specify the maximum number of threads that can be spawned concurrently
+-- when using concurrent streams. This is not the grand total number of threads
+-- but the maximum number of threads at each point of concurrency.
+-- A value of 0 resets the thread limit to the default; a negative value means
+-- there is no limit. The default value is 1500.
+--
+-- @since 0.4.0
+{-# INLINE_NORMAL maxThreads #-}
+maxThreads :: IsStream t => Int -> t m a -> t m a
+maxThreads n m = fromStream $ Stream $ \st stp sng yld -> do
+ let n' = if n == 0 then defaultMaxThreads else n
+ unStream (toStream m) (st {threadsHigh = n'}) stp sng yld
+
+{-# RULES "maxThreadsSerial serial" maxThreads = maxThreadsSerial #-}
+maxThreadsSerial :: Int -> SerialT m a -> SerialT m a
+maxThreadsSerial _ = id
+
+-- | Specify the maximum size of the buffer for storing the results from
+-- concurrent computations. If the buffer becomes full we stop spawning more
+-- concurrent tasks until there is space in the buffer.
+-- A value of 0 resets the buffer size to the default; a negative value means
+-- there is no limit. The default value is 1500.
+--
+-- @since 0.4.0
+{-# INLINE_NORMAL maxBuffer #-}
+maxBuffer :: IsStream t => Int -> t m a -> t m a
+maxBuffer n m = fromStream $ Stream $ \st stp sng yld -> do
+ let n' = if n == 0 then defaultMaxBuffer else n
+ unStream (toStream m) (st {bufferHigh = n'}) stp sng yld
+
+{-# RULES "maxBuffer serial" maxBuffer = maxBufferSerial #-}
+maxBufferSerial :: Int -> SerialT m a -> SerialT m a
+maxBufferSerial _ = id
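+
+-- A usage sketch (illustrative; 'fetchPage' and 'urls' are hypothetical):
+-- cap a concurrent map at 4 worker threads and buffer at most 100 results.
+--
+--   runStream . asyncly . maxThreads 4 . maxBuffer 100
+--       $ S.mapM fetchPage (S.fromFoldable urls)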
+
+-- Stop concurrent dispatches after this limit. This is useful in APIs like
+-- "take" where we want to dispatch only up to the number of elements "take"
+-- needs. This value applies only to the immediate next level and is not
+-- inherited by everything in enclosed scope.
+{-# INLINE_NORMAL maxYields #-}
+maxYields :: IsStream t => Maybe Int -> t m a -> t m a
+maxYields n m = fromStream $ Stream $ \st stp sng yld -> do
+ unStream (toStream m) (st {yieldLimit = n}) stp sng yld
+
+{-# RULES "maxYields serial" maxYields = maxYieldsSerial #-}
+maxYieldsSerial :: Maybe Int -> SerialT m a -> SerialT m a
+maxYieldsSerial _ = id
diff --git a/src/Streamly/Streams/Serial.hs b/src/Streamly/Streams/Serial.hs
new file mode 100644
index 0000000..0e3d15c
--- /dev/null
+++ b/src/Streamly/Streams/Serial.hs
@@ -0,0 +1,338 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving#-}
+{-# LANGUAGE InstanceSigs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+-- |
+-- Module : Streamly.Streams.Serial
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.Streams.Serial
+ (
+ -- * Serial appending stream
+ SerialT
+ , StreamT -- deprecated
+ , Serial
+ , serial
+ , serially
+
+ -- * Serial interleaving stream
+ , WSerialT
+ , InterleavedT -- deprecated
+ , WSerial
+ , wSerial
+ , (<=>) -- deprecated
+ , wSerially
+ , interleaving -- deprecated
+
+ -- * Transformation
+ , map
+ , mapM
+ )
+where
+
+import Control.Monad (ap)
+import Control.Monad.Base (MonadBase(..), liftBaseDefault)
+import Control.Monad.Catch (MonadThrow, throwM)
+-- import Control.Monad.Error.Class (MonadError(..))
+import Control.Monad.IO.Class (MonadIO(..))
+import Control.Monad.Reader.Class (MonadReader(..))
+import Control.Monad.State.Class (MonadState(..))
+import Control.Monad.Trans.Class (MonadTrans(lift))
+import Data.Semigroup (Semigroup(..))
+import Prelude hiding (map, mapM)
+
+import Streamly.SVar (rstState)
+import Streamly.Streams.StreamK (IsStream(..), adapt, Stream(..))
+import qualified Streamly.Streams.StreamK as K
+import qualified Streamly.Streams.StreamD as D
+
+#include "Instances.hs"
+#include "inline.h"
+
+------------------------------------------------------------------------------
+-- SerialT
+------------------------------------------------------------------------------
+
+-- | Deep serial composition or serial composition with depth first traversal.
+-- The 'Semigroup' instance of 'SerialT' appends two streams serially in a
+-- depth first manner, yielding all elements from the first stream, and then
+-- all elements from the second stream.
+--
+-- @
+-- import Streamly
+-- import qualified "Streamly.Prelude" as S
+--
+-- main = ('toList' . 'serially' $ (fromFoldable [1,2]) \<\> (fromFoldable [3,4])) >>= print
+-- @
+-- @
+-- [1,2,3,4]
+-- @
+--
+-- The 'Monad' instance runs the /monadic continuation/ for each
+-- element of the stream, serially.
+--
+-- @
+-- main = 'runStream' . 'serially' $ do
+-- x <- return 1 \<\> return 2
+-- S.once $ print x
+-- @
+-- @
+-- 1
+-- 2
+-- @
+--
+-- 'SerialT' nests streams serially in a depth first manner.
+--
+-- @
+-- main = 'runStream' . 'serially' $ do
+-- x <- return 1 \<\> return 2
+-- y <- return 3 \<\> return 4
+-- S.once $ print (x, y)
+-- @
+-- @
+-- (1,3)
+-- (1,4)
+-- (2,3)
+-- (2,4)
+-- @
+--
+-- This behavior of 'SerialT' is exactly like a list transformer. We call the
+-- monadic code being run for each element of the stream a monadic
+-- continuation. In the imperative paradigm we can think of this composition
+-- as nested @for@ loops with the monadic continuation as the body of the
+-- loop. The loop iterates over all elements of the stream.
+--
+-- The 'serially' combinator can be omitted as the default stream type is
+-- 'SerialT'.
+-- Note that serial composition with depth first traversal can be used to
+-- combine an infinite number of streams as it explores only one stream at a
+-- time.
+--
+-- @since 0.2.0
+newtype SerialT m a = SerialT {getSerialT :: Stream m a}
+ deriving (Semigroup, Monoid, MonadTrans)
+
+-- | A serial IO stream of elements of type @a@. See 'SerialT' documentation
+-- for more details.
+--
+-- @since 0.2.0
+type Serial a = SerialT IO a
+
+-- |
+-- @since 0.1.0
+{-# DEPRECATED StreamT "Please use 'SerialT' instead." #-}
+type StreamT = SerialT
+
+-- | Fix the type of a polymorphic stream as 'SerialT'.
+--
+-- @since 0.1.0
+serially :: IsStream t => SerialT m a -> t m a
+serially = adapt
+
+instance IsStream SerialT where
+ toStream = getSerialT
+ fromStream = SerialT
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> SerialT IO a -> SerialT IO a #-}
+ consM :: Monad m => m a -> SerialT m a -> SerialT m a
+ consM m r = fromStream $ K.consMSerial m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> SerialT IO a -> SerialT IO a #-}
+ (|:) :: Monad m => m a -> SerialT m a -> SerialT m a
+ m |: r = fromStream $ K.consMSerial m (toStream r)
+
+------------------------------------------------------------------------------
+-- Semigroup
+------------------------------------------------------------------------------
+
+-- | Polymorphic version of the 'Semigroup' operation '<>' of 'SerialT'.
+-- Appends two streams sequentially, yielding all elements from the first
+-- stream, and then all elements from the second stream.
+--
+-- @since 0.2.0
+{-# INLINE serial #-}
+serial :: IsStream t => t m a -> t m a -> t m a
+serial m1 m2 = fromStream $ K.serial (toStream m1) (toStream m2)
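+
+-- For illustration (a sketch, not part of this module), appending two finite
+-- streams with 'serial' yields all elements of the first stream followed by
+-- all elements of the second:
+--
+-- @
+-- s :: SerialT IO Int
+-- s = K.fromFoldable [1,2] `serial` K.fromFoldable [3,4]
+-- @
+--
+-- @s@ yields 1, 2, 3 and then 4.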
+
+------------------------------------------------------------------------------
+-- Monad
+------------------------------------------------------------------------------
+
+instance Monad m => Monad (SerialT m) where
+ return = pure
+ (SerialT (Stream m)) >>= f = SerialT $ Stream $ \st stp sng yld ->
+ let run x = (unStream x) (rstState st) stp sng yld
+ single a = run $ toStream (f a)
+ yieldk a r = run $ toStream $ f a <> (fromStream r >>= f)
+ in m (rstState st) stp single yieldk
+
+------------------------------------------------------------------------------
+-- Other instances
+------------------------------------------------------------------------------
+
+{-# INLINE_EARLY mapM #-}
+mapM :: (IsStream t, Monad m) => (a -> m b) -> t m a -> t m b
+mapM f m = fromStream $ D.toStreamK $ D.mapM f $ D.fromStreamK (toStream m)
+
+{-# INLINE map #-}
+map :: (IsStream t, Monad m) => (a -> b) -> t m a -> t m b
+map f = mapM (return . f)
+
+MONAD_APPLICATIVE_INSTANCE(SerialT,)
+MONAD_COMMON_INSTANCES(SerialT,)
+
+------------------------------------------------------------------------------
+-- WSerialT
+------------------------------------------------------------------------------
+
+-- | Wide serial composition or serial composition with a breadth first
+-- traversal. The 'Semigroup' instance of 'WSerialT' traverses
+-- the two streams in a breadth first manner. In other words, it interleaves
+-- two streams, yielding one element from each stream alternately.
+--
+-- @
+-- import Streamly
+-- import qualified "Streamly.Prelude" as S
+--
+-- main = ('toList' . 'wSerially' $ (fromFoldable [1,2]) \<\> (fromFoldable [3,4])) >>= print
+-- @
+-- @
+-- [1,3,2,4]
+-- @
+--
+-- Similarly, the 'Monad' instance interleaves the iterations of the
+-- inner and the outer loop, nesting loops in a breadth first manner.
+--
+--
+-- @
+-- main = 'runStream' . 'wSerially' $ do
+-- x <- return 1 \<\> return 2
+-- y <- return 3 \<\> return 4
+-- S.once $ print (x, y)
+-- @
+-- @
+-- (1,3)
+-- (2,3)
+-- (1,4)
+-- (2,4)
+-- @
+--
+-- Note that a serial composition with breadth first traversal can only combine
+-- a finite number of streams as it needs to retain state for each unfinished
+-- stream.
+--
+-- @since 0.2.0
+newtype WSerialT m a = WSerialT {getWSerialT :: Stream m a}
+ deriving (MonadTrans)
+
+-- | An interleaving serial IO stream of elements of type @a@. See 'WSerialT'
+-- documentation for more details.
+--
+-- @since 0.2.0
+type WSerial a = WSerialT IO a
+
+-- |
+-- @since 0.1.0
+{-# DEPRECATED InterleavedT "Please use 'WSerialT' instead." #-}
+type InterleavedT = WSerialT
+
+-- | Fix the type of a polymorphic stream as 'WSerialT'.
+--
+-- @since 0.2.0
+wSerially :: IsStream t => WSerialT m a -> t m a
+wSerially = adapt
+
+-- | Same as 'wSerially'.
+--
+-- @since 0.1.0
+{-# DEPRECATED interleaving "Please use wSerially instead." #-}
+interleaving :: IsStream t => WSerialT m a -> t m a
+interleaving = wSerially
+
+instance IsStream WSerialT where
+ toStream = getWSerialT
+ fromStream = WSerialT
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> WSerialT IO a -> WSerialT IO a #-}
+ consM :: Monad m => m a -> WSerialT m a -> WSerialT m a
+ consM m r = fromStream $ K.consMSerial m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> WSerialT IO a -> WSerialT IO a #-}
+ (|:) :: Monad m => m a -> WSerialT m a -> WSerialT m a
+ m |: r = fromStream $ K.consMSerial m (toStream r)
+
+------------------------------------------------------------------------------
+-- Semigroup
+------------------------------------------------------------------------------
+
+{-# INLINE interleave #-}
+interleave :: Stream m a -> Stream m a -> Stream m a
+interleave m1 m2 = Stream $ \st stp sng yld -> do
+ let stop = (unStream m2) (rstState st) stp sng yld
+ single a = yld a m2
+ yieldk a r = yld a (interleave m2 r)
+ (unStream m1) (rstState st) stop single yieldk
+
+-- | Polymorphic version of the 'Semigroup' operation '<>' of 'WSerialT'.
+-- Interleaves two streams, yielding one element from each stream alternately.
+--
+-- @since 0.2.0
+{-# INLINE wSerial #-}
+wSerial :: IsStream t => t m a -> t m a -> t m a
+wSerial m1 m2 = fromStream $ interleave (toStream m1) (toStream m2)
+
+instance Semigroup (WSerialT m a) where
+ (<>) = wSerial
+
+infixr 5 <=>
+
+-- | Same as 'wSerial'.
+--
+-- @since 0.1.0
+{-# DEPRECATED (<=>) "Please use 'wSerial' instead." #-}
+{-# INLINE (<=>) #-}
+(<=>) :: IsStream t => t m a -> t m a -> t m a
+(<=>) = wSerial
+
+------------------------------------------------------------------------------
+-- Monoid
+------------------------------------------------------------------------------
+
+instance Monoid (WSerialT m a) where
+ mempty = K.nil
+ mappend = (<>)
+
+------------------------------------------------------------------------------
+-- Monad
+------------------------------------------------------------------------------
+
+instance Monad m => Monad (WSerialT m) where
+ return = pure
+ (WSerialT (Stream m)) >>= f = WSerialT $ Stream $ \st stp sng yld ->
+ let run x = (unStream x) (rstState st) stp sng yld
+ single a = run $ toStream (f a)
+ yieldk a r = run $ toStream $ f a <> (fromStream r >>= f)
+ in m (rstState st) stp single yieldk
+
+------------------------------------------------------------------------------
+-- Other instances
+------------------------------------------------------------------------------
+
+MONAD_APPLICATIVE_INSTANCE(WSerialT,)
+MONAD_COMMON_INSTANCES(WSerialT,)
diff --git a/src/Streamly/Streams/StreamD.hs b/src/Streamly/Streams/StreamD.hs
new file mode 100644
index 0000000..001dc2e
--- /dev/null
+++ b/src/Streamly/Streams/StreamD.hs
@@ -0,0 +1,679 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE ExistentialQuantification #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE MagicHash #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE RankNTypes #-}
+{-# LANGUAGE UnboxedTuples #-}
+
+#include "inline.h"
+
+-- |
+-- Module : Streamly.Streams.StreamD
+-- Copyright : (c) 2018 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+-- Direct style re-implementation of the CPS style stream in the StreamK
+-- module. The symbol or suffix 'D' in this module denotes the "Direct" style.
+-- GHC is able to INLINE and fuse direct style code better, providing better
+-- performance than the CPS implementation.
+--
+-- @
+-- import qualified Streamly.Streams.StreamD as D
+-- @
+
+-- Some of the functions in this file have been adapted from the vector
+-- library, https://hackage.haskell.org/package/vector.
+
+module Streamly.Streams.StreamD
+ (
+ -- * The stream type
+ Step (..)
+ , Stream (..)
+
+ -- * Construction
+ , nil
+ , cons
+
+ -- * Deconstruction
+ , uncons
+
+ -- * Generation
+ -- ** Unfolds
+ , unfoldr
+ , unfoldrM
+
+ -- ** Specialized Generation
+ -- | Generate a monadic stream from a seed.
+ , repeat
+ , enumFromStepN
+
+ -- ** Conversions
+    -- | Transform an input structure into a stream. Note that the direct
+    -- style stream does not support @fromFoldable@.
+ , yield
+ , yieldM
+ , fromList
+ , fromListM
+ , fromStreamK
+
+ -- * Elimination
+ -- ** General Folds
+ , foldr
+ , foldrM
+ , foldl'
+ , foldlM'
+
+ -- ** Specialized Folds
+ , runStream
+ , null
+ , head
+ , tail
+ , last
+ , elem
+ , notElem
+ , all
+ , any
+ , maximum
+ , minimum
+
+ -- ** Map and Fold
+ , mapM_
+
+ -- ** Conversions
+ -- | Transform a stream into another type.
+ , toList
+ , toStreamK
+
+ -- * Transformation
+ -- ** By folding (scans)
+ , scanlM'
+
+ -- * Filtering
+ , filter
+ , filterM
+ , take
+ , takeWhile
+ , takeWhileM
+ , drop
+ , dropWhile
+ , dropWhileM
+
+ -- * Mapping
+ , map
+ , mapM
+
+ -- ** Map and Filter
+ , mapMaybe
+ , mapMaybeM
+
+ -- * Zipping
+ , zipWith
+ , zipWithM
+ )
+where
+
+import Data.Maybe (fromJust, isJust)
+import GHC.Types ( SPEC(..) )
+import Prelude
+ hiding (map, mapM, mapM_, repeat, foldr, last, take, filter,
+ takeWhile, drop, dropWhile, all, any, maximum, minimum, elem,
+ notElem, null, head, tail, zipWith)
+
+import Streamly.SVar (MonadAsync, State(..), defState, rstState)
+import qualified Streamly.Streams.StreamK as K
+
+------------------------------------------------------------------------------
+-- The direct style stream type
+------------------------------------------------------------------------------
+
+-- | A stream is a succession of 'Step's. A 'Yield' produces a single value and
+-- the next state of the stream. 'Stop' indicates there are no more values in
+-- the stream.
+data Step s a = Yield a s | Stop
+
+instance Functor (Step s) where
+ {-# INLINE fmap #-}
+ fmap f (Yield x s) = Yield (f x) s
+ fmap _ Stop = Stop
+
+-- gst = global state
+-- | A stream consists of a step function that generates the next step given a
+-- current state, and the current state.
+data Stream m a = forall s. Stream (State K.Stream m a -> s -> m (Step s a)) s
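+
+-- For illustration, a direct style stream can be written by hand by providing
+-- a step function and an initial state. The following is a hypothetical
+-- sketch, not part of this module; it counts down from a given number:
+--
+-- @
+-- countdown :: Monad m => Int -> Stream m Int
+-- countdown n = Stream step n
+--     where
+--     -- the step function ignores the global state and decrements the counter
+--     step _ i
+--         | i <= 0    = return Stop
+--         | otherwise = return (Yield i (i - 1))
+-- @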
+
+------------------------------------------------------------------------------
+-- Construction
+------------------------------------------------------------------------------
+
+-- | An empty 'Stream'.
+{-# INLINE_NORMAL nil #-}
+nil :: Monad m => Stream m a
+nil = Stream (\_ _ -> return Stop) ()
+
+-- | Can fuse, but costs O(n^2) when a stream of @n@ elements is built by
+-- repeated application of 'cons'.
+cons :: Monad m => a -> Stream m a -> Stream m a
+cons x (Stream step state) = Stream step1 Nothing
+ where
+ step1 _ Nothing = return $ Yield x (Just state)
+ step1 gst (Just st) = do
+ r <- step (rstState gst) st
+ case r of
+ Yield a s -> return $ Yield a (Just s)
+ Stop -> return Stop
+
+-------------------------------------------------------------------------------
+-- Deconstruction
+-------------------------------------------------------------------------------
+
+-- Does not fuse, has the same performance as the StreamK version.
+{-# INLINE_NORMAL uncons #-}
+uncons :: Monad m => Stream m a -> m (Maybe (a, Stream m a))
+uncons (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ return $ case r of
+ Yield x s -> Just (x, (Stream step s))
+ Stop -> Nothing
+
+------------------------------------------------------------------------------
+-- Generation by unfold
+------------------------------------------------------------------------------
+
+{-# INLINE_NORMAL unfoldrM #-}
+unfoldrM :: Monad m => (s -> m (Maybe (a, s))) -> s -> Stream m a
+unfoldrM next state = Stream step state
+ where
+ {-# INLINE_LATE step #-}
+ step _ st = do
+ r <- next st
+ return $ case r of
+ Just (x, s) -> Yield x s
+ Nothing -> Stop
+
+{-# INLINE_LATE unfoldr #-}
+unfoldr :: Monad m => (s -> Maybe (a, s)) -> s -> Stream m a
+unfoldr f = unfoldrM (return . f)
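+
+-- As an illustrative sketch (not part of this module), a bounded range can be
+-- generated with 'unfoldr' by threading the current value as the seed:
+--
+-- @
+-- range :: Monad m => Int -> Int -> Stream m Int
+-- range lo hi = unfoldr (\i -> if i > hi then Nothing else Just (i, i + 1)) lo
+-- @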
+
+------------------------------------------------------------------------------
+-- Specialized Generation
+------------------------------------------------------------------------------
+
+repeat :: Monad m => a -> Stream m a
+repeat x = Stream (\_ _ -> return $ Yield x ()) ()
+
+{-# INLINE_NORMAL enumFromStepN #-}
+enumFromStepN :: (Num a, Monad m) => a -> a -> Int -> Stream m a
+enumFromStepN from stride n =
+ from `seq` stride `seq` n `seq` Stream step (from, n)
+ where
+ {-# INLINE_LATE step #-}
+ step _ (x, i) | i > 0 = return $ Yield x (x + stride, i - 1)
+ | otherwise = return $ Stop
+
+-------------------------------------------------------------------------------
+-- Generation by Conversion
+-------------------------------------------------------------------------------
+
+-- | Create a singleton 'Stream' from a pure value.
+{-# INLINE_NORMAL yield #-}
+yield :: Monad m => a -> Stream m a
+yield x = Stream (\_ s -> return $ step undefined s) True
+ where
+ {-# INLINE_LATE step #-}
+ step _ True = Yield x False
+ step _ False = Stop
+
+-- | Create a singleton 'Stream' from a monadic action.
+{-# INLINE_NORMAL yieldM #-}
+yieldM :: Monad m => m a -> Stream m a
+yieldM m = Stream step True
+ where
+ {-# INLINE_LATE step #-}
+ step _ True = m >>= \x -> return $ Yield x False
+ step _ False = return Stop
+
+-- XXX we need the MonadAsync constraint because of a rewrite rule.
+-- | Convert a list of monadic actions to a 'Stream'.
+{-# INLINE_LATE fromListM #-}
+fromListM :: MonadAsync m => [m a] -> Stream m a
+fromListM zs = Stream step zs
+ where
+ {-# INLINE_LATE step #-}
+ step _ (m:ms) = m >>= \x -> return $ Yield x ms
+ step _ [] = return Stop
+
+-- | Convert a list of pure values to a 'Stream'.
+{-# INLINE_LATE fromList #-}
+fromList :: Monad m => [a] -> Stream m a
+fromList zs = Stream step zs
+ where
+ {-# INLINE_LATE step #-}
+ step _ (x:xs) = return $ Yield x xs
+ step _ [] = return Stop
+
+-- XXX pass the state to streamD
+{-# INLINE_LATE fromStreamK #-}
+fromStreamK :: Monad m => K.Stream m a -> Stream m a
+fromStreamK m = Stream step m
+ where
+ step gst m1 =
+ let stop = return Stop
+ single a = return $ Yield a K.nil
+ yieldk a r = return $ Yield a r
+ in K.unStream m1 gst stop single yieldk
+
+------------------------------------------------------------------------------
+-- Elimination by Folds
+------------------------------------------------------------------------------
+
+{-# INLINE_NORMAL foldrM #-}
+foldrM :: Monad m => (a -> b -> m b) -> b -> Stream m a -> m b
+foldrM f z (Stream step state) = go SPEC state
+ where
+ go !_ st = do
+ r <- step defState st
+ case r of
+ Yield x s -> go SPEC s >>= f x
+ Stop -> return z
+
+{-# INLINE_NORMAL foldr #-}
+foldr :: Monad m => (a -> b -> b) -> b -> Stream m a -> m b
+foldr f = foldrM (\a b -> return (f a b))
+
+{-# INLINE_NORMAL foldlM' #-}
+foldlM' :: Monad m => (b -> a -> m b) -> b -> Stream m a -> m b
+foldlM' fstep begin (Stream step state) = go SPEC begin state
+ where
+ go !_ acc st = acc `seq` do
+ r <- step defState st
+ case r of
+ Yield x s -> do
+ acc' <- fstep acc x
+ go SPEC acc' s
+ Stop -> return acc
+
+{-# INLINE foldl' #-}
+foldl' :: Monad m => (b -> a -> b) -> b -> Stream m a -> m b
+foldl' fstep = foldlM' (\b a -> return (fstep b a))
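+
+-- For example (an illustrative sketch), a strict left fold can sum a stream:
+--
+-- @
+-- sumStream :: Monad m => Stream m Int -> m Int
+-- sumStream = foldl' (+) 0
+-- @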
+
+------------------------------------------------------------------------------
+-- Specialized Folds
+------------------------------------------------------------------------------
+
+-- | Run a streaming composition, discard the results.
+{-# INLINE_LATE runStream #-}
+runStream :: Monad m => Stream m a -> m ()
+runStream (Stream step state) = go SPEC state
+ where
+ go !_ st = do
+ r <- step defState st
+ case r of
+ Yield _ s -> go SPEC s
+ Stop -> return ()
+
+{-# INLINE_NORMAL null #-}
+null :: Monad m => Stream m a -> m Bool
+null (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ case r of
+ Yield _ _ -> return False
+ Stop -> return True
+
+-- XXX SPEC?
+{-# INLINE_NORMAL head #-}
+head :: Monad m => Stream m a -> m (Maybe a)
+head (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ case r of
+ Yield x _ -> return (Just x)
+ Stop -> return Nothing
+
+-- Does not fuse, has the same performance as the StreamK version.
+{-# INLINE_NORMAL tail #-}
+tail :: Monad m => Stream m a -> m (Maybe (Stream m a))
+tail (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ case r of
+ Yield _ s -> return (Just $ Stream step s)
+ Stop -> return Nothing
+
+-- XXX will it fuse? need custom impl?
+{-# INLINE_NORMAL last #-}
+last :: Monad m => Stream m a -> m (Maybe a)
+last = foldl' (\_ y -> Just y) Nothing
+
+{-# INLINE_NORMAL elem #-}
+elem :: (Monad m, Eq a) => a -> Stream m a -> m Bool
+elem e (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ case r of
+ Yield x s ->
+ if x == e
+ then return True
+ else go s
+ Stop -> return False
+
+{-# INLINE_NORMAL notElem #-}
+notElem :: (Monad m, Eq a) => a -> Stream m a -> m Bool
+notElem e (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ case r of
+ Yield x s ->
+ if x == e
+ then return False
+ else go s
+ Stop -> return True
+
+{-# INLINE_NORMAL all #-}
+all :: Monad m => (a -> Bool) -> Stream m a -> m Bool
+all p (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ case r of
+ Yield x s ->
+ if p x
+ then go s
+ else return False
+ Stop -> return True
+
+{-# INLINE_NORMAL any #-}
+any :: Monad m => (a -> Bool) -> Stream m a -> m Bool
+any p (Stream step state) = go state
+ where
+ go st = do
+ r <- step defState st
+ case r of
+ Yield x s ->
+ if p x
+ then return True
+ else go s
+ Stop -> return False
+
+{-# INLINE_NORMAL maximum #-}
+maximum :: (Monad m, Ord a) => Stream m a -> m (Maybe a)
+maximum (Stream step state) = go Nothing state
+ where
+ go Nothing st = do
+ r <- step defState st
+ case r of
+ Yield x s -> go (Just x) s
+ Stop -> return Nothing
+ go (Just acc) st = do
+ r <- step defState st
+ case r of
+ Yield x s ->
+ if acc <= x
+ then go (Just x) s
+ else go (Just acc) s
+ Stop -> return (Just acc)
+
+{-# INLINE_NORMAL minimum #-}
+minimum :: (Monad m, Ord a) => Stream m a -> m (Maybe a)
+minimum (Stream step state) = go Nothing state
+ where
+ go Nothing st = do
+ r <- step defState st
+ case r of
+ Yield x s -> go (Just x) s
+ Stop -> return Nothing
+ go (Just acc) st = do
+ r <- step defState st
+ case r of
+ Yield x s ->
+ if acc <= x
+ then go (Just acc) s
+ else go (Just x) s
+ Stop -> return (Just acc)
+
+------------------------------------------------------------------------------
+-- Map and Fold
+------------------------------------------------------------------------------
+
+-- | Execute a monadic action for each element of the 'Stream'.
+{-# INLINE_NORMAL mapM_ #-}
+mapM_ :: Monad m => (a -> m b) -> Stream m a -> m ()
+mapM_ m = runStream . mapM m
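+
+-- For example (illustrative only), @mapM_ print (fromList [1,2,3])@ prints
+-- each element on a separate line and returns @()@.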
+
+------------------------------------------------------------------------------
+-- Converting folds
+------------------------------------------------------------------------------
+
+{-# INLINE toList #-}
+toList :: Monad m => Stream m a -> m [a]
+toList = foldr (:) []
+
+-- Convert a direct style stream to a CPS encoded stream.
+{-# INLINE_LATE toStreamK #-}
+toStreamK :: Monad m => Stream m a -> K.Stream m a
+toStreamK (Stream step state) = go state
+ where
+ go st = K.Stream $ \gst stp _ yld -> do
+ r <- step gst st
+ case r of
+ Yield x s -> yld x (go s)
+ Stop -> stp
+
+#ifndef DISABLE_FUSION
+{-# RULES "fromStreamK/toStreamK fusion"
+ forall s. toStreamK (fromStreamK s) = s #-}
+{-# RULES "toStreamK/fromStreamK fusion"
+ forall s. fromStreamK (toStreamK s) = s #-}
+#endif
+
+------------------------------------------------------------------------------
+-- Transformation by Folding (Scans)
+------------------------------------------------------------------------------
+
+{-# INLINE_NORMAL postscanlM' #-}
+postscanlM' :: Monad m => (b -> a -> m b) -> b -> Stream m a -> Stream m b
+postscanlM' fstep begin (Stream step state) =
+ begin `seq` Stream step' (state, begin)
+ where
+ {-# INLINE_LATE step' #-}
+ step' gst (st, acc) = acc `seq` do
+ r <- step (rstState gst) st
+ case r of
+ Yield x s -> do
+ y <- fstep acc x
+ y `seq` return (Yield y (s, y))
+ Stop -> return Stop
+
+{-# INLINE scanlM' #-}
+scanlM' :: Monad m => (b -> a -> m b) -> b -> Stream m a -> Stream m b
+scanlM' fstep begin s = begin `seq` (begin `cons` postscanlM' fstep begin s)
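+
+-- A minimal usage sketch: a running sum over a stream. A pure step function
+-- can be lifted with 'return'; note that the scan yields the initial
+-- accumulator first.
+--
+-- @
+-- runningSum :: Monad m => Stream m Int -> Stream m Int
+-- runningSum = scanlM' (\acc x -> return (acc + x)) 0
+-- @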
+
+-------------------------------------------------------------------------------
+-- Filtering
+-------------------------------------------------------------------------------
+
+{-# INLINE_NORMAL take #-}
+take :: Monad m => Int -> Stream m a -> Stream m a
+take n (Stream step state) = n `seq` Stream step' (state, 0)
+ where
+ {-# INLINE_LATE step' #-}
+ step' gst (st, i) | i < n = do
+ r <- step (rstState gst) st
+ return $ case r of
+ Yield x s -> Yield x (s, i + 1)
+ Stop -> Stop
+ step' _ (_, _) = return Stop
+
+{-# INLINE_NORMAL takeWhileM #-}
+takeWhileM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a
+takeWhileM f (Stream step state) = Stream step' state
+ where
+ {-# INLINE_LATE step' #-}
+ step' gst st = do
+ r <- step (rstState gst) st
+ case r of
+ Yield x s -> do
+ b <- f x
+ return $ if b then Yield x s else Stop
+ Stop -> return $ Stop
+
+{-# INLINE takeWhile #-}
+takeWhile :: Monad m => (a -> Bool) -> Stream m a -> Stream m a
+takeWhile f = takeWhileM (return . f)
+
+{-# INLINE_NORMAL drop #-}
+drop :: Monad m => Int -> Stream m a -> Stream m a
+drop n (Stream step state) = Stream step' (state, Just n)
+ where
+ {-# INLINE_LATE step' #-}
+ step' gst (st, Just i)
+ | i > 0 = do
+ r <- step (rstState gst) st
+ case r of
+ Yield _ s -> step' (rstState gst) (s, Just (i - 1))
+ Stop -> return Stop
+ | otherwise = step' gst (st, Nothing)
+
+ step' gst (st, Nothing) = do
+ r <- step (rstState gst) st
+ return $ case r of
+ Yield x s -> Yield x (s, Nothing)
+ Stop -> Stop
+
+data DropWhileState s a
+ = DropWhileDrop s
+ | DropWhileYield a s
+ | DropWhileNext s
+
+{-# INLINE_NORMAL dropWhileM #-}
+dropWhileM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a
+dropWhileM f (Stream step state) = Stream step' (DropWhileDrop state)
+ where
+ {-# INLINE_LATE step' #-}
+ step' gst (DropWhileDrop st) = do
+ r <- step (rstState gst) st
+ case r of
+ Yield x s -> do
+ b <- f x
+ if b
+ then step' (rstState gst) (DropWhileDrop s)
+ else step' (rstState gst) (DropWhileYield x s)
+ Stop -> return Stop
+
+ step' gst (DropWhileNext st) = do
+ r <- step (rstState gst) st
+ case r of
+ Yield x s -> step' (rstState gst) (DropWhileYield x s)
+ Stop -> return Stop
+
+ step' _ (DropWhileYield x st) = return $ Yield x (DropWhileNext st)
+
+{-# INLINE dropWhile #-}
+dropWhile :: Monad m => (a -> Bool) -> Stream m a -> Stream m a
+dropWhile f = dropWhileM (return . f)
+
+{-# INLINE_NORMAL filterM #-}
+filterM :: Monad m => (a -> m Bool) -> Stream m a -> Stream m a
+filterM f (Stream step state) = Stream step' state
+ where
+ {-# INLINE_LATE step' #-}
+ step' gst st = do
+ r <- step (rstState gst) st
+ case r of
+ Yield x s -> do
+ b <- f x
+ if b
+ then return $ Yield x s
+ else step' (rstState gst) s
+ Stop -> return $ Stop
+
+{-# INLINE filter #-}
+filter :: Monad m => (a -> Bool) -> Stream m a -> Stream m a
+filter f = filterM (return . f)
+
+------------------------------------------------------------------------------
+-- Transformation by Mapping
+------------------------------------------------------------------------------
+
+-- | Map a monadic function over a 'Stream'.
+{-# INLINE_NORMAL mapM #-}
+mapM :: Monad m => (a -> m b) -> Stream m a -> Stream m b
+mapM f (Stream step state) = Stream step' state
+ where
+ {-# INLINE_LATE step' #-}
+ step' gst st = do
+ r <- step (rstState gst) st
+ case r of
+ Yield x s -> f x >>= \a -> return $ Yield a s
+ Stop -> return Stop
+
+{-# INLINE map #-}
+map :: Monad m => (a -> b) -> Stream m a -> Stream m b
+map f = mapM (return . f)
+
+------------------------------------------------------------------------------
+-- Transformation by Map and Filter
+------------------------------------------------------------------------------
+
+-- XXX Will this always fuse properly?
+{-# INLINE_NORMAL mapMaybe #-}
+mapMaybe :: Monad m => (a -> Maybe b) -> Stream m a -> Stream m b
+mapMaybe f = fmap fromJust . filter isJust . map f
+
+{-# INLINE_NORMAL mapMaybeM #-}
+mapMaybeM :: Monad m => (a -> m (Maybe b)) -> Stream m a -> Stream m b
+mapMaybeM f = fmap fromJust . filter isJust . mapM f
+
+------------------------------------------------------------------------------
+-- Zipping
+------------------------------------------------------------------------------
+
+{-# INLINE_NORMAL zipWithM #-}
+zipWithM :: Monad m
+ => (a -> b -> m c) -> Stream m a -> Stream m b -> Stream m c
+zipWithM f (Stream stepa ta) (Stream stepb tb) = Stream step (ta, tb, Nothing)
+ where
+ {-# INLINE_LATE step #-}
+ step gst (sa, sb, Nothing) = do
+ r <- stepa (rstState gst) sa
+ case r of
+ Yield x sa' -> step gst (sa', sb, Just x)
+ Stop -> return Stop
+
+ step gst (sa, sb, Just x) = do
+ r <- stepb (rstState gst) sb
+ case r of
+ Yield y sb' -> do
+ z <- f x y
+ return $ Yield z (sa, sb', Nothing)
+ Stop -> return Stop
+
+{-# RULES "zipWithM xs xs"
+ forall f xs. zipWithM f xs xs = mapM (\x -> f x x) xs #-}
+
+{-# INLINE zipWith #-}
+zipWith :: Monad m => (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c
+zipWith f = zipWithM (\a b -> return (f a b))
+
+------------------------------------------------------------------------------
+-- Instances
+------------------------------------------------------------------------------
+
+instance Monad m => Functor (Stream m) where
+ {-# INLINE fmap #-}
+ fmap = map
diff --git a/src/Streamly/Streams/StreamK.hs b/src/Streamly/Streams/StreamK.hs
new file mode 100644
index 0000000..f4d1ccb
--- /dev/null
+++ b/src/Streamly/Streams/StreamK.hs
@@ -0,0 +1,909 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE InstanceSigs #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE MagicHash #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE RankNTypes #-}
+{-# LANGUAGE UnboxedTuples #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+-- |
+-- Module : Streamly.Streams.StreamK
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+-- Continuation passing style (CPS) stream implementation. The symbol or
+-- suffix 'K' in this module denotes the CPS style, a mnemonic for
+-- "Kontinuation".
+--
+-- @
+-- import qualified Streamly.Streams.StreamK as K
+-- @
+--
+module Streamly.Streams.StreamK
+ (
+ -- * A class for streams
+ IsStream (..)
+ , adapt
+
+ -- * The stream type
+ , Stream (..)
+
+ -- * Construction
+ , mkStream
+ , nil
+ , cons
+ , (.:)
+
+ -- * Asynchronous construction
+ , nilK
+ , yieldK
+ , consK
+
+ -- * Deconstruction
+ , uncons
+
+ -- * Generation
+ -- ** Unfolds
+ , unfoldr
+ , unfoldrM
+
+ -- ** Specialized Generation
+ , repeat
+
+ -- ** Conversions
+ , yield
+ , yieldM
+ , fromFoldable
+ , fromList
+ , fromStreamK
+
+ -- * Elimination
+ -- ** General Folds
+ , foldStream
+ , foldr
+ , foldrM
+ , foldl'
+ , foldlM'
+ , foldx
+ , foldxM
+
+ -- ** Specialized Folds
+ , runStream
+ , null
+ , head
+ , tail
+ , elem
+ , notElem
+ , all
+ , any
+ , last
+ , minimum
+ , maximum
+
+ -- ** Map and Fold
+ , mapM_
+
+ -- ** Conversions
+ , toList
+ , toStreamK
+
+ -- * Transformation
+ -- ** By folding (scans)
+ , scanl'
+ , scanx
+
+ -- ** Filtering
+ , filter
+ , take
+ , takeWhile
+ , drop
+ , dropWhile
+
+ -- ** Mapping
+ , map
+ , mapM
+ , sequence
+
+ -- ** Map and Filter
+ , mapMaybe
+
+ -- * Semigroup Style Composition
+ , serial
+
+ -- * Utilities
+ , consMSerial
+ , bindWith
+ , withLocal
+
+ -- * Deprecated
+ , Streaming -- deprecated
+ , once -- deprecated
+ )
+where
+
+import Control.Monad (void)
+import Control.Monad.Reader.Class (MonadReader(..))
+import Control.Monad.Trans.Class (MonadTrans(lift))
+import Data.Semigroup (Semigroup(..))
+import Prelude
+ hiding (foldl, foldr, last, map, mapM, mapM_, repeat, sequence,
+ take, filter, all, any, takeWhile, drop, dropWhile, minimum,
+ maximum, elem, notElem, null, head, tail)
+import qualified Prelude
+
+import Streamly.SVar
+
+------------------------------------------------------------------------------
+-- The basic stream type
+------------------------------------------------------------------------------
+
+-- | The type @Stream m a@ represents a monadic stream of values of type 'a'
+-- constructed using actions in monad 'm'. It uses stop, singleton and yield
+-- continuations equivalent to the following direct style type:
+--
+-- @
+-- data Stream m a = Stop | Singleton a | Yield a (Stream m a)
+-- @
+--
+-- To facilitate parallel composition we maintain a local state in an 'SVar'
+-- that is shared across the streams being composed and is used for their
+-- synchronization.
+--
+-- The singleton case can be expressed in terms of stop and yield but we have
+-- it as a separate case to optimize composition operations for streams with
+-- a single element. We build singleton streams in the implementation of 'pure'
+-- for Applicative and Monad, and in 'lift' for MonadTrans.
+--
+newtype Stream m a =
+ Stream {
+ unStream :: forall r.
+ State Stream m a -- state
+ -> m r -- stop
+ -> (a -> m r) -- singleton
+ -> (a -> Stream m a -> m r) -- yield
+ -> m r
+ }
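+
+-- For illustration (a hypothetical sketch, not part of this module), a two
+-- element stream can be written directly in terms of the continuations: the
+-- first element goes to the yield continuation along with the rest of the
+-- stream, and the last element goes to the singleton continuation.
+--
+-- @
+-- twoElems :: Stream m Int
+-- twoElems = Stream $ \_ _ _ yld -> yld 1 (Stream $ \_ _ sng _ -> sng 2)
+-- @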
+
+------------------------------------------------------------------------------
+-- Types that can behave as a Stream
+------------------------------------------------------------------------------
+
+infixr 5 `consM`
+infixr 5 |:
+
+-- | Class of types that can represent a stream of elements of some type 'a' in
+-- some monad 'm'.
+--
+-- @since 0.2.0
+class IsStream t where
+ toStream :: t m a -> Stream m a
+ fromStream :: Stream m a -> t m a
+ -- | Constructs a stream by adding a monadic action at the head of an
+ -- existing stream. For example:
+ --
+ -- @
+ -- > toList $ getLine \`consM` getLine \`consM` nil
+ -- hello
+ -- world
+ -- ["hello","world"]
+ -- @
+ --
+ -- /Concurrent (do not use 'parallely' to construct infinite streams)/
+ --
+ -- @since 0.2.0
+ consM :: MonadAsync m => m a -> t m a -> t m a
+ -- | Operator equivalent of 'consM'. We can read it as "@parallel colon@"
+ -- to remember that @|@ comes before ':'.
+ --
+ -- @
+ -- > toList $ getLine |: getLine |: nil
+ -- hello
+ -- world
+ -- ["hello","world"]
+ -- @
+ --
+ -- @
+ -- let delay = threadDelay 1000000 >> print 1
+ -- runStream $ serially $ delay |: delay |: delay |: nil
+ -- runStream $ parallely $ delay |: delay |: delay |: nil
+ -- @
+ --
+ -- /Concurrent (do not use 'parallely' to construct infinite streams)/
+ --
+ -- @since 0.2.0
+ (|:) :: MonadAsync m => m a -> t m a -> t m a
+ -- We can define (|:) just as 'consM' but it is defined explicitly for each
+ -- type because we want to use SPECIALIZE pragma on the definition.
+
+-- | Same as 'IsStream'.
+--
+-- @since 0.1.0
+{-# DEPRECATED Streaming "Please use IsStream instead." #-}
+type Streaming = IsStream
+
+-------------------------------------------------------------------------------
+-- Type adapting combinators
+-------------------------------------------------------------------------------
+
+-- | Adapt any specific stream type to any other specific stream type.
+--
+-- @since 0.1.0
+adapt :: (IsStream t1, IsStream t2) => t1 m a -> t2 m a
+adapt = fromStream . toStream
+
+------------------------------------------------------------------------------
+-- Building a stream
+------------------------------------------------------------------------------
+
+-- | Build a stream from an 'SVar', a stop continuation, a singleton stream
+-- continuation and a yield continuation.
+mkStream:: IsStream t
+ => (forall r. State Stream m a
+ -> m r
+ -> (a -> m r)
+ -> (a -> t m a -> m r)
+ -> m r)
+ -> t m a
+mkStream k = fromStream $ Stream $ \st stp sng yld ->
+ let yieldk a r = yld a (toStream r)
+ in k (rstState st) stp sng yieldk
+
+------------------------------------------------------------------------------
+-- Construction
+------------------------------------------------------------------------------
+
+-- | An empty stream.
+--
+-- @
+-- > toList nil
+-- []
+-- @
+--
+-- @since 0.1.0
+nil :: IsStream t => t m a
+nil = fromStream $ Stream $ \_ stp _ _ -> stp
+
+infixr 5 `cons`
+
+-- Faster than 'consM' because there is no bind.
+-- | Construct a stream by adding a pure value at the head of an existing
+-- stream. For serial streams this is the same as @(return a) \`consM` r@ but
+-- more efficient. For concurrent streams this is not concurrent whereas
+-- 'consM' is concurrent. For example:
+--
+-- @
+-- > toList $ 1 \`cons` 2 \`cons` 3 \`cons` nil
+-- [1,2,3]
+-- @
+--
+-- @since 0.1.0
+cons :: IsStream t => a -> t m a -> t m a
+cons a r = fromStream $ Stream $ \_ _ _ yld -> yld a (toStream r)
+
+infixr 5 .:
+
+-- | Operator equivalent of 'cons'.
+--
+-- @
+-- > toList $ 1 .: 2 .: 3 .: nil
+-- [1,2,3]
+-- @
+--
+-- @since 0.1.1
+(.:) :: IsStream t => a -> t m a -> t m a
+(.:) = cons
+
+{-# INLINE consMSerial #-}
+consMSerial :: (Monad m) => m a -> Stream m a -> Stream m a
+consMSerial m r = Stream $ \_ _ _ yld -> m >>= \a -> yld a r
+
+------------------------------------------------------------------------------
+-- Asynchronous construction
+------------------------------------------------------------------------------
+
+-- | Make an empty stream from a callback function.
+nilK :: IsStream t => (forall r. m r -> m r) -> t m a
+nilK k = fromStream $ Stream $ \_ stp _ _ -> k stp
+
+-- | Make a singleton stream from a one shot callback function.
+yieldK :: IsStream t => (forall r. (a -> m r) -> m r) -> t m a
+yieldK k = fromStream $ Stream $ \_ _ sng _ -> k sng
+
+-- | Construct a stream from a callback function.
+consK :: IsStream t => (forall r. (a -> m r) -> m r) -> t m a -> t m a
+consK k r = fromStream $ Stream $ \_ _ _ yld -> k (\x -> yld x (toStream r))
+
+-- XXX consK with concurrent callbacks
+-- XXX Build a stream from a repeating callback function.
+
+-------------------------------------------------------------------------------
+-- IsStream Stream
+-------------------------------------------------------------------------------
+
+instance IsStream Stream where
+ toStream = id
+ fromStream = id
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> Stream IO a -> Stream IO a #-}
+ consM :: Monad m => m a -> Stream m a -> Stream m a
+ consM = consMSerial
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> Stream IO a -> Stream IO a #-}
+ (|:) :: Monad m => m a -> Stream m a -> Stream m a
+ (|:) = consMSerial
+
+-------------------------------------------------------------------------------
+-- Deconstruction
+-------------------------------------------------------------------------------
+
+{-# INLINE uncons #-}
+uncons :: (IsStream t, Monad m) => t m a -> m (Maybe (a, t m a))
+uncons m =
+ let stop = return Nothing
+ single a = return (Just (a, nil))
+ yieldk a r = return (Just (a, fromStream r))
+ in (unStream (toStream m)) defState stop single yieldk
+
+-------------------------------------------------------------------------------
+-- Generation
+-------------------------------------------------------------------------------
+
+{-# INLINE unfoldr #-}
+unfoldr :: IsStream t => (b -> Maybe (a, b)) -> b -> t m a
+unfoldr step = fromStream . go
+ where
+ go s = Stream $ \_ stp _ yld ->
+ case step s of
+ Nothing -> stp
+ Just (a, b) -> yld a (go b)
+
+{-# INLINE unfoldrM #-}
+unfoldrM :: (IsStream t, MonadAsync m) => (b -> m (Maybe (a, b))) -> b -> t m a
+unfoldrM step = go
+ where
+ go s = fromStream $ Stream $ \svr stp sng yld -> do
+ mayb <- step s
+ case mayb of
+ Nothing -> stp
+ Just (a, b) ->
+ unStream (toStream (return a |: go b)) svr stp sng yld
+
+-------------------------------------------------------------------------------
+-- Special generation
+-------------------------------------------------------------------------------
+
+-- Faster than 'yieldM' because there is no bind. Usually we can construct a
+-- stream from a pure value using 'pure' of an Applicative; however, in the
+-- case of Zip streams 'pure' creates an infinite stream.
+--
+-- | Create a singleton stream from a pure value. In monadic streams, 'pure' or
+-- 'return' can be used in place of 'yield', however, in Zip applicative
+-- streams 'pure' is equivalent to 'repeat'.
+--
+-- @since 0.4.0
+yield :: IsStream t => a -> t m a
+yield a = fromStream $ Stream $ \_ _ single _ -> single a
+
+-- | Create a singleton stream from a monadic action. Same as @m \`consM` nil@
+-- but more efficient.
+--
+-- @
+-- > toList $ yieldM getLine
+-- hello
+-- ["hello"]
+-- @
+--
+-- @since 0.4.0
+{-# INLINE yieldM #-}
+yieldM :: (Monad m, IsStream t) => m a -> t m a
+yieldM m = fromStream $ Stream $ \_ _ single _ -> m >>= single
+
+-- | Same as 'yieldM'.
+--
+-- @since 0.2.0
+{-# DEPRECATED once "Please use yieldM instead." #-}
+{-# INLINE once #-}
+once :: (Monad m, IsStream t) => m a -> t m a
+once = yieldM
+
+-- | Generate an infinite stream by repeating a pure value.
+--
+-- @since 0.4.0
+repeat :: IsStream t => a -> t m a
+repeat a = let x = cons a x in x
+
+-------------------------------------------------------------------------------
+-- Conversions
+-------------------------------------------------------------------------------
+
+-- | Construct a stream from a 'Foldable' containing pure values.
+--
+-- @since 0.2.0
+{-# INLINE fromFoldable #-}
+fromFoldable :: (IsStream t, Foldable f) => f a -> t m a
+fromFoldable = Prelude.foldr cons nil
+
+{-# INLINE fromList #-}
+fromList :: IsStream t => [a] -> t m a
+fromList = fromFoldable
+
+{-# INLINE fromStreamK #-}
+fromStreamK :: Stream m a -> Stream m a
+fromStreamK = id
+
+-------------------------------------------------------------------------------
+-- Elimination by Folding
+-------------------------------------------------------------------------------
+
+-- | Fold a stream by providing an SVar, a stop continuation, a singleton
+-- continuation and a yield continuation.
+foldStream
+ :: IsStream t
+ => State Stream m a
+ -> m r
+ -> (a -> m r)
+ -> (a -> t m a -> m r)
+ -> t m a
+ -> m r
+foldStream st blank single step m =
+ let yieldk a x = step a (fromStream x)
+ in (unStream (toStream m)) st blank single yieldk
+
+-- | Lazy right associative fold.
+foldr :: (IsStream t, Monad m) => (a -> b -> b) -> b -> t m a -> m b
+foldr step acc m = go (toStream m)
+ where
+ go m1 =
+ let stop = return acc
+ single a = return (step a acc)
+ yieldk a r = go r >>= \b -> return (step a b)
+ in (unStream m1) defState stop single yieldk
+
+-- | Lazy right fold with a monadic step function.
+{-# INLINE foldrM #-}
+foldrM :: (IsStream t, Monad m) => (a -> b -> m b) -> b -> t m a -> m b
+foldrM step acc m = go (toStream m)
+ where
+ go m1 =
+ let stop = return acc
+ single a = step a acc
+ yieldk a r = go r >>= step a
+ in (unStream m1) defState stop single yieldk
+
+-- | Strict left fold with an extraction function. Like the standard strict
+-- left fold, but applies a user supplied extraction function (the third
+-- argument) to the folded value at the end. This is designed to work with the
+-- @foldl@ library. The suffix @x@ is a mnemonic for extraction.
+{-# INLINE foldx #-}
+foldx :: (IsStream t, Monad m)
+ => (x -> a -> x) -> x -> (x -> b) -> t m a -> m b
+foldx step begin done m = get $ go (toStream m) begin
+ where
+ {-# NOINLINE get #-}
+ get m1 =
+ let single = return . done
+ in (unStream m1) undefined undefined single undefined
+
+    -- Note that this can be implemented by making a recursive call to "go";
+    -- however, that is more expensive because of unnecessary recursion that
+    -- cannot be tail call optimized. Unfolding the recursion explicitly via
+    -- continuations is much more efficient.
+ go m1 !acc = Stream $ \_ _ sng yld ->
+ let stop = sng acc
+ single a = sng $ step acc a
+ yieldk a r =
+ let stream = go r (step acc a)
+ in (unStream stream) defState undefined sng yld
+ in (unStream m1) defState stop single yieldk
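+
+-- As an illustrative sketch, the extraction function lets us fold into an
+-- intermediate accumulator and post-process it at the end, e.g. an average
+-- via a (sum, count) pair (a lazy pair is used here only for brevity):
+--
+-- @
+-- avg :: (IsStream t, Monad m) => t m Double -> m Double
+-- avg = foldx (\(s, n) x -> (s + x, n + 1)) (0, 0 :: Int)
+--             (\(s, n) -> s / fromIntegral n)
+-- @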
+
+-- | Strict left associative fold.
+{-# INLINE foldl' #-}
+foldl' :: (IsStream t, Monad m) => (b -> a -> b) -> b -> t m a -> m b
+foldl' step begin m = foldx step begin id m
+
+-- XXX replace the recursive "go" with explicit continuations.
+-- | Like 'foldx', but with a monadic step function.
+foldxM :: (IsStream t, Monad m)
+ => (x -> a -> m x) -> m x -> (x -> m b) -> t m a -> m b
+foldxM step begin done m = go begin (toStream m)
+ where
+ go !acc m1 =
+ let stop = acc >>= done
+ single a = acc >>= \b -> step b a >>= done
+ yieldk a r = acc >>= \b -> go (step b a) r
+ in (unStream m1) defState stop single yieldk
+
+-- | Like 'foldl'' but with a monadic step function.
+foldlM' :: (IsStream t, Monad m) => (b -> a -> m b) -> b -> t m a -> m b
+foldlM' step begin m = foldxM step (return begin) return m
+
+------------------------------------------------------------------------------
+-- Specialized folds
+------------------------------------------------------------------------------
+
+{-# INLINE runStream #-}
+runStream :: (Monad m, IsStream t) => t m a -> m ()
+runStream m = go (toStream m)
+ where
+ go m1 =
+ let stop = return ()
+ single _ = return ()
+ yieldk _ r = go (toStream r)
+ in unStream m1 defState stop single yieldk
+
+{-# INLINE null #-}
+null :: (IsStream t, Monad m) => t m a -> m Bool
+null m =
+ let stop = return True
+ single _ = return False
+ yieldk _ _ = return False
+ in unStream (toStream m) defState stop single yieldk
+
+{-# INLINE head #-}
+head :: (IsStream t, Monad m) => t m a -> m (Maybe a)
+head m =
+ let stop = return Nothing
+ single a = return (Just a)
+ yieldk a _ = return (Just a)
+ in unStream (toStream m) defState stop single yieldk
+
+{-# INLINE tail #-}
+tail :: (IsStream t, Monad m) => t m a -> m (Maybe (t m a))
+tail m =
+ let stop = return Nothing
+ single _ = return $ Just nil
+ yieldk _ r = return $ Just $ fromStream r
+ in unStream (toStream m) defState stop single yieldk
+
+{-# INLINE elem #-}
+elem :: (IsStream t, Monad m, Eq a) => a -> t m a -> m Bool
+elem e m = go (toStream m)
+ where
+ go m1 =
+ let stop = return False
+ single a = return (a == e)
+ yieldk a r = if a == e then return True else go r
+ in (unStream m1) defState stop single yieldk
+
+{-# INLINE notElem #-}
+notElem :: (IsStream t, Monad m, Eq a) => a -> t m a -> m Bool
+notElem e m = go (toStream m)
+ where
+ go m1 =
+ let stop = return True
+ single a = return (a /= e)
+ yieldk a r = if a == e then return False else go r
+ in (unStream m1) defState stop single yieldk
+
+all :: (IsStream t, Monad m) => (a -> Bool) -> t m a -> m Bool
+all p m = go (toStream m)
+ where
+ go m1 =
+ let single a | p a = return True
+ | otherwise = return False
+ yieldk a r | p a = go r
+ | otherwise = return False
+ in unStream m1 defState (return True) single yieldk
+
+any :: (IsStream t, Monad m) => (a -> Bool) -> t m a -> m Bool
+any p m = go (toStream m)
+ where
+ go m1 =
+ let single a | p a = return True
+ | otherwise = return False
+ yieldk a r | p a = return True
+ | otherwise = go r
+ in unStream m1 defState (return False) single yieldk
+
+-- | Extract the last element of the stream, if any.
+{-# INLINE last #-}
+last :: (IsStream t, Monad m) => t m a -> m (Maybe a)
+last = foldx (\_ y -> Just y) Nothing id
+
+{-# INLINE minimum #-}
+minimum :: (IsStream t, Monad m, Ord a) => t m a -> m (Maybe a)
+minimum m = go Nothing (toStream m)
+ where
+ go Nothing m1 =
+ let stop = return Nothing
+ single a = return (Just a)
+ yieldk a r = go (Just a) r
+ in unStream m1 defState stop single yieldk
+
+ go (Just res) m1 =
+ let stop = return (Just res)
+ single a =
+ if res <= a
+ then return (Just res)
+ else return (Just a)
+ yieldk a r =
+ if res <= a
+ then go (Just res) r
+ else go (Just a) r
+ in unStream m1 defState stop single yieldk
+
+{-# INLINE maximum #-}
+maximum :: (IsStream t, Monad m, Ord a) => t m a -> m (Maybe a)
+maximum m = go Nothing (toStream m)
+ where
+ go Nothing m1 =
+ let stop = return Nothing
+ single a = return (Just a)
+ yieldk a r = go (Just a) r
+ in unStream m1 defState stop single yieldk
+
+ go (Just res) m1 =
+ let stop = return (Just res)
+ single a =
+ if res <= a
+ then return (Just a)
+ else return (Just res)
+ yieldk a r =
+ if res <= a
+ then go (Just a) r
+ else go (Just res) r
+ in unStream m1 defState stop single yieldk
+
+------------------------------------------------------------------------------
+-- Map and Fold
+------------------------------------------------------------------------------
+
+-- | Apply a monadic action to each element of the stream and discard the
+-- output of the action.
+mapM_ :: (IsStream t, Monad m) => (a -> m b) -> t m a -> m ()
+mapM_ f m = go (toStream m)
+ where
+ go m1 =
+ let stop = return ()
+ single a = void (f a)
+ yieldk a r = f a >> go r
+ in (unStream m1) defState stop single yieldk
+
+------------------------------------------------------------------------------
+-- Converting folds
+------------------------------------------------------------------------------
+
+{-# INLINABLE toList #-}
+toList :: (IsStream t, Monad m) => t m a -> m [a]
+toList = foldr (:) []
+
+{-# INLINE toStreamK #-}
+toStreamK :: Stream m a -> Stream m a
+toStreamK = id
+
+-------------------------------------------------------------------------------
+-- Transformation by folding (Scans)
+-------------------------------------------------------------------------------
+
+{-# INLINE scanx #-}
+scanx :: IsStream t => (x -> a -> x) -> x -> (x -> b) -> t m a -> t m b
+scanx step begin done m =
+ cons (done begin) $ fromStream $ go (toStream m) begin
+ where
+ go m1 !acc = Stream $ \st stp sng yld ->
+ let single a = sng (done $ step acc a)
+ yieldk a r =
+ let s = step acc a
+ in yld (done s) (go r s)
+ in unStream m1 (rstState st) stp single yieldk
+
+{-# INLINE scanl' #-}
+scanl' :: IsStream t => (b -> a -> b) -> b -> t m a -> t m b
+scanl' step begin m = scanx step begin id m
+
+-------------------------------------------------------------------------------
+-- Filtering
+-------------------------------------------------------------------------------
+
+{-# INLINE filter #-}
+filter :: IsStream t => (a -> Bool) -> t m a -> t m a
+filter p m = fromStream $ go (toStream m)
+ where
+ go m1 = Stream $ \st stp sng yld ->
+ let single a | p a = sng a
+ | otherwise = stp
+ yieldk a r | p a = yld a (go r)
+ | otherwise = (unStream r) (rstState st) stp single yieldk
+ in unStream m1 (rstState st) stp single yieldk
+
+{-# INLINE take #-}
+take :: IsStream t => Int -> t m a -> t m a
+take n m = fromStream $ go n (toStream m)
+ where
+ go n1 m1 = Stream $ \st stp sng yld ->
+ let yieldk a r = yld a (go (n1 - 1) r)
+ in if n1 <= 0
+ then stp
+ else unStream m1 (rstState st) stp sng yieldk
+
+{-# INLINE takeWhile #-}
+takeWhile :: IsStream t => (a -> Bool) -> t m a -> t m a
+takeWhile p m = fromStream $ go (toStream m)
+ where
+ go m1 = Stream $ \st stp sng yld ->
+ let single a | p a = sng a
+ | otherwise = stp
+ yieldk a r | p a = yld a (go r)
+ | otherwise = stp
+ in unStream m1 (rstState st) stp single yieldk
+
+drop :: IsStream t => Int -> t m a -> t m a
+drop n m = fromStream $ Stream $ \st stp sng yld ->
+ unStream (go n (toStream m)) (rstState st) stp sng yld
+ where
+ go n1 m1 = Stream $ \st stp sng yld ->
+ let single _ = stp
+ yieldk _ r = (unStream $ go (n1 - 1) r) st stp sng yld
+            -- Somehow the "<=" check performs better than a ">" check
+ in if n1 <= 0
+ then unStream m1 st stp sng yld
+ else unStream m1 st stp single yieldk
+
+{-# INLINE dropWhile #-}
+dropWhile :: IsStream t => (a -> Bool) -> t m a -> t m a
+dropWhile p m = fromStream $ go (toStream m)
+ where
+ go m1 = Stream $ \st stp sng yld ->
+ let single a | p a = stp
+ | otherwise = sng a
+ yieldk a r | p a = (unStream r) (rstState st) stp single yieldk
+ | otherwise = yld a r
+ in unStream m1 (rstState st) stp single yieldk
+
+-------------------------------------------------------------------------------
+-- Mapping
+-------------------------------------------------------------------------------
+
+{-# INLINE map #-}
+map :: (IsStream t, Monad m) => (a -> b) -> t m a -> t m b
+map f m = fromStream $ Stream $ \st stp sng yld ->
+ let single = sng . f
+ yieldk a r = yld (f a) (fmap f r)
+ in unStream (toStream m) (rstState st) stp single yieldk
+
+-- Be careful when modifying this; it uses 'consM' (|:) deliberately to allow
+-- other stream types to overload it.
+{-# INLINE mapM #-}
+mapM :: (IsStream t, MonadAsync m) => (a -> m b) -> t m a -> t m b
+mapM f m = go (toStream m)
+ where
+ go m1 = fromStream $ Stream $ \st stp sng yld ->
+ let single a = f a >>= sng
+ yieldk a r = unStream (toStream (f a |: (go r))) st stp sng yld
+ in (unStream m1) (rstState st) stp single yieldk
+
+-- Be careful when modifying this; it uses 'consM' (|:) deliberately to allow
+-- other stream types to overload it.
+{-# INLINE sequence #-}
+sequence :: (IsStream t, MonadAsync m) => t m (m a) -> t m a
+sequence m = go (toStream m)
+ where
+ go m1 = fromStream $ Stream $ \st stp sng yld ->
+ let single ma = ma >>= sng
+ yieldk ma r = unStream (toStream $ ma |: go r) st stp sng yld
+ in (unStream m1) (rstState st) stp single yieldk
+
+-------------------------------------------------------------------------------
+-- Map and Filter
+-------------------------------------------------------------------------------
+
+{-# INLINE mapMaybe #-}
+mapMaybe :: IsStream t => (a -> Maybe b) -> t m a -> t m b
+mapMaybe f m = go (toStream m)
+ where
+ go m1 = fromStream $ Stream $ \st stp sng yld ->
+ let single a = case f a of
+ Just b -> sng b
+ Nothing -> stp
+ yieldk a r = case f a of
+ Just b -> yld b (toStream $ go r)
+ Nothing -> (unStream r) (rstState st) stp single yieldk
+ in unStream m1 (rstState st) stp single yieldk
+
+------------------------------------------------------------------------------
+-- Semigroup
+------------------------------------------------------------------------------
+
+-- | Concatenates two streams sequentially, i.e. the first stream is
+-- exhausted completely before yielding any element from the second stream.
+{-# INLINE serial #-}
+serial :: Stream m a -> Stream m a -> Stream m a
+serial m1 m2 = go m1
+ where
+ go (Stream m) = Stream $ \st stp sng yld ->
+ let stop = (unStream m2) (rstState st) stp sng yld
+ single a = yld a m2
+ yieldk a r = yld a (go r)
+ in m (rstState st) stop single yieldk
+
+instance Semigroup (Stream m a) where
+ (<>) = serial
+
+------------------------------------------------------------------------------
+-- Monoid
+------------------------------------------------------------------------------
+
+instance Monoid (Stream m a) where
+ mempty = nil
+ mappend = (<>)
+
+-------------------------------------------------------------------------------
+-- Functor
+-------------------------------------------------------------------------------
+
+instance Monad m => Functor (Stream m) where
+ fmap = map
+
+-------------------------------------------------------------------------------
+-- Bind utility
+-------------------------------------------------------------------------------
+
+{-# INLINE bindWith #-}
+bindWith
+ :: (forall c. Stream m c -> Stream m c -> Stream m c)
+ -> Stream m a
+ -> (a -> Stream m b)
+ -> Stream m b
+bindWith par m f = go m
+ where
+ go (Stream g) =
+ Stream $ \st stp sng yld ->
+ let run x = (unStream x) st stp sng yld
+ single a = run $ f a
+ yieldk a r = run $ f a `par` go r
+ in g (rstState st) stp single yieldk
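+
+-- A minimal sketch (hypothetical helper, not exported): combining the inner
+-- streams with 'serial' gives a depth first, list like monadic bind:
+--
+-- @
+-- bindSerial :: Stream m a -> (a -> Stream m b) -> Stream m b
+-- bindSerial = bindWith serial
+-- @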
+
+------------------------------------------------------------------------------
+-- Alternative & MonadPlus
+------------------------------------------------------------------------------
+
+_alt :: Stream m a -> Stream m a -> Stream m a
+_alt m1 m2 = Stream $ \st stp sng yld ->
+ let stop = unStream m2 (rstState st) stp sng yld
+ in unStream m1 (rstState st) stop sng yld
+
+------------------------------------------------------------------------------
+-- MonadReader
+------------------------------------------------------------------------------
+
+withLocal :: MonadReader r m => (r -> r) -> Stream m a -> Stream m a
+withLocal f m =
+ Stream $ \st stp sng yld ->
+ let single = local f . sng
+ yieldk a r = local f $ yld a (withLocal f r)
+ in (unStream m) (rstState st) (local f stp) single yieldk
+
+------------------------------------------------------------------------------
+-- MonadError
+------------------------------------------------------------------------------
+
+{-
+-- XXX handle and test cross thread state transfer
+withCatchError
+ :: MonadError e m
+ => Stream m a -> (e -> Stream m a) -> Stream m a
+withCatchError m h =
+ Stream $ \_ stp sng yld ->
+ let run x = unStream x Nothing stp sng yieldk
+ handle r = r `catchError` \e -> run $ h e
+ yieldk a r = yld a (withCatchError r h)
+ in handle $ run m
+-}
+
+-------------------------------------------------------------------------------
+-- Transformers
+-------------------------------------------------------------------------------
+
+instance MonadTrans Stream where
+ lift = yieldM
diff --git a/src/Streamly/Streams/Zip.hs b/src/Streamly/Streams/Zip.hs
new file mode 100644
index 0000000..f89cf6b
--- /dev/null
+++ b/src/Streamly/Streams/Zip.hs
@@ -0,0 +1,248 @@
+{-# LANGUAGE CPP #-}
+{-# LANGUAGE ConstraintKinds #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE GeneralizedNewtypeDeriving#-}
+{-# LANGUAGE InstanceSigs #-}
+{-# LANGUAGE MultiParamTypeClasses #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE UndecidableInstances #-} -- XXX
+
+-- |
+-- Module : Streamly.Streams.Zip
+-- Copyright : (c) 2017 Harendra Kumar
+--
+-- License : BSD3
+-- Maintainer : harendra.kumar@gmail.com
+-- Stability : experimental
+-- Portability : GHC
+--
+--
+module Streamly.Streams.Zip
+ (
+ zipWith
+ , zipWithM
+ , zipAsyncWith
+ , zipAsyncWithM
+
+ , ZipSerialM
+ , ZipSerial
+ , ZipStream -- deprecated
+ , zipSerially
+ , zipping -- deprecated
+
+ , ZipAsyncM
+ , ZipAsync
+ , zipAsyncly
+ , zippingAsync -- deprecated
+ )
+where
+
+import Data.Semigroup (Semigroup(..))
+import Prelude hiding (map, repeat, zipWith)
+
+import Streamly.Streams.StreamK (IsStream(..), Stream(..))
+import Streamly.Streams.Async (mkAsync')
+import Streamly.Streams.Serial (map)
+import Streamly.SVar (MonadAsync, rstState)
+
+import qualified Streamly.Streams.StreamK as K
+
+#include "Instances.hs"
+
+------------------------------------------------------------------------------
+-- Serial Zipping
+------------------------------------------------------------------------------
+
+{-# INLINE zipWithS #-}
+zipWithS :: (a -> b -> c) -> Stream m a -> Stream m b -> Stream m c
+zipWithS f m1 m2 = go m1 m2
+ where
+ go mx my = Stream $ \st stp sng yld -> do
+ let merge a ra =
+ let single2 b = sng (f a b)
+ yield2 b rb = yld (f a b) (go ra rb)
+ in unStream my (rstState st) stp single2 yield2
+ let single1 a = merge a K.nil
+ yield1 a ra = merge a ra
+ unStream mx (rstState st) stp single1 yield1
+
+-- | Zip two streams serially using a pure zipping function.
+--
+-- @since 0.1.0
+{-# INLINABLE zipWith #-}
+zipWith :: IsStream t => (a -> b -> c) -> t m a -> t m b -> t m c
+zipWith f m1 m2 = fromStream $ zipWithS f (toStream m1) (toStream m2)
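+
+-- For example (an illustrative sketch, using 'K.fromFoldable' to build the
+-- inputs):
+--
+-- @
+-- sums :: ZipSerial Int
+-- sums = zipWith (+) (K.fromFoldable [1,2,3]) (K.fromFoldable [10,20,30])
+-- @
+--
+-- @sums@ yields the elements 11, 22 and 33.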
+
+-- | Zip two streams serially using a monadic zipping function.
+--
+-- @since 0.1.0
+zipWithM :: (IsStream t, Monad m) => (a -> b -> m c) -> t m a -> t m b -> t m c
+zipWithM f m1 m2 = fromStream $ go (toStream m1) (toStream m2)
+ where
+ go mx my = Stream $ \st stp sng yld -> do
+ let merge a ra =
+ let runIt x = unStream x (rstState st) stp sng yld
+ single2 b = f a b >>= sng
+ yield2 b rb = f a b >>= \x -> runIt (x `K.cons` go ra rb)
+ in unStream my (rstState st) stp single2 yield2
+ let single1 a = merge a K.nil
+ yield1 a ra = merge a ra
+ unStream mx (rstState st) stp single1 yield1
+
+------------------------------------------------------------------------------
+-- Serially Zipping Streams
+------------------------------------------------------------------------------
+
+-- | The applicative instance of 'ZipSerialM' zips a number of streams
+-- serially, i.e. it produces one element from each stream serially and then
+-- zips all
+-- those elements.
+--
+-- @
+-- main = (toList . 'zipSerially' $ (,,) \<$\> s1 \<*\> s2 \<*\> s3) >>= print
+-- where s1 = fromFoldable [1, 2]
+-- s2 = fromFoldable [3, 4]
+-- s3 = fromFoldable [5, 6]
+-- @
+-- @
+-- [(1,3,5),(2,4,6)]
+-- @
+--
+-- The 'Semigroup' instance of this type works the same way as that of
+-- 'SerialT'.
+--
+-- @since 0.2.0
+newtype ZipSerialM m a = ZipSerialM {getZipSerialM :: Stream m a}
+ deriving (Semigroup, Monoid)
+
+-- |
+-- @since 0.1.0
+{-# DEPRECATED ZipStream "Please use 'ZipSerialM' instead." #-}
+type ZipStream = ZipSerialM
+
+-- | An IO stream whose applicative instance zips streams serially.
+--
+-- @since 0.2.0
+type ZipSerial a = ZipSerialM IO a
+
+-- | Fix the type of a polymorphic stream as 'ZipSerialM'.
+--
+-- @since 0.2.0
+zipSerially :: IsStream t => ZipSerialM m a -> t m a
+zipSerially = K.adapt
+
+-- | Same as 'zipSerially'.
+--
+-- @since 0.1.0
+{-# DEPRECATED zipping "Please use zipSerially instead." #-}
+zipping :: IsStream t => ZipSerialM m a -> t m a
+zipping = zipSerially
+
+instance IsStream ZipSerialM where
+ toStream = getZipSerialM
+ fromStream = ZipSerialM
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> ZipSerialM IO a -> ZipSerialM IO a #-}
+ consM :: Monad m => m a -> ZipSerialM m a -> ZipSerialM m a
+ consM m r = fromStream $ K.consMSerial m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> ZipSerialM IO a -> ZipSerialM IO a #-}
+ (|:) :: Monad m => m a -> ZipSerialM m a -> ZipSerialM m a
+ m |: r = fromStream $ K.consMSerial m (toStream r)
+
+instance Monad m => Functor (ZipSerialM m) where
+ fmap = map
+
+instance Monad m => Applicative (ZipSerialM m) where
+ pure = ZipSerialM . K.repeat
+ m1 <*> m2 = fromStream $ zipWith id (toStream m1) (toStream m2)
+
+------------------------------------------------------------------------------
+-- Parallel Zipping
+------------------------------------------------------------------------------
+
+-- | Zip two streams concurrently (i.e. both the elements being zipped are
+-- generated concurrently) using a pure zipping function.
+--
+-- @since 0.1.0
+zipAsyncWith :: (IsStream t, MonadAsync m)
+ => (a -> b -> c) -> t m a -> t m b -> t m c
+zipAsyncWith f m1 m2 = fromStream $ Stream $ \st stp sng yld -> do
+ ma <- mkAsync' (rstState st) m1
+ mb <- mkAsync' (rstState st) m2
+ unStream (toStream (zipWith f ma mb)) (rstState st) stp sng yld
+
+-- | Zip two streams concurrently (i.e. both the elements being zipped are
+-- generated concurrently) using a monadic zipping function.
+--
+-- @since 0.4.0
+zipAsyncWithM :: (IsStream t, MonadAsync m)
+ => (a -> b -> m c) -> t m a -> t m b -> t m c
+zipAsyncWithM f m1 m2 = fromStream $ Stream $ \st stp sng yld -> do
+ ma <- mkAsync' (rstState st) m1
+ mb <- mkAsync' (rstState st) m2
+ unStream (toStream (zipWithM f ma mb)) (rstState st) stp sng yld
+
+------------------------------------------------------------------------------
+-- Parallely Zipping Streams
+------------------------------------------------------------------------------
+--
+-- | Like 'ZipSerialM' but zips in parallel, generating all the elements to be
+-- zipped concurrently.
+--
+-- @
+-- main = (toList . 'zipAsyncly' $ (,,) \<$\> s1 \<*\> s2 \<*\> s3) >>= print
+-- where s1 = fromFoldable [1, 2]
+-- s2 = fromFoldable [3, 4]
+-- s3 = fromFoldable [5, 6]
+-- @
+-- @
+-- [(1,3,5),(2,4,6)]
+-- @
+--
+-- The 'Semigroup' instance of this type works the same way as that of
+-- 'SerialT'.
+--
+-- @since 0.2.0
+newtype ZipAsyncM m a = ZipAsyncM {getZipAsyncM :: Stream m a}
+ deriving (Semigroup, Monoid)
+
+-- | An IO stream whose applicative instance zips streams concurrently.
+--
+-- @since 0.2.0
+type ZipAsync a = ZipAsyncM IO a
+
+-- | Fix the type of a polymorphic stream as 'ZipAsyncM'.
+--
+-- @since 0.2.0
+zipAsyncly :: IsStream t => ZipAsyncM m a -> t m a
+zipAsyncly = K.adapt
+
+-- | Same as 'zipAsyncly'.
+--
+-- @since 0.1.0
+{-# DEPRECATED zippingAsync "Please use zipAsyncly instead." #-}
+zippingAsync :: IsStream t => ZipAsyncM m a -> t m a
+zippingAsync = zipAsyncly
+
+instance IsStream ZipAsyncM where
+ toStream = getZipAsyncM
+ fromStream = ZipAsyncM
+
+ {-# INLINE consM #-}
+ {-# SPECIALIZE consM :: IO a -> ZipAsyncM IO a -> ZipAsyncM IO a #-}
+ consM :: Monad m => m a -> ZipAsyncM m a -> ZipAsyncM m a
+ consM m r = fromStream $ K.consMSerial m (toStream r)
+
+ {-# INLINE (|:) #-}
+ {-# SPECIALIZE (|:) :: IO a -> ZipAsyncM IO a -> ZipAsyncM IO a #-}
+ (|:) :: Monad m => m a -> ZipAsyncM m a -> ZipAsyncM m a
+ m |: r = fromStream $ K.consMSerial m (toStream r)
+
+instance Monad m => Functor (ZipAsyncM m) where
+ fmap = map
+
+instance MonadAsync m => Applicative (ZipAsyncM m) where
+ pure = ZipAsyncM . K.repeat
+ m1 <*> m2 = zipAsyncWith id m1 m2
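
A minimal usage sketch of the zipping API defined above, via the Streamly.Prelude
re-exports used by the test-suite changes later in this patch (S.zipWith,
S.zipWithM, S.fromFoldable, S.toList) and the zipSerially adapter; S.zipAsyncWith
is assumed to be re-exported the same way:

    import Streamly
    import qualified Streamly.Prelude as S

    main :: IO ()
    main = do
        let s1, s2 :: IsStream t => t IO Int
            s1 = S.fromFoldable [1, 2, 3]
            s2 = S.fromFoldable [10, 20, 30]
        -- Pure serial zipping.
        S.toList (S.zipWith (+) s1 s2) >>= print                        -- [11,22,33]
        -- Monadic serial zipping (note the Monad constraint new in 0.4.0).
        S.toList (S.zipWithM (\a b -> return (a * b)) s1 s2) >>= print  -- [10,40,90]
        -- Applicative zipping via the ZipSerialM type.
        (S.toList . zipSerially $ (,) <$> s1 <*> s2) >>= print          -- [(1,10),(2,20),(3,30)]
        -- Concurrent zipping: both inputs are generated concurrently.
        S.toList (S.zipAsyncWith (+) s1 s2) >>= print                   -- [11,22,33]
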
diff --git a/src/Streamly/Streams/inline.h b/src/Streamly/Streams/inline.h
new file mode 100644
index 0000000..40a0765
--- /dev/null
+++ b/src/Streamly/Streams/inline.h
@@ -0,0 +1,3 @@
+#define INLINE_EARLY INLINE [2]
+#define INLINE_NORMAL INLINE [1]
+#define INLINE_LATE INLINE [0]
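
These macros give symbolic names to GHC simplifier phases (which count down
2, 1, 0), so that inlining of stream operations can be staged, e.g. relative to
rewrite rules. A hypothetical illustration of how a module could use them; the
module and function names below are made up for the example, and inline.h is
assumed to be on the include path:

    {-# LANGUAGE CPP #-}
    #include "inline.h"

    module Example (double) where

    -- INLINE_NORMAL expands to {-# INLINE [1] double #-}: GHC may inline
    -- 'double' only from simplifier phase 1 onwards.
    {-# INLINE_NORMAL double #-}
    double :: Int -> Int
    double x = x + x
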
diff --git a/src/Streamly/Tutorial.hs b/src/Streamly/Tutorial.hs
index a56fad6..0888da4 100644
--- a/src/Streamly/Tutorial.hs
+++ b/src/Streamly/Tutorial.hs
@@ -185,7 +185,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- represents a single IO action whereas the 'Serial' monad represents a series
-- of IO actions. The only change you need to make to go from 'IO' to 'Serial'
-- is to use 'runStream' to run the monad and to prefix the IO actions with
--- either 'once' or 'liftIO'. If you use liftIO you can switch from 'Serial'
+-- either 'yieldM' or 'liftIO'. If you use liftIO you can switch from 'Serial'
-- to IO monad by simply removing the 'runStream' function; no other changes
-- are needed unless you have used some stream specific composition or
-- combinators.
@@ -347,13 +347,17 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- ["hello","world"]
-- @
--
--- To create a singleton stream from a pure value use 'pure' and to create a
--- singleton stream from a monadic action use 'once'.
+-- To create a singleton stream from a pure value use 'yield' or 'pure' and to
+-- create a singleton stream from a monadic action use 'yieldM'. Note that in
+-- the case of Zip applicative streams, 'pure' repeats the value to generate an
+-- infinite stream.
--
-- @
-- > S.'toList' $ 'pure' 1
-- [1]
--- > S.'toList' $ S.'once' 'getLine'
+-- > S.'toList' $ 'yield' 1
+-- [1]
+-- > S.'toList' $ S.'yieldM' 'getLine'
-- hello
-- ["hello"]
-- @
@@ -506,7 +510,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- seconds. After the delay it prints the number of seconds it slept.
--
-- @
--- delay n = S.'once' $ do
+-- delay n = S.'yieldM' $ do
-- threadDelay (n * 1000000)
-- tid \<- myThreadId
-- putStrLn (show tid ++ ": Delay " ++ show n)
@@ -737,7 +741,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
--
-- @
-- main = 'runStream' . 'asyncly' $ traced (sqrt 9) '<>' traced (sqrt 16) '<>' traced (sqrt 25)
--- where traced m = S.'once' (myThreadId >>= print) >> return m
+-- where traced m = S.'yieldM' (myThreadId >>= print) >> return m
-- @
-- @
-- ThreadId 40
@@ -859,7 +863,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- google = get "https://www.google.com/search?q=haskell"
-- bing = get "https://www.bing.com/search?q=haskell"
-- duckduckgo = get "https://www.duckduckgo.com/?q=haskell"
--- get s = S.'once' (httpNoBody (parseRequest_ s) >> putStrLn (show s))
+-- get s = S.'yieldM' (httpNoBody (parseRequest_ s) >> putStrLn (show s))
-- @
--
-- The polymorphic version of the binary operation '<>' of the 'Parallel' type
@@ -918,7 +922,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- 'runStream' $ 'foldWith' 'async' (map delay [1..10])
-- 'runStream' $ 'foldMapWith' 'async' delay [1..10]
-- 'runStream' $ 'forEachWith' 'async' [1..10] delay
--- where delay n = S.'once' $ threadDelay (n * 1000000) >> print n
+-- where delay n = S.'yieldM' $ threadDelay (n * 1000000) >> print n
-- @
-- $nesting
@@ -988,7 +992,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- import "Streamly"
-- import qualified "Streamly.Prelude" as S
--
--- main = 'runStream' $ forever $ S.once getLine >>= S.once . putStrLn
+-- main = 'runStream' $ forever $ S.yieldM getLine >>= S.yieldM . putStrLn
-- @
--
-- When multiple streams are composed using this style they nest in a DFS
@@ -1003,7 +1007,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- main = 'runStream' $ do
-- x <- S.'fromFoldable' [1,2]
-- y <- S.'fromFoldable' [3,4]
--- S.'once' $ putStrLn $ show (x, y)
+-- S.'yieldM' $ putStrLn $ show (x, y)
-- @
-- @
-- (1,3)
@@ -1100,7 +1104,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- main = 'runStream' . 'asyncly' $ do
-- x <- S.'fromFoldable' [1,2]
-- y <- S.'fromFoldable' [3,4]
--- S.'once' $ putStrLn $ show (x, y)
+-- S.'yieldM' $ putStrLn $ show (x, y)
-- @
-- @
-- (1,3)
@@ -1125,7 +1129,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- main = 'runStream' . 'wSerially' $ do
-- x <- S.'fromFoldable' [1,2]
-- y <- S.'fromFoldable' [3,4]
--- S.once $ putStrLn $ show (x, y)
+-- S.yieldM $ putStrLn $ show (x, y)
-- @
-- @
-- (1,3)
@@ -1153,7 +1157,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- main = 'runStream' . 'wAsyncly' $ do
-- x <- S.'fromFoldable' [1,2]
-- y <- S.'fromFoldable' [3,4]
--- S.'once' $ putStrLn $ show (x, y)
+-- S.'yieldM' $ putStrLn $ show (x, y)
-- @
-- @
-- (1,3)
@@ -1202,7 +1206,7 @@ import Control.Monad.Trans.Class (MonadTrans (lift))
-- sz <- sizes
-- cl <- colors
-- sh <- shapes
--- S.'once' $ putStrLn $ show (sz, cl, sh)
+-- S.'yieldM' $ putStrLn $ show (sz, cl, sh)
--
-- where
--
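
The renamed combinators documented above can be tried out with a small
standalone program; this sketch assumes 'yield' and 'yieldM' are exported from
Streamly.Prelude alongside the other construction functions (this release adds
'yield' and renames 'once' to 'yieldM'):

    import Streamly
    import qualified Streamly.Prelude as S

    main :: IO ()
    main = do
        -- Singleton stream from a pure value (new in 0.4.0).
        S.toList (S.yield (1 :: Int)) >>= print   -- [1]
        -- Singleton stream from an effect; 'yieldM' is the renamed 'once'.
        S.toList (S.yieldM getLine) >>= print     -- echoes the typed line as a one-element list
        -- The tutorial's echo example, with 'once' replaced by 'yieldM'.
        runStream $ S.yieldM getLine >>= S.yieldM . putStrLn
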
diff --git a/stack-7.10.yaml b/stack-7.10.yaml
index 32c30d6..2709235 100644
--- a/stack-7.10.yaml
+++ b/stack-7.10.yaml
@@ -10,7 +10,7 @@ extra-deps:
- http-client-0.5.0
- http-client-tls-0.3.0
- SDL-0.6.5.1
- - gauge-0.2.1
+ - gauge-0.2.3
- basement-0.0.7
flags: {}
extra-package-dbs: []
diff --git a/stack-8.0.yaml b/stack-8.0.yaml
index aed11c1..d42112b 100644
--- a/stack-8.0.yaml
+++ b/stack-8.0.yaml
@@ -6,7 +6,7 @@ extra-deps:
- lockfree-queue-0.2.3.1
- simple-conduit-0.6.0
- SDL-0.6.5.1
- - gauge-0.2.1
+ - gauge-0.2.3
- basement-0.0.4
flags: {}
extra-package-dbs: []
diff --git a/stack.yaml b/stack.yaml
index e87d702..eb67b84 100644
--- a/stack.yaml
+++ b/stack.yaml
@@ -1,28 +1,18 @@
-resolver: lts-11.0
+#resolver: lts-11.0
+resolver: nightly-2018-07-06
packages:
- '.'
#- location: ../bench-graph
# extra-dep: true
+allow-newer: true
extra-deps:
- - simple-conduit-0.6.0
- - SDL-0.6.5.1
-
- - git: https://github.com/composewell/bench-graph
- commit: 268a04061cca7eda448b8f741d8d0aa82cd6be3a
-
- - git: https://github.com/harendra-kumar/hs-gauge
- commit: f3bb4a1fc801c581224843759b7e6dabb0aef3dc
-
- - Chart-diagrams-1.8.3
+ - SDL-0.6.6.0
+ - gauge-0.2.3
+ - bench-graph-0.1.0
+ - Chart-1.9
+ - Chart-diagrams-1.9
+ - Unique-0.4.7.2
- SVGFonts-1.6.0.3
- - diagrams-core-1.4.0.1
- - diagrams-lib-1.4.2
- - diagrams-postscript-1.4
- - diagrams-svg-1.4.1.1
- - diagrams-solve-0.1.1
- - dual-tree-0.2.1
- - lens-4.15.4
- - free-4.12.4
flags: {}
extra-package-dbs: []
diff --git a/streamly.cabal b/streamly.cabal
index a61e2af..4be674a 100644
--- a/streamly.cabal
+++ b/streamly.cabal
@@ -1,5 +1,5 @@
name: streamly
-version: 0.3.0
+version: 0.4.0
synopsis: Beautiful Streaming, Concurrent and Reactive Composition
description:
Streamly, short for streaming concurrently, provides monadic streams, with a
@@ -45,7 +45,11 @@ description:
* /Generality/: Unifies functionality provided by several disparate packages
(streaming, concurrency, list transformer, logic programming, reactive
programming) in a concise API.
- * /Performance/: Streamly is designed for high performance. See
+ * /Performance/: Streamly is designed for high performance.
+ It employs stream fusion optimizations for best possible performance.
+    Serial performance is equivalent to the venerable `vector` library in most
+ cases and even better in some cases. Concurrent performance is unbeatable.
+ See
<https://github.com/composewell/streaming-benchmarks streaming-benchmarks>
for a comparison of popular streaming libraries on micro-benchmarks.
.
@@ -75,6 +79,8 @@ extra-source-files:
stack-7.10.yaml
stack-8.0.yaml
stack.yaml
+ src/Streamly/Streams/Instances.hs
+ src/Streamly/Streams/inline.h
source-repository head
type: git
@@ -90,6 +96,16 @@ flag dev
manual: True
default: False
+flag no-fusion
+ description: Disable rewrite rules
+ manual: True
+ default: False
+
+flag streamk
+ description: Use CPS style streams when possible
+ manual: True
+ default: False
+
flag examples
description: Build including examples
manual: True
@@ -106,17 +122,31 @@ flag examples-sdl
library
hs-source-dirs: src
- other-modules: Streamly.Core
- , Streamly.Streams
+ other-modules: Streamly.SVar
+ , Streamly.Streams.StreamK
+ , Streamly.Streams.StreamD
+ , Streamly.Streams.Serial
+ , Streamly.Streams.SVar
+ , Streamly.Streams.Async
+ , Streamly.Streams.Parallel
+ , Streamly.Streams.Ahead
+ , Streamly.Streams.Zip
+ , Streamly.Streams.Prelude
exposed-modules: Streamly.Prelude
, Streamly.Time
- , Streamly.Tutorial
, Streamly
+ , Streamly.Tutorial
default-language: Haskell2010
ghc-options: -Wall
+ if flag(streamk)
+ cpp-options: -DUSE_STREAMK_ONLY
+
+ if flag(no-fusion)
+ cpp-options: -DDISABLE_FUSION
+
if flag(diag)
cpp-options: -DDIAGNOSTICS
@@ -136,6 +166,7 @@ library
-Wnoncanonical-monadfail-instances
build-depends: base >= 4.8 && < 5
+ , ghc-prim >= 0.2 && < 0.6
, containers >= 0.5 && < 0.6
, heaps >= 0.3 && < 0.4
@@ -243,6 +274,38 @@ test-suite parallel-loops
-- Benchmarks
-------------------------------------------------------------------------------
+benchmark base
+ type: exitcode-stdio-1.0
+ hs-source-dirs: benchmark
+ main-is: BaseStreams.hs
+ other-modules: StreamDOps
+ , StreamKOps
+ default-language: Haskell2010
+ ghc-options: -O2 -Wall
+ if flag(dev)
+ ghc-options: -Wmissed-specialisations
+ -Wall-missed-specialisations
+ -fno-ignore-asserts
+ if impl(ghc >= 8.0)
+ ghc-options: -Wcompat
+ -Wunrecognised-warning-flags
+ -Widentities
+ -Wincomplete-record-updates
+ -Wincomplete-uni-patterns
+ -Wredundant-constraints
+ -Wnoncanonical-monad-instances
+ -Wnoncanonical-monadfail-instances
+ if flag(dev)
+ buildable: True
+ build-depends:
+ streamly
+ , base >= 4.8 && < 5
+ , deepseq >= 1.4.0 && < 1.5
+ , random >= 1.0 && < 2.0
+ , gauge >= 0.2.3 && < 0.3
+ else
+ buildable: False
+
benchmark linear
type: exitcode-stdio-1.0
hs-source-dirs: benchmark
@@ -268,7 +331,7 @@ benchmark linear
, base >= 4.8 && < 5
, deepseq >= 1.4.0 && < 1.5
, random >= 1.0 && < 2.0
- , gauge >= 0.2.1 && < 0.3
+ , gauge >= 0.2.3 && < 0.3
benchmark nested
type: exitcode-stdio-1.0
@@ -295,7 +358,7 @@ benchmark nested
, base >= 4.8 && < 5
, deepseq >= 1.4.0 && < 1.5
, random >= 1.0 && < 2.0
- , gauge >= 0.2.1 && < 0.3
+ , gauge >= 0.2.3 && < 0.3
executable chart-linear
default-language: Haskell2010
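
The two new flags only toggle CPP symbols (-DUSE_STREAMK_ONLY for -fstreamk,
-DDISABLE_FUSION for -fno-fusion); library code can then branch on them. The
following is a hypothetical sketch of that pattern, not code from the patch:

    {-# LANGUAGE CPP #-}
    module WhichImpl (implementation) where

    implementation :: String
    #ifdef USE_STREAMK_ONLY
    implementation = "CPS (StreamK) streams only"
    #else
    #ifdef DISABLE_FUSION
    implementation = "direct-style streams, rewrite rules disabled"
    #else
    implementation = "direct-style streams with stream fusion"
    #endif
    #endif
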
diff --git a/test/Main.hs b/test/Main.hs
index 405c52c..45e3960 100644
--- a/test/Main.hs
+++ b/test/Main.hs
@@ -15,22 +15,22 @@ import Test.Hspec
import Streamly
import Streamly.Prelude ((.:), nil)
-import qualified Streamly.Prelude as A
+import qualified Streamly.Prelude as S
singleton :: IsStream t => a -> t m a
singleton a = a .: nil
toListSerial :: SerialT IO a -> IO [a]
-toListSerial = A.toList . serially
+toListSerial = S.toList . serially
toListInterleaved :: WSerialT IO a -> IO [a]
-toListInterleaved = A.toList . wSerially
+toListInterleaved = S.toList . wSerially
toListAsync :: AsyncT IO a -> IO [a]
-toListAsync = A.toList . asyncly
+toListAsync = S.toList . asyncly
toListParallel :: WAsyncT IO a -> IO [a]
-toListParallel = A.toList . wAsyncly
+toListParallel = S.toList . wAsyncly
main :: IO ()
main = hspec $ do
@@ -40,7 +40,7 @@ main = hspec $ do
it "simple serially" $
(runStream . serially) (return (0 :: Int)) `shouldReturn` ()
it "simple serially with IO" $
- (runStream . serially) (A.once $ putStrLn "hello") `shouldReturn` ()
+ (runStream . serially) (S.yieldM $ putStrLn "hello") `shouldReturn` ()
describe "Empty" $ do
it "Monoid - mempty" $
@@ -96,19 +96,21 @@ main = hspec $ do
-- for Monoid that is using the right version of semigroup. Instance
-- deriving can cause us to pick wrong instances sometimes.
- describe "Serial interleaved (<>) ordering check" $ interleaveCheck wSerially (<>)
- describe "Serial interleaved mappend ordering check" $ interleaveCheck wSerially mappend
+ describe "WSerial interleaved (<>) ordering check" $ interleaveCheck wSerially (<>)
+ describe "WSerial interleaved mappend ordering check" $ interleaveCheck wSerially mappend
- describe "Parallel interleaved (<>) ordering check" $ interleaveCheck wAsyncly (<>)
- describe "Parallel interleaved mappend ordering check" $ interleaveCheck wAsyncly mappend
-
- -- describe "Parallel (<>) ordering check" $ interleaveCheck parallely (<>)
- -- describe "Parallel mappend ordering check" $ interleaveCheck parallely mappend
+ -- describe "WAsync interleaved (<>) ordering check" $ interleaveCheck wAsyncly (<>)
+ -- describe "WAsync interleaved mappend ordering check" $ interleaveCheck wAsyncly mappend
describe "Async (<>) time order check" $ parallelCheck asyncly (<>)
describe "Async mappend time order check" $ parallelCheck asyncly mappend
- describe "WAsync (<>) time order check" $ parallelCheck wAsyncly (<>)
- describe "WAsync mappend time order check" $ parallelCheck wAsyncly mappend
+
+ -- XXX this keeps failing intermittently, need to investigate
+ -- describe "WAsync (<>) time order check" $ parallelCheck wAsyncly (<>)
+ -- describe "WAsync mappend time order check" $ parallelCheck wAsyncly mappend
+
+ describe "Parallel (<>) time order check" $ parallelCheck parallely (<>)
+ describe "Parallel mappend time order check" $ parallelCheck parallely mappend
---------------------------------------------------------------------------
-- Monoidal Compositions, multiset equality checks
@@ -176,7 +178,7 @@ main = hspec $ do
`shouldReturn` ([4,4,8,8,0,0,2,2])
-}
it "Nest <|>, <>, <|> (2)" $
- (A.toList . wAsyncly) (
+ (S.toList . wAsyncly) (
s (p (t 4 <> t 8) <> p (t 1 <> t 2))
<> s (p (t 4 <> t 8) <> p (t 1 <> t 2)))
`shouldReturn` ([4,4,8,8,1,1,2,2])
@@ -197,7 +199,7 @@ main = hspec $ do
`shouldReturn` ([4,4,1,1,8,2,9,2])
-}
it "Nest <|>, <|>, <|>" $
- (A.toList . wAsyncly) (
+ (S.toList . wAsyncly) (
((t 4 <> t 8) <> (t 0 <> t 2))
<> ((t 4 <> t 8) <> (t 0 <> t 2)))
`shouldReturn` ([0,0,2,2,4,4,8,8])
@@ -378,14 +380,25 @@ main = hspec $ do
describe "Composed MonadThrow parallely" $ composeWithMonadThrow parallely
describe "Composed MonadThrow aheadly" $ composeWithMonadThrow aheadly
+ describe "take on infinite concurrent stream" $ takeInfinite asyncly
+ describe "take on infinite concurrent stream" $ takeInfinite wAsyncly
+ describe "take on infinite concurrent stream" $ takeInfinite aheadly
+
it "asyncly crosses thread limit (2000 threads)" $
runStream (asyncly $ fold $
- replicate 2000 $ A.once $ threadDelay 1000000)
+ replicate 2000 $ S.yieldM $ threadDelay 1000000)
`shouldReturn` ()
it "aheadly crosses thread limit (4000 threads)" $
runStream (aheadly $ fold $
- replicate 4000 $ A.once $ threadDelay 1000000)
+ replicate 4000 $ S.yieldM $ threadDelay 1000000)
+ `shouldReturn` ()
+
+takeInfinite :: IsStream t => (t IO Int -> SerialT IO Int) -> Spec
+takeInfinite t = do
+ it "take 1" $
+ (runStream $ t $
+ S.take 1 $ S.repeatM (print "hello" >> return (1::Int)))
`shouldReturn` ()
-- XXX need to test that we have promptly cleaned up everything after the error
@@ -420,10 +433,10 @@ composeWithMonadThrow
=> (t IO Int -> SerialT IO Int) -> Spec
composeWithMonadThrow t = do
it "Compose throwM, nil" $
- (try $ tl (throwM (ExampleException "E") <> A.nil))
+ (try $ tl (throwM (ExampleException "E") <> S.nil))
`shouldReturn` (Left (ExampleException "E") :: Either ExampleException [Int])
it "Compose nil, throwM" $
- (try $ tl (A.nil <> throwM (ExampleException "E")))
+ (try $ tl (S.nil <> throwM (ExampleException "E")))
`shouldReturn` (Left (ExampleException "E") :: Either ExampleException [Int])
oneLevelNestedSum "serially" serially
oneLevelNestedSum "wSerially" wSerially
@@ -437,12 +450,12 @@ composeWithMonadThrow t = do
oneLevelNestedProduct "wAsyncly" wAsyncly
where
- tl = A.toList . t
+ tl = S.toList . t
oneLevelNestedSum desc t1 =
it ("One level nested sum " ++ desc) $ do
- let nested = (A.fromFoldable [1..10] <> throwM (ExampleException "E")
- <> A.fromFoldable [1..10])
- (try $ tl (A.nil <> t1 nested <> A.fromFoldable [1..10]))
+ let nested = (S.fromFoldable [1..10] <> throwM (ExampleException "E")
+ <> S.fromFoldable [1..10])
+ (try $ tl (S.nil <> t1 nested <> S.fromFoldable [1..10]))
`shouldReturn` (Left (ExampleException "E") :: Either ExampleException [Int])
oneLevelNestedProduct desc t1 =
@@ -465,11 +478,11 @@ _composeWithMonadError
)
=> (t (ExceptT String IO) Int -> SerialT (ExceptT String IO) Int) -> Spec
_composeWithMonadError t = do
- let tl = A.toList . t
+ let tl = S.toList . t
it "Compose throwError, nil" $
- (runExceptT $ tl (throwError "E" <> A.nil)) `shouldReturn` Left "E"
+ (runExceptT $ tl (throwError "E" <> S.nil)) `shouldReturn` Left "E"
it "Compose nil, error" $
- (runExceptT $ tl (A.nil <> throwError "E")) `shouldReturn` Left "E"
+ (runExceptT $ tl (S.nil <> throwError "E")) `shouldReturn` Left "E"
nestTwoSerial :: Expectation
nestTwoSerial =
@@ -485,7 +498,7 @@ nestTwoAhead :: Expectation
nestTwoAhead =
let s1 = foldMapWith (<>) return [1..4]
s2 = foldMapWith (<>) return [5..8]
- in (A.toList . aheadly) (do
+ in (S.toList . aheadly) (do
x <- s1
y <- s2
return (x + y)
@@ -502,7 +515,7 @@ nestTwoAheadApp :: Expectation
nestTwoAheadApp =
let s1 = foldMapWith (<>) return [1..4]
s2 = foldMapWith (<>) return [5..8]
- in (A.toList . aheadly) ((+) <$> s1 <*> s2)
+ in (S.toList . aheadly) ((+) <$> s1 <*> s2)
`shouldReturn` ([6,7,8,9,7,8,9,10,8,9,10,11,9,10,11,12] :: [Int])
nestTwoInterleaved :: Expectation
@@ -544,7 +557,7 @@ nestTwoWAsync :: Expectation
nestTwoWAsync =
let s1 = foldMapWith (<>) return [1..4]
s2 = foldMapWith (<>) return [5..8]
- in ((A.toList . wAsyncly) (do
+ in ((S.toList . wAsyncly) (do
x <- s1
y <- s2
return (x + y)
@@ -555,7 +568,7 @@ nestTwoParallel :: Expectation
nestTwoParallel =
let s1 = foldMapWith (<>) return [1..4]
s2 = foldMapWith (<>) return [5..8]
- in ((A.toList . parallely) (do
+ in ((S.toList . parallely) (do
x <- s1
y <- s2
return (x + y)
@@ -566,18 +579,18 @@ nestTwoWAsyncApp :: Expectation
nestTwoWAsyncApp =
let s1 = foldMapWith (<>) return [1..4]
s2 = foldMapWith (<>) return [5..8]
- in ((A.toList . wAsyncly) ((+) <$> s1 <*> s2) >>= return . sort)
+ in ((S.toList . wAsyncly) ((+) <$> s1 <*> s2) >>= return . sort)
`shouldReturn` sort ([6,7,7,8,8,8,9,9,9,9,10,10,10,11,11,12] :: [Int])
nestTwoParallelApp :: Expectation
nestTwoParallelApp =
let s1 = foldMapWith (<>) return [1..4]
s2 = foldMapWith (<>) return [5..8]
- in ((A.toList . parallely) ((+) <$> s1 <*> s2) >>= return . sort)
+ in ((S.toList . parallely) ((+) <$> s1 <*> s2) >>= return . sort)
`shouldReturn` sort ([6,7,7,8,8,8,9,9,9,9,10,10,10,11,11,12] :: [Int])
timed :: (IsStream t, Monad (t IO)) => Int -> t IO Int
-timed x = A.once (threadDelay (x * 100000)) >> return x
+timed x = S.yieldM (threadDelay (x * 100000)) >> return x
interleaveCheck :: IsStream t
=> (t IO Int -> SerialT IO Int)
@@ -585,7 +598,7 @@ interleaveCheck :: IsStream t
-> Spec
interleaveCheck t f =
it "Interleave four" $
- (A.toList . t) ((singleton 0 `f` singleton 1) `f` (singleton 100 `f` singleton 101))
+ (S.toList . t) ((singleton 0 `f` singleton 1) `f` (singleton 100 `f` singleton 101))
`shouldReturn` ([0, 100, 1, 101])
parallelCheck :: (IsStream t, Monad (t IO))
@@ -594,14 +607,14 @@ parallelCheck :: (IsStream t, Monad (t IO))
-> Spec
parallelCheck t f = do
it "Parallel ordering left associated" $
- (A.toList . t) (((event 4 `f` event 3) `f` event 2) `f` event 1)
+ (S.toList . t) (((event 4 `f` event 3) `f` event 2) `f` event 1)
`shouldReturn` ([1..4])
it "Parallel ordering right associated" $
- (A.toList . t) (event 4 `f` (event 3 `f` (event 2 `f` event 1)))
+ (S.toList . t) (event 4 `f` (event 3 `f` (event 2 `f` event 1)))
`shouldReturn` ([1..4])
- where event n = (A.once $ threadDelay (n * 100000)) >> (return n)
+ where event n = (S.yieldM $ threadDelay (n * 200000)) >> (return n)
compose :: (IsStream t, Semigroup (t IO Int))
=> (t IO Int -> SerialT IO Int) -> t IO Int -> ([Int] -> [Int]) -> Spec
@@ -634,7 +647,7 @@ compose t z srt = do
((tl $ (((singleton 0 <> singleton 1) <> (singleton 2 <> singleton 3))
<> ((singleton 4 <> singleton 5) <> (singleton 6 <> singleton 7)))
) >>= return . srt) `shouldReturn` [0..7]
- where tl = A.toList . t
+ where tl = S.toList . t
composeAndComposeSimple
:: ( IsStream t1, Semigroup (t1 IO Int)
@@ -649,20 +662,20 @@ composeAndComposeSimple
composeAndComposeSimple t1 t2 answer = do
let rfold = adapt . t2 . foldMapWith (<>) return
it "Compose right associated outer expr, right folded inner" $
- ((A.toList . t1) (rfold [1,2,3] <> (rfold [4,5,6] <> rfold [7,8,9])))
+ ((S.toList . t1) (rfold [1,2,3] <> (rfold [4,5,6] <> rfold [7,8,9])))
`shouldReturn` (answer !! 0)
it "Compose left associated outer expr, right folded inner" $
- ((A.toList . t1) ((rfold [1,2,3] <> rfold [4,5,6]) <> rfold [7,8,9]))
+ ((S.toList . t1) ((rfold [1,2,3] <> rfold [4,5,6]) <> rfold [7,8,9]))
`shouldReturn` (answer !! 1)
let lfold xs = adapt $ t2 $ foldl (<>) mempty $ map return xs
it "Compose right associated outer expr, left folded inner" $
- ((A.toList . t1) (lfold [1,2,3] <> (lfold [4,5,6] <> lfold [7,8,9])))
+ ((S.toList . t1) (lfold [1,2,3] <> (lfold [4,5,6] <> lfold [7,8,9])))
`shouldReturn` (answer !! 2)
it "Compose left associated outer expr, left folded inner" $
- ((A.toList . t1) ((lfold [1,2,3] <> lfold [4,5,6]) <> lfold [7,8,9]))
+ ((S.toList . t1) ((lfold [1,2,3] <> lfold [4,5,6]) <> lfold [7,8,9]))
`shouldReturn` (answer !! 3)
loops
@@ -672,21 +685,21 @@ loops
-> ([Int] -> [Int])
-> Spec
loops t tsrt hsrt = do
- it "Tail recursive loop" $ ((A.toList . adapt) (loopTail 0) >>= return . tsrt)
+ it "Tail recursive loop" $ ((S.toList . adapt) (loopTail 0) >>= return . tsrt)
`shouldReturn` [0..3]
- it "Head recursive loop" $ ((A.toList . adapt) (loopHead 0) >>= return . hsrt)
+ it "Head recursive loop" $ ((S.toList . adapt) (loopHead 0) >>= return . hsrt)
`shouldReturn` [0..3]
where
loopHead x = do
-- this print line is important for the test (causes a bind)
- A.once $ putStrLn "LoopHead..."
+ S.yieldM $ putStrLn "LoopHead..."
t $ (if x < 3 then loopHead (x + 1) else nil) <> return x
loopTail x = do
-- this print line is important for the test (causes a bind)
- A.once $ putStrLn "LoopTail..."
+ S.yieldM $ putStrLn "LoopTail..."
t $ return x <> (if x < 3 then loopTail (x + 1) else nil)
bindAndComposeSimple
@@ -697,12 +710,12 @@ bindAndComposeSimple
bindAndComposeSimple t1 t2 = do
-- XXX need a bind in the body of forEachWith instead of a simple return
it "Compose many (right fold) with bind" $
- ((A.toList . t1) (adapt . t2 $ forEachWith (<>) [1..10 :: Int] return)
+ ((S.toList . t1) (adapt . t2 $ forEachWith (<>) [1..10 :: Int] return)
>>= return . sort) `shouldReturn` [1..10]
it "Compose many (left fold) with bind" $
let forL xs k = foldl (<>) nil $ map k xs
- in ((A.toList . t1) (adapt . t2 $ forL [1..10 :: Int] return)
+ in ((S.toList . t1) (adapt . t2 $ forL [1..10 :: Int] return)
>>= return . sort) `shouldReturn` [1..10]
bindAndComposeHierarchy
@@ -714,7 +727,7 @@ bindAndComposeHierarchy
-> Spec
bindAndComposeHierarchy t1 t2 g = do
it "Bind and compose nested" $
- ((A.toList . t1) bindComposeNested >>= return . sort)
+ ((S.toList . t1) bindComposeNested >>= return . sort)
`shouldReturn` (sort (
[12, 18]
++ replicate 3 13
@@ -754,21 +767,21 @@ mixedOps = do
composeMixed :: SerialT IO Int
composeMixed = do
- A.once $ return ()
- A.once $ putStr ""
+ S.yieldM $ return ()
+ S.yieldM $ putStr ""
x <- return 1
y <- return 2
z <- do
x1 <- wAsyncly $ return 1 <> return 2
- A.once $ return ()
- A.once $ putStr ""
+ S.yieldM $ return ()
+ S.yieldM $ putStr ""
y1 <- asyncly $ return 1 <> return 2
z1 <- do
x11 <- return 1 <> return 2
y11 <- asyncly $ return 1 <> return 2
z11 <- wSerially $ return 1 <> return 2
- A.once $ return ()
- A.once $ putStr ""
+ S.yieldM $ return ()
+ S.yieldM $ putStr ""
return (x11 + y11 + z11)
return (x1 + y1 + z1)
return (x + y + z)
@@ -784,21 +797,21 @@ mixedOpsAheadly = do
composeMixed :: SerialT IO Int
composeMixed = do
- A.once $ return ()
- A.once $ putStr ""
+ S.yieldM $ return ()
+ S.yieldM $ putStr ""
x <- return 1
y <- return 2
z <- do
x1 <- wAsyncly $ return 1 <> return 2
- A.once $ return ()
- A.once $ putStr ""
+ S.yieldM $ return ()
+ S.yieldM $ putStr ""
y1 <- aheadly $ return 1 <> return 2
z1 <- do
x11 <- return 1 <> return 2
y11 <- aheadly $ return 1 <> return 2
z11 <- parallely $ return 1 <> return 2
- A.once $ return ()
- A.once $ putStr ""
+ S.yieldM $ return ()
+ S.yieldM $ putStr ""
return (x11 + y11 + z11)
return (x1 + y1 + z1)
return (x + y + z)
diff --git a/test/Prop.hs b/test/Prop.hs
index 5a6b229..646680b 100644
--- a/test/Prop.hs
+++ b/test/Prop.hs
@@ -21,8 +21,9 @@ import Test.Hspec
import Streamly
import Streamly.Prelude ((.:), nil)
-import qualified Streamly.Prelude as A
+import qualified Streamly.Prelude as S
+-- Coverage build takes too long with default number of tests
maxTestCount :: Int
#ifdef DEVBUILD
maxTestCount = 100
@@ -49,12 +50,15 @@ equals eq stream list = do
constructWithReplicateM
:: IsStream t
=> (t IO Int -> SerialT IO Int)
+ -> Int
+ -> Int
-> Word8
-> Property
-constructWithReplicateM op len =
+constructWithReplicateM op thr buf len = withMaxSuccess maxTestCount $
monadicIO $ do
let x = return (1 :: Int)
- stream <- run $ (A.toList . op) (A.replicateM (fromIntegral len) x)
+ stream <- run $ (S.toList . op) (maxThreads thr $ maxBuffer buf $
+ S.replicateM (fromIntegral len) x)
list <- run $ replicateM (fromIntegral len) x
equals (==) stream list
@@ -67,7 +71,7 @@ transformFromList
-> Property
transformFromList constr eq listOp op a =
monadicIO $ do
- stream <- run ((A.toList . op) (constr a))
+ stream <- run ((S.toList . op) (constr a))
let list = listOp a
equals eq stream list
@@ -105,7 +109,7 @@ concurrentMapM constr eq op n =
let list = [0..n]
stream <- run $ do
mv <- newEmptyMVar :: IO (MVar ())
- (A.toList . (op n mv)) (constr list)
+ (S.toList . (op n mv)) (constr list)
equals eq stream list
concurrentFromFoldable
@@ -119,11 +123,11 @@ concurrentFromFoldable eq op n =
let list = [0..n]
stream <- run $ do
mv <- newEmptyMVar :: IO (MVar ())
- (A.toList . op) (A.fromFoldableM (map (mvarSequenceOp mv n) list))
+ (S.toList . op) (S.fromFoldableM (map (mvarSequenceOp mv n) list))
equals eq stream list
sourceUnfoldrM :: IsStream t => MVar () -> Word8 -> t IO Word8
-sourceUnfoldrM mv n = A.unfoldrM step 0
+sourceUnfoldrM mv n = S.unfoldrM step 0
where
-- argument must be integer to avoid overflow of word8 at 255
step :: Int -> IO (Maybe (Word8, Int))
@@ -154,15 +158,15 @@ concurrentUnfoldrM eq op n =
-- since unfoldr happens in parallel with the stream processing we
-- can do two takeMVar in one iteration. If it is not parallel then
-- this will not work and the test will fail.
- A.toList $ do
+ S.toList $ do
x <- op (sourceUnfoldrM mv n)
-- results may not be yielded in order, in case of
-- Async/WAsync/Parallel. So we use an increasing count
-- instead.
- i <- A.once $ readIORef cnt
- A.once $ modifyIORef cnt (+1)
+ i <- S.yieldM $ readIORef cnt
+ S.yieldM $ modifyIORef cnt (+1)
let msg = show i ++ "/" ++ show n
- A.once $ do
+ S.yieldM $ do
if even i
then do
dbgMVar ("first take concurrentUnfoldrM " ++ msg)
@@ -187,9 +191,9 @@ concurrentApplication n =
-- since unfoldr happens in parallel with the stream processing we
-- can do two takeMVar in one iteration. If it is not parallel then
-- this will not work and the test will fail.
- A.toList $ do
+ S.toList $ do
sourceUnfoldrM mv n |&
- (A.mapM $ \x -> do
+ (S.mapM $ \x -> do
let msg = show x ++ "/" ++ show n
if even x
then do
@@ -204,7 +208,7 @@ concurrentApplication n =
equals (==) stream list
sourceUnfoldrM1 :: IsStream t => Word8 -> t IO Word8
-sourceUnfoldrM1 n = A.unfoldrM step 0
+sourceUnfoldrM1 n = S.unfoldrM step 0
where
-- argument must be integer to avoid overflow of word8 at 255
step :: Int -> IO (Maybe (Word8, Int))
@@ -219,7 +223,7 @@ concurrentFoldlApplication n =
-- XXX we should test empty list case as well
let list = [0..n]
stream <- run $ do
- sourceUnfoldrM1 n |&. A.foldlM' (\xs x -> return (x : xs)) []
+ sourceUnfoldrM1 n |&. S.foldlM' (\xs x -> return (x : xs)) []
equals (==) (reverse stream) list
concurrentFoldrApplication :: Word8 -> Property
@@ -228,7 +232,7 @@ concurrentFoldrApplication n =
-- XXX we should test empty list case as well
let list = [0..n]
stream <- run $ do
- sourceUnfoldrM1 n |&. A.foldrM (\x xs -> return (x : xs)) []
+ sourceUnfoldrM1 n |&. S.foldrM (\x xs -> return (x : xs)) []
equals (==) stream list
transformCombineFromList
@@ -243,8 +247,9 @@ transformCombineFromList
-> [Int]
-> Property
transformCombineFromList constr eq listOp t op a b c =
+ withMaxSuccess maxTestCount $
monadicIO $ do
- stream <- run ((A.toList . t) $
+ stream <- run ((S.toList . t) $
constr a <> op (constr b <> constr c))
let list = a <> listOp (b <> c)
equals eq stream list
@@ -305,42 +310,42 @@ transformOps constr desc t eq = do
let transform = transformFromList constr eq
-- Filtering
prop (desc ++ " filter False") $
- transform (filter (const False)) $ t . (A.filter (const False))
+ transform (filter (const False)) $ t . (S.filter (const False))
prop (desc ++ " filter True") $
- transform (filter (const True)) $ t . (A.filter (const True))
+ transform (filter (const True)) $ t . (S.filter (const True))
prop (desc ++ " filter even") $
- transform (filter even) $ t . (A.filter even)
+ transform (filter even) $ t . (S.filter even)
prop (desc ++ " take maxBound") $
- transform (take maxBound) $ t . (A.take maxBound)
- prop (desc ++ " take 0") $ transform (take 0) $ t . (A.take 0)
- prop (desc ++ " take 1") $ transform (take 1) $ t . (A.take 1)
- prop (desc ++ " take 10") $ transform (take 10) $ t . (A.take 10)
+ transform (take maxBound) $ t . (S.take maxBound)
+ prop (desc ++ " take 0") $ transform (take 0) $ t . (S.take 0)
+ prop (desc ++ " take 1") $ transform (take 1) $ t . (S.take 1)
+ prop (desc ++ " take 10") $ transform (take 10) $ t . (S.take 10)
prop (desc ++ " takeWhile True") $
- transform (takeWhile (const True)) $ t . (A.takeWhile (const True))
+ transform (takeWhile (const True)) $ t . (S.takeWhile (const True))
prop (desc ++ " takeWhile False") $
- transform (takeWhile (const False)) $ t . (A.takeWhile (const False))
+ transform (takeWhile (const False)) $ t . (S.takeWhile (const False))
prop (desc ++ " takeWhile > 0") $
- transform (takeWhile (> 0)) $ t . (A.takeWhile (> 0))
+ transform (takeWhile (> 0)) $ t . (S.takeWhile (> 0))
let f x = if odd x then Just (x + 100) else Nothing
- prop (desc ++ " mapMaybe") $ transform (mapMaybe f) $ t . (A.mapMaybe f)
+ prop (desc ++ " mapMaybe") $ transform (mapMaybe f) $ t . (S.mapMaybe f)
prop (desc ++ " drop maxBound") $
- transform (drop maxBound) $ t . (A.drop maxBound)
- prop (desc ++ " drop 0") $ transform (drop 0) $ t . (A.drop 0)
- prop (desc ++ " drop 1") $ transform (drop 1) $ t . (A.drop 1)
- prop (desc ++ " drop 10") $ transform (drop 10) $ t . (A.drop 10)
+ transform (drop maxBound) $ t . (S.drop maxBound)
+ prop (desc ++ " drop 0") $ transform (drop 0) $ t . (S.drop 0)
+ prop (desc ++ " drop 1") $ transform (drop 1) $ t . (S.drop 1)
+ prop (desc ++ " drop 10") $ transform (drop 10) $ t . (S.drop 10)
prop (desc ++ " dropWhile True") $
- transform (dropWhile (const True)) $ t . (A.dropWhile (const True))
+ transform (dropWhile (const True)) $ t . (S.dropWhile (const True))
prop (desc ++ " dropWhile False") $
- transform (dropWhile (const False)) $ t . (A.dropWhile (const False))
+ transform (dropWhile (const False)) $ t . (S.dropWhile (const False))
prop (desc ++ " dropWhile > 0") $
- transform (dropWhile (> 0)) $ t . (A.dropWhile (> 0))
- prop (desc ++ " scan") $ transform (scanl' (+) 0) $ t . (A.scanl' (+) 0)
- prop (desc ++ " reverse") $ transform reverse $ t . A.reverse
+ transform (dropWhile (> 0)) $ t . (S.dropWhile (> 0))
+ prop (desc ++ " scan") $ transform (scanl' (+) 0) $ t . (S.scanl' (+) 0)
+ prop (desc ++ " reverse") $ transform reverse $ t . S.reverse
concurrentOps
:: IsStream t
@@ -350,19 +355,19 @@ concurrentOps
-> ([Word8] -> [Word8] -> Bool)
-> Spec
concurrentOps constr desc t eq = do
- prop (desc ++ " fromFoldableM") $ withMaxSuccess maxTestCount $
- concurrentFromFoldable eq t
- prop (desc ++ " unfoldrM") $ withMaxSuccess maxTestCount $
- concurrentUnfoldrM eq t
+ let prop1 d p = prop d $ withMaxSuccess maxTestCount p
+
+ prop1 (desc ++ " fromFoldableM") $ concurrentFromFoldable eq t
+ prop1 (desc ++ " unfoldrM") $ concurrentUnfoldrM eq t
-- we pass it the length of the stream n and an mvar mv.
-- The stream is [0..n]. The threads communicate in such a way that the
-- actions coming first in the stream are dependent on the last action. So
-- if the stream is not processed concurrently it will block forever.
-- Note that if the size of the stream is bigger than the thread limit
-- then it will block even if it is concurrent.
- prop (desc ++ " mapM") $ withMaxSuccess maxTestCount $
+ prop1 (desc ++ " mapM") $
concurrentMapM constr eq $ \n mv stream ->
- t $ A.mapM (mvarSequenceOp mv n) stream
+ t $ S.mapM (mvarSequenceOp mv n) stream
-- XXX add tests for MonadReader and MonadError etc. In case an SVar is
-- accidentally passed through them.
@@ -377,32 +382,55 @@ transformCombineOpsCommon constr desc t eq = do
let transform = transformCombineFromList constr eq
-- Filtering
prop (desc ++ " filter False") $
- transform (filter (const False)) t (A.filter (const False))
+ transform (filter (const False)) t (S.filter (const False))
prop (desc ++ " filter True") $
- transform (filter (const True)) t (A.filter (const True))
+ transform (filter (const True)) t (S.filter (const True))
prop (desc ++ " filter even") $
- transform (filter even) t (A.filter even)
+ transform (filter even) t (S.filter even)
+
+ prop (desc ++ " filterM False") $
+ transform (filter (const False)) t (S.filterM (const $ return False))
+ prop (desc ++ " filterM True") $
+ transform (filter (const True)) t (S.filterM (const $ return True))
+ prop (desc ++ " filterM even") $
+ transform (filter even) t (S.filterM (return . even))
prop (desc ++ " take maxBound") $
- transform (take maxBound) t (A.take maxBound)
- prop (desc ++ " take 0") $ transform (take 0) t (A.take 0)
+ transform (take maxBound) t (S.take maxBound)
+ prop (desc ++ " take 0") $ transform (take 0) t (S.take 0)
prop (desc ++ " takeWhile True") $
- transform (takeWhile (const True)) t (A.takeWhile (const True))
+ transform (takeWhile (const True)) t (S.takeWhile (const True))
prop (desc ++ " takeWhile False") $
- transform (takeWhile (const False)) t (A.takeWhile (const False))
+ transform (takeWhile (const False)) t (S.takeWhile (const False))
+
+ prop (desc ++ " takeWhileM True") $
+ transform (takeWhile (const True)) t (S.takeWhileM (const $ return True))
+ prop (desc ++ " takeWhileM False") $
+ transform (takeWhile (const False)) t (S.takeWhileM (const $ return False))
prop (desc ++ " drop maxBound") $
- transform (drop maxBound) t (A.drop maxBound)
- prop (desc ++ " drop 0") $ transform (drop 0) t (A.drop 0)
+ transform (drop maxBound) t (S.drop maxBound)
+ prop (desc ++ " drop 0") $ transform (drop 0) t (S.drop 0)
prop (desc ++ " dropWhile True") $
- transform (dropWhile (const True)) t (A.dropWhile (const True))
+ transform (dropWhile (const True)) t (S.dropWhile (const True))
prop (desc ++ " dropWhile False") $
- transform (dropWhile (const False)) t (A.dropWhile (const False))
+ transform (dropWhile (const False)) t (S.dropWhile (const False))
+
+ prop (desc ++ " dropWhileM True") $
+ transform (dropWhile (const True)) t (S.dropWhileM (const $ return True))
+ prop (desc ++ " dropWhileM False") $
+ transform (dropWhile (const False)) t (S.dropWhileM (const $ return False))
+
+ prop (desc ++ " mapM (+1)") $
+ transform (map (+1)) t (S.mapM (\x -> return (x + 1)))
+
prop (desc ++ " scan") $ transform (scanl' (flip const) 0) t
- (A.scanl' (flip const) 0)
- prop (desc ++ " reverse") $ transform reverse t A.reverse
+ (S.scanl' (flip const) 0)
+ prop (desc ++ " scanlM'") $ transform (scanl' (flip const) 0) t
+ (S.scanlM' (\_ a -> return a) 0)
+ prop (desc ++ " reverse") $ transform reverse t S.reverse
transformCombineOpsOrdered
:: (IsStream t, Semigroup (t IO Int))
@@ -414,18 +442,18 @@ transformCombineOpsOrdered
transformCombineOpsOrdered constr desc t eq = do
let transform = transformCombineFromList constr eq
-- Filtering
- prop (desc ++ " take 1") $ transform (take 1) t (A.take 1)
- prop (desc ++ " take 10") $ transform (take 10) t (A.take 10)
+ prop (desc ++ " take 1") $ transform (take 1) t (S.take 1)
+ prop (desc ++ " take 10") $ transform (take 10) t (S.take 10)
prop (desc ++ " takeWhile > 0") $
- transform (takeWhile (> 0)) t (A.takeWhile (> 0))
+ transform (takeWhile (> 0)) t (S.takeWhile (> 0))
- prop (desc ++ " drop 1") $ transform (drop 1) t (A.drop 1)
- prop (desc ++ " drop 10") $ transform (drop 10) t (A.drop 10)
+ prop (desc ++ " drop 1") $ transform (drop 1) t (S.drop 1)
+ prop (desc ++ " drop 10") $ transform (drop 10) t (S.drop 10)
prop (desc ++ " dropWhile > 0") $
- transform (dropWhile (> 0)) t (A.dropWhile (> 0))
- prop (desc ++ " scan") $ transform (scanl' (+) 0) t (A.scanl' (+) 0)
+ transform (dropWhile (> 0)) t (S.dropWhile (> 0))
+ prop (desc ++ " scan") $ transform (scanl' (+) 0) t (S.scanl' (+) 0)
wrapMaybe :: Eq a1 => ([a1] -> a2) -> [a1] -> Maybe a2
wrapMaybe f =
@@ -441,17 +469,17 @@ eliminationOps
-> Spec
eliminationOps constr desc t = do
-- Elimination
- prop (desc ++ " null") $ eliminateOp constr null $ A.null . t
+ prop (desc ++ " null") $ eliminateOp constr null $ S.null . t
prop (desc ++ " foldl") $
- eliminateOp constr (foldl' (+) 0) $ (A.foldl' (+) 0) . t
- prop (desc ++ " all") $ eliminateOp constr (all even) $ (A.all even) . t
- prop (desc ++ " any") $ eliminateOp constr (any even) $ (A.any even) . t
- prop (desc ++ " length") $ eliminateOp constr length $ A.length . t
- prop (desc ++ " sum") $ eliminateOp constr sum $ A.sum . t
- prop (desc ++ " product") $ eliminateOp constr product $ A.product . t
+ eliminateOp constr (foldl' (+) 0) $ (S.foldl' (+) 0) . t
+ prop (desc ++ " all") $ eliminateOp constr (all even) $ (S.all even) . t
+ prop (desc ++ " any") $ eliminateOp constr (any even) $ (S.any even) . t
+ prop (desc ++ " length") $ eliminateOp constr length $ S.length . t
+ prop (desc ++ " sum") $ eliminateOp constr sum $ S.sum . t
+ prop (desc ++ " product") $ eliminateOp constr product $ S.product . t
- prop (desc ++ " maximum") $ eliminateOp constr (wrapMaybe maximum) $ A.maximum . t
- prop (desc ++ " minimum") $ eliminateOp constr (wrapMaybe minimum) $ A.minimum . t
+ prop (desc ++ " maximum") $ eliminateOp constr (wrapMaybe maximum) $ S.maximum . t
+ prop (desc ++ " minimum") $ eliminateOp constr (wrapMaybe minimum) $ S.minimum . t
-- head/tail/last may depend on the order in case of parallel streams
-- so we test these only for serial streams.
@@ -461,13 +489,13 @@ serialEliminationOps
-> (t IO Int -> SerialT IO Int)
-> Spec
serialEliminationOps constr desc t = do
- prop (desc ++ " head") $ eliminateOp constr (wrapMaybe head) $ A.head . t
+ prop (desc ++ " head") $ eliminateOp constr (wrapMaybe head) $ S.head . t
prop (desc ++ " tail") $ eliminateOp constr (wrapMaybe tail) $ \x -> do
- r <- A.tail (t x)
+ r <- S.tail (t x)
case r of
Nothing -> return Nothing
- Just s -> A.toList s >>= return . Just
- prop (desc ++ " last") $ eliminateOp constr (wrapMaybe last) $ A.last . t
+ Just s -> S.toList s >>= return . Just
+ prop (desc ++ " last") $ eliminateOp constr (wrapMaybe last) $ S.last . t
transformOpsWord8
:: ([Word8] -> t IO Word8)
@@ -475,8 +503,8 @@ transformOpsWord8
-> (t IO Word8 -> SerialT IO Word8)
-> Spec
transformOpsWord8 constr desc t = do
- prop (desc ++ " elem") $ elemOp constr t A.elem elem
- prop (desc ++ " elem") $ elemOp constr t A.notElem notElem
+ prop (desc ++ " elem") $ elemOp constr t S.elem elem
+ prop (desc ++ " elem") $ elemOp constr t S.notElem notElem
-- XXX concatenate streams of multiple elements rather than single elements
semigroupOps
@@ -503,7 +531,7 @@ applicativeOps
-> Property
applicativeOps constr t eq (a, b) = withMaxSuccess maxTestCount $
monadicIO $ do
- stream <- run ((A.toList . t) ((,) <$> (constr a) <*> (constr b)))
+ stream <- run ((S.toList . t) ((,) <$> (constr a) <*> (constr b)))
let list = (,) <$> a <*> b
equals eq stream list
@@ -516,16 +544,16 @@ zipApplicative
-> Property
zipApplicative constr t eq (a, b) = withMaxSuccess maxTestCount $
monadicIO $ do
- stream1 <- run ((A.toList . t) ((,) <$> (constr a) <*> (constr b)))
- stream2 <- run ((A.toList . t) (pure (,) <*> (constr a) <*> (constr b)))
- stream3 <- run ((A.toList . t) (A.zipWith (,) (constr a) (constr b)))
+ stream1 <- run ((S.toList . t) ((,) <$> (constr a) <*> (constr b)))
+ stream2 <- run ((S.toList . t) (pure (,) <*> (constr a) <*> (constr b)))
+ stream3 <- run ((S.toList . t) (S.zipWith (,) (constr a) (constr b)))
let list = getZipList $ (,) <$> ZipList a <*> ZipList b
equals eq stream1 list
equals eq stream2 list
equals eq stream3 list
zipMonadic
- :: (IsStream t, Monad (t IO))
+ :: IsStream t
=> ([Int] -> t IO Int)
-> (t IO (Int, Int) -> SerialT IO (Int, Int))
-> ([(Int, Int)] -> [(Int, Int)] -> Bool)
@@ -535,12 +563,12 @@ zipMonadic constr t eq (a, b) = withMaxSuccess maxTestCount $
monadicIO $ do
stream1 <-
run
- ((A.toList . t)
- (A.zipWithM (\x y -> return (x, y)) (constr a) (constr b)))
+ ((S.toList . t)
+ (S.zipWithM (\x y -> return (x, y)) (constr a) (constr b)))
stream2 <-
run
- ((A.toList . t)
- (A.zipAsyncWithM (\x y -> return (x, y)) (constr a) (constr b)))
+ ((S.toList . t)
+ (S.zipAsyncWithM (\x y -> return (x, y)) (constr a) (constr b)))
let list = getZipList $ (,) <$> ZipList a <*> ZipList b
equals eq stream1 list
equals eq stream2 list
@@ -553,7 +581,7 @@ monadThen
-> ([Int], [Int])
-> Property
monadThen constr t eq (a, b) = withMaxSuccess maxTestCount $ monadicIO $ do
- stream <- run ((A.toList . t) ((constr a) >> (constr b)))
+ stream <- run ((S.toList . t) ((constr a) >> (constr b)))
let list = a >> b
equals eq stream list
@@ -568,11 +596,28 @@ monadBind constr t eq (a, b) = withMaxSuccess maxTestCount $
monadicIO $ do
stream <-
run
- ((A.toList . t)
+ ((S.toList . t)
((constr a) >>= \x -> (constr b) >>= return . (+ x)))
let list = a >>= \x -> b >>= return . (+ x)
equals eq stream list
+constructionConcurrent :: Int -> Int -> Spec
+constructionConcurrent thr buf = do
+    describe (" threads = " ++ show thr ++ ", buffer = " ++ show buf) $ do
+ prop "asyncly replicateM" $ constructWithReplicateM asyncly thr buf
+ prop "wAsyncly replicateM" $ constructWithReplicateM wAsyncly thr buf
+ prop "parallely replicateM" $ constructWithReplicateM parallely thr buf
+ prop "aheadly replicateM" $ constructWithReplicateM aheadly thr buf
+
+-- XXX test all concurrent ops for all these combinations
+concurrentAll :: String -> (Int -> Int -> Spec) -> Spec
+concurrentAll desc f = do
+ describe desc $ do
+ f 0 0 -- default
+ f 0 1 -- single buffer
+ f 1 0 -- single thread
+ f (-1) (-1) -- unbounded threads and buffer
+
main :: IO ()
main = hspec $ do
let folded :: IsStream t => [a] -> t IO a
@@ -582,33 +627,33 @@ main = hspec $ do
_ -> foldMapWith (<>) return xs
)
describe "Construction" $ do
- -- XXX test for all types of streams
- prop "serially replicateM" $ constructWithReplicateM serially
+ prop "serially replicateM" $ constructWithReplicateM serially 0 0
it "iterate" $
- (A.toList . serially . (A.take 100) $ (A.iterate (+ 1) (0 :: Int)))
+ (S.toList . serially . (S.take 100) $ (S.iterate (+ 1) (0 :: Int)))
`shouldReturn` (take 100 $ iterate (+ 1) 0)
-
+ -- XXX test for all types of streams
it "iterateM" $ do
let addM = (\ y -> return (y + 1))
- A.toList . serially . (A.take 100) $ A.iterateM addM (0 :: Int)
+ S.toList . serially . (S.take 100) $ S.iterateM addM (0 :: Int)
`shouldReturn` (take 100 $ iterate (+ 1) 0)
+ concurrentAll "Construction" constructionConcurrent
describe "Functor operations" $ do
- functorOps A.fromFoldable "serially" serially (==)
+ functorOps S.fromFoldable "serially" serially (==)
functorOps folded "serially folded" serially (==)
- functorOps A.fromFoldable "wSerially" wSerially (==)
+ functorOps S.fromFoldable "wSerially" wSerially (==)
functorOps folded "wSerially folded" wSerially (==)
- functorOps A.fromFoldable "aheadly" aheadly (==)
+ functorOps S.fromFoldable "aheadly" aheadly (==)
functorOps folded "aheadly folded" aheadly (==)
- functorOps A.fromFoldable "asyncly" asyncly sortEq
+ functorOps S.fromFoldable "asyncly" asyncly sortEq
functorOps folded "asyncly folded" asyncly sortEq
- functorOps A.fromFoldable "wAsyncly" wAsyncly sortEq
+ functorOps S.fromFoldable "wAsyncly" wAsyncly sortEq
functorOps folded "wAsyncly folded" wAsyncly sortEq
- functorOps A.fromFoldable "parallely" parallely sortEq
+ functorOps S.fromFoldable "parallely" parallely sortEq
functorOps folded "parallely folded" parallely sortEq
- functorOps A.fromFoldable "zipSerially" zipSerially (==)
+ functorOps S.fromFoldable "zipSerially" zipSerially (==)
functorOps folded "zipSerially folded" zipSerially (==)
- functorOps A.fromFoldable "zipAsyncly" zipAsyncly (==)
+ functorOps S.fromFoldable "zipAsyncly" zipAsyncly (==)
functorOps folded "zipAsyncly folded" zipAsyncly (==)
describe "Semigroup operations" $ do
@@ -625,43 +670,43 @@ main = hspec $ do
-- The tests using sorted equality are weaker tests
-- We need to have stronger unit tests for all those
-- XXX applicative with three arguments
- prop "serially applicative" $ applicativeOps A.fromFoldable serially (==)
+ prop "serially applicative" $ applicativeOps S.fromFoldable serially (==)
prop "serially applicative folded" $ applicativeOps folded serially (==)
- prop "aheadly applicative" $ applicativeOps A.fromFoldable aheadly (==)
+ prop "aheadly applicative" $ applicativeOps S.fromFoldable aheadly (==)
prop "aheadly applicative folded" $ applicativeOps folded aheadly (==)
- prop "wSerially applicative" $ applicativeOps A.fromFoldable wSerially sortEq
+ prop "wSerially applicative" $ applicativeOps S.fromFoldable wSerially sortEq
prop "wSerially applicative folded" $ applicativeOps folded wSerially sortEq
- prop "asyncly applicative" $ applicativeOps A.fromFoldable asyncly sortEq
+ prop "asyncly applicative" $ applicativeOps S.fromFoldable asyncly sortEq
prop "asyncly applicative folded" $ applicativeOps folded asyncly sortEq
prop "wAsyncly applicative folded" $ applicativeOps folded wAsyncly sortEq
prop "parallely applicative folded" $ applicativeOps folded parallely sortEq
describe "Zip operations" $ do
- prop "zipSerially applicative" $ zipApplicative A.fromFoldable zipSerially (==)
+ prop "zipSerially applicative" $ zipApplicative S.fromFoldable zipSerially (==)
prop "zipSerially applicative folded" $ zipApplicative folded zipSerially (==)
- prop "zipAsyncly applicative" $ zipApplicative A.fromFoldable zipAsyncly (==)
+ prop "zipAsyncly applicative" $ zipApplicative S.fromFoldable zipAsyncly (==)
prop "zipAsyncly applicative folded" $ zipApplicative folded zipAsyncly (==)
- prop "zip monadic serially" $ zipMonadic A.fromFoldable serially (==)
+ prop "zip monadic serially" $ zipMonadic S.fromFoldable serially (==)
prop "zip monadic serially folded" $ zipMonadic folded serially (==)
- prop "zip monadic aheadly" $ zipMonadic A.fromFoldable aheadly (==)
+ prop "zip monadic aheadly" $ zipMonadic S.fromFoldable aheadly (==)
prop "zip monadic aheadly folded" $ zipMonadic folded aheadly (==)
- prop "zip monadic wSerially" $ zipMonadic A.fromFoldable wSerially (==)
+ prop "zip monadic wSerially" $ zipMonadic S.fromFoldable wSerially (==)
prop "zip monadic wSerially folded" $ zipMonadic folded wSerially (==)
- prop "zip monadic asyncly" $ zipMonadic A.fromFoldable asyncly (==)
+ prop "zip monadic asyncly" $ zipMonadic S.fromFoldable asyncly (==)
prop "zip monadic asyncly folded" $ zipMonadic folded asyncly (==)
- prop "zip monadic wAsyncly" $ zipMonadic A.fromFoldable wAsyncly (==)
+ prop "zip monadic wAsyncly" $ zipMonadic S.fromFoldable wAsyncly (==)
prop "zip monadic wAsyncly folded" $ zipMonadic folded wAsyncly (==)
- prop "zip monadic parallely" $ zipMonadic A.fromFoldable parallely (==)
+ prop "zip monadic parallely" $ zipMonadic S.fromFoldable parallely (==)
prop "zip monadic parallely folded" $ zipMonadic folded parallely (==)
describe "Monad operations" $ do
- prop "serially monad then" $ monadThen A.fromFoldable serially (==)
- prop "aheadly monad then" $ monadThen A.fromFoldable aheadly (==)
- prop "wSerially monad then" $ monadThen A.fromFoldable wSerially sortEq
- prop "asyncly monad then" $ monadThen A.fromFoldable asyncly sortEq
- prop "wAsyncly monad then" $ monadThen A.fromFoldable wAsyncly sortEq
- prop "parallely monad then" $ monadThen A.fromFoldable parallely sortEq
+ prop "serially monad then" $ monadThen S.fromFoldable serially (==)
+ prop "aheadly monad then" $ monadThen S.fromFoldable aheadly (==)
+ prop "wSerially monad then" $ monadThen S.fromFoldable wSerially sortEq
+ prop "asyncly monad then" $ monadThen S.fromFoldable asyncly sortEq
+ prop "wAsyncly monad then" $ monadThen S.fromFoldable wAsyncly sortEq
+ prop "parallely monad then" $ monadThen S.fromFoldable parallely sortEq
prop "serially monad then folded" $ monadThen folded serially (==)
prop "aheadly monad then folded" $ monadThen folded aheadly (==)
@@ -670,22 +715,22 @@ main = hspec $ do
prop "wAsyncly monad then folded" $ monadThen folded wAsyncly sortEq
prop "parallely monad then folded" $ monadThen folded parallely sortEq
- prop "serially monad bind" $ monadBind A.fromFoldable serially (==)
- prop "aheadly monad bind" $ monadBind A.fromFoldable aheadly (==)
- prop "wSerially monad bind" $ monadBind A.fromFoldable wSerially sortEq
- prop "asyncly monad bind" $ monadBind A.fromFoldable asyncly sortEq
- prop "wAsyncly monad bind" $ monadBind A.fromFoldable wAsyncly sortEq
- prop "parallely monad bind" $ monadBind A.fromFoldable parallely sortEq
+ prop "serially monad bind" $ monadBind S.fromFoldable serially (==)
+ prop "aheadly monad bind" $ monadBind S.fromFoldable aheadly (==)
+ prop "wSerially monad bind" $ monadBind S.fromFoldable wSerially sortEq
+ prop "asyncly monad bind" $ monadBind S.fromFoldable asyncly sortEq
+ prop "wAsyncly monad bind" $ monadBind S.fromFoldable wAsyncly sortEq
+ prop "parallely monad bind" $ monadBind S.fromFoldable parallely sortEq
describe "Stream transform operations" $ do
- transformOps A.fromFoldable "serially" serially (==)
- transformOps A.fromFoldable "aheadly" aheadly (==)
- transformOps A.fromFoldable "wSerially" wSerially (==)
- transformOps A.fromFoldable "zipSerially" zipSerially (==)
- transformOps A.fromFoldable "zipAsyncly" zipAsyncly (==)
- transformOps A.fromFoldable "asyncly" asyncly sortEq
- transformOps A.fromFoldable "wAsyncly" wAsyncly sortEq
- transformOps A.fromFoldable "parallely" parallely sortEq
+ transformOps S.fromFoldable "serially" serially (==)
+ transformOps S.fromFoldable "aheadly" aheadly (==)
+ transformOps S.fromFoldable "wSerially" wSerially (==)
+ transformOps S.fromFoldable "zipSerially" zipSerially (==)
+ transformOps S.fromFoldable "zipAsyncly" zipAsyncly (==)
+ transformOps S.fromFoldable "asyncly" asyncly sortEq
+ transformOps S.fromFoldable "wAsyncly" wAsyncly sortEq
+ transformOps S.fromFoldable "parallely" parallely sortEq
transformOps folded "serially folded" serially (==)
transformOps folded "aheadly folded" aheadly (==)
@@ -696,14 +741,14 @@ main = hspec $ do
transformOps folded "wAsyncly folded" wAsyncly sortEq
transformOps folded "parallely folded" parallely sortEq
- transformOpsWord8 A.fromFoldable "serially" serially
- transformOpsWord8 A.fromFoldable "aheadly" aheadly
- transformOpsWord8 A.fromFoldable "wSerially" wSerially
- transformOpsWord8 A.fromFoldable "zipSerially" zipSerially
- transformOpsWord8 A.fromFoldable "zipAsyncly" zipAsyncly
- transformOpsWord8 A.fromFoldable "asyncly" asyncly
- transformOpsWord8 A.fromFoldable "wAsyncly" wAsyncly
- transformOpsWord8 A.fromFoldable "parallely" parallely
+ transformOpsWord8 S.fromFoldable "serially" serially
+ transformOpsWord8 S.fromFoldable "aheadly" aheadly
+ transformOpsWord8 S.fromFoldable "wSerially" wSerially
+ transformOpsWord8 S.fromFoldable "zipSerially" zipSerially
+ transformOpsWord8 S.fromFoldable "zipAsyncly" zipAsyncly
+ transformOpsWord8 S.fromFoldable "asyncly" asyncly
+ transformOpsWord8 S.fromFoldable "wAsyncly" wAsyncly
+ transformOpsWord8 S.fromFoldable "parallely" parallely
transformOpsWord8 folded "serially folded" serially
transformOpsWord8 folded "aheadly folded" aheadly
@@ -716,10 +761,10 @@ main = hspec $ do
-- XXX add tests with outputQueue size set to 1
describe "Stream concurrent operations" $ do
- concurrentOps A.fromFoldable "aheadly" aheadly (==)
- concurrentOps A.fromFoldable "asyncly" asyncly sortEq
- concurrentOps A.fromFoldable "wAsyncly" wAsyncly sortEq
- concurrentOps A.fromFoldable "parallely" parallely sortEq
+ concurrentOps S.fromFoldable "aheadly" aheadly (==)
+ concurrentOps S.fromFoldable "asyncly" asyncly sortEq
+ concurrentOps S.fromFoldable "wAsyncly" wAsyncly sortEq
+ concurrentOps S.fromFoldable "parallely" parallely sortEq
concurrentOps folded "aheadly folded" aheadly (==)
concurrentOps folded "asyncly folded" asyncly sortEq
@@ -736,14 +781,14 @@ main = hspec $ do
-- These tests are specifically targeted towards detecting illegal sharing
-- of SVar across concurrent streams.
describe "Stream transform and combine operations" $ do
- transformCombineOpsCommon A.fromFoldable "serially" serially (==)
- transformCombineOpsCommon A.fromFoldable "aheadly" aheadly (==)
- transformCombineOpsCommon A.fromFoldable "wSerially" wSerially sortEq
- transformCombineOpsCommon A.fromFoldable "zipSerially" zipSerially (==)
- transformCombineOpsCommon A.fromFoldable "zipAsyncly" zipAsyncly (==)
- transformCombineOpsCommon A.fromFoldable "asyncly" asyncly sortEq
- transformCombineOpsCommon A.fromFoldable "wAsyncly" wAsyncly sortEq
- transformCombineOpsCommon A.fromFoldable "parallely" parallely sortEq
+ transformCombineOpsCommon S.fromFoldable "serially" serially (==)
+ transformCombineOpsCommon S.fromFoldable "aheadly" aheadly (==)
+ transformCombineOpsCommon S.fromFoldable "wSerially" wSerially sortEq
+ transformCombineOpsCommon S.fromFoldable "zipSerially" zipSerially (==)
+ transformCombineOpsCommon S.fromFoldable "zipAsyncly" zipAsyncly (==)
+ transformCombineOpsCommon S.fromFoldable "asyncly" asyncly sortEq
+ transformCombineOpsCommon S.fromFoldable "wAsyncly" wAsyncly sortEq
+ transformCombineOpsCommon S.fromFoldable "parallely" parallely sortEq
transformCombineOpsCommon folded "serially" serially (==)
transformCombineOpsCommon folded "aheadly" aheadly (==)
@@ -754,20 +799,20 @@ main = hspec $ do
transformCombineOpsCommon folded "wAsyncly" wAsyncly sortEq
transformCombineOpsCommon folded "parallely" parallely sortEq
- transformCombineOpsOrdered A.fromFoldable "serially" serially (==)
- transformCombineOpsOrdered A.fromFoldable "serially" aheadly (==)
- transformCombineOpsOrdered A.fromFoldable "zipSerially" zipSerially (==)
- transformCombineOpsOrdered A.fromFoldable "zipAsyncly" zipAsyncly (==)
+ transformCombineOpsOrdered S.fromFoldable "serially" serially (==)
+ transformCombineOpsOrdered S.fromFoldable "serially" aheadly (==)
+ transformCombineOpsOrdered S.fromFoldable "zipSerially" zipSerially (==)
+ transformCombineOpsOrdered S.fromFoldable "zipAsyncly" zipAsyncly (==)
describe "Stream elimination operations" $ do
- eliminationOps A.fromFoldable "serially" serially
- eliminationOps A.fromFoldable "aheadly" aheadly
- eliminationOps A.fromFoldable "wSerially" wSerially
- eliminationOps A.fromFoldable "zipSerially" zipSerially
- eliminationOps A.fromFoldable "zipAsyncly" zipAsyncly
- eliminationOps A.fromFoldable "asyncly" asyncly
- eliminationOps A.fromFoldable "wAsyncly" wAsyncly
- eliminationOps A.fromFoldable "parallely" parallely
+ eliminationOps S.fromFoldable "serially" serially
+ eliminationOps S.fromFoldable "aheadly" aheadly
+ eliminationOps S.fromFoldable "wSerially" wSerially
+ eliminationOps S.fromFoldable "zipSerially" zipSerially
+ eliminationOps S.fromFoldable "zipAsyncly" zipAsyncly
+ eliminationOps S.fromFoldable "asyncly" asyncly
+ eliminationOps S.fromFoldable "wAsyncly" wAsyncly
+ eliminationOps S.fromFoldable "parallely" parallely
eliminationOps folded "serially folded" serially
eliminationOps folded "aheadly folded" aheadly
@@ -778,12 +823,14 @@ main = hspec $ do
eliminationOps folded "wAsyncly folded" wAsyncly
eliminationOps folded "parallely folded" parallely
+ -- XXX Add a test where we chain all transformation APIs and make sure that
+ -- the state is being passed through all of them.
describe "Stream serial elimination operations" $ do
- serialEliminationOps A.fromFoldable "serially" serially
- serialEliminationOps A.fromFoldable "aheadly" aheadly
- serialEliminationOps A.fromFoldable "wSerially" wSerially
- serialEliminationOps A.fromFoldable "zipSerially" zipSerially
- serialEliminationOps A.fromFoldable "zipAsyncly" zipAsyncly
+ serialEliminationOps S.fromFoldable "serially" serially
+ serialEliminationOps S.fromFoldable "aheadly" aheadly
+ serialEliminationOps S.fromFoldable "wSerially" wSerially
+ serialEliminationOps S.fromFoldable "zipSerially" zipSerially
+ serialEliminationOps S.fromFoldable "zipAsyncly" zipAsyncly
serialEliminationOps folded "serially folded" serially
serialEliminationOps folded "aheadly folded" aheadly
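
Apart from the added test cases, every call site in test/Main.hs switches from A.fromFoldable to S.fromFoldable, which presumably reflects a change of the qualified import alias for Streamly.Prelude in this module; the import line itself falls outside these hunks, so the old A alias is an assumption here:

    -- before (assumed): import qualified Streamly.Prelude as A
    -- after:            import qualified Streamly.Prelude as S
    import qualified Streamly.Prelude as S

    main :: IO ()
    main = S.toList (S.fromFoldable [1 .. 5 :: Int]) >>= print
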
diff --git a/test/loops.hs b/test/loops.hs
index 1c03013..cccf9e8 100644
--- a/test/loops.hs
+++ b/test/loops.hs
@@ -1,6 +1,6 @@
import Streamly
import System.IO (stdout, hSetBuffering, BufferMode(LineBuffering))
-import Streamly.Prelude (nil, once)
+import Streamly.Prelude (nil, yieldM)
main = do
hSetBuffering stdout LineBuffering
@@ -8,32 +8,32 @@ main = do
putStrLn $ "\nloopTail:\n"
runStream $ do
x <- loopTail 0
- once $ print (x :: Int)
+ yieldM $ print (x :: Int)
putStrLn $ "\nloopHead:\n"
runStream $ do
x <- loopHead 0
- once $ print (x :: Int)
+ yieldM $ print (x :: Int)
putStrLn $ "\nloopTailA:\n"
runStream $ do
x <- loopTailA 0
- once $ print (x :: Int)
+ yieldM $ print (x :: Int)
putStrLn $ "\nloopHeadA:\n"
runStream $ do
x <- loopHeadA 0
- once $ print (x :: Int)
+ yieldM $ print (x :: Int)
putStrLn $ "\nwSerial:\n"
runStream $ do
x <- (return 0 <> return 1) `wSerial` (return 100 <> return 101)
- once $ print (x :: Int)
+ yieldM $ print (x :: Int)
putStrLn $ "\nParallel interleave:\n"
runStream $ do
x <- (return 0 <> return 1) `wAsync` (return 100 <> return 101)
- once $ print (x :: Int)
+ yieldM $ print (x :: Int)
where
@@ -45,7 +45,7 @@ main = do
-- stream. Interleaves the generator and the consumer.
loopTail :: Int -> Serial Int
loopTail x = do
- once $ putStrLn "LoopTail..."
+ yieldM $ putStrLn "LoopTail..."
return x <> (if x < 3 then loopTail (x + 1) else nil)
-- Loops and then generates a value. The consumer can run only after the
@@ -53,7 +53,7 @@ main = do
-- at all.
loopHead :: Int -> Serial Int
loopHead x = do
- once $ putStrLn "LoopHead..."
+ yieldM $ putStrLn "LoopHead..."
(if x < 3 then loopHead (x + 1) else nil) <> return x
-------------------------------------------------------------------------------
@@ -62,12 +62,12 @@ main = do
loopTailA :: Int -> Serial Int
loopTailA x = do
- once $ putStrLn "LoopTailA..."
+ yieldM $ putStrLn "LoopTailA..."
return x `async` (if x < 3 then loopTailA (x + 1) else nil)
loopHeadA :: Int -> Serial Int
loopHeadA x = do
- once $ putStrLn "LoopHeadA..."
+ yieldM $ putStrLn "LoopHeadA..."
(if x < 3 then loopHeadA (x + 1) else nil) `async` return x
-------------------------------------------------------------------------------
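
The only change to test/loops.hs is mechanical: the deprecated once is replaced by its new name yieldM, which lifts a monadic action into a single-element stream. A minimal sketch of the renamed function in isolation:

    import Streamly (runStream)
    import Streamly.Prelude (yieldM)

    main :: IO ()
    main = runStream $ do
      x <- yieldM (return (21 :: Int))  -- lift an effect, bind its single result
      yieldM $ print (x * 2)            -- prints 42
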
diff --git a/test/nested-loops.hs b/test/nested-loops.hs
index 2d38990..6bde54a 100644
--- a/test/nested-loops.hs
+++ b/test/nested-loops.hs
@@ -2,23 +2,23 @@ import Control.Concurrent (myThreadId)
import System.IO (stdout, hSetBuffering, BufferMode(LineBuffering))
import System.Random (randomIO)
import Streamly
-import Streamly.Prelude (nil, once)
+import Streamly.Prelude (nil, yieldM)
main = runStream $ do
- once $ hSetBuffering stdout LineBuffering
+ yieldM $ hSetBuffering stdout LineBuffering
x <- loop "A " 2
y <- loop "B " 2
- once $ myThreadId >>= putStr . show
+ yieldM $ myThreadId >>= putStr . show
>> putStr " "
>> print (x, y)
where
-- we can just use
- -- parallely $ mconcat $ replicate n $ once (...)
+ -- parallely $ mconcat $ replicate n $ yieldM (...)
loop :: String -> Int -> SerialT IO String
loop name n = do
- rnd <- once (randomIO :: IO Int)
+ rnd <- yieldM (randomIO :: IO Int)
let result = (name ++ show rnd)
repeat = if n > 1 then loop name (n - 1) else nil
in (return result) `wAsync` repeat
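
The nested-loops example builds each inner loop by recursing through wAsync, so the current element and the rest of the loop are evaluated concurrently, and the outer do-block nests the two loops monadically. A scaled-down, self-contained sketch of the same pattern:

    import Streamly (SerialT, runStream, wAsync)
    import Streamly.Prelude (nil, yieldM)

    -- Yield n, then recurse concurrently for the remaining values.
    countdown :: Int -> SerialT IO Int
    countdown n = return n `wAsync` (if n > 1 then countdown (n - 1) else nil)

    main :: IO ()
    main = runStream $ do
      x <- countdown 3
      y <- countdown 2
      yieldM $ print (x, y)  -- every (x, y) combination, in a nondeterministic order
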
diff --git a/test/parallel-loops.hs b/test/parallel-loops.hs
index d98e94a..4e8819b 100644
--- a/test/parallel-loops.hs
+++ b/test/parallel-loops.hs
@@ -8,19 +8,19 @@ main = do
hSetBuffering stdout LineBuffering
runStream $ do
x <- S.take 10 $ loop "A" `parallel` loop "B"
- S.once $ myThreadId >>= putStr . show
+ S.yieldM $ myThreadId >>= putStr . show
>> putStr " got "
>> print x
where
-- we can just use
- -- parallely $ cycle1 $ once (...)
+ -- parallely $ cycle1 $ yieldM (...)
loop :: String -> Serial (String, Int)
loop name = do
- S.once $ threadDelay 1000000
- rnd <- S.once (randomIO :: IO Int)
- S.once $ myThreadId >>= putStr . show
+ S.yieldM $ threadDelay 1000000
+ rnd <- S.yieldM (randomIO :: IO Int)
+ S.yieldM $ myThreadId >>= putStr . show
>> putStr " yielding "
>> print rnd
return (name, rnd) `parallel` loop name
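
parallel-loops.hs combines two infinite effectful loops with parallel and bounds the merged stream with S.take before consuming it, which is what lets runStream terminate. A minimal sketch of that shape, with a hypothetical ticker in place of the random-number loop above:

    import Control.Concurrent (threadDelay)
    import Streamly (SerialT, runStream, parallel)
    import qualified Streamly.Prelude as S

    -- An infinite loop of effects, in the same shape as `loop` above.
    ticker :: String -> SerialT IO String
    ticker name = do
      S.yieldM $ threadDelay 100000  -- 0.1 s between elements
      return name `parallel` ticker name

    main :: IO ()
    main = runStream $ do
      x <- S.take 6 $ ticker "A" `parallel` ticker "B"  -- stop after six elements
      S.yieldM $ putStrLn x
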