author     HerbertValerioRiedel <>          2019-09-01 12:31:00 (GMT)
committer  hdiff <hdiff@hdiff.luite.com>    2019-09-01 12:31:00 (GMT)
commit     9748c9099c39327f49711afeecf9d1e9d08958e2 (patch)
tree       073333a4ac58ce9498790d5432ba51952ff8b6c2
parent     c9806f3c23f819813a63dd8d745edf1cce7aa50e (diff)

version 0.5.2.0 (refs: HEAD, 0.5.2.0, master)
-rwxr-xr-x (was -rw-r--r--)  CHANGES.md | 6
-rwxr-xr-x (was -rw-r--r--)  README.md | 0
-rw-r--r--                   Setup.hs | 6
-rw-r--r--                   cassava.cabal | 27
-rwxr-xr-x (was -rw-r--r--)  examples/IncrementalIndexedBasedDecode.hs | 0
-rwxr-xr-x (was -rw-r--r--)  examples/IncrementalNamedBasedEncode.hs | 0
-rwxr-xr-x (was -rw-r--r--)  examples/IndexBasedDecode.hs | 0
-rwxr-xr-x (was -rw-r--r--)  examples/IndexBasedGeneric.hs | 0
-rwxr-xr-x (was -rw-r--r--)  examples/NamedBasedDecode.hs | 0
-rwxr-xr-x (new file)        examples/NamedBasedExplicitDecode.hs | 21
-rwxr-xr-x (was -rw-r--r--)  examples/NamedBasedGeneric.hs | 0
-rwxr-xr-x (was -rw-r--r--)  examples/StreamingIndexBasedDecode.hs | 0
-rw-r--r--                   src/Data/Csv.hs (renamed from Data/Csv.hs) | 4
-rw-r--r--                   src/Data/Csv/Builder.hs (renamed from Data/Csv/Builder.hs) | 6
-rw-r--r--                   src/Data/Csv/Conversion.hs (renamed from Data/Csv/Conversion.hs) | 32
-rw-r--r--                   src/Data/Csv/Conversion/Internal.hs (renamed from Data/Csv/Conversion/Internal.hs) | 26
-rw-r--r--                   src/Data/Csv/Encoding.hs (renamed from Data/Csv/Encoding.hs) | 53
-rw-r--r--                   src/Data/Csv/Incremental.hs (renamed from Data/Csv/Incremental.hs) | 50
-rw-r--r--                   src/Data/Csv/Parser.hs (renamed from Data/Csv/Parser.hs) | 0
-rw-r--r--                   src/Data/Csv/Streaming.hs (renamed from Data/Csv/Streaming.hs) | 2
-rw-r--r--                   src/Data/Csv/Types.hs (renamed from Data/Csv/Types.hs) | 0
-rw-r--r--                   src/Data/Csv/Util.hs (renamed from Data/Csv/Util.hs) | 0
22 files changed, 174 insertions, 59 deletions
diff --git a/CHANGES.md b/CHANGES.md
index d0d54c6..e97cf00 100644..100755
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,9 @@
+## Version 0.5.2.0
+
+ * Add `FromField`/`ToField` instances for `Identity` and `Const` (#158)
+ * New `typeclass`-less decoding functions `decodeWithP` and `decodeByNameWithP` (#67,#167)
+ * Support for final phase of MFP / base-4.13
+
## Version 0.5.1.0
* Add `FromField`/`ToField` instance for `Natural` (#141,#142)
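
The entries above add two typeclass-less entry points. A minimal sketch of how the index-based decodeWithP (introduced below in src/Data/Csv/Encoding.hs) could be used; the parseRow helper and the inline rows are invented for illustration, while decodeWithP, defaultDecodeOptions, NoHeader and (.!) come from Data.Csv:

{-# LANGUAGE OverloadedStrings #-}

import Data.Csv
import qualified Data.Vector as V

-- Hypothetical positional parser: decodes (name, salary) without a
-- FromRecord instance.
parseRow :: Record -> Parser (String, Int)
parseRow r = (,) <$> r .! 0 <*> r .! 1

main :: IO ()
main =
  -- The parser is passed explicitly instead of being picked via FromRecord.
  case decodeWithP parseRow defaultDecodeOptions NoHeader "john,50000\njane,60000\n" of
    Left err -> putStrLn err
    Right v  -> V.forM_ v print
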
diff --git a/README.md b/README.md
index 8c3ed87..8c3ed87 100644..100755
--- a/README.md
+++ b/README.md
diff --git a/Setup.hs b/Setup.hs
index 9a994af..df27ae3 100644
--- a/Setup.hs
+++ b/Setup.hs
@@ -1,2 +1,8 @@
import Distribution.Simple
main = defaultMain
+
+
+
+
+
+
diff --git a/cassava.cabal b/cassava.cabal
index 467cb7f..82977ce 100644
--- a/cassava.cabal
+++ b/cassava.cabal
@@ -1,6 +1,6 @@
cabal-version: 1.12
Name: cassava
-Version: 0.5.1.0
+Version: 0.5.2.0
Synopsis: A CSV parsing and encoding library
Description: {
@@ -42,7 +42,7 @@ Build-type: Simple
Extra-source-files: examples/*.hs,
CHANGES.md,
README.md
-Tested-with: GHC ==8.2.1, GHC ==8.0.2, GHC ==8.0.1, GHC ==7.10.3, GHC ==7.8.4, GHC ==7.6.3, GHC ==7.4.2
+Tested-with: GHC==8.8.1, GHC==8.6.5, GHC==8.4.4, GHC ==8.2.2, GHC ==8.0.2, GHC ==7.10.3, GHC ==7.8.4, GHC ==7.6.3, GHC ==7.4.2
----------------------------------------------------------------------------
@@ -77,6 +77,8 @@ Library
DataKinds
PolyKinds
+ hs-source-dirs: src
+
Exposed-modules:
Data.Csv
Data.Csv.Builder
@@ -94,13 +96,14 @@ Library
Build-depends:
array >= 0.4 && < 0.6,
attoparsec >= 0.11.3.0 && < 0.14,
- base >= 4.5 && < 5,
+ base >= 4.5 && < 4.14,
bytestring >= 0.9.2 && < 0.11,
- containers >= 0.4.2 && < 0.6,
+ containers >= 0.4.2 && < 0.7,
deepseq >= 1.1 && < 1.5,
- hashable < 1.3,
+ hashable < 1.4,
scientific >= 0.3.4.7 && < 0.4,
text < 1.3,
+ transformers >= 0.2 && < 0.6,
unordered-containers < 0.3,
vector >= 0.8 && < 0.13,
Only >= 0.1 && < 0.1.1
@@ -122,10 +125,14 @@ Library
-- https://ghc.haskell.org/trac/ghc/wiki/Migration/8.0#Recommendationsforforward-compatibility
if impl(ghc >= 8.0)
- ghc-options: -Wcompat -Wnoncanonical-monad-instances -Wnoncanonical-monadfail-instances
+ ghc-options: -Wcompat -Wnoncanonical-monad-instances
+ if impl(ghc >= 8.8)
+ ghc-options: -Wno-star-is-type
+ else
+ ghc-options: -Wnoncanonical-monadfail-instances
else
-- provide/emulate `Control.Monad.Fail` and `Data.Semigroups` API for pre-GHC8
- build-depends: fail == 4.9.*, semigroups == 0.18.*
+ build-depends: fail == 4.9.*, semigroups >= 0.18.2 && <0.20
if impl(ghc >= 8.2)
ghc-options: -Wcpp-undef
@@ -151,7 +158,7 @@ Test-suite unit-tests
, vector
-- extra dependencies not already used by lib:cassava
build-depends: HUnit < 1.7
- , QuickCheck == 2.10.*
+ , QuickCheck == 2.13.*
, quickcheck-instances >= 0.3.12 && < 0.4
, test-framework == 0.8.*
, test-framework-hunit == 0.3.*
@@ -165,14 +172,14 @@ Test-suite unit-tests
-- For Numeric.Natural
if impl(ghc < 7.10)
- build-depends: nats >= 1 && < 1.2
+ build-depends: nats
-- https://ghc.haskell.org/trac/ghc/wiki/Migration/8.0#Recommendationsforforward-compatibility
if impl(ghc >= 8.0)
ghc-options: -Wcompat -Wnoncanonical-monad-instances -Wnoncanonical-monadfail-instances
else
-- provide/emulate `Control.Monad.Fail` and `Data.Semigroups` API for pre-GHC8
- build-depends: fail == 4.9.*, semigroups == 0.18.*
+ build-depends: fail, semigroups
if impl(ghc >= 8.2)
ghc-options: -Wcpp-undef
diff --git a/examples/IncrementalIndexedBasedDecode.hs b/examples/IncrementalIndexedBasedDecode.hs
index b92ad45..b92ad45 100644..100755
--- a/examples/IncrementalIndexedBasedDecode.hs
+++ b/examples/IncrementalIndexedBasedDecode.hs
diff --git a/examples/IncrementalNamedBasedEncode.hs b/examples/IncrementalNamedBasedEncode.hs
index 706dcd1..706dcd1 100644..100755
--- a/examples/IncrementalNamedBasedEncode.hs
+++ b/examples/IncrementalNamedBasedEncode.hs
diff --git a/examples/IndexBasedDecode.hs b/examples/IndexBasedDecode.hs
index 209e04d..209e04d 100644..100755
--- a/examples/IndexBasedDecode.hs
+++ b/examples/IndexBasedDecode.hs
diff --git a/examples/IndexBasedGeneric.hs b/examples/IndexBasedGeneric.hs
index f214212..f214212 100644..100755
--- a/examples/IndexBasedGeneric.hs
+++ b/examples/IndexBasedGeneric.hs
diff --git a/examples/NamedBasedDecode.hs b/examples/NamedBasedDecode.hs
index d92a9ff..d92a9ff 100644..100755
--- a/examples/NamedBasedDecode.hs
+++ b/examples/NamedBasedDecode.hs
diff --git a/examples/NamedBasedExplicitDecode.hs b/examples/NamedBasedExplicitDecode.hs
new file mode 100755
index 0000000..aeea18e
--- /dev/null
+++ b/examples/NamedBasedExplicitDecode.hs
@@ -0,0 +1,21 @@
+{-# LANGUAGE OverloadedStrings #-}
+
+import qualified Data.ByteString.Lazy as BL
+import Data.Csv
+import qualified Data.Vector as V
+
+data Person = Person
+ { name :: String
+ , salary :: Int
+ }
+
+valueParse :: NamedRecord -> Parser Person
+valueParse r = Person <$> r .: "name" <*> r .: "salary"
+
+main :: IO ()
+main = do
+ csvData <- BL.readFile "salaries.csv"
+ case decodeByNameWithP valueParse defaultDecodeOptions csvData of
+ Left err -> putStrLn err
+ Right (_, v) -> V.forM_ v $ \ p ->
+ putStrLn $ name p ++ " earns " ++ show (salary p) ++ " dollars" \ No newline at end of file
diff --git a/examples/NamedBasedGeneric.hs b/examples/NamedBasedGeneric.hs
index fede125..fede125 100644..100755
--- a/examples/NamedBasedGeneric.hs
+++ b/examples/NamedBasedGeneric.hs
diff --git a/examples/StreamingIndexBasedDecode.hs b/examples/StreamingIndexBasedDecode.hs
index cc2a827..cc2a827 100644..100755
--- a/examples/StreamingIndexBasedDecode.hs
+++ b/examples/StreamingIndexBasedDecode.hs
diff --git a/Data/Csv.hs b/src/Data/Csv.hs
index 621e789..03ca5dc 100644
--- a/Data/Csv.hs
+++ b/src/Data/Csv.hs
@@ -48,7 +48,9 @@ module Data.Csv
, DecodeOptions(..)
, defaultDecodeOptions
, decodeWith
+ , decodeWithP
, decodeByNameWith
+ , decodeByNameWithP
, EncodeOptions(..)
, Quoting(..)
, defaultEncodeOptions
@@ -374,7 +376,7 @@ import Data.Csv.Types
-- they're not. You can then write:
--
-- > myOptions :: Options
--- > myOptions = defaultOptions { fieldLabelmodifier = rmUnderscore }
+-- > myOptions = defaultOptions { fieldLabelModifier = rmUnderscore }
-- > where
-- > rmUnderscore ('_':str) = str
-- > rmUnderscore str = str
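
For context on the fieldLabelModifier fix above, a sketch of how such Options usually plug into a Generic-based instance; the Row type is invented, and the use of genericParseNamedRecord is an assumption about the library's generic helpers rather than part of this patch:

{-# LANGUAGE DeriveGeneric #-}

import Data.Csv
import GHC.Generics (Generic)

-- Hypothetical record whose selectors carry a leading underscore.
data Row = Row { _name :: String, _age :: Int } deriving (Generic)

myOptions :: Options
myOptions = defaultOptions { fieldLabelModifier = rmUnderscore }
  where
    rmUnderscore ('_':str) = str
    rmUnderscore str       = str

-- Assumes cassava's genericParseNamedRecord helper; columns are then
-- matched as "name" and "age" instead of "_name" and "_age".
instance FromNamedRecord Row where
  parseNamedRecord = genericParseNamedRecord myOptions
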
diff --git a/Data/Csv/Builder.hs b/src/Data/Csv/Builder.hs
index 801fd94..5652f2b 100644
--- a/Data/Csv/Builder.hs
+++ b/src/Data/Csv/Builder.hs
@@ -16,7 +16,7 @@ module Data.Csv.Builder
, encodeDefaultOrderedNamedRecordWith
) where
-import Data.Monoid
+import qualified Data.Monoid as Mon
import Data.ByteString.Builder as Builder
import Data.Csv.Conversion
@@ -53,7 +53,7 @@ encodeHeaderWith = encodeRecordWith
encodeRecordWith :: ToRecord a => EncodeOptions -> a -> Builder.Builder
encodeRecordWith opts r =
Encoding.encodeRecord (encQuoting opts) (encDelimiter opts) (toRecord r)
- <> Encoding.recordSep (encUseCrLf opts)
+ Mon.<> Encoding.recordSep (encUseCrLf opts)
-- | Like 'encodeNamedRecord', but lets you customize how the CSV data
-- is encoded.
@@ -61,7 +61,7 @@ encodeNamedRecordWith :: ToNamedRecord a =>
EncodeOptions -> Header -> a -> Builder.Builder
encodeNamedRecordWith opts hdr nr =
Encoding.encodeNamedRecord hdr (encQuoting opts) (encDelimiter opts)
- (toNamedRecord nr) <> Encoding.recordSep (encUseCrLf opts)
+ (toNamedRecord nr) Mon.<> Encoding.recordSep (encUseCrLf opts)
-- | Like 'encodeDefaultOrderedNamedRecord', but lets you customize
-- how the CSV data is encoded.
diff --git a/Data/Csv/Conversion.hs b/src/Data/Csv/Conversion.hs
index 5746120..1f2ab84 100644
--- a/Data/Csv/Conversion.hs
+++ b/src/Data/Csv/Conversion.hs
@@ -73,7 +73,7 @@ module Data.Csv.Conversion
, header
) where
-import Control.Applicative (Alternative, (<|>), empty)
+import Control.Applicative (Alternative, (<|>), empty, Const(..))
import Control.Monad (MonadPlus, mplus, mzero)
import qualified Control.Monad.Fail as Fail
import Data.Attoparsec.ByteString.Char8 (double)
@@ -84,6 +84,7 @@ import qualified Data.ByteString.Lazy as L
#if MIN_VERSION_bytestring(0,10,4)
import qualified Data.ByteString.Short as SBS
#endif
+import Data.Functor.Identity
import Data.List (intercalate)
import Data.Hashable (Hashable)
import qualified Data.HashMap.Lazy as HM
@@ -91,7 +92,7 @@ import Data.Int (Int8, Int16, Int32, Int64)
import qualified Data.IntMap as IM
import qualified Data.Map as M
import Data.Scientific (Scientific)
-import Data.Semigroup (Semigroup, (<>))
+import Data.Semigroup as Semi (Semigroup, (<>))
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import qualified Data.Text.Lazy as LT
@@ -798,6 +799,26 @@ instance FromField () where
parseField _ = pure ()
{-# INLINE parseField #-}
+-- | @since 0.5.2.0
+instance FromField a => FromField (Identity a) where
+ parseField = fmap Identity . parseField
+ {-# INLINE parseField #-}
+
+-- | @since 0.5.2.0
+instance ToField a => ToField (Identity a) where
+ toField = toField . runIdentity
+ {-# INLINE toField #-}
+
+-- | @since 0.5.2.0
+instance FromField a => FromField (Const a b) where
+ parseField = fmap getConst . parseField
+ {-# INLINE parseField #-}
+
+-- | @since 0.5.2.0
+instance ToField a => ToField (Const a b) where
+ toField = toField . getConst
+ {-# INLINE toField #-}
+
-- | Assumes UTF-8 encoding.
instance FromField Char where
parseField s =
@@ -1188,8 +1209,11 @@ instance Monad Parser where
{-# INLINE (>>) #-}
return = pure
{-# INLINE return #-}
+
+#if !MIN_VERSION_base(4,13,0)
fail = Fail.fail
{-# INLINE fail #-}
+#endif
-- | @since 0.5.0.0
instance Fail.MonadFail Parser where
@@ -1221,14 +1245,14 @@ instance MonadPlus Parser where
{-# INLINE mplus #-}
-- | @since 0.5.0.0
-instance Semigroup (Parser a) where
+instance Semi.Semigroup (Parser a) where
(<>) = mplus
{-# INLINE (<>) #-}
instance Monoid (Parser a) where
mempty = fail "mempty"
{-# INLINE mempty #-}
- mappend = (<>)
+ mappend = (Semi.<>)
{-# INLINE mappend #-}
apP :: Parser (a -> b) -> Parser a -> Parser b
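
A quick sketch of the Identity/Const field instances added above; the concrete values are invented for illustration, while runParser, parseField and toField are the existing public conversion API:

{-# LANGUAGE OverloadedStrings #-}

import Control.Applicative (Const (..))
import Data.Csv
import Data.Functor.Identity (Identity (..))

main :: IO ()
main = do
  -- A field parses into Identity exactly as it would into the wrapped type.
  print (runParser (parseField "42") :: Either String (Identity Int))
  -- Const encodes through the ToField instance of its first type argument.
  print (toField (Const "hello" :: Const String ()))
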
diff --git a/Data/Csv/Conversion/Internal.hs b/src/Data/Csv/Conversion/Internal.hs
index b4f1125..fa368a7 100644
--- a/Data/Csv/Conversion/Internal.hs
+++ b/src/Data/Csv/Conversion/Internal.hs
@@ -13,7 +13,7 @@ import Data.Array.IArray
import qualified Data.ByteString as B
import Data.Char (ord)
import Data.Int
-import Data.Monoid
+import qualified Data.Monoid as Mon
import Data.Scientific (Scientific)
import Data.Word
@@ -51,9 +51,9 @@ formatDecimal :: Integral a => a -> Builder
:: Word64 -> Builder #-}
{-# NOINLINE formatDecimal #-}
formatDecimal i
- | i < 0 = minus <>
+ | i < 0 = minus Mon.<>
if i <= -128
- then formatPositive (-(i `quot` 10)) <> digit (-(i `rem` 10))
+ then formatPositive (-(i `quot` 10)) Mon.<> digit (-(i `rem` 10))
else formatPositive (-i)
| otherwise = formatPositive i
@@ -64,9 +64,9 @@ formatBoundedSigned :: (Integral a, Bounded a) => a -> Builder
{-# SPECIALIZE formatBoundedSigned :: Int32 -> Builder #-}
{-# SPECIALIZE formatBoundedSigned :: Int64 -> Builder #-}
formatBoundedSigned i
- | i < 0 = minus <>
+ | i < 0 = minus Mon.<>
if i == minBound
- then formatPositive (-(i `quot` 10)) <> digit (-(i `rem` 10))
+ then formatPositive (-(i `quot` 10)) Mon.<> digit (-(i `rem` 10))
else formatPositive (-i)
| otherwise = formatPositive i
@@ -83,7 +83,7 @@ formatPositive :: Integral a => a -> Builder
{-# SPECIALIZE formatPositive :: Word64 -> Builder #-}
formatPositive = go
where go n | n < 10 = digit n
- | otherwise = go (n `quot` 10) <> digit (n `rem` 10)
+ | otherwise = go (n `quot` 10) Mon.<> digit (n `rem` 10)
minus :: Builder
minus = word8 45
@@ -129,7 +129,7 @@ formatRealFloat fmt x
| isInfinite x = if x < 0
then string8 "-Infinity"
else string8 "Infinity"
- | x < 0 || isNegativeZero x = minus <> doFmt fmt (floatToDigits (-x))
+ | x < 0 || isNegativeZero x = minus Mon.<> doFmt fmt (floatToDigits (-x))
| otherwise = doFmt fmt (floatToDigits x)
where
doFmt format (is, e) =
@@ -142,17 +142,17 @@ formatRealFloat fmt x
let show_e' = formatDecimal (e-1) in
case ds of
[48] -> string8 "0.0e0"
- [d] -> word8 d <> string8 ".0e" <> show_e'
- (d:ds') -> word8 d <> char8 '.' <> word8s ds' <>
- char8 'e' <> show_e'
+ [d] -> word8 d Mon.<> string8 ".0e" Mon.<> show_e'
+ (d:ds') -> word8 d Mon.<> char8 '.' Mon.<> word8s ds' Mon.<>
+ char8 'e' Mon.<> show_e'
[] -> error "formatRealFloat/doFmt/Exponent: []"
Fixed
- | e <= 0 -> string8 "0." <>
- byteString (B.replicate (-e) zero) <>
+ | e <= 0 -> string8 "0." Mon.<>
+ byteString (B.replicate (-e) zero) Mon.<>
word8s ds
| otherwise ->
let
- f 0 s rs = mk0 (reverse s) <> char8 '.' <> mk0 rs
+ f 0 s rs = mk0 (reverse s) Mon.<> char8 '.' Mon.<> mk0 rs
f n s [] = f (n-1) (zero:s) []
f n s (r:rs) = f (n-1) (r:s) rs
in
diff --git a/Data/Csv/Encoding.hs b/src/Data/Csv/Encoding.hs
index f34ece8..758bdb8 100644
--- a/Data/Csv/Encoding.hs
+++ b/src/Data/Csv/Encoding.hs
@@ -24,7 +24,9 @@ module Data.Csv.Encoding
, DecodeOptions(..)
, defaultDecodeOptions
, decodeWith
+ , decodeWithP
, decodeByNameWith
+ , decodeByNameWithP
, EncodeOptions(..)
, defaultEncodeOptions
, encodeWith
@@ -81,7 +83,7 @@ decode = decodeWith defaultDecodeOptions
-- | Efficiently deserialize CSV records from a lazy 'L.ByteString'.
-- If this fails due to incomplete or invalid input, @'Left' msg@ is
--- returned. The data is assumed to be preceeded by a header.
+-- returned. The data is assumed to be preceded by a header.
-- Equivalent to @'decodeByNameWith' 'defaultDecodeOptions'@.
decodeByName :: FromNamedRecord a
=> L.ByteString -- ^ CSV data
@@ -117,7 +119,7 @@ decodeWith :: FromRecord a
-- skipped
-> L.ByteString -- ^ CSV data
-> Either String (Vector a)
-decodeWith = decodeWithC csv
+decodeWith = decodeWithC (csv parseRecord)
{-# INLINE [1] decodeWith #-}
{-# RULES
@@ -130,12 +132,25 @@ idDecodeWith :: DecodeOptions -> HasHeader -> L.ByteString
-> Either String (Vector (Vector B.ByteString))
idDecodeWith = decodeWithC Parser.csv
+-- | Like 'decodeWith', but lets you specify a parser function.
+--
+-- @since 0.5.2.0
+decodeWithP :: (Record -> Conversion.Parser a)
+ -- ^ Custom parser function
+ -> DecodeOptions -- ^ Decoding options
+ -> HasHeader -- ^ Data contains header that should be
+ -- skipped
+ -> L.ByteString -- ^ CSV data
+ -> Either String (Vector a)
+decodeWithP _parseRecord = decodeWithC (csv _parseRecord)
+{-# INLINE [1] decodeWithP #-}
+
-- | Decode CSV data using the provided parser, skipping a leading
-- header if 'hasHeader' is 'HasHeader'. Returns 'Left' @errMsg@ on
-- failure.
decodeWithC :: (DecodeOptions -> AL.Parser a) -> DecodeOptions -> HasHeader
-> BL8.ByteString -> Either String a
-decodeWithC p !opts hasHeader = decodeWithP parser
+decodeWithC p !opts hasHeader = decodeWithP' parser
where parser = case hasHeader of
HasHeader -> header (decDelimiter opts) *> p opts
NoHeader -> p opts
@@ -147,7 +162,18 @@ decodeByNameWith :: FromNamedRecord a
=> DecodeOptions -- ^ Decoding options
-> L.ByteString -- ^ CSV data
-> Either String (Header, Vector a)
-decodeByNameWith !opts = decodeWithP (csvWithHeader opts)
+decodeByNameWith !opts = decodeWithP' (csvWithHeader parseNamedRecord opts)
+
+-- | Like 'decodeByNameWith', but lets you specify a parser function.
+--
+-- @since 0.5.2.0
+decodeByNameWithP :: (NamedRecord -> Conversion.Parser a)
+ -- ^ Custom parser function
+ -> DecodeOptions -- ^ Decoding options
+ -> L.ByteString -- ^ CSV data
+ -> Either String (Header, Vector a)
+decodeByNameWithP _parseNamedRecord !opts =
+ decodeWithP' (csvWithHeader _parseNamedRecord opts)
-- | Should quoting be applied to fields, and at which level?
data Quoting
@@ -328,8 +354,8 @@ prependToAll :: Builder -> [Builder] -> [Builder]
prependToAll _ [] = []
prependToAll sep (x:xs) = sep <> x : prependToAll sep xs
-decodeWithP :: AL.Parser a -> L.ByteString -> Either String a
-decodeWithP p s =
+decodeWithP' :: AL.Parser a -> L.ByteString -> Either String a
+decodeWithP' p s =
case AL.parse p s of
AL.Done _ v -> Right v
AL.Fail left _ msg -> Left errMsg
@@ -338,7 +364,7 @@ decodeWithP p s =
(if BL8.length left > 100
then (take 100 $ BL8.unpack left) ++ " (truncated)"
else show (BL8.unpack left))
-{-# INLINE decodeWithP #-}
+{-# INLINE decodeWithP' #-}
-- These alternative implementation of the 'csv' and 'csvWithHeader'
-- parsers from the 'Parser' module performs the
@@ -351,8 +377,9 @@ decodeWithP p s =
-- "parse error: conversion error: ...".
-- | Parse a CSV file that does not include a header.
-csv :: FromRecord a => DecodeOptions -> AL.Parser (V.Vector a)
-csv !opts = do
+csv :: (Record -> Conversion.Parser a) -> DecodeOptions
+ -> AL.Parser (V.Vector a)
+csv _parseRecord !opts = do
vals <- records
return $! V.fromList vals
where
@@ -360,7 +387,7 @@ csv !opts = do
!r <- record (decDelimiter opts)
if blankLine r
then (endOfInput *> pure []) <|> (endOfLine *> records)
- else case runParser (parseRecord r) of
+ else case runParser (_parseRecord r) of
Left msg -> fail $ "conversion error: " ++ msg
Right val -> do
!vals <- (endOfInput *> AP.pure []) <|> (endOfLine *> records)
@@ -368,9 +395,9 @@ csv !opts = do
{-# INLINE csv #-}
-- | Parse a CSV file that includes a header.
-csvWithHeader :: FromNamedRecord a => DecodeOptions
+csvWithHeader :: (NamedRecord -> Conversion.Parser a) -> DecodeOptions
-> AL.Parser (Header, V.Vector a)
-csvWithHeader !opts = do
+csvWithHeader _parseNamedRecord !opts = do
!hdr <- header (decDelimiter opts)
vals <- records hdr
let !v = V.fromList vals
@@ -386,4 +413,4 @@ csvWithHeader !opts = do
!vals <- (endOfInput *> pure []) <|> (endOfLine *> records hdr)
return (val : vals)
- convert hdr = parseNamedRecord . Types.toNamedRecord hdr
+ convert hdr = _parseNamedRecord . Types.toNamedRecord hdr
diff --git a/Data/Csv/Incremental.hs b/src/Data/Csv/Incremental.hs
index ff74afe..a6fc94f 100644
--- a/Data/Csv/Incremental.hs
+++ b/src/Data/Csv/Incremental.hs
@@ -55,11 +55,13 @@ module Data.Csv.Incremental
, HasHeader(..)
, decode
, decodeWith
+ , decodeWithP
-- ** Name-based record conversion
-- $namebased
, decodeByName
, decodeByNameWith
+ , decodeByNameWithP
-- * Encoding
-- ** Index-based record conversion
@@ -85,7 +87,7 @@ import Data.Attoparsec.ByteString.Char8 (endOfInput)
import qualified Data.ByteString as B
import qualified Data.ByteString.Builder as Builder
import qualified Data.ByteString.Lazy as L
-import Data.Semigroup (Semigroup, (<>))
+import Data.Semigroup as Semi (Semigroup, (<>))
import qualified Data.Vector as V
import Data.Word (Word8)
@@ -247,17 +249,28 @@ decodeWith :: FromRecord a
-> HasHeader -- ^ Data contains header that should be
-- skipped
-> Parser a
-decodeWith !opts hasHeader = case hasHeader of
+decodeWith !opts hasHeader = decodeWithP parseRecord opts hasHeader
+
+-- | Like 'decodeWith', but lets you pass an explicit parser value instead of
+-- using a typeclass
+--
+-- @since 0.5.2.0
+decodeWithP :: (Record -> Conversion.Parser a)
+ -> DecodeOptions -- ^ Decoding options
+ -> HasHeader -- ^ Data contains header that should be
+ -- skipped
+ -> Parser a
+decodeWithP p !opts hasHeader = case hasHeader of
HasHeader -> go (decodeHeaderWith opts)
- NoHeader -> Many [] $ \ s -> decodeWithP parseRecord opts s
+ NoHeader -> Many [] $ \ s -> decodeWithP' p opts s
where go (FailH rest msg) = Fail rest msg
go (PartialH k) = Many [] $ \ s' -> go (k s')
- go (DoneH _ rest) = decodeWithP parseRecord opts rest
+ go (DoneH _ rest) = decodeWithP' p opts rest
------------------------------------------------------------------------
-- | Efficiently deserialize CSV in an incremental fashion. The data
--- is assumed to be preceeded by a header. Returns a 'HeaderParser'
+-- is assumed to be preceded by a header. Returns a 'HeaderParser'
-- that when done produces a 'Parser' for parsing the actual records.
-- Equivalent to @'decodeByNameWith' 'defaultDecodeOptions'@.
decodeByName :: FromNamedRecord a
@@ -269,12 +282,21 @@ decodeByName = decodeByNameWith defaultDecodeOptions
decodeByNameWith :: FromNamedRecord a
=> DecodeOptions -- ^ Decoding options
-> HeaderParser (Parser a)
-decodeByNameWith !opts = go (decodeHeaderWith opts)
+decodeByNameWith !opts = decodeByNameWithP parseNamedRecord opts
+
+-- | Like 'decodeByNameWith', but lets you pass an explicit parser value instead
+-- of using a typeclass
+--
+-- @since 0.5.2.0
+decodeByNameWithP :: (NamedRecord -> Conversion.Parser a)
+ -> DecodeOptions -- ^ Decoding options
+ -> HeaderParser (Parser a)
+decodeByNameWithP p !opts = go (decodeHeaderWith opts)
where
go (FailH rest msg) = FailH rest msg
go (PartialH k) = PartialH $ \ s -> go (k s)
go (DoneH hdr rest) =
- DoneH hdr (decodeWithP (parseNamedRecord . toNamedRecord hdr) opts rest)
+ DoneH hdr (decodeWithP' (p . toNamedRecord hdr) opts rest)
------------------------------------------------------------------------
@@ -282,9 +304,9 @@ decodeByNameWith !opts = go (decodeHeaderWith opts)
-- 'B.ByteString' input.
-- | Like 'decode', but lets you customize how the CSV data is parsed.
-decodeWithP :: (Record -> Conversion.Parser a) -> DecodeOptions -> B.ByteString
+decodeWithP' :: (Record -> Conversion.Parser a) -> DecodeOptions -> B.ByteString
-> Parser a
-decodeWithP p !opts = go Incomplete [] . parser
+decodeWithP' p !opts = go Incomplete [] . parser
where
go !_ !acc (A.Fail rest _ msg)
| null acc = Fail rest err
@@ -294,7 +316,7 @@ decodeWithP p !opts = go Incomplete [] . parser
where cont s = go m [] (k s)
where m | B.null s = Complete
| otherwise = Incomplete
- go Complete _ (A.Partial _) = moduleError "decodeWithP" msg
+ go Complete _ (A.Partial _) = moduleError "decodeWithP'" msg
where msg = "attoparsec should never return Partial in this case"
go m acc (A.Done rest r)
| B.null rest = case m of
@@ -309,7 +331,7 @@ decodeWithP p !opts = go Incomplete [] . parser
parser = A.parse (record (decDelimiter opts) <* (endOfLine <|> endOfInput))
convert = runParser . p
-{-# INLINE decodeWithP #-}
+{-# INLINE decodeWithP' #-}
blankLine :: V.Vector B.ByteString -> Bool
blankLine v = V.length v == 1 && (B.null (V.head v))
@@ -345,14 +367,14 @@ newtype Builder a = Builder {
}
-- | @since 0.5.0.0
-instance Semigroup (Builder a) where
+instance Semi.Semigroup (Builder a) where
Builder f <> Builder g =
Builder $ \ qtng delim useCrlf ->
f qtng delim useCrlf <> g qtng delim useCrlf
instance Monoid (Builder a) where
mempty = Builder (\ _ _ _ -> mempty)
- mappend = (<>)
+ mappend = (Semi.<>)
------------------------------------------------------------------------
-- ** Index-based record conversion
@@ -429,7 +451,7 @@ instance Semigroup (NamedBuilder a) where
instance Monoid (NamedBuilder a) where
mempty = NamedBuilder (\ _ _ _ _ -> mempty)
- mappend = (<>)
+ mappend = (Semi.<>)
------------------------------------------------------------------------
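
A sketch of driving the incremental decodeWithP added above from a file handle; the file name, chunk size and parseRow helper are assumptions, while Fail/Many/Done are the module's existing incremental Parser constructors:

import qualified Data.ByteString as B
import qualified Data.Csv as Csv
import Data.Csv.Incremental
import System.IO

-- Hypothetical positional parser for (name, salary) rows.
parseRow :: Csv.Record -> Csv.Parser (String, Int)
parseRow r = (,) <$> r Csv..! 0 <*> r Csv..! 1

main :: IO ()
main = withFile "salaries.csv" ReadMode $ \h ->
    loop h (decodeWithP parseRow Csv.defaultDecodeOptions NoHeader)
  where
    report = either (hPutStrLn stderr) print
    loop h p = case p of
      Fail _ err -> hPutStrLn stderr err
      Done rs    -> mapM_ report rs
      Many rs k  -> do
        mapM_ report rs
        chunk <- B.hGetSome h 4096   -- empty at EOF, which lets the parser finish
        loop h (k chunk)
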
diff --git a/Data/Csv/Parser.hs b/src/Data/Csv/Parser.hs
index d028167..d028167 100644
--- a/Data/Csv/Parser.hs
+++ b/src/Data/Csv/Parser.hs
diff --git a/Data/Csv/Streaming.hs b/src/Data/Csv/Streaming.hs
index f86c03c..686a81e 100644
--- a/Data/Csv/Streaming.hs
+++ b/src/Data/Csv/Streaming.hs
@@ -164,7 +164,7 @@ decodeWith !opts hasHeader s0 =
go (s:ss) (Many xs k) = foldr Cons (go ss (k s)) xs
-- | Efficiently deserialize CSV in a streaming fashion. The data is
--- assumed to be preceeded by a header. Returns @'Left' errMsg@ if
+-- assumed to be preceded by a header. Returns @'Left' errMsg@ if
-- parsing the header fails. Equivalent to @'decodeByNameWith'
-- 'defaultDecodeOptions'@.
decodeByName :: FromNamedRecord a
diff --git a/Data/Csv/Types.hs b/src/Data/Csv/Types.hs
index 217939c..217939c 100644
--- a/Data/Csv/Types.hs
+++ b/src/Data/Csv/Types.hs
diff --git a/Data/Csv/Util.hs b/src/Data/Csv/Util.hs
index 8bb705b..8bb705b 100644
--- a/Data/Csv/Util.hs
+++ b/src/Data/Csv/Util.hs