summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CHANGES15
-rw-r--r--Hledger.hs1
-rw-r--r--Hledger/Data/AutoTransaction.hs193
-rw-r--r--Hledger/Data/Commodity.hs9
-rw-r--r--Hledger/Data/Dates.hs268
-rw-r--r--Hledger/Data/Journal.hs166
-rw-r--r--Hledger/Data/StringFormat.hs7
-rw-r--r--Hledger/Data/Transaction.hs2
-rw-r--r--Hledger/Data/Types.hs35
-rw-r--r--Hledger/Query.hs8
-rw-r--r--Hledger/Read/Common.hs1394
-rw-r--r--Hledger/Read/CsvReader.hs38
-rw-r--r--Hledger/Read/JournalReader.hs274
-rw-r--r--Hledger/Read/TimeclockReader.hs11
-rw-r--r--Hledger/Read/TimedotReader.hs15
-rw-r--r--Hledger/Reports/BudgetReport.hs4
-rw-r--r--Hledger/Reports/MultiBalanceReports.hs1
-rw-r--r--Hledger/Reports/ReportOptions.hs5
-rw-r--r--Hledger/Utils.hs7
-rw-r--r--Hledger/Utils/Parse.hs67
-rw-r--r--Hledger/Utils/String.hs3
-rw-r--r--Hledger/Utils/Text.hs59
-rw-r--r--Text/Megaparsec/Compat.hs73
-rw-r--r--Text/Megaparsec/Custom.hs248
-rw-r--r--hledger-lib.cabal44
-rw-r--r--hledger_csv.52
-rw-r--r--hledger_csv.info60
-rw-r--r--hledger_csv.txt2
-rw-r--r--hledger_journal.5418
-rw-r--r--hledger_journal.info552
-rw-r--r--hledger_journal.txt392
-rw-r--r--hledger_timeclock.52
-rw-r--r--hledger_timeclock.info4
-rw-r--r--hledger_timeclock.txt2
-rw-r--r--hledger_timedot.52
-rw-r--r--hledger_timedot.info8
-rw-r--r--hledger_timedot.txt2
-rw-r--r--tests/doctests.hs5
38 files changed, 2832 insertions, 1566 deletions
diff --git a/CHANGES b/CHANGES
index c1bc449..968c7ce 100644
--- a/CHANGES
+++ b/CHANGES
@@ -2,6 +2,21 @@ API-ish changes in the hledger-lib package.
Most user-visible changes are noted in the hledger changelog, instead.
+# 1.10 (2018/6/30)
+
+* build cleanly with all supported GHC versions again (7.10 to 8.4)
+
+* support/use latest base-compat (#794)
+
+* support/require megaparsec 6.4+
+
+* extensive refactoring and cleanup of parsers and related types and utilities
+
+* readJournalFile(s) cleanup, these now use InputOpts
+
+* doctests now run a bit faster (#802)
+
+
# 1.9.1 (2018/4/30)
* new generic PeriodicReport, and some report-related type aliases
diff --git a/Hledger.hs b/Hledger.hs
index 089df4f..8ff39b5 100644
--- a/Hledger.hs
+++ b/Hledger.hs
@@ -17,4 +17,5 @@ tests_Hledger = TestList
,tests_Hledger_Query
,tests_Hledger_Read
,tests_Hledger_Reports
+ ,tests_Hledger_Utils
]
diff --git a/Hledger/Data/AutoTransaction.hs b/Hledger/Data/AutoTransaction.hs
index 8a0f369..7bcacb8 100644
--- a/Hledger/Data/AutoTransaction.hs
+++ b/Hledger/Data/AutoTransaction.hs
@@ -1,4 +1,5 @@
{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE CPP #-}
{-|
@@ -17,6 +18,9 @@ module Hledger.Data.AutoTransaction
, mtvaluequery
, jdatespan
, periodTransactionInterval
+
+ -- * Misc
+ , checkPeriodicTransactionStartDate
)
where
@@ -29,8 +33,8 @@ import qualified Data.Text as T
import Hledger.Data.Types
import Hledger.Data.Dates
import Hledger.Data.Amount
+import Hledger.Data.Posting (post)
import Hledger.Data.Transaction
-import Hledger.Utils.Parse
import Hledger.Utils.UTF8IOCompat (error')
import Hledger.Query
-- import Hledger.Utils.Debug
@@ -139,144 +143,189 @@ renderPostingCommentDates p = p { pcomment = comment' }
| T.null datesComment = pcomment p
| otherwise = T.intercalate "\n" $ filter (not . T.null) [T.strip $ pcomment p, "[" <> datesComment <> "]"]
+-- doctest helper, too much hassle to define in the comment
+-- XXX duplicates some logic in periodictransactionp
+_ptgen str = do
+ let
+ t = T.pack str
+ (i,s) = parsePeriodExpr' nulldate t
+ case checkPeriodicTransactionStartDate i s t of
+ Just e -> error' e
+ Nothing ->
+ mapM_ (putStr . show) $
+ runPeriodicTransaction
+ nullperiodictransaction{ ptperiodexpr=t , ptspan=s, ptinterval=i, ptpostings=["a" `post` usd 1] }
+ nulldatespan
+
-- | Generate transactions from 'PeriodicTransaction' within a 'DateSpan'
--
-- Note that new transactions require 'txnTieKnot' post-processing.
--
--- >>> let gen str = mapM_ (putStr . show) $ runPeriodicTransaction (PeriodicTransaction str ["hi" `post` usd 1]) nulldatespan
--- >>> gen "monthly from 2017/1 to 2017/4"
+-- >>> _ptgen "monthly from 2017/1 to 2017/4"
-- 2017/01/01
--- hi $1.00
+-- ; recur: monthly from 2017/1 to 2017/4
+-- a $1.00
-- <BLANKLINE>
-- 2017/02/01
--- hi $1.00
+-- ; recur: monthly from 2017/1 to 2017/4
+-- a $1.00
-- <BLANKLINE>
-- 2017/03/01
--- hi $1.00
+-- ; recur: monthly from 2017/1 to 2017/4
+-- a $1.00
-- <BLANKLINE>
--- >>> gen "monthly from 2017/1 to 2017/5"
+--
+-- >>> _ptgen "monthly from 2017/1 to 2017/5"
-- 2017/01/01
--- hi $1.00
+-- ; recur: monthly from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
-- 2017/02/01
--- hi $1.00
+-- ; recur: monthly from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
-- 2017/03/01
--- hi $1.00
+-- ; recur: monthly from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
-- 2017/04/01
--- hi $1.00
+-- ; recur: monthly from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
--- >>> gen "every 2nd day of month from 2017/02 to 2017/04"
+--
+-- >>> _ptgen "every 2nd day of month from 2017/02 to 2017/04"
-- 2017/01/02
--- hi $1.00
+-- ; recur: every 2nd day of month from 2017/02 to 2017/04
+-- a $1.00
-- <BLANKLINE>
-- 2017/02/02
--- hi $1.00
+-- ; recur: every 2nd day of month from 2017/02 to 2017/04
+-- a $1.00
-- <BLANKLINE>
-- 2017/03/02
--- hi $1.00
--- <BLANKLINE>
--- >>> gen "monthly from 2017/1 to 2017/4"
--- 2017/01/01
--- hi $1.00
--- <BLANKLINE>
--- 2017/02/01
--- hi $1.00
--- <BLANKLINE>
--- 2017/03/01
--- hi $1.00
+-- ; recur: every 2nd day of month from 2017/02 to 2017/04
+-- a $1.00
-- <BLANKLINE>
--- >>> gen "every 30th day of month from 2017/1 to 2017/5"
+--
+-- >>> _ptgen "every 30th day of month from 2017/1 to 2017/5"
-- 2016/12/30
--- hi $1.00
+-- ; recur: every 30th day of month from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
-- 2017/01/30
--- hi $1.00
+-- ; recur: every 30th day of month from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
-- 2017/02/28
--- hi $1.00
+-- ; recur: every 30th day of month from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
-- 2017/03/30
--- hi $1.00
+-- ; recur: every 30th day of month from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
-- 2017/04/30
--- hi $1.00
+-- ; recur: every 30th day of month from 2017/1 to 2017/5
+-- a $1.00
-- <BLANKLINE>
--- >>> gen "every 2nd Thursday of month from 2017/1 to 2017/4"
+--
+-- >>> _ptgen "every 2nd Thursday of month from 2017/1 to 2017/4"
-- 2016/12/08
--- hi $1.00
+-- ; recur: every 2nd Thursday of month from 2017/1 to 2017/4
+-- a $1.00
-- <BLANKLINE>
-- 2017/01/12
--- hi $1.00
+-- ; recur: every 2nd Thursday of month from 2017/1 to 2017/4
+-- a $1.00
-- <BLANKLINE>
-- 2017/02/09
--- hi $1.00
+-- ; recur: every 2nd Thursday of month from 2017/1 to 2017/4
+-- a $1.00
-- <BLANKLINE>
-- 2017/03/09
--- hi $1.00
+-- ; recur: every 2nd Thursday of month from 2017/1 to 2017/4
+-- a $1.00
-- <BLANKLINE>
--- >>> gen "every nov 29th from 2017 to 2019"
+--
+-- >>> _ptgen "every nov 29th from 2017 to 2019"
-- 2016/11/29
--- hi $1.00
+-- ; recur: every nov 29th from 2017 to 2019
+-- a $1.00
-- <BLANKLINE>
-- 2017/11/29
--- hi $1.00
+-- ; recur: every nov 29th from 2017 to 2019
+-- a $1.00
-- <BLANKLINE>
-- 2018/11/29
--- hi $1.00
+-- ; recur: every nov 29th from 2017 to 2019
+-- a $1.00
-- <BLANKLINE>
--- >>> gen "2017/1"
+--
+-- >>> _ptgen "2017/1"
-- 2017/01/01
--- hi $1.00
+-- ; recur: 2017/1
+-- a $1.00
-- <BLANKLINE>
--- >>> gen ""
--- ... Failed to parse ...
--- >>> gen "weekly from 2017"
--- *** Exception: Unable to generate transactions according to "weekly from 2017" as 2017-01-01 is not a first day of the week
--- >>> gen "monthly from 2017/5/4"
--- *** Exception: Unable to generate transactions according to "monthly from 2017/5/4" as 2017-05-04 is not a first day of the month
--- >>> gen "every quarter from 2017/1/2"
--- *** Exception: Unable to generate transactions according to "every quarter from 2017/1/2" as 2017-01-02 is not a first day of the quarter
--- >>> gen "yearly from 2017/1/14"
--- *** Exception: Unable to generate transactions according to "yearly from 2017/1/14" as 2017-01-14 is not a first day of the year
--
--- >>> let reportperiod="daily from 2018/01/03" in runPeriodicTransaction (PeriodicTransaction reportperiod [post "a" (usd 1)]) (DateSpan (Just $ parsedate "2018-01-01") (Just $ parsedate "2018-01-03"))
+-- >>> _ptgen ""
+-- *** Exception: failed to parse...
+-- ...
+--
+-- >>> _ptgen "weekly from 2017"
+-- *** Exception: Unable to generate transactions according to "weekly from 2017" because 2017-01-01 is not a first day of the week
+--
+-- >>> _ptgen "monthly from 2017/5/4"
+-- *** Exception: Unable to generate transactions according to "monthly from 2017/5/4" because 2017-05-04 is not a first day of the month
+--
+-- >>> _ptgen "every quarter from 2017/1/2"
+-- *** Exception: Unable to generate transactions according to "every quarter from 2017/1/2" because 2017-01-02 is not a first day of the quarter
+--
+-- >>> _ptgen "yearly from 2017/1/14"
+-- *** Exception: Unable to generate transactions according to "yearly from 2017/1/14" because 2017-01-14 is not a first day of the year
+--
+-- >>> let reportperiod="daily from 2018/01/03" in let (i,s) = parsePeriodExpr' nulldate reportperiod in runPeriodicTransaction (nullperiodictransaction{ptperiodexpr=reportperiod, ptspan=s, ptinterval=i, ptpostings=["a" `post` usd 1]}) (DateSpan (Just $ parsedate "2018-01-01") (Just $ parsedate "2018-01-03"))
-- []
-runPeriodicTransaction :: PeriodicTransaction -> (DateSpan -> [Transaction])
-runPeriodicTransaction pt =
- \requestedspan ->
- let fillspan = ptspan `spanIntersect` requestedspan
- in [ t{tdate=d} | (DateSpan (Just d) _) <- ptinterval `splitSpan` fillspan ]
+--
+runPeriodicTransaction :: PeriodicTransaction -> DateSpan -> [Transaction]
+runPeriodicTransaction PeriodicTransaction{..} requestedspan =
+ [ t{tdate=d} | (DateSpan (Just d) _) <- ptinterval `splitSpan` spantofill ]
where
- t = nulltransaction { tpostings = ptpostings pt }
- periodexpr = ptperiodicexpr pt
- currentdateerr = error' $ "Current date cannot be referenced in " ++ show (T.unpack periodexpr)
- (ptinterval, ptspan) =
- case parsePeriodExpr currentdateerr periodexpr of
- Left e -> error' $ "Failed to parse " ++ show (T.unpack periodexpr) ++ ": " ++ showDateParseError e
- Right x -> checkPeriodTransactionStartDate periodexpr x
+ spantofill = spanIntervalIntersect ptinterval ptspan requestedspan
+ t = nulltransaction{
+ tstatus = ptstatus
+ ,tcode = ptcode
+ ,tdescription = ptdescription
+ ,tcomment = (if T.null ptcomment then "\n" else ptcomment) <> "recur: " <> ptperiodexpr
+ ,ttags = ("recur", ptperiodexpr) : pttags
+ ,tpostings = ptpostings
+ }
-checkPeriodTransactionStartDate :: T.Text -> (Interval, DateSpan) -> (Interval, DateSpan)
-checkPeriodTransactionStartDate periodexpr (i,s) =
- case (i,spanStart s) of
+-- | Check that this date span begins at a boundary of this interval,
+-- or return an explanatory error message including the provided period expression
+-- (from which the span and interval are derived).
+checkPeriodicTransactionStartDate :: Interval -> DateSpan -> T.Text -> Maybe String
+checkPeriodicTransactionStartDate i s periodexpr =
+ case (i, spanStart s) of
(Weeks _, Just d) -> checkStart d "week"
(Months _, Just d) -> checkStart d "month"
(Quarters _, Just d) -> checkStart d "quarter"
(Years _, Just d) -> checkStart d "year"
- _ -> (i,s)
+ _ -> Nothing
where
checkStart d x =
let firstDate = fixSmartDate d ("","this",x)
in
- if d == firstDate then (i,s)
- else error' $ "Unable to generate transactions according to "++(show periodexpr)++" as "++(show d)++" is not a first day of the "++x
+ if d == firstDate
+ then Nothing
+ else Just $
+ "Unable to generate transactions according to "++show (T.unpack periodexpr)
+ ++" because "++show d++" is not a first day of the "++x
-- | What is the interval of this 'PeriodicTransaction's period expression, if it can be parsed ?
periodTransactionInterval :: PeriodicTransaction -> Maybe Interval
periodTransactionInterval pt =
let
- expr = ptperiodicexpr pt
+ expr = ptperiodexpr pt
err = error' $ "Current date cannot be referenced in " ++ show (T.unpack expr)
in
case parsePeriodExpr err expr of
diff --git a/Hledger/Data/Commodity.hs b/Hledger/Data/Commodity.hs
index 519fa19..c82d690 100644
--- a/Hledger/Data/Commodity.hs
+++ b/Hledger/Data/Commodity.hs
@@ -12,6 +12,7 @@ are thousands separated by comma, significant decimal places and so on.
module Hledger.Data.Commodity
where
+import Data.Char (isDigit)
import Data.List
import Data.Maybe (fromMaybe)
#if !(MIN_VERSION_base(4,11,0))
@@ -28,7 +29,13 @@ import Hledger.Utils
-- characters that may not be used in a non-quoted commodity symbol
nonsimplecommoditychars = "0123456789-+.@*;\n \"{}=" :: [Char]
-quoteCommoditySymbolIfNeeded s | any (`elem` nonsimplecommoditychars) (T.unpack s) = "\"" <> s <> "\""
+isNonsimpleCommodityChar :: Char -> Bool
+isNonsimpleCommodityChar c = isDigit c || c `textElem` otherChars
+ where
+ otherChars = "-+.@*;\n \"{}=" :: T.Text
+ textElem = T.any . (==)
+
+quoteCommoditySymbolIfNeeded s | T.any (isNonsimpleCommodityChar) s = "\"" <> s <> "\""
| otherwise = s
commodity = ""
diff --git a/Hledger/Data/Dates.hs b/Hledger/Data/Dates.hs
index 04e9a72..dd34898 100644
--- a/Hledger/Data/Dates.hs
+++ b/Hledger/Data/Dates.hs
@@ -4,6 +4,7 @@
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE PackageImports #-}
{-|
Date parsing and utilities for hledger.
@@ -43,18 +44,22 @@ module Hledger.Data.Dates (
showDateSpanMonthAbbrev,
elapsedSeconds,
prevday,
+ periodexprp,
parsePeriodExpr,
+ parsePeriodExpr',
nulldatespan,
failIfInvalidYear,
failIfInvalidMonth,
failIfInvalidDay,
datesepchar,
datesepchars,
+ isDateSepChar,
spanStart,
spanEnd,
spansSpan,
spanIntersect,
spansIntersect,
+ spanIntervalIntersect,
spanDefaultsFrom,
spanUnion,
spansUnion,
@@ -71,9 +76,9 @@ module Hledger.Data.Dates (
where
import Prelude ()
-import Prelude.Compat
+import "base-compat-batteries" Prelude.Compat
import Control.Monad
-import Data.List.Compat
+import "base-compat-batteries" Data.List.Compat
import Data.Default
import Data.Maybe
import Data.Text (Text)
@@ -89,7 +94,8 @@ import Data.Time.Calendar.OrdinalDate
import Data.Time.Clock
import Data.Time.LocalTime
import Safe (headMay, lastMay, readMay)
-import Text.Megaparsec.Compat
+import Text.Megaparsec
+import Text.Megaparsec.Char
import Text.Megaparsec.Perm
import Text.Printf
@@ -260,6 +266,27 @@ spanIntersect (DateSpan b1 e1) (DateSpan b2 e2) = DateSpan b e
b = latest b1 b2
e = earliest e1 e2
+-- | Calculate the intersection of two DateSpans, adjusting the start date so
+-- the interval is preserved.
+--
+-- >>> let intervalIntersect = spanIntervalIntersect (Days 3)
+-- >>> mkdatespan "2018-01-01" "2018-01-03" `intervalIntersect` mkdatespan "2018-01-01" "2018-01-05"
+-- DateSpan 2018/01/01-2018/01/02
+-- >>> mkdatespan "2018-01-01" "2018-01-05" `intervalIntersect` mkdatespan "2018-01-02" "2018-01-05"
+-- DateSpan 2018/01/04
+-- >>> mkdatespan "2018-01-01" "2018-01-05" `intervalIntersect` mkdatespan "2018-01-03" "2018-01-05"
+-- DateSpan 2018/01/04
+-- >>> mkdatespan "2018-01-01" "2018-01-05" `intervalIntersect` mkdatespan "2018-01-04" "2018-01-05"
+-- DateSpan 2018/01/04
+-- >>> mkdatespan "2018-01-01" "2018-01-05" `intervalIntersect` mkdatespan "2017-12-01" "2018-01-05"
+-- DateSpan 2018/01/01-2018/01/04
+spanIntervalIntersect :: Interval -> DateSpan -> DateSpan -> DateSpan
+spanIntervalIntersect (Days n) (DateSpan (Just b1) e1) sp2@(DateSpan (Just b2) _) =
+ DateSpan (Just b) e1 `spanIntersect` sp2
+ where
+ b = if b1 < b2 then addDays (diffDays b1 b2 `mod` toInteger n) b2 else b1
+spanIntervalIntersect _ sp1 sp2 = sp1 `spanIntersect` sp2
+
-- | Fill any unspecified dates in the first span with the dates from
-- the second one. Sort of a one-way spanIntersect.
spanDefaultsFrom (DateSpan a1 b1) (DateSpan a2 b2) = DateSpan a b
@@ -287,8 +314,14 @@ earliest (Just d1) (Just d2) = Just $ min d1 d2
-- | Parse a period expression to an Interval and overall DateSpan using
-- the provided reference date, or return a parse error.
-parsePeriodExpr :: Day -> Text -> Either (ParseError Char MPErr) (Interval, DateSpan)
-parsePeriodExpr refdate s = parsewith (periodexpr refdate <* eof) (T.toLower s)
+parsePeriodExpr :: Day -> Text -> Either (ParseError Char CustomErr) (Interval, DateSpan)
+parsePeriodExpr refdate s = parsewith (periodexprp refdate <* eof) (T.toLower s)
+
+-- | Like parsePeriodExpr, but call error' on failure.
+parsePeriodExpr' :: Day -> Text -> (Interval, DateSpan)
+parsePeriodExpr' refdate s =
+ either (error' . ("failed to parse:" ++) . parseErrorPretty) id $
+ parsePeriodExpr refdate s
maybePeriod :: Day -> Text -> Maybe (Interval,DateSpan)
maybePeriod refdate = either (const Nothing) Just . parsePeriodExpr refdate
@@ -347,13 +380,13 @@ fixSmartDateStr :: Day -> Text -> String
fixSmartDateStr d s = either
(\e->error' $ printf "could not parse date %s %s" (show s) (show e))
id
- $ (fixSmartDateStrEither d s :: Either (ParseError Char MPErr) String)
+ $ (fixSmartDateStrEither d s :: Either (ParseError Char CustomErr) String)
-- | A safe version of fixSmartDateStr.
-fixSmartDateStrEither :: Day -> Text -> Either (ParseError Char MPErr) String
+fixSmartDateStrEither :: Day -> Text -> Either (ParseError Char CustomErr) String
fixSmartDateStrEither d = either Left (Right . showDate) . fixSmartDateStrEither' d
-fixSmartDateStrEither' :: Day -> Text -> Either (ParseError Char MPErr) Day
+fixSmartDateStrEither' :: Day -> Text -> Either (ParseError Char CustomErr) Day
fixSmartDateStrEither' d s = case parsewith smartdateonly (T.toLower s) of
Right sd -> Right $ fixSmartDate d sd
Left e -> Left e
@@ -650,29 +683,62 @@ parsedate s = fromMaybe (error' $ "could not parse date \"" ++ s ++ "\"")
-- #endif
{-|
-Parse a date in any of the formats allowed in ledger's period expressions,
-and maybe some others:
-
-> 2004
-> 2004/10
-> 2004/10/1
-> 10/1
-> 21
-> october, oct
-> yesterday, today, tomorrow
-> this/next/last week/day/month/quarter/year
-
-Returns a SmartDate, to be converted to a full date later (see fixSmartDate).
+Parse a date in any of the formats allowed in Ledger's period expressions, and some others.
Assumes any text in the parse stream has been lowercased.
+Returns a SmartDate, to be converted to a full date later (see fixSmartDate).
+
+Examples:
+
+> 2004 (start of year, which must have 4+ digits)
+> 2004/10 (start of month, which must be 1-12)
+> 2004/10/1 (exact date, day must be 1-31)
+> 10/1 (month and day in current year)
+> 21 (day in current month)
+> october, oct (start of month in current year)
+> yesterday, today, tomorrow (-1, 0, 1 days from today)
+> last/this/next day/week/month/quarter/year (-1, 0, 1 periods from the current period)
+> 20181201 (8 digit YYYYMMDD with valid year month and day)
+> 201812 (6 digit YYYYMM with valid year and month)
+
+Note malformed digit sequences might give surprising results:
+
+> 201813 (6 digits with an invalid month is parsed as start of 6-digit year)
+> 20181301 (8 digits with an invalid month is parsed as start of 8-digit year)
+> 20181232 (8 digits with an invalid day gives an error)
+> 201801012 (9+ digits beginning with a valid YYYYMMDD gives an error)
+
+Eg:
+
+YYYYMMDD is parsed as year-month-date if those parts are valid
+(>=4 digits, 1-12, and 1-31 respectively):
+>>> parsewith (smartdate <* eof) "20181201"
+Right ("2018","12","01")
+
+YYYYMM is parsed as year-month-01 if year and month are valid:
+>>> parsewith (smartdate <* eof) "201804"
+Right ("2018","04","01")
+
+With an invalid month, it's parsed as a year:
+>>> parsewith (smartdate <* eof) "201813"
+Right ("201813","","")
+
+A 9+ digit number beginning with valid YYYYMMDD gives an error:
+>>> parsewith (smartdate <* eof) "201801012"
+Left (...)
+
+Big numbers not beginning with a valid YYYYMMDD are parsed as a year:
+>>> parsewith (smartdate <* eof) "201813012"
+Right ("201813012","","")
+
-}
-smartdate :: SimpleTextParser SmartDate
+smartdate :: TextParser m SmartDate
smartdate = do
-- XXX maybe obscures date errors ? see ledgerdate
- (y,m,d) <- choice' [yyyymmdd, ymd, ym, md, y, d, month, mon, today, yesterday, tomorrow, lastthisnextthing]
+ (y,m,d) <- choice' [yyyymmdd, yyyymm, ymd, ym, md, y, d, month, mon, today, yesterday, tomorrow, lastthisnextthing]
return (y,m,d)
-- | Like smartdate, but there must be nothing other than whitespace after the date.
-smartdateonly :: SimpleTextParser SmartDate
+smartdateonly :: TextParser m SmartDate
smartdateonly = do
d <- smartdate
skipMany spacenonewline
@@ -681,8 +747,12 @@ smartdateonly = do
datesepchars :: [Char]
datesepchars = "/-."
+
datesepchar :: TextParser m Char
-datesepchar = oneOf datesepchars
+datesepchar = satisfy isDateSepChar
+
+isDateSepChar :: Char -> Bool
+isDateSepChar c = c == '/' || c == '-' || c == '.'
validYear, validMonth, validDay :: String -> Bool
validYear s = length s >= 4 && isJust (readMay s :: Maybe Year)
@@ -694,7 +764,7 @@ failIfInvalidYear s = unless (validYear s) $ fail $ "bad year number: " ++ s
failIfInvalidMonth s = unless (validMonth s) $ fail $ "bad month number: " ++ s
failIfInvalidDay s = unless (validDay s) $ fail $ "bad day number: " ++ s
-yyyymmdd :: SimpleTextParser SmartDate
+yyyymmdd :: TextParser m SmartDate
yyyymmdd = do
y <- count 4 digitChar
m <- count 2 digitChar
@@ -703,7 +773,14 @@ yyyymmdd = do
failIfInvalidDay d
return (y,m,d)
-ymd :: SimpleTextParser SmartDate
+yyyymm :: TextParser m SmartDate
+yyyymm = do
+ y <- count 4 digitChar
+ m <- count 2 digitChar
+ failIfInvalidMonth m
+ return (y,m,"01")
+
+ymd :: TextParser m SmartDate
ymd = do
y <- some digitChar
failIfInvalidYear y
@@ -715,7 +792,7 @@ ymd = do
failIfInvalidDay d
return $ (y,m,d)
-ym :: SimpleTextParser SmartDate
+ym :: TextParser m SmartDate
ym = do
y <- some digitChar
failIfInvalidYear y
@@ -724,19 +801,19 @@ ym = do
failIfInvalidMonth m
return (y,m,"")
-y :: SimpleTextParser SmartDate
+y :: TextParser m SmartDate
y = do
y <- some digitChar
failIfInvalidYear y
return (y,"","")
-d :: SimpleTextParser SmartDate
+d :: TextParser m SmartDate
d = do
d <- some digitChar
failIfInvalidDay d
return ("","",d)
-md :: SimpleTextParser SmartDate
+md :: TextParser m SmartDate
md = do
m <- some digitChar
failIfInvalidMonth m
@@ -754,38 +831,38 @@ weekdayabbrevs = ["mon","tue","wed","thu","fri","sat","sun"]
monthIndex t = maybe 0 (+1) $ t `elemIndex` months
monIndex t = maybe 0 (+1) $ t `elemIndex` monthabbrevs
-month :: SimpleTextParser SmartDate
+month :: TextParser m SmartDate
month = do
- m <- choice $ map (try . string) months
+ m <- choice $ map (try . string') months
let i = monthIndex m
return ("",show i,"")
-mon :: SimpleTextParser SmartDate
+mon :: TextParser m SmartDate
mon = do
- m <- choice $ map (try . string) monthabbrevs
+ m <- choice $ map (try . string') monthabbrevs
let i = monIndex m
return ("",show i,"")
-weekday :: SimpleTextParser Int
+weekday :: TextParser m Int
weekday = do
wday <- choice . map string' $ weekdays ++ weekdayabbrevs
let i = head . catMaybes $ [wday `elemIndex` weekdays, wday `elemIndex` weekdayabbrevs]
return (i+1)
-today,yesterday,tomorrow :: SimpleTextParser SmartDate
-today = string "today" >> return ("","","today")
-yesterday = string "yesterday" >> return ("","","yesterday")
-tomorrow = string "tomorrow" >> return ("","","tomorrow")
+today,yesterday,tomorrow :: TextParser m SmartDate
+today = string' "today" >> return ("","","today")
+yesterday = string' "yesterday" >> return ("","","yesterday")
+tomorrow = string' "tomorrow" >> return ("","","tomorrow")
-lastthisnextthing :: SimpleTextParser SmartDate
+lastthisnextthing :: TextParser m SmartDate
lastthisnextthing = do
- r <- choice $ map mptext [
+ r <- choice $ map string' [
"last"
,"this"
,"next"
]
skipMany spacenonewline -- make the space optional for easier scripting
- p <- choice $ map mptext [
+ p <- choice $ map string' [
"day"
,"week"
,"month"
@@ -793,7 +870,7 @@ lastthisnextthing = do
,"year"
]
-- XXX support these in fixSmartDate
--- ++ (map string $ months ++ monthabbrevs ++ weekdays ++ weekdayabbrevs)
+-- ++ (map string' $ months ++ monthabbrevs ++ weekdays ++ weekdayabbrevs)
return ("", T.unpack r, T.unpack p)
@@ -841,61 +918,63 @@ lastthisnextthing = do
-- Right (DayOfMonth 2,DateSpan 2009/01/01-)
-- >>> p "every 2nd day of month 2009-"
-- Right (DayOfMonth 2,DateSpan 2009/01/01-)
-periodexpr :: Day -> SimpleTextParser (Interval, DateSpan)
-periodexpr rdate = surroundedBy (skipMany spacenonewline) . choice $ map try [
- intervalanddateperiodexpr rdate,
- (,) NoInterval <$> periodexprdatespan rdate
+periodexprp :: Day -> TextParser m (Interval, DateSpan)
+periodexprp rdate = do
+ skipMany spacenonewline
+ choice $ map try [
+ intervalanddateperiodexprp rdate,
+ (,) NoInterval <$> periodexprdatespanp rdate
]
-intervalanddateperiodexpr :: Day -> SimpleTextParser (Interval, DateSpan)
-intervalanddateperiodexpr rdate = do
- i <- reportinginterval
+intervalanddateperiodexprp :: Day -> TextParser m (Interval, DateSpan)
+intervalanddateperiodexprp rdate = do
+ i <- reportingintervalp
s <- option def . try $ do
skipMany spacenonewline
- periodexprdatespan rdate
+ periodexprdatespanp rdate
return (i,s)
-- Parse a reporting interval.
-reportinginterval :: SimpleTextParser Interval
-reportinginterval = choice' [
+reportingintervalp :: TextParser m Interval
+reportingintervalp = choice' [
tryinterval "day" "daily" Days,
tryinterval "week" "weekly" Weeks,
tryinterval "month" "monthly" Months,
tryinterval "quarter" "quarterly" Quarters,
tryinterval "year" "yearly" Years,
- do string "biweekly"
+ do string' "biweekly"
return $ Weeks 2,
- do string "bimonthly"
+ do string' "bimonthly"
return $ Months 2,
- do string "every"
+ do string' "every"
skipMany spacenonewline
n <- nth
skipMany spacenonewline
- string "day"
+ string' "day"
of_ "week"
return $ DayOfWeek n,
- do string "every"
+ do string' "every"
skipMany spacenonewline
n <- weekday
return $ DayOfWeek n,
- do string "every"
+ do string' "every"
skipMany spacenonewline
n <- nth
skipMany spacenonewline
- string "day"
+ string' "day"
optOf_ "month"
return $ DayOfMonth n,
- do string "every"
+ do string' "every"
let mnth = choice' [month, mon] >>= \(_,m,_) -> return (read m)
d_o_y <- makePermParser $ DayOfYear <$$> try (skipMany spacenonewline *> mnth) <||> try (skipMany spacenonewline *> nth)
optOf_ "year"
return d_o_y,
- do string "every"
+ do string' "every"
skipMany spacenonewline
("",m,d) <- md
optOf_ "year"
return $ DayOfYear (read m) (read d),
- do string "every"
+ do string' "every"
skipMany spacenonewline
n <- nth
skipMany spacenonewline
@@ -906,31 +985,31 @@ reportinginterval = choice' [
where
of_ period = do
skipMany spacenonewline
- string "of"
+ string' "of"
skipMany spacenonewline
- string period
+ string' period
optOf_ period = optional $ try $ of_ period
nth = do n <- some digitChar
- choice' $ map string ["st","nd","rd","th"]
+ choice' $ map string' ["st","nd","rd","th"]
return $ read n
-- Parse any of several variants of a basic interval, eg "daily", "every day", "every N days".
- tryinterval :: String -> String -> (Int -> Interval) -> SimpleTextParser Interval
+ tryinterval :: String -> String -> (Int -> Interval) -> TextParser m Interval
tryinterval singular compact intcons =
choice' [
- do mptext compact'
+ do string' compact'
return $ intcons 1,
- do mptext "every"
+ do string' "every"
skipMany spacenonewline
- mptext singular'
+ string' singular'
return $ intcons 1,
- do mptext "every"
+ do string' "every"
skipMany spacenonewline
n <- fmap read $ some digitChar
skipMany spacenonewline
- mptext plural'
+ string' plural'
return $ intcons n
]
where
@@ -938,46 +1017,49 @@ reportinginterval = choice' [
singular' = T.pack singular
plural' = T.pack $ singular ++ "s"
-periodexprdatespan :: Day -> SimpleTextParser DateSpan
-periodexprdatespan rdate = choice $ map try [
- doubledatespan rdate,
- fromdatespan rdate,
- todatespan rdate,
- justdatespan rdate
+periodexprdatespanp :: Day -> TextParser m DateSpan
+periodexprdatespanp rdate = choice $ map try [
+ doubledatespanp rdate,
+ fromdatespanp rdate,
+ todatespanp rdate,
+ justdatespanp rdate
]
-doubledatespan :: Day -> SimpleTextParser DateSpan
-doubledatespan rdate = do
- optional (string "from" >> skipMany spacenonewline)
+-- |
+-- -- >>> parsewith (doubledatespanp (parsedate "2018/01/01") <* eof) "20180101-201804"
+-- Right DateSpan 2018/01/01-2018/04/01
+doubledatespanp :: Day -> TextParser m DateSpan
+doubledatespanp rdate = do
+ optional (string' "from" >> skipMany spacenonewline)
b <- smartdate
skipMany spacenonewline
- optional (choice [string "to", string "-"] >> skipMany spacenonewline)
+ optional (choice [string' "to", string' "-"] >> skipMany spacenonewline)
e <- smartdate
return $ DateSpan (Just $ fixSmartDate rdate b) (Just $ fixSmartDate rdate e)
-fromdatespan :: Day -> SimpleTextParser DateSpan
-fromdatespan rdate = do
+fromdatespanp :: Day -> TextParser m DateSpan
+fromdatespanp rdate = do
b <- choice [
do
- string "from" >> skipMany spacenonewline
+ string' "from" >> skipMany spacenonewline
smartdate
,
do
d <- smartdate
- string "-"
+ string' "-"
return d
]
return $ DateSpan (Just $ fixSmartDate rdate b) Nothing
-todatespan :: Day -> SimpleTextParser DateSpan
-todatespan rdate = do
- choice [string "to", string "-"] >> skipMany spacenonewline
+todatespanp :: Day -> TextParser m DateSpan
+todatespanp rdate = do
+ choice [string' "to", string' "-"] >> skipMany spacenonewline
e <- smartdate
return $ DateSpan Nothing (Just $ fixSmartDate rdate e)
-justdatespan :: Day -> SimpleTextParser DateSpan
-justdatespan rdate = do
- optional (string "in" >> skipMany spacenonewline)
+justdatespanp :: Day -> TextParser m DateSpan
+justdatespanp rdate = do
+ optional (string' "in" >> skipMany spacenonewline)
d <- smartdate
return $ spanFromSmartDate rdate d
diff --git a/Hledger/Data/Journal.hs b/Hledger/Data/Journal.hs
index abf0c71..8477d5c 100644
--- a/Hledger/Data/Journal.hs
+++ b/Hledger/Data/Journal.hs
@@ -486,21 +486,21 @@ filterJournalTransactionsByAccount apats j@Journal{jtxns=ts} = j{jtxns=filter tm
-}
-- | Do post-parse processing on a parsed journal to make it ready for
--- use. Reverse parsed data to normal order, canonicalise amount
+-- use. Reverse parsed data to normal order, standardise amount
-- formats, check/ensure that transactions are balanced, and maybe
-- check balance assertions.
journalFinalise :: ClockTime -> FilePath -> Text -> Bool -> ParsedJournal -> Either String Journal
-journalFinalise t path txt assrt j@Journal{jfiles=fs} = do
- (journalTieTransactions <$>
- (journalBalanceTransactions assrt $
- journalApplyCommodityStyles $
- j{ jfiles = (path,txt) : reverse fs
- , jlastreadtime = t
- , jtxns = reverse $ jtxns j -- NOTE: see addTransaction
- , jmodifiertxns = reverse $ jmodifiertxns j -- NOTE: see addModifierTransaction
- , jperiodictxns = reverse $ jperiodictxns j -- NOTE: see addPeriodicTransaction
- , jmarketprices = reverse $ jmarketprices j -- NOTE: see addMarketPrice
- }))
+journalFinalise t path txt assrt j@Journal{jfiles=fs} =
+ journalTieTransactions <$>
+ (journalBalanceTransactions assrt $
+ journalApplyCommodityStyles $
+ j {jfiles = (path,txt) : reverse fs
+ ,jlastreadtime = t
+ ,jtxns = reverse $ jtxns j -- NOTE: see addTransaction
+ ,jmodifiertxns = reverse $ jmodifiertxns j -- NOTE: see addModifierTransaction
+ ,jperiodictxns = reverse $ jperiodictxns j -- NOTE: see addPeriodicTransaction
+ ,jmarketprices = reverse $ jmarketprices j -- NOTE: see addMarketPrice
+ })
journalNumberAndTieTransactions = journalTieTransactions . journalNumberTransactions
@@ -522,9 +522,12 @@ journalUntieTransactions t@Transaction{tpostings=ps} = t{tpostings=map (\p -> p{
-- message if any of them fail.
journalCheckBalanceAssertions :: Journal -> Either String Journal
journalCheckBalanceAssertions j =
- runST $ journalBalanceTransactionsST True j
- (return ()) (\_ _ -> return ()) (const $ return j) -- noops
-
+ runST $ journalBalanceTransactionsST
+ True
+ j
+ (return ())
+ (\_ _ -> return ())
+ (const $ return j)
-- | Check a posting's balance assertion and return an error if it
-- fails.
@@ -562,43 +565,26 @@ checkBalanceAssertion p@Posting{ pbalanceassertion = Just (ass,_)} amt
(diffplus ++ showAmount diff)
checkBalanceAssertion _ _ = Right ()
--- | Environment for 'CurrentBalancesModifier'
-data Env s = Env { eBalances :: HT.HashTable s AccountName MixedAmount
- , eStoreTx :: Transaction -> ST s ()
- , eAssrt :: Bool
- , eStyles :: Maybe (M.Map CommoditySymbol AmountStyle) }
-
--- | Monad transformer stack with a reference to a mutable hashtable
--- of current account balances and a mutable array of finished
--- transactions in original parsing order.
-type CurrentBalancesModifier s = R.ReaderT (Env s) (ExceptT String (ST s))
-
-- | Fill in any missing amounts and check that all journal transactions
-- balance, or return an error message. This is done after parsing all
-- amounts and applying canonical commodity styles, since balancing
-- depends on display precision. Reports only the first error encountered.
journalBalanceTransactions :: Bool -> Journal -> Either String Journal
journalBalanceTransactions assrt j =
- runST $
- journalBalanceTransactionsST
- assrt -- check balance assertions also ?
- (journalNumberTransactions j) -- journal to process
- (newArray_ (1, genericLength $ jtxns j) :: forall s. ST s (STArray s Integer Transaction)) -- initialise state
- (\arr tx -> writeArray arr (tindex tx) tx) -- update state
- (fmap (\txns -> j{ jtxns = txns}) . getElems) -- summarise state
-
-
--- | Generalization used in the definition of
--- 'journalBalanceTransactionsST and 'journalCheckBalanceAssertions'
+ runST $ journalBalanceTransactionsST
+ assrt -- check balance assertions also ?
+ (journalNumberTransactions j) -- journal to process
+ (newArray_ (1, genericLength $ jtxns j) :: forall s. ST s (STArray s Integer Transaction)) -- initialise state
+ (\arr tx -> writeArray arr (tindex tx) tx) -- update state
+ (fmap (\txns -> j{ jtxns = txns}) . getElems) -- summarise state
+
+-- | Helper used by 'journalBalanceTransactions' and 'journalCheckBalanceAssertions'.
journalBalanceTransactionsST ::
Bool
-> Journal
- -> ST s txns
- -- ^ creates transaction store
- -> (txns -> Transaction -> ST s ())
- -- ^ "store" operation
- -> (txns -> ST s a)
- -- ^ calculate result from transactions
+ -> ST s txns -- ^ initialise state
+ -> (txns -> Transaction -> ST s ()) -- ^ update state
+ -> (txns -> ST s a) -- ^ summarise state
-> ST s (Either String a)
journalBalanceTransactionsST assrt j createStore storeIn extract =
runExceptT $ do
@@ -610,14 +596,27 @@ journalBalanceTransactionsST assrt j createStore storeIn extract =
(Just $ journalCommodityStyles j)
flip R.runReaderT env $ do
dated <- fmap snd . sortBy (comparing fst) . concat
- <$> mapM' discriminateByDate (jtxns j)
+ <$> mapM' discriminateByDate (jtxns j)
mapM' checkInferAndRegisterAmounts dated
lift $ extract txStore
- where size = genericLength $ journalPostings j
+ where
+ size = genericLength $ journalPostings j
--- | This converts a transaction into a list of objects whose dates
--- have to be considered when checking balance assertions and handled
--- by 'checkInferAndRegisterAmounts'.
+-- | Monad transformer stack with a reference to a mutable hashtable
+-- of current account balances and a mutable array of finished
+-- transactions in original parsing order.
+type CurrentBalancesModifier s = R.ReaderT (Env s) (ExceptT String (ST s))
+
+-- | Environment for 'CurrentBalancesModifier'
+data Env s = Env { eBalances :: HT.HashTable s AccountName MixedAmount
+ , eStoreTx :: Transaction -> ST s ()
+ , eAssrt :: Bool
+ , eStyles :: Maybe (M.Map CommoditySymbol AmountStyle)
+ }
+
+-- | This converts a transaction into a list of transactions or
+-- postings whose dates have to be considered when checking
+-- balance assertions and handled by 'checkInferAndRegisterAmounts'.
--
-- Transaction without balance assignments can be balanced and stored
-- immediately and their (possibly) dated postings are returned.
@@ -630,25 +629,24 @@ discriminateByDate :: Transaction
discriminateByDate tx
| null (assignmentPostings tx) = do
styles <- R.reader $ eStyles
- balanced <- lift $ ExceptT $ return
- $ balanceTransaction styles tx
+ balanced <- lift $ ExceptT $ return $ balanceTransaction styles tx
storeTransaction balanced
- return $ fmap (postingDate &&& (Left . removePrices))
- $ tpostings $ balanced
+ return $
+ fmap (postingDate &&& (Left . removePrices)) $ tpostings $ balanced
| True = do
when (any (isJust . pdate) $ tpostings tx) $
throwError $ unlines $
["Not supported: Transactions with balance assignments "
,"AND dated postings without amount:\n"
, showTransaction tx]
- return [(tdate tx, Right
- $ tx { tpostings = removePrices <$> tpostings tx })]
-
--- | This function takes different objects describing changes to
--- account balances on a single day. It can handle either a single
--- posting (from an already balanced transaction without assigments)
--- or a whole transaction with assignments (which is required to no
--- posting with pdate set.).
+ return
+ [(tdate tx, Right $ tx { tpostings = removePrices <$> tpostings tx })]
+
+-- | This function takes an object describing changes to
+-- account balances on a single day - either a single posting
+-- (from an already balanced transaction without assignments)
+-- or a whole transaction with assignments (which is required to
+-- have no posting with pdate set).
--
-- For a single posting, there is not much to do. Only add its amount
-- to its account and check the assertion, if there is one. This
@@ -658,9 +656,9 @@ discriminateByDate tx
-- 'addAmountAndCheckBalance', if there is an amount. If there is no
-- amount, the amount is inferred by the assertion or left empty if
-- there is no assertion. Then, the transaction is balanced, the
--- inferred amount added to the balance (all in
--- 'balanceTransactionUpdate') and the resulting transaction with no
--- missing amounts is stored in the array, for later retrieval.
+-- inferred amount added to the balance (all in 'balanceTransactionUpdate')
+-- and the resulting transaction with no missing amounts is stored
+-- in the array, for later retrieval.
--
-- Again in short:
--
@@ -686,45 +684,42 @@ checkInferAndRegisterAmounts (Right oldTx) = do
(fmap (\a -> p { pamount = a, porigin = Just $ originalPosting p }) . setBalance (paccount p) . fst)
$ pbalanceassertion p
--- | Adds a posting's amonut to the posting's account balance and
--- checks a possible balance assertion. If there is no amount, it runs
--- the supplied fallback action.
-addAmountAndCheckBalance :: (Posting -> CurrentBalancesModifier s Posting)
- -- ^ action to execute, if posting has no amount
- -> Posting
- -> CurrentBalancesModifier s Posting
+-- | Adds a posting's amount to the posting's account balance and
+-- checks a possible balance assertion. Or if there is no amount,
+-- runs the supplied fallback action.
+addAmountAndCheckBalance ::
+ (Posting -> CurrentBalancesModifier s Posting) -- ^ action if posting has no amount
+ -> Posting
+ -> CurrentBalancesModifier s Posting
addAmountAndCheckBalance _ p | hasAmount p = do
newAmt <- addToBalance (paccount p) $ pamount p
assrt <- R.reader eAssrt
- lift $ when assrt $ ExceptT $ return
- $ checkBalanceAssertion p newAmt
+ lift $ when assrt $ ExceptT $ return $ checkBalanceAssertion p newAmt
return p
addAmountAndCheckBalance fallback p = fallback p
-- | Sets an account's balance to a given amount and returns the
--- difference of new and old amount
+-- difference of new and old amount.
setBalance :: AccountName -> Amount -> CurrentBalancesModifier s MixedAmount
setBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do
old <- HT.lookup bals acc
let new = Mixed $ (amt :) $ maybe []
- (filter ((/= acommodity amt) . acommodity) . amounts) old
+ (filter ((/= acommodity amt) . acommodity) . amounts) old
HT.insert bals acc new
return $ maybe new (new -) old
--- | Adds an amount to an account's balance and returns the resulting
--- balance
+-- | Adds an amount to an account's balance and returns the resulting balance.
addToBalance :: AccountName -> MixedAmount -> CurrentBalancesModifier s MixedAmount
addToBalance acc amt = liftModifier $ \Env{ eBalances = bals } -> do
new <- maybe amt (+ amt) <$> HT.lookup bals acc
HT.insert bals acc new
return new
--- | Stores a transaction in the transaction array in original parsing
--- order.
+-- | Stores a transaction in the transaction array in original parsing order.
storeTransaction :: Transaction -> CurrentBalancesModifier s ()
storeTransaction tx = liftModifier $ ($tx) . eStoreTx
--- | Helper function
+-- | Helper function.
liftModifier :: (Env s -> ST s a) -> CurrentBalancesModifier s a
liftModifier f = R.ask >>= lift . lift . f
@@ -742,18 +737,19 @@ journalApplyCommodityStyles j@Journal{jtxns=ts, jmarketprices=mps} = j''
fixposting p@Posting{pamount=a} = p{pamount=styleMixedAmount styles a}
fixmarketprice mp@MarketPrice{mpamount=a} = mp{mpamount=styleAmount styles a}
--- | Get all the amount styles defined in this journal, either
--- declared by a commodity directive (preferred) or inferred from amounts,
--- as a map from symbol to style.
+-- | Get all the amount styles defined in this journal, either declared by
+-- a commodity directive or inferred from amounts, as a map from symbol to style.
+-- Styles declared by commodity directives take precedence, and these also are
+-- guaranteed to know their decimal point character.
journalCommodityStyles :: Journal -> M.Map CommoditySymbol AmountStyle
journalCommodityStyles j = declaredstyles <> inferredstyles
where
declaredstyles = M.mapMaybe cformat $ jcommodities j
inferredstyles = jinferredcommodities j
--- | Infer a display format for each commodity based on the amounts parsed.
--- "hledger... will use the format of the first posting amount in the
--- commodity, and the highest precision of all posting amounts in the commodity."
+-- | Collect and save inferred amount styles for each commodity based on
+-- the posting amounts in that commodity (excluding price amounts), ie:
+-- "the format of the first amount, adjusted to the highest precision of all amounts".
journalInferCommodityStyles :: Journal -> Journal
journalInferCommodityStyles j =
j{jinferredcommodities =
diff --git a/Hledger/Data/StringFormat.hs b/Hledger/Data/StringFormat.hs
index 87b7c14..daf9d57 100644
--- a/Hledger/Data/StringFormat.hs
+++ b/Hledger/Data/StringFormat.hs
@@ -2,7 +2,7 @@
-- hledger's report item fields. The formats are used by
-- report-specific renderers like renderBalanceReportItem.
-{-# LANGUAGE FlexibleContexts, TypeFamilies #-}
+{-# LANGUAGE FlexibleContexts, TypeFamilies, PackageImports #-}
module Hledger.Data.StringFormat (
parseStringFormat
@@ -14,12 +14,13 @@ module Hledger.Data.StringFormat (
) where
import Prelude ()
-import Prelude.Compat
+import "base-compat-batteries" Prelude.Compat
import Numeric
import Data.Char (isPrint)
import Data.Maybe
import Test.HUnit
-import Text.Megaparsec.Compat
+import Text.Megaparsec
+import Text.Megaparsec.Char
import Hledger.Utils.Parse
import Hledger.Utils.String (formatString)
diff --git a/Hledger/Data/Transaction.hs b/Hledger/Data/Transaction.hs
index 2f22446..8ef5a58 100644
--- a/Hledger/Data/Transaction.hs
+++ b/Hledger/Data/Transaction.hs
@@ -69,7 +69,7 @@ instance Show ModifierTransaction where
show t = "= " ++ T.unpack (mtvalueexpr t) ++ "\n" ++ unlines (map show (mtpostings t))
instance Show PeriodicTransaction where
- show t = "~ " ++ T.unpack (ptperiodicexpr t) ++ "\n" ++ unlines (map show (ptpostings t))
+ show t = "~ " ++ T.unpack (ptperiodexpr t) ++ "\n" ++ unlines (map show (ptpostings t))
sourceFilePath :: GenericSourcePos -> FilePath
sourceFilePath = \case
diff --git a/Hledger/Data/Types.hs b/Hledger/Data/Types.hs
index b2a05e0..3cdba27 100644
--- a/Hledger/Data/Types.hs
+++ b/Hledger/Data/Types.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE CPP, DeriveDataTypeable, StandaloneDeriving, DeriveGeneric, TypeSynonymInstances, FlexibleInstances #-}
+{-# LANGUAGE CPP, DeriveDataTypeable, StandaloneDeriving, DeriveGeneric, TypeSynonymInstances, FlexibleInstances, OverloadedStrings #-}
{-|
Most data types are defined here to avoid import cycles.
@@ -183,6 +183,7 @@ instance NFData PostingType
type TagName = Text
type TagValue = Text
type Tag = (TagName, TagValue) -- ^ A tag name and (possibly empty) value.
+type DateTag = (TagName, Day)
-- | The status of a transaction or posting, recorded with a status mark
-- (nothing, !, or *). What these mean is ultimately user defined.
@@ -222,13 +223,16 @@ instance Eq Posting where
-- TODO: needs renaming, or removal if no longer needed. See also TextPosition in Hledger.UI.Editor
-- | The position of parse errors (eg), like parsec's SourcePos but generic.
-data GenericSourcePos = GenericSourcePos FilePath Int Int -- ^ name, 1-based line number and 1-based column number.
- | JournalSourcePos FilePath (Int, Int) -- ^ file name, inclusive range of 1-based line numbers (first, last).
+data GenericSourcePos = GenericSourcePos FilePath Int Int -- ^ file path, 1-based line number and 1-based column number.
+ | JournalSourcePos FilePath (Int, Int) -- ^ file path, inclusive range of 1-based line numbers (first, last).
deriving (Eq, Read, Show, Ord, Data, Generic, Typeable)
instance NFData GenericSourcePos
-{-# ANN Transaction "HLint: ignore" #-}
+--{-# ANN Transaction "HLint: ignore" #-}
+-- Ambiguous type variable ‘p0’ arising from an annotation
+-- prevents the constraint ‘(Data p0)’ from being solved.
+-- Probable fix: use a type annotation to specify what ‘p0’ should be.
data Transaction = Transaction {
tindex :: Integer, -- ^ this transaction's 1-based position in the input stream, or 0 when not available
tsourcepos :: GenericSourcePos,
@@ -252,11 +256,32 @@ data ModifierTransaction = ModifierTransaction {
instance NFData ModifierTransaction
+-- | A periodic transaction rule, describing a transaction that recurs.
data PeriodicTransaction = PeriodicTransaction {
- ptperiodicexpr :: Text,
+ ptperiodexpr :: Text, -- ^ the period expression as written
+ ptinterval :: Interval, -- ^ the interval at which this transaction recurs
+ ptspan :: DateSpan, -- ^ the (possibly unbounded) period during which this transaction recurs. Contains a whole number of intervals.
+ --
+ ptstatus :: Status, -- ^ some of Transaction's fields
+ ptcode :: Text,
+ ptdescription :: Text,
+ ptcomment :: Text,
+ pttags :: [Tag],
ptpostings :: [Posting]
} deriving (Eq,Typeable,Data,Generic)
+nullperiodictransaction = PeriodicTransaction{
+ ptperiodexpr = ""
+ ,ptinterval = def
+ ,ptspan = def
+ ,ptstatus = Unmarked
+ ,ptcode = ""
+ ,ptdescription = ""
+ ,ptcomment = ""
+ ,pttags = []
+ ,ptpostings = []
+}
+
instance NFData PeriodicTransaction
data TimeclockCode = SetBalance | SetRequiredHours | In | Out | FinalOut deriving (Eq,Ord,Typeable,Data,Generic)
diff --git a/Hledger/Query.hs b/Hledger/Query.hs
index 5273661..7c60fc7 100644
--- a/Hledger/Query.hs
+++ b/Hledger/Query.hs
@@ -58,7 +58,8 @@ import qualified Data.Text as T
import Data.Time.Calendar
import Safe (readDef, headDef)
import Test.HUnit
-import Text.Megaparsec.Compat
+import Text.Megaparsec
+import Text.Megaparsec.Char
import Hledger.Utils hiding (words')
import Hledger.Data.Types
@@ -191,10 +192,10 @@ words'' prefixes = fromparse . parsewith maybeprefixedquotedphrases -- XXX
maybeprefixedquotedphrases = choice' [prefixedQuotedPattern, singleQuotedPattern, doubleQuotedPattern, pattern] `sepBy` skipSome spacenonewline
prefixedQuotedPattern :: SimpleTextParser T.Text
prefixedQuotedPattern = do
- not' <- fromMaybe "" `fmap` (optional $ mptext "not:")
+ not' <- fromMaybe "" `fmap` (optional $ string "not:")
let allowednexts | T.null not' = prefixes
| otherwise = prefixes ++ [""]
- next <- choice' $ map mptext allowednexts
+ next <- choice' $ map string allowednexts
let prefix :: T.Text
prefix = not' <> next
p <- singleQuotedPattern <|> doubleQuotedPattern
@@ -306,6 +307,7 @@ tests_parseQueryTerm = [
"real:1" `gives` (Left $ Real True)
"date:2008" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2008/01/01") (Just $ parsedate "2009/01/01"))
"date:from 2012/5/17" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2012/05/17") Nothing)
+ "date:20180101-201804" `gives` (Left $ Date $ DateSpan (Just $ parsedate "2018/01/01") (Just $ parsedate "2018/04/01"))
"inacct:a" `gives` (Right $ QueryOptInAcct "a")
"tag:a" `gives` (Left $ Tag "a" Nothing)
"tag:a=some value" `gives` (Left $ Tag "a" (Just "some value"))
diff --git a/Hledger/Read/Common.hs b/Hledger/Read/Common.hs
index 1663602..e75058d 100644
--- a/Hledger/Read/Common.hs
+++ b/Hledger/Read/Common.hs
@@ -14,36 +14,111 @@ Some of these might belong in Hledger.Read.JournalReader or Hledger.Read.
--- * module
{-# LANGUAGE CPP, BangPatterns, DeriveDataTypeable, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-}
+{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE LambdaCase #-}
-
-module Hledger.Read.Common
+{-# LANGUAGE PackageImports #-}
+
+module Hledger.Read.Common (
+ Reader (..),
+ InputOpts (..),
+ definputopts,
+ rawOptsToInputOpts,
+
+ -- * parsing utilities
+ runTextParser,
+ rtp,
+ runJournalParser,
+ rjp,
+ genericSourcePos,
+ journalSourcePos,
+ generateAutomaticPostings,
+ parseAndFinaliseJournal,
+ parseAndFinaliseJournal', -- TODO unused ? check addons
+ setYear,
+ getYear,
+ setDefaultCommodityAndStyle,
+ getDefaultCommodityAndStyle,
+ getDefaultAmountStyle,
+ getAmountStyle,
+ pushAccount,
+ pushParentAccount,
+ popParentAccount,
+ getParentAccount,
+ addAccountAlias,
+ getAccountAliases,
+ clearAccountAliases,
+ journalAddFile,
+
+ -- * parsers
+ -- ** transaction bits
+ statusp,
+ codep,
+ descriptionp,
+
+ -- ** dates
+ datep,
+ datetimep,
+ secondarydatep,
+
+ -- ** account names
+ modifiedaccountnamep,
+ accountnamep,
+
+ -- ** amounts
+ spaceandamountormissingp,
+ amountp,
+ amountp',
+ mamountp',
+ commoditysymbolp,
+ priceamountp,
+ partialbalanceassertionp,
+ fixedlotpricep,
+ numberp,
+ fromRawNumber,
+ rawnumberp,
+
+ -- ** comments
+ multilinecommentp,
+ emptyorcommentlinep,
+
+ followingcommentp,
+ transactioncommentp,
+ postingcommentp,
+
+ -- ** bracketed dates
+ bracketeddatetagsp,
+
+ -- ** misc
+ singlespacedtextp,
+ singlespacep
+)
where
--- * imports
import Prelude ()
-import Prelude.Compat hiding (readFile)
-import Control.Arrow ((***))
-import Control.Monad.Compat
-import Control.Monad.Except (ExceptT(..), runExceptT, throwError) --, catchError)
+import "base-compat-batteries" Prelude.Compat hiding (readFile)
+import "base-compat-batteries" Control.Monad.Compat
+import Control.Monad.Except (ExceptT(..), throwError)
import Control.Monad.State.Strict
+import Data.Bifunctor (bimap, second)
import Data.Char
import Data.Data
+import Data.Decimal (DecimalRaw (Decimal), Decimal)
import Data.Default
import Data.Functor.Identity
-import Data.List.Compat
+import "base-compat-batteries" Data.List.Compat
import Data.List.NonEmpty (NonEmpty(..))
-import Data.List.Split (wordsBy)
import Data.Maybe
import qualified Data.Map as M
-#if !(MIN_VERSION_base(4,11,0))
-import Data.Monoid
-#endif
+import qualified Data.Semigroup as Sem
import Data.Text (Text)
import qualified Data.Text as T
import Data.Time.Calendar
import Data.Time.LocalTime
-import Safe
import System.Time (getClockTime)
-import Text.Megaparsec.Compat
+import Text.Megaparsec
+import Text.Megaparsec.Char
+import Text.Megaparsec.Char.Lexer (decimal)
+import Text.Megaparsec.Custom
import Hledger.Data
import Hledger.Utils
@@ -107,31 +182,22 @@ rawOptsToInputOpts rawopts = InputOpts{
,auto_ = boolopt "auto" rawopts
}
---- * parsing utils
+--- * parsing utilities
-- | Run a string parser with no state in the identity monad.
-runTextParser, rtp :: TextParser Identity a -> Text -> Either (ParseError Char MPErr) a
+runTextParser, rtp :: TextParser Identity a -> Text -> Either (ParseError Char CustomErr) a
runTextParser p t = runParser p "" t
rtp = runTextParser
--- XXX odd, why doesn't this take a JournalParser ?
-- | Run a journal parser with a null journal-parsing state.
-runJournalParser, rjp :: Monad m => TextParser m a -> Text -> m (Either (ParseError Char MPErr) a)
-runJournalParser p t = runParserT p "" t
+runJournalParser, rjp :: Monad m => JournalParser m a -> Text -> m (Either (ParseError Char CustomErr) a)
+runJournalParser p t = runParserT (evalStateT p mempty) "" t
rjp = runJournalParser
--- | Run an error-raising journal parser with a null journal-parsing state.
-runErroringJournalParser, rejp :: Monad m => ErroringJournalParser m a -> Text -> m (Either String a)
-runErroringJournalParser p t =
- runExceptT $
- runJournalParser (evalStateT p mempty)
- t >>=
- either (throwError . parseErrorPretty) return
-rejp = runErroringJournalParser
-
genericSourcePos :: SourcePos -> GenericSourcePos
genericSourcePos p = GenericSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p) (fromIntegral . unPos $ sourceColumn p)
+-- | Construct a generic start & end line parse position from start and end megaparsec SourcePos's.
journalSourcePos :: SourcePos -> SourcePos -> GenericSourcePos
journalSourcePos p p' = JournalSourcePos (sourceName p) (fromIntegral . unPos $ sourceLine p, fromIntegral $ line')
where line'
@@ -149,19 +215,19 @@ generateAutomaticPostings j = j { jtxns = map modifier $ jtxns j }
-- | Given a megaparsec ParsedJournal parser, input options, file
-- path and file content: parse and post-process a Journal, or give an error.
-parseAndFinaliseJournal :: ErroringJournalParser IO ParsedJournal -> InputOpts
+parseAndFinaliseJournal :: JournalParser IO ParsedJournal -> InputOpts
-> FilePath -> Text -> ExceptT String IO Journal
parseAndFinaliseJournal parser iopts f txt = do
t <- liftIO getClockTime
y <- liftIO getCurrentYear
- ep <- runParserT (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt
+ ep <- liftIO $ runParserT (evalStateT parser nulljournal {jparsedefaultyear=Just y}) f txt
case ep of
Right pj ->
let pj' = if auto_ iopts then generateAutomaticPostings pj else pj in
case journalFinalise t f txt (not $ ignore_assertions_ iopts) pj' of
Right j -> return j
Left e -> throwError e
- Left e -> throwError $ parseErrorPretty e
+ Left e -> throwError $ customParseErrorPretty txt e
parseAndFinaliseJournal' :: JournalParser Identity ParsedJournal -> InputOpts
-> FilePath -> Text -> ExceptT String IO Journal
@@ -249,19 +315,8 @@ journalAddFile f j@Journal{jfiles=fs} = j{jfiles=fs++[f]}
-- append, unlike the other fields, even though we do a final reverse,
-- to compensate for additional reversal due to including/monoid-concatting
--- -- | Terminate parsing entirely, returning the given error message
--- -- with the current parse position prepended.
--- parserError :: String -> ErroringJournalParser a
--- parserError s = do
--- pos <- getPosition
--- parserErrorAt pos s
-
--- | Terminate parsing entirely, returning the given error message
--- with the given parse position prepended.
-parserErrorAt :: Monad m => SourcePos -> String -> ErroringJournalParser m a
-parserErrorAt pos s = throwError $ sourcePosPretty pos ++ ":\n" ++ s
-
--- * parsers
+
--- ** transaction bits
statusp :: TextParser m Status
@@ -271,13 +326,19 @@ statusp =
, skipMany spacenonewline >> char '!' >> return Pending
, return Unmarked
]
- <?> "cleared status"
-codep :: TextParser m String
-codep = try (do { skipSome spacenonewline; char '(' <?> "codep"; anyChar `manyTill` char ')' } ) <|> return ""
+codep :: TextParser m Text
+codep = option "" $ do
+ try $ do
+ skipSome spacenonewline
+ char '('
+ code <- takeWhileP Nothing $ \c -> c /= ')' && c /= '\n'
+ char ')' <?> "closing bracket ')' for transaction code"
+ pure code
-descriptionp :: JournalParser m String
-descriptionp = many (noneOf (";\n" :: [Char]))
+descriptionp :: TextParser m Text
+descriptionp = takeWhileP Nothing (not . semicolonOrNewline)
+ where semicolonOrNewline c = c == ';' || c == '\n'
--- ** dates
@@ -287,28 +348,53 @@ descriptionp = many (noneOf (";\n" :: [Char]))
-- Leading zeroes may be omitted.
datep :: JournalParser m Day
datep = do
- -- hacky: try to ensure precise errors for invalid dates
- -- XXX reported error position is not too good
- -- pos <- genericSourcePos <$> getPosition
- datestr <- do
- c <- digitChar
- cs <- lift $ many $ choice' [digitChar, datesepchar]
- return $ c:cs
- let sepchars = nub $ sort $ filter (`elem` datesepchars) datestr
- when (length sepchars /= 1) $ fail $ "bad date, different separators used: " ++ datestr
- let dateparts = wordsBy (`elem` datesepchars) datestr
- currentyear <- getYear
- [y,m,d] <- case (dateparts,currentyear) of
- ([m,d],Just y) -> return [show y,m,d]
- ([_,_],Nothing) -> fail $ "partial date "++datestr++" found, but the current year is unknown"
- ([y,m,d],_) -> return [y,m,d]
- _ -> fail $ "bad date: " ++ datestr
- let maybedate = fromGregorianValid (read y) (read m) (read d)
- case maybedate of
- Nothing -> fail $ "bad date: " ++ datestr
- Just date -> return date
+ mYear <- getYear
+ lift $ datep' mYear
+
+datep' :: Maybe Year -> TextParser m Day
+datep' mYear = do
+ startPos <- getPosition
+ d1 <- decimal <?> "year or month"
+ sep <- satisfy isDateSepChar <?> "date separator"
+ d2 <- decimal <?> "month or day"
+ fullDate startPos d1 sep d2 <|> partialDate startPos mYear d1 sep d2
<?> "full or partial date"
+ where
+
+ fullDate :: SourcePos -> Integer -> Char -> Int -> TextParser m Day
+ fullDate startPos year sep1 month = do
+ sep2 <- satisfy isDateSepChar <?> "date separator"
+ day <- decimal <?> "day"
+ endPos <- getPosition
+ let dateStr = show year ++ [sep1] ++ show month ++ [sep2] ++ show day
+
+ when (sep1 /= sep2) $ parseErrorAtRegion startPos endPos $
+ "invalid date (mixing date separators is not allowed): " ++ dateStr
+
+ case fromGregorianValid year month day of
+ Nothing -> parseErrorAtRegion startPos endPos $
+ "well-formed but invalid date: " ++ dateStr
+ Just date -> pure $! date
+
+ partialDate
+ :: SourcePos -> Maybe Year -> Integer -> Char -> Int -> TextParser m Day
+ partialDate startPos mYear month sep day = do
+ endPos <- getPosition
+ case mYear of
+ Just year ->
+ case fromGregorianValid year (fromIntegral month) day of
+ Nothing -> parseErrorAtRegion startPos endPos $
+ "well-formed but invalid date: " ++ dateStr
+ Just date -> pure $! date
+ where dateStr = show year ++ [sep] ++ show month ++ [sep] ++ show day
+
+ Nothing -> parseErrorAtRegion startPos endPos $
+ "partial date "++dateStr++" found, but the current year is unknown"
+ where dateStr = show month ++ [sep] ++ show day
+
+{-# INLINABLE datep' #-}
+
-- | Parse a date and time in YYYY/MM/DD HH:MM[:SS][+-ZZZZ] format.
-- Hyphen (-) and period (.) are also allowed as date separators.
-- The year may be omitted if a default year has been set.
@@ -317,103 +403,107 @@ datep = do
-- Leading zeroes may be omitted (except in a timezone).
datetimep :: JournalParser m LocalTime
datetimep = do
- day <- datep
- lift $ skipSome spacenonewline
- h <- some digitChar
- let h' = read h
- guard $ h' >= 0 && h' <= 23
- char ':'
- m <- some digitChar
- let m' = read m
- guard $ m' >= 0 && m' <= 59
- s <- optional $ char ':' >> some digitChar
- let s' = case s of Just sstr -> read sstr
- Nothing -> 0
- guard $ s' >= 0 && s' <= 59
- {- tz <- -}
- optional $ do
- plusminus <- oneOf ("-+" :: [Char])
- d1 <- digitChar
- d2 <- digitChar
- d3 <- digitChar
- d4 <- digitChar
- return $ plusminus:d1:d2:d3:d4:""
- -- ltz <- liftIO $ getCurrentTimeZone
- -- let tz' = maybe ltz (fromMaybe ltz . parseTime defaultTimeLocale "%z") tz
- -- return $ localTimeToUTC tz' $ LocalTime day $ TimeOfDay h' m' (fromIntegral s')
- return $ LocalTime day $ TimeOfDay h' m' (fromIntegral s')
-
-secondarydatep :: Day -> JournalParser m Day
-secondarydatep primarydate = do
- char '='
- -- kludgy way to use primary date for default year
- let withDefaultYear d p = do
- y <- getYear
- let (y',_,_) = toGregorian d in setYear y'
- r <- p
- when (isJust y) $ setYear $ fromJust y -- XXX
- -- mapM setYear <$> y
- return r
- withDefaultYear primarydate datep
-
--- |
--- >> parsewith twoorthreepartdatestringp "2016/01/2"
--- Right "2016/01/2"
--- twoorthreepartdatestringp = do
--- n1 <- some digitChar
--- c <- datesepchar
--- n2 <- some digitChar
--- mn3 <- optional $ char c >> some digitChar
--- return $ n1 ++ c:n2 ++ maybe "" (c:) mn3
+ mYear <- getYear
+ lift $ datetimep' mYear
+
+datetimep' :: Maybe Year -> TextParser m LocalTime
+datetimep' mYear = do
+ day <- datep' mYear
+ skipSome spacenonewline
+ time <- timeOfDay
+ optional timeZone -- ignoring time zones
+ pure $ LocalTime day time
+
+ where
+ timeOfDay :: TextParser m TimeOfDay
+ timeOfDay = do
+ pos1 <- getPosition
+ h' <- twoDigitDecimal <?> "hour"
+ pos2 <- getPosition
+ unless (h' >= 0 && h' <= 23) $ parseErrorAtRegion pos1 pos2
+ "invalid time (bad hour)"
+
+ char ':' <?> "':' (hour-minute separator)"
+ pos3 <- getPosition
+ m' <- twoDigitDecimal <?> "minute"
+ pos4 <- getPosition
+ unless (m' >= 0 && m' <= 59) $ parseErrorAtRegion pos3 pos4
+ "invalid time (bad minute)"
+
+ s' <- option 0 $ do
+ char ':' <?> "':' (minute-second separator)"
+ pos5 <- getPosition
+ s' <- twoDigitDecimal <?> "second"
+ pos6 <- getPosition
+ unless (s' >= 0 && s' <= 59) $ parseErrorAtRegion pos5 pos6
+ "invalid time (bad second)" -- we do not support leap seconds
+ pure s'
+
+ pure $ TimeOfDay h' m' (fromIntegral s')
+
+ twoDigitDecimal :: TextParser m Int
+ twoDigitDecimal = do
+ d1 <- digitToInt <$> digitChar
+ d2 <- digitToInt <$> (digitChar <?> "a second digit")
+ pure $ d1*10 + d2
+
+ timeZone :: TextParser m String
+ timeZone = do
+ plusminus <- satisfy $ \c -> c == '-' || c == '+'
+ fourDigits <- count 4 (digitChar <?> "a digit (for a time zone)")
+ pure $ plusminus:fourDigits
+
+secondarydatep :: Day -> TextParser m Day
+secondarydatep primaryDate = char '=' *> datep' (Just primaryYear)
+ where primaryYear = first3 $ toGregorian primaryDate
--- ** account names
--- | Parse an account name, then apply any parent account prefix and/or account aliases currently in effect.
+-- | Parse an account name (plus one following space if present),
+-- then apply any parent account prefix and/or account aliases currently in effect,
+-- in that order. (Ie first add the parent account prefix, then rewrite with aliases).
modifiedaccountnamep :: JournalParser m AccountName
modifiedaccountnamep = do
parent <- getParentAccount
aliases <- getAccountAliases
a <- lift accountnamep
- return $
+ return $!
accountNameApplyAliases aliases $
-- XXX accountNameApplyAliasesMemo ? doesn't seem to make a difference
joinAccountNames parent
a
--- | Parse an account name. Account names start with a non-space, may
--- have single spaces inside them, and are terminated by two or more
--- spaces (or end of input). Also they have one or more components of
--- at least one character, separated by the account separator char.
--- (This parser will also consume one following space, if present.)
+-- | Parse an account name, plus one following space if present.
+-- Account names start with a non-space, may have single spaces inside them,
+-- and are terminated by two or more spaces (or end of input).
+-- (Also they have one or more components of at least one character,
+-- separated by the account separator character, but we don't check that here.)
accountnamep :: TextParser m AccountName
-accountnamep = do
- astr <- do
- c <- nonspace
- cs <- striptrailingspace <$> many (nonspace <|> singlespace)
- return $ c:cs
- let a = T.pack astr
- when (accountNameFromComponents (accountNameComponents a) /= a)
- (fail $ "account name seems ill-formed: "++astr)
- return a
- where
- singlespace = try (do {spacenonewline; do {notFollowedBy spacenonewline; return ' '}})
- striptrailingspace "" = ""
- striptrailingspace s = if last s == ' ' then init s else s
-
--- accountnamechar = notFollowedBy (oneOf "()[]") >> nonspace
--- <?> "account name character (non-bracket, non-parenthesis, non-whitespace)"
+accountnamep = singlespacedtextp
+
+-- | Parse any text beginning with a non-whitespace character, until a double space or the end of input.
+-- Consumes one of the following spaces, if present.
+singlespacedtextp :: TextParser m T.Text
+singlespacedtextp = do
+ firstPart <- part
+ otherParts <- many $ try $ singlespacep *> part
+ pure $! T.unwords $ firstPart : otherParts
+ where
+ part = takeWhile1P Nothing (not . isSpace)
+
+-- | Parse one non-newline whitespace character that is not followed by another one.
+singlespacep = void spacenonewline *> notFollowedBy spacenonewline
--- ** amounts
-- | Parse whitespace then an amount, with an optional left or right
-- currency symbol and optional price, or return the special
-- "missing" marker amount.
-spaceandamountormissingp :: Monad m => JournalParser m MixedAmount
+spaceandamountormissingp :: JournalParser m MixedAmount
spaceandamountormissingp =
- try (do
- lift $ skipSome spacenonewline
- (Mixed . (:[])) `fmap` amountp <|> return missingmixedamt
- ) <|> return missingmixedamt
+ option missingmixedamt $ try $ do
+ lift $ skipSome spacenonewline
+ Mixed . (:[]) <$> amountp
#ifdef TESTS
assertParseEqual' :: (Show a, Eq a) => (Either ParseError a) -> a -> Assertion
@@ -432,8 +522,86 @@ test_spaceandamountormissingp = do
-- | Parse a single-commodity amount, with optional symbol on the left or
-- right, optional unit or total price, and optional (ignored)
-- ledger-style balance assertion or fixed lot price declaration.
-amountp :: Monad m => JournalParser m Amount
-amountp = try leftsymbolamountp <|> try rightsymbolamountp <|> nosymbolamountp
+amountp :: JournalParser m Amount
+amountp = label "amount" $ do
+ amount <- amountwithoutpricep
+ lift $ skipMany spacenonewline
+ price <- priceamountp
+ pure $ amount { aprice = price }
+
+amountwithoutpricep :: JournalParser m Amount
+amountwithoutpricep = do
+ (mult, sign) <- lift $ (,) <$> multiplierp <*> signp
+ leftsymbolamountp mult sign <|> rightornosymbolamountp mult sign
+
+ where
+
+ leftsymbolamountp :: Bool -> (Decimal -> Decimal) -> JournalParser m Amount
+ leftsymbolamountp mult sign = label "amount" $ do
+ c <- lift commoditysymbolp
+ suggestedStyle <- getAmountStyle c
+
+ commodityspaced <- lift $ skipMany' spacenonewline
+
+ sign2 <- lift $ signp
+ posBeforeNum <- getPosition
+ ambiguousRawNum <- lift rawnumberp
+ mExponent <- lift $ optional $ try exponentp
+ posAfterNum <- getPosition
+ let numRegion = (posBeforeNum, posAfterNum)
+
+ (q,prec,mdec,mgrps) <- lift $
+ interpretNumber numRegion suggestedStyle ambiguousRawNum mExponent
+ let s = amountstyle{ascommodityside=L, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
+ return $ Amount c (sign (sign2 q)) NoPrice s mult
+
+ rightornosymbolamountp
+ :: Bool -> (Decimal -> Decimal) -> JournalParser m Amount
+ rightornosymbolamountp mult sign = label "amount" $ do
+ posBeforeNum <- getPosition
+ ambiguousRawNum <- lift rawnumberp
+ mExponent <- lift $ optional $ try exponentp
+ posAfterNum <- getPosition
+ let numRegion = (posBeforeNum, posAfterNum)
+
+ mSpaceAndCommodity <- lift $ optional $ try $
+ (,) <$> skipMany' spacenonewline <*> commoditysymbolp
+
+ case mSpaceAndCommodity of
+ Just (commodityspaced, c) -> do
+ suggestedStyle <- getAmountStyle c
+ (q,prec,mdec,mgrps) <- lift $
+ interpretNumber numRegion suggestedStyle ambiguousRawNum mExponent
+
+ let s = amountstyle{ascommodityside=R, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
+ return $ Amount c (sign q) NoPrice s mult
+
+ Nothing -> do
+ suggestedStyle <- getDefaultAmountStyle
+ (q,prec,mdec,mgrps) <- lift $
+ interpretNumber numRegion suggestedStyle ambiguousRawNum mExponent
+
+ -- apply the most recently seen default commodity and style to this commodityless amount
+ defcs <- getDefaultCommodityAndStyle
+ let (c,s) = case defcs of
+ Just (defc,defs) -> (defc, defs{asprecision=max (asprecision defs) prec})
+ Nothing -> ("", amountstyle{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps})
+ return $ Amount c (sign q) NoPrice s mult
+
+ -- For reducing code duplication. Doesn't parse anything. Has the type
+ -- of a parser only in order to throw parse errors (for convenience).
+ interpretNumber
+ :: (SourcePos, SourcePos)
+ -> Maybe AmountStyle
+ -> Either AmbiguousNumber RawNumber
+ -> Maybe Int
+ -> TextParser m (Quantity, Int, Maybe Char, Maybe DigitGroupStyle)
+ interpretNumber posRegion suggestedStyle ambiguousNum mExp =
+ let rawNum = either (disambiguateNumber suggestedStyle) id ambiguousNum
+ in case fromRawNumber rawNum mExp of
+ Left errMsg -> uncurry parseErrorAtRegion posRegion errMsg
+ Right res -> pure res
+
#ifdef TESTS
test_amountp = do
@@ -460,17 +628,11 @@ amountp' s =
mamountp' :: String -> MixedAmount
mamountp' = Mixed . (:[]) . amountp'
-signp :: TextParser m String
-signp = do
- sign <- optional $ oneOf ("+-" :: [Char])
- return $ case sign of Just '-' -> "-"
- _ -> ""
+signp :: Num a => TextParser m (a -> a)
+signp = char '-' *> pure negate <|> char '+' *> pure id <|> pure id
multiplierp :: TextParser m Bool
-multiplierp = do
- multiplier <- optional $ oneOf ("*" :: [Char])
- return $ case multiplier of Just '*' -> True
- _ -> False
+multiplierp = option False $ char '*' *> pure True
-- | This is like skipMany but it returns True if at least one element
-- was skipped. This is helpful if you’re just using many to check if
@@ -484,89 +646,38 @@ skipMany' p = go False
then go True
else pure isNull
-leftsymbolamountp :: Monad m => JournalParser m Amount
-leftsymbolamountp = do
- sign <- lift signp
- m <- lift multiplierp
- c <- lift commoditysymbolp
- suggestedStyle <- getAmountStyle c
- commodityspaced <- lift $ skipMany' spacenonewline
- (q,prec,mdec,mgrps) <- lift $ numberp suggestedStyle
- let s = amountstyle{ascommodityside=L, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
- p <- priceamountp
- let applysign = if sign=="-" then negate else id
- return $ applysign $ Amount c q p s m
- <?> "left-symbol amount"
-
-rightsymbolamountp :: Monad m => JournalParser m Amount
-rightsymbolamountp = do
- m <- lift multiplierp
- sign <- lift signp
- rawnum <- lift $ rawnumberp
- expMod <- lift . option id $ try exponentp
- commodityspaced <- lift $ skipMany' spacenonewline
- c <- lift commoditysymbolp
- suggestedStyle <- getAmountStyle c
- let (q0,prec0,mdec,mgrps) = fromRawNumber suggestedStyle (sign == "-") rawnum
- (q, prec) = expMod (q0, prec0)
- p <- priceamountp
- let s = amountstyle{ascommodityside=R, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
- return $ Amount c q p s m
- <?> "right-symbol amount"
-
-nosymbolamountp :: Monad m => JournalParser m Amount
-nosymbolamountp = do
- m <- lift multiplierp
- suggestedStyle <- getDefaultAmountStyle
- (q,prec,mdec,mgrps) <- lift $ numberp suggestedStyle
- p <- priceamountp
- -- apply the most recently seen default commodity and style to this commodityless amount
- defcs <- getDefaultCommodityAndStyle
- let (c,s) = case defcs of
- Just (defc,defs) -> (defc, defs{asprecision=max (asprecision defs) prec})
- Nothing -> ("", amountstyle{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps})
- return $ Amount c q p s m
- <?> "no-symbol amount"
-
commoditysymbolp :: TextParser m CommoditySymbol
-commoditysymbolp = (quotedcommoditysymbolp <|> simplecommoditysymbolp) <?> "commodity symbol"
+commoditysymbolp =
+ quotedcommoditysymbolp <|> simplecommoditysymbolp <?> "commodity symbol"
quotedcommoditysymbolp :: TextParser m CommoditySymbol
-quotedcommoditysymbolp = do
- char '"'
- s <- some $ noneOf (";\n\"" :: [Char])
- char '"'
- return $ T.pack s
+quotedcommoditysymbolp =
+ between (char '"') (char '"') $ takeWhile1P Nothing f
+ where f c = c /= ';' && c /= '\n' && c /= '\"'
simplecommoditysymbolp :: TextParser m CommoditySymbol
-simplecommoditysymbolp = T.pack <$> some (noneOf nonsimplecommoditychars)
-
-priceamountp :: Monad m => JournalParser m Price
-priceamountp =
- try (do
- lift (skipMany spacenonewline)
- char '@'
- try (do
- char '@'
- lift (skipMany spacenonewline)
- a <- amountp -- XXX can parse more prices ad infinitum, shouldn't
- return $ TotalPrice a)
- <|> (do
- lift (skipMany spacenonewline)
- a <- amountp -- XXX can parse more prices ad infinitum, shouldn't
- return $ UnitPrice a))
- <|> return NoPrice
-
-partialbalanceassertionp :: Monad m => JournalParser m BalanceAssertion
-partialbalanceassertionp =
- try (do
- lift (skipMany spacenonewline)
- sourcepos <- genericSourcePos <$> lift getPosition
- char '='
- lift (skipMany spacenonewline)
- a <- amountp -- XXX should restrict to a simple amount
- return $ Just (a, sourcepos))
- <|> return Nothing
+simplecommoditysymbolp = takeWhile1P Nothing (not . isNonsimpleCommodityChar)
+
+priceamountp :: JournalParser m Price
+priceamountp = option NoPrice $ do
+ char '@'
+ priceConstructor <- char '@' *> pure TotalPrice <|> pure UnitPrice
+
+ lift (skipMany spacenonewline)
+ priceAmount <- amountwithoutpricep <?> "amount (as a price)"
+
+ pure $ priceConstructor priceAmount
+
+partialbalanceassertionp :: JournalParser m BalanceAssertion
+partialbalanceassertionp = optional $ do
+ sourcepos <- try $ do
+ lift (skipMany spacenonewline)
+ sourcepos <- genericSourcePos <$> lift getPosition
+ char '='
+ pure sourcepos
+ lift (skipMany spacenonewline)
+ a <- amountp <?> "amount (for a balance assertion or assignment)" -- XXX should restrict to a simple amount
+ return (a, sourcepos)
-- balanceassertion :: Monad m => TextParser m (Maybe MixedAmount)
-- balanceassertion =
@@ -579,19 +690,18 @@ partialbalanceassertionp =
-- <|> return Nothing
-- http://ledger-cli.org/3.0/doc/ledger3.html#Fixing-Lot-Prices
-fixedlotpricep :: Monad m => JournalParser m (Maybe Amount)
-fixedlotpricep =
- try (do
- lift (skipMany spacenonewline)
- char '{'
- lift (skipMany spacenonewline)
- char '='
- lift (skipMany spacenonewline)
- a <- amountp -- XXX should restrict to a simple amount
- lift (skipMany spacenonewline)
- char '}'
- return $ Just a)
- <|> return Nothing
+fixedlotpricep :: JournalParser m (Maybe Amount)
+fixedlotpricep = optional $ do
+ try $ do
+ lift (skipMany spacenonewline)
+ char '{'
+ lift (skipMany spacenonewline)
+ char '='
+ lift (skipMany spacenonewline)
+ a <- amountp -- XXX should restrict to a simple amount
+ lift (skipMany spacenonewline)
+ char '}'
+ return a
-- | Parse a string representation of a number for its value and display
-- attributes.
@@ -606,89 +716,215 @@ fixedlotpricep =
-- and the digit group style if any.
--
numberp :: Maybe AmountStyle -> TextParser m (Quantity, Int, Maybe Char, Maybe DigitGroupStyle)
-numberp suggestedStyle = do
+numberp suggestedStyle = label "number" $ do
-- a number is an optional sign followed by a sequence of digits possibly
-- interspersed with periods, commas, or both
-- ptrace "numberp"
sign <- signp
- raw <- rawnumberp
- dbg8 "numberp parsed" raw `seq` return ()
- let num@(q, prec, decSep, groups) = dbg8 "numberp quantity,precision,mdecimalpoint,mgrps" (fromRawNumber suggestedStyle (sign == "-") raw)
- option num . try $ do
- when (isJust groups) $ fail "groups and exponent are not mixable"
- (q', prec') <- exponentp <*> pure (q, prec)
- return (q', prec', decSep, groups)
- <?> "numberp"
-
-exponentp :: TextParser m ((Quantity, Int) -> (Quantity, Int))
-exponentp = do
- char' 'e'
- exp <- liftM read $ (++) <$> signp <*> some digitChar
- return $ (* 10^^exp) *** (0 `max`) . (+ (-exp))
- <?> "exponentp"
-
-fromRawNumber :: Maybe AmountStyle -> Bool -> (Maybe Char, [String], Maybe (Char, String)) -> (Quantity, Int, Maybe Char, Maybe DigitGroupStyle)
-fromRawNumber suggestedStyle negated raw = (quantity, precision, mdecimalpoint, mgrps) where
- -- unpack with a hint if useful
- (mseparator, intparts, mdecimalpoint, frac) =
- case raw of
- -- just a single punctuation between two digits groups, assume it's a decimal point
- (Just s, [firstGroup, lastGroup], Nothing)
- -- if have a decimalHint restrict this assumpion only to a matching separator
- | maybe True (`asdecimalcheck` s) suggestedStyle -> (Nothing, [firstGroup], Just s, lastGroup)
-
- (firstSep, digitGroups, Nothing) -> (firstSep, digitGroups, Nothing, [])
- (firstSep, digitGroups, Just (d, frac)) -> (firstSep, digitGroups, Just d, frac)
-
- -- get the digit group sizes and digit group style if any
- groupsizes = reverse $ case map length intparts of
- (a:b:cs) | a < b -> b:cs
- gs -> gs
- mgrps = (`DigitGroups` groupsizes) <$> mseparator
-
- -- put the parts back together without digit group separators, get the precision and parse the value
- repr = (if negated then "-" else "") ++ "0" ++ concat intparts ++ (if null frac then "" else "." ++ frac)
- quantity = read repr
- precision = length frac
-
- asdecimalcheck :: AmountStyle -> Char -> Bool
- asdecimalcheck = \case
- AmountStyle{asdecimalpoint = Just d} -> (d ==)
- AmountStyle{asdigitgroups = Just (DigitGroups g _)} -> (g /=)
- AmountStyle{asprecision = 0} -> const False
- _ -> const True
-
-
-rawnumberp :: TextParser m (Maybe Char, [String], Maybe (Char, String))
-rawnumberp = do
- let sepChars = ['.', ','] -- all allowed punctuation characters
-
- (firstSep, groups) <- option (Nothing, []) $ do
- leadingDigits <- some digitChar
- option (Nothing, [leadingDigits]) . try $ do
- firstSep <- oneOf sepChars <|> whitespaceChar
- groups <- some digitChar `sepBy1` char firstSep
- return (Just firstSep, leadingDigits : groups)
-
- let remSepChars = maybe sepChars (`delete` sepChars) firstSep
- modifier
- | null groups = fmap Just -- if no digits so far, we require at least some decimals
- | otherwise = optional
-
- extraGroup <- modifier $ do
- lastSep <- oneOf remSepChars
- digits <- modifier $ some digitChar -- decimal separator allowed to be without digits if had some before
- return (lastSep, fromMaybe [] digits)
-
- -- make sure we didn't leading part of mistyped number
- notFollowedBy $ oneOf sepChars <|> (whitespaceChar >> digitChar)
-
- return $ dbg8 "rawnumberp" (firstSep, groups, extraGroup)
- <?> "rawnumberp"
-
--- | Parse a unicode char that represents any non-control space char (Zs general category).
-whitespaceChar :: TextParser m Char
-whitespaceChar = charCategory Space
+ rawNum <- either (disambiguateNumber suggestedStyle) id <$> rawnumberp
+ mExp <- optional $ try $ exponentp
+ dbg8 "numberp suggestedStyle" suggestedStyle `seq` return ()
+ case dbg8 "numberp quantity,precision,mdecimalpoint,mgrps"
+ $ fromRawNumber rawNum mExp of
+ Left errMsg -> fail errMsg
+ Right (q, p, d, g) -> pure (sign q, p, d, g)
+
+exponentp :: TextParser m Int
+exponentp = char' 'e' *> signp <*> decimal <?> "exponent"
+
+-- | Interpret a raw number as a decimal number.
+--
+-- Returns:
+-- - the decimal number
+-- - the precision (number of digits after the decimal point)
+-- - the decimal point character, if any
+-- - the digit group style, if any (digit group character and sizes of digit groups)
+fromRawNumber
+ :: RawNumber
+ -> Maybe Int
+ -> Either String
+ (Quantity, Int, Maybe Char, Maybe DigitGroupStyle)
+fromRawNumber raw mExp = case raw of
+
+ NoSeparators digitGrp mDecimals ->
+ let mDecPt = fmap fst mDecimals
+ decimalGrp = maybe mempty snd mDecimals
+
+ (quantity, precision) =
+ maybe id applyExp mExp $ toQuantity digitGrp decimalGrp
+
+ in Right (quantity, precision, mDecPt, Nothing)
+
+ WithSeparators digitSep digitGrps mDecimals -> case mExp of
+ Nothing ->
+ let mDecPt = fmap fst mDecimals
+ decimalGrp = maybe mempty snd mDecimals
+ digitGroupStyle = DigitGroups digitSep (groupSizes digitGrps)
+
+ (quantity, precision) = toQuantity (mconcat digitGrps) decimalGrp
+
+ in Right (quantity, precision, mDecPt, Just digitGroupStyle)
+ Just _ -> Left
+ "invalid number: mixing digit separators with exponents is not allowed"
+
+ where
+ -- Outputs digit group sizes from least significant to most significant
+ groupSizes :: [DigitGrp] -> [Int]
+ groupSizes digitGrps = reverse $ case map digitGroupLength digitGrps of
+ (a:b:cs) | a < b -> b:cs
+ gs -> gs
+
+ toQuantity :: DigitGrp -> DigitGrp -> (Quantity, Int)
+ toQuantity preDecimalGrp postDecimalGrp = (quantity, precision)
+ where
+ quantity = Decimal (fromIntegral precision)
+ (digitGroupNumber $ preDecimalGrp <> postDecimalGrp)
+ precision = digitGroupLength postDecimalGrp
+
+ applyExp :: Int -> (Decimal, Int) -> (Decimal, Int)
+ applyExp exponent (quantity, precision) =
+ (quantity * 10^^exponent, max 0 (precision - exponent))
+
+
+disambiguateNumber :: Maybe AmountStyle -> AmbiguousNumber -> RawNumber
+disambiguateNumber suggestedStyle (AmbiguousNumber grp1 sep grp2) =
+ -- If present, use the suggested style to disambiguate;
+ -- otherwise, assume that the separator is a decimal point where possible.
+ if isDecimalPointChar sep &&
+ maybe True (sep `isValidDecimalBy`) suggestedStyle
+ then NoSeparators grp1 (Just (sep, grp2))
+ else WithSeparators sep [grp1, grp2] Nothing
+ where
+ isValidDecimalBy :: Char -> AmountStyle -> Bool
+ isValidDecimalBy c = \case
+ AmountStyle{asdecimalpoint = Just d} -> d == c
+ AmountStyle{asdigitgroups = Just (DigitGroups g _)} -> g /= c
+ AmountStyle{asprecision = 0} -> False
+ _ -> True
+
+-- | Parse and interpret the structure of a number without external hints.
+-- Numbers are digit strings, possibly separated into digit groups by one
+-- of two types of separators. (1) Numbers may optionally have a decimal
+-- point, which may be either a period or comma. (2) Numbers may
+-- optionally contain digit group separators, which must all be either a
+-- period, a comma, or a space.
+--
+-- It is our task to deduce the identities of the decimal point and digit
+-- separator characters, based on the allowed syntax. For instance, we
+-- make use of the fact that a decimal point can occur at most once and
+-- must succeed all digit group separators.
+--
+-- >>> parseTest rawnumberp "1,234,567.89"
+-- Right (WithSeparators ',' ["1","234","567"] (Just ('.',"89")))
+-- >>> parseTest rawnumberp "1,000"
+-- Left (AmbiguousNumber "1" ',' "000")
+-- >>> parseTest rawnumberp "1 000"
+-- Right (WithSeparators ' ' ["1","000"] Nothing)
+--
+rawnumberp :: TextParser m (Either AmbiguousNumber RawNumber)
+rawnumberp = label "number" $ do
+ rawNumber <- fmap Right leadingDecimalPt <|> leadingDigits
+
+ -- Guard against mistyped numbers
+ mExtraDecimalSep <- optional $ lookAhead $ satisfy isDecimalPointChar
+ when (isJust mExtraDecimalSep) $
+ fail "invalid number (invalid use of separator)"
+
+ mExtraFragment <- optional $ lookAhead $ try $
+ char ' ' *> getPosition <* digitChar
+ case mExtraFragment of
+ Just pos -> parseErrorAt pos "invalid number (excessive trailing digits)"
+ Nothing -> pure ()
+
+ return $ dbg8 "rawnumberp" rawNumber
+ where
+
+ leadingDecimalPt :: TextParser m RawNumber
+ leadingDecimalPt = do
+ decPt <- satisfy isDecimalPointChar
+ decGrp <- digitgroupp
+ pure $ NoSeparators mempty (Just (decPt, decGrp))
+
+ leadingDigits :: TextParser m (Either AmbiguousNumber RawNumber)
+ leadingDigits = do
+ grp1 <- digitgroupp
+ withSeparators grp1 <|> fmap Right (trailingDecimalPt grp1)
+ <|> pure (Right $ NoSeparators grp1 Nothing)
+
+ withSeparators :: DigitGrp -> TextParser m (Either AmbiguousNumber RawNumber)
+ withSeparators grp1 = do
+ (sep, grp2) <- try $ (,) <$> satisfy isDigitSeparatorChar <*> digitgroupp
+ grps <- many $ try $ char sep *> digitgroupp
+
+ let digitGroups = grp1 : grp2 : grps
+ fmap Right (withDecimalPt sep digitGroups)
+ <|> pure (withoutDecimalPt grp1 sep grp2 grps)
+
+ withDecimalPt :: Char -> [DigitGrp] -> TextParser m RawNumber
+ withDecimalPt digitSep digitGroups = do
+ decPt <- satisfy $ \c -> isDecimalPointChar c && c /= digitSep
+ decDigitGrp <- option mempty digitgroupp
+
+ pure $ WithSeparators digitSep digitGroups (Just (decPt, decDigitGrp))
+
+ withoutDecimalPt
+ :: DigitGrp
+ -> Char
+ -> DigitGrp
+ -> [DigitGrp]
+ -> Either AmbiguousNumber RawNumber
+ withoutDecimalPt grp1 sep grp2 grps
+ | null grps && isDecimalPointChar sep =
+ Left $ AmbiguousNumber grp1 sep grp2
+ | otherwise = Right $ WithSeparators sep (grp1:grp2:grps) Nothing
+
+ trailingDecimalPt :: DigitGrp -> TextParser m RawNumber
+ trailingDecimalPt grp1 = do
+ decPt <- satisfy isDecimalPointChar
+ pure $ NoSeparators grp1 (Just (decPt, mempty))
+
+
+isDecimalPointChar :: Char -> Bool
+isDecimalPointChar c = c == '.' || c == ','
+
+isDigitSeparatorChar :: Char -> Bool
+isDigitSeparatorChar c = isDecimalPointChar c || c == ' '
+
+
+data DigitGrp = DigitGrp {
+ digitGroupLength :: !Int,
+ digitGroupNumber :: !Integer
+} deriving (Eq)
+
+instance Show DigitGrp where
+ show (DigitGrp len num)
+ | len > 0 = "\"" ++ padding ++ numStr ++ "\""
+ | otherwise = "\"\""
+ where numStr = show num
+ padding = replicate (len - length numStr) '0'
+
+instance Sem.Semigroup DigitGrp where
+ DigitGrp l1 n1 <> DigitGrp l2 n2 = DigitGrp (l1 + l2) (n1 * 10^l2 + n2)
+
+instance Monoid DigitGrp where
+ mempty = DigitGrp 0 0
+ mappend = (Sem.<>)
+
+digitgroupp :: TextParser m DigitGrp
+digitgroupp = label "digits"
+ $ makeGroup <$> takeWhile1P (Just "digit") isDigit
+ where
+ makeGroup = uncurry DigitGrp . foldl' step (0, 0) . T.unpack
+ step (!l, !a) c = (l+1, a*10 + fromIntegral (digitToInt c))
+
+
+data RawNumber
+ = NoSeparators DigitGrp (Maybe (Char, DigitGrp)) -- 100 or 100. or .100 or 100.50
+ | WithSeparators Char [DigitGrp] (Maybe (Char, DigitGrp)) -- 1,000,000 or 1,000.50
+ deriving (Show, Eq)
+
+data AmbiguousNumber = AmbiguousNumber DigitGrp Char DigitGrp -- 1,000
+ deriving (Show, Eq)
-- test_numberp = do
-- let s `is` n = assertParseEqual (parseWithState mempty numberp s) n
@@ -714,229 +950,240 @@ whitespaceChar = charCategory Space
--- ** comments
-multilinecommentp :: JournalParser m ()
-multilinecommentp = do
- string "comment" >> lift (skipMany spacenonewline) >> newline
- go
+multilinecommentp :: TextParser m ()
+multilinecommentp = startComment *> anyLine `skipManyTill` endComment
where
- go = try (eof <|> (string "end comment" >> newline >> return ()))
- <|> (anyLine >> go)
- anyLine = anyChar `manyTill` newline
+ startComment = string "comment" *> trailingSpaces
+ endComment = eof <|> string "end comment" *> trailingSpaces
+
+ trailingSpaces = skipMany spacenonewline <* newline
+ anyLine = void $ takeWhileP Nothing (\c -> c /= '\n') *> newline
+
+{-# INLINABLE multilinecommentp #-}
-emptyorcommentlinep :: JournalParser m ()
+emptyorcommentlinep :: TextParser m ()
emptyorcommentlinep = do
- lift (skipMany spacenonewline) >> (linecommentp <|> (lift (skipMany spacenonewline) >> newline >> return ""))
- return ()
+ skipMany spacenonewline
+ skiplinecommentp <|> void newline
+ where
+ -- A line (file-level) comment can start with a semicolon, hash, or star
+ -- (allowing org nodes).
+ skiplinecommentp :: TextParser m ()
+ skiplinecommentp = do
+ satisfy $ \c -> c == ';' || c == '#' || c == '*'
+ void $ takeWhileP Nothing (\c -> c /= '\n')
+ optional newline
+ pure ()
+
+{-# INLINABLE emptyorcommentlinep #-}
+
+-- A parser combinator for parsing (possibly multiline) comments
+-- following journal items.
+--
+-- Several journal items may be followed by comments, which begin with
+-- semicolons and extend to the end of the line. Such comments may span
+-- multiple lines, but comment lines below the journal item must be
+-- preceded by leading whitespace.
+--
+-- This parser combinator accepts a parser that consumes all input up
+-- until the next newline. This parser should extract the "content" from
+-- comments. The resulting parser returns this content plus the raw text
+-- of the comment itself.
+followingcommentp' :: (Monoid a) => TextParser m a -> TextParser m (Text, a)
+followingcommentp' contentp = do
+ skipMany spacenonewline
+ sameLine <- try headerp *> match' contentp <|> pure ("", mempty)
+ _ <- eolof
+ lowerLines <- many $
+ try (skipSome spacenonewline *> headerp) *> match' contentp <* eolof
+
+ let (textLines, results) = unzip $ sameLine : lowerLines
+ strippedCommentText = T.unlines $ map T.strip textLines
+ result = mconcat results
+ pure (strippedCommentText, result)
+
+ where
+ headerp = char ';' *> skipMany spacenonewline
+
+{-# INLINABLE followingcommentp' #-}
--- | Parse a possibly multi-line comment following a semicolon.
-followingcommentp :: JournalParser m Text
+-- | Parse the text of a (possibly multiline) comment following a journal
+-- item.
+followingcommentp :: TextParser m Text
followingcommentp =
- -- ptrace "followingcommentp"
- do samelinecomment <- lift (skipMany spacenonewline) >> (try commentp <|> (newline >> return ""))
- newlinecomments <- many (try (lift (skipSome spacenonewline) >> commentp))
- return $ T.unlines $ samelinecomment:newlinecomments
-
--- | Parse a possibly multi-line comment following a semicolon, and
--- any tags and/or posting dates within it. Posting dates can be
--- expressed with "date"/"date2" tags and/or bracketed dates. The
--- dates are parsed in full here so that errors are reported in the
--- right position. Missing years can be inferred if a default date is
--- provided.
---
--- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; a:b, date:3/4, [=5/6]"
--- Right ("a:b, date:3/4, [=5/6]\n",[("a","b"),("date","3/4")],Just 2000-03-04,Just 2000-05-06)
+ fst <$> followingcommentp' (void $ takeWhileP Nothing (/= '\n'))
+{-# INLINABLE followingcommentp #-}
+
+
+-- | Parse a transaction comment and extract its tags.
+--
+-- The first line of a transaction may be followed by comments, which
+-- begin with semicolons and extend to the end of the line. Transaction
+-- comments may span multiple lines, but comment lines below the
+-- transaction must be preceded by leading whitespace.
+--
+-- 2000/1/1 ; a transaction comment starting on the same line ...
+-- ; extending to the next line
+-- account1 $1
+-- account2
+--
+-- Tags are name-value pairs.
+--
+-- >>> let getTags (_,tags) = tags
+-- >>> let parseTags = fmap getTags . rtp transactioncommentp
+--
+-- >>> parseTags "; name1: val1, name2:all this is value2"
+-- Right [("name1","val1"),("name2","all this is value2")]
+--
+-- A tag's name must be immediately followed by a colon, without
+-- separating whitespace. The corresponding value consists of all the text
+-- following the colon up until the next colon or newline, stripped of
+-- leading and trailing whitespace.
+--
+transactioncommentp :: TextParser m (Text, [Tag])
+transactioncommentp = followingcommentp' commenttagsp
+{-# INLINABLE transactioncommentp #-}
+
+commenttagsp :: TextParser m [Tag]
+commenttagsp = do
+ tagName <- fmap (last . T.split isSpace)
+ $ takeWhileP Nothing (\c -> c /= ':' && c /= '\n')
+ atColon tagName <|> pure [] -- if not ':', then either '\n' or EOF
+
+ where
+ atColon :: Text -> TextParser m [Tag]
+ atColon name = char ':' *> do
+ if T.null name
+ then commenttagsp
+ else do
+ skipMany spacenonewline
+ val <- tagValue
+ let tag = (name, val)
+ (tag:) <$> commenttagsp
+
+ tagValue :: TextParser m Text
+ tagValue = do
+ val <- T.strip <$> takeWhileP Nothing (\c -> c /= ',' && c /= '\n')
+ _ <- optional $ char ','
+ pure val
+
+{-# INLINABLE commenttagsp #-}
+
+
+-- | Parse a posting comment and extract its tags and dates.
+--
+-- Postings may be followed by comments, which begin with semicolons and
+-- extend to the end of the line. Posting comments may span multiple
+-- lines, but comment lines below the posting must be preceded by
+-- leading whitespace.
+--
+-- 2000/1/1
+-- account1 $1 ; a posting comment starting on the same line ...
+-- ; extending to the next line
+--
+-- account2
+-- ; a posting comment beginning on the next line
+--
+-- Tags are name-value pairs.
--
--- Year unspecified and no default provided -> unknown year error, at correct position:
--- >>> rejp (followingcommentandtagsp Nothing) " ; xxx date:3/4\n ; second line"
--- Left ...1:22...partial date 3/4 found, but the current year is unknown...
+-- >>> let getTags (_,tags,_,_) = tags
+-- >>> let parseTags = fmap getTags . rtp (postingcommentp Nothing)
--
--- Date tag value contains trailing text - forgot the comma, confused:
--- the syntaxes ? We'll accept the leading date anyway
--- >>> rejp (followingcommentandtagsp (Just $ fromGregorian 2000 1 2)) "; date:3/4=5/6"
+-- >>> parseTags "; name1: val1, name2:all this is value2"
+-- Right [("name1","val1"),("name2","all this is value2")]
+--
+-- A tag's name must be immediately followed by a colon, without
+-- separating whitespace. The corresponding value consists of all the text
+-- following the colon up until the next colon or newline, stripped of
+-- leading and trailing whitespace.
+--
+-- Posting dates may be expressed with "date"/"date2" tags or with
+-- bracketed date syntax. Posting dates will inherit their year from the
+-- transaction date if the year is not specified. We throw parse errors on
+-- invalid dates.
+--
+-- >>> let getDates (_,_,d1,d2) = (d1, d2)
+-- >>> let parseDates = fmap getDates . rtp (postingcommentp (Just 2000))
+--
+-- >>> parseDates "; date: 1/2, date2: 1999/12/31"
+-- Right (Just 2000-01-02,Just 1999-12-31)
+-- >>> parseDates "; [1/2=1999/12/31]"
+-- Right (Just 2000-01-02,Just 1999-12-31)
+--
+-- Example: tags, date tags, and bracketed dates
+-- >>> rtp (postingcommentp (Just 2000)) "; a:b, date:3/4, [=5/6]"
+-- Right ("a:b, date:3/4, [=5/6]\n",[("a","b"),("date","3/4")],Just 2000-03-04,Just 2000-05-06)
+--
+-- Example: extraction of dates from date tags ignores trailing text
+-- >>> rtp (postingcommentp (Just 2000)) "; date:3/4=5/6"
-- Right ("date:3/4=5/6\n",[("date","3/4=5/6")],Just 2000-03-04,Nothing)
--
-followingcommentandtagsp :: MonadIO m => Maybe Day
- -> ErroringJournalParser m (Text, [Tag], Maybe Day, Maybe Day)
-followingcommentandtagsp mdefdate = do
- -- pdbg 0 "followingcommentandtagsp"
-
- -- Parse a single or multi-line comment, starting on this line or the next one.
- -- Save the starting position and preserve all whitespace for the subsequent re-parsing,
- -- to get good error positions.
- startpos <- getPosition
- commentandwhitespace :: String <- do
- let commentp' = (:) <$> char ';' <*> anyChar `manyTill` eolof
- sp1 <- lift (many spacenonewline)
- l1 <- try (lift commentp') <|> (newline >> return "")
- ls <- lift . many $ try ((++) <$> some spacenonewline <*> commentp')
- return $ unlines $ (sp1 ++ l1) : ls
- let comment = T.pack $ unlines $ map (lstrip . dropWhile (==';') . strip) $ lines commentandwhitespace
- -- pdbg 0 $ "commentws:"++show commentandwhitespace
- -- pdbg 0 $ "comment:"++show comment
-
- -- Reparse the comment for any tags.
- tags <- case runTextParser (setPosition startpos >> tagsp) $ T.pack commentandwhitespace of
- Right ts -> return ts
- Left e -> throwError $ parseErrorPretty e
- -- pdbg 0 $ "tags: "++show tags
-
- -- Reparse the comment for any posting dates. Use the transaction date for defaults, if provided.
- epdates <- liftIO $ rejp (setPosition startpos >> postingdatesp mdefdate) $ T.pack commentandwhitespace
- pdates <- case epdates of
- Right ds -> return ds
- Left e -> throwError e
- -- pdbg 0 $ "pdates: "++show pdates
- let mdate = headMay $ map snd $ filter ((=="date").fst) pdates
- mdate2 = headMay $ map snd $ filter ((=="date2").fst) pdates
-
- return (comment, tags, mdate, mdate2)
-
--- A transaction/posting comment must start with a semicolon.
--- This parser ignores leading whitespace.
-commentp :: JournalParser m Text
-commentp = commentStartingWithp ";"
-
--- A line (file-level) comment can start with a semicolon, hash,
--- or star (allowing org nodes). This parser ignores leading whitespace.
-linecommentp :: JournalParser m Text
-linecommentp = commentStartingWithp ";#*"
-
-commentStartingWithp :: [Char] -> JournalParser m Text
-commentStartingWithp cs = do
- -- ptrace "commentStartingWith"
- oneOf cs
- lift (skipMany spacenonewline)
- l <- anyChar `manyTill` (lift eolof)
- optional newline
- return $ T.pack l
-
---- ** tags
-
--- | Extract any tags (name:value ended by comma or newline) embedded in a string.
---
--- >>> commentTags "a b:, c:c d:d, e"
--- [("b",""),("c","c d:d")]
---
--- >>> commentTags "a [1/1/1] [1/1] [1], [=1/1/1] [=1/1] [=1] [1/1=1/1/1] [1=1/1/1] b:c"
--- [("b","c")]
---
--- --[("date","1/1/1"),("date","1/1"),("date2","1/1/1"),("date2","1/1"),("date","1/1"),("date2","1/1/1"),("date","1"),("date2","1/1/1")]
---
--- >>> commentTags "\na b:, \nd:e, f"
--- [("b",""),("d","e")]
---
-commentTags :: Text -> [Tag]
-commentTags s =
- case runTextParser tagsp s of
- Right r -> r
- Left _ -> [] -- shouldn't happen
-
--- | Parse all tags found in a string.
-tagsp :: SimpleTextParser [Tag]
-tagsp = -- do
- -- pdbg 0 $ "tagsp"
- many (try (nontagp >> tagp))
-
--- | Parse everything up till the first tag.
---
--- >>> rtp nontagp "\na b:, \nd:e, f"
--- Right "\na "
-nontagp :: SimpleTextParser String
-nontagp = -- do
- -- pdbg 0 "nontagp"
- -- anyChar `manyTill` (lookAhead (try (tagorbracketeddatetagsp Nothing >> return ()) <|> eof))
- anyChar `manyTill` lookAhead (try (void tagp) <|> eof)
- -- XXX costly ?
-
--- | Tags begin with a colon-suffixed tag name (a word beginning with
--- a letter) and are followed by a tag value (any text up to a comma
--- or newline, whitespace-stripped).
---
--- >>> rtp tagp "a:b b , c AuxDate: 4/2"
--- Right ("a","b b")
---
-tagp :: SimpleTextParser Tag
-tagp = do
- -- pdbg 0 "tagp"
- n <- tagnamep
- v <- tagvaluep
- return (n,v)
-
--- |
--- >>> rtp tagnamep "a:"
--- Right "a"
-tagnamep :: SimpleTextParser Text
-tagnamep = -- do
- -- pdbg 0 "tagnamep"
- T.pack <$> some (noneOf (": \t\n" :: [Char])) <* char ':'
-
-tagvaluep :: TextParser m Text
-tagvaluep = do
- -- ptrace "tagvalue"
- v <- anyChar `manyTill` (void (try (char ',')) <|> eolof)
- return $ T.pack $ strip $ reverse $ dropWhile (==',') $ reverse $ strip v
-
---- ** posting dates
-
--- | Parse all posting dates found in a string. Posting dates can be
--- expressed with date/date2 tags and/or bracketed dates. The dates
--- are parsed fully to give useful errors. Missing years can be
--- inferred only if a default date is provided.
---
-postingdatesp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName,Day)]
-postingdatesp mdefdate = do
- -- pdbg 0 $ "postingdatesp"
- let p = ((:[]) <$> datetagp mdefdate) <|> bracketeddatetagsp mdefdate
- nonp =
- many (notFollowedBy p >> anyChar)
- -- anyChar `manyTill` (lookAhead (try (p >> return ()) <|> eof))
- concat <$> many (try (nonp >> p))
-
---- ** date tags
-
--- | Date tags are tags with name "date" or "date2". Their value is
--- parsed as a date, using the provided default date if any for
--- inferring a missing year if needed. Any error in date parsing is
--- reported and terminates parsing.
---
--- >>> rejp (datetagp Nothing) "date: 2000/1/2 "
--- Right ("date",2000-01-02)
---
--- >>> rejp (datetagp (Just $ fromGregorian 2001 2 3)) "date2:3/4"
--- Right ("date2",2001-03-04)
---
--- >>> rejp (datetagp Nothing) "date: 3/4"
--- Left ...1:9...partial date 3/4 found, but the current year is unknown...
---
-datetagp :: Monad m => Maybe Day -> ErroringJournalParser m (TagName,Day)
-datetagp mdefdate = do
- -- pdbg 0 "datetagp"
- string "date"
- n <- fromMaybe "" <$> optional (mptext "2")
- char ':'
- startpos <- getPosition
- v <- lift tagvaluep
- -- re-parse value as a date.
- j <- get
- let ep :: Either (ParseError Char MPErr) Day
- ep = parseWithState'
- j{jparsedefaultyear=first3.toGregorian <$> mdefdate}
- -- The value extends to a comma, newline, or end of file.
- -- It seems like ignoring any extra stuff following a date
- -- gives better errors here.
- (do
- setPosition startpos
- datep) -- <* eof)
- v
- case ep
- of Left e -> throwError $ parseErrorPretty e
- Right d -> return ("date"<>n, d)
+postingcommentp
+ :: Maybe Year -> TextParser m (Text, [Tag], Maybe Day, Maybe Day)
+postingcommentp mYear = do
+ (commentText, (tags, dateTags)) <-
+ followingcommentp' (commenttagsanddatesp mYear)
+ let mdate = fmap snd $ find ((=="date") .fst) dateTags
+ mdate2 = fmap snd $ find ((=="date2").fst) dateTags
+ pure (commentText, tags, mdate, mdate2)
+{-# INLINABLE postingcommentp #-}
+
+
+commenttagsanddatesp
+ :: Maybe Year -> TextParser m ([Tag], [DateTag])
+commenttagsanddatesp mYear = do
+ (txt, dateTags) <- match $ readUpTo ':'
+ -- next char is either ':' or '\n' (or EOF)
+ let tagName = last (T.split isSpace txt)
+ (fmap.second) (dateTags++) (atColon tagName) <|> pure ([], dateTags) -- if not ':', then either '\n' or EOF
---- ** bracketed dates
+ where
+ readUpTo :: Char -> TextParser m [DateTag]
+ readUpTo end = do
+ void $ takeWhileP Nothing (\c -> c /= end && c /= '\n' && c /= '[')
+ -- if not '[' then ':' or '\n' or EOF
+ atBracket (readUpTo end) <|> pure []
+
+ atBracket :: TextParser m [DateTag] -> TextParser m [DateTag]
+ atBracket cont = do
+ -- Uses the fact that bracketed date-tags cannot contain newlines
+ dateTags <- option [] $ lookAhead (bracketeddatetagsp mYear)
+ _ <- char '['
+ dateTags' <- cont
+ pure $ dateTags ++ dateTags'
+
+ atColon :: Text -> TextParser m ([Tag], [DateTag])
+ atColon name = char ':' *> do
+ skipMany spacenonewline
+ (tags, dateTags) <- case name of
+ "" -> pure ([], [])
+ "date" -> dateValue name
+ "date2" -> dateValue name
+ _ -> tagValue name
+ _ <- optional $ char ','
+ bimap (tags++) (dateTags++) <$> commenttagsanddatesp mYear
+
+ dateValue :: Text -> TextParser m ([Tag], [DateTag])
+ dateValue name = do
+ (txt, (date, dateTags)) <- match' $ do
+ date <- datep' mYear
+ dateTags <- readUpTo ','
+ pure (date, dateTags)
+ let val = T.strip txt
+ pure $ ( [(name, val)]
+ , (name, date) : dateTags )
+
+ tagValue :: Text -> TextParser m ([Tag], [DateTag])
+ tagValue name = do
+ (txt, dateTags) <- match' $ readUpTo ','
+ let val = T.strip txt
+ pure $ ( [(name, val)]
+ , dateTags )
+
+{-# INLINABLE commenttagsanddatesp #-}
--- tagorbracketeddatetagsp :: Monad m => Maybe Day -> TextParser u m [Tag]
--- tagorbracketeddatetagsp mdefdate =
--- bracketeddatetagsp mdefdate <|> ((:[]) <$> tagp)
+
+--- ** bracketed dates
-- | Parse Ledger-style bracketed posting dates ([DATE=DATE2]), as
-- "date" and/or "date2" tags. Anything that looks like an attempt at
@@ -949,49 +1196,52 @@ datetagp mdefdate = do
-- default date is provided. A missing year in DATE2 will be inferred
-- from DATE.
--
--- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/2=3/4]"
+-- >>> either (Left . parseErrorPretty) Right $ rtp (bracketeddatetagsp Nothing) "[2016/1/2=3/4]"
-- Right [("date",2016-01-02),("date2",2016-03-04)]
--
--- >>> rejp (bracketeddatetagsp Nothing) "[1]"
+-- >>> either (Left . parseErrorPretty) Right $ rtp (bracketeddatetagsp Nothing) "[1]"
-- Left ...not a bracketed date...
--
--- >>> rejp (bracketeddatetagsp Nothing) "[2016/1/32]"
--- Left ...1:11:...bad date: 2016/1/32...
+-- >>> either (Left . parseErrorPretty) Right $ rtp (bracketeddatetagsp Nothing) "[2016/1/32]"
+-- Left ...1:11:...well-formed but invalid date: 2016/1/32...
--
--- >>> rejp (bracketeddatetagsp Nothing) "[1/31]"
+-- >>> either (Left . parseErrorPretty) Right $ rtp (bracketeddatetagsp Nothing) "[1/31]"
-- Left ...1:6:...partial date 1/31 found, but the current year is unknown...
--
--- >>> rejp (bracketeddatetagsp Nothing) "[0123456789/-.=/-.=]"
--- Left ...1:15:...bad date, different separators...
+-- >>> either (Left . parseErrorPretty) Right $ rtp (bracketeddatetagsp Nothing) "[0123456789/-.=/-.=]"
+-- Left ...1:13:...expecting month or day...
--
-bracketeddatetagsp :: Monad m => Maybe Day -> ErroringJournalParser m [(TagName, Day)]
-bracketeddatetagsp mdefdate = do
+bracketeddatetagsp
+ :: Maybe Year -> TextParser m [(TagName, Day)]
+bracketeddatetagsp mYear1 = do
-- pdbg 0 "bracketeddatetagsp"
- char '['
- startpos <- getPosition
- let digits = "0123456789"
- s <- some (oneOf $ '=':digits++datesepchars)
- char ']'
- unless (any (`elem` s) digits && any (`elem` datesepchars) s) $
- fail "not a bracketed date"
-
- -- looks sufficiently like a bracketed date, now we
- -- re-parse as dates and throw any errors
- j <- get
- let ep :: Either (ParseError Char MPErr) (Maybe Day, Maybe Day)
- ep = parseWithState'
- j{jparsedefaultyear=first3.toGregorian <$> mdefdate}
- (do
- setPosition startpos
- md1 <- optional datep
- maybe (return ()) (setYear.first3.toGregorian) md1
- md2 <- optional $ char '=' >> datep
- eof
- return (md1,md2)
- )
- (T.pack s)
- case ep
- of Left e -> throwError $ parseErrorPretty e
- Right (md1,md2) -> return $ catMaybes
- [("date",) <$> md1, ("date2",) <$> md2]
+ try $ do
+ s <- lookAhead
+ $ between (char '[') (char ']')
+ $ takeWhile1P Nothing isBracketedDateChar
+ unless (T.any isDigit s && T.any isDateSepChar s) $
+ fail "not a bracketed date"
+ -- Looks sufficiently like a bracketed date to commit to parsing a date
+
+ between (char '[') (char ']') $ do
+ md1 <- optional $ datep' mYear1
+
+ let mYear2 = fmap readYear md1 <|> mYear1
+ md2 <- optional $ char '=' *> datep' mYear2
+
+ pure $ catMaybes [("date",) <$> md1, ("date2",) <$> md2]
+
+ where
+ readYear = first3 . toGregorian
+ isBracketedDateChar c = isDigit c || isDateSepChar c || c == '='
+
+{-# INLINABLE bracketeddatetagsp #-}
+
+
+--- ** helper parsers
+-- | A version of 'match' that is strict in the returned text.
+match' :: TextParser m a -> TextParser m (Text, a)
+match' p = do
+ (!txt, p) <- match p
+ pure (txt, p)
diff --git a/Hledger/Read/CsvReader.hs b/Hledger/Read/CsvReader.hs
index a4363da..a17c066 100644
--- a/Hledger/Read/CsvReader.hs
+++ b/Hledger/Read/CsvReader.hs
@@ -11,6 +11,7 @@ A reader for CSV data, using an extra rules file to help interpret the data.
{-# LANGUAGE ViewPatterns #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE PackageImports #-}
module Hledger.Read.CsvReader (
-- * Reader
@@ -28,16 +29,18 @@ module Hledger.Read.CsvReader (
)
where
import Prelude ()
-import Prelude.Compat hiding (getContents)
+import "base-compat-batteries" Prelude.Compat hiding (getContents)
import Control.Exception hiding (try)
import Control.Monad
import Control.Monad.Except
import Control.Monad.State.Strict (StateT, get, modify', evalStateT)
-- import Test.HUnit
import Data.Char (toLower, isDigit, isSpace)
-import Data.List.Compat
+import "base-compat-batteries" Data.List.Compat
+import Data.List.NonEmpty (fromList)
import Data.Maybe
import Data.Ord
+import qualified Data.Set as S
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.IO as T
@@ -53,7 +56,8 @@ import System.Directory (doesFileExist)
import System.FilePath
import Test.HUnit hiding (State)
import Text.CSV (parseCSV, CSV)
-import Text.Megaparsec.Compat hiding (parse)
+import Text.Megaparsec hiding (parse)
+import Text.Megaparsec.Char
import qualified Text.Parsec as Parsec
import Text.Printf (printf)
@@ -135,7 +139,7 @@ readJournalFromCsv mrulesfile csvfile csvdata =
(\pos r ->
let
SourcePos name line col = pos
- line' = (mpMkPos . (+1) . mpUnPos) line
+ line' = (mkPos . (+1) . unPos) line
pos' = SourcePos name line' col
in
(pos, transactionFromCsvRecord pos' rules r)
@@ -391,11 +395,15 @@ parseAndValidateCsvRules rulesfile s = do
Right r -> do
r_ <- liftIO $ runExceptT $ validateRules r
ExceptT $ case r_ of
- Left s -> return $ Left $ parseErrorPretty $ mpMkParseError rulesfile s
+ Left s -> return $ Left $ parseErrorPretty $ makeParseError rulesfile s
Right r -> return $ Right r
+ where
+ makeParseError :: FilePath -> String -> ParseError Char String
+ makeParseError f s = FancyError (fromList [initialPos f]) (S.singleton $ ErrorFail s)
+
-- | Parse this text as CSV conversion rules. The file path is for error messages.
-parseCsvRules :: FilePath -> T.Text -> Either (ParseError Char MPErr) CsvRules
+parseCsvRules :: FilePath -> T.Text -> Either (ParseError Char CustomErr) CsvRules
-- parseCsvRules rulesfile s = runParser csvrulesfile nullrules{baseAccount=takeBaseName rulesfile} rulesfile s
parseCsvRules rulesfile s =
runParser (evalStateT rulesp rules) rulesfile s
@@ -447,7 +455,7 @@ commentcharp = oneOf (";#*" :: [Char])
directivep :: CsvRulesParser (DirectiveName, String)
directivep = (do
lift $ pdbg 3 "trying directive"
- d <- fmap T.unpack $ choiceInState $ map (lift . mptext . T.pack) directives
+ d <- fmap T.unpack $ choiceInState $ map (lift . string . T.pack) directives
v <- (((char ':' >> lift (many spacenonewline)) <|> lift (some spacenonewline)) >> directivevalp)
<|> (optional (char ':') >> lift (skipMany spacenonewline) >> lift eolof >> return "")
return (d, v)
@@ -505,7 +513,7 @@ fieldassignmentp = do
journalfieldnamep :: CsvRulesParser String
journalfieldnamep = do
lift (pdbg 2 "trying journalfieldnamep")
- T.unpack <$> choiceInState (map (lift . mptext . T.pack) journalfieldnames)
+ T.unpack <$> choiceInState (map (lift . string . T.pack) journalfieldnames)
-- Transaction fields and pseudo fields for CSV conversion.
-- Names must precede any other name they contain, for the parser
@@ -565,7 +573,7 @@ recordmatcherp = do
<?> "record matcher"
matchoperatorp :: CsvRulesParser String
-matchoperatorp = fmap T.unpack $ choiceInState $ map mptext
+matchoperatorp = fmap T.unpack $ choiceInState $ map string
["~"
-- ,"!~"
-- ,"="
@@ -717,11 +725,17 @@ getAmountStr rules record =
case (render mamount, render mamountin, render mamountout) of
(Just "", Nothing, Nothing) -> Nothing
(Just a, Nothing, Nothing) -> Just a
- (Nothing, Just "", Just "") -> error' $ "neither amount-in or amount-out has a value\n"++showRecord record
+ (Nothing, Just "", Just "") -> error' $ "neither amount-in or amount-out has a value\n"
+ ++ " record: " ++ showRecord record
(Nothing, Just i, Just "") -> Just i
(Nothing, Just "", Just o) -> Just $ negateStr o
- (Nothing, Just _, Just _) -> error' $ "both amount-in and amount-out have a value\n"++showRecord record
- _ -> error' $ "found values for amount and for amount-in/amount-out - please use either amount or amount-in/amount-out\n"++showRecord record
+ (Nothing, Just i, Just o) -> error' $ "both amount-in and amount-out have a value\n"
+ ++ " amount-in: " ++ i ++ "\n"
+ ++ " amount-out: " ++ o ++ "\n"
+ ++ " record: " ++ showRecord record
+ _ -> error' $ "found values for amount and for amount-in/amount-out\n"
+ ++ "please use either amount or amount-in/amount-out\n"
+ ++ " record: " ++ showRecord record
type CsvAmountString = String
diff --git a/Hledger/Read/JournalReader.hs b/Hledger/Read/JournalReader.hs
index 58641a8..f2a18d4 100644
--- a/Hledger/Read/JournalReader.hs
+++ b/Hledger/Read/JournalReader.hs
@@ -29,7 +29,7 @@ import cycles.
--- * module
-{-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings #-}
+{-# LANGUAGE CPP, RecordWildCards, NamedFieldPuns, NoMonoLocalBinds, ScopedTypeVariables, FlexibleContexts, TupleSections, OverloadedStrings, PackageImports #-}
module Hledger.Read.JournalReader (
--- * exports
@@ -42,8 +42,6 @@ module Hledger.Read.JournalReader (
parseAndFinaliseJournal,
runJournalParser,
rjp,
- runErroringJournalParser,
- rejp,
-- * Parsers used elsewhere
getParentAccount,
@@ -72,15 +70,13 @@ module Hledger.Read.JournalReader (
where
--- * imports
import Prelude ()
-import Prelude.Compat hiding (readFile)
+import "base-compat-batteries" Prelude.Compat hiding (readFile)
import qualified Control.Exception as C
import Control.Monad
-import Control.Monad.Except (ExceptT(..), runExceptT, throwError)
+import Control.Monad.Except (ExceptT(..))
import Control.Monad.State.Strict
+import Data.Bifunctor (first)
import qualified Data.Map.Strict as M
-#if !(MIN_VERSION_base(4,11,0))
-import Data.Monoid
-#endif
import Data.Text (Text)
import Data.String
import Data.List
@@ -93,7 +89,9 @@ import Test.HUnit
import Test.Framework
import Text.Megaparsec.Error
#endif
-import Text.Megaparsec.Compat hiding (parse)
+import Text.Megaparsec hiding (parse)
+import Text.Megaparsec.Char
+import Text.Megaparsec.Custom
import Text.Printf
import System.FilePath
@@ -137,10 +135,10 @@ aliasesFromOpts = map (\a -> fromparse $ runParser accountaliasp ("--alias "++qu
-- | A journal parser. Accumulates and returns a "ParsedJournal",
-- which should be finalised/validated before use.
--
--- >>> rejp (journalp <* eof) "2015/1/1\n a 0\n"
+-- >>> rjp (journalp <* eof) "2015/1/1\n a 0\n"
-- Right Journal with 1 transactions, 1 accounts
--
-journalp :: MonadIO m => ErroringJournalParser m ParsedJournal
+journalp :: MonadIO m => JournalParser m ParsedJournal
journalp = do
many addJournalItemP
eof
@@ -148,7 +146,7 @@ journalp = do
-- | A side-effecting parser; parses any kind of journal item
-- and updates the parse state accordingly.
-addJournalItemP :: MonadIO m => ErroringJournalParser m ()
+addJournalItemP :: MonadIO m => JournalParser m ()
addJournalItemP =
-- all journal line types can be distinguished by the first
-- character, can use choice without backtracking
@@ -158,8 +156,8 @@ addJournalItemP =
, modifiertransactionp >>= modify' . addModifierTransaction
, periodictransactionp >>= modify' . addPeriodicTransaction
, marketpricedirectivep >>= modify' . addMarketPrice
- , void emptyorcommentlinep
- , void multilinecommentp
+ , void (lift emptyorcommentlinep)
+ , void (lift multilinecommentp)
] <?> "transaction or directive"
--- ** directives
@@ -167,7 +165,7 @@ addJournalItemP =
-- | Parse any journal directive and update the parse state accordingly.
-- Cf http://hledger.org/manual.html#directives,
-- http://ledger-cli.org/3.0/doc/ledger3.html#Command-Directives
-directivep :: MonadIO m => ErroringJournalParser m ()
+directivep :: MonadIO m => JournalParser m ()
directivep = (do
optional $ char '!'
choice [
@@ -187,40 +185,44 @@ directivep = (do
]
) <?> "directive"
-includedirectivep :: MonadIO m => ErroringJournalParser m ()
+includedirectivep :: MonadIO m => JournalParser m ()
includedirectivep = do
string "include"
lift (skipSome spacenonewline)
- filename <- lift restofline
- parentpos <- getPosition
- parentj <- get
+ filename <- T.unpack <$> takeWhileP Nothing (/= '\n') -- don't consume newline yet
+
+ -- save parent state
+ parentParserState <- getParserState
+ parentj <- get
+
let childj = newJournalWithParseStateFrom parentj
- (ej :: Either String ParsedJournal) <-
- liftIO $ runExceptT $ do
- let curdir = takeDirectory (sourceName parentpos)
- filepath <- expandPath curdir filename `orRethrowIOError` (show parentpos ++ " locating " ++ filename)
- txt <- readFilePortably filepath `orRethrowIOError` (show parentpos ++ " reading " ++ filepath)
- (ej1::Either (ParseError Char MPErr) ParsedJournal) <-
- runParserT
- (evalStateT
- (choiceInState
- [journalp
- ,timeclockfilep
- ,timedotfilep
- -- can't include a csv file yet, that reader is special
- ])
- childj)
- filepath txt
- either
- (throwError
- . ((show parentpos ++ " in included file " ++ show filename ++ ":\n") ++)
- . parseErrorPretty)
- (return . journalAddFile (filepath, txt))
- ej1
- case ej of
- Left e -> throwError e
- Right childj -> modify' (\parentj -> childj <> parentj)
- -- discard child's parse info, prepend its (reversed) list data, combine other fields
+ parentpos <- getPosition
+
+ -- read child input
+ let curdir = takeDirectory (sourceName parentpos)
+ filepath <- lift $ expandPath curdir filename `orRethrowIOError` (show parentpos ++ " locating " ++ filename)
+ childInput <- lift $ readFilePortably filepath `orRethrowIOError` (show parentpos ++ " reading " ++ filepath)
+
+ -- set child state
+ setInput childInput
+ pushPosition $ initialPos filepath
+ put childj
+
+ -- parse include file
+ let parsers = [ journalp
+ , timeclockfilep
+ , timedotfilep
+ ] -- can't include a csv file yet, that reader is special
+ updatedChildj <- journalAddFile (filepath, childInput) <$>
+ region (withSource childInput) (choiceInState parsers)
+
+ -- restore parent state, prepending the child's parse info
+ setParserState parentParserState
+ put $ updatedChildj <> parentj
+ -- discard child's parse info, prepend its (reversed) list data, combine other fields
+
+ void newline
+
newJournalWithParseStateFrom :: Journal -> Journal
newJournalWithParseStateFrom j = mempty{
@@ -235,26 +237,22 @@ newJournalWithParseStateFrom j = mempty{
-- | Lift an IO action into the exception monad, rethrowing any IO
-- error with the given message prepended.
-orRethrowIOError :: IO a -> String -> ExceptT String IO a
-orRethrowIOError io msg =
- ExceptT $
- (Right <$> io)
- `C.catch` \(e::C.IOException) -> return $ Left $ printf "%s:\n%s" msg (show e)
+orRethrowIOError :: MonadIO m => IO a -> String -> TextParser m a
+orRethrowIOError io msg = do
+ eResult <- liftIO $ (Right <$> io) `C.catch` \(e::C.IOException) -> pure $ Left $ printf "%s:\n%s" msg (show e)
+ case eResult of
+ Right res -> pure res
+ Left errMsg -> fail errMsg
accountdirectivep :: JournalParser m ()
accountdirectivep = do
string "account"
lift (skipSome spacenonewline)
- acct <- lift accountnamep -- eats single spaces
+ acct <- modifiedaccountnamep -- account directives can be modified by alias/apply account
macode' :: Maybe String <- (optional $ lift $ skipSome spacenonewline >> some digitChar)
let macode :: Maybe AccountCode = read <$> macode'
newline
- _tags <- many $ do
- startpos <- getPosition
- l <- indentedlinep
- case runTextParser (setPosition startpos >> tagsp) $ T.pack l of
- Right ts -> return ts
- Left _e -> return [] -- TODO throwError $ parseErrorPretty e
+ skipMany indentedlinep
modify' (\j -> j{jaccounts = (acct, macode) : jaccounts j})
@@ -263,36 +261,44 @@ indentedlinep = lift (skipSome spacenonewline) >> (rstrip <$> lift restofline)
-- | Parse a one-line or multi-line commodity directive.
--
--- >>> Right _ <- rejp commoditydirectivep "commodity $1.00"
--- >>> Right _ <- rejp commoditydirectivep "commodity $\n format $1.00"
--- >>> Right _ <- rejp commoditydirectivep "commodity $\n\n" -- a commodity with no format
--- >>> Right _ <- rejp commoditydirectivep "commodity $1.00\n format $1.00" -- both, what happens ?
-commoditydirectivep :: Monad m => ErroringJournalParser m ()
-commoditydirectivep = try commoditydirectiveonelinep <|> commoditydirectivemultilinep
+-- >>> Right _ <- rjp commoditydirectivep "commodity $1.00"
+-- >>> Right _ <- rjp commoditydirectivep "commodity $\n format $1.00"
+-- >>> Right _ <- rjp commoditydirectivep "commodity $\n\n" -- a commodity with no format
+-- >>> Right _ <- rjp commoditydirectivep "commodity $1.00\n format $1.00" -- both, what happens ?
+commoditydirectivep :: JournalParser m ()
+commoditydirectivep = commoditydirectiveonelinep <|> commoditydirectivemultilinep
-- | Parse a one-line commodity directive.
--
--- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00"
--- >>> Right _ <- rejp commoditydirectiveonelinep "commodity $1.00 ; blah\n"
-commoditydirectiveonelinep :: Monad m => JournalParser m ()
+-- >>> Right _ <- rjp commoditydirectiveonelinep "commodity $1.00"
+-- >>> Right _ <- rjp commoditydirectiveonelinep "commodity $1.00 ; blah\n"
+commoditydirectiveonelinep :: JournalParser m ()
commoditydirectiveonelinep = do
- string "commodity"
- lift (skipSome spacenonewline)
- Amount{acommodity,astyle} <- amountp
+ (pos, Amount{acommodity,astyle}) <- try $ do
+ string "commodity"
+ lift (skipSome spacenonewline)
+ pos <- getPosition
+ amount <- amountp
+ pure $ (pos, amount)
lift (skipMany spacenonewline)
- _ <- followingcommentp <|> (lift eolof >> return "")
- let comm = Commodity{csymbol=acommodity, cformat=Just astyle}
- modify' (\j -> j{jcommodities=M.insert acommodity comm $ jcommodities j})
+ _ <- lift followingcommentp
+ let comm = Commodity{csymbol=acommodity, cformat=Just $ dbg2 "style from commodity directive" astyle}
+ if asdecimalpoint astyle == Nothing
+ then parseErrorAt pos pleaseincludedecimalpoint
+ else modify' (\j -> j{jcommodities=M.insert acommodity comm $ jcommodities j})
+
+pleaseincludedecimalpoint :: String
+pleaseincludedecimalpoint = "to avoid ambiguity, please include a decimal point in commodity directives"
-- | Parse a multi-line commodity directive, containing 0 or more format subdirectives.
--
--- >>> Right _ <- rejp commoditydirectivemultilinep "commodity $ ; blah \n format $1.00 ; blah"
-commoditydirectivemultilinep :: Monad m => ErroringJournalParser m ()
+-- >>> Right _ <- rjp commoditydirectivemultilinep "commodity $ ; blah \n format $1.00 ; blah"
+commoditydirectivemultilinep :: JournalParser m ()
commoditydirectivemultilinep = do
string "commodity"
lift (skipSome spacenonewline)
sym <- lift commoditysymbolp
- _ <- followingcommentp <|> (lift eolof >> return "")
+ _ <- lift followingcommentp
mformat <- lastMay <$> many (indented $ formatdirectivep sym)
let comm = Commodity{csymbol=sym, cformat=mformat}
modify' (\j -> j{jcommodities=M.insert sym comm $ jcommodities j})
@@ -301,16 +307,19 @@ commoditydirectivemultilinep = do
-- | Parse a format (sub)directive, throwing a parse error if its
-- symbol does not match the one given.
-formatdirectivep :: Monad m => CommoditySymbol -> ErroringJournalParser m AmountStyle
+formatdirectivep :: CommoditySymbol -> JournalParser m AmountStyle
formatdirectivep expectedsym = do
string "format"
lift (skipSome spacenonewline)
pos <- getPosition
Amount{acommodity,astyle} <- amountp
- _ <- followingcommentp <|> (lift eolof >> return "")
+ _ <- lift followingcommentp
if acommodity==expectedsym
- then return astyle
- else parserErrorAt pos $
+ then
+ if asdecimalpoint astyle == Nothing
+ then parseErrorAt pos pleaseincludedecimalpoint
+ else return $ dbg2 "style from format subdirective" astyle
+ else parseErrorAt pos $
printf "commodity directive symbol \"%s\" and format directive symbol \"%s\" should be the same" expectedsym acommodity
keywordp :: String -> JournalParser m ()
@@ -395,15 +404,18 @@ defaultyeardirectivep = do
failIfInvalidYear y
setYear y'
-defaultcommoditydirectivep :: Monad m => JournalParser m ()
+defaultcommoditydirectivep :: JournalParser m ()
defaultcommoditydirectivep = do
char 'D' <?> "default commodity"
lift (skipSome spacenonewline)
- Amount{..} <- amountp
+ pos <- getPosition
+ Amount{acommodity,astyle} <- amountp
lift restofline
- setDefaultCommodityAndStyle (acommodity, astyle)
+ if asdecimalpoint astyle == Nothing
+ then parseErrorAt pos pleaseincludedecimalpoint
+ else setDefaultCommodityAndStyle (acommodity, astyle)
-marketpricedirectivep :: Monad m => JournalParser m MarketPrice
+marketpricedirectivep :: JournalParser m MarketPrice
marketpricedirectivep = do
char 'P' <?> "market price"
lift (skipMany spacenonewline)
@@ -423,7 +435,7 @@ ignoredpricecommoditydirectivep = do
lift restofline
return ()
-commodityconversiondirectivep :: Monad m => JournalParser m ()
+commodityconversiondirectivep :: JournalParser m ()
commodityconversiondirectivep = do
char 'C' <?> "commodity conversion"
lift (skipSome spacenonewline)
@@ -437,7 +449,7 @@ commodityconversiondirectivep = do
--- ** transactions
-modifiertransactionp :: MonadIO m => ErroringJournalParser m ModifierTransaction
+modifiertransactionp :: JournalParser m ModifierTransaction
modifiertransactionp = do
char '=' <?> "modifier transaction"
lift (skipMany spacenonewline)
@@ -445,30 +457,66 @@ modifiertransactionp = do
postings <- postingsp Nothing
return $ ModifierTransaction valueexpr postings
-periodictransactionp :: MonadIO m => ErroringJournalParser m PeriodicTransaction
+-- | Parse a periodic transaction
+periodictransactionp :: MonadIO m => JournalParser m PeriodicTransaction
periodictransactionp = do
+
+ -- first line
char '~' <?> "periodic transaction"
- lift (skipMany spacenonewline)
- periodexpr <- T.pack <$> lift restofline
- postings <- postingsp Nothing
- return $ PeriodicTransaction periodexpr postings
+ lift $ skipMany spacenonewline
+ -- a period expression
+ pos <- getPosition
+ d <- liftIO getCurrentDay
+ (periodtxt, (interval, span)) <- lift $ first T.strip <$> match (periodexprp d)
+ -- In periodic transactions, the period expression has an additional constraint:
+ case checkPeriodicTransactionStartDate interval span periodtxt of
+ Just e -> parseErrorAt pos e
+ Nothing -> pure ()
+ -- The line can end here, or it can continue with one or more spaces
+ -- and then zero or more of the following fields. A bit awkward.
+ (status, code, description, (comment, tags)) <-
+ (lift eolof >> return (Unmarked, "", "", ("", [])))
+ <|>
+ (do
+ lift $ skipSome spacenonewline
+ s <- lift statusp
+ c <- lift codep
+ desc <- lift $ T.strip <$> descriptionp
+ (cmt, ts) <- lift transactioncommentp
+ return (s,c,desc,(cmt,ts))
+ )
+
+ -- next lines
+ postings <- postingsp (Just $ first3 $ toGregorian d)
+
+ return $ nullperiodictransaction{
+ ptperiodexpr=periodtxt
+ ,ptinterval=interval
+ ,ptspan=span
+ ,ptstatus=status
+ ,ptcode=code
+ ,ptdescription=description
+ ,ptcomment=comment
+ ,pttags=tags
+ ,ptpostings=postings
+ }
-- | Parse a (possibly unbalanced) transaction.
-transactionp :: MonadIO m => ErroringJournalParser m Transaction
+transactionp :: JournalParser m Transaction
transactionp = do
-- ptrace "transactionp"
- pos <- getPosition
+ startpos <- getPosition
date <- datep <?> "transaction"
- edate <- optional (secondarydatep date) <?> "secondary date"
+ edate <- optional (lift $ secondarydatep date) <?> "secondary date"
lookAhead (lift spacenonewline <|> newline) <?> "whitespace or newline"
status <- lift statusp <?> "cleared status"
- code <- T.pack <$> lift codep <?> "transaction code"
- description <- T.pack . strip <$> descriptionp
- comment <- try followingcommentp <|> (newline >> return "")
- let tags = commentTags comment
- postings <- postingsp (Just date)
- pos' <- getPosition
- let sourcepos = journalSourcePos pos pos'
+ code <- lift codep <?> "transaction code"
+ description <- lift $ T.strip <$> descriptionp
+ (comment, tags) <- lift transactioncommentp
+ let year = first3 $ toGregorian date
+ postings <- postingsp (Just year)
+ endpos <- getPosition
+ let sourcepos = journalSourcePos startpos endpos
return $ txnTieKnot $ Transaction 0 sourcepos date edate status code description comment tags postings ""
#ifdef TESTS
@@ -568,30 +616,32 @@ test_transactionp = do
-- Parse the following whitespace-beginning lines as postings, posting
-- tags, and/or comments (inferring year, if needed, from the given date).
-postingsp :: MonadIO m => Maybe Day -> ErroringJournalParser m [Posting]
-postingsp mdate = many (try $ postingp mdate) <?> "postings"
+postingsp :: Maybe Year -> JournalParser m [Posting]
+postingsp mTransactionYear = many (postingp mTransactionYear) <?> "postings"
--- linebeginningwithspaces :: Monad m => JournalParser m String
+-- linebeginningwithspaces :: JournalParser m String
-- linebeginningwithspaces = do
-- sp <- lift (skipSome spacenonewline)
-- c <- nonspace
-- cs <- lift restofline
-- return $ sp ++ (c:cs) ++ "\n"
-postingp :: MonadIO m => Maybe Day -> ErroringJournalParser m Posting
-postingp mtdate = do
+postingp :: Maybe Year -> JournalParser m Posting
+postingp mTransactionYear = do
-- pdbg 0 "postingp"
- lift (skipSome spacenonewline)
- status <- lift statusp
- lift (skipMany spacenonewline)
- account <- modifiedaccountnamep
+ (status, account) <- try $ do
+ lift (skipSome spacenonewline)
+ status <- lift statusp
+ lift (skipMany spacenonewline)
+ account <- modifiedaccountnamep
+ return (status, account)
let (ptype, account') = (accountNamePostingType account, textUnbracket account)
- amount <- spaceandamountormissingp
+ lift (skipMany spacenonewline)
+ amount <- option missingmixedamt $ Mixed . (:[]) <$> amountp
massertion <- partialbalanceassertionp
_ <- fixedlotpricep
lift (skipMany spacenonewline)
- (comment,tags,mdate,mdate2) <-
- try (followingcommentandtagsp mtdate) <|> (newline >> return ("",[],Nothing,Nothing))
+ (comment,tags,mdate,mdate2) <- lift $ postingcommentp mTransactionYear
return posting
{ pdate=mdate
, pdate2=mdate2
diff --git a/Hledger/Read/TimeclockReader.hs b/Hledger/Read/TimeclockReader.hs
index 07a6169..0971fe0 100644
--- a/Hledger/Read/TimeclockReader.hs
+++ b/Hledger/Read/TimeclockReader.hs
@@ -40,7 +40,7 @@ i, o or O. The meanings of the codes are:
-}
-{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE OverloadedStrings, PackageImports #-}
module Hledger.Read.TimeclockReader (
-- * Reader
@@ -52,7 +52,7 @@ module Hledger.Read.TimeclockReader (
)
where
import Prelude ()
-import Prelude.Compat
+import "base-compat-batteries" Prelude.Compat
import Control.Monad
import Control.Monad.Except (ExceptT)
import Control.Monad.State.Strict
@@ -60,7 +60,8 @@ import Data.Maybe (fromMaybe)
import Data.Text (Text)
import qualified Data.Text as T
import Test.HUnit
-import Text.Megaparsec.Compat hiding (parse)
+import Text.Megaparsec hiding (parse)
+import Text.Megaparsec.Char
import Hledger.Data
-- XXX too much reuse ?
@@ -82,7 +83,7 @@ reader = Reader
parse :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal
parse = parseAndFinaliseJournal timeclockfilep
-timeclockfilep :: ErroringJournalParser IO ParsedJournal
+timeclockfilep :: MonadIO m => JournalParser m ParsedJournal
timeclockfilep = do many timeclockitemp
eof
j@Journal{jparsetimeclockentries=es} <- get
@@ -100,7 +101,7 @@ timeclockfilep = do many timeclockitemp
-- character, excepting transactions versus empty (blank or
-- comment-only) lines, can use choice w/o try
timeclockitemp = choice [
- void emptyorcommentlinep
+ void (lift emptyorcommentlinep)
, timeclockentryp >>= \e -> modify' (\j -> j{jparsetimeclockentries = e : jparsetimeclockentries j})
] <?> "timeclock entry, or default year or historical price directive"
diff --git a/Hledger/Read/TimedotReader.hs b/Hledger/Read/TimedotReader.hs
index 375fe94..656384f 100644
--- a/Hledger/Read/TimedotReader.hs
+++ b/Hledger/Read/TimedotReader.hs
@@ -23,7 +23,7 @@ inc.client1 .... .... ..
-}
-{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE OverloadedStrings, PackageImports #-}
module Hledger.Read.TimedotReader (
-- * Reader
@@ -35,7 +35,7 @@ module Hledger.Read.TimedotReader (
)
where
import Prelude ()
-import Prelude.Compat
+import "base-compat-batteries" Prelude.Compat
import Control.Monad
import Control.Monad.Except (ExceptT)
import Control.Monad.State.Strict
@@ -44,7 +44,8 @@ import Data.List (foldl')
import Data.Maybe
import Data.Text (Text)
import Test.HUnit
-import Text.Megaparsec.Compat hiding (parse)
+import Text.Megaparsec hiding (parse)
+import Text.Megaparsec.Char
import Hledger.Data
import Hledger.Read.Common
@@ -77,7 +78,7 @@ timedotfilep = do many timedotfileitemp
timedotfileitemp = do
ptrace "timedotfileitemp"
choice [
- void emptyorcommentlinep
+ void $ lift emptyorcommentlinep
,timedotdayp >>= \ts -> modify' (addTransactions ts)
] <?> "timedot day entry, or default year or comment line or blank line"
@@ -95,7 +96,7 @@ timedotdayp :: JournalParser m [Transaction]
timedotdayp = do
ptrace " timedotdayp"
d <- datep <* lift eolof
- es <- catMaybes <$> many (const Nothing <$> try emptyorcommentlinep <|>
+ es <- catMaybes <$> many (const Nothing <$> try (lift emptyorcommentlinep) <|>
Just <$> (notFollowedBy datep >> timedotentryp))
return $ map (\t -> t{tdate=d}) es -- <$> many timedotentryp
@@ -111,9 +112,9 @@ timedotentryp = do
a <- modifiedaccountnamep
lift (skipMany spacenonewline)
hours <-
- try (followingcommentp >> return 0)
+ try (lift followingcommentp >> return 0)
<|> (timedotdurationp <*
- (try followingcommentp <|> (newline >> return "")))
+ (try (lift followingcommentp) <|> (newline >> return "")))
let t = nulltransaction{
tsourcepos = pos,
tstatus = Cleared,
diff --git a/Hledger/Reports/BudgetReport.hs b/Hledger/Reports/BudgetReport.hs
index 83eb0bb..9f988c0 100644
--- a/Hledger/Reports/BudgetReport.hs
+++ b/Hledger/Reports/BudgetReport.hs
@@ -353,8 +353,8 @@ budgetReportAsTable
-- XXX here for now
-- | Drop leading components of accounts names as specified by --drop, but only in --flat mode.
maybeAccountNameDrop :: ReportOpts -> AccountName -> AccountName
-maybeAccountNameDrop opts a | tree_ opts = a
- | otherwise = accountNameDrop (drop_ opts) a
+maybeAccountNameDrop opts a | flat_ opts = accountNameDrop (drop_ opts) a
+ | otherwise = a
tests_Hledger_Reports_BudgetReport :: Test
tests_Hledger_Reports_BudgetReport = TestList [
diff --git a/Hledger/Reports/MultiBalanceReports.hs b/Hledger/Reports/MultiBalanceReports.hs
index 721a8d7..d2d5451 100644
--- a/Hledger/Reports/MultiBalanceReports.hs
+++ b/Hledger/Reports/MultiBalanceReports.hs
@@ -212,6 +212,7 @@ multiBalanceReport opts q j =
sortedrows = concatMap (\a -> maybe [] (:[]) $ lookup (aname a) anamesandrows) sortedaccounts
-- Sort the report rows by account code if any, with the empty account code coming last, then account name.
+ -- TODO keep children below their parent. Have to convert to tree ?
sortFlatMultiBalanceReportRowsByAccountCodeAndName = sortBy (comparing acodeandname)
where
acodeandname r = (acode', aname)
diff --git a/Hledger/Reports/ReportOptions.hs b/Hledger/Reports/ReportOptions.hs
index 39c05d2..a1e398b 100644
--- a/Hledger/Reports/ReportOptions.hs
+++ b/Hledger/Reports/ReportOptions.hs
@@ -38,9 +38,6 @@ where
import Control.Applicative ((<|>))
import Data.Data (Data)
-#if !MIN_VERSION_base(4,8,0)
-import Data.Functor.Compat ((<$>))
-#endif
import Data.List
import Data.Maybe
import qualified Data.Text as T
@@ -419,7 +416,7 @@ reportSpan j ropts = do
dbg2 "specifieddates" <$> specifiedStartEndDates ropts
let
DateSpan mjournalstartdate mjournalenddate =
- dbg2 "journalspan" $ journalDateSpan False j -- don't bother with secondary dates
+ dbg2 "journalspan" $ journalDateSpan False j -- ignore secondary dates
mstartdate = mspecifiedstartdate <|> mjournalstartdate
menddate = mspecifiedenddate <|> mjournalenddate
return $ dbg1 "reportspan" $ DateSpan mstartdate menddate
diff --git a/Hledger/Utils.hs b/Hledger/Utils.hs
index ef94797..41fb481 100644
--- a/Hledger/Utils.hs
+++ b/Hledger/Utils.hs
@@ -33,6 +33,8 @@ module Hledger.Utils (---- provide these frequently used modules - or not, for c
-- the rest need to be done in each module I think
)
where
+import Test.HUnit
+
import Control.Monad (liftM, when)
-- import Data.Char
import Data.Default
@@ -212,3 +214,8 @@ sequence' ms = do
{-# INLINABLE mapM' #-}
mapM' :: Monad f => (a -> f b) -> [a] -> f [b]
mapM' f = sequence' . map f
+
+tests_Hledger_Utils :: Test
+tests_Hledger_Utils = TestList [
+ tests_Hledger_Utils_Text
+ ] \ No newline at end of file
diff --git a/Hledger/Utils/Parse.hs b/Hledger/Utils/Parse.hs
index de69798..a60a3d4 100644
--- a/Hledger/Utils/Parse.hs
+++ b/Hledger/Utils/Parse.hs
@@ -1,32 +1,56 @@
{-# LANGUAGE CPP, TypeFamilies #-}
-module Hledger.Utils.Parse where
-import Control.Monad.Except
+module Hledger.Utils.Parse (
+ SimpleStringParser,
+ SimpleTextParser,
+ TextParser,
+ JournalParser,
+
+ choice',
+ choiceInState,
+ surroundedBy,
+ parsewith,
+ parsewithString,
+ parseWithState,
+ parseWithState',
+ fromparse,
+ parseerror,
+ showDateParseError,
+ nonspace,
+ isNonNewlineSpace,
+ spacenonewline,
+ restofline,
+ eolof,
+
+ -- * re-exports
+ CustomErr
+)
+where
+
import Control.Monad.State.Strict (StateT, evalStateT)
import Data.Char
import Data.Functor.Identity (Identity(..))
import Data.List
import Data.Text (Text)
-import Text.Megaparsec.Compat
+import Text.Megaparsec
+import Text.Megaparsec.Char
+import Text.Megaparsec.Custom
import Text.Printf
import Hledger.Data.Types
import Hledger.Utils.UTF8IOCompat (error')
-- | A parser of string to some type.
-type SimpleStringParser a = Parsec MPErr String a
+type SimpleStringParser a = Parsec CustomErr String a
-- | A parser of strict text to some type.
-type SimpleTextParser = Parsec MPErr Text -- XXX an "a" argument breaks the CsvRulesParser declaration somehow
+type SimpleTextParser = Parsec CustomErr Text -- XXX an "a" argument breaks the CsvRulesParser declaration somehow
-- | A parser of text in some monad.
-type TextParser m a = ParsecT MPErr Text m a
+type TextParser m a = ParsecT CustomErr Text m a
-- | A parser of text in some monad, with a journal as state.
-type JournalParser m a = StateT Journal (ParsecT MPErr Text m) a
-
--- | A parser of text in some monad, with a journal as state, that can throw an error string mid-parse.
-type ErroringJournalParser m a = StateT Journal (ParsecT MPErr Text (ExceptT String m)) a
+type JournalParser m a = StateT Journal (ParsecT CustomErr Text m) a
-- | Backtracking choice, use this when alternatives share a prefix.
-- Consumes no input if all choices fail.
@@ -35,7 +59,7 @@ choice' = choice . map try
-- | Backtracking choice, use this when alternatives share a prefix.
-- Consumes no input if all choices fail.
-choiceInState :: [StateT s (ParsecT MPErr Text m) a] -> StateT s (ParsecT MPErr Text m) a
+choiceInState :: [StateT s (ParsecT CustomErr Text m) a] -> StateT s (ParsecT CustomErr Text m) a
choiceInState = choice . map try
surroundedBy :: Applicative m => m openclose -> m a -> m a
@@ -47,15 +71,15 @@ parsewith p = runParser p ""
parsewithString :: Parsec e String a -> String -> Either (ParseError Char e) a
parsewithString p = runParser p ""
-parseWithState :: Monad m => st -> StateT st (ParsecT MPErr Text m) a -> Text -> m (Either (ParseError Char MPErr) a)
+parseWithState :: Monad m => st -> StateT st (ParsecT CustomErr Text m) a -> Text -> m (Either (ParseError Char CustomErr) a)
parseWithState ctx p s = runParserT (evalStateT p ctx) "" s
-parseWithState' :: (
- Stream s
-#if !MIN_VERSION_megaparsec(6,0,0)
- ,ErrorComponent e
-#endif
- ) => st -> StateT st (ParsecT e s Identity) a -> s -> (Either (ParseError (Token s) e) a)
+parseWithState'
+ :: (Stream s)
+ => st
+ -> StateT st (ParsecT e s Identity) a
+ -> s
+ -> (Either (ParseError (Token s) e) a)
parseWithState' ctx p s = runParser (evalStateT p ctx) "" s
fromparse :: (Show t, Show e) => Either (ParseError t e) a -> a
@@ -73,8 +97,11 @@ showDateParseError e = printf "date parse error (%s)" (intercalate ", " $ tail $
nonspace :: TextParser m Char
nonspace = satisfy (not . isSpace)
-spacenonewline :: (Stream s, Char ~ Token s) => ParsecT MPErr s m Char
-spacenonewline = satisfy (`elem` " \v\f\t")
+isNonNewlineSpace :: Char -> Bool
+isNonNewlineSpace c = c /= '\n' && isSpace c
+
+spacenonewline :: (Stream s, Char ~ Token s) => ParsecT CustomErr s m Char
+spacenonewline = satisfy isNonNewlineSpace
restofline :: TextParser m String
restofline = anyChar `manyTill` newline
diff --git a/Hledger/Utils/String.hs b/Hledger/Utils/String.hs
index 5fb0ad3..909fec1 100644
--- a/Hledger/Utils/String.hs
+++ b/Hledger/Utils/String.hs
@@ -49,7 +49,8 @@ module Hledger.Utils.String (
import Data.Char
import Data.List
-import Text.Megaparsec.Compat
+import Text.Megaparsec
+import Text.Megaparsec.Char
import Text.Printf (printf)
import Hledger.Utils.Parse
diff --git a/Hledger/Utils/Text.hs b/Hledger/Utils/Text.hs
index ecdb52f..e84fed9 100644
--- a/Hledger/Utils/Text.hs
+++ b/Hledger/Utils/Text.hs
@@ -5,37 +5,37 @@
{-# LANGUAGE CPP #-}
module Hledger.Utils.Text
- -- (
+ (
-- -- * misc
-- lowercase,
-- uppercase,
-- underline,
-- stripbrackets,
- -- unbracket,
+ textUnbracket,
-- -- quoting
- -- quoteIfSpaced,
+ quoteIfSpaced,
-- quoteIfNeeded,
-- singleQuoteIfNeeded,
-- -- quotechars,
-- -- whitespacechars,
- -- escapeDoubleQuotes,
+ escapeDoubleQuotes,
-- escapeSingleQuotes,
-- escapeQuotes,
-- words',
-- unwords',
- -- stripquotes,
+ stripquotes,
-- isSingleQuoted,
-- isDoubleQuoted,
-- -- * single-line layout
- -- strip,
- -- lstrip,
- -- rstrip,
+ textstrip,
+ textlstrip,
+ textrstrip,
-- chomp,
-- elideLeft,
- -- elideRight,
+ textElideRight,
-- formatString,
-- -- * multi-line layout
- -- concatTopPadded,
+ textConcatTopPadded,
-- concatBottomPadded,
-- concatOneLine,
-- vConcatLeftAligned,
@@ -46,15 +46,18 @@ module Hledger.Utils.Text
-- padright,
-- cliptopleft,
-- fitto,
+ fitText,
-- -- * wide-character-aware layout
- -- strWidth,
- -- textTakeWidth,
+ textWidth,
+ textTakeWidth,
-- fitString,
-- fitStringMulti,
- -- padLeftWide,
- -- padRightWide
- -- )
+ textPadLeftWide,
+ textPadRightWide,
+ tests_Hledger_Utils_Text
+ )
where
+import Test.HUnit
-- import Data.Char
import Data.List
@@ -123,7 +126,7 @@ textElideRight width t =
quoteIfSpaced :: T.Text -> T.Text
quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s
| not $ any (`elem` (T.unpack s)) whitespacechars = s
- | otherwise = "'"<>escapeSingleQuotes s<>"'"
+ | otherwise = quoteIfNeeded s
-- -- | Wrap a string in double quotes, and \-prefix any embedded single
-- -- quotes, if it contains whitespace and is not already single- or
@@ -135,9 +138,9 @@ quoteIfSpaced s | isSingleQuoted s || isDoubleQuoted s = s
-- -- | Double-quote this string if it contains whitespace, single quotes
-- -- or double-quotes, escaping the quotes as needed.
--- quoteIfNeeded :: T.Text -> T.Text
--- quoteIfNeeded s | any (`elem` T.unpack s) (quotechars++whitespacechars) = "\"" <> escapeDoubleQuotes s <> "\""
--- | otherwise = s
+quoteIfNeeded :: T.Text -> T.Text
+quoteIfNeeded s | any (`elem` T.unpack s) (quotechars++whitespacechars) = "\"" <> escapeDoubleQuotes s <> "\""
+ | otherwise = s
-- -- | Single-quote this string if it contains whitespace or double-quotes.
-- -- No good for strings containing single quotes.
@@ -150,10 +153,10 @@ quotechars = "'\""
whitespacechars = " \t\n\r"
escapeDoubleQuotes :: T.Text -> T.Text
-escapeDoubleQuotes = T.replace "\"" "\""
+escapeDoubleQuotes = T.replace "\"" "\\\""
-escapeSingleQuotes :: T.Text -> T.Text
-escapeSingleQuotes = T.replace "'" "\'"
+-- escapeSingleQuotes :: T.Text -> T.Text
+-- escapeSingleQuotes = T.replace "'" "\'"
-- escapeQuotes :: String -> String
-- escapeQuotes = regexReplace "([\"'])" "\\1"
@@ -294,7 +297,7 @@ difforzero a b = maximum [(a - b), 0]
-- It clips and pads on the right when the fourth argument is true, otherwise on the left.
-- It treats wide characters as double width.
fitText :: Maybe Int -> Maybe Int -> Bool -> Bool -> Text -> Text
-fitText mminwidth mmaxwidth ellipsify rightside s = (clip . pad) s
+fitText mminwidth mmaxwidth ellipsify rightside = clip . pad
where
clip :: Text -> Text
clip s =
@@ -415,3 +418,13 @@ textWidth s = maximum $ map (T.foldr (\a b -> charWidth a + b) 0) $ T.lines s
-- | c >= '\x20000' && c <= '\x3FFFD' -> 2
-- | otherwise -> 1
+
+tests_Hledger_Utils_Text = TestList [
+ quoteIfSpaced "a'a" ~?= "a'a"
+ , quoteIfSpaced "a\"a" ~?= "a\"a"
+ , quoteIfSpaced "a a" ~?= "\"a a\""
+ , quoteIfSpaced "mimi's cafe" ~?= "\"mimi's cafe\""
+ , quoteIfSpaced "\"alex\" cafe" ~?= "\"\\\"alex\\\" cafe\""
+ , quoteIfSpaced "le'shan's cafe" ~?= "\"le'shan's cafe\""
+ , quoteIfSpaced "\"be'any's\" cafe" ~?= "\"\\\"be'any's\\\" cafe\""
+ ] \ No newline at end of file
diff --git a/Text/Megaparsec/Compat.hs b/Text/Megaparsec/Compat.hs
deleted file mode 100644
index 33545f9..0000000
--- a/Text/Megaparsec/Compat.hs
+++ /dev/null
@@ -1,73 +0,0 @@
--- | Paper over some differences between megaparsec 5 and 6,
--- making it possible to write code that supports both.
-
-{-# LANGUAGE CPP, FlexibleContexts #-}
-
-module Text.Megaparsec.Compat (
- module Text.Megaparsec
-#if MIN_VERSION_megaparsec(6,0,0)
- ,module Text.Megaparsec.Char
-#endif
- ,MPErr
- ,mptext
- ,mpMkPos
- ,mpUnPos
- ,mpMkParseError
- )
-where
-
-import qualified Data.Set as S
-import Data.Text
-import Text.Megaparsec
-
-#if MIN_VERSION_megaparsec(6,0,0)
-
-import Text.Megaparsec.Char
-import Data.List.NonEmpty (fromList)
-import Data.Void (Void)
-
--- | A basic parse error type.
-type MPErr = Void
-
--- | Make a simple parse error.
-mpMkParseError :: FilePath -> String -> ParseError Char String
-mpMkParseError f s = FancyError (fromList [initialPos f]) (S.singleton $ ErrorFail s)
-
--- | Make a Pos. With a negative argument, throws InvalidPosException (megaparsec >= 6)
--- or calls error (megaparsec < 6).
-mpMkPos :: Int -> Pos
-mpMkPos = mkPos
-
--- | Unmake a Pos.
-mpUnPos :: Pos -> Int
-mpUnPos = unPos
-
--- | Parse and return some Text.
-mptext :: MonadParsec e Text m => Tokens Text -> m (Tokens Text)
-mptext = string
-
-#else
-
-import Text.Megaparsec.Prim (MonadParsec)
-
--- | A basic parse error type.
-type MPErr = Dec
-
--- | Make a simple parse error.
-mpMkParseError :: FilePath -> String -> ParseError Char String
-mpMkParseError f s = (mempty :: ParseError Char String){errorCustom = S.singleton $ f ++ ": " ++ s}
-
--- | Make a Pos. With a negative argument, throws InvalidPosException (megaparsec >= 6)
--- or calls error (megaparsec < 6).
-mpMkPos :: Int -> Pos
-mpMkPos = unsafePos . fromIntegral
-
--- | Unmake a Pos.
-mpUnPos :: Pos -> Int
-mpUnPos = fromIntegral . unPos
-
--- | Parse and return some Text.
-mptext :: MonadParsec e Text m => Text -> m Text
-mptext = fmap pack . string . unpack
-
-#endif
diff --git a/Text/Megaparsec/Custom.hs b/Text/Megaparsec/Custom.hs
new file mode 100644
index 0000000..5dce6f7
--- /dev/null
+++ b/Text/Megaparsec/Custom.hs
@@ -0,0 +1,248 @@
+{-# LANGUAGE BangPatterns #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE LambdaCase #-}
+{-# LANGUAGE PackageImports #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# LANGUAGE StandaloneDeriving #-}
+
+module Text.Megaparsec.Custom (
+ -- * Custom parse error type
+ CustomErr,
+
+ -- * Throwing custom parse errors
+ parseErrorAt,
+ parseErrorAtRegion,
+ withSource,
+
+ -- * Pretty-printing custom parse errors
+ customParseErrorPretty
+)
+where
+
+import Prelude ()
+import "base-compat-batteries" Prelude.Compat hiding (readFile)
+
+import Data.Foldable (asum, toList)
+import qualified Data.List.NonEmpty as NE
+import Data.Proxy (Proxy (Proxy))
+import qualified Data.Set as S
+import Data.Text (Text)
+import Data.Void (Void)
+import Text.Megaparsec
+
+
+--- * Custom parse error type
+
+-- | A custom error type for the parser. The type is specialized to
+-- parsers of 'Text' streams.
+
+data CustomErr
+ -- | Fail with a message at a specific source position interval. The
+ -- interval must be contained within a single line.
+ = ErrorFailAt SourcePos -- Starting position
+ Pos -- Ending position (column; same line as start)
+ String -- Error message
+ -- | Attach a source file to a parse error (for error reporting from
+ -- include files, e.g. with the 'region' parser combinator)
+ | ErrorWithSource Text -- Source file contents
+ (ParseError Char CustomErr) -- The original
+ deriving (Show, Eq, Ord)
+
+-- We require an 'Ord' instance for 'CustomErr' so that they may be
+-- stored in a 'Set'. The actual instance is inconsequential, so we just
+-- derive it, but this requires an (orphan) instance for 'ParseError'.
+-- Hopefully this does not cause any trouble.
+
+deriving instance (Ord c, Ord e) => Ord (ParseError c e)
+
+instance ShowErrorComponent CustomErr where
+ showErrorComponent (ErrorFailAt _ _ errMsg) = errMsg
+ showErrorComponent (ErrorWithSource _ e) = parseErrorTextPretty e
+
+
+--- * Throwing custom parse errors
+
+-- | Fail at a specific source position.
+
+parseErrorAt :: MonadParsec CustomErr s m => SourcePos -> String -> m a
+parseErrorAt pos msg = customFailure (ErrorFailAt pos (sourceColumn pos) msg)
+{-# INLINABLE parseErrorAt #-}
+
+-- | Fail at a specific source interval (within a single line). The
+-- interval is inclusive on the left and exclusive on the right; that is,
+-- it spans from the start position to just before (and not including) the
+-- end position.
+
+parseErrorAtRegion
+ :: MonadParsec CustomErr s m
+ => SourcePos -- ^ Start position
+ -> SourcePos -- ^ End position
+ -> String -- ^ Error message
+ -> m a
+parseErrorAtRegion startPos endPos msg =
+ let startCol = sourceColumn startPos
+ endCol' = mkPos $ subtract 1 $ unPos $ sourceColumn endPos
+ endCol = if startCol <= endCol'
+ && sourceLine startPos == sourceLine endPos
+ then endCol' else startCol
+ in customFailure (ErrorFailAt startPos endCol msg)
+{-# INLINABLE parseErrorAtRegion #-}
+
+-- | Attach a source file to a parse error. Intended for use with the
+-- 'region' parser combinator.
+
+withSource :: Text -> ParseError Char CustomErr -> ParseError Char CustomErr
+withSource s e =
+ FancyError (errorPos e) $ S.singleton $ ErrorCustom $ ErrorWithSource s e
+
+
+--- * Pretty-printing custom parse errors
+
+-- | Pretty-print our custom parse errors and display the line on which
+-- the parse error occurred. Use this instead of 'parseErrorPretty'.
+--
+-- If any custom errors are present, arbitrarily take the first one (since
+-- only one custom error should be used at a time).
+
+customParseErrorPretty :: Text -> ParseError Char CustomErr -> String
+customParseErrorPretty source err = case findCustomError err of
+ Nothing -> customParseErrorPretty' source err pos1
+
+ Just (ErrorWithSource customSource customErr) ->
+ customParseErrorPretty customSource customErr
+
+ Just (ErrorFailAt sourcePos col errMsg) ->
+ let newPositionStack = sourcePos NE.:| NE.tail (errorPos err)
+ errorIntervalLength = mkPos $ max 1 $
+ unPos col - unPos (sourceColumn sourcePos) + 1
+
+ newErr :: ParseError Char Void
+ newErr = FancyError newPositionStack (S.singleton (ErrorFail errMsg))
+
+ in customParseErrorPretty' source newErr errorIntervalLength
+
+ where
+ findCustomError :: ParseError Char CustomErr -> Maybe CustomErr
+ findCustomError err = case err of
+ FancyError _ errSet ->
+ finds (\case {ErrorCustom e -> Just e; _ -> Nothing}) errSet
+ _ -> Nothing
+
+ finds :: (Foldable t) => (a -> Maybe b) -> t a -> Maybe b
+ finds f = asum . map f . toList
+
+
+--- * Modified Megaparsec source
+
+-- The below code has been copied from Megaparsec (v.6.4.1,
+-- Text.Megaparsec.Error) and modified to suit our needs. These changes are
+-- indicated by square brackets. The following copyright notice, conditions,
+-- and disclaimer apply to all code below this point.
+--
+-- Copyright © 2015–2018 Megaparsec contributors<br>
+-- Copyright © 2007 Paolo Martini<br>
+-- Copyright © 1999–2000 Daan Leijen
+--
+-- All rights reserved.
+--
+-- Redistribution and use in source and binary forms, with or without
+-- modification, are permitted provided that the following conditions are met:
+--
+-- * Redistributions of source code must retain the above copyright notice,
+-- this list of conditions and the following disclaimer.
+--
+-- * Redistributions in binary form must reproduce the above copyright notice,
+-- this list of conditions and the following disclaimer in the documentation
+-- and/or other materials provided with the distribution.
+--
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS “AS IS” AND ANY EXPRESS
+-- OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+-- OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+-- NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT,
+-- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+-- OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+-- LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+-- NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+-- EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+-- | Pretty-print a 'ParseError Char CustomErr' and display the line on
+-- which the parse error occurred. The rendered 'String' always ends with
+-- a newline.
+
+customParseErrorPretty'
+ :: ( ShowToken (Token s)
+ , LineToken (Token s)
+ , ShowErrorComponent e
+ , Stream s )
+ => s -- ^ Original input stream
+ -> ParseError (Token s) e -- ^ Parse error to render
+ -> Pos -- ^ Length of error interval [added]
+ -> String -- ^ Result of rendering
+customParseErrorPretty' = customParseErrorPretty_ defaultTabWidth
+
+
+customParseErrorPretty_
+ :: forall s e.
+ ( ShowToken (Token s)
+ , LineToken (Token s)
+ , ShowErrorComponent e
+ , Stream s )
+ => Pos -- ^ Tab width
+ -> s -- ^ Original input stream
+ -> ParseError (Token s) e -- ^ Parse error to render
+ -> Pos -- ^ Length of error interval [added]
+ -> String -- ^ Result of rendering
+customParseErrorPretty_ w s e l =
+ sourcePosStackPretty (errorPos e) <> ":\n" <>
+ padding <> "|\n" <>
+ lineNumber <> " | " <> rline <> "\n" <>
+ padding <> "| " <> rpadding <> highlight <> "\n" <> -- [added `highlight`]
+ parseErrorTextPretty e
+ where
+ epos = NE.head (errorPos e) -- [changed from NE.last to NE.head]
+ lineNumber = (show . unPos . sourceLine) epos
+ padding = replicate (length lineNumber + 1) ' '
+ rpadding = replicate (unPos (sourceColumn epos) - 1) ' '
+ highlight = replicate (unPos l) '^' -- [added]
+ rline =
+ case rline' of
+ [] -> "<empty line>"
+ xs -> expandTab w xs
+ rline' = fmap tokenAsChar . chunkToTokens (Proxy :: Proxy s) $
+ selectLine (sourceLine epos) s
+
+-- | Select a line from input stream given its number.
+
+selectLine
+ :: forall s. (LineToken (Token s), Stream s)
+ => Pos -- ^ Number of line to select
+ -> s -- ^ Input stream
+ -> Tokens s -- ^ Selected line
+selectLine l = go pos1
+ where
+ go !n !s =
+ if n == l
+ then fst (takeWhile_ notNewline s)
+ else go (n <> pos1) (stripNewline $ snd (takeWhile_ notNewline s))
+ notNewline = not . tokenIsNewline
+ stripNewline s =
+ case take1_ s of
+ Nothing -> s
+ Just (_, s') -> s'
+
+-- | Replace tab characters with given number of spaces.
+
+expandTab
+ :: Pos
+ -> String
+ -> String
+expandTab w' = go 0
+ where
+ go 0 [] = []
+ go 0 ('\t':xs) = go w xs
+ go 0 (x:xs) = x : go 0 xs
+ go !n xs = ' ' : go (n - 1) xs
+ w = unPos w'
+
diff --git a/hledger-lib.cabal b/hledger-lib.cabal
index 4f6789e..ef185ae 100644
--- a/hledger-lib.cabal
+++ b/hledger-lib.cabal
@@ -2,10 +2,10 @@
--
-- see: https://github.com/sol/hpack
--
--- hash: 1905b347a666e216347e595c5fcb0b340e5618a383a5493438200c6bcd5e6f98
+-- hash: 757469e81007e12da21192dc913e3abfe79ca36c62479f7d08abc8726656b4c1
name: hledger-lib
-version: 1.9.1
+version: 1.10
synopsis: Core data types, parsers and functionality for the hledger accounting tools
description: This is a reusable library containing hledger's core functionality.
.
@@ -23,7 +23,7 @@ author: Simon Michael <simon@joyful.com>
maintainer: Simon Michael <simon@joyful.com>
license: GPL-3
license-file: LICENSE
-tested-with: GHC==7.10.3, GHC==8.0.2, GHC==8.2.1
+tested-with: GHC==7.10.3, GHC==8.0.2, GHC==8.2.2, GHC==8.4.1
build-type: Simple
cabal-version: >= 1.10
extra-source-files:
@@ -93,9 +93,9 @@ library
Hledger.Utils.Text
Hledger.Utils.Tree
Hledger.Utils.UTF8IOCompat
- Text.Megaparsec.Compat
Text.Tabular.AsciiWide
other-modules:
+ Text.Megaparsec.Custom
Paths_hledger_lib
hs-source-dirs:
./.
@@ -106,7 +106,7 @@ library
, ansi-terminal >=0.6.2.3
, array
, base >=4.8 && <4.12
- , base-compat >=0.8.1
+ , base-compat-batteries >=0.10.1 && <0.11
, blaze-markup >=0.5.1
, bytestring
, cmdargs >=0.10
@@ -117,12 +117,13 @@ library
, directory
, extra
, filepath
- , hashtables >=1.2
- , megaparsec >=5.0
+ , hashtables >=1.2.3.1
+ , megaparsec >=6.4.1
, mtl
, mtl-compat
, old-time
, parsec >=3
+ , parser-combinators >=0.4.0
, pretty-show >=1.6.4
, regex-tdfa
, safe >=0.2
@@ -187,7 +188,7 @@ test-suite doctests
Hledger.Utils.Text
Hledger.Utils.Tree
Hledger.Utils.UTF8IOCompat
- Text.Megaparsec.Compat
+ Text.Megaparsec.Custom
Text.Tabular.AsciiWide
Paths_hledger_lib
hs-source-dirs:
@@ -201,7 +202,7 @@ test-suite doctests
, ansi-terminal >=0.6.2.3
, array
, base >=4.8 && <4.12
- , base-compat >=0.8.1
+ , base-compat-batteries >=0.10.1 && <0.11
, blaze-markup >=0.5.1
, bytestring
, cmdargs >=0.10
@@ -213,12 +214,13 @@ test-suite doctests
, doctest >=0.8
, extra
, filepath
- , hashtables >=1.2
- , megaparsec >=5.0
+ , hashtables >=1.2.3.1
+ , megaparsec >=6.4.1
, mtl
, mtl-compat
, old-time
, parsec >=3
+ , parser-combinators >=0.4.0
, pretty-show >=1.6.4
, regex-tdfa
, safe >=0.2
@@ -232,8 +234,6 @@ test-suite doctests
if (!impl(ghc >= 8.0))
build-depends:
semigroups ==0.18.*
- if impl(ghc >= 8.4) && os(darwin)
- buildable: False
default-language: Haskell2010
test-suite easytests
@@ -285,7 +285,7 @@ test-suite easytests
Hledger.Utils.Text
Hledger.Utils.Tree
Hledger.Utils.UTF8IOCompat
- Text.Megaparsec.Compat
+ Text.Megaparsec.Custom
Text.Tabular.AsciiWide
Paths_hledger_lib
hs-source-dirs:
@@ -298,7 +298,7 @@ test-suite easytests
, ansi-terminal >=0.6.2.3
, array
, base >=4.8 && <4.12
- , base-compat >=0.8.1
+ , base-compat-batteries >=0.10.1 && <0.11
, blaze-markup >=0.5.1
, bytestring
, cmdargs >=0.10
@@ -310,13 +310,14 @@ test-suite easytests
, easytest
, extra
, filepath
- , hashtables >=1.2
+ , hashtables >=1.2.3.1
, hledger-lib
- , megaparsec >=5.0
+ , megaparsec >=6.4.1
, mtl
, mtl-compat
, old-time
, parsec >=3
+ , parser-combinators >=0.4.0
, pretty-show >=1.6.4
, regex-tdfa
, safe >=0.2
@@ -381,7 +382,7 @@ test-suite hunittests
Hledger.Utils.Text
Hledger.Utils.Tree
Hledger.Utils.UTF8IOCompat
- Text.Megaparsec.Compat
+ Text.Megaparsec.Custom
Text.Tabular.AsciiWide
Paths_hledger_lib
hs-source-dirs:
@@ -394,7 +395,7 @@ test-suite hunittests
, ansi-terminal >=0.6.2.3
, array
, base >=4.8 && <4.12
- , base-compat >=0.8.1
+ , base-compat-batteries >=0.10.1 && <0.11
, blaze-markup >=0.5.1
, bytestring
, cmdargs >=0.10
@@ -405,13 +406,14 @@ test-suite hunittests
, directory
, extra
, filepath
- , hashtables >=1.2
+ , hashtables >=1.2.3.1
, hledger-lib
- , megaparsec >=5.0
+ , megaparsec >=6.4.1
, mtl
, mtl-compat
, old-time
, parsec >=3
+ , parser-combinators >=0.4.0
, pretty-show >=1.6.4
, regex-tdfa
, safe >=0.2
diff --git a/hledger_csv.5 b/hledger_csv.5
index 1043528..7679366 100644
--- a/hledger_csv.5
+++ b/hledger_csv.5
@@ -1,5 +1,5 @@
-.TH "hledger_csv" "5" "April 2018" "hledger 1.9.1" "hledger User Manuals"
+.TH "hledger_csv" "5" "June 2018" "hledger 1.9.99" "hledger User Manuals"
diff --git a/hledger_csv.info b/hledger_csv.info
index 0cf93c9..f5eaccb 100644
--- a/hledger_csv.info
+++ b/hledger_csv.info
@@ -3,8 +3,8 @@ This is hledger_csv.info, produced by makeinfo version 6.5 from stdin.

File: hledger_csv.info, Node: Top, Next: CSV RULES, Up: (dir)
-hledger_csv(5) hledger 1.9.1
-****************************
+hledger_csv(5) hledger 1.9.99
+*****************************
hledger can read CSV (comma-separated value) files as if they were
journal files, automatically converting each CSV record into a
@@ -317,33 +317,33 @@ one rules file will be used for all the CSV files being read.

Tag Table:
Node: Top72
-Node: CSV RULES2165
-Ref: #csv-rules2273
-Node: skip2535
-Ref: #skip2629
-Node: date-format2801
-Ref: #date-format2928
-Node: field list3434
-Ref: #field-list3571
-Node: field assignment4276
-Ref: #field-assignment4431
-Node: conditional block4935
-Ref: #conditional-block5089
-Node: include5985
-Ref: #include6115
-Node: newest-first6346
-Ref: #newest-first6460
-Node: CSV TIPS6871
-Ref: #csv-tips6965
-Node: CSV ordering7083
-Ref: #csv-ordering7201
-Node: CSV accounts7382
-Ref: #csv-accounts7520
-Node: CSV amounts7774
-Ref: #csv-amounts7920
-Node: CSV balance assertions8695
-Ref: #csv-balance-assertions8877
-Node: Reading multiple CSV files9082
-Ref: #reading-multiple-csv-files9252
+Node: CSV RULES2167
+Ref: #csv-rules2275
+Node: skip2537
+Ref: #skip2631
+Node: date-format2803
+Ref: #date-format2930
+Node: field list3436
+Ref: #field-list3573
+Node: field assignment4278
+Ref: #field-assignment4433
+Node: conditional block4937
+Ref: #conditional-block5091
+Node: include5987
+Ref: #include6117
+Node: newest-first6348
+Ref: #newest-first6462
+Node: CSV TIPS6873
+Ref: #csv-tips6967
+Node: CSV ordering7085
+Ref: #csv-ordering7203
+Node: CSV accounts7384
+Ref: #csv-accounts7522
+Node: CSV amounts7776
+Ref: #csv-amounts7922
+Node: CSV balance assertions8697
+Ref: #csv-balance-assertions8879
+Node: Reading multiple CSV files9084
+Ref: #reading-multiple-csv-files9254

End Tag Table
diff --git a/hledger_csv.txt b/hledger_csv.txt
index b629029..687390a 100644
--- a/hledger_csv.txt
+++ b/hledger_csv.txt
@@ -249,4 +249,4 @@ SEE ALSO
-hledger 1.9.1 April 2018 hledger_csv(5)
+hledger 1.9.99 June 2018 hledger_csv(5)
diff --git a/hledger_journal.5 b/hledger_journal.5
index 2afe30b..57acb9a 100644
--- a/hledger_journal.5
+++ b/hledger_journal.5
@@ -1,6 +1,6 @@
.\"t
-.TH "hledger_journal" "5" "April 2018" "hledger 1.9.1" "hledger User Manuals"
+.TH "hledger_journal" "5" "June 2018" "hledger 1.9.99" "hledger User Manuals"
@@ -439,8 +439,9 @@ will be the maximum from all posting amounts in that commmodity
or if there are no such amounts in the journal, a default format is used
(like \f[C]$1000.00\f[]).
.PP
-Price amounts and amounts in D directives usually don't affect amount
-format inference, but in some situations they can do so indirectly.
+Price amounts and amounts in \f[C]D\f[] directives usually don't affect
+amount format inference, but in some situations they can do so
+indirectly.
(Eg when D's default commodity is applied to a commodity\-less amount,
or when an amountless posting is balanced using a price's commodity, or
when \-V is used.) If you find this causing problems, set the desired
@@ -627,7 +628,6 @@ assignment).
Note that using balance assignments makes your journal a little less
explicit; to know the exact amount posted, you have to run hledger or do
the calculations yourself, instead of just reading it.
-.SS Prices
.SS Transaction prices
.PP
Within a transaction, you can note an amount's price in another
@@ -636,10 +636,10 @@ This can be used to document the cost (in a purchase) or selling price
(in a sale).
For example, transaction prices are useful to record purchases of a
foreign currency.
-.PP
-Transaction prices are fixed, and do not change over time.
-(Ledger users: Ledger uses a different syntax for fixed prices,
-\f[C]{=UNITPRICE}\f[], which hledger currently ignores).
+Note transaction prices are fixed at the time of the transaction, and do
+not change over time.
+See also market prices, which represent prevailing exchange rates on a
+certain date.
.PP
There are several ways to record a transaction price:
.IP "1." 3
@@ -680,10 +680,13 @@ hledger infer the price that balances the transaction:
.fi
.RE
.PP
-Amounts with transaction prices can be displayed in the transaction
-price's commodity by using the \f[C]\-B/\-\-cost\f[] flag (except for
-#551) (\[lq]B\[rq] is from \[lq]cost Basis\[rq]).
-Eg for the above, here is how \-B affects the balance report:
+(Ledger users: Ledger uses a different syntax for fixed prices,
+\f[C]{=UNITPRICE}\f[], which hledger currently ignores).
+.PP
+Use the \f[C]\-B/\-\-cost\f[] flag to convert amounts to their
+transaction price's commodity, if any.
+(mnemonic: \[lq]B\[rq] is from \[lq]cost Basis\[rq], as in Ledger).
+Eg here is how \-B affects the balance report for the example above:
.IP
.nf
\f[C]
@@ -717,40 +720,6 @@ $\ hledger\ bal\ \-N\ \-\-flat\ \-B
\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ €100\ \ assets:euros
\f[]
.fi
-.SS Market prices
-.PP
-Market prices are not tied to a particular transaction; they represent
-historical exchange rates between two commodities.
-(Ledger calls them historical prices.) For example, the prices published
-by a stock exchange or the foreign exchange market.
-hledger can use these prices to show the market value of things at a
-given date, see market value.
-.PP
-To record market prices, use P directives in the main journal or in an
-included file.
-Their format is:
-.IP
-.nf
-\f[C]
-P\ DATE\ COMMODITYBEINGPRICED\ UNITPRICE
-\f[]
-.fi
-.PP
-DATE is a simple date as usual.
-COMMODITYBEINGPRICED is the symbol of the commodity being priced.
-UNITPRICE is an ordinary amount (symbol and quantity) in a second
-commodity, specifying the unit price or conversion rate for the first
-commodity in terms of the second, on the given date.
-.PP
-For example, the following directives say that one euro was worth 1.35
-US dollars during 2009, and $1.40 from 2010 onward:
-.IP
-.nf
-\f[C]
-P\ 2009/1/1\ €\ $1.35
-P\ 2010/1/1\ €\ $1.40
-\f[]
-.fi
.SS Comments
.PP
Lines in the journal beginning with a semicolon (\f[C];\f[]) or hash
@@ -853,6 +822,176 @@ For example, the following transaction has three tags (\f[C]A\f[],
Tags are like Ledger's metadata feature, except hledger's tag values are
simple strings.
.SS Directives
+.PP
+A directive is a line in the journal beginning with a special keyword,
+that influences how the journal is processed.
+hledger's directives are based on a subset of Ledger's, but there are
+many differences (and also some differences between hledger versions).
+.PP
+Directives' behaviour and interactions can get a little bit complex, so
+here is a table summarising the directives and their effects, with links
+to more detailed docs.
+.PP
+.TS
+tab(@);
+lw(7.8n) lw(8.6n) lw(7.0n) lw(27.8n) lw(18.8n).
+T{
+directive
+T}@T{
+end directive
+T}@T{
+subdirectives
+T}@T{
+purpose
+T}@T{
+can affect (as of 2018/06)
+T}
+_
+T{
+\f[C]account\f[]
+T}@T{
+T}@T{
+any text
+T}@T{
+declare an account name & optional account code
+T}@T{
+account code: balance reports (except \f[C]balance\f[] single\-column
+mode)
+T}
+T{
+\f[C]alias\f[]
+T}@T{
+\f[C]end\ aliases\f[]
+T}@T{
+T}@T{
+rewrite account names
+T}@T{
+following inline/included entries until end of current file or end
+directive
+T}
+T{
+\f[C]apply\ account\f[]
+T}@T{
+\f[C]end\ apply\ account\f[]
+T}@T{
+T}@T{
+prepend a common parent to account names
+T}@T{
+following inline/included entries until end of current file or end
+directive
+T}
+T{
+\f[C]comment\f[]
+T}@T{
+\f[C]end\ comment\f[]
+T}@T{
+T}@T{
+ignore part of journal
+T}@T{
+following inline/included entries until end of current file or end
+directive
+T}
+T{
+\f[C]commodity\f[]
+T}@T{
+T}@T{
+\f[C]format\f[]
+T}@T{
+declare a commodity and its number notation & display style
+T}@T{
+number notation: following entries in that commodity in all files;
+display style: amounts of that commodity in reports
+T}
+T{
+\f[C]D\f[]
+T}@T{
+T}@T{
+T}@T{
+declare a commodity, number notation & display style for commodityless
+amounts
+T}@T{
+commodity: all commodityless entries in all files; number notation:
+following commodityless entries and entries in that commodity in all
+files; display style: amounts of that commodity in reports
+T}
+T{
+\f[C]include\f[]
+T}@T{
+T}@T{
+T}@T{
+include entries/directives from another file
+T}@T{
+what the included directives affect
+T}
+T{
+\f[C]P\f[]
+T}@T{
+T}@T{
+T}@T{
+declare a market price for a commodity
+T}@T{
+amounts of that commodity in reports, when \-V is used
+T}
+T{
+\f[C]Y\f[]
+T}@T{
+T}@T{
+T}@T{
+declare a year for yearless dates
+T}@T{
+following inline/included entries until end of current file
+T}
+.TE
+.PP
+And some definitions:
+.PP
+.TS
+tab(@);
+lw(8.9n) lw(61.1n).
+T{
+subdirective
+T}@T{
+optional indented directive or unparsed text lines immediately following
+a parent directive
+T}
+T{
+account code
+T}@T{
+numeric code influencing account display order in most balance reports
+T}
+T{
+number notation
+T}@T{
+how to interpret numbers when parsing journal entries (the identity of
+the decimal separator character).
+(Currently each commodity can have its own notation, even in the same
+file.)
+T}
+T{
+display style
+T}@T{
+how to display amounts of a commodity in reports (symbol side and
+spacing, digit groups, decimal separator, decimal places)
+T}
+T{
+directive scope
+T}@T{
+which entries and (when there are multiple files) which files are
+affected by a directive
+T}
+.TE
+.PP
+As you can see, directives vary in which journal entries and files they
+affect, and whether they are focussed on input (parsing) or output
+(reports).
+Some directives have multiple effects.
+.PP
+If you have a journal made up of multiple files, or pass multiple \-f
+options on the command line, note that directives which affect input
+typically last only until the end of their defining file.
+This provides more simplicity and predictability, eg reports are not
+changed by writing file options in a different order.
+It can be surprising at times though.
.SS Comment blocks
.PP
A line containing just \f[C]comment\f[] starts a commented region of the
@@ -946,11 +1085,11 @@ with a decimal point (a period or comma, followed by 0 or more decimal
digits).
.SS Default commodity
.PP
-The D directive sets a default commodity (and display format), to be
-used for amounts without a commodity symbol (ie, plain numbers).
+The \f[C]D\f[] directive sets a default commodity (and display format),
+to be used for amounts without a commodity symbol (ie, plain numbers).
(Note this differs from Ledger's default commodity directive.) The
commodity and display format will be applied to all subsequent
-commodity\-less amounts, or until the next D directive.
+commodity\-less amounts, or until the next \f[C]D\f[] directive.
.IP
.nf
\f[C]
@@ -966,6 +1105,41 @@ D\ $1,000.00
.PP
As with the \f[C]commodity\f[] directive, the amount must always be
written with a decimal point.
+.SS Market prices
+.PP
+The \f[C]P\f[] directive declares a market price, which is an exchange
+rate between two commodities on a certain date.
+(In Ledger, they are called \[lq]historical prices\[rq].) These are
+often obtained from a stock exchange, cryptocurrency exchange, or the
+foreign exchange market.
+.PP
+Here is the format:
+.IP
+.nf
+\f[C]
+P\ DATE\ COMMODITYA\ COMMODITYBAMOUNT
+\f[]
+.fi
+.IP \[bu] 2
+DATE is a simple date
+.IP \[bu] 2
+COMMODITYA is the symbol of the commodity being priced
+.IP \[bu] 2
+COMMODITYBAMOUNT is an amount (symbol and quantity) in a second
+commodity, giving the price in commodity B of one unit of commodity A.
+.PP
+These two market price directives say that one euro was worth 1.35 US
+dollars during 2009, and $1.40 from 2010 onward:
+.IP
+.nf
+\f[C]
+P\ 2009/1/1\ €\ $1.35
+P\ 2010/1/1\ €\ $1.40
+\f[]
+.fi
+.PP
+The \f[C]\-V/\-\-value\f[] flag can be used to convert reported amounts
+to another commodity using these prices.
.SS Declaring accounts
.PP
The \f[C]account\f[] directive predeclares account names.
@@ -996,11 +1170,24 @@ account\ expenses\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 6000
\f[]
.fi
.PP
-This affects account display order in reports: accounts with codes are
-listed before accounts without codes, in increasing code order.
-(Otherwise, accounts are listed alphabetically.) Account codes should be
-all numeric digits, unique, and separated from the account name by at
-least two spaces (since account names may contain single spaces).
+This affects how accounts are sorted in account and balance reports:
+accounts with codes are listed before accounts without codes, and in
+increasing code order (instead of listing all accounts alphabetically).
+Warning, this feature is incomplete; account codes do not yet affect
+sort order in
+.IP \[bu] 2
+the \f[C]accounts\f[] command
+.IP \[bu] 2
+the \f[C]balance\f[] command's single\-column mode
+.IP \[bu] 2
+flat mode balance reports (to work around this, declare account codes on
+the subaccounts as well).
+.IP \[bu] 2
+hledger\-web's sidebar
+.PP
+Account codes should be all numeric digits, unique, and separated from
+the account name by at least two spaces (since account names may contain
+single spaces).
By convention, often the first digit indicates the type of account, as
in this numbering scheme and the example above.
In future, we might use this to recognize account types.
@@ -1021,9 +1208,9 @@ account\ assets:bank:checking\ \ \ 1110
.fi
.SS Rewriting accounts
.PP
-You can define aliases which rewrite your account names (after reading
-the journal, before generating reports).
-hledger's account aliases can be useful for:
+You can define account alias rules which rewrite your account names, or
+parts of them, before generating reports.
+This can be useful for:
.IP \[bu] 2
expanding shorthand account names to their full form, allowing easier
data entry and a less verbose journal
@@ -1035,6 +1222,10 @@ combining two accounts into one
.IP \[bu] 2
customising reports
.PP
+Account aliases also rewrite account names in account directives.
+They do not affect account names being entered via hledger add or
+hledger\-web.
+.PP
See also Cookbook: Rewrite account names.
.SS Basic aliases
.PP
@@ -1055,7 +1246,7 @@ command line.
This affects all entries.
It's useful for trying out aliases interactively.
.PP
-OLD and NEW are full account names.
+OLD and NEW are case sensitive full account names.
hledger will replace any occurrence of the old account name with the new
one.
Subaccounts are also affected.
@@ -1165,50 +1356,99 @@ include\ personal.journal
.PP
Prior to hledger 1.0, legacy \f[C]account\f[] and \f[C]end\f[] spellings
were also supported.
+.PP
+A default parent account also affects account directives.
+It does not affect account names being entered via hledger add or
+hledger\-web.
+If account aliases are present, they are applied after the default
+parent account.
.SS Periodic transactions
.PP
-Periodic transaction rules (enabled by \f[C]\-\-forecast\f[] or
-\f[C]\-\-budget\f[]) describe recurring transactions.
-They look like a transaction where the first line is a tilde
-(\f[C]~\f[]) followed by a period expression (mnemonic: \f[C]~\f[] is
-like a recurring sine wave):
+Periodic transaction rules describe transactions that recur.
+They allow you to generate future transactions for forecasting, without
+having to write them out explicitly in the journal (with
+\f[C]\-\-forecast\f[]).
+Secondly, they also can be used to define budget goals (with
+\f[C]\-\-budget\f[]).
+.PP
+A periodic transaction rule looks like a normal journal entry, with the
+date replaced by a tilde (\f[C]~\f[]) followed by a period expression
+(mnemonic: \f[C]~\f[] looks like a repeating sine wave):
.IP
.nf
\f[C]
-~\ weekly
-\ \ assets:bank:checking\ \ \ $400\ ;\ paycheck
-\ \ income:acme\ inc
+~\ monthly
+\ \ \ \ expenses:rent\ \ \ \ \ \ \ \ \ \ $2000
+\ \ \ \ assets:bank:checking
\f[]
.fi
.PP
-Periodic transactions have a dual purpose:
-.IP \[bu] 2
-With \f[C]\-\-forecast\f[], each periodic transaction rule generates
-future transactions, recurring at the specified interval, which can be
-seen in reports.
-Forecast transactions begin the day after the latest recorded journal
-transaction (or today, if there are no transactions) and end 6 months
-from today (or at the report end date, if specified).
-.IP \[bu] 2
-With \f[C]\-\-budget\f[] (supported by the balance command), each
-periodic transaction rule declares recurring budget goals for the
-specified accounts, which can be seen in budget reports.
-Eg the example above declares the goal of receiving $400 from
-\f[C]income:acme\ inc\f[] (and also, depositing $400 into
-\f[C]assets:bank:checking\f[]) every week.
+There is an additional constraint on the period expression: the start
+date must fall on a natural boundary of the interval.
+Eg \f[C]monthly\ from\ 2018/1/1\f[] is valid, but
+\f[C]monthly\ from\ 2018/1/15\f[] is not.
.PP
-(Actually, you can generate one\-off transactions too, by writing a
-period expression with no report interval.)
+If you write a transaction description or same\-line comment, it must be
+separated from the period expression by \f[B]two or more spaces\f[].
+Eg:
+.IP
+.nf
+\f[C]
+;\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 2\ or\ more\ spaces
+;\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ ||
+;\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ vv
+~\ every\ 2\ weeks\ from\ 2018/6\ to\ 2018/9\ \ paycheck
+\ \ \ \ assets:bank:checking\ \ \ $1500
+\ \ \ \ income:acme\ inc
+\f[]
+.fi
+.SS Forecasting with periodic transactions
+.PP
+With the \f[C]\-\-forecast\f[] flag, each periodic transaction rule
+generates future transactions recurring at the specified interval.
+These are not saved in the journal, but appear in all reports.
+They will look like normal transactions, but with an extra tag named
+\f[C]recur\f[], whose value is the generating period expression.
+.PP
+Forecast transactions begin on or after the day after the latest normal
+(non\-periodic) transaction in the journal, or today if there are none.
+.PP
+They end on or before the report end date if specified, or 180 days from
+today if unspecified.
+.PP
+Forecasting can be useful for estimating balances into the future, and
+experimenting with different scenarios.
+Note the start date logic means that forecasted transactions are
+automatically replaced by normal transactions as you add those.
+.PP
+Forecasting can also help with data entry: describe most of your
+transactions with periodic rules, and every so often copy the output of
+\f[C]print\ \-\-forecast\f[] to the journal.
+.PP
+You can generate one\-time transactions too: just write a period
+expression specifying a date with no report interval.
+(You could also write a normal transaction with a future date, but
+remember this disables forecast transactions on previous dates.)
+.SS Budgeting with periodic transactions
+.PP
+With the \f[C]\-\-budget\f[] flag, currently supported by the balance
+command, each periodic transaction rule declares recurring budget goals
+for the specified accounts.
+Eg the first example above declares a goal of spending $2000 on rent
+(and also, a goal of depositing $2000 into checking) every month.
+Goals and actual performance can then be compared in budget reports.
.PP
For more details, see: balance: Budget report and Cookbook: Budgeting
and Forecasting.
.SS Automated postings
.PP
-Automated postings (enabled by \f[C]\-\-auto\f[]) are postings added
-automatically by rule to certain transactions.
-An automated posting rule looks like a transaction where the first line
-is an equal sign (\f[C]=\f[]) followed by a query (mnemonic: \f[C]=\f[]
-tests for matching transactions, and also looks like posting lines):
+Automated posting rules describe extra postings that should be added to
+certain transactions at report time, when the \f[C]\-\-auto\f[] flag is
+used.
+.PP
+An automated posting rule looks like a normal journal entry, except the
+first line is an equal sign (\f[C]=\f[]) followed by a query (mnemonic:
+\f[C]=\f[] looks like posting lines):
.IP
.nf
\f[C]
@@ -1277,7 +1517,7 @@ T}
T{
Vim
T}@T{
-https://github.com/ledger/ledger/wiki/Getting\-started
+https://github.com/ledger/vim\-ledger
T}
T{
Sublime Text
diff --git a/hledger_journal.info b/hledger_journal.info
index 51f0518..fe23603 100644
--- a/hledger_journal.info
+++ b/hledger_journal.info
@@ -4,8 +4,8 @@ stdin.

File: hledger_journal.info, Node: Top, Next: FILE FORMAT, Up: (dir)
-hledger_journal(5) hledger 1.9.1
-********************************
+hledger_journal(5) hledger 1.9.99
+*********************************
hledger's usual data source is a plain text file containing journal
entries in hledger journal format. This file represents a standard
@@ -77,7 +77,7 @@ File: hledger_journal.info, Node: FILE FORMAT, Next: EDITOR SUPPORT, Prev: To
* Virtual Postings::
* Balance Assertions::
* Balance Assignments::
-* Prices::
+* Transaction prices::
* Comments::
* Tags::
* Directives::
@@ -400,12 +400,12 @@ written). The display format is chosen as follows:
* or if there are no such amounts in the journal, a default format is
used (like '$1000.00').
- Price amounts and amounts in D directives usually don't affect amount
-format inference, but in some situations they can do so indirectly. (Eg
-when D's default commodity is applied to a commodity-less amount, or
-when an amountless posting is balanced using a price's commodity, or
-when -V is used.) If you find this causing problems, set the desired
-format with a commodity directive.
+ Price amounts and amounts in 'D' directives usually don't affect
+amount format inference, but in some situations they can do so
+indirectly. (Eg when D's default commodity is applied to a
+commodity-less amount, or when an amountless posting is balanced using a
+price's commodity, or when -V is used.) If you find this causing
+problems, set the desired format with a commodity directive.

File: hledger_journal.info, Node: Virtual Postings, Next: Balance Assertions, Prev: Amounts, Up: FILE FORMAT
@@ -569,7 +569,7 @@ virtual. They are not affected by the '--real/-R' flag or 'real:'
query.

-File: hledger_journal.info, Node: Balance Assignments, Next: Prices, Prev: Balance Assertions, Up: FILE FORMAT
+File: hledger_journal.info, Node: Balance Assignments, Next: Transaction prices, Prev: Balance Assertions, Up: FILE FORMAT
1.10 Balance Assignments
========================
@@ -602,30 +602,18 @@ little less explicit; to know the exact amount posted, you have to run
hledger or do the calculations yourself, instead of just reading it.

-File: hledger_journal.info, Node: Prices, Next: Comments, Prev: Balance Assignments, Up: FILE FORMAT
-
-1.11 Prices
-===========
-
-* Menu:
-
-* Transaction prices::
-* Market prices::
-
-
-File: hledger_journal.info, Node: Transaction prices, Next: Market prices, Up: Prices
+File: hledger_journal.info, Node: Transaction prices, Next: Comments, Prev: Balance Assignments, Up: FILE FORMAT
-1.11.1 Transaction prices
--------------------------
+1.11 Transaction prices
+=======================
Within a transaction, you can note an amount's price in another
commodity. This can be used to document the cost (in a purchase) or
selling price (in a sale). For example, transaction prices are useful
-to record purchases of a foreign currency.
-
- Transaction prices are fixed, and do not change over time. (Ledger
-users: Ledger uses a different syntax for fixed prices, '{=UNITPRICE}',
-which hledger currently ignores).
+to record purchases of a foreign currency. Note transaction prices are
+fixed at the time of the transaction, and do not change over time. See
+also market prices, which represent prevailing exchange rates on a
+certain date.
There are several ways to record a transaction price:
@@ -648,10 +636,13 @@ which hledger currently ignores).
assets:euros €100 ; one hundred euros purchased
assets:dollars $-135 ; for $135
- Amounts with transaction prices can be displayed in the transaction
-price's commodity by using the '-B/--cost' flag (except for #551) ("B"
-is from "cost Basis"). Eg for the above, here is how -B affects the
-balance report:
+ (Ledger users: Ledger uses a different syntax for fixed prices,
+'{=UNITPRICE}', which hledger currently ignores).
+
+ Use the '-B/--cost' flag to convert amounts to their transaction
+price's commodity, if any. (mnemonic: "B" is from "cost Basis", as in
+Ledger). Eg here is how -B affects the balance report for the example
+above:
$ hledger bal -N --flat
$-135 assets:dollars
@@ -674,36 +665,7 @@ $ hledger bal -N --flat -B
€100 assets:euros

-File: hledger_journal.info, Node: Market prices, Prev: Transaction prices, Up: Prices
-
-1.11.2 Market prices
---------------------
-
-Market prices are not tied to a particular transaction; they represent
-historical exchange rates between two commodities. (Ledger calls them
-historical prices.) For example, the prices published by a stock
-exchange or the foreign exchange market. hledger can use these prices
-to show the market value of things at a given date, see market value.
-
- To record market prices, use P directives in the main journal or in
-an included file. Their format is:
-
-P DATE COMMODITYBEINGPRICED UNITPRICE
-
- DATE is a simple date as usual. COMMODITYBEINGPRICED is the symbol
-of the commodity being priced. UNITPRICE is an ordinary amount (symbol
-and quantity) in a second commodity, specifying the unit price or
-conversion rate for the first commodity in terms of the second, on the
-given date.
-
- For example, the following directives say that one euro was worth
-1.35 US dollars during 2009, and $1.40 from 2010 onward:
-
-P 2009/1/1 € $1.35
-P 2010/1/1 € $1.40
-
-
-File: hledger_journal.info, Node: Comments, Next: Tags, Prev: Prices, Up: FILE FORMAT
+File: hledger_journal.info, Node: Comments, Next: Tags, Prev: Transaction prices, Up: FILE FORMAT
1.12 Comments
=============
@@ -792,6 +754,96 @@ File: hledger_journal.info, Node: Directives, Next: Periodic transactions, Pr
1.14 Directives
===============
+A directive is a line in the journal beginning with a special keyword,
+that influences how the journal is processed. hledger's directives are
+based on a subset of Ledger's, but there are many differences (and also
+some differences between hledger versions).
+
+ Directives' behaviour and interactions can get a little bit complex,
+so here is a table summarising the directives and their effects, with
+links to more detailed docs.
+
+directiveend subdirectivespurpose can affect (as of
+ directive 2018/06)
+-----------------------------------------------------------------------------
+'account' any declare an account name & account code:
+ text optional account code balance reports
+ (except 'balance'
+ single-column
+ mode)
+'alias' 'end rewrite account names following
+ aliases' inline/included
+ entries until end
+ of current file
+ or end directive
+'apply 'end prepend a common parent to following
+account' apply account names inline/included
+ account' entries until end
+ of current file
+ or end directive
+'comment''end ignore part of journal following
+ comment' inline/included
+ entries until end
+ of current file
+ or end directive
+'commodity' 'format'declare a commodity and its number notation:
+ number notation & display following entries
+ style in that commodity
+ in all files;
+ display style:
+ amounts of that
+ commodity in
+ reports
+'D' declare a commodity, number commodity: all
+ notation & display style commodityless
+ for commodityless amounts entries in all
+ files; number
+ notation:
+ following
+ commodityless
+ entries and
+ entries in that
+ commodity in all
+ files; display
+ style: amounts of
+ that commodity in
+ reports
+'include' include entries/directives what the included
+ from another file directives affect
+'P' declare a market price for amounts of that
+ a commodity commodity in
+ reports, when -V
+ is used
+'Y' declare a year for yearless following
+ dates inline/included
+ entries until end
+ of current file
+
+ And some definitions:
+
+subdirectiveoptional indented directive or unparsed text lines
+ immediately following a parent directive
+account numeric code influencing account display order in most
+code balance reports
+number how to interpret numbers when parsing journal entries (the
+notation identity of the decimal separator character). (Currently
+ each commodity can have its own notation, even in the same
+ file.)
+display how to display amounts of a commodity in reports (symbol side
+style and spacing, digit groups, decimal separator, decimal places)
+directive which entries and (when there are multiple files) which files
+scope are affected by a directive
+
+ As you can see, directives vary in which journal entries and files
+they affect, and whether they are focussed on input (parsing) or output
+(reports). Some directives have multiple effects.
+
+ If you have a journal made up of multiple files, or pass multiple -f
+options on the command line, note that directives which affect input
+typically last only until the end of their defining file. This provides
+more simplicity and predictability, eg reports are not changed by
+writing file options in a different order. It can be surprising at
+times though.
* Menu:
* Comment blocks::
@@ -799,6 +851,7 @@ File: hledger_journal.info, Node: Directives, Next: Periodic transactions, Pr
* Default year::
* Declaring commodities::
* Default commodity::
+* Market prices::
* Declaring accounts::
* Rewriting accounts::
* Default parent account::
@@ -895,16 +948,16 @@ always be written with a decimal point (a period or comma, followed by 0
or more decimal digits).

-File: hledger_journal.info, Node: Default commodity, Next: Declaring accounts, Prev: Declaring commodities, Up: Directives
+File: hledger_journal.info, Node: Default commodity, Next: Market prices, Prev: Declaring commodities, Up: Directives
1.14.5 Default commodity
------------------------
-The D directive sets a default commodity (and display format), to be
+The 'D' directive sets a default commodity (and display format), to be
used for amounts without a commodity symbol (ie, plain numbers). (Note
this differs from Ledger's default commodity directive.) The commodity
and display format will be applied to all subsequent commodity-less
-amounts, or until the next D directive.
+amounts, or until the next 'D' directive.
# commodity-less amounts should be treated as dollars
# (and displayed with symbol on the left, thousands separators and two decimal places)
@@ -918,9 +971,39 @@ D $1,000.00
with a decimal point.

-File: hledger_journal.info, Node: Declaring accounts, Next: Rewriting accounts, Prev: Default commodity, Up: Directives
+File: hledger_journal.info, Node: Market prices, Next: Declaring accounts, Prev: Default commodity, Up: Directives
+
+1.14.6 Market prices
+--------------------
+
+The 'P' directive declares a market price, which is an exchange rate
+between two commodities on a certain date. (In Ledger, they are called
+"historical prices".) These are often obtained from a stock exchange,
+cryptocurrency exchange, or the foreign exchange market.
+
+ Here is the format:
+
+P DATE COMMODITYA COMMODITYBAMOUNT
+
+ * DATE is a simple date
+ * COMMODITYA is the symbol of the commodity being priced
+ * COMMODITYBAMOUNT is an amount (symbol and quantity) in a second
+ commodity, giving the price in commodity B of one unit of commodity
+ A.
+
+ These two market price directives say that one euro was worth 1.35 US
+dollars during 2009, and $1.40 from 2010 onward:
+
+P 2009/1/1 € $1.35
+P 2010/1/1 € $1.40
+
+ The '-V/--value' flag can be used to convert reported amounts to
+another commodity using these prices.
+
+
+File: hledger_journal.info, Node: Declaring accounts, Next: Rewriting accounts, Prev: Market prices, Up: Directives
-1.14.6 Declaring accounts
+1.14.7 Declaring accounts
-------------------------
The 'account' directive predeclares account names. The simplest form is
@@ -940,14 +1023,23 @@ account liabilities 2000
account revenues 4000
account expenses 6000
- This affects account display order in reports: accounts with codes
-are listed before accounts without codes, in increasing code order.
-(Otherwise, accounts are listed alphabetically.) Account codes should
-be all numeric digits, unique, and separated from the account name by at
-least two spaces (since account names may contain single spaces). By
-convention, often the first digit indicates the type of account, as in
-this numbering scheme and the example above. In future, we might use
-this to recognize account types.
+ This affects how accounts are sorted in account and balance reports:
+accounts with codes are listed before accounts without codes, and in
+increasing code order (instead of listing all accounts alphabetically).
+Warning, this feature is incomplete; account codes do not yet affect
+sort order in
+
+ * the 'accounts' command
+ * the 'balance' command's single-column mode
+ * flat mode balance reports (to work around this, declare account
+ codes on the subaccounts as well).
+ * hledger-web's sidebar
+
+ Account codes should be all numeric digits, unique, and separated
+from the account name by at least two spaces (since account names may
+contain single spaces). By convention, often the first digit indicates
+the type of account, as in this numbering scheme and the example above.
+In future, we might use this to recognize account types.
An account directive can also have indented subdirectives following
it, which are currently ignored. Here is the full syntax:
@@ -962,12 +1054,11 @@ account assets:bank:checking 1110

File: hledger_journal.info, Node: Rewriting accounts, Next: Default parent account, Prev: Declaring accounts, Up: Directives
-1.14.7 Rewriting accounts
+1.14.8 Rewriting accounts
-------------------------
-You can define aliases which rewrite your account names (after reading
-the journal, before generating reports). hledger's account aliases can
-be useful for:
+You can define account alias rules which rewrite your account names, or
+parts of them, before generating reports. This can be useful for:
* expanding shorthand account names to their full form, allowing
easier data entry and a less verbose journal
@@ -976,6 +1067,10 @@ be useful for:
or combining two accounts into one
* customising reports
+ Account aliases also rewrite account names in account directives.
+They do not affect account names being entered via hledger add or
+hledger-web.
+
See also Cookbook: Rewrite account names.
* Menu:
@@ -987,7 +1082,7 @@ be useful for:

File: hledger_journal.info, Node: Basic aliases, Next: Regex aliases, Up: Rewriting accounts
-1.14.7.1 Basic aliases
+1.14.8.1 Basic aliases
......................
To set an account alias, use the 'alias' directive in your journal file.
@@ -1000,9 +1095,9 @@ alias OLD = NEW
This affects all entries. It's useful for trying out aliases
interactively.
- OLD and NEW are full account names. hledger will replace any
-occurrence of the old account name with the new one. Subaccounts are
-also affected. Eg:
+ OLD and NEW are case sensitive full account names. hledger will
+replace any occurrence of the old account name with the new one.
+Subaccounts are also affected. Eg:
alias checking = assets:bank:wells fargo:checking
# rewrites "checking" to "assets:bank:wells fargo:checking", or "checking:a" to "assets:bank:wells fargo:checking:a"
@@ -1010,7 +1105,7 @@ alias checking = assets:bank:wells fargo:checking

File: hledger_journal.info, Node: Regex aliases, Next: Multiple aliases, Prev: Basic aliases, Up: Rewriting accounts
-1.14.7.2 Regex aliases
+1.14.8.2 Regex aliases
......................
There is also a more powerful variant that uses a regular expression,
@@ -1035,7 +1130,7 @@ whitespace.

File: hledger_journal.info, Node: Multiple aliases, Next: end aliases, Prev: Regex aliases, Up: Rewriting accounts
-1.14.7.3 Multiple aliases
+1.14.8.3 Multiple aliases
.........................
You can define as many aliases as you like using directives or
@@ -1051,7 +1146,7 @@ following order:

File: hledger_journal.info, Node: end aliases, Prev: Multiple aliases, Up: Rewriting accounts
-1.14.7.4 'end aliases'
+1.14.8.4 'end aliases'
......................
You can clear (forget) all currently defined aliases with the 'end
@@ -1062,7 +1157,7 @@ end aliases

File: hledger_journal.info, Node: Default parent account, Prev: Rewriting accounts, Up: Directives
-1.14.8 Default parent account
+1.14.9 Default parent account
-----------------------------
You can specify a parent account which will be prepended to all accounts
@@ -1095,38 +1190,94 @@ include personal.journal
Prior to hledger 1.0, legacy 'account' and 'end' spellings were also
supported.
+ A default parent account also affects account directives. It does
+not affect account names being entered via hledger add or hledger-web.
+If account aliases are present, they are applied after the default
+parent account.
+

File: hledger_journal.info, Node: Periodic transactions, Next: Automated postings, Prev: Directives, Up: FILE FORMAT
1.15 Periodic transactions
==========================
-Periodic transaction rules (enabled by '--forecast' or '--budget')
-describe recurring transactions. They look like a transaction where the
-first line is a tilde ('~') followed by a period expression (mnemonic:
-'~' is like a recurring sine wave):
+Periodic transaction rules describe transactions that recur. They allow
+you to generate future transactions for forecasting, without having to
+write them out explicitly in the journal (with '--forecast'). Secondly,
+they also can be used to define budget goals (with '--budget').
+
+ A periodic transaction rule looks like a normal journal entry, with
+the date replaced by a tilde ('~') followed by a period expression
+(mnemonic: '~' looks like a repeating sine wave):
-~ weekly
- assets:bank:checking $400 ; paycheck
- income:acme inc
+~ monthly
+ expenses:rent $2000
+ assets:bank:checking
- Periodic transactions have a dual purpose:
+ There is an additional constraint on the period expression: the start
+date must fall on a natural boundary of the interval. Eg 'monthly from
+2018/1/1' is valid, but 'monthly from 2018/1/15' is not.
- * With '--forecast', each periodic transaction rule generates future
- transactions, recurring at the specified interval, which can be
- seen in reports. Forecast transactions begin the day after the
- latest recorded journal transaction (or today, if there are no
- transactions) and end 6 months from today (or at the report end
- date, if specified).
+ If you write a transaction description or same-line comment, it must
+be separated from the period expression by *two or more spaces*. Eg:
- * With '--budget' (supported by the balance command), each periodic
- transaction rule declares recurring budget goals for the specified
- accounts, which can be seen in budget reports. Eg the example
- above declares the goal of receiving $400 from 'income:acme inc'
- (and also, depositing $400 into 'assets:bank:checking') every week.
+; 2 or more spaces
+; ||
+; vv
+~ every 2 weeks from 2018/6 to 2018/9 paycheck
+ assets:bank:checking $1500
+ income:acme inc
- (Actually, you can generate one-off transactions too, by writing a
-period expression with no report interval.)
+* Menu:
+
+* Forecasting with periodic transactions::
+* Budgeting with periodic transactions::
+
+
+File: hledger_journal.info, Node: Forecasting with periodic transactions, Next: Budgeting with periodic transactions, Up: Periodic transactions
+
+1.15.1 Forecasting with periodic transactions
+---------------------------------------------
+
+With the '--forecast' flag, each periodic transaction rule generates
+future transactions recurring at the specified interval. These are not
+saved in the journal, but appear in all reports. They will look like
+normal transactions, but with an extra tag named 'recur', whose value is
+the generating period expression.
+
+ Forecast transactions begin on or after the day after the latest
+normal (non-periodic) transaction in the journal, or today if there are
+none.
+
+ They end on or before the report end date if specified, or 180 days
+from today if unspecified.
+
+ Forecasting can be useful for estimating balances into the future,
+and experimenting with different scenarios. Note the start date logic
+means that forecasted transactions are automatically replaced by normal
+transactions as you add those.
+
+ Forecasting can also help with data entry: describe most of your
+transactions with periodic rules, and every so often copy the output of
+'print --forecast' to the journal.
+
+ You can generate one-time transactions too: just write a period
+expression specifying a date with no report interval. (You could also
+write a normal transaction with a future date, but remember this
+disables forecast transactions on previous dates.)
+
+
+File: hledger_journal.info, Node: Budgeting with periodic transactions, Prev: Forecasting with periodic transactions, Up: Periodic transactions
+
+1.15.2 Budgeting with periodic transactions
+-------------------------------------------
+
+With the '--budget' flag, currently supported by the balance command,
+each periodic transaction rule declares recurring budget goals for the
+specified accounts. Eg the first example above declares a goal of
+spending $2000 on rent (and also, a goal of depositing $2000 into
+checking) every month. Goals and actual performance can then be
+compared in budget reports.
For more details, see: balance: Budget report and Cookbook: Budgeting
and Forecasting.
@@ -1137,11 +1288,12 @@ File: hledger_journal.info, Node: Automated postings, Prev: Periodic transacti
1.16 Automated postings
=======================
-Automated postings (enabled by '--auto') are postings added
-automatically by rule to certain transactions. An automated posting
-rule looks like a transaction where the first line is an equal sign
-('=') followed by a query (mnemonic: '=' tests for matching
-transactions, and also looks like posting lines):
+Automated posting rules describe extra postings that should be added to
+certain transactions at report time, when the '--auto' flag is used.
+
+ An automated posting rule looks like a normal journal entry, except
+the first line is an equal sign ('=') followed by a query (mnemonic: '='
+looks like posting lines):
= expenses:gifts
budget:gifts *-1
@@ -1189,7 +1341,7 @@ files:
Editor
--------------------------------------------------------------------------
Emacs http://www.ledger-cli.org/3.0/doc/ledger-mode.html
-Vim https://github.com/ledger/ledger/wiki/Getting-started
+Vim https://github.com/ledger/vim-ledger
Sublime https://github.com/ledger/ledger/wiki/Editing-Ledger-files-with-Sublime-Text-or-RubyMine
Text
Textmate https://github.com/ledger/ledger/wiki/Using-TextMate-2
@@ -1202,89 +1354,91 @@ Code

Tag Table:
Node: Top76
-Node: FILE FORMAT2374
-Ref: #file-format2498
-Node: Transactions2770
-Ref: #transactions2891
-Node: Postings3575
-Ref: #postings3702
-Node: Dates4697
-Ref: #dates4812
-Node: Simple dates4877
-Ref: #simple-dates5003
-Node: Secondary dates5369
-Ref: #secondary-dates5523
-Node: Posting dates7086
-Ref: #posting-dates7215
-Node: Status8589
-Ref: #status8709
-Node: Description10417
-Ref: #description10555
-Node: Payee and note10874
-Ref: #payee-and-note10988
-Node: Account names11230
-Ref: #account-names11373
-Node: Amounts11860
-Ref: #amounts11996
-Node: Virtual Postings15011
-Ref: #virtual-postings15170
-Node: Balance Assertions16390
-Ref: #balance-assertions16565
-Node: Assertions and ordering17461
-Ref: #assertions-and-ordering17647
-Node: Assertions and included files18347
-Ref: #assertions-and-included-files18588
-Node: Assertions and multiple -f options18921
-Ref: #assertions-and-multiple--f-options19175
-Node: Assertions and commodities19307
-Ref: #assertions-and-commodities19542
-Node: Assertions and subaccounts20238
-Ref: #assertions-and-subaccounts20470
-Node: Assertions and virtual postings20991
-Ref: #assertions-and-virtual-postings21198
-Node: Balance Assignments21340
-Ref: #balance-assignments21509
-Node: Prices22629
-Ref: #prices22762
-Node: Transaction prices22813
-Ref: #transaction-prices22958
-Node: Market prices25114
-Ref: #market-prices25249
-Node: Comments26209
-Ref: #comments26331
-Node: Tags27501
-Ref: #tags27619
-Node: Directives29021
-Ref: #directives29164
-Node: Comment blocks29357
-Ref: #comment-blocks29502
-Node: Including other files29678
-Ref: #including-other-files29858
-Node: Default year30247
-Ref: #default-year30416
-Node: Declaring commodities30839
-Ref: #declaring-commodities31022
-Node: Default commodity32249
-Ref: #default-commodity32430
-Node: Declaring accounts33062
-Ref: #declaring-accounts33242
-Node: Rewriting accounts34589
-Ref: #rewriting-accounts34774
-Node: Basic aliases35378
-Ref: #basic-aliases35524
-Node: Regex aliases36214
-Ref: #regex-aliases36385
-Node: Multiple aliases37103
-Ref: #multiple-aliases37278
-Node: end aliases37776
-Ref: #end-aliases37923
-Node: Default parent account38024
-Ref: #default-parent-account38190
-Node: Periodic transactions38849
-Ref: #periodic-transactions39028
-Node: Automated postings40327
-Ref: #automated-postings40481
-Node: EDITOR SUPPORT41614
-Ref: #editor-support41732
+Node: FILE FORMAT2376
+Ref: #file-format2500
+Node: Transactions2784
+Ref: #transactions2905
+Node: Postings3589
+Ref: #postings3716
+Node: Dates4711
+Ref: #dates4826
+Node: Simple dates4891
+Ref: #simple-dates5017
+Node: Secondary dates5383
+Ref: #secondary-dates5537
+Node: Posting dates7100
+Ref: #posting-dates7229
+Node: Status8603
+Ref: #status8723
+Node: Description10431
+Ref: #description10569
+Node: Payee and note10888
+Ref: #payee-and-note11002
+Node: Account names11244
+Ref: #account-names11387
+Node: Amounts11874
+Ref: #amounts12010
+Node: Virtual Postings15027
+Ref: #virtual-postings15186
+Node: Balance Assertions16406
+Ref: #balance-assertions16581
+Node: Assertions and ordering17477
+Ref: #assertions-and-ordering17663
+Node: Assertions and included files18363
+Ref: #assertions-and-included-files18604
+Node: Assertions and multiple -f options18937
+Ref: #assertions-and-multiple--f-options19191
+Node: Assertions and commodities19323
+Ref: #assertions-and-commodities19558
+Node: Assertions and subaccounts20254
+Ref: #assertions-and-subaccounts20486
+Node: Assertions and virtual postings21007
+Ref: #assertions-and-virtual-postings21214
+Node: Balance Assignments21356
+Ref: #balance-assignments21537
+Node: Transaction prices22657
+Ref: #transaction-prices22826
+Node: Comments25094
+Ref: #comments25228
+Node: Tags26398
+Ref: #tags26516
+Node: Directives27918
+Ref: #directives28061
+Node: Comment blocks33917
+Ref: #comment-blocks34062
+Node: Including other files34238
+Ref: #including-other-files34418
+Node: Default year34807
+Ref: #default-year34976
+Node: Declaring commodities35399
+Ref: #declaring-commodities35582
+Node: Default commodity36809
+Ref: #default-commodity36985
+Node: Market prices37621
+Ref: #market-prices37786
+Node: Declaring accounts38627
+Ref: #declaring-accounts38803
+Node: Rewriting accounts40474
+Ref: #rewriting-accounts40659
+Node: Basic aliases41393
+Ref: #basic-aliases41539
+Node: Regex aliases42243
+Ref: #regex-aliases42414
+Node: Multiple aliases43132
+Ref: #multiple-aliases43307
+Node: end aliases43805
+Ref: #end-aliases43952
+Node: Default parent account44053
+Ref: #default-parent-account44219
+Node: Periodic transactions45103
+Ref: #periodic-transactions45282
+Node: Forecasting with periodic transactions46492
+Ref: #forecasting-with-periodic-transactions46735
+Node: Budgeting with periodic transactions47976
+Ref: #budgeting-with-periodic-transactions48215
+Node: Automated postings48674
+Ref: #automated-postings48828
+Node: EDITOR SUPPORT49967
+Ref: #editor-support50085

End Tag Table
diff --git a/hledger_journal.txt b/hledger_journal.txt
index dc6e261..ec59540 100644
--- a/hledger_journal.txt
+++ b/hledger_journal.txt
@@ -466,16 +466,14 @@ FILE FORMAT
less explicit; to know the exact amount posted, you have to run hledger
or do the calculations yourself, instead of just reading it.
- Prices
Transaction prices
Within a transaction, you can note an amount's price in another commod-
ity. This can be used to document the cost (in a purchase) or selling
price (in a sale). For example, transaction prices are useful to
- record purchases of a foreign currency.
-
- Transaction prices are fixed, and do not change over time. (Ledger
- users: Ledger uses a different syntax for fixed prices, {=UNITPRICE},
- which hledger currently ignores).
+ record purchases of a foreign currency. Note transaction prices are
+ fixed at the time of the transaction, and do not change over time. See
+ also market prices, which represent prevailing exchange rates on a cer-
+ tain date.
There are several ways to record a transaction price:
@@ -498,10 +496,12 @@ FILE FORMAT
assets:euros 100 ; one hundred euros purchased
assets:dollars $-135 ; for $135
- Amounts with transaction prices can be displayed in the transaction
- price's commodity by using the -B/--cost flag (except for #551) ("B" is
- from "cost Basis"). Eg for the above, here is how -B affects the bal-
- ance report:
+ (Ledger users: Ledger uses a different syntax for fixed prices, {=UNIT-
+ PRICE}, which hledger currently ignores).
+
+ Use the -B/--cost flag to convert amounts to their transaction price's
+ commodity, if any. (mnemonic: "B" is from "cost Basis", as in Ledger).
+ Eg here is how -B affects the balance report for the example above:
$ hledger bal -N --flat
$-135 assets:dollars
@@ -510,8 +510,8 @@ FILE FORMAT
$-135 assets:dollars
$135 assets:euros # <- the euros' cost
- Note -B is sensitive to the order of postings when a transaction price
- is inferred: the inferred price will be in the commodity of the last
+ Note -B is sensitive to the order of postings when a transaction price
+ is inferred: the inferred price will be in the commodity of the last
amount. So if example 3's postings are reversed, while the transaction
is equivalent, -B shows something different:
@@ -523,40 +523,16 @@ FILE FORMAT
-100 assets:dollars # <- the dollars' selling price
100 assets:euros
- Market prices
- Market prices are not tied to a particular transaction; they represent
- historical exchange rates between two commodities. (Ledger calls them
- historical prices.) For example, the prices published by a stock
- exchange or the foreign exchange market. hledger can use these prices
- to show the market value of things at a given date, see market value.
-
- To record market prices, use P directives in the main journal or in an
- included file. Their format is:
-
- P DATE COMMODITYBEINGPRICED UNITPRICE
-
- DATE is a simple date as usual. COMMODITYBEINGPRICED is the symbol of
- the commodity being priced. UNITPRICE is an ordinary amount (symbol
- and quantity) in a second commodity, specifying the unit price or con-
- version rate for the first commodity in terms of the second, on the
- given date.
-
- For example, the following directives say that one euro was worth 1.35
- US dollars during 2009, and $1.40 from 2010 onward:
-
- P 2009/1/1 € $1.35
- P 2010/1/1 € $1.40
-
Comments
Lines in the journal beginning with a semicolon (;) or hash (#) or star
- (*) are comments, and will be ignored. (Star comments cause org-mode
- nodes to be ignored, allowing emacs users to fold and navigate their
+ (*) are comments, and will be ignored. (Star comments cause org-mode
+ nodes to be ignored, allowing emacs users to fold and navigate their
journals with org-mode or orgstruct-mode.)
- You can attach comments to a transaction by writing them after the
- description and/or indented on the following lines (before the post-
- ings). Similarly, you can attach comments to an individual posting by
- writing them after the amount and/or indented on the following lines.
+ You can attach comments to a transaction by writing them after the
+ description and/or indented on the following lines (before the post-
+ ings). Similarly, you can attach comments to an individual posting by
+ writing them after the amount and/or indented on the following lines.
Transaction and posting comments must begin with a semicolon (;).
Some examples:
@@ -580,24 +556,24 @@ FILE FORMAT
; another comment line for posting 2
; a file comment (because not indented)
- You can also comment larger regions of a file using comment and
+ You can also comment larger regions of a file using comment and
end comment directives.
Tags
- Tags are a way to add extra labels or labelled data to postings and
+ Tags are a way to add extra labels or labelled data to postings and
transactions, which you can then search or pivot on.
- A simple tag is a word (which may contain hyphens) followed by a full
+ A simple tag is a word (which may contain hyphens) followed by a full
colon, written inside a transaction or posting comment line:
2017/1/16 bought groceries ; sometag:
- Tags can have a value, which is the text after the colon, up to the
+ Tags can have a value, which is the text after the colon, up to the
next comma or end of line, with leading/trailing whitespace removed:
expenses:food $10 ; a-posting-tag: the tag value
- Note this means hledger's tag values can not contain commas or new-
+ Note this means hledger's tag values can not contain commas or new-
lines. Ending at commas means you can write multiple short tags on one
line, comma separated:
@@ -611,39 +587,132 @@ FILE FORMAT
o "tag2" is another tag, whose value is "some value ..."
- Tags in a transaction comment affect the transaction and all of its
- postings, while tags in a posting comment affect only that posting.
- For example, the following transaction has three tags (A, TAG2,
+ Tags in a transaction comment affect the transaction and all of its
+ postings, while tags in a posting comment affect only that posting.
+ For example, the following transaction has three tags (A, TAG2,
third-tag) and the posting has four (those plus posting-tag):
1/1 a transaction ; A:, TAG2:
; third-tag: a third transaction tag, <- with a value
(a) $1 ; posting-tag:
- Tags are like Ledger's metadata feature, except hledger's tag values
+ Tags are like Ledger's metadata feature, except hledger's tag values
are simple strings.
Directives
+ A directive is a line in the journal beginning with a special keyword,
+ that influences how the journal is processed. hledger's directives are
+ based on a subset of Ledger's, but there are many differences (and also
+ some differences between hledger versions).
+
+ Directives' behaviour and interactions can get a little bit complex, so
+ here is a table summarising the directives and their effects, with
+ links to more detailed docs.
+
+
+ direc- end subdi- purpose can affect (as of
+ tive directive rec- 2018/06)
+ tives
+ -------------------------------------------------------------------------------------------------
+ account any declare an account name & account code: bal-
+ text optional account code ance reports
+ (except balance
+ single-column mode)
+ alias end aliases rewrite account names following
+ inline/included
+ entries until end
+ of current file or
+ end directive
+ apply account end apply account prepend a common parent to following
+ account names inline/included
+ entries until end
+ of current file or
+ end directive
+ comment end comment ignore part of journal following
+ inline/included
+ entries until end
+ of current file or
+ end directive
+ commodity format declare a commodity and its number notation:
+ number notation & display following entries
+ style in that commodity
+ in all files; dis-
+ play style: amounts
+ of that commodity
+ in reports
+ D declare a commodity, number commodity: all com-
+ notation & display style for modityless entries
+ commodityless amounts in all files; num-
+ ber notation: fol-
+ lowing commodity-
+ less entries and
+ entries in that
+ commodity in all
+ files; display
+ style: amounts of
+ that commodity in
+ reports
+ include include entries/directives what the included
+ from another file directives affect
+ P declare a market price for a amounts of that
+ commodity commodity in
+ reports, when -V is
+ used
+ Y declare a year for yearless following
+ dates inline/included
+ entries until end
+ of current file
+
+ And some definitions:
+
+
+ subdirec- optional indented directive or unparsed text lines immedi-
+ tive ately following a parent directive
+ account numeric code influencing account display order in most bal-
+ code ance reports
+
+
+
+ number how to interpret numbers when parsing journal entries (the
+ notation identity of the decimal separator character). (Currently
+ each commodity can have its own notation, even in the same
+ file.)
+ display how to display amounts of a commodity in reports (symbol side
+ style and spacing, digit groups, decimal separator, decimal places)
+ directive which entries and (when there are multiple files) which files
+ scope are affected by a directive
+
+ As you can see, directives vary in which journal entries and files they
+ affect, and whether they are focussed on input (parsing) or output
+ (reports). Some directives have multiple effects.
+
+ If you have a journal made up of multiple files, or pass multiple -f
+ options on the command line, note that directives which affect input
+ typically last only until the end of their defining file. This pro-
+ vides more simplicity and predictability, eg reports are not changed by
+ writing file options in a different order. It can be surprising at
+ times though.
+
Comment blocks
- A line containing just comment starts a commented region of the file,
+ A line containing just comment starts a commented region of the file,
and a line containing just end comment (or the end of the current file)
ends it. See also comments.
Including other files
- You can pull in the content of additional files by writing an include
+ You can pull in the content of additional files by writing an include
directive, like this:
include path/to/file.journal
- If the path does not begin with a slash, it is relative to the current
+ If the path does not begin with a slash, it is relative to the current
file. Glob patterns (*) are not currently supported.
- The include directive can only be used in journal files. It can
+ The include directive can only be used in journal files. It can
include journal, timeclock or timedot files, but not CSV files.
Default year
- You can set a default year to be used for subsequent dates which don't
- specify a year. This is a line beginning with Y followed by the year.
+ You can set a default year to be used for subsequent dates which don't
+ specify a year. This is a line beginning with Y followed by the year.
Eg:
Y2009 ; set default year to 2009
@@ -663,8 +732,8 @@ FILE FORMAT
assets
Declaring commodities
- The commodity directive declares commodities which may be used in the
- journal (though currently we do not enforce this). It may be written
+ The commodity directive declares commodities which may be used in the
+ journal (though currently we do not enforce this). It may be written
on a single line, like this:
; commodity EXAMPLEAMOUNT
@@ -674,8 +743,8 @@ FILE FORMAT
; separating thousands with comma.
commodity 1,000.0000 AAAA
- or on multiple lines, using the "format" subdirective. In this case
- the commodity symbol appears twice and should be the same in both
+ or on multiple lines, using the "format" subdirective. In this case
+ the commodity symbol appears twice and should be the same in both
places:
; commodity SYMBOL
@@ -687,19 +756,19 @@ FILE FORMAT
commodity INR
format INR 9,99,99,999.00
- Commodity directives have a second purpose: they define the standard
+ Commodity directives have a second purpose: they define the standard
display format for amounts in the commodity. Normally the display for-
- mat is inferred from journal entries, but this can be unpredictable;
- declaring it with a commodity directive overrides this and removes
- ambiguity. Towards this end, amounts in commodity directives must
- always be written with a decimal point (a period or comma, followed by
+ mat is inferred from journal entries, but this can be unpredictable;
+ declaring it with a commodity directive overrides this and removes
+ ambiguity. Towards this end, amounts in commodity directives must
+ always be written with a decimal point (a period or comma, followed by
0 or more decimal digits).
Default commodity
- The D directive sets a default commodity (and display format), to be
+ The D directive sets a default commodity (and display format), to be
used for amounts without a commodity symbol (ie, plain numbers). (Note
- this differs from Ledger's default commodity directive.) The commodity
- and display format will be applied to all subsequent commodity-less
+ this differs from Ledger's default commodity directive.) The commodity
+ and display format will be applied to all subsequent commodity-less
amounts, or until the next D directive.
# commodity-less amounts should be treated as dollars
@@ -713,13 +782,39 @@ FILE FORMAT
As with the commodity directive, the amount must always be written with
a decimal point.
+ Market prices
+ The P directive declares a market price, which is an exchange rate
+ between two commodities on a certain date. (In Ledger, they are called
+ "historical prices".) These are often obtained from a stock exchange,
+ cryptocurrency exchange, or the foreign exchange market.
+
+ Here is the format:
+
+ P DATE COMMODITYA COMMODITYBAMOUNT
+
+ o DATE is a simple date
+
+ o COMMODITYA is the symbol of the commodity being priced
+
+ o COMMODITYBAMOUNT is an amount (symbol and quantity) in a second com-
+ modity, giving the price in commodity B of one unit of commodity A.
+
+ These two market price directives say that one euro was worth 1.35 US
+ dollars during 2009, and $1.40 from 2010 onward:
+
+ P 2009/1/1 € $1.35
+ P 2010/1/1 € $1.40
+
+ The -V/--value flag can be used to convert reported amounts to another
+ commodity using these prices.
+
Declaring accounts
- The account directive predeclares account names. The simplest form is
+ The account directive predeclares account names. The simplest form is
account ACCTNAME, eg:
account assets:bank:checking
- Currently this mainly helps with account name autocompletion in eg
+ Currently this mainly helps with account name autocompletion in eg
hledger add, hledger-iadd, hledger-web, and ledger-mode.
In future it will also help detect misspelled accounts.
@@ -731,14 +826,26 @@ FILE FORMAT
account revenues 4000
account expenses 6000
- This affects account display order in reports: accounts with codes are
- listed before accounts without codes, in increasing code order. (Oth-
- erwise, accounts are listed alphabetically.) Account codes should be
- all numeric digits, unique, and separated from the account name by at
- least two spaces (since account names may contain single spaces). By
- convention, often the first digit indicates the type of account, as in
- this numbering scheme and the example above. In future, we might use
- this to recognize account types.
+ This affects how accounts are sorted in account and balance reports:
+ accounts with codes are listed before accounts without codes, and in
+ increasing code order (instead of listing all accounts alphabetically).
+ Warning, this feature is incomplete; account codes do not yet affect
+ sort order in
+
+ o the accounts command
+
+ o the balance command's single-column mode
+
+ o flat mode balance reports (to work around this, declare account codes
+ on the subaccounts as well).
+
+ o hledger-web's sidebar
+
+ Account codes should be all numeric digits, unique, and separated from
+ the account name by at least two spaces (since account names may con-
+ tain single spaces). By convention, often the first digit indicates
+ the type of account, as in this numbering scheme and the example above.
+ In future, we might use this to recognize account types.
An account directive can also have indented subdirectives following it,
which are currently ignored. Here is the full syntax:
@@ -751,9 +858,8 @@ FILE FORMAT
some-tag:12345
Rewriting accounts
- You can define aliases which rewrite your account names (after reading
- the journal, before generating reports). hledger's account aliases can
- be useful for:
+ You can define account alias rules which rewrite your account names, or
+ parts of them, before generating reports. This can be useful for:
o expanding shorthand account names to their full form, allowing easier
data entry and a less verbose journal
@@ -765,6 +871,10 @@ FILE FORMAT
o customising reports
+ Account aliases also rewrite account names in account directives. They
+ do not affect account names being entered via hledger add or
+ hledger-web.
+
See also Cookbook: Rewrite account names.
Basic aliases
@@ -777,9 +887,9 @@ FILE FORMAT
Or, you can use the --alias 'OLD=NEW' option on the command line. This
affects all entries. It's useful for trying out aliases interactively.
- OLD and NEW are full account names. hledger will replace any occur-
- rence of the old account name with the new one. Subaccounts are also
- affected. Eg:
+ OLD and NEW are case sensitive full account names. hledger will
+ replace any occurrence of the old account name with the new one. Sub-
+ accounts are also affected. Eg:
alias checking = assets:bank:wells fargo:checking
# rewrites "checking" to "assets:bank:wells fargo:checking", or "checking:a" to "assets:bank:wells fargo:checking:a"
@@ -853,43 +963,84 @@ FILE FORMAT
Prior to hledger 1.0, legacy account and end spellings were also sup-
ported.
+ A default parent account also affects account directives. It does not
+ affect account names being entered via hledger add or hledger-web. If
+ account aliases are present, they are applied after the default parent
+ account.
+
Periodic transactions
- Periodic transaction rules (enabled by --forecast or --budget) describe
- recurring transactions. They look like a transaction where the first
- line is a tilde (~) followed by a period expression (mnemonic: ~ is
- like a recurring sine wave):
-
- ~ weekly
- assets:bank:checking $400 ; paycheck
- income:acme inc
-
- Periodic transactions have a dual purpose:
-
- o With --forecast, each periodic transaction rule generates future
- transactions, recurring at the specified interval, which can be seen
- in reports. Forecast transactions begin the day after the latest
- recorded journal transaction (or today, if there are no transactions)
- and end 6 months from today (or at the report end date, if speci-
- fied).
-
- o With --budget (supported by the balance command), each periodic
- transaction rule declares recurring budget goals for the specified
- accounts, which can be seen in budget reports. Eg the example above
- declares the goal of receiving $400 from income:acme inc (and also,
- depositing $400 into assets:bank:checking) every week.
-
- (Actually, you can generate one-off transactions too, by writing a
- period expression with no report interval.)
-
- For more details, see: balance: Budget report and Cookbook: Budgeting
+ Periodic transaction rules describe transactions that recur. They
+ allow you to generate future transactions for forecasting, without hav-
+ ing to write them out explicitly in the journal (with --forecast).
+ Secondly, they can also be used to define budget goals (with --budget).
+
+ A periodic transaction rule looks like a normal journal entry, with the
+ date replaced by a tilde (~) followed by a period expression (mnemonic:
+ ~ looks like a repeating sine wave):
+
+ ~ monthly
+ expenses:rent $2000
+ assets:bank:checking
+
+ There is an additional constraint on the period expression: the start
+ date must fall on a natural boundary of the interval. Eg
+ monthly from 2018/1/1 is valid, but monthly from 2018/1/15 is not.
+
+ If you write a transaction description or same-line comment, it must be
+ separated from the period expression by two or more spaces. Eg:
+
+ ; 2 or more spaces
+ ; ||
+ ; vv
+ ~ every 2 weeks from 2018/6 to 2018/9 paycheck
+ assets:bank:checking $1500
+ income:acme inc
+
+ Forecasting with periodic transactions
+ With the --forecast flag, each periodic transaction rule generates
+ future transactions recurring at the specified interval. These are not
+ saved in the journal, but appear in all reports. They will look like
+ normal transactions, but with an extra tag named recur, whose value is
+ the generating period expression.
+
+ Forecast transactions begin on or after the day after the latest normal
+ (non-periodic) transaction in the journal, or today if there are none.
+
+ They end on or before the report end date if specified, or 180 days
+ from today if unspecified.
+
+ Forecasting can be useful for estimating balances into the future, and
+ experimenting with different scenarios. Note the start date logic
+ means that forecasted transactions are automatically replaced by normal
+ transactions as you add them.
+
+ Forecasting can also help with data entry: describe most of your trans-
+ actions with periodic rules, and every so often copy the output of
+ print --forecast to the journal.
+
+ You can generate one-time transactions too: just write a period expres-
+ sion specifying a date with no report interval. (You could also write
+ a normal transaction with a future date, but remember this disables
+ forecast transactions on previous dates.)
+
+ Budgeting with periodic transactions
+ With the --budget flag, currently supported by the balance command,
+ each periodic transaction rule declares recurring budget goals for the
+ specified accounts. Eg the first example above declares a goal of
+ spending $2000 on rent (and also, a goal of depositing $2000 into
+ checking) every month. Goals and actual performance can then be com-
+ pared in budget reports.
+
+ For more details, see: balance: Budget report and Cookbook: Budgeting
and Forecasting.
Automated postings
- Automated postings (enabled by --auto) are postings added automatically
- by rule to certain transactions. An automated posting rule looks like
- a transaction where the first line is an equal sign (=) followed by a
- query (mnemonic: = tests for matching transactions, and also looks like
- posting lines):
+ Automated posting rules describe extra postings that should be added to
+ certain transactions at report time, when the --auto flag is used.
+
+ An automated posting rule looks like a normal journal entry, except the
+ first line is an equal sign (=) followed by a query (mnemonic: = looks
+ like posting lines):
= expenses:gifts
budget:gifts *-1
@@ -933,12 +1084,13 @@ EDITOR SUPPORT
Editor
--------------------------------------------------------------------------
Emacs http://www.ledger-cli.org/3.0/doc/ledger-mode.html
- Vim https://github.com/ledger/ledger/wiki/Getting-started
+ Vim https://github.com/ledger/vim-ledger
Sublime Text https://github.com/ledger/ledger/wiki/Edit-
ing-Ledger-files-with-Sublime-Text-or-RubyMine
Textmate https://github.com/ledger/ledger/wiki/Using-TextMate-2
Text Wran- https://github.com/ledger/ledger/wiki/Edit-
gler ing-Ledger-files-with-TextWrangler
+
Visual Stu- https://marketplace.visualstudio.com/items?item-
dio Code Name=mark-hansen.hledger-vscode
@@ -967,4 +1119,4 @@ SEE ALSO
-hledger 1.9.1 April 2018 hledger_journal(5)
+hledger 1.9.99 June 2018 hledger_journal(5)
diff --git a/hledger_timeclock.5 b/hledger_timeclock.5
index 98f36e3..1c8702a 100644
--- a/hledger_timeclock.5
+++ b/hledger_timeclock.5
@@ -1,5 +1,5 @@
-.TH "hledger_timeclock" "5" "April 2018" "hledger 1.9.1" "hledger User Manuals"
+.TH "hledger_timeclock" "5" "June 2018" "hledger 1.9.99" "hledger User Manuals"
diff --git a/hledger_timeclock.info b/hledger_timeclock.info
index 780b0aa..49a6222 100644
--- a/hledger_timeclock.info
+++ b/hledger_timeclock.info
@@ -4,8 +4,8 @@ stdin.

File: hledger_timeclock.info, Node: Top, Up: (dir)
-hledger_timeclock(5) hledger 1.9.1
-**********************************
+hledger_timeclock(5) hledger 1.9.99
+***********************************
hledger can read timeclock files. As with Ledger, these are (a subset
of) timeclock.el's format, containing clock-in and clock-out entries as
diff --git a/hledger_timeclock.txt b/hledger_timeclock.txt
index d57f4f6..0f694a3 100644
--- a/hledger_timeclock.txt
+++ b/hledger_timeclock.txt
@@ -77,4 +77,4 @@ SEE ALSO
-hledger 1.9.1 April 2018 hledger_timeclock(5)
+hledger 1.9.99 June 2018 hledger_timeclock(5)
diff --git a/hledger_timedot.5 b/hledger_timedot.5
index ea203c7..f389f17 100644
--- a/hledger_timedot.5
+++ b/hledger_timedot.5
@@ -1,5 +1,5 @@
-.TH "hledger_timedot" "5" "April 2018" "hledger 1.9.1" "hledger User Manuals"
+.TH "hledger_timedot" "5" "June 2018" "hledger 1.9.99" "hledger User Manuals"
diff --git a/hledger_timedot.info b/hledger_timedot.info
index 96268f9..400ba21 100644
--- a/hledger_timedot.info
+++ b/hledger_timedot.info
@@ -4,8 +4,8 @@ stdin.

File: hledger_timedot.info, Node: Top, Next: FILE FORMAT, Up: (dir)
-hledger_timedot(5) hledger 1.9.1
-********************************
+hledger_timedot(5) hledger 1.9.99
+*********************************
Timedot is a plain text format for logging dated, categorised quantities
(of time, usually), supported by hledger. It is convenient for
@@ -110,7 +110,7 @@ $ hledger -f t.timedot --alias /\\./=: bal date:2016/2/4

Tag Table:
Node: Top76
-Node: FILE FORMAT809
-Ref: #file-format910
+Node: FILE FORMAT811
+Ref: #file-format912

End Tag Table
diff --git a/hledger_timedot.txt b/hledger_timedot.txt
index 6ba0028..23caf20 100644
--- a/hledger_timedot.txt
+++ b/hledger_timedot.txt
@@ -124,4 +124,4 @@ SEE ALSO
-hledger 1.9.1 April 2018 hledger_timedot(5)
+hledger 1.9.99 June 2018 hledger_timedot(5)
diff --git a/tests/doctests.hs b/tests/doctests.hs
index f4f02fb..c08e0da 100644
--- a/tests/doctests.hs
+++ b/tests/doctests.hs
@@ -8,4 +8,7 @@ main = do
fs1 <- glob "Hledger/**/*.hs"
fs2 <- glob "Text/**/*.hs"
--fs3 <- glob "other/ledger-parse/**/*.hs"
- doctest $ filter (not . isInfixOf "/.") $ ["Hledger.hs"] ++ fs1 ++ fs2
+ let fs = filter (not . isInfixOf "/.") $ ["Hledger.hs"] ++ fs1 ++ fs2
+ doctest $
+ "--fast" : -- https://github.com/sol/doctest#a-note-on-performance
+ fs